diff --git a/Makefile.am b/Makefile.am
index 5d06719..c6573c4 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -2,11 +2,15 @@ ACLOCAL_AMFLAGS=-I m4
DISTCHECK_CONFIGURE_FLAGS = --enable-gtk-doc
+if ! WITH_LIBDB
+LIBDB = libdb
+endif
+
if ENABLE_CALENDAR
CALENDAR_DIR = calendar
endif
-SUBDIRS = libedataserver libebackend servers camel addressbook $(CALENDAR_DIR) libedataserverui docs art po
+SUBDIRS = $(LIBDB) libedataserver libebackend servers camel addressbook $(CALENDAR_DIR) libedataserverui docs art po
DIST_SUBDIRS = libedataserver libebackend servers camel addressbook calendar libedataserverui docs art po
changelogs = \
@@ -32,12 +36,2131 @@ EXTRA_DIST = \
intltool-update.in \
intltool-extract.in \
evolution-data-server-zip.in \
- $(pkgconfig_DATA:-$(API_VERSION).pc=.pc.in)
+ $(pkgconfig_DATA:-$(API_VERSION).pc=.pc.in) \
+ $(LIBDB_FILES)
DISTCLEANFILES = \
iconv-detect.h \
$(pkgconfig_DATA)
+distclean-local:
+ (cd libdb && $(MAKE) $(AM_MAKEFLAGS) distclean)
+
+LIBDB_FILES= \
+libdb/Makefile.am \
+libdb/Makefile.in \
+libdb/dist/NO-AUTO-GEN \
+libdb/LICENSE \
+libdb/README \
+libdb/btree/bt_compare.c \
+libdb/btree/bt_conv.c \
+libdb/btree/bt_curadj.c \
+libdb/btree/bt_cursor.c \
+libdb/btree/bt_delete.c \
+libdb/btree/bt_method.c \
+libdb/btree/bt_open.c \
+libdb/btree/bt_put.c \
+libdb/btree/bt_rec.c \
+libdb/btree/bt_reclaim.c \
+libdb/btree/bt_recno.c \
+libdb/btree/bt_rsearch.c \
+libdb/btree/bt_search.c \
+libdb/btree/bt_split.c \
+libdb/btree/bt_stat.c \
+libdb/btree/bt_upgrade.c \
+libdb/btree/bt_verify.c \
+libdb/btree/btree.src \
+libdb/btree/btree_auto.c \
+libdb/build_unix/.IGNORE_ME \
+libdb/build_vxworks/BerkeleyDB.wpj \
+libdb/build_vxworks/BerkeleyDB.wsp \
+libdb/build_vxworks/db.h \
+libdb/build_vxworks/db_config.h \
+libdb/build_vxworks/db_int.h \
+libdb/build_vxworks/BerkeleyDB/Makefile.custom \
+libdb/build_vxworks/BerkeleyDB/component.cdf \
+libdb/build_vxworks/BerkeleyDB/component.wpj \
+libdb/build_vxworks/db_archive/db_archive.c \
+libdb/build_vxworks/db_archive/db_archive.wpj \
+libdb/build_vxworks/db_archive/db_archive/Makefile.custom \
+libdb/build_vxworks/db_archive/db_archive/component.cdf \
+libdb/build_vxworks/db_archive/db_archive/component.wpj \
+libdb/build_vxworks/db_checkpoint/db_checkpoint.c \
+libdb/build_vxworks/db_checkpoint/db_checkpoint.wpj \
+libdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom \
+libdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf \
+libdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj \
+libdb/build_vxworks/db_deadlock/db_deadlock.c \
+libdb/build_vxworks/db_deadlock/db_deadlock.wpj \
+libdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom \
+libdb/build_vxworks/db_deadlock/db_deadlock/component.cdf \
+libdb/build_vxworks/db_deadlock/db_deadlock/component.wpj \
+libdb/build_vxworks/db_dump/db_dump.c \
+libdb/build_vxworks/db_dump/db_dump.wpj \
+libdb/build_vxworks/db_dump/db_dump/Makefile.custom \
+libdb/build_vxworks/db_dump/db_dump/component.cdf \
+libdb/build_vxworks/db_dump/db_dump/component.wpj \
+libdb/build_vxworks/db_load/db_load.c \
+libdb/build_vxworks/db_load/db_load.wpj \
+libdb/build_vxworks/db_load/db_load/Makefile.custom \
+libdb/build_vxworks/db_load/db_load/component.cdf \
+libdb/build_vxworks/db_load/db_load/component.wpj \
+libdb/build_vxworks/db_printlog/db_printlog.c \
+libdb/build_vxworks/db_printlog/db_printlog.wpj \
+libdb/build_vxworks/db_printlog/db_printlog/Makefile.custom \
+libdb/build_vxworks/db_printlog/db_printlog/component.cdf \
+libdb/build_vxworks/db_printlog/db_printlog/component.wpj \
+libdb/build_vxworks/db_recover/db_recover.c \
+libdb/build_vxworks/db_recover/db_recover.wpj \
+libdb/build_vxworks/db_recover/db_recover/Makefile.custom \
+libdb/build_vxworks/db_recover/db_recover/component.cdf \
+libdb/build_vxworks/db_recover/db_recover/component.wpj \
+libdb/build_vxworks/db_stat/db_stat.c \
+libdb/build_vxworks/db_stat/db_stat.wpj \
+libdb/build_vxworks/db_stat/db_stat/Makefile.custom \
+libdb/build_vxworks/db_stat/db_stat/component.cdf \
+libdb/build_vxworks/db_stat/db_stat/component.wpj \
+libdb/build_vxworks/db_upgrade/db_upgrade.c \
+libdb/build_vxworks/db_upgrade/db_upgrade.wpj \
+libdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom \
+libdb/build_vxworks/db_upgrade/db_upgrade/component.cdf \
+libdb/build_vxworks/db_upgrade/db_upgrade/component.wpj \
+libdb/build_vxworks/db_verify/db_verify.c \
+libdb/build_vxworks/db_verify/db_verify.wpj \
+libdb/build_vxworks/db_verify/db_verify/Makefile.custom \
+libdb/build_vxworks/db_verify/db_verify/component.cdf \
+libdb/build_vxworks/db_verify/db_verify/component.wpj \
+libdb/build_vxworks/dbdemo/README \
+libdb/build_vxworks/dbdemo/dbdemo.c \
+libdb/build_vxworks/dbdemo/dbdemo.wpj \
+libdb/build_vxworks/dbdemo/dbdemo/Makefile.custom \
+libdb/build_vxworks/dbdemo/dbdemo/component.cdf \
+libdb/build_vxworks/dbdemo/dbdemo/component.wpj \
+libdb/build_win32/Berkeley_DB.dsw \
+libdb/build_win32/app_dsp.src \
+libdb/build_win32/build_all.dsp \
+libdb/build_win32/db.h \
+libdb/build_win32/db_archive.dsp \
+libdb/build_win32/db_checkpoint.dsp \
+libdb/build_win32/db_config.h \
+libdb/build_win32/db_cxx.h \
+libdb/build_win32/db_deadlock.dsp \
+libdb/build_win32/db_dll.dsp \
+libdb/build_win32/db_dump.dsp \
+libdb/build_win32/db_int.h \
+libdb/build_win32/db_java.dsp \
+libdb/build_win32/db_java_xa.dsp \
+libdb/build_win32/db_java_xaj.mak \
+libdb/build_win32/db_lib.dsp \
+libdb/build_win32/db_load.dsp \
+libdb/build_win32/db_perf.dsp \
+libdb/build_win32/db_printlog.dsp \
+libdb/build_win32/db_recover.dsp \
+libdb/build_win32/db_stat.dsp \
+libdb/build_win32/db_static.dsp \
+libdb/build_win32/db_tcl.dsp \
+libdb/build_win32/db_test.dsp \
+libdb/build_win32/db_test.src \
+libdb/build_win32/db_upgrade.dsp \
+libdb/build_win32/db_verify.dsp \
+libdb/build_win32/dbkill.cpp \
+libdb/build_win32/dllmain.c \
+libdb/build_win32/dynamic_dsp.src \
+libdb/build_win32/ex_access.dsp \
+libdb/build_win32/ex_btrec.dsp \
+libdb/build_win32/ex_env.dsp \
+libdb/build_win32/ex_lock.dsp \
+libdb/build_win32/ex_mpool.dsp \
+libdb/build_win32/ex_tpcb.dsp \
+libdb/build_win32/excxx_access.dsp \
+libdb/build_win32/excxx_btrec.dsp \
+libdb/build_win32/excxx_env.dsp \
+libdb/build_win32/excxx_lock.dsp \
+libdb/build_win32/excxx_mpool.dsp \
+libdb/build_win32/excxx_tpcb.dsp \
+libdb/build_win32/include.tcl \
+libdb/build_win32/java_dsp.src \
+libdb/build_win32/libdb.def \
+libdb/build_win32/libdb.rc \
+libdb/build_win32/libdb_tcl.def \
+libdb/build_win32/libdbrc.src \
+libdb/build_win32/srcfile_dsp.src \
+libdb/build_win32/static_dsp.src \
+libdb/build_win32/tcl_dsp.src \
+libdb/clib/getcwd.c \
+libdb/clib/getopt.c \
+libdb/clib/memcmp.c \
+libdb/clib/memmove.c \
+libdb/clib/raise.c \
+libdb/clib/snprintf.c \
+libdb/clib/strcasecmp.c \
+libdb/clib/strdup.c \
+libdb/clib/strerror.c \
+libdb/clib/vsnprintf.c \
+libdb/common/db_byteorder.c \
+libdb/common/db_err.c \
+libdb/common/db_getlong.c \
+libdb/common/db_idspace.c \
+libdb/common/db_log2.c \
+libdb/common/util_arg.c \
+libdb/common/util_cache.c \
+libdb/common/util_log.c \
+libdb/common/util_sig.c \
+libdb/cxx/cxx_db.cpp \
+libdb/cxx/cxx_dbc.cpp \
+libdb/cxx/cxx_dbt.cpp \
+libdb/cxx/cxx_env.cpp \
+libdb/cxx/cxx_except.cpp \
+libdb/cxx/cxx_lock.cpp \
+libdb/cxx/cxx_logc.cpp \
+libdb/cxx/cxx_mpool.cpp \
+libdb/cxx/cxx_txn.cpp \
+libdb/db/crdel.src \
+libdb/db/crdel_auto.c \
+libdb/db/crdel_rec.c \
+libdb/db/db.c \
+libdb/db/db.src \
+libdb/db/db_am.c \
+libdb/db/db_auto.c \
+libdb/db/db_cam.c \
+libdb/db/db_conv.c \
+libdb/db/db_dispatch.c \
+libdb/db/db_dup.c \
+libdb/db/db_iface.c \
+libdb/db/db_join.c \
+libdb/db/db_meta.c \
+libdb/db/db_method.c \
+libdb/db/db_open.c \
+libdb/db/db_overflow.c \
+libdb/db/db_pr.c \
+libdb/db/db_rec.c \
+libdb/db/db_reclaim.c \
+libdb/db/db_remove.c \
+libdb/db/db_rename.c \
+libdb/db/db_ret.c \
+libdb/db/db_truncate.c \
+libdb/db/db_upg.c \
+libdb/db/db_upg_opd.c \
+libdb/db/db_vrfy.c \
+libdb/db/db_vrfyutil.c \
+libdb/db185/db185.c \
+libdb/db185/db185_int.in \
+libdb/db_archive/db_archive.c \
+libdb/db_checkpoint/db_checkpoint.c \
+libdb/db_deadlock/db_deadlock.c \
+libdb/db_dump/db_dump.c \
+libdb/db_dump185/db_dump185.c \
+libdb/db_load/db_load.c \
+libdb/db_printlog/README \
+libdb/db_printlog/commit.awk \
+libdb/db_printlog/count.awk \
+libdb/db_printlog/db_printlog.c \
+libdb/db_printlog/dbname.awk \
+libdb/db_printlog/fileid.awk \
+libdb/db_printlog/logstat.awk \
+libdb/db_printlog/pgno.awk \
+libdb/db_printlog/range.awk \
+libdb/db_printlog/rectype.awk \
+libdb/db_printlog/status.awk \
+libdb/db_printlog/txn.awk \
+libdb/db_recover/db_recover.c \
+libdb/db_stat/db_stat.c \
+libdb/db_upgrade/db_upgrade.c \
+libdb/db_verify/db_verify.c \
+libdb/dbinc/btree.h \
+libdb/dbinc/crypto.h \
+libdb/dbinc/cxx_common.h \
+libdb/dbinc/cxx_except.h \
+libdb/dbinc/cxx_int.h \
+libdb/dbinc/db.in \
+libdb/dbinc/db_185.in \
+libdb/dbinc/db_am.h \
+libdb/dbinc/db_cxx.in \
+libdb/dbinc/db_dispatch.h \
+libdb/dbinc/db_int.in \
+libdb/dbinc/db_join.h \
+libdb/dbinc/db_page.h \
+libdb/dbinc/db_server_int.h \
+libdb/dbinc/db_shash.h \
+libdb/dbinc/db_swap.h \
+libdb/dbinc/db_upgrade.h \
+libdb/dbinc/db_verify.h \
+libdb/dbinc/debug.h \
+libdb/dbinc/fop.h \
+libdb/dbinc/globals.h \
+libdb/dbinc/hash.h \
+libdb/dbinc/hmac.h \
+libdb/dbinc/lock.h \
+libdb/dbinc/log.h \
+libdb/dbinc/mp.h \
+libdb/dbinc/mutex.h \
+libdb/dbinc/os.h \
+libdb/dbinc/qam.h \
+libdb/dbinc/queue.h \
+libdb/dbinc/region.h \
+libdb/dbinc/rep.h \
+libdb/dbinc/shqueue.h \
+libdb/dbinc/tcl_db.h \
+libdb/dbinc/txn.h \
+libdb/dbinc/xa.h \
+libdb/dbinc_auto/btree_auto.h \
+libdb/dbinc_auto/btree_ext.h \
+libdb/dbinc_auto/clib_ext.h \
+libdb/dbinc_auto/common_ext.h \
+libdb/dbinc_auto/crdel_auto.h \
+libdb/dbinc_auto/crypto_ext.h \
+libdb/dbinc_auto/db_auto.h \
+libdb/dbinc_auto/db_ext.h \
+libdb/dbinc_auto/db_server.h \
+libdb/dbinc_auto/dbreg_auto.h \
+libdb/dbinc_auto/dbreg_ext.h \
+libdb/dbinc_auto/env_ext.h \
+libdb/dbinc_auto/ext_185_def.in \
+libdb/dbinc_auto/ext_185_prot.in \
+libdb/dbinc_auto/ext_def.in \
+libdb/dbinc_auto/ext_prot.in \
+libdb/dbinc_auto/fileops_auto.h \
+libdb/dbinc_auto/fileops_ext.h \
+libdb/dbinc_auto/hash_auto.h \
+libdb/dbinc_auto/hash_ext.h \
+libdb/dbinc_auto/hmac_ext.h \
+libdb/dbinc_auto/int_def.in \
+libdb/dbinc_auto/lock_ext.h \
+libdb/dbinc_auto/log_ext.h \
+libdb/dbinc_auto/mp_ext.h \
+libdb/dbinc_auto/mutex_ext.h \
+libdb/dbinc_auto/os_ext.h \
+libdb/dbinc_auto/qam_auto.h \
+libdb/dbinc_auto/qam_ext.h \
+libdb/dbinc_auto/rep_ext.h \
+libdb/dbinc_auto/rpc_client_ext.h \
+libdb/dbinc_auto/rpc_defs.in \
+libdb/dbinc_auto/rpc_server_ext.h \
+libdb/dbinc_auto/tcl_ext.h \
+libdb/dbinc_auto/txn_auto.h \
+libdb/dbinc_auto/txn_ext.h \
+libdb/dbinc_auto/xa_ext.h \
+libdb/dbm/dbm.c \
+libdb/dbreg/dbreg.c \
+libdb/dbreg/dbreg.src \
+libdb/dbreg/dbreg_auto.c \
+libdb/dbreg/dbreg_rec.c \
+libdb/dbreg/dbreg_util.c \
+libdb/env/db_salloc.c \
+libdb/env/db_shash.c \
+libdb/env/env_file.c \
+libdb/env/env_method.c \
+libdb/env/env_open.c \
+libdb/env/env_recover.c \
+libdb/env/env_region.c \
+libdb/dist/Makefile.in \
+libdb/dist/RELEASE \
+libdb/dist/buildrel \
+libdb/dist/config.guess \
+libdb/dist/config.hin \
+libdb/dist/config.sub \
+libdb/dist/configure \
+libdb/dist/configure.ac \
+libdb/dist/db.ecd.in \
+libdb/dist/db.spec.in \
+libdb/dist/gen_inc.awk \
+libdb/dist/gen_rec.awk \
+libdb/dist/gen_rpc.awk \
+libdb/dist/install-sh \
+libdb/dist/ltmain.sh \
+libdb/dist/pubdef.in \
+libdb/dist/s_all \
+libdb/dist/s_config \
+libdb/dist/s_crypto \
+libdb/dist/s_include \
+libdb/dist/s_java \
+libdb/dist/s_javah \
+libdb/dist/s_perm \
+libdb/dist/s_readme \
+libdb/dist/s_recover \
+libdb/dist/s_rpc \
+libdb/dist/s_symlink \
+libdb/dist/s_tags \
+libdb/dist/s_test \
+libdb/dist/s_vxworks \
+libdb/dist/s_win32 \
+libdb/dist/s_win32_dsp \
+libdb/dist/srcfiles.in \
+libdb/dist/tags \
+libdb/dist/vx_buildcd \
+libdb/dist/vx_config.in \
+libdb/dist/win_config.in \
+libdb/dist/win_exports.in \
+libdb/dist/aclocal/config.ac \
+libdb/dist/aclocal/cxx.ac \
+libdb/dist/aclocal/gcc.ac \
+libdb/dist/aclocal/libtool.ac \
+libdb/dist/aclocal/mutex.ac \
+libdb/dist/aclocal/options.ac \
+libdb/dist/aclocal/programs.ac \
+libdb/dist/aclocal/sosuffix.ac \
+libdb/dist/aclocal/tcl.ac \
+libdb/dist/aclocal/types.ac \
+libdb/dist/aclocal_java/ac_check_class.ac \
+libdb/dist/aclocal_java/ac_check_classpath.ac \
+libdb/dist/aclocal_java/ac_check_junit.ac \
+libdb/dist/aclocal_java/ac_check_rqrd_class.ac \
+libdb/dist/aclocal_java/ac_java_options.ac \
+libdb/dist/aclocal_java/ac_jni_include_dirs.ac \
+libdb/dist/aclocal_java/ac_prog_jar.ac \
+libdb/dist/aclocal_java/ac_prog_java.ac \
+libdb/dist/aclocal_java/ac_prog_java_works.ac \
+libdb/dist/aclocal_java/ac_prog_javac.ac \
+libdb/dist/aclocal_java/ac_prog_javac_works.ac \
+libdb/dist/aclocal_java/ac_prog_javadoc.ac \
+libdb/dist/aclocal_java/ac_prog_javah.ac \
+libdb/dist/aclocal_java/ac_try_compile_java.ac \
+libdb/dist/aclocal_java/ac_try_run_javac.ac \
+libdb/dist/template/db_server_proc \
+libdb/dist/template/gen_client_ret \
+libdb/dist/template/rec_btree \
+libdb/dist/template/rec_crdel \
+libdb/dist/template/rec_ctemp \
+libdb/dist/template/rec_db \
+libdb/dist/template/rec_dbreg \
+libdb/dist/template/rec_fileops \
+libdb/dist/template/rec_hash \
+libdb/dist/template/rec_qam \
+libdb/dist/template/rec_txn \
+libdb/dist/vx_2.0/BerkeleyDB.wpj \
+libdb/dist/vx_2.0/wpj.in \
+libdb/dist/vx_3.1/Makefile.custom \
+libdb/dist/vx_3.1/cdf.1 \
+libdb/dist/vx_3.1/cdf.2 \
+libdb/dist/vx_3.1/cdf.3 \
+libdb/dist/vx_3.1/component.cdf \
+libdb/dist/vx_3.1/component.wpj \
+libdb/dist/vx_3.1/wpj.1 \
+libdb/dist/vx_3.1/wpj.2 \
+libdb/dist/vx_3.1/wpj.3 \
+libdb/dist/vx_3.1/wpj.4 \
+libdb/dist/vx_3.1/wpj.5 \
+libdb/dist/vx_setup/CONFIG.in \
+libdb/dist/vx_setup/LICENSE.TXT \
+libdb/dist/vx_setup/MESSAGES.TCL \
+libdb/dist/vx_setup/README.in \
+libdb/dist/vx_setup/SETUP.BMP \
+libdb/dist/vx_setup/vx_allfile.in \
+libdb/dist/vx_setup/vx_demofile.in \
+libdb/dist/vx_setup/vx_setup.in \
+libdb/dist/config.log \
+libdb/dist/config.status \
+libdb/dist/Makefile \
+libdb/dist/db_cxx.h \
+libdb/dist/db_int.h \
+libdb/dist/libtool \
+libdb/dist/include.tcl \
+libdb/dist/db.h \
+libdb/dist/db_config.h \
+libdb/examples_c/README \
+libdb/examples_c/bench_001.c \
+libdb/examples_c/ex_access.c \
+libdb/examples_c/ex_btrec.c \
+libdb/examples_c/ex_dbclient.c \
+libdb/examples_c/ex_env.c \
+libdb/examples_c/ex_lock.c \
+libdb/examples_c/ex_mpool.c \
+libdb/examples_c/ex_thread.c \
+libdb/examples_c/ex_tpcb.c \
+libdb/examples_c/ex_tpcb.h \
+libdb/examples_c/ex_apprec/auto_rebuild \
+libdb/examples_c/ex_apprec/ex_apprec.c \
+libdb/examples_c/ex_apprec/ex_apprec.h \
+libdb/examples_c/ex_apprec/ex_apprec.src \
+libdb/examples_c/ex_apprec/ex_apprec_auto.c \
+libdb/examples_c/ex_apprec/ex_apprec_auto.h \
+libdb/examples_c/ex_apprec/ex_apprec_rec.c \
+libdb/examples_c/ex_apprec/ex_apprec_template \
+libdb/examples_c/ex_repquote/ex_repquote.h \
+libdb/examples_c/ex_repquote/ex_rq_client.c \
+libdb/examples_c/ex_repquote/ex_rq_main.c \
+libdb/examples_c/ex_repquote/ex_rq_master.c \
+libdb/examples_c/ex_repquote/ex_rq_net.c \
+libdb/examples_c/ex_repquote/ex_rq_util.c \
+libdb/examples_cxx/AccessExample.cpp \
+libdb/examples_cxx/BtRecExample.cpp \
+libdb/examples_cxx/EnvExample.cpp \
+libdb/examples_cxx/LockExample.cpp \
+libdb/examples_cxx/MpoolExample.cpp \
+libdb/examples_cxx/TpcbExample.cpp \
+libdb/fileops/fileops.src \
+libdb/fileops/fileops_auto.c \
+libdb/fileops/fop_basic.c \
+libdb/fileops/fop_rec.c \
+libdb/fileops/fop_util.c \
+libdb/hash/hash.c \
+libdb/hash/hash.src \
+libdb/hash/hash_auto.c \
+libdb/hash/hash_conv.c \
+libdb/hash/hash_dup.c \
+libdb/hash/hash_func.c \
+libdb/hash/hash_meta.c \
+libdb/hash/hash_method.c \
+libdb/hash/hash_open.c \
+libdb/hash/hash_page.c \
+libdb/hash/hash_rec.c \
+libdb/hash/hash_reclaim.c \
+libdb/hash/hash_stat.c \
+libdb/hash/hash_upgrade.c \
+libdb/hash/hash_verify.c \
+libdb/hmac/hmac.c \
+libdb/hmac/sha1.c \
+libdb/hsearch/hsearch.c \
+libdb/java/src/com/sleepycat/db/Db.java \
+libdb/java/src/com/sleepycat/db/DbAppDispatch.java \
+libdb/java/src/com/sleepycat/db/DbAppendRecno.java \
+libdb/java/src/com/sleepycat/db/DbBtreeCompare.java \
+libdb/java/src/com/sleepycat/db/DbBtreePrefix.java \
+libdb/java/src/com/sleepycat/db/DbBtreeStat.java \
+libdb/java/src/com/sleepycat/db/DbClient.java \
+libdb/java/src/com/sleepycat/db/DbConstants.java \
+libdb/java/src/com/sleepycat/db/DbDeadlockException.java \
+libdb/java/src/com/sleepycat/db/DbDupCompare.java \
+libdb/java/src/com/sleepycat/db/DbEnv.java \
+libdb/java/src/com/sleepycat/db/DbEnvFeedback.java \
+libdb/java/src/com/sleepycat/db/DbErrcall.java \
+libdb/java/src/com/sleepycat/db/DbException.java \
+libdb/java/src/com/sleepycat/db/DbFeedback.java \
+libdb/java/src/com/sleepycat/db/DbHash.java \
+libdb/java/src/com/sleepycat/db/DbHashStat.java \
+libdb/java/src/com/sleepycat/db/DbKeyRange.java \
+libdb/java/src/com/sleepycat/db/DbLock.java \
+libdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java \
+libdb/java/src/com/sleepycat/db/DbLockRequest.java \
+libdb/java/src/com/sleepycat/db/DbLockStat.java \
+libdb/java/src/com/sleepycat/db/DbLogStat.java \
+libdb/java/src/com/sleepycat/db/DbLogc.java \
+libdb/java/src/com/sleepycat/db/DbLsn.java \
+libdb/java/src/com/sleepycat/db/DbMemoryException.java \
+libdb/java/src/com/sleepycat/db/DbMpoolFStat.java \
+libdb/java/src/com/sleepycat/db/DbMpoolStat.java \
+libdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java \
+libdb/java/src/com/sleepycat/db/DbMultipleIterator.java \
+libdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java \
+libdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java \
+libdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java \
+libdb/java/src/com/sleepycat/db/DbPreplist.java \
+libdb/java/src/com/sleepycat/db/DbQueueStat.java \
+libdb/java/src/com/sleepycat/db/DbRepStat.java \
+libdb/java/src/com/sleepycat/db/DbRepTransport.java \
+libdb/java/src/com/sleepycat/db/DbRunRecoveryException.java \
+libdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java \
+libdb/java/src/com/sleepycat/db/DbTxn.java \
+libdb/java/src/com/sleepycat/db/DbTxnStat.java \
+libdb/java/src/com/sleepycat/db/DbUtil.java \
+libdb/java/src/com/sleepycat/db/Dbc.java \
+libdb/java/src/com/sleepycat/db/Dbt.java \
+libdb/java/src/com/sleepycat/db/xa/DbXAResource.java \
+libdb/java/src/com/sleepycat/db/xa/DbXid.java \
+libdb/java/src/com/sleepycat/examples/AccessExample.java \
+libdb/java/src/com/sleepycat/examples/BtRecExample.java \
+libdb/java/src/com/sleepycat/examples/BulkAccessExample.java \
+libdb/java/src/com/sleepycat/examples/EnvExample.java \
+libdb/java/src/com/sleepycat/examples/LockExample.java \
+libdb/java/src/com/sleepycat/examples/TpcbExample.java \
+libdb/libdb_java/checkapi.prl \
+libdb/libdb_java/com_sleepycat_db_Db.h \
+libdb/libdb_java/com_sleepycat_db_DbEnv.h \
+libdb/libdb_java/com_sleepycat_db_DbLock.h \
+libdb/libdb_java/com_sleepycat_db_DbLogc.h \
+libdb/libdb_java/com_sleepycat_db_DbLsn.h \
+libdb/libdb_java/com_sleepycat_db_DbTxn.h \
+libdb/libdb_java/com_sleepycat_db_DbUtil.h \
+libdb/libdb_java/com_sleepycat_db_Dbc.h \
+libdb/libdb_java/com_sleepycat_db_Dbt.h \
+libdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h \
+libdb/libdb_java/java_Db.c \
+libdb/libdb_java/java_DbEnv.c \
+libdb/libdb_java/java_DbLock.c \
+libdb/libdb_java/java_DbLogc.c \
+libdb/libdb_java/java_DbLsn.c \
+libdb/libdb_java/java_DbTxn.c \
+libdb/libdb_java/java_DbUtil.c \
+libdb/libdb_java/java_DbXAResource.c \
+libdb/libdb_java/java_Dbc.c \
+libdb/libdb_java/java_Dbt.c \
+libdb/libdb_java/java_info.c \
+libdb/libdb_java/java_info.h \
+libdb/libdb_java/java_locked.c \
+libdb/libdb_java/java_locked.h \
+libdb/libdb_java/java_stat_auto.c \
+libdb/libdb_java/java_stat_auto.h \
+libdb/libdb_java/java_util.c \
+libdb/libdb_java/java_util.h \
+libdb/lock/Design \
+libdb/lock/lock.c \
+libdb/lock/lock_deadlock.c \
+libdb/lock/lock_method.c \
+libdb/lock/lock_region.c \
+libdb/lock/lock_stat.c \
+libdb/lock/lock_util.c \
+libdb/log/log.c \
+libdb/log/log_archive.c \
+libdb/log/log_compare.c \
+libdb/log/log_get.c \
+libdb/log/log_method.c \
+libdb/log/log_put.c \
+libdb/mp/mp_alloc.c \
+libdb/mp/mp_bh.c \
+libdb/mp/mp_fget.c \
+libdb/mp/mp_fopen.c \
+libdb/mp/mp_fput.c \
+libdb/mp/mp_fset.c \
+libdb/mp/mp_method.c \
+libdb/mp/mp_region.c \
+libdb/mp/mp_register.c \
+libdb/mp/mp_stat.c \
+libdb/mp/mp_sync.c \
+libdb/mp/mp_trickle.c \
+libdb/mutex/README \
+libdb/mutex/mut_fcntl.c \
+libdb/mutex/mut_pthread.c \
+libdb/mutex/mut_tas.c \
+libdb/mutex/mut_win32.c \
+libdb/mutex/mutex.c \
+libdb/mutex/tm.c \
+libdb/mutex/uts4_cc.s \
+libdb/os/os_abs.c \
+libdb/os/os_alloc.c \
+libdb/os/os_clock.c \
+libdb/os/os_config.c \
+libdb/os/os_dir.c \
+libdb/os/os_errno.c \
+libdb/os/os_fid.c \
+libdb/os/os_fsync.c \
+libdb/os/os_handle.c \
+libdb/os/os_id.c \
+libdb/os/os_map.c \
+libdb/os/os_method.c \
+libdb/os/os_oflags.c \
+libdb/os/os_open.c \
+libdb/os/os_region.c \
+libdb/os/os_rename.c \
+libdb/os/os_root.c \
+libdb/os/os_rpath.c \
+libdb/os/os_rw.c \
+libdb/os/os_seek.c \
+libdb/os/os_sleep.c \
+libdb/os/os_spin.c \
+libdb/os/os_stat.c \
+libdb/os/os_tmpdir.c \
+libdb/os/os_unlink.c \
+libdb/os_vxworks/os_vx_abs.c \
+libdb/os_vxworks/os_vx_config.c \
+libdb/os_vxworks/os_vx_map.c \
+libdb/os_win32/os_abs.c \
+libdb/os_win32/os_clock.c \
+libdb/os_win32/os_config.c \
+libdb/os_win32/os_dir.c \
+libdb/os_win32/os_errno.c \
+libdb/os_win32/os_fid.c \
+libdb/os_win32/os_fsync.c \
+libdb/os_win32/os_handle.c \
+libdb/os_win32/os_map.c \
+libdb/os_win32/os_open.c \
+libdb/os_win32/os_rename.c \
+libdb/os_win32/os_rw.c \
+libdb/os_win32/os_seek.c \
+libdb/os_win32/os_sleep.c \
+libdb/os_win32/os_spin.c \
+libdb/os_win32/os_stat.c \
+libdb/os_win32/os_type.c \
+libdb/perl/BerkeleyDB/BerkeleyDB.pm \
+libdb/perl/BerkeleyDB/BerkeleyDB.pod \
+libdb/perl/BerkeleyDB/BerkeleyDB.pod.P \
+libdb/perl/BerkeleyDB/BerkeleyDB.xs \
+libdb/perl/BerkeleyDB/Changes \
+libdb/perl/BerkeleyDB/MANIFEST \
+libdb/perl/BerkeleyDB/Makefile.PL \
+libdb/perl/BerkeleyDB/README \
+libdb/perl/BerkeleyDB/Todo \
+libdb/perl/BerkeleyDB/config.in \
+libdb/perl/BerkeleyDB/constants.h \
+libdb/perl/BerkeleyDB/constants.xs \
+libdb/perl/BerkeleyDB/dbinfo \
+libdb/perl/BerkeleyDB/mkconsts \
+libdb/perl/BerkeleyDB/mkpod \
+libdb/perl/BerkeleyDB/ppport.h \
+libdb/perl/BerkeleyDB/scan \
+libdb/perl/BerkeleyDB/typemap \
+libdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm \
+libdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm \
+libdb/perl/BerkeleyDB/hints/dec_osf.pl \
+libdb/perl/BerkeleyDB/hints/irix_6_5.pl \
+libdb/perl/BerkeleyDB/hints/solaris.pl \
+libdb/perl/BerkeleyDB/patches/5.004 \
+libdb/perl/BerkeleyDB/patches/5.004_01 \
+libdb/perl/BerkeleyDB/patches/5.004_02 \
+libdb/perl/BerkeleyDB/patches/5.004_03 \
+libdb/perl/BerkeleyDB/patches/5.004_04 \
+libdb/perl/BerkeleyDB/patches/5.004_05 \
+libdb/perl/BerkeleyDB/patches/5.005 \
+libdb/perl/BerkeleyDB/patches/5.005_01 \
+libdb/perl/BerkeleyDB/patches/5.005_02 \
+libdb/perl/BerkeleyDB/patches/5.005_03 \
+libdb/perl/BerkeleyDB/patches/5.6.0 \
+libdb/perl/BerkeleyDB/t/btree.t \
+libdb/perl/BerkeleyDB/t/db-3.0.t \
+libdb/perl/BerkeleyDB/t/db-3.1.t \
+libdb/perl/BerkeleyDB/t/db-3.2.t \
+libdb/perl/BerkeleyDB/t/db-3.3.t \
+libdb/perl/BerkeleyDB/t/destroy.t \
+libdb/perl/BerkeleyDB/t/env.t \
+libdb/perl/BerkeleyDB/t/examples.t \
+libdb/perl/BerkeleyDB/t/examples.t.T \
+libdb/perl/BerkeleyDB/t/examples3.t \
+libdb/perl/BerkeleyDB/t/examples3.t.T \
+libdb/perl/BerkeleyDB/t/filter.t \
+libdb/perl/BerkeleyDB/t/hash.t \
+libdb/perl/BerkeleyDB/t/join.t \
+libdb/perl/BerkeleyDB/t/mldbm.t \
+libdb/perl/BerkeleyDB/t/queue.t \
+libdb/perl/BerkeleyDB/t/recno.t \
+libdb/perl/BerkeleyDB/t/strict.t \
+libdb/perl/BerkeleyDB/t/subdb.t \
+libdb/perl/BerkeleyDB/t/txn.t \
+libdb/perl/BerkeleyDB/t/unknown.t \
+libdb/perl/BerkeleyDB/t/util.pm \
+libdb/perl/DB_File/Changes \
+libdb/perl/DB_File/DB_File.pm \
+libdb/perl/DB_File/DB_File.xs \
+libdb/perl/DB_File/DB_File_BS \
+libdb/perl/DB_File/MANIFEST \
+libdb/perl/DB_File/Makefile.PL \
+libdb/perl/DB_File/README \
+libdb/perl/DB_File/config.in \
+libdb/perl/DB_File/dbinfo \
+libdb/perl/DB_File/fallback.h \
+libdb/perl/DB_File/fallback.xs \
+libdb/perl/DB_File/ppport.h \
+libdb/perl/DB_File/typemap \
+libdb/perl/DB_File/version.c \
+libdb/perl/DB_File/hints/dynixptx.pl \
+libdb/perl/DB_File/hints/sco.pl \
+libdb/perl/DB_File/patches/5.004 \
+libdb/perl/DB_File/patches/5.004_01 \
+libdb/perl/DB_File/patches/5.004_02 \
+libdb/perl/DB_File/patches/5.004_03 \
+libdb/perl/DB_File/patches/5.004_04 \
+libdb/perl/DB_File/patches/5.004_05 \
+libdb/perl/DB_File/patches/5.005 \
+libdb/perl/DB_File/patches/5.005_01 \
+libdb/perl/DB_File/patches/5.005_02 \
+libdb/perl/DB_File/patches/5.005_03 \
+libdb/perl/DB_File/patches/5.6.0 \
+libdb/perl/DB_File/t/db-btree.t \
+libdb/perl/DB_File/t/db-hash.t \
+libdb/perl/DB_File/t/db-recno.t \
+libdb/qam/qam.c \
+libdb/qam/qam.src \
+libdb/qam/qam_auto.c \
+libdb/qam/qam_conv.c \
+libdb/qam/qam_files.c \
+libdb/qam/qam_method.c \
+libdb/qam/qam_open.c \
+libdb/qam/qam_rec.c \
+libdb/qam/qam_stat.c \
+libdb/qam/qam_upgrade.c \
+libdb/qam/qam_verify.c \
+libdb/rep/rep_method.c \
+libdb/rep/rep_record.c \
+libdb/rep/rep_region.c \
+libdb/rep/rep_util.c \
+libdb/rpc_client/client.c \
+libdb/rpc_client/db_server_clnt.c \
+libdb/rpc_client/gen_client.c \
+libdb/rpc_client/gen_client_ret.c \
+libdb/rpc_server/clsrv.html \
+libdb/rpc_server/db_server.x \
+libdb/rpc_server/rpc.src \
+libdb/rpc_server/c/db_server_proc.c \
+libdb/rpc_server/c/db_server_proc.sed \
+libdb/rpc_server/c/db_server_svc.c \
+libdb/rpc_server/c/db_server_util.c \
+libdb/rpc_server/c/db_server_xdr.c \
+libdb/rpc_server/c/gen_db_server.c \
+libdb/rpc_server/cxx/db_server_cxxproc.cpp \
+libdb/rpc_server/cxx/db_server_cxxutil.cpp \
+libdb/rpc_server/java/DbDispatcher.java \
+libdb/rpc_server/java/DbServer.java \
+libdb/rpc_server/java/FreeList.java \
+libdb/rpc_server/java/LocalIterator.java \
+libdb/rpc_server/java/README \
+libdb/rpc_server/java/RpcDb.java \
+libdb/rpc_server/java/RpcDbEnv.java \
+libdb/rpc_server/java/RpcDbTxn.java \
+libdb/rpc_server/java/RpcDbc.java \
+libdb/rpc_server/java/Timer.java \
+libdb/rpc_server/java/jrpcgen.jar \
+libdb/rpc_server/java/oncrpc.jar \
+libdb/rpc_server/java/s_jrpcgen \
+libdb/rpc_server/java/gen/DbServerStub.java \
+libdb/rpc_server/java/gen/__db_associate_msg.java \
+libdb/rpc_server/java/gen/__db_associate_reply.java \
+libdb/rpc_server/java/gen/__db_bt_maxkey_msg.java \
+libdb/rpc_server/java/gen/__db_bt_maxkey_reply.java \
+libdb/rpc_server/java/gen/__db_bt_minkey_msg.java \
+libdb/rpc_server/java/gen/__db_bt_minkey_reply.java \
+libdb/rpc_server/java/gen/__db_close_msg.java \
+libdb/rpc_server/java/gen/__db_close_reply.java \
+libdb/rpc_server/java/gen/__db_create_msg.java \
+libdb/rpc_server/java/gen/__db_create_reply.java \
+libdb/rpc_server/java/gen/__db_cursor_msg.java \
+libdb/rpc_server/java/gen/__db_cursor_reply.java \
+libdb/rpc_server/java/gen/__db_del_msg.java \
+libdb/rpc_server/java/gen/__db_del_reply.java \
+libdb/rpc_server/java/gen/__db_encrypt_msg.java \
+libdb/rpc_server/java/gen/__db_encrypt_reply.java \
+libdb/rpc_server/java/gen/__db_extentsize_msg.java \
+libdb/rpc_server/java/gen/__db_extentsize_reply.java \
+libdb/rpc_server/java/gen/__db_flags_msg.java \
+libdb/rpc_server/java/gen/__db_flags_reply.java \
+libdb/rpc_server/java/gen/__db_get_msg.java \
+libdb/rpc_server/java/gen/__db_get_reply.java \
+libdb/rpc_server/java/gen/__db_h_ffactor_msg.java \
+libdb/rpc_server/java/gen/__db_h_ffactor_reply.java \
+libdb/rpc_server/java/gen/__db_h_nelem_msg.java \
+libdb/rpc_server/java/gen/__db_h_nelem_reply.java \
+libdb/rpc_server/java/gen/__db_join_msg.java \
+libdb/rpc_server/java/gen/__db_join_reply.java \
+libdb/rpc_server/java/gen/__db_key_range_msg.java \
+libdb/rpc_server/java/gen/__db_key_range_reply.java \
+libdb/rpc_server/java/gen/__db_lorder_msg.java \
+libdb/rpc_server/java/gen/__db_lorder_reply.java \
+libdb/rpc_server/java/gen/__db_open_msg.java \
+libdb/rpc_server/java/gen/__db_open_reply.java \
+libdb/rpc_server/java/gen/__db_pagesize_msg.java \
+libdb/rpc_server/java/gen/__db_pagesize_reply.java \
+libdb/rpc_server/java/gen/__db_pget_msg.java \
+libdb/rpc_server/java/gen/__db_pget_reply.java \
+libdb/rpc_server/java/gen/__db_put_msg.java \
+libdb/rpc_server/java/gen/__db_put_reply.java \
+libdb/rpc_server/java/gen/__db_re_delim_msg.java \
+libdb/rpc_server/java/gen/__db_re_delim_reply.java \
+libdb/rpc_server/java/gen/__db_re_len_msg.java \
+libdb/rpc_server/java/gen/__db_re_len_reply.java \
+libdb/rpc_server/java/gen/__db_re_pad_msg.java \
+libdb/rpc_server/java/gen/__db_re_pad_reply.java \
+libdb/rpc_server/java/gen/__db_remove_msg.java \
+libdb/rpc_server/java/gen/__db_remove_reply.java \
+libdb/rpc_server/java/gen/__db_rename_msg.java \
+libdb/rpc_server/java/gen/__db_rename_reply.java \
+libdb/rpc_server/java/gen/__db_stat_msg.java \
+libdb/rpc_server/java/gen/__db_stat_reply.java \
+libdb/rpc_server/java/gen/__db_sync_msg.java \
+libdb/rpc_server/java/gen/__db_sync_reply.java \
+libdb/rpc_server/java/gen/__db_truncate_msg.java \
+libdb/rpc_server/java/gen/__db_truncate_reply.java \
+libdb/rpc_server/java/gen/__dbc_close_msg.java \
+libdb/rpc_server/java/gen/__dbc_close_reply.java \
+libdb/rpc_server/java/gen/__dbc_count_msg.java \
+libdb/rpc_server/java/gen/__dbc_count_reply.java \
+libdb/rpc_server/java/gen/__dbc_del_msg.java \
+libdb/rpc_server/java/gen/__dbc_del_reply.java \
+libdb/rpc_server/java/gen/__dbc_dup_msg.java \
+libdb/rpc_server/java/gen/__dbc_dup_reply.java \
+libdb/rpc_server/java/gen/__dbc_get_msg.java \
+libdb/rpc_server/java/gen/__dbc_get_reply.java \
+libdb/rpc_server/java/gen/__dbc_pget_msg.java \
+libdb/rpc_server/java/gen/__dbc_pget_reply.java \
+libdb/rpc_server/java/gen/__dbc_put_msg.java \
+libdb/rpc_server/java/gen/__dbc_put_reply.java \
+libdb/rpc_server/java/gen/__env_cachesize_msg.java \
+libdb/rpc_server/java/gen/__env_cachesize_reply.java \
+libdb/rpc_server/java/gen/__env_close_msg.java \
+libdb/rpc_server/java/gen/__env_close_reply.java \
+libdb/rpc_server/java/gen/__env_create_msg.java \
+libdb/rpc_server/java/gen/__env_create_reply.java \
+libdb/rpc_server/java/gen/__env_dbremove_msg.java \
+libdb/rpc_server/java/gen/__env_dbremove_reply.java \
+libdb/rpc_server/java/gen/__env_dbrename_msg.java \
+libdb/rpc_server/java/gen/__env_dbrename_reply.java \
+libdb/rpc_server/java/gen/__env_encrypt_msg.java \
+libdb/rpc_server/java/gen/__env_encrypt_reply.java \
+libdb/rpc_server/java/gen/__env_flags_msg.java \
+libdb/rpc_server/java/gen/__env_flags_reply.java \
+libdb/rpc_server/java/gen/__env_open_msg.java \
+libdb/rpc_server/java/gen/__env_open_reply.java \
+libdb/rpc_server/java/gen/__env_remove_msg.java \
+libdb/rpc_server/java/gen/__env_remove_reply.java \
+libdb/rpc_server/java/gen/__txn_abort_msg.java \
+libdb/rpc_server/java/gen/__txn_abort_reply.java \
+libdb/rpc_server/java/gen/__txn_begin_msg.java \
+libdb/rpc_server/java/gen/__txn_begin_reply.java \
+libdb/rpc_server/java/gen/__txn_commit_msg.java \
+libdb/rpc_server/java/gen/db_server.java \
+libdb/rpc_server/java/gen/__txn_commit_reply.java \
+libdb/rpc_server/java/gen/__txn_discard_msg.java \
+libdb/rpc_server/java/gen/__txn_discard_reply.java \
+libdb/rpc_server/java/gen/__txn_prepare_msg.java \
+libdb/rpc_server/java/gen/__txn_prepare_reply.java \
+libdb/rpc_server/java/gen/__txn_recover_msg.java \
+libdb/rpc_server/java/gen/__txn_recover_reply.java \
+libdb/tcl/tcl_compat.c \
+libdb/tcl/tcl_db.c \
+libdb/tcl/tcl_db_pkg.c \
+libdb/tcl/tcl_dbcursor.c \
+libdb/tcl/tcl_env.c \
+libdb/tcl/tcl_internal.c \
+libdb/tcl/tcl_lock.c \
+libdb/tcl/tcl_log.c \
+libdb/tcl/tcl_mp.c \
+libdb/tcl/tcl_rep.c \
+libdb/tcl/tcl_txn.c \
+libdb/tcl/tcl_util.c \
+libdb/tcl/docs/db.html \
+libdb/tcl/docs/env.html \
+libdb/tcl/docs/historic.html \
+libdb/tcl/docs/index.html \
+libdb/tcl/docs/library.html \
+libdb/tcl/docs/lock.html \
+libdb/tcl/docs/log.html \
+libdb/tcl/docs/mpool.html \
+libdb/tcl/docs/rep.html \
+libdb/tcl/docs/test.html \
+libdb/tcl/docs/txn.html \
+libdb/test/TESTS \
+libdb/test/archive.tcl \
+libdb/test/bigfile001.tcl \
+libdb/test/bigfile002.tcl \
+libdb/test/byteorder.tcl \
+libdb/test/conscript.tcl \
+libdb/test/dbm.tcl \
+libdb/test/dbscript.tcl \
+libdb/test/ddoyscript.tcl \
+libdb/test/ddscript.tcl \
+libdb/test/dead001.tcl \
+libdb/test/dead002.tcl \
+libdb/test/dead003.tcl \
+libdb/test/dead004.tcl \
+libdb/test/dead005.tcl \
+libdb/test/dead006.tcl \
+libdb/test/dead007.tcl \
+libdb/test/env001.tcl \
+libdb/test/env002.tcl \
+libdb/test/env003.tcl \
+libdb/test/env004.tcl \
+libdb/test/env005.tcl \
+libdb/test/env006.tcl \
+libdb/test/env007.tcl \
+libdb/test/env008.tcl \
+libdb/test/env009.tcl \
+libdb/test/env010.tcl \
+libdb/test/env011.tcl \
+libdb/test/hsearch.tcl \
+libdb/test/include.tcl \
+libdb/test/join.tcl \
+libdb/test/lock001.tcl \
+libdb/test/lock002.tcl \
+libdb/test/lock003.tcl \
+libdb/test/lock004.tcl \
+libdb/test/lock005.tcl \
+libdb/test/lockscript.tcl \
+libdb/test/log001.tcl \
+libdb/test/log002.tcl \
+libdb/test/log003.tcl \
+libdb/test/log004.tcl \
+libdb/test/log005.tcl \
+libdb/test/logtrack.list \
+libdb/test/logtrack.tcl \
+libdb/test/mdbscript.tcl \
+libdb/test/memp001.tcl \
+libdb/test/scr001/chk.code \
+libdb/test/memp002.tcl \
+libdb/test/memp003.tcl \
+libdb/test/mpoolscript.tcl \
+libdb/test/mutex001.tcl \
+libdb/test/mutex002.tcl \
+libdb/test/mutex003.tcl \
+libdb/test/mutexscript.tcl \
+libdb/test/ndbm.tcl \
+libdb/test/parallel.tcl \
+libdb/test/recd001.tcl \
+libdb/test/recd002.tcl \
+libdb/test/recd003.tcl \
+libdb/test/recd004.tcl \
+libdb/test/recd005.tcl \
+libdb/test/recd006.tcl \
+libdb/test/recd007.tcl \
+libdb/test/recd008.tcl \
+libdb/test/recd009.tcl \
+libdb/test/recd010.tcl \
+libdb/test/recd011.tcl \
+libdb/test/recd012.tcl \
+libdb/test/recd013.tcl \
+libdb/test/recd014.tcl \
+libdb/test/recd015.tcl \
+libdb/test/recd016.tcl \
+libdb/test/recd017.tcl \
+libdb/test/recd018.tcl \
+libdb/test/recd019.tcl \
+libdb/test/recd020.tcl \
+libdb/test/recd15scr.tcl \
+libdb/test/recdscript.tcl \
+libdb/test/rep001.tcl \
+libdb/test/rep002.tcl \
+libdb/test/rep003.tcl \
+libdb/test/rep004.tcl \
+libdb/test/rep005.tcl \
+libdb/test/reputils.tcl \
+libdb/test/rpc001.tcl \
+libdb/test/rpc002.tcl \
+libdb/test/rpc003.tcl \
+libdb/test/rpc004.tcl \
+libdb/test/rpc005.tcl \
+libdb/test/rsrc001.tcl \
+libdb/test/rsrc002.tcl \
+libdb/test/rsrc003.tcl \
+libdb/test/rsrc004.tcl \
+libdb/test/sdb001.tcl \
+libdb/test/sdb002.tcl \
+libdb/test/sdb003.tcl \
+libdb/test/sdb004.tcl \
+libdb/test/sdb005.tcl \
+libdb/test/sdb006.tcl \
+libdb/test/sdb007.tcl \
+libdb/test/sdb008.tcl \
+libdb/test/sdb009.tcl \
+libdb/test/sdb010.tcl \
+libdb/test/sdb011.tcl \
+libdb/test/sdb012.tcl \
+libdb/test/sdbscript.tcl \
+libdb/test/sdbtest001.tcl \
+libdb/test/sdbtest002.tcl \
+libdb/test/sdbutils.tcl \
+libdb/test/sec001.tcl \
+libdb/test/sec002.tcl \
+libdb/test/shelltest.tcl \
+libdb/test/si001.tcl \
+libdb/test/si002.tcl \
+libdb/test/si003.tcl \
+libdb/test/si004.tcl \
+libdb/test/si005.tcl \
+libdb/test/si006.tcl \
+libdb/test/sindex.tcl \
+libdb/test/sysscript.tcl \
+libdb/test/test.tcl \
+libdb/test/test001.tcl \
+libdb/test/test002.tcl \
+libdb/test/test003.tcl \
+libdb/test/test004.tcl \
+libdb/test/test005.tcl \
+libdb/test/test006.tcl \
+libdb/test/test007.tcl \
+libdb/test/test008.tcl \
+libdb/test/test009.tcl \
+libdb/test/test010.tcl \
+libdb/test/test011.tcl \
+libdb/test/test012.tcl \
+libdb/test/test013.tcl \
+libdb/test/test014.tcl \
+libdb/test/test015.tcl \
+libdb/test/test016.tcl \
+libdb/test/test017.tcl \
+libdb/test/test018.tcl \
+libdb/test/test019.tcl \
+libdb/test/test020.tcl \
+libdb/test/test021.tcl \
+libdb/test/test022.tcl \
+libdb/test/test023.tcl \
+libdb/test/test024.tcl \
+libdb/test/test025.tcl \
+libdb/test/test026.tcl \
+libdb/test/test027.tcl \
+libdb/test/test028.tcl \
+libdb/test/test029.tcl \
+libdb/test/test030.tcl \
+libdb/test/test031.tcl \
+libdb/test/test032.tcl \
+libdb/test/test033.tcl \
+libdb/test/test034.tcl \
+libdb/test/test035.tcl \
+libdb/test/test036.tcl \
+libdb/test/test037.tcl \
+libdb/test/test038.tcl \
+libdb/test/test039.tcl \
+libdb/test/test040.tcl \
+libdb/test/test041.tcl \
+libdb/test/test042.tcl \
+libdb/test/test043.tcl \
+libdb/test/test044.tcl \
+libdb/test/test045.tcl \
+libdb/test/test046.tcl \
+libdb/test/test047.tcl \
+libdb/test/test048.tcl \
+libdb/test/test049.tcl \
+libdb/test/test050.tcl \
+libdb/test/test051.tcl \
+libdb/test/test052.tcl \
+libdb/test/test053.tcl \
+libdb/test/test054.tcl \
+libdb/test/test055.tcl \
+libdb/test/test056.tcl \
+libdb/test/test057.tcl \
+libdb/test/test058.tcl \
+libdb/test/test059.tcl \
+libdb/test/test060.tcl \
+libdb/test/test061.tcl \
+libdb/test/test062.tcl \
+libdb/test/test063.tcl \
+libdb/test/test064.tcl \
+libdb/test/test065.tcl \
+libdb/test/test066.tcl \
+libdb/test/test067.tcl \
+libdb/test/test068.tcl \
+libdb/test/test069.tcl \
+libdb/test/test070.tcl \
+libdb/test/test071.tcl \
+libdb/test/test072.tcl \
+libdb/test/test073.tcl \
+libdb/test/test074.tcl \
+libdb/test/test075.tcl \
+libdb/test/test076.tcl \
+libdb/test/test077.tcl \
+libdb/test/test078.tcl \
+libdb/test/test079.tcl \
+libdb/test/test080.tcl \
+libdb/test/test081.tcl \
+libdb/test/test082.tcl \
+libdb/test/test083.tcl \
+libdb/test/test084.tcl \
+libdb/test/test085.tcl \
+libdb/test/test086.tcl \
+libdb/test/test087.tcl \
+libdb/test/test088.tcl \
+libdb/test/test089.tcl \
+libdb/test/test090.tcl \
+libdb/test/test091.tcl \
+libdb/test/test092.tcl \
+libdb/test/test093.tcl \
+libdb/test/test094.tcl \
+libdb/test/test095.tcl \
+libdb/test/test096.tcl \
+libdb/test/test097.tcl \
+libdb/test/test098.tcl \
+libdb/test/test099.tcl \
+libdb/test/test100.tcl \
+libdb/test/test101.tcl \
+libdb/test/testparams.tcl \
+libdb/test/testutils.tcl \
+libdb/test/txn001.tcl \
+libdb/test/txn002.tcl \
+libdb/test/txn003.tcl \
+libdb/test/txn004.tcl \
+libdb/test/txn005.tcl \
+libdb/test/txn006.tcl \
+libdb/test/txn007.tcl \
+libdb/test/txn008.tcl \
+libdb/test/txn009.tcl \
+libdb/test/txnscript.tcl \
+libdb/test/update.tcl \
+libdb/test/upgrade.tcl \
+libdb/test/wordlist \
+libdb/test/wrap.tcl \
+libdb/test/scr002/chk.def \
+libdb/test/scr003/chk.define \
+libdb/test/scr004/chk.javafiles \
+libdb/test/scr005/chk.nl \
+libdb/test/scr006/chk.offt \
+libdb/test/scr007/chk.proto \
+libdb/test/scr008/chk.pubdef \
+libdb/test/scr009/chk.srcfiles \
+libdb/test/scr010/chk.str \
+libdb/test/scr010/spell.ok \
+libdb/test/scr011/chk.tags \
+libdb/test/scr012/chk.vx_code \
+libdb/test/scr013/chk.stats \
+libdb/test/scr014/chk.err \
+libdb/test/scr015/README \
+libdb/test/scr015/TestConstruct01.cpp \
+libdb/test/scr015/TestConstruct01.testerr \
+libdb/test/scr015/TestConstruct01.testout \
+libdb/test/scr015/TestExceptInclude.cpp \
+libdb/test/scr015/TestGetSetMethods.cpp \
+libdb/test/scr015/TestKeyRange.cpp \
+libdb/test/scr015/TestKeyRange.testin \
+libdb/test/scr015/TestKeyRange.testout \
+libdb/test/scr015/TestLogc.cpp \
+libdb/test/scr015/TestLogc.testout \
+libdb/test/scr015/TestSimpleAccess.cpp \
+libdb/test/scr015/TestSimpleAccess.testout \
+libdb/test/scr015/TestTruncate.cpp \
+libdb/test/scr015/TestTruncate.testout \
+libdb/test/scr015/chk.cxxtests \
+libdb/test/scr015/ignore \
+libdb/test/scr015/testall \
+libdb/test/scr015/testone \
+libdb/test/scr016/CallbackTest.java \
+libdb/test/scr016/CallbackTest.testout \
+libdb/test/scr016/README \
+libdb/test/scr016/TestAppendRecno.java \
+libdb/test/scr016/TestAppendRecno.testout \
+libdb/test/scr016/TestAssociate.java \
+libdb/test/scr016/TestAssociate.testout \
+libdb/test/scr016/TestClosedDb.java \
+libdb/test/scr016/TestClosedDb.testout \
+libdb/test/scr016/TestConstruct01.java \
+libdb/test/scr016/TestConstruct01.testerr \
+libdb/test/scr016/TestConstruct01.testout \
+libdb/test/scr016/TestConstruct02.java \
+libdb/test/scr016/TestConstruct02.testout \
+libdb/test/scr016/TestDbtFlags.java \
+libdb/test/scr016/TestDbtFlags.testerr \
+libdb/test/scr016/TestDbtFlags.testout \
+libdb/test/scr016/TestGetSetMethods.java \
+libdb/test/scr016/TestKeyRange.java \
+libdb/test/scr016/TestKeyRange.testout \
+libdb/test/scr016/TestLockVec.java \
+libdb/test/scr016/TestLockVec.testout \
+libdb/test/scr016/TestLogc.java \
+libdb/test/scr016/TestLogc.testout \
+libdb/test/scr016/TestOpenEmpty.java \
+libdb/test/scr016/TestOpenEmpty.testerr \
+libdb/test/scr016/TestReplication.java \
+libdb/test/scr016/TestRpcServer.java \
+libdb/test/scr016/TestSameDbt.java \
+libdb/test/scr016/TestSameDbt.testout \
+libdb/test/scr016/TestSimpleAccess.java \
+libdb/test/scr016/TestSimpleAccess.testout \
+libdb/test/scr016/TestStat.java \
+libdb/test/scr016/TestStat.testout \
+libdb/test/scr016/TestTruncate.java \
+libdb/test/scr016/TestTruncate.testout \
+libdb/test/scr016/TestUtil.java \
+libdb/test/scr016/TestXAServlet.java \
+libdb/test/scr016/chk.javatests \
+libdb/test/scr016/ignore \
+libdb/test/scr016/testall \
+libdb/test/scr016/testone \
+libdb/test/scr017/O.BH \
+libdb/test/scr017/O.R \
+libdb/test/scr017/chk.db185 \
+libdb/test/scr017/t.c \
+libdb/test/scr018/chk.comma \
+libdb/test/scr018/t.c \
+libdb/test/scr019/chk.include \
+libdb/test/scr020/chk.inc \
+libdb/test/scr021/chk.flags \
+libdb/test/scr022/chk.rr \
+libdb/txn/txn.c \
+libdb/txn/txn.src \
+libdb/txn/txn_auto.c \
+libdb/txn/txn_method.c \
+libdb/txn/txn_rec.c \
+libdb/txn/txn_recover.c \
+libdb/txn/txn_region.c \
+libdb/txn/txn_stat.c \
+libdb/txn/txn_util.c \
+libdb/xa/xa.c \
+libdb/xa/xa_db.c \
+libdb/xa/xa_map.c \
+libdb/docs/ref/am/close.html \
+libdb/docs/ref/am/count.html \
+libdb/docs/ref/am/curclose.html \
+libdb/docs/ref/am/curdel.html \
+libdb/docs/ref/am/curdup.html \
+libdb/docs/ref/am/curget.html \
+libdb/docs/ref/am/curput.html \
+libdb/docs/ref/am/cursor.html \
+libdb/docs/ref/am/delete.html \
+libdb/docs/ref/am/get.html \
+libdb/docs/ref/am/join.html \
+libdb/docs/ref/am/open.html \
+libdb/docs/ref/am/opensub.html \
+libdb/docs/ref/am/ops.html \
+libdb/docs/ref/am/put.html \
+libdb/docs/ref/am/second.html \
+libdb/docs/ref/am/stat.html \
+libdb/docs/ref/am/sync.html \
+libdb/docs/ref/am/truncate.html \
+libdb/docs/ref/am/upgrade.html \
+libdb/docs/ref/am/verify.html \
+libdb/docs/ref/pindex.src \
+libdb/docs/ref/am_conf/bt_compare.html \
+libdb/docs/ref/am_conf/bt_minkey.html \
+libdb/docs/ref/am_conf/bt_prefix.html \
+libdb/docs/ref/am_conf/bt_recnum.html \
+libdb/docs/ref/am_conf/byteorder.html \
+libdb/docs/ref/am_conf/cachesize.html \
+libdb/docs/ref/am_conf/dup.html \
+libdb/docs/ref/am_conf/extentsize.html \
+libdb/docs/ref/am_conf/h_ffactor.html \
+libdb/docs/ref/am_conf/h_hash.html \
+libdb/docs/ref/am_conf/h_nelem.html \
+libdb/docs/ref/am_conf/intro.html \
+libdb/docs/ref/am_conf/logrec.html \
+libdb/docs/ref/am_conf/malloc.html \
+libdb/docs/ref/am_conf/pagesize.html \
+libdb/docs/ref/am_conf/re_source.html \
+libdb/docs/ref/am_conf/recno.html \
+libdb/docs/ref/am_conf/renumber.html \
+libdb/docs/ref/am_conf/select.html \
+libdb/docs/ref/am_misc/align.html \
+libdb/docs/ref/am_misc/dbsizes.html \
+libdb/docs/ref/am_misc/diskspace.html \
+libdb/docs/ref/am_misc/error.html \
+libdb/docs/ref/am_misc/faq.html \
+libdb/docs/ref/am_misc/get_bulk.html \
+libdb/docs/ref/am_misc/partial.html \
+libdb/docs/ref/am_misc/perm.html \
+libdb/docs/ref/am_misc/stability.html \
+libdb/docs/ref/am_misc/struct.html \
+libdb/docs/ref/am_misc/tune.html \
+libdb/docs/ref/apprec/auto.html \
+libdb/docs/ref/apprec/config.html \
+libdb/docs/ref/apprec/def.html \
+libdb/docs/ref/apprec/intro.html \
+libdb/docs/ref/arch/bigpic.gif \
+libdb/docs/ref/arch/smallpic.gif \
+libdb/docs/ref/arch/apis.html \
+libdb/docs/ref/arch/bigpic.html \
+libdb/docs/ref/arch/progmodel.html \
+libdb/docs/ref/arch/script.html \
+libdb/docs/ref/arch/utilities.html \
+libdb/docs/ref/build_unix/aix.html \
+libdb/docs/ref/build_unix/conf.html \
+libdb/docs/ref/build_unix/embedix.html \
+libdb/docs/ref/build_unix/flags.html \
+libdb/docs/ref/build_unix/freebsd.html \
+libdb/docs/ref/build_unix/hpux.html \
+libdb/docs/ref/build_unix/install.html \
+libdb/docs/ref/build_unix/intro.html \
+libdb/docs/ref/build_unix/irix.html \
+libdb/docs/ref/build_unix/linux.html \
+libdb/docs/ref/build_unix/macosx.html \
+libdb/docs/ref/build_unix/notes.html \
+libdb/docs/ref/build_unix/osf1.html \
+libdb/docs/ref/build_unix/qnx.html \
+libdb/docs/ref/build_unix/sco.html \
+libdb/docs/ref/build_unix/shlib.html \
+libdb/docs/ref/build_unix/solaris.html \
+libdb/docs/ref/build_unix/sunos.html \
+libdb/docs/ref/build_unix/test.html \
+libdb/docs/ref/build_unix/ultrix.html \
+libdb/docs/ref/build_vxworks/faq.html \
+libdb/docs/ref/build_vxworks/intro.html \
+libdb/docs/ref/build_vxworks/introae.html \
+libdb/docs/ref/build_vxworks/notes.html \
+libdb/docs/ref/build_win/faq.html \
+libdb/docs/ref/build_win/intro.html \
+libdb/docs/ref/build_win/notes.html \
+libdb/docs/ref/build_win/test.html \
+libdb/docs/ref/cam/intro.html \
+libdb/docs/ref/debug/intro.html \
+libdb/docs/ref/debug/common.html \
+libdb/docs/ref/debug/compile.html \
+libdb/docs/ref/debug/printlog.html \
+libdb/docs/ref/debug/runtime.html \
+libdb/docs/ref/distrib/layout.html \
+libdb/docs/ref/distrib/port.html \
+libdb/docs/ref/dumpload/format.html \
+libdb/docs/ref/dumpload/text.html \
+libdb/docs/ref/dumpload/utility.html \
+libdb/docs/ref/env/create.html \
+libdb/docs/ref/env/db_config.html \
+libdb/docs/ref/env/encrypt.html \
+libdb/docs/ref/env/error.html \
+libdb/docs/ref/env/faq.html \
+libdb/docs/ref/env/intro.html \
+libdb/docs/ref/env/naming.html \
+libdb/docs/ref/env/open.html \
+libdb/docs/ref/env/region.html \
+libdb/docs/ref/env/remote.html \
+libdb/docs/ref/env/security.html \
+libdb/docs/ref/intro/data.html \
+libdb/docs/ref/intro/dbis.html \
+libdb/docs/ref/intro/dbisnot.html \
+libdb/docs/ref/intro/distrib.html \
+libdb/docs/ref/intro/need.html \
+libdb/docs/ref/intro/products.html \
+libdb/docs/ref/intro/terrain.html \
+libdb/docs/ref/intro/what.html \
+libdb/docs/ref/intro/where.html \
+libdb/docs/ref/install/magic.txt \
+libdb/docs/ref/install/magic.s5.be.txt \
+libdb/docs/ref/install/magic.s5.le.txt \
+libdb/docs/ref/install/file.html \
+libdb/docs/ref/install/multiple.html \
+libdb/docs/ref/install/rpm.html \
+libdb/docs/ref/java/compat.html \
+libdb/docs/ref/java/conf.html \
+libdb/docs/ref/java/faq.html \
+libdb/docs/ref/java/program.html \
+libdb/docs/ref/lock/am_conv.html \
+libdb/docs/ref/lock/cam_conv.html \
+libdb/docs/ref/lock/config.html \
+libdb/docs/ref/lock/dead.html \
+libdb/docs/ref/lock/deaddbg.html \
+libdb/docs/ref/lock/intro.html \
+libdb/docs/ref/lock/max.html \
+libdb/docs/ref/lock/nondb.html \
+libdb/docs/ref/lock/notxn.html \
+libdb/docs/ref/lock/page.html \
+libdb/docs/ref/lock/stdmode.html \
+libdb/docs/ref/lock/timeout.html \
+libdb/docs/ref/lock/twopl.html \
+libdb/docs/ref/log/config.html \
+libdb/docs/ref/log/intro.html \
+libdb/docs/ref/log/limits.html \
+libdb/docs/ref/mp/intro.html \
+libdb/docs/ref/mp/config.html \
+libdb/docs/ref/perl/intro.html \
+libdb/docs/ref/program/solaris.txt \
+libdb/docs/ref/program/appsignals.html \
+libdb/docs/ref/program/cache.html \
+libdb/docs/ref/program/compatible.html \
+libdb/docs/ref/program/copy.html \
+libdb/docs/ref/program/environ.html \
+libdb/docs/ref/program/errorret.html \
+libdb/docs/ref/program/faq.html \
+libdb/docs/ref/program/mt.html \
+libdb/docs/ref/program/namespace.html \
+libdb/docs/ref/program/runtime.html \
+libdb/docs/ref/program/scope.html \
+libdb/docs/ref/refs/bdb_usenix.html \
+libdb/docs/ref/refs/bdb_usenix.ps \
+libdb/docs/ref/refs/embedded.html \
+libdb/docs/ref/refs/hash_usenix.ps \
+libdb/docs/ref/refs/libtp_usenix.ps \
+libdb/docs/ref/refs/refs.html \
+libdb/docs/ref/refs/witold.html \
+libdb/docs/ref/rep/app.html \
+libdb/docs/ref/rep/comm.html \
+libdb/docs/ref/rep/elect.html \
+libdb/docs/ref/rep/ex.html \
+libdb/docs/ref/rep/ex_comm.html \
+libdb/docs/ref/rep/ex_rq.html \
+libdb/docs/ref/rep/faq.html \
+libdb/docs/ref/rep/id.html \
+libdb/docs/ref/rep/init.html \
+libdb/docs/ref/rep/intro.html \
+libdb/docs/ref/rep/logonly.html \
+libdb/docs/ref/rep/newsite.html \
+libdb/docs/ref/rep/partition.html \
+libdb/docs/ref/rep/pri.html \
+libdb/docs/ref/rep/trans.html \
+libdb/docs/ref/rpc/client.html \
+libdb/docs/ref/rpc/faq.html \
+libdb/docs/ref/rpc/intro.html \
+libdb/docs/ref/rpc/server.html \
+libdb/docs/ref/sendmail/intro.html \
+libdb/docs/ref/simple_tut/example.cs \
+libdb/docs/ref/simple_tut/close.html \
+libdb/docs/ref/simple_tut/del.html \
+libdb/docs/ref/simple_tut/errors.html \
+libdb/docs/ref/simple_tut/get.html \
+libdb/docs/ref/simple_tut/handles.html \
+libdb/docs/ref/simple_tut/intro.html \
+libdb/docs/ref/simple_tut/keydata.html \
+libdb/docs/ref/simple_tut/open.html \
+libdb/docs/ref/simple_tut/put.html \
+libdb/docs/ref/tcl/intro.html \
+libdb/docs/ref/tcl/error.html \
+libdb/docs/ref/tcl/faq.html \
+libdb/docs/ref/tcl/program.html \
+libdb/docs/ref/tcl/using.html \
+libdb/docs/ref/test/faq.html \
+libdb/docs/ref/test/run.html \
+libdb/docs/ref/transapp/transapp.cs \
+libdb/docs/ref/transapp/writetest.cs \
+libdb/docs/ref/transapp/admin.html \
+libdb/docs/ref/transapp/app.html \
+libdb/docs/ref/transapp/archival.html \
+libdb/docs/ref/transapp/atomicity.html \
+libdb/docs/ref/transapp/checkpoint.html \
+libdb/docs/ref/transapp/cursor.html \
+libdb/docs/ref/transapp/data_open.html \
+libdb/docs/ref/transapp/deadlock.html \
+libdb/docs/ref/transapp/env_open.html \
+libdb/docs/ref/transapp/faq.html \
+libdb/docs/ref/transapp/filesys.html \
+libdb/docs/ref/transapp/hotfail.html \
+libdb/docs/ref/transapp/inc.html \
+libdb/docs/ref/transapp/intro.html \
+libdb/docs/ref/transapp/logfile.html \
+libdb/docs/ref/transapp/nested.html \
+libdb/docs/ref/transapp/put.html \
+libdb/docs/ref/transapp/read.html \
+libdb/docs/ref/transapp/reclimit.html \
+libdb/docs/ref/transapp/recovery.html \
+libdb/docs/ref/transapp/term.html \
+libdb/docs/ref/transapp/throughput.html \
+libdb/docs/ref/transapp/tune.html \
+libdb/docs/ref/transapp/why.html \
+libdb/docs/ref/txn/config.html \
+libdb/docs/ref/txn/intro.html \
+libdb/docs/ref/txn/limits.html \
+libdb/docs/ref/upgrade/process.html \
+libdb/docs/ref/upgrade/version.html \
+libdb/docs/ref/upgrade.2.0/convert.html \
+libdb/docs/ref/upgrade.2.0/disk.html \
+libdb/docs/ref/upgrade.2.0/intro.html \
+libdb/docs/ref/upgrade.2.0/system.html \
+libdb/docs/ref/upgrade.2.0/toc.html \
+libdb/docs/ref/upgrade.3.0/close.html \
+libdb/docs/ref/upgrade.3.0/cxx.html \
+libdb/docs/ref/upgrade.3.0/db.html \
+libdb/docs/ref/upgrade.3.0/db_cxx.html \
+libdb/docs/ref/upgrade.3.0/dbenv.html \
+libdb/docs/ref/upgrade.3.0/dbenv_cxx.html \
+libdb/docs/ref/upgrade.3.0/dbinfo.html \
+libdb/docs/ref/upgrade.3.0/disk.html \
+libdb/docs/ref/upgrade.3.0/eacces.html \
+libdb/docs/ref/upgrade.3.0/eagain.html \
+libdb/docs/ref/upgrade.3.0/envopen.html \
+libdb/docs/ref/upgrade.3.0/func.html \
+libdb/docs/ref/upgrade.3.0/intro.html \
+libdb/docs/ref/upgrade.3.0/java.html \
+libdb/docs/ref/upgrade.3.0/join.html \
+libdb/docs/ref/upgrade.3.0/jump_set.html \
+libdb/docs/ref/upgrade.3.0/lock_detect.html \
+libdb/docs/ref/upgrade.3.0/lock_notheld.html \
+libdb/docs/ref/upgrade.3.0/lock_put.html \
+libdb/docs/ref/upgrade.3.0/lock_stat.html \
+libdb/docs/ref/upgrade.3.0/log_register.html \
+libdb/docs/ref/upgrade.3.0/log_stat.html \
+libdb/docs/ref/upgrade.3.0/memp_stat.html \
+libdb/docs/ref/upgrade.3.0/open.html \
+libdb/docs/ref/upgrade.3.0/rmw.html \
+libdb/docs/ref/upgrade.3.0/stat.html \
+libdb/docs/ref/upgrade.3.0/toc.html \
+libdb/docs/ref/upgrade.3.0/txn_begin.html \
+libdb/docs/ref/upgrade.3.0/txn_commit.html \
+libdb/docs/ref/upgrade.3.0/txn_stat.html \
+libdb/docs/ref/upgrade.3.0/value_set.html \
+libdb/docs/ref/upgrade.3.0/xa.html \
+libdb/docs/ref/upgrade.3.1/btstat.html \
+libdb/docs/ref/upgrade.3.1/config.html \
+libdb/docs/ref/upgrade.3.1/disk.html \
+libdb/docs/ref/upgrade.3.1/dup.html \
+libdb/docs/ref/upgrade.3.1/env.html \
+libdb/docs/ref/upgrade.3.1/intro.html \
+libdb/docs/ref/upgrade.3.1/log_register.html \
+libdb/docs/ref/upgrade.3.1/logalloc.html \
+libdb/docs/ref/upgrade.3.1/memp_register.html \
+libdb/docs/ref/upgrade.3.1/put.html \
+libdb/docs/ref/upgrade.3.1/set_feedback.html \
+libdb/docs/ref/upgrade.3.1/set_paniccall.html \
+libdb/docs/ref/upgrade.3.1/set_tx_recover.html \
+libdb/docs/ref/upgrade.3.1/sysmem.html \
+libdb/docs/ref/upgrade.3.1/tcl.html \
+libdb/docs/ref/upgrade.3.1/toc.html \
+libdb/docs/ref/upgrade.3.1/tmp.html \
+libdb/docs/ref/upgrade.3.1/txn_check.html \
+libdb/docs/ref/upgrade.3.2/callback.html \
+libdb/docs/ref/upgrade.3.2/db_dump.html \
+libdb/docs/ref/upgrade.3.2/disk.html \
+libdb/docs/ref/upgrade.3.2/handle.html \
+libdb/docs/ref/upgrade.3.2/incomplete.html \
+libdb/docs/ref/upgrade.3.2/intro.html \
+libdb/docs/ref/upgrade.3.2/mutexlock.html \
+libdb/docs/ref/upgrade.3.2/notfound.html \
+libdb/docs/ref/upgrade.3.2/renumber.html \
+libdb/docs/ref/upgrade.3.2/set_flags.html \
+libdb/docs/ref/upgrade.3.2/toc.html \
+libdb/docs/ref/upgrade.3.2/tx_recover.html \
+libdb/docs/ref/upgrade.3.3/alloc.html \
+libdb/docs/ref/upgrade.3.3/bigfile.html \
+libdb/docs/ref/upgrade.3.3/conflict.html \
+libdb/docs/ref/upgrade.3.3/disk.html \
+libdb/docs/ref/upgrade.3.3/getswap.html \
+libdb/docs/ref/upgrade.3.3/gettype.html \
+libdb/docs/ref/upgrade.3.3/intro.html \
+libdb/docs/ref/upgrade.3.3/memp_fget.html \
+libdb/docs/ref/upgrade.3.3/rpc.html \
+libdb/docs/ref/upgrade.3.3/shared.html \
+libdb/docs/ref/upgrade.3.3/toc.html \
+libdb/docs/ref/upgrade.3.3/txn_prepare.html \
+libdb/docs/ref/upgrade.4.0/asr.html \
+libdb/docs/ref/upgrade.4.0/cxx.html \
+libdb/docs/ref/upgrade.4.0/deadlock.html \
+libdb/docs/ref/upgrade.4.0/disk.html \
+libdb/docs/ref/upgrade.4.0/env.html \
+libdb/docs/ref/upgrade.4.0/intro.html \
+libdb/docs/ref/upgrade.4.0/java.html \
+libdb/docs/ref/upgrade.4.0/lock.html \
+libdb/docs/ref/upgrade.4.0/lock_id_free.html \
+libdb/docs/ref/upgrade.4.0/log.html \
+libdb/docs/ref/upgrade.4.0/mp.html \
+libdb/docs/ref/upgrade.4.0/rpc.html \
+libdb/docs/ref/upgrade.4.0/set_lk_max.html \
+libdb/docs/ref/upgrade.4.0/toc.html \
+libdb/docs/ref/upgrade.4.0/txn.html \
+libdb/docs/ref/upgrade.4.1/app_dispatch.html \
+libdb/docs/ref/upgrade.4.1/checkpoint.html \
+libdb/docs/ref/upgrade.4.1/cxx.html \
+libdb/docs/ref/upgrade.4.1/disk.html \
+libdb/docs/ref/upgrade.4.1/excl.html \
+libdb/docs/ref/upgrade.4.1/fop.html \
+libdb/docs/ref/upgrade.4.1/hash_nelem.html \
+libdb/docs/ref/upgrade.4.1/incomplete.html \
+libdb/docs/ref/upgrade.4.1/intro.html \
+libdb/docs/ref/upgrade.4.1/java.html \
+libdb/docs/ref/upgrade.4.1/log_register.html \
+libdb/docs/ref/upgrade.4.1/log_stat.html \
+libdb/docs/ref/upgrade.4.1/memp_sync.html \
+libdb/docs/ref/upgrade.4.1/toc.html \
+libdb/docs/ref/xa/build.html \
+libdb/docs/ref/xa/intro.html \
+libdb/docs/ref/xa/faq.html \
+libdb/docs/ref/xa/xa_config.html \
+libdb/docs/ref/xa/xa_intro.html \
+libdb/docs/index.html \
+libdb/docs/reftoc.html \
+libdb/docs/images/api.gif \
+libdb/docs/images/next.gif \
+libdb/docs/images/prev.gif \
+libdb/docs/images/ps.gif \
+libdb/docs/images/ref.gif \
+libdb/docs/images/sleepycat.gif \
+libdb/docs/sleepycat/contact.html \
+libdb/docs/sleepycat/legal.html \
+libdb/docs/sleepycat/license.html \
+libdb/docs/api_c/db_associate.html \
+libdb/docs/api_c/pindex.src \
+libdb/docs/api_c/db_class.html \
+libdb/docs/api_c/db_close.html \
+libdb/docs/api_c/db_cursor.html \
+libdb/docs/api_c/db_del.html \
+libdb/docs/api_c/db_err.html \
+libdb/docs/api_c/db_fd.html \
+libdb/docs/api_c/db_get.html \
+libdb/docs/api_c/db_get_byteswapped.html \
+libdb/docs/api_c/db_get_type.html \
+libdb/docs/api_c/db_join.html \
+libdb/docs/api_c/db_key_range.html \
+libdb/docs/api_c/db_list.html \
+libdb/docs/api_c/db_open.html \
+libdb/docs/api_c/db_put.html \
+libdb/docs/api_c/db_remove.html \
+libdb/docs/api_c/db_rename.html \
+libdb/docs/api_c/db_set_append_recno.html \
+libdb/docs/api_c/db_stat.html \
+libdb/docs/api_c/db_set_bt_compare.html \
+libdb/docs/api_c/db_set_bt_minkey.html \
+libdb/docs/api_c/db_set_bt_prefix.html \
+libdb/docs/api_c/db_set_cache_priority.html \
+libdb/docs/api_c/db_set_cachesize.html \
+libdb/docs/api_c/db_set_dup_compare.html \
+libdb/docs/api_c/db_set_encrypt.html \
+libdb/docs/api_c/db_set_errcall.html \
+libdb/docs/api_c/db_set_errpfx.html \
+libdb/docs/api_c/db_set_feedback.html \
+libdb/docs/api_c/db_set_flags.html \
+libdb/docs/api_c/db_set_h_ffactor.html \
+libdb/docs/api_c/db_set_h_hash.html \
+libdb/docs/api_c/db_set_h_nelem.html \
+libdb/docs/api_c/db_set_lorder.html \
+libdb/docs/api_c/db_set_pagesize.html \
+libdb/docs/api_c/db_sync.html \
+libdb/docs/api_c/db_set_q_extentsize.html \
+libdb/docs/api_c/db_set_re_delim.html \
+libdb/docs/api_c/db_set_re_len.html \
+libdb/docs/api_c/db_set_re_pad.html \
+libdb/docs/api_c/db_set_re_source.html \
+libdb/docs/api_c/db_truncate.html \
+libdb/docs/api_c/db_upgrade.html \
+libdb/docs/api_c/db_verify.html \
+libdb/docs/api_c/dbt_class.html \
+libdb/docs/api_c/db_create.html \
+libdb/docs/api_c/db_set_alloc.html \
+libdb/docs/api_c/db_set_errfile.html \
+libdb/docs/api_c/db_set_paniccall.html \
+libdb/docs/api_c/dbt_bulk.html \
+libdb/docs/api_c/dbc_class.html \
+libdb/docs/api_c/dbc_close.html \
+libdb/docs/api_c/dbc_count.html \
+libdb/docs/api_c/dbc_del.html \
+libdb/docs/api_c/dbc_dup.html \
+libdb/docs/api_c/dbc_get.html \
+libdb/docs/api_c/dbc_list.html \
+libdb/docs/api_c/dbc_put.html \
+libdb/docs/api_c/c_index.html \
+libdb/docs/api_c/env_class.html \
+libdb/docs/api_c/env_close.html \
+libdb/docs/api_c/env_dbremove.html \
+libdb/docs/api_c/env_dbrename.html \
+libdb/docs/api_c/env_err.html \
+libdb/docs/api_c/env_list.html \
+libdb/docs/api_c/env_open.html \
+libdb/docs/api_c/env_remove.html \
+libdb/docs/api_c/env_set_app_dispatch.html \
+libdb/docs/api_c/env_set_cachesize.html \
+libdb/docs/api_c/env_set_data_dir.html \
+libdb/docs/api_c/env_set_encrypt.html \
+libdb/docs/api_c/env_set_errcall.html \
+libdb/docs/api_c/env_set_errpfx.html \
+libdb/docs/api_c/dbm.html \
+libdb/docs/api_c/env_set_feedback.html \
+libdb/docs/api_c/env_set_flags.html \
+libdb/docs/api_c/env_set_lg_bsize.html \
+libdb/docs/api_c/env_set_lg_dir.html \
+libdb/docs/api_c/env_set_lg_max.html \
+libdb/docs/api_c/env_set_lg_regionmax.html \
+libdb/docs/api_c/env_set_lk_conflicts.html \
+libdb/docs/api_c/env_set_lk_detect.html \
+libdb/docs/api_c/env_set_lk_max_lockers.html \
+libdb/docs/api_c/env_set_lk_max_locks.html \
+libdb/docs/api_c/env_set_lk_max_objects.html \
+libdb/docs/api_c/env_set_mp_mmapsize.html \
+libdb/docs/api_c/env_set_rpc_server.html \
+libdb/docs/api_c/env_set_shm_key.html \
+libdb/docs/api_c/env_set_tas_spins.html \
+libdb/docs/api_c/txn_id.html \
+libdb/docs/api_c/env_set_timeout.html \
+libdb/docs/api_c/env_set_tmp_dir.html \
+libdb/docs/api_c/env_set_tx_max.html \
+libdb/docs/api_c/env_set_tx_timestamp.html \
+libdb/docs/api_c/env_set_verbose.html \
+libdb/docs/api_c/env_strerror.html \
+libdb/docs/api_c/env_version.html \
+libdb/docs/api_c/env_create.html \
+libdb/docs/api_c/env_set_alloc.html \
+libdb/docs/api_c/env_set_errfile.html \
+libdb/docs/api_c/env_set_paniccall.html \
+libdb/docs/api_c/hsearch.html \
+libdb/docs/api_c/lock_class.html \
+libdb/docs/api_c/lock_detect.html \
+libdb/docs/api_c/lock_get.html \
+libdb/docs/api_c/lock_id.html \
+libdb/docs/api_c/lock_id_free.html \
+libdb/docs/api_c/lock_list.html \
+libdb/docs/api_c/lock_put.html \
+libdb/docs/api_c/lock_stat.html \
+libdb/docs/api_c/lock_vec.html \
+libdb/docs/api_c/log_archive.html \
+libdb/docs/api_c/log_compare.html \
+libdb/docs/api_c/log_cursor.html \
+libdb/docs/api_c/log_file.html \
+libdb/docs/api_c/log_flush.html \
+libdb/docs/api_c/log_list.html \
+libdb/docs/api_c/log_put.html \
+libdb/docs/api_c/log_stat.html \
+libdb/docs/api_c/logc_class.html \
+libdb/docs/api_c/logc_close.html \
+libdb/docs/api_c/logc_get.html \
+libdb/docs/api_c/lsn_class.html \
+libdb/docs/api_c/memp_fclose.html \
+libdb/docs/api_c/memp_fopen.html \
+libdb/docs/api_c/memp_fsync.html \
+libdb/docs/api_c/memp_list.html \
+libdb/docs/api_c/memp_register.html \
+libdb/docs/api_c/memp_stat.html \
+libdb/docs/api_c/memp_sync.html \
+libdb/docs/api_c/memp_trickle.html \
+libdb/docs/api_c/mempfile_class.html \
+libdb/docs/api_c/memp_fcreate.html \
+libdb/docs/api_c/memp_fget.html \
+libdb/docs/api_c/memp_fput.html \
+libdb/docs/api_c/memp_fset.html \
+libdb/docs/api_c/memp_set_clear_len.html \
+libdb/docs/api_c/memp_set_fileid.html \
+libdb/docs/api_c/memp_set_ftype.html \
+libdb/docs/api_c/memp_set_lsn_offset.html \
+libdb/docs/api_c/memp_set_pgcookie.html \
+libdb/docs/api_c/rep_elect.html \
+libdb/docs/api_c/rep_limit.html \
+libdb/docs/api_c/rep_list.html \
+libdb/docs/api_c/rep_message.html \
+libdb/docs/api_c/rep_start.html \
+libdb/docs/api_c/rep_stat.html \
+libdb/docs/api_c/rep_transport.html \
+libdb/docs/api_c/set_func_close.html \
+libdb/docs/api_c/set_func_dirfree.html \
+libdb/docs/api_c/set_func_dirlist.html \
+libdb/docs/api_c/set_func_exists.html \
+libdb/docs/api_c/set_func_free.html \
+libdb/docs/api_c/set_func_fsync.html \
+libdb/docs/api_c/set_func_ioinfo.html \
+libdb/docs/api_c/set_func_malloc.html \
+libdb/docs/api_c/set_func_map.html \
+libdb/docs/api_c/set_func_open.html \
+libdb/docs/api_c/set_func_read.html \
+libdb/docs/api_c/set_func_realloc.html \
+libdb/docs/api_c/set_func_rename.html \
+libdb/docs/api_c/set_func_seek.html \
+libdb/docs/api_c/set_func_sleep.html \
+libdb/docs/api_c/set_func_unlink.html \
+libdb/docs/api_c/set_func_unmap.html \
+libdb/docs/api_c/set_func_write.html \
+libdb/docs/api_c/set_func_yield.html \
+libdb/docs/api_c/txn_abort.html \
+libdb/docs/api_c/txn_begin.html \
+libdb/docs/api_c/txn_checkpoint.html \
+libdb/docs/api_c/txn_class.html \
+libdb/docs/api_c/txn_commit.html \
+libdb/docs/api_c/txn_discard.html \
+libdb/docs/api_c/txn_list.html \
+libdb/docs/api_c/txn_prepare.html \
+libdb/docs/api_c/txn_recover.html \
+libdb/docs/api_c/txn_set_timeout.html \
+libdb/docs/api_c/txn_stat.html \
+libdb/docs/api_c/c_pindex.html \
+libdb/docs/api_cxx/db_associate.html \
+libdb/docs/api_cxx/pindex.src \
+libdb/docs/api_cxx/db_class.html \
+libdb/docs/api_cxx/db_close.html \
+libdb/docs/api_cxx/db_cursor.html \
+libdb/docs/api_cxx/db_del.html \
+libdb/docs/api_cxx/db_err.html \
+libdb/docs/api_cxx/db_fd.html \
+libdb/docs/api_cxx/db_get.html \
+libdb/docs/api_cxx/db_get_byteswapped.html \
+libdb/docs/api_cxx/db_get_type.html \
+libdb/docs/api_cxx/db_join.html \
+libdb/docs/api_cxx/db_key_range.html \
+libdb/docs/api_cxx/db_list.html \
+libdb/docs/api_cxx/db_open.html \
+libdb/docs/api_cxx/db_put.html \
+libdb/docs/api_cxx/db_remove.html \
+libdb/docs/api_cxx/db_rename.html \
+libdb/docs/api_cxx/db_set_append_recno.html \
+libdb/docs/api_cxx/db_stat.html \
+libdb/docs/api_cxx/db_set_bt_compare.html \
+libdb/docs/api_cxx/db_set_bt_minkey.html \
+libdb/docs/api_cxx/db_set_bt_prefix.html \
+libdb/docs/api_cxx/db_set_cache_priority.html \
+libdb/docs/api_cxx/db_set_cachesize.html \
+libdb/docs/api_cxx/db_set_dup_compare.html \
+libdb/docs/api_cxx/db_set_encrypt.html \
+libdb/docs/api_cxx/db_set_errcall.html \
+libdb/docs/api_cxx/db_set_errpfx.html \
+libdb/docs/api_cxx/db_set_feedback.html \
+libdb/docs/api_cxx/db_set_flags.html \
+libdb/docs/api_cxx/db_set_h_ffactor.html \
+libdb/docs/api_cxx/db_set_h_hash.html \
+libdb/docs/api_cxx/db_set_h_nelem.html \
+libdb/docs/api_cxx/db_set_lorder.html \
+libdb/docs/api_cxx/db_set_pagesize.html \
+libdb/docs/api_cxx/db_sync.html \
+libdb/docs/api_cxx/db_set_q_extentsize.html \
+libdb/docs/api_cxx/db_set_re_delim.html \
+libdb/docs/api_cxx/db_set_re_len.html \
+libdb/docs/api_cxx/db_set_re_pad.html \
+libdb/docs/api_cxx/db_set_re_source.html \
+libdb/docs/api_cxx/db_truncate.html \
+libdb/docs/api_cxx/db_upgrade.html \
+libdb/docs/api_cxx/db_verify.html \
+libdb/docs/api_cxx/dbt_class.html \
+libdb/docs/api_cxx/db_set_alloc.html \
+libdb/docs/api_cxx/db_set_errfile.html \
+libdb/docs/api_cxx/db_set_error_stream.html \
+libdb/docs/api_cxx/db_set_paniccall.html \
+libdb/docs/api_cxx/dbt_bulk.html \
+libdb/docs/api_cxx/dbc_class.html \
+libdb/docs/api_cxx/dbc_close.html \
+libdb/docs/api_cxx/dbc_count.html \
+libdb/docs/api_cxx/dbc_del.html \
+libdb/docs/api_cxx/dbc_dup.html \
+libdb/docs/api_cxx/dbc_get.html \
+libdb/docs/api_cxx/dbc_list.html \
+libdb/docs/api_cxx/dbc_put.html \
+libdb/docs/api_cxx/except_class.html \
+libdb/docs/api_cxx/runrec_class.html \
+libdb/docs/api_cxx/c_index.html \
+libdb/docs/api_cxx/env_class.html \
+libdb/docs/api_cxx/env_close.html \
+libdb/docs/api_cxx/env_dbremove.html \
+libdb/docs/api_cxx/env_dbrename.html \
+libdb/docs/api_cxx/env_err.html \
+libdb/docs/api_cxx/env_list.html \
+libdb/docs/api_cxx/env_open.html \
+libdb/docs/api_cxx/env_remove.html \
+libdb/docs/api_cxx/env_set_app_dispatch.html \
+libdb/docs/api_cxx/env_set_cachesize.html \
+libdb/docs/api_cxx/env_set_data_dir.html \
+libdb/docs/api_cxx/env_set_encrypt.html \
+libdb/docs/api_cxx/env_set_errcall.html \
+libdb/docs/api_cxx/env_set_errpfx.html \
+libdb/docs/api_cxx/env_set_feedback.html \
+libdb/docs/api_cxx/env_set_flags.html \
+libdb/docs/api_cxx/env_set_lg_bsize.html \
+libdb/docs/api_cxx/env_set_lg_dir.html \
+libdb/docs/api_cxx/env_set_lg_max.html \
+libdb/docs/api_cxx/env_set_lg_regionmax.html \
+libdb/docs/api_cxx/env_set_lk_conflicts.html \
+libdb/docs/api_cxx/env_set_lk_detect.html \
+libdb/docs/api_cxx/env_set_lk_max_lockers.html \
+libdb/docs/api_cxx/env_set_lk_max_locks.html \
+libdb/docs/api_cxx/env_set_lk_max_objects.html \
+libdb/docs/api_cxx/env_set_mp_mmapsize.html \
+libdb/docs/api_cxx/lock_class.html \
+libdb/docs/api_cxx/env_set_rpc_server.html \
+libdb/docs/api_cxx/env_set_shm_key.html \
+libdb/docs/api_cxx/env_set_tas_spins.html \
+libdb/docs/api_cxx/env_set_timeout.html \
+libdb/docs/api_cxx/env_set_tmp_dir.html \
+libdb/docs/api_cxx/env_set_tx_max.html \
+libdb/docs/api_cxx/env_set_tx_timestamp.html \
+libdb/docs/api_cxx/env_set_verbose.html \
+libdb/docs/api_cxx/env_strerror.html \
+libdb/docs/api_cxx/env_version.html \
+libdb/docs/api_cxx/env_set_errfile.html \
+libdb/docs/api_cxx/env_set_paniccall.html \
+libdb/docs/api_cxx/env_set_alloc.html \
+libdb/docs/api_cxx/env_set_error_stream.html \
+libdb/docs/api_cxx/lock_detect.html \
+libdb/docs/api_cxx/lock_get.html \
+libdb/docs/api_cxx/txn_id.html \
+libdb/docs/api_cxx/lock_id.html \
+libdb/docs/api_cxx/lock_id_free.html \
+libdb/docs/api_cxx/lock_list.html \
+libdb/docs/api_cxx/lock_put.html \
+libdb/docs/api_cxx/lock_stat.html \
+libdb/docs/api_cxx/lock_vec.html \
+libdb/docs/api_cxx/deadlock_class.html \
+libdb/docs/api_cxx/lockng_class.html \
+libdb/docs/api_cxx/log_archive.html \
+libdb/docs/api_cxx/log_compare.html \
+libdb/docs/api_cxx/log_cursor.html \
+libdb/docs/api_cxx/log_file.html \
+libdb/docs/api_cxx/log_flush.html \
+libdb/docs/api_cxx/log_list.html \
+libdb/docs/api_cxx/log_put.html \
+libdb/docs/api_cxx/log_stat.html \
+libdb/docs/api_cxx/logc_class.html \
+libdb/docs/api_cxx/logc_close.html \
+libdb/docs/api_cxx/logc_get.html \
+libdb/docs/api_cxx/lsn_class.html \
+libdb/docs/api_cxx/memp_fclose.html \
+libdb/docs/api_cxx/memp_fopen.html \
+libdb/docs/api_cxx/memp_fsync.html \
+libdb/docs/api_cxx/memp_list.html \
+libdb/docs/api_cxx/memp_register.html \
+libdb/docs/api_cxx/memp_stat.html \
+libdb/docs/api_cxx/memp_sync.html \
+libdb/docs/api_cxx/memp_trickle.html \
+libdb/docs/api_cxx/mempfile_class.html \
+libdb/docs/api_cxx/memp_fcreate.html \
+libdb/docs/api_cxx/memp_fget.html \
+libdb/docs/api_cxx/memp_fput.html \
+libdb/docs/api_cxx/memp_fset.html \
+libdb/docs/api_cxx/memp_set_clear_len.html \
+libdb/docs/api_cxx/memp_set_fileid.html \
+libdb/docs/api_cxx/memp_set_ftype.html \
+libdb/docs/api_cxx/memp_set_lsn_offset.html \
+libdb/docs/api_cxx/memp_set_pgcookie.html \
+libdb/docs/api_cxx/memp_class.html \
+libdb/docs/api_cxx/rep_elect.html \
+libdb/docs/api_cxx/rep_limit.html \
+libdb/docs/api_cxx/rep_list.html \
+libdb/docs/api_cxx/rep_message.html \
+libdb/docs/api_cxx/rep_start.html \
+libdb/docs/api_cxx/rep_stat.html \
+libdb/docs/api_cxx/rep_transport.html \
+libdb/docs/api_cxx/txn_abort.html \
+libdb/docs/api_cxx/txn_begin.html \
+libdb/docs/api_cxx/txn_checkpoint.html \
+libdb/docs/api_cxx/txn_class.html \
+libdb/docs/api_cxx/txn_commit.html \
+libdb/docs/api_cxx/txn_discard.html \
+libdb/docs/api_cxx/txn_list.html \
+libdb/docs/api_cxx/txn_prepare.html \
+libdb/docs/api_cxx/txn_recover.html \
+libdb/docs/api_cxx/txn_set_timeout.html \
+libdb/docs/api_cxx/txn_stat.html \
+libdb/docs/api_cxx/cxx_pindex.html \
+libdb/docs/api_java/db_associate.html \
+libdb/docs/api_java/pindex.src \
+libdb/docs/api_java/db_class.html \
+libdb/docs/api_java/db_close.html \
+libdb/docs/api_java/db_cursor.html \
+libdb/docs/api_java/db_del.html \
+libdb/docs/api_java/db_err.html \
+libdb/docs/api_java/db_fd.html \
+libdb/docs/api_java/db_get.html \
+libdb/docs/api_java/db_get_byteswapped.html \
+libdb/docs/api_java/db_get_type.html \
+libdb/docs/api_java/db_join.html \
+libdb/docs/api_java/db_key_range.html \
+libdb/docs/api_java/db_list.html \
+libdb/docs/api_java/db_open.html \
+libdb/docs/api_java/db_put.html \
+libdb/docs/api_java/db_remove.html \
+libdb/docs/api_java/db_rename.html \
+libdb/docs/api_java/db_set_append_recno.html \
+libdb/docs/api_java/db_stat.html \
+libdb/docs/api_java/db_set_bt_compare.html \
+libdb/docs/api_java/db_set_bt_minkey.html \
+libdb/docs/api_java/db_set_bt_prefix.html \
+libdb/docs/api_java/db_set_cache_priority.html \
+libdb/docs/api_java/db_set_cachesize.html \
+libdb/docs/api_java/db_set_dup_compare.html \
+libdb/docs/api_java/db_set_encrypt.html \
+libdb/docs/api_java/db_set_errcall.html \
+libdb/docs/api_java/db_set_errpfx.html \
+libdb/docs/api_java/db_set_feedback.html \
+libdb/docs/api_java/db_set_flags.html \
+libdb/docs/api_java/db_set_h_ffactor.html \
+libdb/docs/api_java/db_set_h_hash.html \
+libdb/docs/api_java/db_set_h_nelem.html \
+libdb/docs/api_java/db_set_lorder.html \
+libdb/docs/api_java/db_set_pagesize.html \
+libdb/docs/api_java/db_sync.html \
+libdb/docs/api_java/db_set_q_extentsize.html \
+libdb/docs/api_java/db_set_re_delim.html \
+libdb/docs/api_java/db_set_re_len.html \
+libdb/docs/api_java/db_set_re_pad.html \
+libdb/docs/api_java/db_set_re_source.html \
+libdb/docs/api_java/db_truncate.html \
+libdb/docs/api_java/db_upgrade.html \
+libdb/docs/api_java/db_verify.html \
+libdb/docs/api_java/dbt_class.html \
+libdb/docs/api_java/db_set_error_stream.html \
+libdb/docs/api_java/dbt_bulk_class.html \
+libdb/docs/api_java/dbc_class.html \
+libdb/docs/api_java/dbc_close.html \
+libdb/docs/api_java/dbc_count.html \
+libdb/docs/api_java/dbc_del.html \
+libdb/docs/api_java/dbc_dup.html \
+libdb/docs/api_java/dbc_get.html \
+libdb/docs/api_java/dbc_list.html \
+libdb/docs/api_java/dbc_put.html \
+libdb/docs/api_java/except_class.html \
+libdb/docs/api_java/runrec_class.html \
+libdb/docs/api_java/c_index.html \
+libdb/docs/api_java/env_class.html \
+libdb/docs/api_java/env_close.html \
+libdb/docs/api_java/env_dbremove.html \
+libdb/docs/api_java/env_dbrename.html \
+libdb/docs/api_java/env_err.html \
+libdb/docs/api_java/env_list.html \
+libdb/docs/api_java/env_open.html \
+libdb/docs/api_java/env_remove.html \
+libdb/docs/api_java/env_set_app_dispatch.html \
+libdb/docs/api_java/env_set_cachesize.html \
+libdb/docs/api_java/env_set_data_dir.html \
+libdb/docs/api_java/env_set_encrypt.html \
+libdb/docs/api_java/env_set_errcall.html \
+libdb/docs/api_java/env_set_errpfx.html \
+libdb/docs/api_java/env_set_feedback.html \
+libdb/docs/api_java/env_set_flags.html \
+libdb/docs/api_java/env_set_lg_bsize.html \
+libdb/docs/api_java/env_set_lg_dir.html \
+libdb/docs/api_java/env_set_lg_max.html \
+libdb/docs/api_java/env_set_lg_regionmax.html \
+libdb/docs/api_java/env_set_lk_conflicts.html \
+libdb/docs/api_java/env_set_lk_detect.html \
+libdb/docs/api_java/env_set_lk_max_lockers.html \
+libdb/docs/api_java/env_set_lk_max_locks.html \
+libdb/docs/api_java/env_set_lk_max_objects.html \
+libdb/docs/api_java/env_set_mp_mmapsize.html \
+libdb/docs/api_java/env_set_rpc_server.html \
+libdb/docs/api_java/env_set_shm_key.html \
+libdb/docs/api_java/env_set_tas_spins.html \
+libdb/docs/api_java/txn_id.html \
+libdb/docs/api_java/env_set_timeout.html \
+libdb/docs/api_java/env_set_tmp_dir.html \
+libdb/docs/api_java/env_set_tx_max.html \
+libdb/docs/api_java/env_set_tx_timestamp.html \
+libdb/docs/api_java/env_set_verbose.html \
+libdb/docs/api_java/env_strerror.html \
+libdb/docs/api_java/env_version.html \
+libdb/docs/api_java/env_set_error_stream.html \
+libdb/docs/api_java/lock_class.html \
+libdb/docs/api_java/lock_detect.html \
+libdb/docs/api_java/lock_get.html \
+libdb/docs/api_java/lock_id.html \
+libdb/docs/api_java/lock_id_free.html \
+libdb/docs/api_java/lock_list.html \
+libdb/docs/api_java/lock_put.html \
+libdb/docs/api_java/lock_stat.html \
+libdb/docs/api_java/lock_vec.html \
+libdb/docs/api_java/deadlock_class.html \
+libdb/docs/api_java/lockng_class.html \
+libdb/docs/api_java/log_archive.html \
+libdb/docs/api_java/log_compare.html \
+libdb/docs/api_java/log_cursor.html \
+libdb/docs/api_java/log_file.html \
+libdb/docs/api_java/log_flush.html \
+libdb/docs/api_java/log_list.html \
+libdb/docs/api_java/log_put.html \
+libdb/docs/api_java/log_stat.html \
+libdb/docs/api_java/logc_class.html \
+libdb/docs/api_java/logc_close.html \
+libdb/docs/api_java/logc_get.html \
+libdb/docs/api_java/lsn_class.html \
+libdb/docs/api_java/memp_fclose.html \
+libdb/docs/api_java/memp_fopen.html \
+libdb/docs/api_java/memp_fsync.html \
+libdb/docs/api_java/memp_list.html \
+libdb/docs/api_java/memp_register.html \
+libdb/docs/api_java/memp_stat.html \
+libdb/docs/api_java/memp_sync.html \
+libdb/docs/api_java/memp_trickle.html \
+libdb/docs/api_java/mempfile_class.html \
+libdb/docs/api_java/memp_class.html \
+libdb/docs/api_java/rep_elect.html \
+libdb/docs/api_java/rep_limit.html \
+libdb/docs/api_java/rep_list.html \
+libdb/docs/api_java/rep_message.html \
+libdb/docs/api_java/rep_start.html \
+libdb/docs/api_java/rep_stat.html \
+libdb/docs/api_java/rep_transport.html \
+libdb/docs/api_java/txn_abort.html \
+libdb/docs/api_java/txn_begin.html \
+libdb/docs/api_java/txn_checkpoint.html \
+libdb/docs/api_java/txn_class.html \
+libdb/docs/api_java/txn_commit.html \
+libdb/docs/api_java/txn_discard.html \
+libdb/docs/api_java/txn_list.html \
+libdb/docs/api_java/txn_prepare.html \
+libdb/docs/api_java/txn_recover.html \
+libdb/docs/api_java/txn_stat.html \
+libdb/docs/api_java/java_pindex.html \
+libdb/docs/api_java/txn_set_timeout.html \
+libdb/docs/api_tcl/db_close.html \
+libdb/docs/api_tcl/pindex.src \
+libdb/docs/api_tcl/db_count.html \
+libdb/docs/api_tcl/db_cursor.html \
+libdb/docs/api_tcl/db_del.html \
+libdb/docs/api_tcl/db_get.html \
+libdb/docs/api_tcl/db_get_join.html \
+libdb/docs/api_tcl/db_get_type.html \
+libdb/docs/api_tcl/db_is_byteswapped.html \
+libdb/docs/api_tcl/db_join.html \
+libdb/docs/api_tcl/db_open.html \
+libdb/docs/api_tcl/db_put.html \
+libdb/docs/api_tcl/db_remove.html \
+libdb/docs/api_tcl/db_rename.html \
+libdb/docs/api_tcl/db_stat.html \
+libdb/docs/api_tcl/db_sync.html \
+libdb/docs/api_tcl/db_truncate.html \
+libdb/docs/api_tcl/dbc_close.html \
+libdb/docs/api_tcl/dbc_del.html \
+libdb/docs/api_tcl/dbc_dup.html \
+libdb/docs/api_tcl/dbc_get.html \
+libdb/docs/api_tcl/dbc_put.html \
+libdb/docs/api_tcl/env_close.html \
+libdb/docs/api_tcl/env_dbremove.html \
+libdb/docs/api_tcl/env_dbrename.html \
+libdb/docs/api_tcl/env_open.html \
+libdb/docs/api_tcl/env_remove.html \
+libdb/docs/api_tcl/tcl_index.html \
+libdb/docs/api_tcl/txn.html \
+libdb/docs/api_tcl/txn_abort.html \
+libdb/docs/api_tcl/txn_commit.html \
+libdb/docs/api_tcl/version.html \
+libdb/docs/api_tcl/tcl_pindex.html \
+libdb/docs/utility/index.html \
+libdb/docs/utility/berkeley_db_svc.html \
+libdb/docs/utility/db_archive.html \
+libdb/docs/utility/db_checkpoint.html \
+libdb/docs/utility/db_deadlock.html \
+libdb/docs/utility/db_dump.html \
+libdb/docs/utility/db_load.html \
+libdb/docs/utility/db_printlog.html \
+libdb/docs/utility/db_recover.html \
+libdb/docs/utility/db_stat.html \
+libdb/docs/utility/db_upgrade.html \
+libdb/docs/utility/db_verify.html
+
MAINTAINERCLEANFILES = \
$(srcdir)/INSTALL \
$(srcdir)/aclocal.m4 \
diff --git a/configure.ac b/configure.ac
index f6e449c..ef78989 100644
--- a/configure.ac
+++ b/configure.ac
@@ -297,23 +297,37 @@ dnl ******************************
dnl libdb checking
dnl ******************************
AC_ARG_WITH([libdb],
- AS_HELP_STRING([--with-libdb=PREFIX],
- [Prefix where libdb is installed]),
- [libdb_prefix="$withval"], [libdb_prefix='${prefix}'])
-
-DB_CFLAGS="-I$libdb_prefix/include"
-DB_LIBS="-L$libdb_prefix/lib -ldb"
-
-AC_MSG_CHECKING([Berkeley DB])
-save_cflags=$CFLAGS; CFLAGS=$DB_CFLAGS
-save_libs=$LIBS; LIBS="$DB_LIBS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM(
- [[#include <db.h>]],
- [[db_create(NULL, NULL, 0)]])],
- [AC_MSG_RESULT([yes])],
- [AC_MSG_ERROR([Cannot find libdb])])
-CFLAGS=$save_cflags
-LIBS=$save_libs
+	AS_HELP_STRING([--with-libdb=PREFIX],[Prefix where libdb is installed]),
+ [dynamic_libdb=yes], [dynamic_libdb=no])
+
+if test "x${dynamic_libdb}" = "xyes"; then
+ DB_CFLAGS="-I$withval/include"
+ DB_LIBS="-L$withval/lib -ldb"
+
+ AC_MSG_CHECKING([Berkeley DB])
+ save_cflags=$CFLAGS; CFLAGS=$DB_CFLAGS
+ save_libs=$LIBS; LIBS="$DB_LIBS"
+ AC_LINK_IFELSE([AC_LANG_PROGRAM(
+ [[#include <db.h>]],
+ [[db_create(NULL, NULL, 0)]])],
+ [AC_MSG_RESULT([yes])],
+ [AC_MSG_ERROR([cannot find libdb])])
+ CFLAGS=$save_cflags
+ LIBS=$save_libs
+ msg_libdb="dynamically linked"
+else
+ DB_CFLAGS="-I\$(top_builddir)/libdb/dist"
+ DB_LIBS="\$(top_builddir)/libdb/dist/libdb-4.1.la"
+ if test $os_win32 = yes; then
+ dnl Don't pointlessly auto-export the global symbols
+ dnl from the static libdb
+ DB_LIBS="$DB_LIBS -Wl,--exclude-libs=libdb-4.1.a"
+ fi
+ AC_MSG_NOTICE([Using local libdb])
+ msg_libdb="statically linked to local copy"
+fi
+AM_CONDITIONAL(WITH_LIBDB, [test "x$dynamic_libdb" != "xno"])
+
AC_SUBST(DB_CFLAGS)
AC_SUBST(DB_LIBS)
@@ -1430,6 +1444,10 @@ export privlibdir
export privincludedir
export privdatadir
+if test $dynamic_libdb = no; then
+ AC_CONFIG_SUBDIRS(libdb/dist)
+fi
+
AC_CONFIG_FILES([
Makefile
evolution-data-server-zip
@@ -1487,6 +1505,7 @@ camel/tests/smime/Makefile
camel/tests/stream/Makefile
camel/camel.pc
camel/camel-provider.pc
+libdb/Makefile
libebackend/Makefile
libebackend/libebackend.pc
libedataserver/Makefile
@@ -1519,6 +1538,13 @@ po/Makefile.in
])
AC_OUTPUT
+case $host in
+*-mingw*)
+ dnl Override the old libtool in libdb/dist with the one from here
+ cp libtool libdb/dist
+ ;;
+esac
+
echo "
evolution-data-server has been configured as follows:
Calendar: $msg_calendar
@@ -1531,6 +1557,7 @@ echo "
SSL support: $msg_ssl
SMIME support: $msg_smime
IPv6 support: $msg_ipv6
+	Berkeley DB: $msg_libdb
Dot Locking: $msg_dot
File Locking: $msg_file
Large files: $enable_largefile
diff --git a/libdb/LICENSE b/libdb/LICENSE
new file mode 100644
index 0000000..b5b5908
--- /dev/null
+++ b/libdb/LICENSE
@@ -0,0 +1,102 @@
+/*-
+ * $Id$
+ */
+
+The following is the license that applies to this copy of the Berkeley DB
+software. For a license to use the Berkeley DB software under conditions
+other than those described here, or to purchase support for this software,
+please contact Sleepycat Software by email at db@sleepycat.com, or on the
+Web at http://www.sleepycat.com.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+/*
+ * Copyright (c) 1990-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ * how to obtain complete source code for the DB software and any
+ * accompanying software that uses the DB software. The source code
+ * must either be included in the distribution or be available for no
+ * more than the cost of distribution plus a nominal fee, and must be
+ * freely redistributable under reasonable conditions. For an
+ * executable file, complete source code means the source code for all
+ * modules it contains. It does not include source code for modules or
+ * files that typically accompany the major components of the operating
+ * system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
diff --git a/libdb/Makefile.am b/libdb/Makefile.am
new file mode 100644
index 0000000..af7d9bd
--- /dev/null
+++ b/libdb/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS=dist
diff --git a/libdb/README b/libdb/README
new file mode 100644
index 0000000..7328538
--- /dev/null
+++ b/libdb/README
@@ -0,0 +1,5 @@
+Sleepycat Software: Berkeley DB 4.1.25: (December 19, 2002)
+
+This is version 4.1.25 of Berkeley DB from Sleepycat Software. To view
+the release and installation documentation, load the distribution file
+docs/index.html into your web browser.
diff --git a/libdb/btree/bt_compare.c b/libdb/btree/bt_compare.c
new file mode 100644
index 0000000..4530a4e
--- /dev/null
+++ b/libdb/btree/bt_compare.c
@@ -0,0 +1,211 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_cmp --
+ * Compare a key to a given record.
+ *
+ * PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *,
+ * PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__bam_cmp(dbp, dbt, h, indx, func, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ PAGE *h;
+ u_int32_t indx;
+ int (*func)__P((DB *, const DBT *, const DBT *));
+ int *cmpp;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ DBT pg_dbt;
+
+ /*
+ * Returns:
+ * < 0 if dbt is < page record
+ * = 0 if dbt is = page record
+ * > 0 if dbt is > page record
+ *
+ * !!!
+ * We do not clear the pg_dbt DBT even though it's likely to contain
+ * random bits. That should be okay, because the app's comparison
+ * routine had better not be looking at fields other than data/size.
+ * We don't clear it because we go through this path a lot and it's
+ * expensive.
+ */
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)bk;
+ else {
+ pg_dbt.data = bk->data;
+ pg_dbt.size = bk->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ case P_IBTREE:
+ /*
+ * The following code guarantees that the left-most key on an
+ * internal page at any place in the tree sorts less than any
+ * user-specified key. The reason is that if we have reached
+ * this internal page, we know the user key must sort greater
+ * than the key we're storing for this page in any internal
+ * pages at levels above us in the tree. It then follows that
+ * any user-specified key cannot sort less than the first page
+ * which we reference, and so there's no reason to call the
+ * comparison routine. While this may save us a comparison
+ * routine call or two, the real reason for this is because
+ * we don't maintain a copy of the smallest key in the tree,
+ * so that we don't have to update all the levels of the tree
+ * should the application store a new smallest key. And, so,
+ * we may not have a key to compare, which makes doing the
+ * comparison difficult and error prone.
+ */
+ if (indx == 0) {
+ *cmpp = 1;
+ return (0);
+ }
+
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)(bi->data);
+ else {
+ pg_dbt.data = bi->data;
+ pg_dbt.size = bi->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+
+ /*
+ * Overflow.
+ */
+ return (__db_moff(dbp, dbt,
+ bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp));
+}
+
+/*
+ * __bam_defcmp --
+ * Default comparison routine.
+ *
+ * PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+ */
+int
+__bam_defcmp(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ /*
+ * Returns:
+ * < 0 if a is < b
+ * = 0 if a is = b
+ * > 0 if a is > b
+ *
+ * XXX
+ * If a size_t doesn't fit into a long, or if the difference between
+ * any two characters doesn't fit into an int, this routine can lose.
+ * What we need is a signed integral type that's guaranteed to be at
+ * least as large as a size_t, and there is no such thing.
+ */
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+/*
+ * __bam_defpfx --
+ * Default prefix routine.
+ *
+ * PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+ */
+size_t
+__bam_defpfx(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t cnt, len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ cnt = 1;
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt)
+ if (*p1 != *p2)
+ return (cnt);
+
+ /*
+ * We know that a->size must be <= b->size, or they wouldn't be
+ * in this order.
+ */
+ return (a->size < b->size ? a->size + 1 : a->size);
+}
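__bam_defcmp above is the comparator Berkeley DB falls back on when the application does not supply one: it compares keys byte by byte and breaks ties on length. As an illustrative sketch only (the fixed-size, native-endian key layout is an assumption, not anything this patch relies on), an application-supplied comparator registered through DB->set_bt_compare follows the same DBT-in, sign-out contract:

    #include <sys/types.h>
    #include <string.h>
    #include <db.h>

    /* Hypothetical comparator: keys are native-endian u_int32_t values. */
    static int
    compare_u32_keys(DB *dbp, const DBT *a, const DBT *b)
    {
        u_int32_t ka, kb;

        (void)dbp;                           /* unused, as in __bam_defcmp */
        memcpy(&ka, a->data, sizeof(ka));    /* DBT data may be unaligned */
        memcpy(&kb, b->data, sizeof(kb));
        return (ka < kb ? -1 : (ka > kb ? 1 : 0));
    }

It would be installed before DB->open(), for example dbp->set_bt_compare(dbp, compare_u32_keys); the return convention (< 0, = 0, > 0) matches the one described at the top of __bam_cmp.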
diff --git a/libdb/btree/bt_conv.c b/libdb/btree/bt_conv.c
new file mode 100644
index 0000000..a928973
--- /dev/null
+++ b/libdb/btree/bt_conv.c
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_pgin --
+ * Convert host-specific page layout from the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgin(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __bam_pgout --
+ * Convert host-specific page layout to the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgout(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __bam_mswap --
+ * Swap the bytes on the btree metadata page.
+ *
+ * PUBLIC: int __bam_mswap __P((PAGE *));
+ */
+int
+__bam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* maxkey */
+ SWAP32(p); /* minkey */
+ SWAP32(p); /* re_len */
+ SWAP32(p); /* re_pad */
+ SWAP32(p); /* root */
+ p += 92 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
+
+ return (0);
+}
diff --git a/libdb/btree/bt_curadj.c b/libdb/btree/bt_curadj.c
new file mode 100644
index 0000000..413b686
--- /dev/null
+++ b/libdb/btree/bt_curadj.c
@@ -0,0 +1,582 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t));
+
+#ifdef DEBUG
+/*
+ * __bam_cprint --
+ * Display the current internal cursor.
+ *
+ * PUBLIC: void __bam_cprint __P((DBC *));
+ */
+void
+__bam_cprint(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ fprintf(stderr, "\tinternal: ovflsize: %lu", (u_long)cp->ovflsize);
+ if (dbc->dbtype == DB_RECNO)
+ fprintf(stderr, " recno: %lu", (u_long)cp->recno);
+ if (F_ISSET(cp, C_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
+}
+#endif
+
+/*
+ * Cursor adjustments are logged if they are for subtransactions. This is
+ * because it's possible for a subtransaction to adjust cursors which will
+ * still be active after the subtransaction aborts, and so which must be
+ * restored to their previous locations. Cursors that can be both affected
+ * by our cursor adjustments and active after our transaction aborts can
+ * only be found in our parent transaction -- cursors in other transactions,
+ * including other child transactions of our parent, must have conflicting
+ * locker IDs, and so cannot be affected by adjustments in this transaction.
+ */
+
+/*
+ * __bam_ca_delete --
+ * Update the cursors when items are deleted and when already deleted
+ * items are overwritten. Return the number of relevant cursors found.
+ *
+ * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_delete(dbp, pgno, indx, delete)
+ DB *dbp;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ int delete;
+{
+ BTREE_CURSOR *cp;
+ DB *ldbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ int count; /* !!!: Has to contain max number of cursors. */
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. We have the page write locked, so the
+ * only other cursors that can be pointing at a page are
+ * those in the same thread of control. Unfortunately, we don't
+ * know that they're using the same DB handle, so traverse
+ * all matching DB handles in the same DB_ENV, then all cursors
+ * on each matching DB handle.
+ *
+ * Each cursor is single-threaded, so we only need to lock the
+ * list of DBs and then the list of cursors in each DB.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp->pgno == pgno && cp->indx == indx) {
+ if (delete)
+ F_SET(cp, C_DELETED);
+ else
+ F_CLR(cp, C_DELETED);
+ ++count;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (count);
+}
+
+/*
+ * __ram_ca_delete --
+ * Return the number of relevant cursors.
+ *
+ * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t));
+ */
+int
+__ram_ca_delete(dbp, root_pgno)
+ DB *dbp;
+ db_pgno_t root_pgno;
+{
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ int found;
+
+ found = 0;
+ dbenv = dbp->dbenv;
+
+ /*
+ * Review the cursors. See the comment in __bam_ca_delete().
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if (dbc->internal->root == root_pgno)
+ found = 1;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ return (found);
+}
+
+/*
+ * __bam_ca_di --
+ * Adjust the cursors during a delete or insert.
+ *
+ * PUBLIC: int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_di(my_dbc, pgno, indx, adjust)
+ DBC *my_dbc;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ int adjust;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *dbc;
+ DBC_INTERNAL *cp;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == pgno && cp->indx >= indx) {
+ /* Cursor indices should never be negative. */
+ DB_ASSERT(cp->indx != 0 || adjust > 0);
+
+ cp->indx += adjust;
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_opd_cursor -- create a new opd cursor.
+ */
+static int
+__bam_opd_cursor(dbp, dbc, first, tpgno, ti)
+ DB *dbp;
+ DBC *dbc;
+ db_pgno_t tpgno;
+ u_int32_t first, ti;
+{
+ BTREE_CURSOR *cp, *orig_cp;
+ DBC *dbc_nopd;
+ int ret;
+
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+ dbc_nopd = NULL;
+
+ /*
+ * Allocate a new cursor and create the stack. If duplicates
+ * are sorted, we've just created an off-page duplicate Btree.
+ * If duplicates aren't sorted, we've just created a Recno tree.
+ *
+ * Note that in order to get here at all, there shouldn't be
+ * an old off-page dup cursor--to augment the checking db_c_newopd
+ * will do, assert this.
+ */
+ DB_ASSERT(orig_cp->opd == NULL);
+ if ((ret = __db_c_newopd(dbc, tpgno, orig_cp->opd, &dbc_nopd)) != 0)
+ return (ret);
+
+ cp = (BTREE_CURSOR *)dbc_nopd->internal;
+ cp->pgno = tpgno;
+ cp->indx = ti;
+
+ if (dbp->dup_compare == NULL) {
+ /*
+ * Converting to off-page Recno trees is tricky. The
+ * record number for the cursor is the index + 1 (to
+ * convert to 1-based record numbers).
+ */
+ cp->recno = ti + 1;
+ }
+
+ /*
+ * Transfer the deleted flag from the top-level cursor to the
+ * created one.
+ */
+ if (F_ISSET(orig_cp, C_DELETED)) {
+ F_SET(cp, C_DELETED);
+ F_CLR(orig_cp, C_DELETED);
+ }
+
+ /* Stack the cursors and reset the initial cursor's index. */
+ orig_cp->opd = dbc_nopd;
+ orig_cp->indx = first;
+ return (0);
+}
+
+/*
+ * __bam_ca_dup --
+ * Adjust the cursors when moving items from a leaf page to a duplicates
+ * page.
+ *
+ * PUBLIC: int __bam_ca_dup __P((DBC *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+ */
+int
+__bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti)
+ DBC *my_dbc;
+ db_pgno_t fpgno, tpgno;
+ u_int32_t first, fi, ti;
+{
+ BTREE_CURSOR *orig_cp;
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ /* Find cursors pointing to this record. */
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+ if (orig_cp->pgno != fpgno || orig_cp->indx != fi)
+ continue;
+
+ /*
+ * Since we rescan the list see if this is already
+ * converted.
+ */
+ if (orig_cp->opd != NULL)
+ continue;
+
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ if ((ret = __bam_opd_cursor(dbp,
+ dbc, first, tpgno, ti)) !=0)
+ return (ret);
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ /* We released the mutex to get a cursor, start over. */
+ goto loop;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * __bam_ca_undodup --
+ * Adjust the cursors when returning items to a leaf page
+ * from a duplicate page.
+ * Called only during undo processing.
+ *
+ * PUBLIC: int __bam_ca_undodup __P((DB *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__bam_ca_undodup(dbp, first, fpgno, fi, ti)
+ DB *dbp;
+ db_pgno_t fpgno;
+ u_int32_t first, fi, ti;
+{
+ BTREE_CURSOR *orig_cp;
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ orig_cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * A note on the orig_cp->opd != NULL requirement here:
+ * it's possible that there's a cursor that refers to
+ * the same duplicate set, but which has no opd cursor,
+ * because it refers to a different item and we took
+ * care of it while processing a previous record.
+ */
+ if (orig_cp->pgno != fpgno ||
+ orig_cp->indx != first ||
+ orig_cp->opd == NULL ||
+ ((BTREE_CURSOR *)orig_cp->opd->internal)->indx
+ != ti)
+ continue;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ if ((ret = orig_cp->opd->c_close(orig_cp->opd)) != 0)
+ return (ret);
+ orig_cp->opd = NULL;
+ orig_cp->indx = fi;
+ /*
+ * We released the mutex to free a cursor,
+ * start over.
+ */
+ goto loop;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (0);
+}
+
+/*
+ * __bam_ca_rsplit --
+ * Adjust the cursors when doing reverse splits.
+ *
+ * PUBLIC: int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+ */
+int
+__bam_ca_rsplit(my_dbc, fpgno, tpgno)
+ DBC* my_dbc;
+ db_pgno_t fpgno, tpgno;
+{
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ if (dbc->internal->pgno == fpgno) {
+ dbc->internal->pgno = tpgno;
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+/*
+ * __bam_ca_split --
+ * Adjust the cursors when splitting a page.
+ *
+ * PUBLIC: int __bam_ca_split __P((DBC *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft)
+ DBC *my_dbc;
+ db_pgno_t ppgno, lpgno, rpgno;
+ u_int32_t split_indx;
+ int cleft;
+{
+ DB *dbp, *ldbp;
+ DBC *dbc;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ int found, ret;
+
+ dbp = my_dbc->dbp;
+ dbenv = dbp->dbenv;
+ my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ *
+ * If splitting the page that a cursor was on, the cursor has to be
+ * adjusted to point to the same record as before the split. Most
+ * of the time we don't adjust pointers to the left page, because
+ * we're going to copy its contents back over the original page. If
+ * the cursor is on the right page, it is decremented by the number of
+ * records split to the left page.
+ */
+ found = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == ppgno) {
+ if (my_txn != NULL && dbc->txn != my_txn)
+ found = 1;
+ if (cp->indx < split_indx) {
+ if (cleft)
+ cp->pgno = lpgno;
+ } else {
+ cp->pgno = rpgno;
+ cp->indx -= split_indx;
+ }
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp,
+ my_dbc->txn, &lsn, 0, DB_CA_SPLIT, ppgno, rpgno,
+ cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_ca_undosplit --
+ * Adjust the cursors when undoing a split of a page.
+ * If we grew a level we will execute this for both the
+ * left and the right pages.
+ * Called only during undo processing.
+ *
+ * PUBLIC: void __bam_ca_undosplit __P((DB *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+ */
+void
+__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx)
+ DB *dbp;
+ db_pgno_t frompgno, topgno, lpgno;
+ u_int32_t split_indx;
+{
+ DB *ldbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DBC_INTERNAL *cp;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ *
+ * When backing out a split, we move the cursor back
+ * to the original offset and bump it by the split_indx.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ if (dbc->dbtype == DB_RECNO)
+ continue;
+ cp = dbc->internal;
+ if (cp->pgno == topgno) {
+ cp->pgno = frompgno;
+ cp->indx += split_indx;
+ } else if (cp->pgno == lpgno)
+ cp->pgno = frompgno;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+}
diff --git a/libdb/btree/bt_cursor.c b/libdb/btree/bt_cursor.c
new file mode 100644
index 0000000..911ebc7
--- /dev/null
+++ b/libdb/btree/bt_cursor.c
@@ -0,0 +1,2794 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+static int __bam_bulk __P((DBC *, DBT *, u_int32_t));
+static int __bam_c_close __P((DBC *, db_pgno_t, int *));
+static int __bam_c_del __P((DBC *));
+static int __bam_c_destroy __P((DBC *));
+static int __bam_c_first __P((DBC *));
+static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __bam_c_getstack __P((DBC *));
+static int __bam_c_last __P((DBC *));
+static int __bam_c_next __P((DBC *, int, int));
+static int __bam_c_physdel __P((DBC *));
+static int __bam_c_prev __P((DBC *));
+static int __bam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __bam_c_search __P((DBC *,
+ db_pgno_t, const DBT *, u_int32_t, int *));
+static int __bam_c_writelock __P((DBC *));
+static int __bam_getboth_finddatum __P((DBC *, DBT *, u_int32_t));
+static int __bam_getbothc __P((DBC *, DBT *));
+static int __bam_get_prev __P((DBC *));
+static int __bam_isopd __P((DBC *, db_pgno_t *));
+
+/*
+ * Acquire a new page/lock. If we hold a page/lock, discard the page, and
+ * lock-couple the lock.
+ *
+ * !!!
+ * We have to handle both where we have a lock to lock-couple and where we
+ * don't -- we don't duplicate locks when we duplicate cursors if we are
+ * running in a transaction environment as there's no point if locks are
+ * never discarded. This means that the cursor may or may not hold a lock.
+ * In the case where we are descending the tree we always want to
+ * unlock the held interior page so we use ACQUIRE_COUPLE.
+ */
+#undef ACQUIRE
+#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+ if ((pagep) != NULL) { \
+ ret = __mpf->put(__mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if ((ret) == 0 && STD_LOCKING(dbc)) \
+ ret = __db_lget(dbc, LCK_COUPLE, lpgno, mode, 0, &(lock));\
+ if ((ret) == 0) \
+ ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
+}
+
+#undef ACQUIRE_COUPLE
+#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+ if ((pagep) != NULL) { \
+ ret = __mpf->put(__mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if ((ret) == 0 && STD_LOCKING(dbc)) \
+ ret = __db_lget(dbc, \
+ LCK_COUPLE_ALWAYS, lpgno, mode, 0, &(lock)); \
+ if ((ret) == 0) \
+ ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
+}
+
+/* Acquire a new page/lock for a cursor. */
+#undef ACQUIRE_CUR
+#define ACQUIRE_CUR(dbc, mode, p, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+ if ((ret) == 0) { \
+ __cp->pgno = p; \
+ __cp->lock_mode = (mode); \
+ } \
+}
+
+/*
+ * Acquire a new page/lock for a cursor and release the previous.
+ * This is typically used when descending a tree and we do not
+ * want to hold the interior nodes locked.
+ */
+#undef ACQUIRE_CUR_COUPLE
+#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ACQUIRE_COUPLE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+ if ((ret) == 0) { \
+ __cp->pgno = p; \
+ __cp->lock_mode = (mode); \
+ } \
+}
+
+/*
+ * Acquire a write lock if we don't already have one.
+ *
+ * !!!
+ * See ACQUIRE macro on why we handle cursors that don't have locks.
+ */
+#undef ACQUIRE_WRITE_LOCK
+#define ACQUIRE_WRITE_LOCK(dbc, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ ret = 0; \
+ if (STD_LOCKING(dbc) && \
+ __cp->lock_mode != DB_LOCK_WRITE && \
+ ((ret) = __db_lget(dbc, \
+ LOCK_ISSET(__cp->lock) ? LCK_COUPLE : 0, \
+ __cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \
+ __cp->lock_mode = DB_LOCK_WRITE; \
+}
+
+/* Discard the current page/lock. */
+#undef DISCARD
+#define DISCARD(dbc, ldiscard, lock, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+ int __t_ret; \
+ if ((pagep) != NULL) { \
+ ret = __mpf->put(__mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if (ldiscard) \
+ __t_ret = __LPUT((dbc), lock); \
+ else \
+ __t_ret = __TLPUT((dbc), lock); \
+ if (__t_ret != 0 && (ret) == 0) \
+ ret = __t_ret; \
+}
+
+/* Discard the current page/lock for a cursor. */
+#undef DISCARD_CUR
+#define DISCARD_CUR(dbc, ret) { \
+ BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+ DISCARD(dbc, 0, __cp->lock, __cp->page, ret); \
+ if ((ret) == 0) \
+ __cp->lock_mode = DB_LOCK_NG; \
+}
+
+/* If on-page item is a deleted record. */
+#undef IS_DELETED
+#define IS_DELETED(dbp, page, indx) \
+ B_DISSET(GET_BKEYDATA(dbp, page, \
+ (indx) + (TYPE(page) == P_LBTREE ? O_INDX : 0))->type)
+#undef IS_CUR_DELETED
+#define IS_CUR_DELETED(dbc) \
+ IS_DELETED((dbc)->dbp, (dbc)->internal->page, (dbc)->internal->indx)
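+/*
+ * Note: on a P_LBTREE leaf, entries come in key/data pairs, so the deleted
+ * flag is carried on the data item at indx + O_INDX; on duplicate and recno
+ * pages each entry is a single item and indx is used directly.
+ */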
+
+/*
+ * Test to see if two cursors could point to duplicates of the same key.
+ * In the case of off-page duplicates they are the same, as the cursors
+ * will be in the same off-page duplicate tree. In the case of on-page
+ * duplicates, the key index offsets must be the same. For the last test,
+ * as the original cursor may not have a valid page pointer, we use the
+ * current cursor's.
+ */
+#undef IS_DUPLICATE
+#define IS_DUPLICATE(dbc, i1, i2) \
+ (P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i1] == \
+ P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i2])
+#undef IS_CUR_DUPLICATE
+#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx) \
+ (F_ISSET(dbc, DBC_OPD) || \
+ (orig_pgno == (dbc)->internal->pgno && \
+ IS_DUPLICATE(dbc, (dbc)->internal->indx, orig_indx)))
+
+/*
+ * __bam_c_init --
+ * Initialize the access private portion of a cursor
+ *
+ * PUBLIC: int __bam_c_init __P((DBC *, DBTYPE));
+ */
+int
+__bam_c_init(dbc, dbtype)
+ DBC *dbc;
+ DBTYPE dbtype;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbc->dbp->dbenv;
+
+ /* Allocate/initialize the internal structure. */
+ if (dbc->internal == NULL && (ret =
+ __os_malloc(dbenv, sizeof(BTREE_CURSOR), &dbc->internal)) != 0)
+ return (ret);
+
+ /* Initialize methods. */
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
+ dbc->c_put = __db_c_put;
+ if (dbtype == DB_BTREE) {
+ dbc->c_am_bulk = __bam_bulk;
+ dbc->c_am_close = __bam_c_close;
+ dbc->c_am_del = __bam_c_del;
+ dbc->c_am_destroy = __bam_c_destroy;
+ dbc->c_am_get = __bam_c_get;
+ dbc->c_am_put = __bam_c_put;
+ dbc->c_am_writelock = __bam_c_writelock;
+ } else {
+ dbc->c_am_bulk = __bam_bulk;
+ dbc->c_am_close = __bam_c_close;
+ dbc->c_am_del = __ram_c_del;
+ dbc->c_am_destroy = __bam_c_destroy;
+ dbc->c_am_get = __ram_c_get;
+ dbc->c_am_put = __ram_c_put;
+ dbc->c_am_writelock = __bam_c_writelock;
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_c_refresh
+ * Set things up properly for cursor re-use.
+ *
+ * PUBLIC: int __bam_c_refresh __P((DBC *));
+ */
+int
+__bam_c_refresh(dbc)
+ DBC *dbc;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+
+ dbp = dbc->dbp;
+ t = dbp->bt_internal;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * If our caller set the root page number, it's because the root was
+ * known. This is always the case for off page dup cursors. Else,
+ * pull it out of our internal information.
+ */
+ if (cp->root == PGNO_INVALID)
+ cp->root = t->bt_root;
+
+ LOCK_INIT(cp->lock);
+ cp->lock_mode = DB_LOCK_NG;
+
+ cp->sp = cp->csp = cp->stack;
+ cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]);
+
+ /*
+ * The btree leaf page data structures require that two key/data pairs
+ * (or four items) fit on a page, but other than that there's no fixed
+ * requirement. The btree off-page duplicates only require two items,
+ * to be exact, but requiring four for them as well seems reasonable.
+ *
+ * Recno uses the btree bt_ovflsize value -- it's close enough.
+ */
+ cp->ovflsize = B_MINKEY_TO_OVFLSIZE(
+ dbp, F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey, dbp->pgsize);
+
+ cp->recno = RECNO_OOB;
+ cp->order = INVALID_ORDER;
+ cp->flags = 0;
+
+ /* Initialize for record numbers. */
+ if (F_ISSET(dbc, DBC_OPD) ||
+ dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_AM_RECNUM)) {
+ F_SET(cp, C_RECNUM);
+
+ /*
+ * All btrees that support record numbers, optionally standard
+ * recno trees, and all off-page duplicate recno trees have
+ * mutable record numbers.
+ */
+ if ((F_ISSET(dbc, DBC_OPD) && dbc->dbtype == DB_RECNO) ||
+ F_ISSET(dbp, DB_AM_RECNUM | DB_AM_RENUMBER))
+ F_SET(cp, C_RENUMBER);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_c_close --
+ * Close down the cursor.
+ */
+static int
+__bam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ BTREE_CURSOR *cp, *cp_opd, *cp_c;
+ DB *dbp;
+ DBC *dbc_opd, *dbc_c;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int cdb_lock, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ cp_opd = (dbc_opd = cp->opd) == NULL ?
+ NULL : (BTREE_CURSOR *)dbc_opd->internal;
+ cdb_lock = ret = 0;
+
+ /*
+ * There are 3 ways this function is called:
+ *
+ * 1. Closing a primary cursor: we get called with a pointer to a
+ * primary cursor that has a NULL opd field. This happens when
+ * closing a btree/recno database cursor without an associated
+ * off-page duplicate tree.
+ *
+ * 2. Closing a primary and an off-page duplicate cursor stack: we
+ * get called with a pointer to the primary cursor which has a
+ * non-NULL opd field. This happens when closing a btree cursor
+ * into database with an associated off-page btree/recno duplicate
+ * tree. (It can't be a primary recno database, recno databases
+ * don't support duplicates.)
+ *
+ * 3. Closing an off-page duplicate cursor stack: we get called with
+ * a pointer to the off-page duplicate cursor. This happens when
+ * closing a non-btree database that has an associated off-page
+ * btree/recno duplicate tree or for a btree database when the
+ * opd tree is not empty (root_pgno == PGNO_INVALID).
+ *
+ * If either the primary or off-page duplicate cursor deleted a btree
+ * key/data pair, check to see if the item is still referenced by a
+ * different cursor. If it is, confirm that cursor's delete flag is
+ * set and leave it to that cursor to do the delete.
+ *
+ * NB: The test for == 0 below is correct. Our caller already removed
+ * our cursor argument from the active queue, we won't find it when we
+ * search the queue in __bam_ca_delete().
+ * NB: It can't be true that both the primary and off-page duplicate
+ * cursors have deleted a btree key/data pair. Either the primary
+ * cursor may have deleted an item and there's no off-page duplicate
+ * cursor, or there's an off-page duplicate cursor and it may have
+ * deleted an item.
+ *
+ * Primary recno databases aren't an issue here. Recno keys are either
+ * deleted immediately or never deleted, and do not have to be handled
+ * here.
+ *
+ * Off-page duplicate recno databases are an issue here, cases #2 and
+ * #3 above can both be off-page recno databases. The problem is the
+ * same as the final problem for off-page duplicate btree databases.
+ * If we no longer need the off-page duplicate tree, we want to remove
+ * it. For off-page duplicate btrees, we are done with the tree when
+ * we delete the last item it contains, i.e., there can be no further
+ * references to it when it's empty. For off-page duplicate recnos,
+ * we remove items from the tree as the application calls the remove
+ * function, so we are done with the tree when we close the last cursor
+ * that references it.
+ *
+ * We optionally take the root page number from our caller. If the
+ * primary database is a btree, we can get it ourselves because dbc
+ * is the primary cursor. If the primary database is not a btree,
+ * the problem is that we may be dealing with a stack of pages. The
+ * cursor we're using to do the delete points at the bottom of that
+ * stack and we need the top of the stack.
+ */
+ if (F_ISSET(cp, C_DELETED)) {
+ dbc_c = dbc;
+ switch (dbc->dbtype) {
+ case DB_BTREE: /* Case #1, #3. */
+ if (__bam_ca_delete(dbp, cp->pgno, cp->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (!F_ISSET(dbc, DBC_OPD)) /* Case #1. */
+ goto done;
+ /* Case #3. */
+ if (__ram_ca_delete(dbp, cp->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+
+ if (dbc_opd == NULL)
+ goto done;
+
+ if (F_ISSET(cp_opd, C_DELETED)) { /* Case #2. */
+ /*
+ * We will not have been provided a root page number. Acquire
+ * one from the primary database.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
+ goto err;
+ root_pgno = GET_BOVERFLOW(dbp, h, cp->indx + O_INDX)->pgno;
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+
+ dbc_c = dbc_opd;
+ switch (dbc_opd->dbtype) {
+ case DB_BTREE:
+ if (__bam_ca_delete(
+ dbp, cp_opd->pgno, cp_opd->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (__ram_ca_delete(dbp, cp_opd->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+ goto done;
+
+lock: cp_c = (BTREE_CURSOR *)dbc_c->internal;
+
+ /*
+ * If this is CDB, upgrade the lock if necessary. While we acquired
+ * the write lock to logically delete the record, we released it when
+ * we returned from that call, and so may not be holding a write lock
+ * at the moment. NB: to get here in CDB we must either be holding a
+ * write lock or be the only cursor that is permitted to acquire write
+ * locks. The reason is that there can never be more than a single CDB
+ * write cursor (that cursor cannot be dup'd), and so that cursor must
+ * be closed and the item therefore deleted before any other cursor
+ * could acquire a reference to this item.
+ *
+ * Note that dbc may be an off-page dup cursor; this is the sole
+ * instance in which an OPD cursor does any locking, but it's necessary
+ * because we may be closed by ourselves without a parent cursor
+ * handy, and we have to do a lock upgrade on behalf of somebody.
+ * If this is the case, the OPD has been given the parent's locking
+ * info in __db_c_get--the OPD is also a WRITEDUP.
+ */
+ if (CDB_LOCKING(dbp->dbenv)) {
+ if (F_ISSET(dbc, DBC_WRITEDUP | DBC_WRITECURSOR)) {
+ if ((ret = dbp->dbenv->lock_get(
+ dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ cdb_lock = 1;
+ }
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+
+ goto delete;
+ }
+
+ /*
+ * The variable dbc_c has been initialized to reference the cursor in
+ * which we're going to do the delete. Initialize the cursor's page
+ * and lock structures as necessary.
+ *
+ * First, we may not need to acquire any locks. If we're in case #3,
+ * that is, the primary database isn't a btree database, our caller
+ * is responsible for acquiring any necessary locks before calling us.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+ goto delete;
+ }
+
+ /*
+ * Otherwise, acquire a write lock. If the cursor that did the initial
+ * logical deletion (and which had a write lock) is not the same as the
+ * cursor doing the physical deletion (which may have only ever had a
+ * read lock on the item), we need to upgrade. The confusion comes as
+ * follows:
+ *
+ * C1 created, acquires item read lock
+ * C2 dup C1, create C2, also has item read lock.
+ * C1 acquire write lock, delete item
+ * C1 close
+ * C2 close, needs a write lock to physically delete item.
+ *
+ * If we're in a TXN, we know that C2 will be able to acquire the write
+ * lock, because no locker other than the one shared by C1 and C2 can
+ * acquire a write lock -- the original write lock C1 acquired was never
+ * discarded.
+ *
+ * If we're not in a TXN, it's nastier. Other cursors might acquire
+ * read locks on the item after C1 closed, discarding its write lock,
+ * and such locks would prevent C2 from acquiring a read lock. That's
+ * OK, though, we'll simply wait until we can acquire a read lock, or
+ * we'll deadlock. (Which better not happen, since we're not in a TXN.)
+ *
+ * Lock the primary database page, regardless of whether we're deleting
+ * an item on a primary database page or an off-page duplicates page.
+ */
+ ACQUIRE(dbc, DB_LOCK_WRITE,
+ cp->pgno, cp_c->lock, cp_c->pgno, cp_c->page, ret);
+ if (ret != 0)
+ goto err;
+
+delete: /*
+ * If the delete occurred in a btree, delete the on-page physical item
+ * referenced by the cursor.
+ */
+ if (dbc_c->dbtype == DB_BTREE && (ret = __bam_c_physdel(dbc_c)) != 0)
+ goto err;
+
+ /*
+ * If we're not working in an off-page duplicate tree, then we're
+ * done.
+ */
+ if (!F_ISSET(dbc_c, DBC_OPD) || root_pgno == PGNO_INVALID)
+ goto done;
+
+ /*
+ * We may have just deleted the last element in the off-page duplicate
+ * tree, and closed the last cursor in the tree. For an off-page btree
+ * there are no other cursors in the tree by definition, if the tree is
+ * empty. For an off-page recno we know we have closed the last cursor
+ * in the tree because the __ram_ca_delete call above returned 0 only
+ * in that case. So, if the off-page duplicate tree is empty at this
+ * point, we want to remove it.
+ */
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0)
+ goto err;
+ if (NUM_ENT(h) == 0) {
+ if ((ret = __db_free(dbc, h)) != 0)
+ goto err;
+ } else {
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * When removing the tree, we have to do one of two things. If this is
+ * case #2, that is, the primary tree is a btree, delete the key that's
+ * associated with the tree from the btree leaf page. We know we are
+ * the only reference to it and we already have the correct lock. We
+ * detect this case because the cursor that was passed to us references
+ * an off-page duplicate cursor.
+ *
+ * If this is case #3, that is, the primary tree isn't a btree, pass
+ * the information back to our caller, it's their job to do cleanup on
+ * the primary page.
+ */
+ if (dbc_opd != NULL) {
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ if ((ret = __bam_c_physdel(dbc)) != 0)
+ goto err;
+ } else
+ *rmroot = 1;
+err:
+done: /*
+ * Discard the page references and locks, and confirm that the stack
+ * has been emptied.
+ */
+ if (dbc_opd != NULL) {
+ DISCARD_CUR(dbc_opd, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ DISCARD_CUR(dbc, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Downgrade any CDB lock we acquired. */
+ if (cdb_lock)
+ (void)__lock_downgrade(
+ dbp->dbenv, &dbc->mylock, DB_LOCK_IWRITE, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__bam_c_destroy(dbc)
+ DBC *dbc;
+{
+ /* Discard the structures. */
+ __os_free(dbc->dbp->dbenv, dbc->internal);
+
+ return (0);
+}
+
+/*
+ * __bam_c_count --
+ * Return a count of on and off-page duplicates.
+ *
+ * PUBLIC: int __bam_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__bam_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ db_indx_t indx, top;
+ db_recno_t recno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called with the top-level cursor that may reference an off-page
+ * duplicates page. If it's a set of on-page duplicates, get the
+ * page and count. Otherwise, get the root page of the off-page
+ * duplicate tree, and use the count. We don't have to acquire any
+ * new locks, we have to have a read lock to even get here.
+ */
+ if (cp->opd == NULL) {
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * Move back to the beginning of the set of duplicates and
+ * then count forward.
+ */
+ for (indx = cp->indx;; indx -= P_INDX)
+ if (indx == 0 ||
+ !IS_DUPLICATE(dbc, indx, indx - P_INDX))
+ break;
+ for (recno = 1, top = NUM_ENT(cp->page) - P_INDX;
+ indx < top; ++recno, indx += P_INDX)
+ if (!IS_DUPLICATE(dbc, indx, indx + P_INDX))
+ break;
+ *recnop = recno;
+ } else {
+ if ((ret =
+ mpf->get(mpf, &cp->opd->internal->root, 0, &cp->page)) != 0)
+ return (ret);
+
+ *recnop = RE_NREC(cp->page);
+ }
+
+ ret = mpf->put(mpf, cp->page, 0);
+ cp->page = NULL;
+
+ return (ret);
+}
+
+/*
+ * __bam_c_del --
+ * Delete using a cursor.
+ */
+static int
+__bam_c_del(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* If the item was already deleted, return failure. */
+ if (F_ISSET(cp, C_DELETED))
+ return (DB_KEYEMPTY);
+
+ /*
+ * This code is always called with a page lock but no page.
+ */
+ DB_ASSERT(cp->page == NULL);
+
+ /*
+ * We don't physically delete the record until the cursor moves, so
+ * we have to have a long-lived write lock on the page instead of a
+ * long-lived read lock. Note, we have to have a read lock to even
+ * get here.
+ *
+ * If we're maintaining record numbers, we lock the entire tree, else
+ * we lock the single page.
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if ((ret = __bam_c_getstack(dbc)) != 0)
+ goto err;
+ cp->page = cp->csp->page;
+ } else {
+ ACQUIRE_CUR(dbc, DB_LOCK_WRITE, cp->pgno, ret);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cdel_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+
+ /* Set the intent-to-delete flag on the page. */
+ if (TYPE(cp->page) == P_LBTREE)
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx + O_INDX)->type);
+ else
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type);
+
+ /* Mark the page dirty. */
+ ret = mpf->set(mpf, cp->page, DB_MPOOL_DIRTY);
+
+err: /*
+ * If we've been successful so far and the tree has record numbers,
+ * adjust the record counts. Either way, release acquired page(s).
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if (ret == 0)
+ ret = __bam_adjust(dbc, -1);
+ (void)__bam_stkrel(dbc, 0);
+ } else
+ if (cp->page != NULL &&
+ (t_ret = mpf->put(mpf, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ cp->page = NULL;
+
+ /* Update the cursors last, after all chance of failure is past. */
+ if (ret == 0)
+ (void)__bam_ca_delete(dbp, cp->pgno, cp->indx, 1);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_dup --
+ * Duplicate a btree cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __bam_c_dup __P((DBC *, DBC *));
+ */
+int
+__bam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ BTREE_CURSOR *orig, *new;
+ int ret;
+
+ orig = (BTREE_CURSOR *)orig_dbc->internal;
+ new = (BTREE_CURSOR *)new_dbc->internal;
+
+ /*
+ * If we're holding a lock we need to acquire a copy of it, unless
+ * we're in a transaction. We don't need to copy any lock we're
+ * holding inside a transaction because all the locks are retained
+ * until the transaction commits or aborts.
+ */
+ if (LOCK_ISSET(orig->lock) && orig_dbc->txn == NULL) {
+ if ((ret = __db_lget(new_dbc,
+ 0, new->pgno, new->lock_mode, 0, &new->lock)) != 0)
+ return (ret);
+ }
+ new->ovflsize = orig->ovflsize;
+ new->recno = orig->recno;
+ new->flags = orig->flags;
+
+ return (0);
+}
+
+/*
+ * __bam_c_get --
+ * Get using a cursor (btree).
+ */
+static int
+__bam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t orig_pgno;
+ db_indx_t orig_indx;
+ int exact, newopd, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ orig_pgno = cp->pgno;
+ orig_indx = cp->indx;
+
+ newopd = 0;
+ switch (flags) {
+ case DB_CURRENT:
+ /* It's not possible to return a deleted record. */
+ if (F_ISSET(cp, C_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ break;
+ case DB_FIRST:
+ newopd = 1;
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * There are two ways to get here based on DBcursor->c_get
+ * with the DB_GET_BOTH/DB_GET_BOTH_RANGE flags set:
+ *
+ * 1. Searching a sorted off-page duplicate tree: do a tree
+ * search.
+ *
+ * 2. Searching btree: do a tree search. If it returns a
+ * reference to off-page duplicate tree, return immediately
+ * and let our caller deal with it. If the search doesn't
+ * return a reference to off-page duplicate tree, continue
+ * with an on-page search.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = __bam_c_search(
+ dbc, PGNO_INVALID, data, flags, &exact)) != 0)
+ goto err;
+ if (flags == DB_GET_BOTH) {
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ }
+
+ /*
+ * We didn't require an exact match, so the search may
+ * have returned an entry past the end of the page,
+ * or we may be referencing a deleted record. If so,
+ * move to the next entry.
+ */
+ if ((cp->indx == NUM_ENT(cp->page) ||
+ IS_CUR_DELETED(dbc)) &&
+ (ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ } else {
+ if ((ret = __bam_c_search(
+ dbc, PGNO_INVALID, key, flags, &exact)) != 0)
+ return (ret);
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if (pgnop != NULL && __bam_isopd(dbc, pgnop)) {
+ newopd = 1;
+ break;
+ }
+ if ((ret =
+ __bam_getboth_finddatum(dbc, data, flags)) != 0)
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ if ((ret = __bam_getbothc(dbc, data)) != 0)
+ goto err;
+ break;
+ case DB_LAST:
+ newopd = 1;
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ break;
+ case DB_NEXT:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ break;
+ case DB_NEXT_DUP:
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ if (!IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_NEXT_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_PREV:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ break;
+ case DB_PREV_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_SET:
+ case DB_SET_RECNO:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
+ goto err;
+ break;
+ case DB_SET_RANGE:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
+ goto err;
+
+ /*
+ * As we didn't require an exact match, the search function
+ * may have returned an entry past the end of the page. Or,
+ * we may be referencing a deleted record. If so, move to
+ * the next entry.
+ */
+ if (cp->indx == NUM_ENT(cp->page) || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__bam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * We may have moved to an off-page duplicate tree. Return that
+ * information to our caller.
+ */
+ if (newopd && pgnop != NULL)
+ (void)__bam_isopd(dbc, pgnop);
+
+ /*
+ * Don't return the key, it was passed to us (this is true even if the
+ * application defines a compare function returning equality for more
+ * than one key value, since in that case which actual value we store
+ * in the database is undefined -- and particularly true in the case of
+ * duplicates where we only store one key value).
+ */
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTH_RANGE || flags == DB_SET)
+ F_SET(key, DB_DBT_ISSET);
+
+err: /*
+ * Regardless of whether we were successful or not, if the cursor
+ * moved, clear the delete flag; DBcursor->c_get never references
+ * a deleted key if it moved at all.
+ */
+ if (F_ISSET(cp, C_DELETED) &&
+ (cp->pgno != orig_pgno || cp->indx != orig_indx))
+ F_CLR(cp, C_DELETED);
+
+ return (ret);
+}
+
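+/*
+ * __bam_get_prev --
+ * Move the cursor back one item; if the new item heads an off-page
+ * duplicate tree, open an off-page duplicate cursor positioned on its
+ * last duplicate.
+ */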
+static int
+__bam_get_prev(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ return (ret);
+
+ if (__bam_isopd(dbc, &pgno)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if ((ret = __db_c_newopd(dbc, pgno, cp->opd, &cp->opd)) != 0)
+ return (ret);
+ if ((ret = cp->opd->c_am_get(cp->opd,
+ &key, &data, DB_LAST, NULL)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_bulk -- Return bulk data from a btree.
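+ *
+ * The caller's buffer is filled from both ends: page images and overflow
+ * items are copied in from the front, while the offset/size table is built
+ * backwards from the end and terminated with -1 (or RECNO_OOB when record
+ * numbers are returned).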
+ */
+static int
+__bam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t *inp, indx, pg_keyoff;
+ int32_t *endp, key_off, *offp, *saveoffp;
+ u_int8_t *dbuf, *dp, *np;
+ u_int32_t key_size, size, space;
+ int adj, is_key, need_pg, next_key, no_dup;
+ int pagesize, rec_key, ret;
+
+ ret = 0;
+ key_off = 0;
+ size = 0;
+ pagesize = dbc->dbp->pgsize;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * dp tracks the beginning of the page in the buffer.
+ * np is the next place to copy things into the buffer.
+ * dbuf always stays at the beginning of the buffer.
+ */
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of space that is left; reserve one slot for the termination entry. */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *)((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+
+ /*
+ * Distinguish between BTREE and RECNO.
+ * There are no keys in RECNO. If MULTIPLE_KEY is specified
+ * then we return the record numbers.
+ * is_key indicates that multiple btree keys are returned.
+ * rec_key is set if we are returning record numbers.
+ * next_key is set if we are going after the next key rather than dup.
+ */
+ if (dbc->dbtype == DB_BTREE) {
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1: 0;
+ rec_key = 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 2;
+ } else {
+ is_key = 0;
+ rec_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 1;
+ }
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+
+next_pg:
+ indx = cp->indx;
+ pg = cp->page;
+
+ inp = P_INP(dbc->dbp, pg);
+ /* The current page is not yet in the buffer. */
+ need_pg = 1;
+
+ /*
+ * Keep track of the offset of the current key on the page.
+ * If we are returning keys, set it to 0 first so we force
+ * the copy of the key to the buffer.
+ */
+ pg_keyoff = 0;
+ if (is_key == 0)
+ pg_keyoff = inp[indx];
+
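+ /* Copy entries from this page until we run off it or out of buffer space. */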
+ do {
+ if (IS_DELETED(dbc->dbp, pg, indx)) {
+ if (dbc->dbtype != DB_RECNO)
+ continue;
+
+ cp->recno++;
+ /*
+ * If we are not returning recnos then we
+ * need to fill in every slot so the user
+ * can calculate the record numbers.
+ */
+ if (rec_key != 0)
+ continue;
+
+ space -= 2 * sizeof(*offp);
+ /* Check if space has underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /* Just mark the empty recno slots. */
+ *offp-- = 0;
+ *offp-- = 0;
+ continue;
+ }
+
+ /*
+ * Check to see if we have a new key.
+ * If so, then see if we need to put the
+ * key into the buffer. If it's already there
+ * then we just point to it.
+ */
+ if (is_key && pg_keyoff != inp[indx]) {
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = key_size = bo->tlen;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ /* If nothing was added, then error. */
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * We need to back up to the
+ * last record put into the
+ * buffer so that it is
+ * CURRENT.
+ */
+ if (indx != 0)
+ indx -= P_INDX;
+ else {
+ if ((ret =
+ __bam_get_prev(
+ dbc)) != 0)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ break;
+ }
+ /*
+ * Move the data part of the page
+ * to the buffer.
+ */
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = bk->len;
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ pg_keyoff = inp[indx];
+ }
+ }
+
+ /*
+ * Reserve space for the pointers and sizes.
+ * Either key/data pair or just for a data item.
+ */
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (rec_key)
+ space -= sizeof(*offp);
+
+ /* Check to see if space has underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /*
+ * Determine if the next record is in the
+ * buffer already or if it needs to be copied in.
+ * If we have an off page dup, then copy as many
+ * as will fit into the buffer.
+ */
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx + adj - 1);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ bo = (BOVERFLOW *)bk;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ /*
+ * We pass the offset of the current key.
+ * On return we check to see if offp has
+ * moved to see if any data fit.
+ */
+ saveoffp = offp;
+ if ((ret = __bam_bulk_duplicates(dbc, bo->pgno,
+ dbuf, is_key ? offp + P_INDX : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ /* If nothing was added, then error. */
+ if (offp == saveoffp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ } else if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space)
+ goto back_up;
+ if ((ret =
+ __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ *offp-- = size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ /*
+ * Back up the index so that the
+ * last record in the buffer is CURRENT.
+ */
+ if (indx >= adj)
+ indx -= adj;
+ else {
+ if ((ret =
+ __bam_get_prev(dbc)) != 0 &&
+ ret != DB_NOTFOUND)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno--;
+get_space:
+ /*
+ * See if we put anything in the
+ * buffer, or, if we are doing a
+ * DBP->get, whether we got all of the data.
+ */
+ if (offp >=
+ (is_key ? &endp[-1] : endp) ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ /*
+ * Add the offsets and sizes to the end of the buffer.
+ * First add the key info then the data info.
+ */
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(inp[indx + adj - 1] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ *offp-- = bk->len;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno++;
+ else if (no_dup) {
+ while (indx + adj < NUM_ENT(pg) &&
+ pg_keyoff == inp[indx + adj])
+ indx += adj;
+ }
+ /*
+ * Stop when we either run off the page or we
+ * move to the next key and we are not returning multiple keys.
+ */
+ } while ((indx += adj) < NUM_ENT(pg) &&
+ (next_key || pg_keyoff == inp[indx]));
+
+ /* If we are off the page then try the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ cp->indx = indx;
+ ret = __bam_c_next(dbc, 0, 1);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+
+ /*
+ * If we did a DBP->get we must return an error if we did not
+ * return all the data for the current key, because the caller
+ * has no way to know that it did not get it all, nor any
+ * interface to fetch the balance.
+ */
+
+ if (ret == 0 &&
+ F_ISSET(dbc, DBC_TRANSIENT) && pg_keyoff == inp[indx]) {
+ data->size = (data->ulen - space) + size;
+ return (ENOMEM);
+ }
+ /*
+ * Must leave the index pointing at the last record fetched.
+ * If we are not fetching keys, we may have stepped to the
+ * next key.
+ */
+ if (next_key || pg_keyoff == inp[indx])
+ cp->indx = indx;
+ else
+ cp->indx = indx - P_INDX;
+
+ if (rec_key == 1)
+ *offp = (u_int32_t) RECNO_OOB;
+ else
+ *offp = (u_int32_t) -1;
+ return (0);
+}
+
+/*
+ * __bam_bulk_overflow --
+ * Dump overflow record into the buffer.
+ * The space requirements have already been checked.
+ * PUBLIC: int __bam_bulk_overflow
+ * PUBLIC: __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *));
+ */
+int
+__bam_bulk_overflow(dbc, len, pgno, dp)
+ DBC *dbc;
+ u_int32_t len;
+ db_pgno_t pgno;
+ u_int8_t *dp;
+{
+ DBT dbt;
+
+ memset(&dbt, 0, sizeof(dbt));
+ F_SET(&dbt, DB_DBT_USERMEM);
+ dbt.ulen = len;
+ dbt.data = (void *)dp;
+ return (__db_goff(dbc->dbp, &dbt, len, pgno, NULL, NULL));
+}
+
+/*
+ * __bam_bulk_duplicates --
+ * Put as many off page duplicates as will fit into the buffer.
+ * This routine will adjust the cursor to reflect the position in
+ * the overflow tree.
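+ * On return *spacep, *offpp and *dpp reflect what was consumed; if the
+ * buffer fills (ENOMEM), the off-page cursor is backed up so the last
+ * item actually returned remains current.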
+ * PUBLIC: int __bam_bulk_duplicates __P((DBC *,
+ * PUBLIC: db_pgno_t, u_int8_t *, int32_t *,
+ * PUBLIC: int32_t **, u_int8_t **, u_int32_t *, int));
+ */
+int
+__bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int8_t *dbuf;
+ int32_t *keyoff, **offpp;
+ u_int8_t **dpp;
+ u_int32_t *spacep;
+ int no_dup;
+{
+ DB *dbp;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ DBC *opd;
+ DBT key, data;
+ PAGE *pg;
+ db_indx_t indx, *inp;
+ int32_t *offp;
+ u_int32_t size, space;
+ u_int8_t *dp, *np;
+ int first, need_pg, pagesize, ret, t_ret;
+
+ ret = 0;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ opd = cp->opd;
+
+ if (opd == NULL) {
+ if ((ret = __db_c_newopd(dbc, pgno, NULL, &opd)) != 0)
+ return (ret);
+ cp->opd = opd;
+ if ((ret = opd->c_am_get(opd,
+ &key, &data, DB_FIRST, NULL)) != 0)
+ return (ret);
+ }
+
+ pagesize = opd->dbp->pgsize;
+ cp = (BTREE_CURSOR *)opd->internal;
+ space = *spacep;
+ /* Get current offset slot. */
+ offp = *offpp;
+
+ /*
+ * np is the next place to put data.
+ * dp is the beginning of the current page in the buffer.
+ */
+ np = dp = *dpp;
+ first = 1;
+ indx = cp->indx;
+
+ do {
+ /* Fetch the current record. No initial move. */
+ if ((ret = __bam_c_next(opd, 0, 0)) != 0)
+ break;
+ pg = cp->page;
+ indx = cp->indx;
+ inp = P_INP(dbp, pg);
+ /* We need to copy the page to the buffer. */
+ need_pg = 1;
+
+ do {
+ if (IS_DELETED(dbp, pg, indx))
+ goto contin;
+ bk = GET_BKEYDATA(dbp, pg, indx);
+ space -= 2 * sizeof(*offp);
+ /* Allocate space for key if needed. */
+ if (first == 0 && keyoff != NULL)
+ space -= 2 * sizeof(*offp);
+
+ /* Did space underflow? */
+ if (space > *spacep) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + -(int32_t)space;
+ if (need_pg)
+ space += pagesize - HOFFSET(pg);
+ }
+ break;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+ ret = ENOMEM;
+ /* Return space required. */
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ size = bk->len;
+ *offp-- = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ }
+ *offp-- = size;
+ first = 0;
+ if (no_dup)
+ break;
+contin:
+ indx++;
+ if (opd->dbtype == DB_RECNO)
+ cp->recno++;
+ } while (indx < NUM_ENT(pg));
+ if (no_dup)
+ break;
+ cp->indx = indx;
+
+ } while (ret == 0);
+
+ /* Return the updated information. */
+ *spacep = space;
+ *offpp = offp;
+ *dpp = np;
+
+ /*
+ * If we ran out of space, back up the pointer.
+ * If we did not return any dups or reached the end, close the opd.
+ */
+ if (ret == ENOMEM) {
+ if (opd->dbtype == DB_RECNO) {
+ if (--cp->recno == 0)
+ goto close_opd;
+ } else if (indx != 0)
+ cp->indx--;
+ else {
+ t_ret = __bam_c_prev(opd);
+ if (t_ret == DB_NOTFOUND)
+ goto close_opd;
+ if (t_ret != 0)
+ ret = t_ret;
+ }
+ } else if (keyoff == NULL && ret == DB_NOTFOUND) {
+ cp->indx--;
+ if (opd->dbtype == DB_RECNO)
+ --cp->recno;
+ } else if (indx == 0 || ret == DB_NOTFOUND) {
+close_opd:
+ opd->c_close(opd);
+ ((BTREE_CURSOR *)dbc->internal)->opd = NULL;
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * __bam_getbothc --
+ * Search for a matching data item on a join.
+ */
+static int
+__bam_getbothc(dbc, data)
+ DBC *dbc;
+ DBT *data;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ int cmp, exact, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * An off-page duplicate cursor. Search the remaining duplicates
+ * for one which matches (do a normal btree search, then verify
+ * that the retrieved record is greater than the original one).
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ /*
+ * Check to make sure the desired item comes strictly after
+ * the current position; if it doesn't, return DB_NOTFOUND.
+ */
+ if ((ret = __bam_cmp(dbp, data, cp->page, cp->indx,
+ dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare,
+ &cmp)) != 0)
+ return (ret);
+
+ if (cmp <= 0)
+ return (DB_NOTFOUND);
+
+ /* Discard the current page, we're going to do a full search. */
+ if ((ret = mpf->put(mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+
+ return (__bam_c_search(dbc,
+ PGNO_INVALID, data, DB_GET_BOTH, &exact));
+ }
+
+ /*
+ * We're doing a DBC->c_get(DB_GET_BOTHC) and we're already searching
+ * a set of on-page duplicates (either sorted or unsorted). Continue
+ * a linear search from after the current position.
+ *
+ * (Note that we could have just finished a "set" of one duplicate,
+ * i.e. not a duplicate at all, but the following check will always
+ * return DB_NOTFOUND in this case, which is the desired behavior.)
+ */
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ return (DB_NOTFOUND);
+ cp->indx += P_INDX;
+
+ return (__bam_getboth_finddatum(dbc, data, DB_GET_BOTH));
+}
+
+/*
+ * __bam_getboth_finddatum --
+ * Find a matching on-page data item.
+ */
+static int
+__bam_getboth_finddatum(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t base, lim, top;
+ int cmp, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called (sometimes indirectly) from DBC->get to search on-page data
+ * item(s) for a matching value. If the original flag was DB_GET_BOTH
+ * or DB_GET_BOTH_RANGE, the cursor is set to the first undeleted data
+ * item for the key. If the original flag was DB_GET_BOTHC, the cursor
+ * argument is set to the first data item we can potentially return.
+ * In both cases, there may or may not be additional duplicate data
+ * items to search.
+ *
+ * If the duplicates are not sorted, do a linear search.
+ */
+ if (dbp->dup_compare == NULL) {
+ for (;; cp->indx += P_INDX) {
+ if (!IS_CUR_DELETED(dbc) &&
+ (ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ return (0);
+
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ break;
+ }
+ return (DB_NOTFOUND);
+ }
+
+ /*
+ * If the duplicates are sorted, do a binary search. The reason for
+ * this is that large pages and small key/data pairs result in large
+ * numbers of on-page duplicates before they get pushed off-page.
+ *
+ * Find the top and bottom of the duplicate set. Binary search
+ * requires at least two items, don't loop if there's only one.
+ */
+ for (base = top = cp->indx; top < NUM_ENT(cp->page); top += P_INDX)
+ if (!IS_DUPLICATE(dbc, cp->indx, top))
+ break;
+ if (base == (top - P_INDX)) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ return (cmp == 0 ||
+ (cmp < 0 && flags == DB_GET_BOTH_RANGE) ? 0 : DB_NOTFOUND);
+ }
+
+ for (lim = (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) {
+ cp->indx = base + ((lim >> 1) * P_INDX);
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0) {
+ /*
+ * XXX
+ * No duplicate duplicates in sorted duplicate sets,
+ * so there can be only one.
+ */
+ if (!IS_CUR_DELETED(dbc))
+ return (0);
+ break;
+ }
+ if (cmp > 0) {
+ base = cp->indx + P_INDX;
+ --lim;
+ }
+ }
+
+ /* No match found; if we're looking for an exact match, we're done. */
+ if (flags == DB_GET_BOTH)
+ return (DB_NOTFOUND);
+
+ /*
+ * Base is the smallest index greater than the data item, may be zero
+ * or a last + O_INDX index, and may be deleted. Find an undeleted
+ * item.
+ */
+ cp->indx = base;
+ while (cp->indx < top && IS_CUR_DELETED(dbc))
+ cp->indx += P_INDX;
+ return (cp->indx < top ? 0 : DB_NOTFOUND);
+}
+
+/*
+ * __bam_c_put --
+ * Put using a cursor.
+ */
+static int
+__bam_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t root_pgno;
+ u_int32_t iiop;
+ int cmp, exact, ret, stack;
+ void *arg;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
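+ /*
+ * If the new item doesn't fit, the tree is split and the operation
+ * is retried from here (see the DB_NEEDSPLIT case below).
+ */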
+split: ret = stack = 0;
+ switch (flags) {
+ case DB_AFTER:
+ case DB_BEFORE:
+ case DB_CURRENT:
+ iiop = flags;
+
+ /*
+ * If the Btree has record numbers (and we're not replacing an
+ * existing record), we need a complete stack so that we can
+ * adjust the record counts. The check for flags == DB_CURRENT
+ * is superfluous but left in for clarity. (If C_RECNUM is set
+ * we know that flags must be DB_CURRENT, as DB_AFTER/DB_BEFORE
+ * are illegal in a Btree unless it's configured for duplicates
+ * and you cannot configure a Btree for both record renumbering
+ * and duplicates.)
+ */
+ if (flags == DB_CURRENT &&
+ F_ISSET(cp, C_RECNUM) && F_ISSET(cp, C_DELETED)) {
+ if ((ret = __bam_c_getstack(dbc)) != 0)
+ goto err;
+ /*
+ * Initialize the cursor from the stack. Don't take
+ * the page number or page index, they should already
+ * be set.
+ */
+ cp->page = cp->csp->page;
+ cp->lock = cp->csp->lock;
+ cp->lock_mode = cp->csp->lock_mode;
+
+ stack = 1;
+ break;
+ }
+
+ /* Acquire the current page with a write lock. */
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ break;
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ /*
+ * Searching off-page, sorted duplicate tree: do a tree search
+ * for the correct item; __bam_c_search returns the smallest
+ * slot greater than the key, use it.
+ *
+ * See comment below regarding where we can start the search.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = __bam_c_search(dbc,
+ F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno,
+ data, flags, &exact)) != 0)
+ goto err;
+ stack = 1;
+
+ /* Disallow "sorted" duplicate duplicates. */
+ if (exact) {
+ if (IS_DELETED(dbp, cp->page, cp->indx)) {
+ iiop = DB_CURRENT;
+ break;
+ }
+ ret = __db_duperr(dbp, flags);
+ goto err;
+ }
+ iiop = DB_BEFORE;
+ break;
+ }
+
+ /*
+ * Searching a btree.
+ *
+ * If we've done a split, we can start the search from the
+ * parent of the split page, which __bam_split returned
+ * for us in root_pgno, unless we're in a Btree with record
+ * numbering. In that case, we'll need the true root page
+ * in order to adjust the record count.
+ */
+ if ((ret = __bam_c_search(dbc,
+ F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno, key,
+ flags == DB_KEYFIRST || dbp->dup_compare != NULL ?
+ DB_KEYFIRST : DB_KEYLAST, &exact)) != 0)
+ goto err;
+ stack = 1;
+
+ /*
+ * If we don't have an exact match, __bam_c_search returned
+ * the smallest slot greater than the key, use it.
+ */
+ if (!exact) {
+ iiop = DB_KEYFIRST;
+ break;
+ }
+
+ /*
+ * If duplicates aren't supported, replace the current item.
+ * (If implementing the DB->put function, our caller already
+ * checked the DB_NOOVERWRITE flag.)
+ */
+ if (!F_ISSET(dbp, DB_AM_DUP)) {
+ iiop = DB_CURRENT;
+ break;
+ }
+
+ /*
+ * If we find a matching entry, it may be an off-page duplicate
+ * tree. Return the page number to our caller, we need a new
+ * cursor.
+ */
+ if (pgnop != NULL && __bam_isopd(dbc, pgnop))
+ goto done;
+
+ /* If the duplicates aren't sorted, move to the right slot. */
+ if (dbp->dup_compare == NULL) {
+ if (flags == DB_KEYFIRST)
+ iiop = DB_BEFORE;
+ else
+ for (;; cp->indx += P_INDX)
+ if (cp->indx + P_INDX >=
+ NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx,
+ cp->indx + P_INDX)) {
+ iiop = DB_AFTER;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * We know that we're looking at the first of a set of sorted
+ * on-page duplicates. Walk the list to find the right slot.
+ */
+ for (;; cp->indx += P_INDX) {
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ goto err;
+ if (cmp < 0) {
+ iiop = DB_BEFORE;
+ break;
+ }
+
+ /* Disallow "sorted" duplicate duplicates. */
+ if (cmp == 0) {
+ if (IS_DELETED(dbp, cp->page, cp->indx)) {
+ iiop = DB_CURRENT;
+ break;
+ }
+ ret = __db_duperr(dbp, flags);
+ goto err;
+ }
+
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ P_INP(dbp, ((PAGE *)cp->page))[cp->indx] !=
+ P_INP(dbp, ((PAGE *)cp->page))[cp->indx + P_INDX]) {
+ iiop = DB_AFTER;
+ break;
+ }
+ }
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__bam_c_put", flags);
+ goto err;
+ }
+
+ switch (ret = __bam_iitem(dbc, key, data, iiop, 0)) {
+ case 0:
+ break;
+ case DB_NEEDSPLIT:
+ /*
+ * To split, we need a key for the page. Either use the key
+ * argument or get a copy of the key from the page.
+ */
+ if (flags == DB_AFTER ||
+ flags == DB_BEFORE || flags == DB_CURRENT) {
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page, 0, &dbt,
+ &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err;
+ arg = &dbt;
+ } else
+ arg = F_ISSET(dbc, DBC_OPD) ? data : key;
+
+ /*
+ * Discard any locks and pinned pages (the locks are discarded
+ * even if we're running with transactions, as they lock pages
+ * that we're sorry we ever acquired). If stack is set and the
+ * cursor entries are valid, they point to the same entries as
+ * the stack, don't free them twice.
+ */
+ if (stack)
+ ret = __bam_stkrel(dbc, STK_CLRDBC | STK_NOLOCK);
+ else
+ DISCARD_CUR(dbc, ret);
+ if (ret != 0)
+ goto err;
+
+ /* Split the tree. */
+ if ((ret = __bam_split(dbc, arg, &root_pgno)) != 0)
+ return (ret);
+
+ goto split;
+ default:
+ goto err;
+ }
+
+err:
+done: /*
+ * Discard any pages pinned in the tree and their locks, except for
+ * the leaf page. Note, the leaf page participated in any stack we
+ * acquired, and so we have to adjust the stack as necessary. If
+ * there was only a single page on the stack, we don't have to free
+ * further stack pages.
+ */
+ if (stack && BT_STK_POP(cp) != NULL)
+ (void)__bam_stkrel(dbc, 0);
+
+ /*
+ * Regardless of whether we were successful or not, clear the delete
+ * flag. If we're successful, we either moved the cursor or the item
+ * is no longer deleted. If we're not successful, then we're just a
+ * copy, no need to have the flag set.
+ */
+ F_CLR(cp, C_DELETED);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_rget --
+ * Return the record number for a cursor.
+ *
+ * PUBLIC: int __bam_c_rget __P((DBC *, DBT *));
+ */
+int
+__bam_c_rget(dbc, data)
+ DBC *dbc;
+ DBT *data;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ db_recno_t recno;
+ int exact, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Get the page with the current item on it.
+ * Get a copy of the key.
+ * Release the page, making sure we don't release it twice.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page,
+ cp->indx, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err;
+ ret = mpf->put(mpf, cp->page, 0);
+ cp->page = NULL;
+ if (ret != 0)
+ return (ret);
+
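+ /* Search the tree for the key to find its record number. */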
+ if ((ret = __bam_search(dbc, PGNO_INVALID, &dbt,
+ F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+ 1, &recno, &exact)) != 0)
+ goto err;
+
+ ret = __db_retcopy(dbp->dbenv, data,
+ &recno, sizeof(recno), &dbc->rdata->data, &dbc->rdata->ulen);
+
+ /* Release the stack. */
+err: __bam_stkrel(dbc, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_writelock --
+ * Upgrade the cursor to a write lock.
+ */
+static int
+__bam_c_writelock(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ if (cp->lock_mode == DB_LOCK_WRITE)
+ return (0);
+
+ /*
+ * When writing to an off-page duplicate tree, we need to have the
+ * appropriate page in the primary tree locked. The general DBC
+ * code calls us first with the primary cursor so we can acquire the
+ * appropriate lock.
+ */
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ return (ret);
+}
+
+/*
+ * __bam_c_first --
+ * Return the first record.
+ */
+static int
+__bam_c_first(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ db_pgno_t pgno;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* Walk down the left-hand side of the tree. */
+ for (pgno = cp->root;;) {
+ ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ /* If we find a leaf page, we're done. */
+ if (ISLEAF(cp->page))
+ break;
+
+ pgno = GET_BINTERNAL(dbc->dbp, cp->page, 0)->pgno;
+ }
+
+ /* If we want a write lock instead of a read lock, get it now. */
+ if (F_ISSET(dbc, DBC_RMW)) {
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ cp->indx = 0;
+
+ /* If on an empty page or a deleted record, move to the next one. */
+ if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_c_last --
+ * Return the last record.
+ */
+static int
+__bam_c_last(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ db_pgno_t pgno;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* Walk down the right-hand side of the tree. */
+ for (pgno = cp->root;;) {
+ ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ /* If we find a leaf page, we're done. */
+ if (ISLEAF(cp->page))
+ break;
+
+ pgno = GET_BINTERNAL(dbc->dbp, cp->page,
+ NUM_ENT(cp->page) - O_INDX)->pgno;
+ }
+
+ /* If we want a write lock instead of a read lock, get it now. */
+ if (F_ISSET(dbc, DBC_RMW)) {
+ ACQUIRE_WRITE_LOCK(dbc, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ cp->indx = NUM_ENT(cp->page) == 0 ? 0 :
+ NUM_ENT(cp->page) -
+ (TYPE(cp->page) == P_LBTREE ? P_INDX : O_INDX);
+
+ /* If on an empty page or a deleted record, move to the previous one. */
+ if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_c_next --
+ * Move to the next record.
+ */
+static int
+__bam_c_next(dbc, initial_move, deleted_okay)
+ DBC *dbc;
+ int initial_move, deleted_okay;
+{
+ BTREE_CURSOR *cp;
+ db_indx_t adjust;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * We're either moving through a page of duplicates or a btree leaf
+ * page.
+ *
+ * !!!
+ * This code handles empty pages and pages with only deleted entries.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ adjust = O_INDX;
+ lock_mode = DB_LOCK_NG;
+ } else {
+ adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+ lock_mode =
+ F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ }
+ if (cp->page == NULL) {
+ ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ if (initial_move)
+ cp->indx += adjust;
+
+ for (;;) {
+ /*
+ * If at the end of the page, move to a subsequent page.
+ *
+ * !!!
+ * Check for >= NUM_ENT. If the original search landed us on
+ * NUM_ENT, we may have incremented indx before the test.
+ */
+ if (cp->indx >= NUM_ENT(cp->page)) {
+ if ((pgno
+ = NEXT_PGNO(cp->page)) == PGNO_INVALID)
+ return (DB_NOTFOUND);
+
+ ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
+ if (ret != 0)
+ return (ret);
+ cp->indx = 0;
+ continue;
+ }
+ if (!deleted_okay && IS_CUR_DELETED(dbc)) {
+ cp->indx += adjust;
+ continue;
+ }
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __bam_c_prev --
+ * Move to the previous record.
+ */
+static int
+__bam_c_prev(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ db_indx_t adjust;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * We're either moving through a page of duplicates or a btree leaf
+ * page.
+ *
+ * !!!
+ * This code handles empty pages and pages with only deleted entries.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ adjust = O_INDX;
+ lock_mode = DB_LOCK_NG;
+ } else {
+ adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+ lock_mode =
+ F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ }
+ if (cp->page == NULL) {
+ ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
+ if (ret != 0)
+ return (ret);
+ }
+
+ for (;;) {
+ /* If at the beginning of the page, move to a previous one. */
+ if (cp->indx == 0) {
+ if ((pgno =
+ PREV_PGNO(cp->page)) == PGNO_INVALID)
+ return (DB_NOTFOUND);
+
+ ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
+ if (ret != 0)
+ return (ret);
+
+ if ((cp->indx = NUM_ENT(cp->page)) == 0)
+ continue;
+ }
+
+ /* Ignore deleted records. */
+ cp->indx -= adjust;
+ if (IS_CUR_DELETED(dbc))
+ continue;
+
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __bam_c_search --
+ * Move to a specified record.
+ */
+static int
+__bam_c_search(dbc, root_pgno, key, flags, exactp)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ const DBT *key;
+ u_int32_t flags;
+ int *exactp;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ PAGE *h;
+ db_indx_t indx, *inp;
+ db_pgno_t bt_lpgno;
+ db_recno_t recno;
+ u_int32_t sflags;
+ int cmp, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ ret = 0;
+
+ /*
+ * Find an entry in the database. Discard any lock we currently hold,
+ * we're going to search the tree.
+ */
+ DISCARD_CUR(dbc, ret);
+ if (ret != 0)
+ return (ret);
+
+ switch (flags) {
+ case DB_SET_RECNO:
+ if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0)
+ return (ret);
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+ if ((ret = __bam_rsearch(dbc, &recno, sflags, 1, exactp)) != 0)
+ return (ret);
+ break;
+ case DB_SET:
+ case DB_GET_BOTH:
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+ goto search;
+ case DB_GET_BOTH_RANGE:
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND);
+ goto search;
+ case DB_SET_RANGE:
+ sflags =
+ (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_DUPFIRST;
+ goto search;
+ case DB_KEYFIRST:
+ sflags = S_KEYFIRST;
+ goto fast_search;
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ sflags = S_KEYLAST;
+fast_search: /*
+ * If the application has a history of inserting into the first
+ * or last pages of the database, we check those pages first to
+ * avoid doing a full search.
+ *
+ * If the tree has record numbers, we need a complete stack so
+ * that we can adjust the record counts, so fast_search isn't
+ * possible.
+ */
+ if (F_ISSET(cp, C_RECNUM))
+ goto search;
+
+ /*
+ * !!!
+ * We do not mutex protect the t->bt_lpgno field, which means
+ * that it can only be used in an advisory manner. If we find
+	 * a page we can use, great.  If we don't, we don't care, we do
+ * it the slow way instead. Regardless, copy it into a local
+ * variable, otherwise we might acquire a lock for a page and
+ * then read a different page because it changed underfoot.
+ */
+ bt_lpgno = t->bt_lpgno;
+
+ /*
+ * If the tree has no history of insertion, do it the slow way.
+ */
+ if (bt_lpgno == PGNO_INVALID)
+ goto search;
+
+ /* Lock and retrieve the page on which we last inserted. */
+ h = NULL;
+ ACQUIRE(dbc,
+ DB_LOCK_WRITE, bt_lpgno, cp->lock, bt_lpgno, h, ret);
+ if (ret != 0)
+ goto fast_miss;
+
+ inp = P_INP(dbp, h);
+ /*
+		 * It's okay if the page type isn't right or it's empty; it
+ * just means that the world changed.
+ */
+ if (TYPE(h) != P_LBTREE || NUM_ENT(h) == 0)
+ goto fast_miss;
+
+ /*
+ * What we do here is test to see if we're at the beginning or
+ * end of the tree and if the new item sorts before/after the
+ * first/last page entry. We don't try and catch inserts into
+ * the middle of the tree (although we could, as long as there
+ * were two keys on the page and we saved both the index and
+ * the page number of the last insert).
+ */
+ if (h->next_pgno == PGNO_INVALID) {
+ indx = NUM_ENT(h) - P_INDX;
+ if ((ret = __bam_cmp(dbp,
+ key, h, indx, t->bt_compare, &cmp)) != 0)
+ return (ret);
+
+ if (cmp < 0)
+ goto try_begin;
+ if (cmp > 0) {
+ indx += P_INDX;
+ goto fast_hit;
+ }
+
+ /*
+ * Found a duplicate. If doing DB_KEYLAST, we're at
+ * the correct position, otherwise, move to the first
+ * of the duplicates. If we're looking at off-page
+ * duplicates, duplicate duplicates aren't permitted,
+ * so we're done.
+ */
+ if (flags == DB_KEYLAST)
+ goto fast_hit;
+ for (;
+ indx > 0 && inp[indx - P_INDX] == inp[indx];
+ indx -= P_INDX)
+ ;
+ goto fast_hit;
+ }
+try_begin: if (h->prev_pgno == PGNO_INVALID) {
+ indx = 0;
+ if ((ret = __bam_cmp(dbp,
+ key, h, indx, t->bt_compare, &cmp)) != 0)
+ return (ret);
+
+ if (cmp > 0)
+ goto fast_miss;
+ if (cmp < 0)
+ goto fast_hit;
+
+ /*
+ * Found a duplicate. If doing DB_KEYFIRST, we're at
+ * the correct position, otherwise, move to the last
+ * of the duplicates. If we're looking at off-page
+ * duplicates, duplicate duplicates aren't permitted,
+ * so we're done.
+ */
+ if (flags == DB_KEYFIRST)
+ goto fast_hit;
+ for (;
+ indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+ inp[indx] == inp[indx + P_INDX];
+ indx += P_INDX)
+ ;
+ goto fast_hit;
+ }
+ goto fast_miss;
+
+fast_hit: /* Set the exact match flag, we may have found a duplicate. */
+ *exactp = cmp == 0;
+
+ /*
+ * Insert the entry in the stack. (Our caller is likely to
+ * call __bam_stkrel() after our return.)
+ */
+ BT_STK_CLR(cp);
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, cp->lock, cp->lock_mode, ret);
+ if (ret != 0)
+ return (ret);
+ break;
+
+fast_miss: /*
+ * This was not the right page, so we do not need to retain
+ * the lock even in the presence of transactions.
+ */
+ DISCARD(dbc, 1, cp->lock, h, ret);
+ if (ret != 0)
+ return (ret);
+
+search: if ((ret = __bam_search(dbc, root_pgno,
+ key, sflags, 1, NULL, exactp)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_c_search", flags));
+ }
+
+ /* Initialize the cursor from the stack. */
+ cp->page = cp->csp->page;
+ cp->pgno = cp->csp->page->pgno;
+ cp->indx = cp->csp->indx;
+ cp->lock = cp->csp->lock;
+ cp->lock_mode = cp->csp->lock_mode;
+
+ /*
+ * If we inserted a key into the first or last slot of the tree,
+ * remember where it was so we can do it more quickly next time.
+ * If there are duplicates and we are inserting into the last slot,
+ * the cursor will point _to_ the last item, not after it, which
+ * is why we subtract P_INDX below.
+ */
+ if (TYPE(cp->page) == P_LBTREE &&
+ (flags == DB_KEYFIRST || flags == DB_KEYLAST))
+ t->bt_lpgno =
+ (NEXT_PGNO(cp->page) == PGNO_INVALID &&
+ cp->indx >= NUM_ENT(cp->page) - P_INDX) ||
+ (PREV_PGNO(cp->page) == PGNO_INVALID &&
+ cp->indx == 0) ? cp->pgno : PGNO_INVALID;
+ return (0);
+}
+
+/*
+ * __bam_c_physdel --
+ * Physically remove an item from the page.
+ */
+static int
+__bam_c_physdel(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT key;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ int delete_page, empty_page, exact, level, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ delete_page = empty_page = ret = 0;
+
+ /* If the page is going to be emptied, consider deleting it. */
+ delete_page = empty_page =
+ NUM_ENT(cp->page) == (TYPE(cp->page) == P_LBTREE ? 2 : 1);
+
+ /*
+ * Check if the application turned off reverse splits. Applications
+	 * can't turn off reverse splits in off-page duplicate trees; that
+ * space will never be reused unless the exact same key is specified.
+ */
+ if (delete_page &&
+ !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_AM_REVSPLITOFF))
+ delete_page = 0;
+
+ /*
+ * We never delete the last leaf page. (Not really true -- we delete
+ * the last leaf page of off-page duplicate trees, but that's handled
+ * by our caller, not down here.)
+ */
+ if (delete_page && cp->pgno == cp->root)
+ delete_page = 0;
+
+ /*
+ * To delete a leaf page other than an empty root page, we need a
+ * copy of a key from the page. Use the 0th page index since it's
+ * the last key the page held.
+ *
+ * !!!
+ * Note that because __bam_c_physdel is always called from a cursor
+ * close, it should be safe to use the cursor's own "my_rkey" memory
+ * to temporarily hold this key. We shouldn't own any returned-data
+ * memory of interest--if we do, we're in trouble anyway.
+ */
+ if (delete_page) {
+ memset(&key, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp, cp->page,
+ 0, &key, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0)
+ return (ret);
+ }
+
+ /*
+	 * Delete the items.  If the page isn't empty, we adjust the cursors.
+ *
+ * !!!
+ * The following operations to delete a page may deadlock. The easy
+ * scenario is if we're deleting an item because we're closing cursors
+ * because we've already deadlocked and want to call txn->abort. If
+ * we fail due to deadlock, we'll leave a locked, possibly empty page
+ * in the tree, which won't be empty long because we'll undo the delete
+ * when we undo the transaction's modifications.
+ *
+ * !!!
+ * Delete the key item first, otherwise the on-page duplicate checks
+ * in __bam_ditem() won't work!
+ */
+ if (TYPE(cp->page) == P_LBTREE) {
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ return (ret);
+ if (!empty_page)
+ if ((ret = __bam_ca_di(dbc,
+ PGNO(cp->page), cp->indx, -1)) != 0)
+ return (ret);
+ }
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ return (ret);
+ if (!empty_page)
+ if ((ret = __bam_ca_di(dbc, PGNO(cp->page), cp->indx, -1)) != 0)
+ return (ret);
+
+ /* If we're not going to try and delete the page, we're done. */
+ if (!delete_page)
+ return (0);
+
+ /*
+ * Call __bam_search to reacquire the empty leaf page, but this time
+	 * get both the leaf page and its parent, locked.  Jump back up the
+ * tree, until we have the top pair of pages that we want to delete.
+ * Once we have the top page that we want to delete locked, lock the
+ * underlying pages and check to make sure they're still empty. If
+ * they are, delete them.
+ */
+ for (level = LEAFLEVEL;; ++level) {
+ /* Acquire a page and its parent, locked. */
+ if ((ret = __bam_search(dbc, PGNO_INVALID,
+ &key, S_WRPAIR, level, NULL, &exact)) != 0)
+ return (ret);
+
+ /*
+ * If we reach the root or the parent page isn't going to be
+ * empty when we delete one record, stop.
+ */
+ h = cp->csp[-1].page;
+ if (h->pgno == cp->root || NUM_ENT(h) != 1)
+ break;
+
+ /* Discard the stack, retaining no locks. */
+ (void)__bam_stkrel(dbc, STK_NOLOCK);
+ }
+
+ /*
+ * Move the stack pointer one after the last entry, we may be about
+ * to push more items onto the page stack.
+ */
+ ++cp->csp;
+
+ /*
+ * cp->csp[-2].page is now the parent page, which we may or may not be
+ * going to delete, and cp->csp[-1].page is the first page we know we
+ * are going to delete. Walk down the chain of pages, acquiring pages
+ * until we've acquired a leaf page. Generally, this shouldn't happen;
+ * we should only see a single internal page with one item and a single
+ * leaf page with no items. The scenario where we could see something
+	 * else is if reverse splits were turned off for a while and then turned
+ * back on. That could result in all sorts of strangeness, e.g., empty
+ * pages in the tree, trees that looked like linked lists, and so on.
+ *
+ * !!!
+ * Sheer paranoia: if we find any pages that aren't going to be emptied
+ * by the delete, someone else added an item while we were walking the
+ * tree, and we discontinue the delete. Shouldn't be possible, but we
+ * check regardless.
+ */
+ for (h = cp->csp[-1].page;;) {
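+		/*
+		 * A leaf page always ends the walk.  A non-empty leaf, or an
+		 * internal page with more than one entry, is the "someone
+		 * added an item" case described above, so we stop descending.
+		 */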
+ if (ISLEAF(h)) {
+ if (NUM_ENT(h) != 0)
+ break;
+ break;
+ } else
+ if (NUM_ENT(h) != 1)
+ break;
+
+ /*
+ * Get the next page, write lock it and push it onto the stack.
+ * We know it's index 0, because it can only have one element.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ pgno = GET_BINTERNAL(dbp, h, 0)->pgno;
+ break;
+ case P_IRECNO:
+ pgno = GET_RINTERNAL(dbp, h, 0)->pgno;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &lock)) != 0)
+ break;
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ break;
+ BT_STK_PUSH(dbp->dbenv, cp, h, 0, lock, DB_LOCK_WRITE, ret);
+ if (ret != 0)
+ break;
+ }
+
+ /* Adjust the cursor stack to reference the last page on the stack. */
+ BT_STK_POP(cp);
+
+ /*
+ * If everything worked, delete the stack, otherwise, release the
+ * stack and page locks without further damage.
+ */
+ if (ret == 0)
+ ret = __bam_dpages(dbc, cp->sp);
+ else
+ (void)__bam_stkrel(dbc, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_getstack --
+ * Acquire a full stack for a cursor.
+ */
+static int
+__bam_c_getstack(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Get the page with the current item on it. The caller of this
+ * routine has to already hold a read lock on the page, so there
+ * is no additional lock to acquire.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
+ return (ret);
+
+ /* Get a copy of a key from the page. */
+ memset(&dbt, 0, sizeof(DBT));
+ if ((ret = __db_ret(dbp,
+ h, 0, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err;
+
+ /* Get a write-locked stack for the page. */
+ exact = 0;
+ ret = __bam_search(dbc, PGNO_INVALID,
+ &dbt, S_KEYFIRST, 1, NULL, &exact);
+
+err: /* Discard the key and the page. */
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __bam_isopd --
+ * Return if the cursor references an off-page duplicate tree via its
+ * page number.
+ */
+static int
+__bam_isopd(dbc, pgnop)
+ DBC *dbc;
+ db_pgno_t *pgnop;
+{
+ BOVERFLOW *bo;
+
+ if (TYPE(dbc->internal->page) != P_LBTREE)
+ return (0);
+
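+	/*
+	 * On a btree leaf page the data item paired with the key at indx
+	 * lives at indx + O_INDX; a B_DUPLICATE entry there points to the
+	 * root of an off-page duplicate tree.
+	 */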
+ bo = GET_BOVERFLOW(dbc->dbp,
+ dbc->internal->page, dbc->internal->indx + O_INDX);
+ if (B_TYPE(bo->type) == B_DUPLICATE) {
+ *pgnop = bo->pgno;
+ return (1);
+ }
+ return (0);
+}
diff --git a/libdb/btree/bt_delete.c b/libdb/btree/bt_delete.c
new file mode 100644
index 0000000..ec989ed
--- /dev/null
+++ b/libdb/btree/bt_delete.c
@@ -0,0 +1,460 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_ditem --
+ * Delete one or more entries from a page.
+ *
+ * PUBLIC: int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+ */
+int
+__bam_ditem(dbc, h, indx)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ u_int32_t nbytes;
+ int ret;
+ db_indx_t *inp;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(dbp, h, indx);
+ switch (B_TYPE(bi->type)) {
+ case B_DUPLICATE:
+ case B_KEYDATA:
+ nbytes = BINTERNAL_SIZE(bi->len);
+ break;
+ case B_OVERFLOW:
+ nbytes = BINTERNAL_SIZE(bi->len);
+ if ((ret =
+ __db_doff(dbc, ((BOVERFLOW *)bi->data)->pgno)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+ break;
+ case P_IRECNO:
+ nbytes = RINTERNAL_SIZE;
+ break;
+ case P_LBTREE:
+ /*
+ * If it's a duplicate key, discard the index and don't touch
+ * the actual page item.
+ *
+ * !!!
+ * This works because no data item can have an index matching
+		 * any other index, so even if the data item is in a key "slot",
+ * it won't match any other index.
+ */
+ if ((indx % 2) == 0) {
+ /*
+ * Check for a duplicate after us on the page. NOTE:
+ * we have to delete the key item before deleting the
+ * data item, otherwise the "indx + P_INDX" calculation
+ * won't work!
+ */
+ if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
+ inp[indx] == inp[indx + P_INDX])
+ return (__bam_adjindx(dbc,
+ h, indx, indx + O_INDX, 0));
+ /*
+ * Check for a duplicate before us on the page. It
+ * doesn't matter if we delete the key item before or
+ * after the data item for the purposes of this one.
+ */
+ if (indx > 0 && inp[indx] == inp[indx - P_INDX])
+ return (__bam_adjindx(dbc,
+ h, indx, indx - P_INDX, 0));
+ }
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(dbp, h, indx);
+ switch (B_TYPE(bk->type)) {
+ case B_DUPLICATE:
+ nbytes = BOVERFLOW_SIZE;
+ break;
+ case B_OVERFLOW:
+ nbytes = BOVERFLOW_SIZE;
+ if ((ret = __db_doff(
+ dbc, (GET_BOVERFLOW(dbp, h, indx))->pgno)) != 0)
+ return (ret);
+ break;
+ case B_KEYDATA:
+ nbytes = BKEYDATA_SIZE(bk->len);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+
+ /* Delete the item and mark the page dirty. */
+ if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0)
+ return (ret);
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_adjindx --
+ * Adjust an index on the page.
+ *
+ * PUBLIC: int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+ */
+int
+__bam_adjindx(dbc, h, indx, indx_copy, is_insert)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx, indx_copy;
+ int is_insert;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ db_indx_t copy, *inp;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_adj_log(dbp, dbc->txn, &LSN(h), 0,
+ PGNO(h), &LSN(h), indx, indx_copy, (u_int32_t)is_insert)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+
+ /* Shuffle the indices and mark the page dirty. */
+ if (is_insert) {
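+		/*
+		 * Open a hole at indx by sliding the index array up one slot,
+		 * then point the new slot at the existing item at indx_copy;
+		 * this is how on-page duplicates share a single key item.
+		 */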
+ copy = inp[indx_copy];
+ if (indx != NUM_ENT(h))
+ memmove(&inp[indx + O_INDX], &inp[indx],
+ sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+ inp[indx] = copy;
+ ++NUM_ENT(h);
+ } else {
+ --NUM_ENT(h);
+ if (indx != NUM_ENT(h))
+ memmove(&inp[indx], &inp[indx + O_INDX],
+ sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+ }
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __bam_dpages --
+ * Delete a set of locked pages.
+ *
+ * PUBLIC: int __bam_dpages __P((DBC *, EPG *));
+ */
+int
+__bam_dpages(dbc, stack_epg)
+ DBC *dbc;
+ EPG *stack_epg;
+{
+ BTREE_CURSOR *cp;
+ BINTERNAL *bi;
+ DB *dbp;
+ DBT a, b;
+ DB_LOCK c_lock, p_lock;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ PAGE *child, *parent;
+ db_indx_t nitems;
+ db_pgno_t pgno, root_pgno;
+ db_recno_t rcnt;
+ int done, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * We have the entire stack of deletable pages locked.
+ *
+ * Btree calls us with a pointer to the beginning of a stack, where
+ * the first page in the stack is to have a single item deleted, and
+ * the rest of the pages are to be removed.
+ *
+ * Recno calls us with a pointer into the middle of the stack, where
+ * the referenced page is to have a single item deleted, and pages
+ * after the stack reference are to be removed.
+ *
+ * First, discard any pages that we don't care about.
+ */
+ ret = 0;
+ for (epg = cp->sp; epg < stack_epg; ++epg) {
+ if ((t_ret = mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ if (ret != 0)
+ goto err;
+
+ /*
+ * !!!
+ * There is an interesting deadlock situation here. We have to relink
+ * the leaf page chain around the leaf page being deleted. Consider
+	 * a cursor walking through the leaf pages that has the previous page
+ * read-locked and is waiting on a lock for the page we're deleting.
+ * It will deadlock here. Before we unlink the subtree, we relink the
+ * leaf page chain.
+ */
+ if ((ret = __db_relink(dbc, DB_REM_PAGE, cp->csp->page, NULL, 1)) != 0)
+ goto err;
+
+ /*
+ * Delete the last item that references the underlying pages that are
+ * to be deleted, and adjust cursors that reference that page. Then,
+ * save that page's page number and item count and release it. If
+ * the application isn't retaining locks because it's running without
+ * transactions, this lets the rest of the tree get back to business
+ * immediately.
+ */
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0)
+ goto err;
+
+ pgno = PGNO(epg->page);
+ nitems = NUM_ENT(epg->page);
+
+ if ((ret = mpf->put(mpf, epg->page, 0)) != 0)
+ goto err_inc;
+ (void)__TLPUT(dbc, epg->lock);
+
+ /* Free the rest of the pages in the stack. */
+ while (++epg <= cp->csp) {
+ /*
+ * Delete page entries so they will be restored as part of
+ * recovery. We don't need to do cursor adjustment here as
+ * the pages are being emptied by definition and so cannot
+ * be referenced by a cursor.
+ */
+ if (NUM_ENT(epg->page) != 0) {
+ DB_ASSERT(NUM_ENT(epg->page) == 1);
+
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ }
+
+ if ((ret = __db_free(dbc, epg->page)) != 0) {
+ epg->page = NULL;
+ goto err_inc;
+ }
+ (void)__TLPUT(dbc, epg->lock);
+ }
+
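+	/*
+	 * The block below is reached only through the err/err_inc labels;
+	 * the "if (0)" guard keeps it out of the normal fall-through path.
+	 */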
+ if (0) {
+err_inc: ++epg;
+err: for (; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL)
+ (void)mpf->put(mpf, epg->page, 0);
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ BT_STK_CLR(cp);
+ return (ret);
+ }
+ BT_STK_CLR(cp);
+
+ /*
+ * If we just deleted the next-to-last item from the root page, the
+ * tree can collapse one or more levels. While there remains only a
+ * single item on the root page, write lock the last page referenced
+ * by the root page and copy it over the root page.
+ */
+ root_pgno = cp->root;
+ if (pgno != root_pgno || nitems != 1)
+ return (0);
+
+ for (done = 0; !done;) {
+ /* Initialize. */
+ parent = child = NULL;
+ LOCK_INIT(p_lock);
+ LOCK_INIT(c_lock);
+
+ /* Lock the root. */
+ pgno = root_pgno;
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0)
+ goto stop;
+ if ((ret = mpf->get(mpf, &pgno, 0, &parent)) != 0)
+ goto stop;
+
+ if (NUM_ENT(parent) != 1)
+ goto stop;
+
+ switch (TYPE(parent)) {
+ case P_IBTREE:
+ /*
+ * If this is overflow, then try to delete it.
+ * The child may or may not still point at it.
+ */
+ bi = GET_BINTERNAL(dbp, parent, 0);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ if ((ret = __db_doff(dbc,
+ ((BOVERFLOW *)bi->data)->pgno)) != 0)
+ goto stop;
+ pgno = bi->pgno;
+ break;
+ case P_IRECNO:
+ pgno = GET_RINTERNAL(dbp, parent, 0)->pgno;
+ break;
+ default:
+ goto stop;
+ }
+
+ /* Lock the child page. */
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0)
+ goto stop;
+ if ((ret = mpf->get(mpf, &pgno, 0, &child)) != 0)
+ goto stop;
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&a, 0, sizeof(a));
+ a.data = child;
+ a.size = dbp->pgsize;
+ memset(&b, 0, sizeof(b));
+ b.data = P_ENTRY(dbp, parent, 0);
+ b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE :
+ BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
+ if ((ret = __bam_rsplit_log(dbp, dbc->txn,
+ &child->lsn, 0, PGNO(child), &a, PGNO(parent),
+ RE_NREC(parent), &b, &parent->lsn)) != 0)
+ goto stop;
+ } else
+ LSN_NOT_LOGGED(child->lsn);
+
+ /*
+ * Make the switch.
+ *
+ * One fixup -- internal pages below the top level do not store
+ * a record count, so we have to preserve it if we're not
+ * converting to a leaf page. Note also that we are about to
+ * overwrite the parent page, including its LSN. This is OK
+ * because the log message we wrote describing this update
+ * stores its LSN on the child page. When the child is copied
+ * onto the parent, the correct LSN is copied into place.
+ */
+ COMPQUIET(rcnt, 0);
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ rcnt = RE_NREC(parent);
+ memcpy(parent, child, dbp->pgsize);
+ PGNO(parent) = root_pgno;
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ RE_NREC_SET(parent, rcnt);
+
+ /* Mark the pages dirty. */
+ if ((ret = mpf->set(mpf, parent, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+ if ((ret = mpf->set(mpf, child, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+
+ /* Adjust the cursors. */
+ if ((ret = __bam_ca_rsplit(dbc, PGNO(child), root_pgno)) != 0)
+ goto stop;
+
+ /*
+ * Free the page copied onto the root page and discard its
+ * lock. (The call to __db_free() discards our reference
+ * to the page.)
+ */
+ if ((ret = __db_free(dbc, child)) != 0) {
+ child = NULL;
+ goto stop;
+ }
+ child = NULL;
+
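+		/*
+		 * The block below is entered only via the stop label: any error
+		 * above jumps here, sets done to end the loop, and falls through
+		 * to the cleanup that releases the parent and child pages and locks.
+		 */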
+ if (0) {
+stop: done = 1;
+ }
+ (void)__TLPUT(dbc, p_lock);
+ if (parent != NULL &&
+ (t_ret = mpf->put(mpf, parent, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, c_lock);
+ if (child != NULL &&
+ (t_ret = mpf->put(mpf, child, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
diff --git a/libdb/btree/bt_method.c b/libdb/btree/bt_method.c
new file mode 100644
index 0000000..e19c2bd
--- /dev/null
+++ b/libdb/btree/bt_method.c
@@ -0,0 +1,388 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/qam.h"
+
+static int __bam_set_bt_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static int __bam_set_bt_maxkey __P((DB *, u_int32_t));
+static int __bam_set_bt_minkey __P((DB *, u_int32_t));
+static int __bam_set_bt_prefix
+ __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+static int __ram_set_re_delim __P((DB *, int));
+static int __ram_set_re_len __P((DB *, u_int32_t));
+static int __ram_set_re_pad __P((DB *, int));
+static int __ram_set_re_source __P((DB *, const char *));
+
+/*
+ * __bam_db_create --
+ * Btree specific initialization of the DB structure.
+ *
+ * PUBLIC: int __bam_db_create __P((DB *));
+ */
+int
+__bam_db_create(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ int ret;
+
+ /* Allocate and initialize the private btree structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(BTREE), &t)) != 0)
+ return (ret);
+ dbp->bt_internal = t;
+
+ t->bt_minkey = DEFMINKEYPAGE; /* Btree */
+ t->bt_compare = __bam_defcmp;
+ t->bt_prefix = __bam_defpfx;
+
+ dbp->set_bt_compare = __bam_set_bt_compare;
+ dbp->set_bt_maxkey = __bam_set_bt_maxkey;
+ dbp->set_bt_minkey = __bam_set_bt_minkey;
+ dbp->set_bt_prefix = __bam_set_bt_prefix;
+
+ t->re_pad = ' '; /* Recno */
+ t->re_delim = '\n';
+ t->re_eof = 1;
+
+ dbp->set_re_delim = __ram_set_re_delim;
+ dbp->set_re_len = __ram_set_re_len;
+ dbp->set_re_pad = __ram_set_re_pad;
+ dbp->set_re_source = __ram_set_re_source;
+
+ return (0);
+}
+
+/*
+ * __bam_db_close --
+ * Btree specific discard of the DB structure.
+ *
+ * PUBLIC: int __bam_db_close __P((DB *));
+ */
+int
+__bam_db_close(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+
+ if ((t = dbp->bt_internal) == NULL)
+ return (0);
+ /* Recno */
+ /* Close any backing source file descriptor. */
+ if (t->re_fp != NULL)
+ (void)fclose(t->re_fp);
+
+ /* Free any backing source file name. */
+ if (t->re_source != NULL)
+ __os_free(dbp->dbenv, t->re_source);
+
+ __os_free(dbp->dbenv, t);
+ dbp->bt_internal = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_flags --
+ * Set Btree specific flags.
+ *
+ * PUBLIC: int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__bam_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_DUP | DB_DUPSORT | DB_RECNUM | DB_REVSPLITOFF)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ /*
+ * The DB_DUP and DB_DUPSORT flags are shared by the Hash
+ * and Btree access methods.
+ */
+ if (LF_ISSET(DB_DUP | DB_DUPSORT))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ if (LF_ISSET(DB_RECNUM | DB_REVSPLITOFF))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ if (LF_ISSET(DB_DUP | DB_DUPSORT)) {
+ /* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ goto incompat;
+
+ if (LF_ISSET(DB_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ }
+
+ F_SET(dbp, DB_AM_DUP);
+ LF_CLR(DB_DUP | DB_DUPSORT);
+ }
+
+ if (LF_ISSET(DB_RECNUM)) {
+ /* DB_RECNUM is incompatible with DB_DUP/DB_DUPSORT. */
+ if (F_ISSET(dbp, DB_AM_DUP))
+ goto incompat;
+
+ F_SET(dbp, DB_AM_RECNUM);
+ LF_CLR(DB_RECNUM);
+ }
+
+ if (LF_ISSET(DB_REVSPLITOFF)) {
+ F_SET(dbp, DB_AM_REVSPLITOFF);
+ LF_CLR(DB_REVSPLITOFF);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+
+incompat:
+ return (__db_ferr(dbp->dbenv, "DB->set_flags", 1));
+}
+
+/*
+ * __bam_set_bt_compare --
+ * Set the comparison function.
+ */
+static int
+__bam_set_bt_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ /*
+ * Can't default the prefix routine if the user supplies a comparison
+ * routine; shortening the keys can break their comparison algorithm.
+ */
+ t->bt_compare = func;
+ if (t->bt_prefix == __bam_defpfx)
+ t->bt_prefix = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_bt_maxkey --
+ * Set the maximum keys per page.
+ */
+static int
+__bam_set_bt_maxkey(dbp, bt_maxkey)
+ DB *dbp;
+ u_int32_t bt_maxkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_maxkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_maxkey < 1) {
+ __db_err(dbp->dbenv, "minimum bt_maxkey value is 1");
+ return (EINVAL);
+ }
+
+ t->bt_maxkey = bt_maxkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_minkey --
+ * Set the minimum keys per page.
+ */
+static int
+__bam_set_bt_minkey(dbp, bt_minkey)
+ DB *dbp;
+ u_int32_t bt_minkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_minkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_minkey < 2) {
+ __db_err(dbp->dbenv, "minimum bt_minkey value is 2");
+ return (EINVAL);
+ }
+
+ t->bt_minkey = bt_minkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_prefix --
+ * Set the prefix function.
+ */
+static int
+__bam_set_bt_prefix(dbp, func)
+ DB *dbp;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_prefix");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ t->bt_prefix = func;
+ return (0);
+}
+
+/*
+ * __ram_set_flags --
+ * Set Recno specific flags.
+ *
+ * PUBLIC: int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__ram_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_RENUMBER | DB_SNAPSHOT)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ if (LF_ISSET(DB_RENUMBER)) {
+ F_SET(dbp, DB_AM_RENUMBER);
+ LF_CLR(DB_RENUMBER);
+ }
+
+ if (LF_ISSET(DB_SNAPSHOT)) {
+ F_SET(dbp, DB_AM_SNAPSHOT);
+ LF_CLR(DB_SNAPSHOT);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+}
+
+/*
+ * __ram_set_re_delim --
+ * Set the variable-length input record delimiter.
+ */
+static int
+__ram_set_re_delim(dbp, re_delim)
+ DB *dbp;
+ int re_delim;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_delim");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ t->re_delim = re_delim;
+ F_SET(dbp, DB_AM_DELIMITER);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_len --
+ *	Set the fixed-length record length.
+ */
+static int
+__ram_set_re_len(dbp, re_len)
+ DB *dbp;
+ u_int32_t re_len;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_len");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_len = re_len;
+
+ q = dbp->q_internal;
+ q->re_len = re_len;
+
+ F_SET(dbp, DB_AM_FIXEDLEN);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_pad --
+ * Set the fixed-length record pad character.
+ */
+static int
+__ram_set_re_pad(dbp, re_pad)
+ DB *dbp;
+ int re_pad;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_pad");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_pad = re_pad;
+
+ q = dbp->q_internal;
+ q->re_pad = re_pad;
+
+ F_SET(dbp, DB_AM_PAD);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_source --
+ * Set the backing source file name.
+ */
+static int
+__ram_set_re_source(dbp, re_source)
+ DB *dbp;
+ const char *re_source;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_source");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ return (__os_strdup(dbp->dbenv, re_source, &t->re_source));
+}
diff --git a/libdb/btree/bt_open.c b/libdb/btree/bt_open.c
new file mode 100644
index 0000000..95f779a
--- /dev/null
+++ b/libdb/btree/bt_open.c
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+
+static void __bam_init_meta __P((DB *, BTMETA *, db_pgno_t, DB_LSN *));
+
+/*
+ * __bam_open --
+ * Open a btree.
+ *
+ * PUBLIC: int __bam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+
+ COMPQUIET(name, NULL);
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->key_range = __bam_key_range;
+ dbp->stat = __bam_stat;
+
+ /*
+ * We don't permit the user to specify a prefix routine if they didn't
+	 * also specify a comparison routine; they can't know enough about our
+ * comparison routine to get it right.
+ */
+ if (t->bt_compare == __bam_defcmp && t->bt_prefix != __bam_defpfx) {
+ __db_err(dbp->dbenv,
+"prefix comparison may not be specified for default comparison routine");
+ return (EINVAL);
+ }
+
+ /*
+ * Verify that the bt_minkey value specified won't cause the
+ * calculation of ovflsize to underflow [#2406] for this pagesize.
+ */
+ if (B_MINKEY_TO_OVFLSIZE(dbp, t->bt_minkey, dbp->pgsize) >
+ B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
+ __db_err(dbp->dbenv,
+ "bt_minkey value of %lu too high for page size of %lu",
+ (u_long)t->bt_minkey, (u_long)dbp->pgsize);
+ return (EINVAL);
+ }
+
+ /* Start up the tree. */
+ return (__bam_read_root(dbp, txn, base_pgno, flags));
+}
+
+/*
+ * __bam_metachk --
+ *
+ * PUBLIC: int __bam_metachk __P((DB *, const char *, BTMETA *));
+ */
+int
+__bam_metachk(dbp, name, btm)
+ DB *dbp;
+ const char *name;
+ BTMETA *btm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Btree.
+	 * Check the version; the database may be out of date.
+ */
+ vers = btm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 6:
+ case 7:
+ __db_err(dbenv,
+ "%s: btree version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 8:
+ case 9:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported btree version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __bam_mswap((PAGE *)btm)) != 0)
+ return (ret);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret =
+ __db_fchk(dbenv, "DB->open", btm->dbmeta.flags, BTM_MASK)) != 0)
+ return (ret);
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNO)) {
+ if (dbp->type == DB_BTREE)
+ goto wrong_type;
+ dbp->type = DB_RECNO;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+ } else {
+ if (dbp->type == DB_RECNO)
+ goto wrong_type;
+ dbp->type = DB_BTREE;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) {
+ if (dbp->type != DB_BTREE)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_RECNUM);
+
+ if ((ret = __db_fcchk(dbenv,
+ "DB->open", dbp->flags, DB_AM_DUP, DB_AM_RECNUM)) != 0)
+ return (ret);
+ } else
+ if (F_ISSET(dbp, DB_AM_RECNUM)) {
+ __db_err(dbenv,
+ "%s: DB_RECNUM specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_FIXEDLEN);
+ } else
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ __db_err(dbenv,
+ "%s: DB_FIXEDLEN specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_RENUMBER);
+ } else
+ if (F_ISSET(dbp, DB_AM_RENUMBER)) {
+ __db_err(dbenv,
+ "%s: DB_RENUMBER specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported by file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort specified but not supported in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = btm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, btm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+
+wrong_type:
+ if (dbp->type == DB_BTREE)
+ __db_err(dbenv,
+ "open method type is Btree, database type is Recno");
+ else
+ __db_err(dbenv,
+ "open method type is Recno, database type is Btree");
+ return (EINVAL);
+}
+
+/*
+ * __bam_read_root --
+ * Read the root page and check a tree.
+ *
+ * PUBLIC: int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_read_root(dbp, txn, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ meta = NULL;
+ t = dbp->bt_internal;
+ LOCK_INIT(metalock);
+ mpf = dbp->mpf;
+ ret = 0;
+
+ /* Get a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Get the metadata page. */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &base_pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /*
+ * If the magic number is set, the tree has been created. Correct
+ * any fields that may not be right. Note, all of the local flags
+ * were set by DB->open.
+ *
+ * Otherwise, we'd better be in recovery or abort, in which case the
+ * metadata page will be created/initialized elsewhere.
+ */
+ DB_ASSERT(meta->dbmeta.magic != 0 ||
+ IS_RECOVERING(dbp->dbenv) || F_ISSET(dbp, DB_AM_RECOVER));
+
+ t->bt_maxkey = meta->maxkey;
+ t->bt_minkey = meta->minkey;
+ t->re_pad = meta->re_pad;
+ t->re_len = meta->re_len;
+
+ t->bt_meta = base_pgno;
+ t->bt_root = meta->root;
+
+ /*
+ * !!!
+ * If creating a subdatabase, we've already done an insert when
+ * we put the subdatabase's entry into the master database, so
+ * our last-page-inserted value is wrongly initialized for the
+ * master database, not the subdatabase we're creating. I'm not
+	 * sure where the *right* place to clear this value is; it's not
+ * intuitively obvious that it belongs here.
+ */
+ t->bt_lpgno = PGNO_INVALID;
+
+	/* We must initialize last_pgno; it could be stale. */
+ if (!LF_ISSET(DB_RDONLY) && dbp->meta_pgno == PGNO_BASE_MD) {
+ mpf->last_pgno(mpf, &meta->dbmeta.last_pgno);
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ ret = mpf->put(mpf, meta, 0);
+ meta = NULL;
+
+err: /* Put the metadata page back. */
+ if (meta != NULL && (t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_init_meta --
+ *
+ * Initialize a btree meta-data page. The following fields may need
+ * to be updated later: last_pgno, root.
+ */
+static void
+__bam_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ BTREE *t;
+
+ memset(meta, 0, sizeof(BTMETA));
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
+ meta->dbmeta.magic = DB_BTREEMAGIC;
+ meta->dbmeta.version = DB_BTREEVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_BTREEMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, BTM_DUP);
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
+ F_SET(&meta->dbmeta, BTM_FIXEDLEN);
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ F_SET(&meta->dbmeta, BTM_RECNUM);
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ F_SET(&meta->dbmeta, BTM_RENUMBER);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, BTM_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, BTM_DUPSORT);
+ if (dbp->type == DB_RECNO)
+ F_SET(&meta->dbmeta, BTM_RECNO);
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ t = dbp->bt_internal;
+ meta->maxkey = t->bt_maxkey;
+ meta->minkey = t->bt_minkey;
+ meta->re_len = t->re_len;
+ meta->re_pad = t->re_pad;
+}
+
+/*
+ * __bam_new_file --
+ * Create the necessary pages to begin a new database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__bam_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ BTMETA *meta;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ PAGE *root;
+ db_pgno_t pgno;
+ int ret;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ root = NULL;
+ meta = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+
+ if (name == NULL) {
+ pgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (BTMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
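+	/* Page 1, created below, will hold the root of the new tree. */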
+ meta->root = 1;
+ meta->dbmeta.last_pgno = 1;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv,
+ txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now build root page. */
+ if (name == NULL) {
+ pgno = 1;
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+		/* Zero the reused buffer before building the root page in it. */
+		memset(buf, 0, dbp->pgsize);
+#endif
+ root = (PAGE *)buf;
+ }
+
+ P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID,
+ LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE);
+ LSN_NOT_LOGGED(root->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, root, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn,
+ name, DB_APP_DATA, fhp, dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ root = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (root != NULL)
+ (void)mpf->put(mpf, root, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __bam_new_subdb --
+ * Create a metadata page and a root page for a new btree.
+ *
+ * PUBLIC: int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__bam_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ BTMETA *meta;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *root;
+ int ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ root = NULL;
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get, and optionally create the metadata page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Build meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ __bam_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Create and initialize a root page. */
+ if ((ret = __db_new(dbc,
+ dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE, &root)) != 0)
+ goto err;
+ root->level = LEAFLEVEL;
+
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __bam_root_log(mdbp, txn, &meta->dbmeta.lsn, 0,
+ meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0)
+ goto err;
+
+ meta->root = root->pgno;
+ if ((ret =
+ __db_log_page(mdbp, txn, &root->lsn, root->pgno, root)) != 0)
+ goto err;
+
+ /* Release the metadata and root pages. */
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ if ((ret = mpf->put(mpf, root, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ root = NULL;
+err:
+ if (meta != NULL)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (root != NULL)
+ if ((t_ret = mpf->put(mpf, root, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/libdb/btree/bt_put.c b/libdb/btree/bt_put.c
new file mode 100644
index 0000000..0d5097c
--- /dev/null
+++ b/libdb/btree/bt_put.c
@@ -0,0 +1,854 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __bam_build
+ __P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t));
+static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t));
+static int __bam_ovput
+ __P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *));
+static u_int32_t
+ __bam_partsize __P((DB *, u_int32_t, DBT *, PAGE *, u_int32_t));
+
+/*
+ * __bam_iitem --
+ * Insert an item into the tree.
+ *
+ * PUBLIC: int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+ */
+int
+__bam_iitem(dbc, key, data, op, flags)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t op, flags;
+{
+ BKEYDATA *bk, bk_tmp;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT bk_hdr, tdbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t indx;
+ u_int32_t data_size, have_bytes, need_bytes, needed;
+ int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted;
+
+ COMPQUIET(bk, NULL);
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ h = cp->page;
+ indx = cp->indx;
+ dupadjust = replace = was_deleted = 0;
+
+ /*
+ * Fixed-length records with partial puts: it's an error to specify
+	 * anything other than a simple overwrite.
+ */
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ F_ISSET(data, DB_DBT_PARTIAL) && data->dlen != data->size) {
+ data_size = data->size;
+ goto len_err;
+ }
+
+ /*
+ * Figure out how much space the data will take, including if it's a
+ * partial record.
+ *
+ * Fixed-length records: it's an error to specify a record that's
+ * longer than the fixed-length, and we never require less than
+ * the fixed-length record size.
+ */
+ data_size = F_ISSET(data, DB_DBT_PARTIAL) ?
+ __bam_partsize(dbp, op, data, h, indx) : data->size;
+ padrec = 0;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ if (data_size > t->re_len) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)data_size);
+ return (EINVAL);
+ }
+
+ /* Records that are deleted anyway needn't be padded out. */
+ if (!LF_ISSET(BI_DELETED) && data_size < t->re_len) {
+ padrec = 1;
+ data_size = t->re_len;
+ }
+ }
+
+ /*
+ * Handle partial puts or short fixed-length records: build the
+ * real record.
+ */
+ if (padrec || F_ISSET(data, DB_DBT_PARTIAL)) {
+ tdbt = *data;
+ if ((ret =
+ __bam_build(dbc, op, &tdbt, h, indx, data_size)) != 0)
+ return (ret);
+ data = &tdbt;
+ }
+
+ /*
+ * If the user has specified a duplicate comparison function, return
+ * an error if DB_CURRENT was specified and the replacement data
+ * doesn't compare equal to the current data. This stops apps from
+ * screwing up the duplicate sort order. We have to do this after
+ * we build the real record so that we're comparing the real items.
+ */
+ if (op == DB_CURRENT && dbp->dup_compare != NULL) {
+ if ((ret = __bam_cmp(dbp, data, h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
+ dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp != 0) {
+ __db_err(dbp->dbenv,
+ "Current data differs from put data");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If the key or data item won't fit on a page, we'll have to store
+ * them on overflow pages.
+ */
+ needed = 0;
+ bigdata = data_size > cp->ovflsize;
+ switch (op) {
+ case DB_KEYFIRST:
+ /* We're adding a new key and data pair. */
+ bigkey = key->size > cp->ovflsize;
+ if (bigkey)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(key->size);
+ if (bigdata)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(data_size);
+ break;
+ case DB_AFTER:
+ case DB_BEFORE:
+ case DB_CURRENT:
+ /*
+ * We're either overwriting the data item of a key/data pair
+ * or we're creating a new on-page duplicate and only adding
+ * a data item.
+ *
+ * !!!
+ * We're not currently correcting for space reclaimed from
+ * already deleted items, but I don't think it's worth the
+ * complexity.
+ */
+ bigkey = 0;
+ if (op == DB_CURRENT) {
+ bk = GET_BKEYDATA(dbp, h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ if (B_TYPE(bk->type) == B_KEYDATA)
+ have_bytes = BKEYDATA_PSIZE(bk->len);
+ else
+ have_bytes = BOVERFLOW_PSIZE;
+ need_bytes = 0;
+ } else {
+ have_bytes = 0;
+ need_bytes = sizeof(db_indx_t);
+ }
+ if (bigdata)
+ need_bytes += BOVERFLOW_PSIZE;
+ else
+ need_bytes += BKEYDATA_PSIZE(data_size);
+
+ if (have_bytes < need_bytes)
+ needed += need_bytes - have_bytes;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /*
+ * If there's not enough room, or the user has put a ceiling on the
+ * number of keys permitted in the page, split the page.
+ *
+ * XXX
+ * The t->bt_maxkey test here may be insufficient -- do we have to
+ * check in the btree split code, so we don't undo it there!?!?
+ */
+ if (P_FREESPACE(dbp, h) < needed ||
+ (t->bt_maxkey != 0 && NUM_ENT(h) > t->bt_maxkey))
+ return (DB_NEEDSPLIT);
+
+ /*
+ * The code breaks it up into five cases:
+ *
+ * 1. Insert a new key/data pair.
+ * 2. Append a new data item (a new duplicate).
+ * 3. Insert a new data item (a new duplicate).
+ * 4. Delete and re-add the data item (overflow item).
+ * 5. Overwrite the data item.
+ */
+ switch (op) {
+ case DB_KEYFIRST: /* 1. Insert a new key/data pair. */
+ if (bigkey) {
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, key)) != 0)
+ return (ret);
+ } else
+ if ((ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(key->size), NULL, key)) != 0)
+ return (ret);
+
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ ++indx;
+ break;
+ case DB_AFTER: /* 2. Append a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret =
+ __bam_adjindx(dbc, h, indx + P_INDX, indx, 1)) != 0)
+ return (ret);
+ if ((ret =
+ __bam_ca_di(dbc, PGNO(h), indx + P_INDX, 1)) != 0)
+ return (ret);
+
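+			/*
+			 * The key was copied into slot indx + P_INDX above;
+			 * the new data item goes one slot after that copy
+			 * (indx + 3), and the cursor is advanced to the new
+			 * key/data pair.
+			 */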
+ indx += 3;
+ dupadjust = 1;
+
+ cp->indx += 2;
+ } else {
+ ++indx;
+ cp->indx += 1;
+ }
+ break;
+ case DB_BEFORE: /* 3. Insert a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret = __bam_adjindx(dbc, h, indx, indx, 1)) != 0)
+ return (ret);
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+
+ ++indx;
+ dupadjust = 1;
+ }
+ break;
+ case DB_CURRENT:
+ /*
+ * Clear the cursor's deleted flag. The problem is that if
+ * we deadlock or fail while deleting the overflow item or
+ * replacing the non-overflow item, a subsequent cursor close
+ * will try to remove the item because the cursor's delete
+ * flag is set.
+ */
+ (void)__bam_ca_delete(dbp, PGNO(h), indx, 0);
+
+ if (TYPE(h) == P_LBTREE) {
+ ++indx;
+ dupadjust = 1;
+
+ /*
+ * In a Btree deleted records aren't counted (deleted
+ * records are counted in a Recno because all accesses
+ * are based on record number). If it's a Btree and
+ * it's a DB_CURRENT operation overwriting a previously
+ * deleted record, increment the record count.
+ */
+ was_deleted = B_DISSET(bk->type);
+ }
+
+ /*
+ * 4. Delete and re-add the data item.
+ *
+ * If we're changing the type of the on-page structure, or we
+ * are referencing offpage items, we have to delete and then
+ * re-add the item. We do not do any cursor adjustments here
+ * because we're going to immediately re-add the item into the
+ * same slot.
+ */
+ if (bigdata || B_TYPE(bk->type) != B_KEYDATA) {
+ if ((ret = __bam_ditem(dbc, h, indx)) != 0)
+ return (ret);
+ break;
+ }
+
+ /* 5. Overwrite the data item. */
+ replace = 1;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /* Add the data. */
+ if (bigdata) {
+ /*
+ * We do not have to handle deleted (BI_DELETED) records
+ * in this case; the actual records should never be created.
+ */
+ DB_ASSERT(!LF_ISSET(BI_DELETED));
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0)
+ return (ret);
+ } else {
+ if (LF_ISSET(BI_DELETED)) {
+ B_TSET(bk_tmp.type, B_KEYDATA, 1);
+ bk_tmp.len = data->size;
+ bk_hdr.data = &bk_tmp;
+ bk_hdr.size = SSZA(BKEYDATA, data);
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), &bk_hdr, data);
+ } else if (replace)
+ ret = __bam_ritem(dbc, h, indx, data);
+ else
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), NULL, data);
+ if (ret != 0)
+ return (ret);
+ }
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ /*
+ * Re-position the cursors if necessary and reset the current cursor
+ * to point to the new item.
+ */
+ if (op != DB_CURRENT) {
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ cp->indx = TYPE(h) == P_LBTREE ? indx - O_INDX : indx;
+ }
+
+ /*
+ * If we've changed the record count, update the tree. The count only
+ * needs adjusting if the operation was not performed on the current
+ * record, or if the current record was previously deleted.
+ */
+ if (F_ISSET(cp, C_RECNUM) && (op != DB_CURRENT || was_deleted))
+ if ((ret = __bam_adjust(dbc, 1)) != 0)
+ return (ret);
+
+ /*
+ * If a Btree leaf page is at least 50% full and we may have added or
+ * modified a duplicate data item, see if the set of duplicates takes
+ * up at least 25% of the space on the page. If it does, move it onto
+ * its own page.
+ */
+ if (dupadjust && P_FREESPACE(dbp, h) <= dbp->pgsize / 2) {
+ if ((ret = __bam_dup_convert(dbc, h, indx - O_INDX)) != 0)
+ return (ret);
+ }
+
+ /* If we've modified a recno file, set the flag. */
+ if (dbc->dbtype == DB_RECNO)
+ t->re_modified = 1;
+
+ return (ret);
+}
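+
+/*
+ * A minimal sketch of the DB_KEYFIRST space estimate made in __bam_iitem
+ * above.  The helper name is assumed for illustration only; cp->ovflsize,
+ * BKEYDATA_PSIZE and BOVERFLOW_PSIZE are the values the real code uses.
+ *
+ *	static u_int32_t
+ *	keyfirst_needed(BTREE_CURSOR *cp, u_int32_t ksize, u_int32_t dsize)
+ *	{
+ *		u_int32_t needed;
+ *
+ *		needed = ksize > cp->ovflsize ?
+ *		    BOVERFLOW_PSIZE : BKEYDATA_PSIZE(ksize);
+ *		needed += dsize > cp->ovflsize ?
+ *		    BOVERFLOW_PSIZE : BKEYDATA_PSIZE(dsize);
+ *		return (needed);
+ *	}
+ */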
+
+/*
+ * __bam_partsize --
+ * Figure out how much space a partial data item is in total.
+ */
+static u_int32_t
+__bam_partsize(dbp, op, data, h, indx)
+ DB *dbp;
+ u_int32_t op, indx;
+ DBT *data;
+ PAGE *h;
+{
+ BKEYDATA *bk;
+ u_int32_t nbytes;
+
+ /*
+ * If the record doesn't already exist, it's simply the data we're
+ * provided.
+ */
+ if (op != DB_CURRENT)
+ return (data->doff + data->size);
+
+ /*
+ * Otherwise, it's the data provided plus any already existing data
+ * that we're not replacing.
+ */
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ nbytes =
+ B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len;
+
+ return (__db_partsize(nbytes, data));
+}
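+
+/*
+ * A worked example of the partial-put sizing above, with hypothetical
+ * values: for an existing 100-byte record with data->doff = 10,
+ * data->dlen = 20 and data->size = 5, the put replaces 20 bytes with 5,
+ * so the resulting record is 100 - 20 + 5 = 85 bytes.  When the record
+ * does not yet exist (the non-DB_CURRENT branch above), the size is just
+ * data->doff + data->size, and __bam_build below fills any unspecified
+ * leading bytes with nul or pad bytes.
+ */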
+
+/*
+ * __bam_build --
+ * Build the real record for a partial put, or short fixed-length record.
+ */
+static int
+__bam_build(dbc, op, dbt, h, indx, nbytes)
+ DBC *dbc;
+ u_int32_t op, indx, nbytes;
+ DBT *dbt;
+ PAGE *h;
+{
+ BKEYDATA *bk, tbk;
+ BOVERFLOW *bo;
+ BTREE *t;
+ DB *dbp;
+ DBT copy, *rdata;
+ u_int32_t len, tlen;
+ u_int8_t *p;
+ int ret;
+
+ COMPQUIET(bo, NULL);
+
+ dbp = dbc->dbp;
+ t = dbp->bt_internal;
+
+ /* We use the record data return memory; it's only a short-term use. */
+ rdata = &dbc->my_rdata;
+ if (rdata->ulen < nbytes) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ nbytes, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ }
+ rdata->ulen = nbytes;
+ }
+
+ /*
+ * We use nul or pad bytes for any part of the record that isn't
+ * specified; get it over with.
+ */
+ memset(rdata->data,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_pad : 0, nbytes);
+
+ /*
+ * In the next clauses, we need to do three things: a) set p to point
+ * to the place at which to copy the user's data, b) set tlen to the
+ * total length of the record, not including the bytes contributed by
+ * the user, and c) copy any valid data from an existing record. If
+ * it's not a partial put (this code is called for both partial puts
+ * and fixed-length record padding) or it's a new key, we can cut to
+ * the chase.
+ */
+ if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) {
+ p = (u_int8_t *)rdata->data + dbt->doff;
+ tlen = dbt->doff;
+ goto user_copy;
+ }
+
+ /* Find the current record. */
+ if (indx < NUM_ENT(h)) {
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ?
+ O_INDX : 0));
+ bo = (BOVERFLOW *)bk;
+ } else {
+ bk = &tbk;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = 0;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ /*
+ * In the case of an overflow record, we shift things around
+ * in the current record rather than allocate a separate copy.
+ */
+ memset(&copy, 0, sizeof(copy));
+ if ((ret = __db_goff(dbp, &copy, bo->tlen,
+ bo->pgno, &rdata->data, &rdata->ulen)) != 0)
+ return (ret);
+
+ /* Skip any leading data from the original record. */
+ tlen = dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
+
+ /*
+ * Copy in any trailing data from the original record.
+ *
+ * If the original record was larger than the original offset
+ * plus the bytes being deleted, there is trailing data in the
+ * original record we need to preserve. If we aren't deleting
+ * the same number of bytes as we're inserting, copy it up or
+ * down, into place.
+ *
+ * Use memmove(), the regions may overlap.
+ */
+ if (bo->tlen > dbt->doff + dbt->dlen) {
+ len = bo->tlen - (dbt->doff + dbt->dlen);
+ if (dbt->dlen != dbt->size)
+ memmove(p + dbt->size, p + dbt->dlen, len);
+ tlen += len;
+ }
+ } else {
+ /* Copy in any leading data from the original record. */
+ memcpy(rdata->data,
+ bk->data, dbt->doff > bk->len ? bk->len : dbt->doff);
+ tlen = dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
+
+ /* Copy in any trailing data from the original record. */
+ len = dbt->doff + dbt->dlen;
+ if (bk->len > len) {
+ memcpy(p + dbt->size, bk->data + len, bk->len - len);
+ tlen += bk->len - len;
+ }
+ }
+
+user_copy:
+ /*
+ * Copy in the application provided data -- p and tlen must have been
+ * initialized above.
+ */
+ memcpy(p, dbt->data, dbt->size);
+ tlen += dbt->size;
+
+ /* Set the DBT to reference our new record. */
+ rdata->size = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : tlen;
+ rdata->dlen = 0;
+ rdata->doff = 0;
+ rdata->flags = 0;
+ *dbt = *rdata;
+ return (0);
+}
+
+/*
+ * __bam_ritem --
+ * Replace an item on a page.
+ *
+ * PUBLIC: int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+ */
+int
+__bam_ritem(dbc, h, indx, data)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *data;
+{
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT orig, repl;
+ db_indx_t cnt, lo, ln, min, off, prefix, suffix;
+ int32_t nbytes;
+ int ret;
+ db_indx_t *inp;
+ u_int8_t *p, *t;
+
+ dbp = dbc->dbp;
+
+ /*
+ * Replace a single item onto a page. The logic figuring out where
+ * to insert and whether it fits is handled in the caller. All we do
+ * here is manage the page shuffling.
+ */
+ bk = GET_BKEYDATA(dbp, h, indx);
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ /*
+ * We might as well check to see if the two data items share
+ * a common prefix and suffix -- it can save us a lot of log
+ * space if they're large.
+ */
+ min = data->size < bk->len ? data->size : bk->len;
+ for (prefix = 0,
+ p = bk->data, t = data->data;
+ prefix < min && *p == *t; ++prefix, ++p, ++t)
+ ;
+
+ min -= prefix;
+ for (suffix = 0,
+ p = (u_int8_t *)bk->data + bk->len - 1,
+ t = (u_int8_t *)data->data + data->size - 1;
+ suffix < min && *p == *t; ++suffix, --p, --t)
+ ;
+
+ /* We only log the parts of the keys that have changed. */
+ orig.data = (u_int8_t *)bk->data + prefix;
+ orig.size = bk->len - (prefix + suffix);
+ repl.data = (u_int8_t *)data->data + prefix;
+ repl.size = data->size - (prefix + suffix);
+ if ((ret = __bam_repl_log(dbp, dbc->txn, &LSN(h), 0, PGNO(h),
+ &LSN(h), (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
+ &orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+
+ /*
+ * Set references to the first in-use byte on the page and the
+ * first byte of the item being replaced.
+ */
+ inp = P_INP(dbp, h);
+ p = (u_int8_t *)h + HOFFSET(h);
+ t = (u_int8_t *)bk;
+
+ /*
+ * If the entry is growing in size, shift the beginning of the data
+ * part of the page down. If the entry is shrinking in size, shift
+ * the beginning of the data part of the page up. Use memmove(3),
+ * the regions overlap.
+ */
+ lo = BKEYDATA_SIZE(bk->len);
+ ln = (db_indx_t)BKEYDATA_SIZE(data->size);
+ if (lo != ln) {
+ nbytes = lo - ln; /* Signed difference. */
+ if (p == t) /* First index is fast. */
+ inp[indx] += nbytes;
+ else { /* Else, shift the page. */
+ memmove(p + nbytes, p, t - p);
+
+ /* Adjust the indices' offsets. */
+ off = inp[indx];
+ for (cnt = 0; cnt < NUM_ENT(h); ++cnt)
+ if (inp[cnt] <= off)
+ inp[cnt] += nbytes;
+ }
+
+ /* Clean up the page and adjust the item's reference. */
+ HOFFSET(h) += nbytes;
+ t += nbytes;
+ }
+
+ /* Copy the new item onto the page. */
+ bk = (BKEYDATA *)t;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = data->size;
+ memcpy(bk->data, data->data, data->size);
+
+ return (0);
+}
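+
+/*
+ * A minimal sketch of the prefix/suffix trimming __bam_ritem uses above to
+ * shrink the log record; old/new and their lengths stand in for the on-page
+ * BKEYDATA item and the replacement DBT (names are illustrative only):
+ *
+ *	min = old_len < new_len ? old_len : new_len;
+ *	for (prefix = 0; prefix < min && old[prefix] == new[prefix]; ++prefix)
+ *		;
+ *	min -= prefix;
+ *	for (suffix = 0; suffix < min &&
+ *	    old[old_len - 1 - suffix] == new[new_len - 1 - suffix]; ++suffix)
+ *		;
+ *
+ * Only the bytes between the shared prefix and suffix of each item are
+ * logged, which can make the log record much smaller for large items.
+ */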
+
+/*
+ * __bam_dup_convert --
+ * Check to see if the duplicate set at indx should have its own page.
+ * If it should, create it.
+ */
+static int
+__bam_dup_convert(dbc, h, indx)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+{
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT hdr;
+ DB_MPOOLFILE *mpf;
+ PAGE *dp;
+ db_indx_t cnt, cpindx, dindx, first, *inp, sz;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
+
+ /*
+ * Count the duplicate records and calculate how much room they're
+ * using on the page.
+ */
+ while (indx > 0 && inp[indx] == inp[indx - P_INDX])
+ indx -= P_INDX;
+ for (cnt = 0, sz = 0, first = indx;; ++cnt, indx += P_INDX) {
+ if (indx >= NUM_ENT(h) || inp[first] != inp[indx])
+ break;
+ bk = GET_BKEYDATA(dbp, h, indx);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ }
+
+ /*
+ * We have to do these checks when the user is replacing the cursor's
+ * data item -- if the application replaces a duplicate item with a
+ * larger data item, it can increase the amount of space used by the
+ * duplicates, requiring this check. But that means we may have done
+ * this check when it wasn't a duplicate item after all.
+ */
+ if (cnt == 1)
+ return (0);
+
+ /*
+ * If this set of duplicates is using more than 25% of the page, move
+ * them off. The choice of 25% is a WAG, but the value must be small
+ * enough that we can always split a page without putting duplicates
+ * on two different pages.
+ */
+ if (sz < dbp->pgsize / 4)
+ return (0);
+
+ /* Get a new page. */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize, dp->pgno,
+ PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Move this set of duplicates off the page. First points to the first
+ * key of the first duplicate key/data pair, cnt is the number of pairs
+ * we're dealing with.
+ */
+ memset(&hdr, 0, sizeof(hdr));
+ dindx = first;
+ indx = first;
+ cpindx = 0;
+ do {
+ /* Move cursors referencing the old entry to the new entry. */
+ if ((ret = __bam_ca_dup(dbc, first,
+ PGNO(h), indx, PGNO(dp), cpindx)) != 0)
+ goto err;
+
+ /*
+ * Copy the entry to the new page. If the off-page duplicate
+ * page is a Btree page (i.e. dup_compare will be non-NULL;
+ * we use Btree pages for sorted dups, and Recno pages for
+ * unsorted dups), move all entries
+ * normally, even deleted ones. If it's a Recno page,
+ * deleted entries are discarded (if the deleted entry is
+ * overflow, then free up those pages).
+ */
+ bk = GET_BKEYDATA(dbp, h, dindx + 1);
+ hdr.data = bk;
+ hdr.size = B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE;
+ if (dbp->dup_compare == NULL && B_DISSET(bk->type)) {
+ /*
+ * Unsorted dups, i.e. recno page, and we have
+ * a deleted entry, don't move it, but if it was
+ * an overflow entry, we need to free those pages.
+ */
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_doff(dbc,
+ (GET_BOVERFLOW(dbp, h, dindx + 1))->pgno)) != 0)
+ goto err;
+ } else {
+ if ((ret = __db_pitem(
+ dbc, dp, cpindx, hdr.size, &hdr, NULL)) != 0)
+ goto err;
+ ++cpindx;
+ }
+ /* Delete all but the last reference to the key. */
+ if (cnt != 1) {
+ if ((ret = __bam_adjindx(dbc,
+ h, dindx, first + 1, 0)) != 0)
+ goto err;
+ } else
+ dindx++;
+
+ /* Delete the data item. */
+ if ((ret = __db_ditem(dbc, h, dindx, hdr.size)) != 0)
+ goto err;
+ indx += P_INDX;
+ } while (--cnt);
+
+ /* Put in a new data item that points to the duplicates page. */
+ if ((ret = __bam_ovput(dbc,
+ B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
+ goto err;
+
+ /* Adjust cursors for all the above movements. */
+ if ((ret = __bam_ca_di(dbc,
+ PGNO(h), first + P_INDX, first + P_INDX - indx)) != 0)
+ goto err;
+
+ return (mpf->put(mpf, dp, DB_MPOOL_DIRTY));
+
+err: (void)mpf->put(mpf, dp, 0);
+ return (ret);
+}
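+
+/*
+ * A rough sketch of the decision that sends duplicates off-page: the caller
+ * (__bam_iitem) only calls __bam_dup_convert once the leaf page is at least
+ * half full, and the set is moved only if it occupies a quarter of the page:
+ *
+ *	if (P_FREESPACE(dbp, h) <= dbp->pgsize / 2 &&
+ *	    sz >= dbp->pgsize / 4)
+ *		move the set to its own P_LDUP or P_LRECNO page;
+ *
+ * where sz is the on-page footprint of the duplicate set computed above.
+ */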
+
+/*
+ * __bam_ovput --
+ * Build an item for an off-page duplicates page or overflow page and
+ * insert it on the page.
+ */
+static int
+__bam_ovput(dbc, type, pgno, h, indx, item)
+ DBC *dbc;
+ u_int32_t type, indx;
+ db_pgno_t pgno;
+ PAGE *h;
+ DBT *item;
+{
+ BOVERFLOW bo;
+ DBT hdr;
+ int ret;
+
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, type, 0);
+ UMRW_SET(bo.unused2);
+
+ /*
+ * If we're creating an overflow item, do so and acquire the page
+ * number for it. If we're creating an off-page duplicates tree,
+ * we are given the page number as an argument.
+ */
+ if (type == B_OVERFLOW) {
+ if ((ret = __db_poff(dbc, item, &bo.pgno)) != 0)
+ return (ret);
+ bo.tlen = item->size;
+ } else {
+ bo.pgno = pgno;
+ bo.tlen = 0;
+ }
+
+ /* Store the new record on the page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bo;
+ hdr.size = BOVERFLOW_SIZE;
+ return (__db_pitem(dbc, h, indx, BOVERFLOW_SIZE, &hdr, NULL));
+}
diff --git a/libdb/btree/bt_rec.c b/libdb/btree/bt_rec.c
new file mode 100644
index 0000000..aeb4aef
--- /dev/null
+++ b/libdb/btree/bt_rec.c
@@ -0,0 +1,971 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+#define IS_BTREE_PAGE(pagep) \
+ (TYPE(pagep) == P_IBTREE || \
+ TYPE(pagep) == P_LBTREE || TYPE(pagep) == P_LDUP)
+
+/*
+ * __bam_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __bam_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp;
+ db_pgno_t pgno, root_pgno;
+ u_int32_t ptype;
+ int cmp, l_update, p_update, r_update, rc, ret, ret_l, rootsplit, t_ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_split_print);
+
+ mpf = NULL;
+ _lp = lp = np = pp = _rp = rp = NULL;
+ sp = NULL;
+
+ REC_INTRO(__bam_split_read, 1);
+
+ /*
+ * There are two kinds of splits that we have to recover from. The
+ * first is a root-page split, where the root page is split from a
+ * leaf page into an internal page and two new leaf pages are created.
+ * The second is where a page is split into two pages, and a new key
+ * is inserted into the parent page.
+ *
+ * DBTs are not aligned in log records, so we need to copy the page
+ * so that we can access fields within it throughout this routine.
+ * Although we could hardcode the unaligned copies in this routine,
+ * we will be calling into regular btree functions with this page,
+ * so it's got to be aligned. Copying it into allocated memory is
+ * the only way to guarantee this.
+ */
+ if ((ret = __os_malloc(dbenv, argp->pg.size, &sp)) != 0)
+ goto out;
+ memcpy(sp, argp->pg.data, argp->pg.size);
+
+ pgno = PGNO(sp);
+ root_pgno = argp->root_pgno;
+ rootsplit = root_pgno != PGNO_INVALID;
+ if ((ret_l = mpf->get(mpf, &argp->left, 0, &lp)) != 0)
+ lp = NULL;
+ if (mpf->get(mpf, &argp->right, 0, &rp) != 0)
+ rp = NULL;
+
+ if (DB_REDO(op)) {
+ l_update = r_update = p_update = 0;
+ /*
+ * Decide if we need to resplit the page.
+ *
+ * If this is a root split, then the root has to exist, it's
+ * the page we're splitting and it gets modified. If this is
+ * not a root split, then the left page has to exist, for the
+ * same reason.
+ */
+ if (rootsplit) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
+ __db_pgerr(file_dbp, pgno, ret);
+ pp = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(pp), &LSN(argp->pg.data));
+ CHECK_LSN(op, cmp, &LSN(pp), &LSN(argp->pg.data));
+ p_update = cmp == 0;
+ } else if (lp == NULL) {
+ __db_pgerr(file_dbp, argp->left, ret_l);
+ goto out;
+ }
+
+ if (lp != NULL) {
+ cmp = log_compare(&LSN(lp), &argp->llsn);
+ CHECK_LSN(op, cmp, &LSN(lp), &argp->llsn);
+ if (cmp == 0)
+ l_update = 1;
+ } else
+ l_update = 1;
+
+ if (rp != NULL) {
+ cmp = log_compare(&LSN(rp), &argp->rlsn);
+ CHECK_LSN(op, cmp, &LSN(rp), &argp->rlsn);
+ if (cmp == 0)
+ r_update = 1;
+ } else
+ r_update = 1;
+ if (!p_update && !l_update && !r_update)
+ goto check_next;
+
+ /* Allocate and initialize new left/right child pages. */
+ if ((ret = __os_malloc(dbenv, file_dbp->pgsize, &_lp)) != 0 ||
+ (ret = __os_malloc(dbenv, file_dbp->pgsize, &_rp)) != 0)
+ goto out;
+ if (rootsplit) {
+ P_INIT(_lp, file_dbp->pgsize, argp->left,
+ PGNO_INVALID,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->left,
+ PGNO_INVALID, LEVEL(sp), TYPE(sp));
+ } else {
+ P_INIT(_lp, file_dbp->pgsize, PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : PREV_PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : sp->pgno,
+ ISINTERNAL(sp) ? PGNO_INVALID : NEXT_PGNO(sp),
+ LEVEL(sp), TYPE(sp));
+ }
+
+ /* Split the page. */
+ if ((ret = __bam_copy(file_dbp, sp, _lp, 0, argp->indx)) != 0 ||
+ (ret = __bam_copy(file_dbp, sp, _rp, argp->indx,
+ NUM_ENT(sp))) != 0)
+ goto out;
+
+ /* If the left child is wrong, update it. */
+ if (lp == NULL && (ret = mpf->get(
+ mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) {
+ __db_pgerr(file_dbp, argp->left, ret);
+ lp = NULL;
+ goto out;
+ }
+ if (l_update) {
+ memcpy(lp, _lp, file_dbp->pgsize);
+ lp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+
+ /* If the right child is wrong, update it. */
+ if (rp == NULL && (ret = mpf->get(
+ mpf, &argp->right, DB_MPOOL_CREATE, &rp)) != 0) {
+ __db_pgerr(file_dbp, argp->right, ret);
+ rp = NULL;
+ goto out;
+ }
+ if (r_update) {
+ memcpy(rp, _rp, file_dbp->pgsize);
+ rp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+
+ /*
+ * If the parent page is wrong, update it. This is of interest
+ * only if it was a root split, since root splits create parent
+ * pages. All other splits modify a parent page, but those are
+ * separately logged and recovered.
+ */
+ if (rootsplit && p_update) {
+ if (IS_BTREE_PAGE(sp)) {
+ ptype = P_IBTREE;
+ rc = argp->opflags & SPL_NRECS ? 1 : 0;
+ } else {
+ ptype = P_IRECNO;
+ rc = 1;
+ }
+
+ P_INIT(pp, file_dbp->pgsize, root_pgno,
+ PGNO_INVALID, PGNO_INVALID, _lp->level + 1, ptype);
+ RE_NREC_SET(pp, rc ? __bam_total(file_dbp, _lp) +
+ __bam_total(file_dbp, _rp) : 0);
+
+ pp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+check_next: /*
+ * Finally, redo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. The next
+ * page must exist because we're redoing the operation.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
+ __db_pgerr(file_dbp, argp->npgno, ret);
+ np = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(np), &argp->nlsn);
+ CHECK_LSN(op, cmp, &LSN(np), &argp->nlsn);
+ if (cmp == 0) {
+ PREV_PGNO(np) = argp->right;
+ np->lsn = *lsnp;
+ if ((ret =
+ mpf->put(mpf, np, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ np = NULL;
+ }
+ }
+ } else {
+ /*
+ * If the split page is wrong, replace its contents with the
+ * logged page contents. If the page doesn't exist, it means
+ * that the create of the page never happened, nor did any of
+ * the adds onto the page that caused the split, and there's
+ * really no undo-ing to be done.
+ */
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
+ pp = NULL;
+ goto lrundo;
+ }
+ if (log_compare(lsnp, &LSN(pp)) == 0) {
+ memcpy(pp, argp->pg.data, argp->pg.size);
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+ /*
+ * If it's a root split and the left child ever existed, update
+ * its LSN. (If it's not a root split, we've updated the left
+ * page already -- it's the same as the split page.) If the
+ * right child ever existed, root split or not, update its LSN.
+ * The undo of the page allocation(s) will restore them to the
+ * free list.
+ */
+lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
+ if (rootsplit && lp != NULL &&
+ log_compare(lsnp, &LSN(lp)) == 0) {
+ lp->lsn = argp->llsn;
+ if ((ret =
+ mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+ if (rp != NULL &&
+ log_compare(lsnp, &LSN(rp)) == 0) {
+ rp->lsn = argp->rlsn;
+ if ((ret =
+ mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+ }
+
+ /*
+ * Finally, undo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. Since it's
+ * possible that the next-page never existed, we ignore it as
+ * if there's nothing to undo.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
+ np = NULL;
+ goto done;
+ }
+ if (log_compare(lsnp, &LSN(np)) == 0) {
+ PREV_PGNO(np) = argp->left;
+ np->lsn = argp->nlsn;
+ if (mpf->put(mpf, np, DB_MPOOL_DIRTY))
+ goto out;
+ np = NULL;
+ }
+ }
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: /* Free any pages that weren't dirtied. */
+ if (pp != NULL && (t_ret = mpf->put(mpf, pp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (lp != NULL && (t_ret = mpf->put(mpf, lp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (np != NULL && (t_ret = mpf->put(mpf, np, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (rp != NULL && (t_ret = mpf->put(mpf, rp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Free any allocated space. */
+ if (_lp != NULL)
+ __os_free(dbenv, _lp);
+ if (_rp != NULL)
+ __os_free(dbenv, _rp);
+ if (sp != NULL)
+ __os_free(dbenv, sp);
+
+ REC_CLOSE;
+}
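+
+/*
+ * The recovery functions below share a common LSN-comparison idiom; a
+ * minimal sketch, with pagep, argp, lsnp and op as in the functions
+ * themselves:
+ *
+ *	cmp_n = log_compare(lsnp, &LSN(pagep));
+ *	cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ *	if (cmp_p == 0 && DB_REDO(op)) {
+ *		... reapply the logged change ...
+ *		LSN(pagep) = *lsnp;
+ *	} else if (cmp_n == 0 && DB_UNDO(op)) {
+ *		... reverse the logged change ...
+ *		LSN(pagep) = argp->lsn;
+ *	}
+ */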
+
+/*
+ * __bam_rsplit_recover --
+ * Recovery function for a reverse split.
+ *
+ * PUBLIC: int __bam_rsplit_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno, root_pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_rsplit_print);
+ REC_INTRO(__bam_rsplit_read, 1);
+
+ /* Fix the root page. */
+ pgno = root_pgno = argp->root_pgno;
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
+ /* The root page must always exist if we are going forward. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ }
+ /* This must be the root of an OPD tree. */
+ DB_ASSERT(root_pgno !=
+ ((BTREE *)file_dbp->bt_internal)->bt_root);
+ ret = 0;
+ goto do_page;
+ }
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->rootlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->rootlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ pagep->pgno = root_pgno;
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, root_pgno,
+ argp->nrec, PGNO_INVALID, pagep->level + 1,
+ IS_BTREE_PAGE(pagep) ? P_IBTREE : P_IRECNO);
+ if ((ret = __db_pitem(dbc, pagep, 0,
+ argp->rootent.size, &argp->rootent, NULL)) != 0)
+ goto out;
+ pagep->lsn = argp->rootlsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+do_page:
+ /*
+ * Fix the page copied over the root page. It's possible that the
+ * page never made it to disk, so if we're undo-ing and the page
+ * doesn't exist, it's okay and there's nothing further to do.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ modified = 0;
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_adj_recover --
+ * Recovery function for adj.
+ *
+ * PUBLIC: int __bam_adj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_adj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_adj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_adj_print);
+ REC_INTRO(__bam_adj_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, argp->is_insert)) != 0)
+ goto out;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, !argp->is_insert)) != 0)
+ goto out;
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_cadjust_recover --
+ * Recovery function for the adjust of a count change in an internal
+ * page.
+ *
+ * PUBLIC: int __bam_cadjust_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cadjust_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cadjust_print);
+ REC_INTRO(__bam_cadjust_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ } else {
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ }
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ } else {
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ }
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_cdel_recover --
+ * Recovery function for the intent-to-delete of a cursor record.
+ *
+ * PUBLIC: int __bam_cdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t indx;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cdel_print);
+ REC_INTRO(__bam_cdel_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, indx)->type);
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DCLR(GET_BKEYDATA(file_dbp, pagep, indx)->type);
+
+ (void)__bam_ca_delete(file_dbp, argp->pgno, argp->indx, 0);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_repl_recover --
+ * Recovery function for page item replacement.
+ *
+ * PUBLIC: int __bam_repl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_repl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_repl_args *argp;
+ BKEYDATA *bk;
+ DB *file_dbp;
+ DBC *dbc;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+ u_int8_t *p;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_repl_print);
+ REC_INTRO(__bam_repl_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ bk = GET_BKEYDATA(file_dbp, pagep, argp->indx);
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described.
+ *
+ * Re-build the replacement item.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->repl.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->repl.data, argp->repl.size);
+ p += argp->repl.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbenv, dbt.data);
+ if (ret != 0)
+ goto out;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described.
+ *
+ * Re-build the original item.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->orig.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->orig.data, argp->orig.size);
+ p += argp->orig.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbenv, dbt.data);
+ if (ret != 0)
+ goto out;
+
+ /* Reset the deleted flag, if necessary. */
+ if (argp->isdeleted)
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, argp->indx)->type);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_root_recover --
+ * Recovery function for setting the root page on the meta-data page.
+ *
+ * PUBLIC: int __bam_root_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_root_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_root_args *argp;
+ BTMETA *meta;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int cmp_n, cmp_p, modified, ret;
+
+ meta = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_root_print);
+ REC_INTRO(__bam_root_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->meta_pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->meta_pgno, ret);
+ goto out;
+ } else
+ goto done;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ meta->root = argp->root_pgno;
+ meta->dbmeta.lsn = *lsnp;
+ ((BTREE *)file_dbp->bt_internal)->bt_root = meta->root;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Nothing to undo except lsn. */
+ meta->dbmeta.lsn = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_curadj_recover --
+ * Transaction abort function to undo cursor adjustments.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int ret;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__bam_curadj_print);
+ REC_INTRO(__bam_curadj_read, 0);
+
+ ret = 0;
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ switch(argp->mode) {
+ case DB_CA_DI:
+ if ((ret = __bam_ca_di(dbc, argp->from_pgno,
+ argp->from_indx, -(int)argp->first_indx)) != 0)
+ goto out;
+ break;
+ case DB_CA_DUP:
+ if ((ret = __bam_ca_undodup(file_dbp, argp->first_indx,
+ argp->from_pgno, argp->from_indx, argp->to_indx)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_RSPLIT:
+ if ((ret =
+ __bam_ca_rsplit(dbc, argp->to_pgno, argp->from_pgno)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_SPLIT:
+ __bam_ca_undosplit(file_dbp, argp->from_pgno,
+ argp->to_pgno, argp->left_pgno, argp->from_indx);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rcuradj_recover --
+ * Transaction abort function to undo cursor adjustments in rrecno.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_rcuradj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rcuradj_args *argp;
+ BTREE_CURSOR *cp;
+ DB *file_dbp;
+ DBC *dbc, *rdbc;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ COMPQUIET(info, NULL);
+ rdbc = NULL;
+
+ REC_PRINT(__bam_rcuradj_print);
+ REC_INTRO(__bam_rcuradj_read, 0);
+
+ ret = t_ret = 0;
+
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /*
+ * We don't know whether we're in an offpage dup set, and
+ * thus don't know whether the dbc that REC_INTRO has handed us
+ * is of a reasonable type. It's certainly unset, so if this is
+ * an offpage dup set, we don't have an OPD cursor. The
+ * simplest solution is just to allocate a whole new cursor
+ * for our use; we're only really using it to pass some
+ * state into __ram_ca, and this way we don't need to make
+ * this function know anything about how offpage dups work.
+ */
+ if ((ret =
+ __db_icursor(file_dbp,
+ NULL, DB_RECNO, argp->root, 0, DB_LOCK_INVALIDID, &rdbc)) != 0)
+ goto out;
+
+ cp = (BTREE_CURSOR *)rdbc->internal;
+ F_SET(cp, C_RENUMBER);
+ cp->recno = argp->recno;
+
+ switch(argp->mode) {
+ case CA_DELETE:
+ /*
+ * The way to undo a delete is with an insert. Since
+ * we're undoing it, the delete flag must be set.
+ */
+ F_SET(cp, C_DELETED);
+ F_SET(cp, C_RENUMBER); /* Just in case. */
+ cp->order = argp->order;
+ __ram_ca(rdbc, CA_ICURRENT);
+ break;
+ case CA_IAFTER:
+ case CA_IBEFORE:
+ case CA_ICURRENT:
+ /*
+ * The way to undo an insert is with a delete. The delete
+ * flag is unset to start with.
+ */
+ F_CLR(cp, C_DELETED);
+ cp->order = INVALID_ORDER;
+ __ram_ca(rdbc, CA_DELETE);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: if (rdbc != NULL && (t_ret = rdbc->c_close(rdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ REC_CLOSE;
+}
diff --git a/libdb/btree/bt_reclaim.c b/libdb/btree/bt_reclaim.c
new file mode 100644
index 0000000..a55d16f
--- /dev/null
+++ b/libdb/btree/bt_reclaim.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_reclaim --
+ * Free a database.
+ *
+ * PUBLIC: int __bam_reclaim __P((DB *, DB_TXN *));
+ */
+int
+__bam_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Walk the tree, freeing pages. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_reclaim_callback, dbc);
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __bam_truncate --
+ * Truncate a database.
+ *
+ * PUBLIC: int __bam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__bam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ db_trunc_param trunc;
+ int ret, t_ret;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ trunc.count = 0;
+ trunc.dbc = dbc;
+ /* Walk the tree, freeing pages. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc);
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ *countp = trunc.count;
+
+ return (ret);
+}
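+
+/*
+ * A usage sketch, assuming the application reaches __bam_truncate through
+ * the generic DB->truncate method (dbp and txn are assumed to be an open
+ * database handle and an optional transaction):
+ *
+ *	u_int32_t count;
+ *	int ret;
+ *
+ *	if ((ret = dbp->truncate(dbp, txn, &count, 0)) == 0)
+ *		printf("discarded %lu records\n", (u_long)count);
+ */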
diff --git a/libdb/btree/bt_recno.c b/libdb/btree/bt_recno.c
new file mode 100644
index 0000000..b51d00a
--- /dev/null
+++ b/libdb/btree/bt_recno.c
@@ -0,0 +1,1327 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __ram_add __P((DBC *, db_recno_t *, DBT *, u_int32_t, u_int32_t));
+static int __ram_source __P((DB *));
+static int __ram_sread __P((DBC *, db_recno_t));
+static int __ram_update __P((DBC *, db_recno_t, int));
+
+/*
+ * In recno, there are two meanings to the on-page "deleted" flag. If we're
+ * re-numbering records, it means the record was implicitly created. We skip
+ * over implicitly created records if doing a cursor "next" or "prev", and
+ * return DB_KEYEMPTY if they're explicitly requested. If not re-numbering
+ * records, it means that the record was implicitly created, or was deleted.
+ * We skip over implicitly created or deleted records if doing a cursor "next"
+ * or "prev", and return DB_KEYEMPTY if they're explicitly requested.
+ *
+ * If we're re-numbering records, then we have to detect in the cursor that
+ * a record was deleted, and adjust the cursor as necessary on the next get.
+ * If we're not re-numbering records, then we can detect that a record has
+ * been deleted by looking at the actual on-page record, so we completely
+ * ignore the cursor's delete flag. This is different from the B+tree code,
+ * which maintains the deleted-record state in the cursor itself and doesn't
+ * always check the on-page value.
+ */
+#define CD_SET(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) \
+ F_SET(cp, C_DELETED); \
+}
+#define CD_CLR(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) { \
+ F_CLR(cp, C_DELETED); \
+ cp->order = INVALID_ORDER; \
+ } \
+}
+#define CD_ISSET(cp) \
+ (F_ISSET(cp, C_RENUMBER) && F_ISSET(cp, C_DELETED))
+
+/*
+ * Macros for comparing the ordering of two cursors.
+ * cp1 comes before cp2 iff one of the following holds:
+ * cp1's recno is less than cp2's recno
+ * recnos are equal, both deleted, and cp1's order is less than cp2's
+ * recnos are equal, cp1 deleted, and cp2 not deleted
+ */
+#define C_LESSTHAN(cp1, cp2) \
+ (((cp1)->recno < (cp2)->recno) || \
+ (((cp1)->recno == (cp2)->recno) && \
+ ((CD_ISSET((cp1)) && CD_ISSET((cp2)) && (cp1)->order < (cp2)->order) || \
+ (CD_ISSET((cp1)) && !CD_ISSET((cp2))))))
+
+/*
+ * cp1 is equal to cp2 iff their recnos and delete flags are identical,
+ * and if the delete flag is set their orders are also identical.
+ */
+#define C_EQUAL(cp1, cp2) \
+ ((cp1)->recno == (cp2)->recno && CD_ISSET((cp1)) == CD_ISSET((cp2)) && \
+ (!CD_ISSET((cp1)) || (cp1)->order == (cp2)->order))
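+
+/*
+ * A worked example of the ordering above, with hypothetical cursors: given
+ * cp1 (recno 7, deleted, order 1), cp2 (recno 7, deleted, order 2) and
+ * cp3 (recno 7, not deleted), C_LESSTHAN(cp1, cp2) holds because equal
+ * recnos fall back to the order field, C_LESSTHAN(cp1, cp3) holds because
+ * a deleted cursor sorts before a live one at the same recno, and
+ * C_EQUAL(cp1, cp2) does not hold because their orders differ.
+ */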
+
+/*
+ * Do we need to log the current cursor adjustment?
+ */
+#define CURADJ_LOG(dbc) \
+ (DBC_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL)
+
+/*
+ * After a search, copy the found page into the cursor, discarding any
+ * currently held lock.
+ */
+#define STACK_TO_CURSOR(cp) { \
+ (cp)->page = (cp)->csp->page; \
+ (cp)->pgno = (cp)->csp->page->pgno; \
+ (cp)->indx = (cp)->csp->indx; \
+ (void)__TLPUT(dbc, (cp)->lock); \
+ (cp)->lock = (cp)->csp->lock; \
+ (cp)->lock_mode = (cp)->csp->lock_mode; \
+}
+
+/*
+ * __ram_open --
+ * Recno open function.
+ *
+ * PUBLIC: int __ram_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__ram_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+ DBC *dbc;
+ int ret, t_ret;
+
+ COMPQUIET(name, NULL);
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __bam_stat;
+
+ /* Start up the tree. */
+ if ((ret = __bam_read_root(dbp, txn, base_pgno, flags)) != 0)
+ return (ret);
+
+ /*
+ * If the user specified a source tree, open it and map it in.
+ *
+ * !!!
+ * We don't complain if the user specified transactions or threads.
+ * It's possible to make it work, but you'd better know what you're
+ * doing!
+ */
+ if (t->re_source != NULL && (ret = __ram_source(dbp)) != 0)
+ return (ret);
+
+ /* If we're snapshotting an underlying source file, do it now. */
+ if (F_ISSET(dbp, DB_AM_SNAPSHOT)) {
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Do the snapshot. */
+ if ((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0 && ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __ram_append --
+ * Recno append function.
+ *
+ * PUBLIC: int __ram_append __P((DBC *, DBT *, DBT *));
+ */
+int
+__ram_append(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ BTREE_CURSOR *cp;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Make sure we've read in all of the backing source file. If
+ * we found the record or it simply didn't exist, add the
+ * user's record.
+ */
+ ret = __ram_update(dbc, DB_MAX_RECORDS, 0);
+ if (ret == 0 || ret == DB_NOTFOUND)
+ ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0);
+
+ /* Return the record number. */
+ if (ret == 0)
+ ret = __db_retcopy(dbc->dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_del --
+ * Recno cursor->c_del function.
+ *
+ * PUBLIC: int __ram_c_del __P((DBC *));
+ */
+int
+__ram_c_del(dbc)
+ DBC *dbc;
+{
+ BKEYDATA bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ DBT hdr, data;
+ EPG *epg;
+ int exact, ret, stack;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ stack = 0;
+
+ /*
+ * The semantics of cursors during delete are as follows: in
+ * non-renumbering recnos, records are replaced with a marker
+ * containing a delete flag. If the record referenced by this cursor
+ * has already been deleted, we will detect that as part of the delete
+ * operation, and fail.
+ *
+ * In renumbering recnos, cursors which represent deleted items
+ * are flagged with the C_DELETED flag, and it is an error to
+ * call c_del a second time without an intervening cursor motion.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+
+ /* Search the tree for the key; delete only deletes exact matches. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno, S_DELETE, 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ stack = 1;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * If re-numbering records, the on-page deleted flag can only mean
+ * that this record was implicitly created. Applications aren't
+ * permitted to delete records they never created, return an error.
+ *
+ * If not re-numbering records, the on-page deleted flag means that
+ * this record was implicitly created, or, was deleted at some time.
+ * The former is an error because applications aren't permitted to
+ * delete records they never created, the latter is an error because
+ * if the record was "deleted", we could never have found it.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (F_ISSET(cp, C_RENUMBER)) {
+ /* Delete the item, adjust the counts, adjust the cursors. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+ __bam_adjust(dbc, -1);
+ if (__ram_ca(dbc, CA_DELETE) > 0 &&
+ CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp, dbc->txn,
+ &lsn, 0, CA_DELETE, cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+
+ /*
+ * If the page is empty, delete it.
+ *
+ * We never delete a root page. First, root pages of primary
+ * databases never go away, recno or otherwise. However, if
+ * it's the root page of an off-page duplicates database, then
+ * it can be deleted. We don't delete it here because we have
+ * no way of telling the primary database page holder (e.g.,
+ * the hash access method) that its page element should be cleaned
+ * up because the underlying tree is gone. So, we keep the page
+ * around until the last cursor referencing the empty tree is
+ * closed, and then clean it up.
+ */
+ if (NUM_ENT(cp->page) == 0 && PGNO(cp->page) != cp->root) {
+ /*
+ * We already have a locked stack of pages. However,
+ * there are likely entries in the stack that aren't
+ * going to be emptied by removing the single reference
+ * to the emptied page (or one of its parents).
+ */
+ for (epg = cp->csp; epg >= cp->sp; --epg)
+ if (NUM_ENT(epg->page) > 1)
+ break;
+
+ /*
+ * We want to delete a single item out of the last page
+ * that we're not deleting.
+ */
+ ret = __bam_dpages(dbc, epg);
+
+ /*
+ * Regardless of the return from __bam_dpages, it will
+ * discard our stack and pinned page.
+ */
+ stack = 0;
+ cp->page = NULL;
+ }
+ } else {
+ /* Use a delete/put pair to replace the record with a marker. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+
+ B_TSET(bk.type, B_KEYDATA, 1);
+ bk.len = 0;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bk;
+ hdr.size = SSZA(BKEYDATA, data);
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)"";
+ data.size = 0;
+ if ((ret = __db_pitem(dbc,
+ cp->page, cp->indx, BKEYDATA_SIZE(0), &hdr, &data)) != 0)
+ goto err;
+ }
+
+ t->re_modified = 1;
+
+err: if (stack)
+ __bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_get --
+ * Recno cursor->c_get function.
+ *
+ * PUBLIC: int __ram_c_get
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int cmp, exact, ret;
+
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
+retry: switch (flags) {
+ case DB_CURRENT:
+ /*
+ * If we're using mutable records and the deleted flag is
+ * set, the cursor is pointing at a nonexistent record;
+ * return an error.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+ break;
+ case DB_NEXT_DUP:
+ /*
+ * If we're not in an off-page dup set, we know there's no
+ * next duplicate since recnos don't have them. If we
+ * are in an off-page dup set, the next item assuredly is
+ * a dup, so we set flags to DB_NEXT and keep going.
+ */
+ if (!F_ISSET(dbc, DBC_OPD))
+ return (DB_NOTFOUND);
+ /* FALLTHROUGH */
+ case DB_NEXT_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_NEXT
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ flags = DB_NEXT;
+ /*
+ * If record numbers are mutable: if we just deleted a record,
+ * we have to avoid incrementing the record number so that we
+ * return the right record by virtue of renumbering the tree.
+ */
+ if (CD_ISSET(cp))
+ break;
+
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ cp->recno = 1;
+ break;
+ case DB_PREV_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_PREV
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_PREV:
+ flags = DB_PREV;
+ if (cp->recno != RECNO_OOB) {
+ if (cp->recno == 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ flags = DB_PREV;
+ if (((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = __bam_nrecs(dbc, &cp->recno)) != 0)
+ goto err;
+ if (cp->recno == 0) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ /*
+ * If we're doing a join and these are offpage dups,
+ * we want to keep searching forward from after the
+ * current cursor position. Increment the recno by 1,
+ * then proceed as for a DB_SET.
+ *
+ * Otherwise, we know there are no additional matching
+ * data, as recnos don't have dups; return DB_NOTFOUND.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno++;
+ break;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * If we're searching a set of off-page dups, we start
+ * a new linear search from the first record. Otherwise,
+ * we compare the single data item associated with the
+ * requested record for a match.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_SET:
+ case DB_SET_RANGE:
+ if ((ret = __ram_getno(dbc, key, &cp->recno, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__ram_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * For DB_PREV, DB_LAST, DB_SET and DB_SET_RANGE, we have already
+ * called __ram_update() to make sure sufficient records have been
+ * read from the backing source file. Do it now for DB_CURRENT (if
+ * the current record was deleted we may need more records from the
+ * backing file for a DB_CURRENT operation), DB_FIRST and DB_NEXT.
+ * (We don't have to test for flags == DB_FIRST, because the switch
+ * statement above re-set flags to DB_NEXT in that case.)
+ */
+ if ((flags == DB_NEXT || flags == DB_CURRENT) && ((ret =
+ __ram_update(dbc, cp->recno, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+
+ for (;; ++cp->recno) {
+ /* Search the tree for the record. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno,
+ F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+ 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * If re-numbering records, the on-page deleted flag means this
+ * record was implicitly created. If not re-numbering records,
+ * the on-page deleted flag means this record was implicitly
+ * created, or, it was deleted at some time. Regardless, we
+ * skip such records if doing cursor next/prev operations or
+ * walking through off-page duplicates, and fail if they were
+ * requested explicitly by the application.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type))
+ switch (flags) {
+ case DB_NEXT:
+ case DB_PREV:
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ goto retry;
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * If we're an OPD tree, we don't care about
+ * matching a record number on a DB_GET_BOTH
+ * -- everything belongs to the same tree. A
+ * normal recno should give up and return
+ * DB_NOTFOUND if the matching recno is deleted.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ continue;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
+ default:
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ break;
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ } else
+ break;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE)
+ ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen);
+ F_SET(key, DB_DBT_ISSET);
+ }
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
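For reference, the cursor-get flags handled above look like this from the public API. A minimal sketch, assuming the bundled libdb's 4.1-style DB->open() signature (with a DB_TXN argument); dump_recno and "recno.db" are illustrative names, not part of libdb. It simply walks a recno database with DB_NEXT, which __ram_c_get turns into successive record numbers (DB_FIRST being remapped to DB_NEXT at record 1):

	#include <stdio.h>
	#include <string.h>
	#include "db.h"

	/* Walk every record in a recno database, printing recno and data. */
	static int
	dump_recno(void)
	{
		DB *dbp;
		DBC *dbc;
		DBT key, data;
		int ret, t_ret;

		if ((ret = db_create(&dbp, NULL, 0)) != 0)
			return (ret);
		if ((ret = dbp->open(dbp,
		    NULL, "recno.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0)
			goto err;
		if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
			goto err;

		memset(&key, 0, sizeof(key));
		memset(&data, 0, sizeof(data));

		/* Deleted/implicit records are skipped by __ram_c_get. */
		while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
			printf("%lu: %.*s\n",
			    (unsigned long)*(db_recno_t *)key.data,
			    (int)data.size, (char *)data.data);
		if (ret == DB_NOTFOUND)
			ret = 0;

		if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
			ret = t_ret;
	err:	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
			ret = t_ret;
		return (ret);
	}
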
+/*
+ * __ram_c_put --
+ * Recno cursor->c_put function.
+ *
+ * PUBLIC: int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ int exact, nc, ret, t_ret;
+ u_int32_t iiflags;
+ void *arg;
+
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * DB_KEYFIRST and DB_KEYLAST mean different things if they're
+ * used in an off-page duplicate tree. If we're an off-page
+ * duplicate tree, they really mean "put at the beginning of the
+ * tree" and "put at the end of the tree" respectively, so translate
+ * them to something else.
+ */
+ if (F_ISSET(dbc, DBC_OPD))
+ switch (flags) {
+ case DB_KEYFIRST:
+ cp->recno = 1;
+ flags = DB_BEFORE;
+ break;
+ case DB_KEYLAST:
+ if ((ret = __ram_add(dbc,
+ &cp->recno, data, DB_APPEND, 0)) != 0)
+ return (ret);
+ if (CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)))
+ return (ret);
+ return (0);
+ }
+
+ /*
+ * Handle normal DB_KEYFIRST/DB_KEYLAST; for a recno, which has
+ * no duplicates, these are identical and mean "put the given
+ * datum at the given recno".
+ *
+ * Note that the code here used to be in __ram_put; now, we
+ * go through the access-method-common __db_put function, which
+ * handles DB_NOOVERWRITE, so we and __ram_add don't have to.
+ */
+ if (flags == DB_KEYFIRST || flags == DB_KEYLAST) {
+ ret = __ram_getno(dbc, key, &cp->recno, 1);
+ if (ret == 0 || ret == DB_NOTFOUND)
+ ret = __ram_add(dbc, &cp->recno, data, 0, 0);
+ return (ret);
+ }
+
+ /*
+ * If we're putting with a cursor that's marked C_DELETED, we need to
+ * take special care; the cursor doesn't "really" reference the item
+ * corresponding to its current recno, but instead is "between" that
+ * record and the current one. Translate the actual insert into
+ * DB_BEFORE, and let __ram_ca work out the gory details of what
+ * should wind up pointing where.
+ */
+ if (CD_ISSET(cp))
+ iiflags = DB_BEFORE;
+ else
+ iiflags = flags;
+
+split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
+ goto err;
+ /*
+ * An inexact match is okay; it just means we're one record past the
+ * end, which is reasonable if we're marked deleted.
+ */
+ DB_ASSERT(exact || CD_ISSET(cp));
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ ret = __bam_iitem(dbc, key, data, iiflags, 0);
+ t_ret = __bam_stkrel(dbc, STK_CLRDBC);
+
+ if (t_ret != 0 && (ret == 0 || ret == DB_NEEDSPLIT))
+ ret = t_ret;
+ else if (ret == DB_NEEDSPLIT) {
+ arg = &cp->recno;
+ if ((ret = __bam_split(dbc, arg, NULL)) != 0)
+ goto err;
+ goto split;
+ }
+ if (ret != 0)
+ goto err;
+
+ switch (flags) { /* Adjust the cursors. */
+ case DB_AFTER:
+ nc = __ram_ca(dbc, CA_IAFTER);
+
+ /*
+ * We only need to adjust this cursor forward if we truly added
+ * the item after the current recno, rather than remapping it
+ * to DB_BEFORE.
+ */
+ if (iiflags == DB_AFTER)
+ ++cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IAFTER,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_BEFORE:
+ nc = __ram_ca(dbc, CA_IBEFORE);
+ --cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IBEFORE,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_CURRENT:
+ /*
+ * We only need to do an adjustment if we actually
+ * added an item, which we only would have done if the
+ * cursor was marked deleted.
+ *
+ * Only log if __ram_ca found any relevant cursors.
+ */
+ if (CD_ISSET(cp) && __ram_ca(dbc, CA_ICURRENT) > 0 &&
+ CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ }
+
+ /* Return the key if we've created a new record. */
+ if (!F_ISSET(dbc, DBC_OPD) && (flags == DB_AFTER || flags == DB_BEFORE))
+ ret = __db_retcopy(dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
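A hedged illustration of the cursor-put flags handled above: on a renumbering recno database (DB_RENUMBER set before open), DB_BEFORE and DB_AFTER insert relative to the current cursor position and ignore the key on input. The sketch below assumes a cursor already positioned by a prior c_get; insert_before is an illustrative name only.

	#include <stdio.h>
	#include <string.h>
	#include "db.h"

	/* Insert a datum before the record `dbc` currently references. */
	static int
	insert_before(DBC *dbc, void *buf, u_int32_t len)
	{
		DBT key, data;
		int ret;

		memset(&key, 0, sizeof(key));
		memset(&data, 0, sizeof(data));
		data.data = buf;
		data.size = len;

		/*
		 * The key is ignored on input for DB_BEFORE; on success the
		 * new record number is returned through it (see the tail of
		 * __ram_c_put above), and later records shift up by one.
		 */
		if ((ret = dbc->c_put(dbc, &key, &data, DB_BEFORE)) == 0)
			printf("inserted as record %lu\n",
			    (unsigned long)*(db_recno_t *)key.data);
		return (ret);
	}
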
+/*
+ * __ram_ca --
+ * Adjust cursors. Returns the number of relevant cursors.
+ *
+ * PUBLIC: int __ram_ca __P((DBC *, ca_recno_arg));
+ */
+int
+__ram_ca(dbc_arg, op)
+ DBC *dbc_arg;
+ ca_recno_arg op;
+{
+ BTREE_CURSOR *cp, *cp_arg;
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_recno_t recno;
+ int adjusted, found;
+ u_int32_t order;
+
+ dbp = dbc_arg->dbp;
+ dbenv = dbp->dbenv;
+ cp_arg = (BTREE_CURSOR *)dbc_arg->internal;
+ recno = cp_arg->recno;
+
+ found = 0;
+
+ /*
+ * It only makes sense to adjust cursors if we're a renumbering
+ * recno; we should only be called if this is one.
+ */
+ DB_ASSERT(F_ISSET(cp_arg, C_RENUMBER));
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ /*
+ * If we're doing a delete, we need to find the highest
+ * order of any cursor currently pointing at this item,
+ * so we can assign a higher order to the newly deleted
+ * cursor. Unfortunately, this requires a second pass through
+ * the cursor list.
+ */
+ if (op == CA_DELETE) {
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root == cp->root &&
+ recno == cp->recno && CD_ISSET(cp) &&
+ order <= cp->order)
+ order = cp->order + 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ } else
+ order = INVALID_ORDER;
+
+ /* Now go through and do the actual adjustments. */
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root != cp->root)
+ continue;
+ ++found;
+ adjusted = 0;
+ switch (op) {
+ case CA_DELETE:
+ if (recno < cp->recno) {
+ --cp->recno;
+ /*
+ * If the adjustment made them equal,
+ * we have to merge the orders.
+ */
+ if (recno == cp->recno && CD_ISSET(cp))
+ cp->order += order;
+ } else if (recno == cp->recno &&
+ !CD_ISSET(cp)) {
+ CD_SET(cp);
+ cp->order = order;
+ }
+ break;
+ case CA_IBEFORE:
+ /*
+ * IBEFORE is just like IAFTER, except that we
+ * adjust cursors on the current record too.
+ */
+ if (C_EQUAL(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ goto iafter;
+ case CA_ICURRENT:
+
+ /*
+ * If the original cursor wasn't deleted, we
+ * just did a replacement and so there's no
+ * need to adjust anything--we shouldn't have
+ * gotten this far. Otherwise, we behave
+ * much like an IAFTER, except that all
+ * cursors pointing to the current item get
+ * marked undeleted and point to the new
+ * item.
+ */
+ DB_ASSERT(CD_ISSET(cp_arg));
+ if (C_EQUAL(cp_arg, cp)) {
+ CD_CLR(cp);
+ break;
+ }
+ /* FALLTHROUGH */
+ case CA_IAFTER:
+iafter: if (!adjusted && C_LESSTHAN(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ if (recno == cp->recno && adjusted)
+ /*
+ * If we've moved this cursor's recno,
+ * split its order number--i.e.,
+ * decrement it by enough so that
+ * the lowest cursor moved has order 1.
+ * cp_arg->order is the split point,
+ * so decrement by one less than that.
+ */
+ cp->order -= (cp_arg->order - 1);
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (found);
+}
+
+/*
+ * __ram_getno --
+ * Check the user's record number, and make sure we've seen it.
+ *
+ * PUBLIC: int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+ */
+int
+__ram_getno(dbc, key, rep, can_create)
+ DBC *dbc;
+ const DBT *key;
+ db_recno_t *rep;
+ int can_create;
+{
+ DB *dbp;
+ db_recno_t recno;
+
+ dbp = dbc->dbp;
+
+ /* Check the user's record number. */
+ if ((recno = *(db_recno_t *)key->data) == 0) {
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+ }
+ if (rep != NULL)
+ *rep = recno;
+
+ /*
+ * Btree can neither create records nor read them in. Recno can
+ * do both, see if we can find the record.
+ */
+ return (dbc->dbtype == DB_RECNO ?
+ __ram_update(dbc, recno, can_create) : 0);
+}
+
+/*
+ * __ram_update --
+ * Ensure the tree has records up to and including the specified one.
+ */
+static int
+__ram_update(dbc, recno, can_create)
+ DBC *dbc;
+ db_recno_t recno;
+ int can_create;
+{
+ BTREE *t;
+ DB *dbp;
+ DBT *rdata;
+ db_recno_t nrecs;
+ int ret;
+
+ dbp = dbc->dbp;
+ t = dbp->bt_internal;
+
+ /*
+ * If we can't create records and we've read the entire backing input
+ * file, we're done.
+ */
+ if (!can_create && t->re_eof)
+ return (0);
+
+ /*
+ * If we haven't seen this record yet, try to get it from the original
+ * file.
+ */
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ if (!t->re_eof && recno > nrecs) {
+ if ((ret = __ram_sread(dbc, recno)) != 0 && ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ }
+
+ /*
+ * If we can create records, create empty ones up to the requested
+ * record.
+ */
+ if (!can_create || recno <= nrecs + 1)
+ return (0);
+
+ rdata = &dbc->my_rdata;
+ rdata->flags = 0;
+ rdata->size = 0;
+
+ while (recno > ++nrecs)
+ if ((ret = __ram_add(dbc,
+ &nrecs, rdata, 0, BI_DELETED)) != 0)
+ return (ret);
+ return (0);
+}
+
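The implicit-record behaviour described above is visible through the public API: storing a record past the end of a recno database causes the intervening records to be created as deleted placeholders, which subsequent gets report as DB_KEYEMPTY. A minimal sketch, assuming an already-open non-renumbering DB_RECNO handle; demo_implicit is an illustrative name only.

	#include <string.h>
	#include "db.h"

	/* Put record 10 into an empty recno database, then probe record 5. */
	static int
	demo_implicit(DB *dbp)
	{
		DBT key, data;
		db_recno_t recno;
		int ret;

		memset(&key, 0, sizeof(key));
		memset(&data, 0, sizeof(data));
		key.data = &recno;
		key.size = sizeof(recno);

		recno = 10;
		data.data = "ten";
		data.size = 3;
		/* Records 1-9 are created as empty, deleted entries. */
		if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
			return (ret);

		recno = 5;
		memset(&data, 0, sizeof(data));
		ret = dbp->get(dbp, NULL, &key, &data, 0);
		/* ret is expected to be DB_KEYEMPTY here. */
		return (ret == DB_KEYEMPTY ? 0 : ret);
	}
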
+/*
+ * __ram_source --
+ * Load information about the backing file.
+ */
+static int
+__ram_source(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ char *source;
+ int ret;
+
+ t = dbp->bt_internal;
+
+ /* Find the real name, and swap out the one we had before. */
+ if ((ret = __db_appname(dbp->dbenv,
+ DB_APP_DATA, t->re_source, 0, NULL, &source)) != 0)
+ return (ret);
+ __os_free(dbp->dbenv, t->re_source);
+ t->re_source = source;
+
+ /*
+ * !!!
+ * It's possible that the backing source file is read-only. We don't
+ * much care other than we'll complain if there are any modifications
+ * when it comes time to write the database back to the source.
+ */
+ if ((t->re_fp = fopen(t->re_source, "r")) == NULL) {
+ ret = errno;
+ __db_err(dbp->dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ return (ret);
+ }
+
+ t->re_eof = 0;
+ return (0);
+}
+
+/*
+ * __ram_writeback --
+ * Rewrite the backing file.
+ *
+ * PUBLIC: int __ram_writeback __P((DB *));
+ */
+int
+__ram_writeback(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DBT key, data;
+ FILE *fp;
+ db_recno_t keyno;
+ int ret, t_ret;
+ u_int8_t delim, *pad;
+
+ t = dbp->bt_internal;
+ dbenv = dbp->dbenv;
+ fp = NULL;
+ pad = NULL;
+
+ /* If the file wasn't modified, we're done. */
+ if (!t->re_modified)
+ return (0);
+
+ /* If there's no backing source file, we're done. */
+ if (t->re_source == NULL) {
+ t->re_modified = 0;
+ return (0);
+ }
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Read any remaining records into the tree.
+ *
+ * !!!
+ * This is why we can't support transactions when applications specify
+ * backing (re_source) files. At this point we have to read in the
+ * rest of the records from the file so that we can write all of the
+ * records back out again, which could modify a page for which we'd
+ * have to log changes and which we don't have locked. This could be
+ * partially fixed by taking a snapshot of the entire file during the
+ * DB->open as DB->open is transaction protected. But, if a checkpoint
+ * occurs then, the part of the log holding the copy of the file could
+ * be discarded, and that would make it impossible to recover in the
+ * face of disaster. This could all probably be fixed, but it would
+ * require transaction protecting the backing source file.
+ *
+ * XXX
+ * This could be made to work now that we have transactions protecting
+ * file operations. Margo has specifically asked for the privilege of
+ * doing this work.
+ */
+ if ((ret =
+ __ram_update(dbc, DB_MAX_RECORDS, 0)) != 0 && ret != DB_NOTFOUND)
+ return (ret);
+
+ /*
+ * Close any existing file handle and re-open the file, truncating it.
+ */
+ if (t->re_fp != NULL) {
+ if (fclose(t->re_fp) != 0) {
+ ret = errno;
+ goto err;
+ }
+ t->re_fp = NULL;
+ }
+ if ((fp = fopen(t->re_source, "w")) == NULL) {
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * We step through the records, writing each one out. Use the record
+ * number and the dbp->get() function, instead of a cursor, so we find
+ * and write out "deleted" or non-existent records. The DB handle may
+ * be threaded, so allocate memory as we go.
+ */
+ memset(&key, 0, sizeof(key));
+ key.size = sizeof(db_recno_t);
+ key.data = &keyno;
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
+
+ /*
+ * We'll need the delimiter if we're doing variable-length records,
+ * and the pad character if we're doing fixed-length records.
+ */
+ delim = t->re_delim;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ if ((ret = __os_malloc(dbenv, t->re_len, &pad)) != 0)
+ goto err;
+ memset(pad, t->re_pad, t->re_len);
+ }
+ for (keyno = 1;; ++keyno) {
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ if (data.size != 0 && (u_int32_t)fwrite(
+ data.data, 1, data.size, fp) != data.size)
+ goto write_err;
+ break;
+ case DB_KEYEMPTY:
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ (u_int32_t)fwrite(pad, 1, t->re_len, fp) !=
+ t->re_len)
+ goto write_err;
+ break;
+ case DB_NOTFOUND:
+ ret = 0;
+ goto done;
+ default:
+ goto err;
+ }
+ if (!F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ fwrite(&delim, 1, 1, fp) != 1) {
+write_err: ret = errno;
+ __db_err(dbp->dbenv,
+ "%s: write failed to backing file: %s",
+ t->re_source, strerror(ret));
+ goto err;
+ }
+ }
+
+err:
+done: /* Close the file descriptor. */
+ if (fp != NULL && fclose(fp) != 0) {
+ if (ret == 0)
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(errno));
+ }
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard memory allocated to hold the data items. */
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ if (pad != NULL)
+ __os_free(dbenv, pad);
+
+ if (ret == 0)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
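From the application side, the backing file that __ram_source opens and __ram_writeback rewrites is configured before DB->open. A sketch under the same 4.1-style open-signature assumption as above; the file names and open_backed are placeholders of our own choosing.

	#include "db.h"

	/*
	 * Open a recno database backed by a flat text file; when records
	 * have been modified, DB->close (or DB->sync) flushes the tree back
	 * to the text file via __ram_writeback.
	 */
	static int
	open_backed(DB **dbpp)
	{
		DB *dbp;
		int ret;

		if ((ret = db_create(&dbp, NULL, 0)) != 0)
			return (ret);
		/* Variable-length records, one per '\n'-terminated line. */
		if ((ret = dbp->set_re_source(dbp, "plain.txt")) != 0 ||
		    (ret = dbp->set_re_delim(dbp, '\n')) != 0 ||
		    (ret = dbp->open(dbp,
		    NULL, "recno.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
			(void)dbp->close(dbp, 0);
			return (ret);
		}
		*dbpp = dbp;
		return (0);
	}
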
+/*
+ * __ram_sread --
+ * Read records from a source file.
+ */
+static int
+__ram_sread(dbc, top)
+ DBC *dbc;
+ db_recno_t top;
+{
+ BTREE *t;
+ DB *dbp;
+ DBT data, *rdata;
+ db_recno_t recno;
+ size_t len;
+ int ch, ret, was_modified;
+
+ t = dbc->dbp->bt_internal;
+ dbp = dbc->dbp;
+ was_modified = t->re_modified;
+
+ if ((ret = __bam_nrecs(dbc, &recno)) != 0)
+ return (ret);
+
+ /*
+ * Use the record key return memory; it's only a short-term use.
+ * The record data return memory is used by __bam_iitem, which
+ * we'll indirectly call, so use the key so as not to collide.
+ */
+ len = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : 256;
+ rdata = &dbc->my_rkey;
+ if (rdata->ulen < len) {
+ if ((ret = __os_realloc(
+ dbp->dbenv, len, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ }
+ rdata->ulen = (u_int32_t)len;
+ }
+
+ memset(&data, 0, sizeof(data));
+ while (recno < top) {
+ data.data = rdata->data;
+ data.size = 0;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
+ for (len = t->re_len; len > 0; --len) {
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ }
+ else
+ for (;;) {
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
+ if (ch == t->re_delim)
+ break;
+
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ if (data.size == rdata->ulen) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ rdata->ulen *= 2,
+ &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ } else
+ data.data = rdata->data;
+ }
+ }
+
+ /*
+ * Another process may have read this record from the input
+ * file and stored it into the database already, in which
+ * case we don't need to repeat that operation. We detect
+ * this by checking if the last record we've read is greater than
+ * or equal to the number of records in the database.
+ */
+ if (t->re_last >= recno) {
+ ++recno;
+ if ((ret = __ram_add(dbc, &recno, &data, 0, 0)) != 0)
+ goto err;
+ }
+ ++t->re_last;
+ }
+
+ if (0) {
+eof: t->re_eof = 1;
+ ret = DB_NOTFOUND;
+ }
+err: if (!was_modified)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
+/*
+ * __ram_add --
+ * Add records into the tree.
+ */
+static int
+__ram_add(dbc, recnop, data, flags, bi_flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ DBT *data;
+ u_int32_t flags, bi_flags;
+{
+ BTREE_CURSOR *cp;
+ int exact, ret, stack;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+retry: /* Find the slot for insertion. */
+ if ((ret = __bam_rsearch(dbc, recnop,
+ S_INSERT | (flags == DB_APPEND ? S_APPEND : 0), 1, &exact)) != 0)
+ return (ret);
+ stack = 1;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, *recnop)) != 0)
+ goto err;
+
+ /*
+ * Select the arguments for __bam_iitem() and do the insert. If the
+ * key is an exact match, or we're replacing the data item with a
+ * new data item, replace the current item. If the key isn't an exact
+ * match, we're inserting a new key/data pair, before the search
+ * location.
+ */
+ switch (ret = __bam_iitem(dbc,
+ NULL, data, exact ? DB_CURRENT : DB_BEFORE, bi_flags)) {
+ case 0:
+ /*
+ * Don't adjust anything.
+ *
+ * If we inserted a record, no cursors need adjusting because
+ * the only new record it's possible to insert is at the very
+ * end of the tree. The necessary adjustments to the internal
+ * page counts were made by __bam_iitem().
+ *
+ * If we overwrote a record, no cursors need adjusting because
+ * future DBcursor->get calls will simply return the underlying
+ * record (there's no adjustment made for the DB_CURRENT flag
+ * when a cursor get operation immediately follows a cursor
+ * delete operation, and the normal adjustment for the DB_NEXT
+ * flag is still correct).
+ */
+ break;
+ case DB_NEEDSPLIT:
+ /* Discard the stack of pages and split the page. */
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ stack = 0;
+
+ if ((ret = __bam_split(dbc, recnop, NULL)) != 0)
+ goto err;
+
+ goto retry;
+ /* NOTREACHED */
+ default:
+ goto err;
+ }
+
+err: if (stack)
+ __bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
diff --git a/libdb/btree/bt_rsearch.c b/libdb/btree/bt_rsearch.c
new file mode 100644
index 0000000..486584d
--- /dev/null
+++ b/libdb/btree/bt_rsearch.c
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_rsearch --
+ * Search a btree for a record number.
+ *
+ * PUBLIC: int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+ */
+int
+__bam_rsearch(dbc, recnop, flags, stop, exactp)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+ int stop, *exactp;
+{
+ BINTERNAL *bi;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t adjust, deloffset, indx, top;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno, t_recno, total;
+ int ret, stack;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ BT_STK_CLR(cp);
+
+ /*
+ * There are several ways we search a btree tree. The flags argument
+ * specifies if we're acquiring read or write locks and if we are
+ * locking pairs of pages. In addition, if we're adding or deleting
+ * an item, we have to lock the entire tree, regardless. See btree.h
+ * for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+ * is in tree, which we don't know until we acquire the root page. So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+ pg = cp->root;
+ stack = LF_ISSET(S_STACK) ? 1 : 0;
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ stack = 1;
+ }
+
+ /*
+ * If appending to the tree, set the record number now -- we have the
+ * root page locked.
+ *
+ * Delete only deletes exact matches, read only returns exact matches.
+ * Note, this is different from __bam_search(), which returns non-exact
+ * matches for read.
+ *
+ * The record may not exist. We can only return the correct location
+ * for the record immediately after the last record in the tree, so do
+ * a fast check now.
+ */
+ total = RE_NREC(h);
+ if (LF_ISSET(S_APPEND)) {
+ *exactp = 0;
+ *recnop = recno = total + 1;
+ } else {
+ recno = *recnop;
+ if (recno <= total)
+ *exactp = 1;
+ else {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) || recno > total + 1) {
+ /*
+ * Keep the page locked for serializability.
+ *
+ * XXX
+ * This leaves the root page locked, which will
+ * eliminate any concurrency. A possible fix
+ * would be to lock the last leaf page instead.
+ */
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ return (DB_NOTFOUND);
+ }
+ }
+ }
+
+ /*
+ * !!!
+ * Record numbers in the tree are 0-based, but the recno is
+ * 1-based. All of the calculations below have to take this
+ * into account.
+ */
+ for (total = 0;;) {
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ recno -= total;
+ /*
+ * There may be logically deleted records on the page.
+ * If there are enough, the record may not exist.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ adjust = P_INDX;
+ deloffset = O_INDX;
+ } else {
+ adjust = O_INDX;
+ deloffset = 0;
+ }
+ for (t_recno = 0, indx = 0;; indx += adjust) {
+ if (indx >= NUM_ENT(h)) {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) ||
+ recno > t_recno + 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ }
+ if (!B_DISSET(GET_BKEYDATA(dbp, h,
+ indx + deloffset)->type) &&
+ ++t_recno == recno)
+ break;
+ }
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IBTREE:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (++indx == top || total + bi->nrecs >= recno)
+ break;
+ total += bi->nrecs;
+ }
+ pg = bi->pgno;
+ break;
+ case P_LRECNO:
+ recno -= total;
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ --recno;
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, recno, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IRECNO:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ ri = GET_RINTERNAL(dbp, h, indx);
+ if (++indx == top || total + ri->nrecs >= recno)
+ break;
+ total += ri->nrecs;
+ }
+ pg = ri->pgno;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+ --indx;
+
+ if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a pointer to the next
+ * page in the stack. If we do, write lock it and
+ * never unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)mpf->put(mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
+/*
+ * __bam_adjust --
+ * Adjust the tree after adding or deleting a record.
+ *
+ * PUBLIC: int __bam_adjust __P((DBC *, int32_t));
+ */
+int
+__bam_adjust(dbc, adjust)
+ DBC *dbc;
+ int32_t adjust;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ PAGE *h;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /* Update the record counts for the tree. */
+ for (epg = cp->sp; epg <= cp->csp; ++epg) {
+ h = epg->page;
+ if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) {
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(h), 0, PGNO(h), &LSN(h),
+ (u_int32_t)epg->indx, adjust,
+ PGNO(h) == root_pgno ?
+ CAD_UPDATEROOT : 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+
+ if (TYPE(h) == P_IBTREE)
+ GET_BINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
+ else
+ GET_RINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
+
+ if (PGNO(h) == root_pgno)
+ RE_NREC_ADJ(h, adjust);
+
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __bam_nrecs --
+ * Return the number of records in the tree.
+ *
+ * PUBLIC: int __bam_nrecs __P((DBC *, db_recno_t *));
+ */
+int
+__bam_nrecs(dbc, rep)
+ DBC *dbc;
+ db_recno_t *rep;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ pgno = dbc->internal->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ *rep = RE_NREC(h);
+
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+
+ return (0);
+}
+
+/*
+ * __bam_total --
+ * Return the number of records below a page.
+ *
+ * PUBLIC: db_recno_t __bam_total __P((DB *, PAGE *));
+ */
+db_recno_t
+__bam_total(dbp, h)
+ DB *dbp;
+ PAGE *h;
+{
+ db_recno_t nrecs;
+ db_indx_t indx, top;
+
+ nrecs = 0;
+ top = NUM_ENT(h);
+
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ /* Check for logically deleted records. */
+ for (indx = 0; indx < top; indx += P_INDX)
+ if (!B_DISSET(
+ GET_BKEYDATA(dbp, h, indx + O_INDX)->type))
+ ++nrecs;
+ break;
+ case P_LDUP:
+ /* Check for logically deleted records. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
+ ++nrecs;
+ break;
+ case P_IBTREE:
+ for (indx = 0; indx < top; indx += O_INDX)
+ nrecs += GET_BINTERNAL(dbp, h, indx)->nrecs;
+ break;
+ case P_LRECNO:
+ nrecs = NUM_ENT(h);
+ break;
+ case P_IRECNO:
+ for (indx = 0; indx < top; indx += O_INDX)
+ nrecs += GET_RINTERNAL(dbp, h, indx)->nrecs;
+ break;
+ }
+
+ return (nrecs);
+}
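The record-number arithmetic in __bam_rsearch above (the P_IBTREE/P_IRECNO cases and the "0-based vs. 1-based" note) reduces to: walk the internal entries, accumulate subtree record counts until the running total reaches the requested recno, then descend into that child with the recno made relative to it. A toy sketch of just that arithmetic, independent of the libdb types; pick_child is an illustrative name only.

	/*
	 * Given per-child record counts on an internal page, choose the
	 * child holding 1-based record `recno` and compute the recno
	 * relative to that child. Returns the child index, or -1 if the
	 * record is past the end of the subtree.
	 */
	static int
	pick_child(const unsigned *nrecs, int nchildren,
	    unsigned recno, unsigned *sub_recno)
	{
		unsigned total;
		int indx;

		for (total = 0, indx = 0; indx < nchildren; ++indx) {
			if (total + nrecs[indx] >= recno)
				break;
			total += nrecs[indx];
		}
		if (indx == nchildren)
			return (-1);
		*sub_recno = recno - total;	/* still 1-based in the child */
		return (indx);
	}
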
diff --git a/libdb/btree/bt_search.c b/libdb/btree/bt_search.c
new file mode 100644
index 0000000..70f7568
--- /dev/null
+++ b/libdb/btree/bt_search.c
@@ -0,0 +1,475 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_search --
+ * Search a btree for a key.
+ *
+ * PUBLIC: int __bam_search __P((DBC *, db_pgno_t,
+ * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *));
+ */
+int
+__bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ const DBT *key;
+ u_int32_t flags;
+ int stop, *exactp;
+ db_recno_t *recnop;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t base, i, indx, *inp, lim;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int adjust, cmp, deloffset, ret, stack;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ recno = 0;
+
+ BT_STK_CLR(cp);
+
+ /*
+ * There are several ways we search a btree tree. The flags argument
+ * specifies if we're acquiring read or write locks, if we position
+ * to the first or last item in a set of duplicates, if we return
+ * deleted items, and if we are locking pairs of pages. In addition,
+ * if we're modifying record numbers, we have to lock the entire tree
+ * regardless. See btree.h for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+ * is in tree, which we don't know until we acquire the root page. So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+try_again:
+ pg = root_pgno == PGNO_INVALID ? cp->root : root_pgno;
+ stack = LF_ISSET(S_STACK) && F_ISSET(cp, C_RECNUM);
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ if (!((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ /* Someone else split the root, start over. */
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ goto try_again;
+ }
+ stack = 1;
+ }
+
+ /* Choose a comparison function. */
+ func = F_ISSET(dbc, DBC_OPD) ?
+ (dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) :
+ t->bt_compare;
+
+ for (;;) {
+ inp = P_INP(dbp, h);
+ /*
+ * Do a binary search on the current page. If we're searching
+ * a Btree leaf page, we have to walk the indices in groups of
+ * two. If we're searching an internal page or a off-page dup
+ * page, they're an index per page item. If we find an exact
+ * match on a leaf page, we're done.
+ */
+ adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX;
+ for (base = 0,
+ lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) {
+ indx = base + ((lim >> 1) * adjust);
+ if ((ret =
+ __bam_cmp(dbp, key, h, indx, func, &cmp)) != 0)
+ goto err;
+ if (cmp == 0) {
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP)
+ goto found;
+ goto next;
+ }
+ if (cmp > 0) {
+ base = indx + adjust;
+ --lim;
+ }
+ }
+
+ /*
+ * No match found. Base is the smallest index greater than
+ * key and may be zero or a last + O_INDX index.
+ *
+ * If it's a leaf page, return base as the "found" value.
+ * Delete only deletes exact matches.
+ */
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) {
+ *exactp = 0;
+
+ if (LF_ISSET(S_EXACT))
+ goto notfound;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, base, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+
+ /*
+ * !!!
+ * Possibly returning a deleted record -- DB_SET_RANGE,
+ * DB_KEYFIRST and DB_KEYLAST don't require an exact
+ * match, and we don't want to walk multiple pages here
+ * to find an undeleted record. This is handled by the
+ * calling routine.
+ */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, base, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+
+ /*
+ * If it's not a leaf page, record the internal page (which is
+ * a parent page for the key). Decrement the base by 1 if it's
+ * non-zero so that if a split later occurs, the inserted page
+ * will be to the right of the saved page.
+ */
+ indx = base > 0 ? base - O_INDX : base;
+
+ /*
+ * If we're trying to calculate the record number, sum up
+ * all the record numbers on this page up to the indx point.
+ */
+next: if (recnop != NULL)
+ for (i = 0; i < indx; ++i)
+ recno += GET_BINTERNAL(dbp, h, i)->nrecs;
+
+ pg = GET_BINTERNAL(dbp, h, indx)->pgno;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ if (stop == h->level) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+ BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret);
+ (void)mpf->put(mpf, h, 0);
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * Discard our lock and return on failure. This
+ * is OK because it only happens when descending
+ * the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+ } else if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a reference to the next
+ * page in the return stack. If so, lock it and never
+ * unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)mpf->put(mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+found: *exactp = 1;
+
+ /*
+ * If we're trying to calculate the record number, add in the
+ * offset on this page and correct for the fact that records
+ * in the tree are 0-based.
+ */
+ if (recnop != NULL)
+ *recnop = recno + (indx / P_INDX) + 1;
+
+ /*
+ * If we got here, we know that we have a Btree leaf or off-page
+ * duplicates page. If it's a Btree leaf page, we have to handle
+ * on-page duplicates.
+ *
+ * If there are duplicates, go to the first/last one. This is
+ * safe because we know that we're not going to leave the page,
+ * all duplicate sets that are not on overflow pages exist on a
+ * single leaf page.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ if (LF_ISSET(S_DUPLAST))
+ while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+ inp[indx] == inp[indx + P_INDX])
+ indx += P_INDX;
+ else
+ while (indx > 0 &&
+ inp[indx] == inp[indx - P_INDX])
+ indx -= P_INDX;
+ }
+
+ /*
+ * Now check if we are allowed to return deleted items; if not, then
+ * find the next (or previous) non-deleted duplicate entry. (We do
+ * not move from the original found key on the basis of the S_DELNO
+ * flag.)
+ */
+ if (LF_ISSET(S_DELNO)) {
+ deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0;
+ if (LF_ISSET(S_DUPLAST))
+ while (B_DISSET(GET_BKEYDATA(dbp,
+ h, indx + deloffset)->type) && indx > 0 &&
+ inp[indx] == inp[indx - adjust])
+ indx -= adjust;
+ else
+ while (B_DISSET(GET_BKEYDATA(dbp,
+ h, indx + deloffset)->type) &&
+ indx < (db_indx_t)(NUM_ENT(h) - adjust) &&
+ inp[indx] == inp[indx + adjust])
+ indx += adjust;
+
+ /*
+ * If we weren't able to find a non-deleted duplicate, return
+ * DB_NOTFOUND.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type))
+ goto notfound;
+ }
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ } else {
+ BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ }
+ return (0);
+
+notfound:
+ /* Keep the page locked for serializability. */
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ ret = DB_NOTFOUND;
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
+/*
+ * __bam_stkrel --
+ * Release all pages currently held in the stack.
+ *
+ * PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t));
+ */
+int
+__bam_stkrel(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Release inner pages first.
+ *
+ * The caller must be sure that setting STK_NOLOCK will not affect
+ * either serializability or recoverability.
+ */
+ for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL) {
+ if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
+ cp->page = NULL;
+ LOCK_INIT(cp->lock);
+ }
+ if ((t_ret =
+ mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * XXX
+ * Temporary fix for #3243 -- under certain deadlock
+ * conditions we call here again and re-free the page.
+ * The correct fix is to never release a stack that
+ * doesn't hold items.
+ */
+ epg->page = NULL;
+ }
+ if (LF_ISSET(STK_NOLOCK))
+ (void)__LPUT(dbc, epg->lock);
+ else
+ (void)__TLPUT(dbc, epg->lock);
+ }
+
+ /* Clear the stack, all pages have been released. */
+ BT_STK_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __bam_stkgrow --
+ * Grow the stack.
+ *
+ * PUBLIC: int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+ */
+int
+__bam_stkgrow(dbenv, cp)
+ DB_ENV *dbenv;
+ BTREE_CURSOR *cp;
+{
+ EPG *p;
+ size_t entries;
+ int ret;
+
+ entries = cp->esp - cp->sp;
+
+ if ((ret = __os_calloc(dbenv, entries * 2, sizeof(EPG), &p)) != 0)
+ return (ret);
+ memcpy(p, cp->sp, entries * sizeof(EPG));
+ if (cp->sp != cp->stack)
+ __os_free(dbenv, cp->sp);
+ cp->sp = p;
+ cp->csp = p + entries;
+ cp->esp = p + entries * 2;
+ return (0);
+}
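The binary search at the top of __bam_search walks page indices in strides of `adjust` (two indices per logical item on a Btree leaf page, one elsewhere); isolated from the page and locking machinery it is the familiar base/lim search. A toy sketch, independent of the libdb types, with `cmp` standing in for __bam_cmp; page_bsearch is an illustrative name only.

	/*
	 * Binary-search `nent` page indices whose logical entries are
	 * `adjust` indices apart. `cmp(indx)` is assumed to return <0, 0
	 * or >0 as the search key sorts before, equal to or after the
	 * entry at `indx`. Returns 1 and sets *indxp on an exact match;
	 * otherwise returns 0 and sets *indxp to the smallest index whose
	 * entry is greater than the key.
	 */
	static int
	page_bsearch(int (*cmp)(unsigned), unsigned nent, unsigned adjust,
	    unsigned *indxp)
	{
		unsigned base, indx, lim;
		int c;

		for (base = 0, lim = nent / adjust; lim != 0; lim >>= 1) {
			indx = base + (lim >> 1) * adjust;
			if ((c = cmp(indx)) == 0) {
				*indxp = indx;
				return (1);
			}
			if (c > 0) {
				base = indx + adjust;
				--lim;
			}
		}
		*indxp = base;
		return (0);
	}
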
diff --git a/libdb/btree/bt_split.c b/libdb/btree/bt_split.c
new file mode 100644
index 0000000..8112ae3
--- /dev/null
+++ b/libdb/btree/bt_split.c
@@ -0,0 +1,1177 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/btree.h"
+
+static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *));
+static int __bam_page __P((DBC *, EPG *, EPG *));
+static int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int));
+static int __bam_psplit __P((DBC *, EPG *, PAGE *, PAGE *, db_indx_t *));
+static int __bam_root __P((DBC *, EPG *));
+static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *));
+
+/*
+ * __bam_split --
+ * Split a page.
+ *
+ * PUBLIC: int __bam_split __P((DBC *, void *, db_pgno_t *));
+ */
+int
+__bam_split(dbc, arg, root_pgnop)
+ DBC *dbc;
+ void *arg;
+ db_pgno_t *root_pgnop;
+{
+ BTREE_CURSOR *cp;
+ enum { UP, DOWN } dir;
+ db_pgno_t root_pgno;
+ int exact, level, ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /*
+ * The locking protocol we use to avoid deadlock is to acquire locks
+ * by walking down the tree, doing it as lazily as possible and locking
+ * the root only as a last resort. We expect all stack pages to have
+ * been discarded before we're called; we discard all short-term locks.
+ *
+ * When __bam_split is first called, we know that a leaf page was too
+ * full for an insert. We don't know what leaf page it was, but we
+ * have the key/recno that caused the problem. We call XX_search to
+ * reacquire the leaf page, but this time get both the leaf page and
+ * its parent, locked. We then split the leaf page and see if the new
+ * internal key will fit into the parent page. If it will, we're done.
+ *
+ * If it won't, we discard our current locks and repeat the process,
+ * only this time acquiring the parent page and its parent, locked.
+ * This process repeats until we succeed in the split, splitting the
+ * root page as the final resort. The entire process then repeats,
+ * as necessary, until we split a leaf page.
+ *
+ * XXX
+ * A traditional method of speeding this up is to maintain a stack of
+ * the pages traversed in the original search. You can detect if the
+ * stack is correct by storing the page's LSN when it was searched and
+ * comparing that LSN with the current one when it's locked during the
+ * split. This would be an easy change for this code, but I have no
+ * numbers that indicate it's worthwhile.
+ */
+ for (dir = UP, level = LEAFLEVEL;; dir == UP ? ++level : --level) {
+ /*
+ * Acquire a page and its parent, locked.
+ */
+ if ((ret = (dbc->dbtype == DB_BTREE ?
+ __bam_search(dbc, PGNO_INVALID,
+ arg, S_WRPAIR, level, NULL, &exact) :
+ __bam_rsearch(dbc,
+ (db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0)
+ return (ret);
+
+ if (root_pgnop != NULL)
+ *root_pgnop = cp->csp[0].page->pgno == root_pgno ?
+ root_pgno : cp->csp[-1].page->pgno;
+ /*
+ * Split the page if it still needs it (it's possible another
+ * thread of control has already split the page). If we are
+ * guaranteed that two items will fit on the page, the split
+ * is no longer necessary.
+ */
+ if (2 * B_MAXSIZEONPAGE(cp->ovflsize)
+ <= (db_indx_t)P_FREESPACE(dbc->dbp, cp->csp[0].page)) {
+ __bam_stkrel(dbc, STK_NOLOCK);
+ return (0);
+ }
+ ret = cp->csp[0].page->pgno == root_pgno ?
+ __bam_root(dbc, &cp->csp[0]) :
+ __bam_page(dbc, &cp->csp[-1], &cp->csp[0]);
+ BT_STK_CLR(cp);
+
+ switch (ret) {
+ case 0:
+ /* Once we've split the leaf page, we're done. */
+ if (level == LEAFLEVEL)
+ return (0);
+
+ /* Switch directions. */
+ if (dir == UP)
+ dir = DOWN;
+ break;
+ case DB_NEEDSPLIT:
+ /*
+ * It's possible to fail to split repeatedly, as other
+ * threads may be modifying the tree, or the page usage
+ * is sufficiently bad that we don't get enough space
+ * the first time.
+ */
+ if (dir == DOWN)
+ dir = UP;
+ break;
+ default:
+ return (ret);
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __bam_root --
+ * Split the root page of a btree.
+ */
+static int
+__bam_root(dbc, cp)
+ DBC *dbc;
+ EPG *cp;
+{
+ DB *dbp;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *lp, *rp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ /* Yeah, right. */
+ if (cp->page->level >= MAXBTREELEVEL) {
+ __db_err(dbp->dbenv,
+ "Too many btree levels: %d", cp->page->level);
+ ret = ENOSPC;
+ goto err;
+ }
+
+ /* Create new left and right pages for the split. */
+ lp = rp = NULL;
+ if ((ret = __db_new(dbc, TYPE(cp->page), &lp)) != 0 ||
+ (ret = __db_new(dbc, TYPE(cp->page), &rp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, lp->pgno,
+ PGNO_INVALID, ISINTERNAL(cp->page) ? PGNO_INVALID : rp->pgno,
+ cp->page->level, TYPE(cp->page));
+ P_INIT(rp, dbp->pgsize, rp->pgno,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : lp->pgno, PGNO_INVALID,
+ cp->page->level, TYPE(cp->page));
+
+ /* Split the page. */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(
+ (BTREE_CURSOR *)dbc->internal, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp,
+ dbc->txn, &LSN(cp->page), 0, PGNO(lp), &LSN(lp), PGNO(rp),
+ &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn,
+ dbc->internal->root, &log_dbt, opflags)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+
+ /* Clean up the new root page. */
+ if ((ret = (dbc->dbtype == DB_RECNO ?
+ __ram_root(dbc, cp->page, lp, rp) :
+ __bam_broot(dbc, cp->page, lp, rp))) != 0)
+ goto err;
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ cp->page->pgno, lp->pgno, rp->pgno, split, 1)) != 0)
+ goto err;
+
+ /* Success -- write the real pages back to the store. */
+ (void)mpf->put(mpf, cp->page, DB_MPOOL_DIRTY);
+ (void)__TLPUT(dbc, cp->lock);
+ (void)mpf->put(mpf, lp, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, rp, DB_MPOOL_DIRTY);
+
+ return (0);
+
+err: if (lp != NULL)
+ (void)mpf->put(mpf, lp, 0);
+ if (rp != NULL)
+ (void)mpf->put(mpf, rp, 0);
+ (void)mpf->put(mpf, cp->page, 0);
+ (void)__TLPUT(dbc, cp->lock);
+ return (ret);
+}
+
+/*
+ * __bam_page --
+ * Split the non-root page of a btree.
+ */
+static int
+__bam_page(dbc, pp, cp)
+ DBC *dbc;
+ EPG *pp, *cp;
+{
+ BTREE_CURSOR *bc;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ DB *dbp;
+ DB_LOCK rplock, tplock;
+ DB_MPOOLFILE *mpf;
+ DB_LSN save_lsn;
+ PAGE *lp, *rp, *alloc_rp, *tp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ alloc_rp = lp = rp = tp = NULL;
+ LOCK_INIT(rplock);
+ LOCK_INIT(tplock);
+ ret = -1;
+
+ /*
+ * Create a new right page for the split, and fill in everything
+ * except its LSN and page number.
+ *
+ * We malloc space for both the left and right pages, so we don't get
+ * a new page from the underlying buffer pool until we know the split
+ * is going to succeed. The reason is that we can't release locks
+ * acquired during the get-a-new-page process because metadata page
+ * locks can't be discarded on failure since we may have modified the
+ * free list. So, if you assume that we're holding a write lock on the
+ * leaf page which ran out of space and started this split (e.g., we
+ * have already written records to the page, or we retrieved a record
+ * from it with the DB_RMW flag set), failing in a split with both a
+ * leaf page locked and the metadata page locked can potentially lock
+ * up the tree badly, because we've violated the rule of always locking
+ * down the tree, and never up.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &rp)) != 0)
+ goto err;
+ P_INIT(rp, dbp->pgsize, 0,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : NEXT_PGNO(cp->page),
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Create new left page for the split, and fill in everything
+ * except its LSN and next-page page number.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &lp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PREV_PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : 0,
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Split right.
+ *
+ * Only the indices are sorted on the page, i.e., the key/data pairs
+ * aren't, so it's simpler to copy the data from the split page onto
+ * two new pages instead of copying half the data to a new right page
+ * and compacting the left page in place. Since the left page can't
+ * change, we swap the original and the allocated left page after the
+ * split.
+ */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /*
+ * Test to see if we are going to be able to insert the new pages into
+ * the parent page. The interesting failure here is that the parent
+ * page can't hold the new keys, and has to be split in turn, in which
+ * case we want to release all the locks we can.
+ */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 1)) != 0)
+ goto err;
+
+ /*
+ * Fix up the previous pointer of any leaf page following the split
+ * page.
+ *
+ * There's interesting deadlock situations here as we try to write-lock
+ * a page that's not in our direct ancestry. Consider a cursor walking
+ * backward through the leaf pages, that has our following page locked,
+ * and is waiting on a lock for the page we're splitting. In that case
+ * we're going to deadlock here . It's probably OK, stepping backward
+ * through the tree isn't a common operation.
+ */
+ if (ISLEAF(cp->page) && NEXT_PGNO(cp->page) != PGNO_INVALID) {
+ if ((ret = __db_lget(dbc,
+ 0, NEXT_PGNO(cp->page), DB_LOCK_WRITE, 0, &tplock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0)
+ goto err;
+ }
+
+ /*
+ * We've got everything locked down we need, and we know the split
+ * is going to succeed. Go and get the additional page we'll need.
+ */
+ if ((ret = __db_new(dbc, TYPE(cp->page), &alloc_rp)) != 0)
+ goto err;
+
+ /*
+ * Lock the new page. We need to do this because someone
+ * could get here through bt_lpgno if this page was recently
+ * deallocated. They can't look at it before we commit.
+ */
+ if ((ret = __db_lget(dbc,
+ 0, PGNO(alloc_rp), DB_LOCK_WRITE, 0, &rplock)) != 0)
+ goto err;
+
+ /*
+ * Fix up the page numbers we didn't have before. We have to do this
+ * before calling __bam_pinsert because it may copy a page number onto
+ * the parent page and it takes the page number from its page argument.
+ */
+ PGNO(rp) = NEXT_PGNO(lp) = PGNO(alloc_rp);
+
+ /* Actually update the parent page. */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 0)) != 0)
+ goto err;
+
+ bc = (BTREE_CURSOR *)dbc->internal;
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ if (tp == NULL)
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(bc, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), PGNO(alloc_rp),
+ &LSN(alloc_rp), (u_int32_t)NUM_ENT(lp),
+ tp == NULL ? 0 : PGNO(tp),
+ tp == NULL ? &log_lsn : &LSN(tp),
+ PGNO_INVALID, &log_dbt, opflags)) != 0)
+ goto err;
+
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+
+ /* Update the LSNs for all involved pages. */
+ LSN(alloc_rp) = LSN(cp->page);
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+ if (tp != NULL)
+ LSN(tp) = LSN(cp->page);
+
+ /*
+ * Copy the left and right pages into place. There are two paths
+ * through here: either we are logging and have just set the LSNs in
+ * the logging path above, or we are not logging and lp and rp do not
+ * have valid LSNs. The correct LSNs to use are the ones on the page
+ * we got from __db_new or the one that was originally on cp->page.
+ * In both cases, we save the LSN from the real database page (not a
+ * malloc'd one) and reapply it after we do the copy.
+ */
+ save_lsn = alloc_rp->lsn;
+ memcpy(alloc_rp, rp, LOFFSET(dbp, rp));
+ memcpy((u_int8_t *)alloc_rp + HOFFSET(rp),
+ (u_int8_t *)rp + HOFFSET(rp), dbp->pgsize - HOFFSET(rp));
+ alloc_rp->lsn = save_lsn;
+
+ save_lsn = cp->page->lsn;
+ memcpy(cp->page, lp, LOFFSET(dbp, lp));
+ memcpy((u_int8_t *)cp->page + HOFFSET(lp),
+ (u_int8_t *)lp + HOFFSET(lp), dbp->pgsize - HOFFSET(lp));
+ cp->page->lsn = save_lsn;
+
+ /* Fix up the next-page link. */
+ if (tp != NULL)
+ PREV_PGNO(tp) = PGNO(rp);
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ PGNO(cp->page), PGNO(cp->page), PGNO(rp), split, 0)) != 0)
+ goto err;
+
+ __os_free(dbp->dbenv, lp);
+ __os_free(dbp->dbenv, rp);
+
+ /*
+ * Success -- write the real pages back to the store. As we never
+ * acquired any sort of lock on the new page, we release it before
+ * releasing locks on the pages that reference it. We're finished
+ * modifying the page so it's not really necessary, but it's neater.
+ */
+ if ((t_ret = mpf->put(mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, rplock);
+ if ((t_ret = mpf->put(mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, pp->lock);
+ if ((t_ret = mpf->put(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, cp->lock);
+ if (tp != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, tplock);
+ }
+ return (ret);
+
+err: if (lp != NULL)
+ __os_free(dbp->dbenv, lp);
+ if (rp != NULL)
+ __os_free(dbp->dbenv, rp);
+ if (alloc_rp != NULL)
+ (void)mpf->put(mpf, alloc_rp, 0);
+ if (tp != NULL)
+ (void)mpf->put(mpf, tp, 0);
+
+ /* We never updated the new or next pages, so we can release their locks. */
+ (void)__LPUT(dbc, rplock);
+ (void)__LPUT(dbc, tplock);
+
+ (void)mpf->put(mpf, pp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, pp->lock);
+ else
+ (void)__TLPUT(dbc, pp->lock);
+
+ (void)mpf->put(mpf, cp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, cp->lock);
+ else
+ (void)__TLPUT(dbc, cp->lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_broot --
+ * Fix up the btree root page after it has been split.
+ */
+static int
+__bam_broot(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT hdr, data;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * If the root page was a leaf page, change it into an internal page.
+ * We copy the key we split on (but not the key's data, in the case of
+ * a leaf page) to the new root page.
+ */
+ root_pgno = cp->root;
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IBTREE);
+
+ memset(&data, 0, sizeof(data));
+ memset(&hdr, 0, sizeof(hdr));
+
+ /*
+ * The btree comparison code guarantees that the left-most key on any
+ * internal btree page is never used, so it doesn't need to be filled
+ * in. Set the record count if necessary.
+ */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = 0;
+ B_TSET(bi.type, B_KEYDATA, 0);
+ bi.pgno = lp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, lp);
+ RE_NREC_SET(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ if ((ret =
+ __db_pitem(dbc, rootp, 0, BINTERNAL_SIZE(0), &hdr, NULL)) != 0)
+ return (ret);
+
+ switch (TYPE(rp)) {
+ case P_IBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bi = GET_BINTERNAL(dbp, rp, 0);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bk = GET_BKEYDATA(dbp, rp, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk->data;
+ data.size = child_bk->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bk->len), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __ram_root --
+ * Fix up the recno root page after it has been split.
+ */
+static int
+__ram_root(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ DB *dbp;
+ DBT hdr;
+ RINTERNAL ri;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ root_pgno = dbc->internal->root;
+
+ /* Initialize the page. */
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IRECNO);
+
+ /* Initialize the header. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+
+ /* Insert the left and right keys, set the header information. */
+ ri.pgno = lp->pgno;
+ ri.nrecs = __bam_total(dbp, lp);
+ if ((ret = __db_pitem(dbc, rootp, 0, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_SET(rootp, ri.nrecs);
+ ri.pgno = rp->pgno;
+ ri.nrecs = __bam_total(dbp, rp);
+ if ((ret = __db_pitem(dbc, rootp, 1, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_ADJ(rootp, ri.nrecs);
+ return (0);
+}
+
+/*
+ * __bam_pinsert --
+ * Insert a new key into a parent page, completing the split.
+ */
+static int
+__bam_pinsert(dbc, parent, lchild, rchild, space_check)
+ DBC *dbc;
+ EPG *parent;
+ PAGE *lchild, *rchild;
+ int space_check;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk, *tmp_bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT a, b, hdr, data;
+ PAGE *ppage;
+ RINTERNAL ri;
+ db_indx_t off;
+ db_recno_t nrecs;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t n, nbytes, nksize;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ ppage = parent->page;
+
+ /* If handling record numbers, count records split to the right page. */
+ nrecs = F_ISSET(cp, C_RECNUM) &&
+ !space_check ? __bam_total(dbp, rchild) : 0;
+
+ /*
+ * Now we insert the new page's first key into the parent page, which
+ * completes the split. The parent points to a PAGE and a page index
+ * offset, where the new key goes ONE AFTER the index, because we split
+ * to the right.
+ *
+ * XXX
+ * Some btree algorithms replace the key for the old page as well as
+ * the new page. We don't, as there's no reason to believe that the
+ * first key on the old page is any better than the key we have, and,
+ * in the case of a key being placed at index 0 causing the split, the
+ * key is unavailable.
+ */
+ off = parent->indx + O_INDX;
+
+ /*
+ * Calculate the space needed on the parent page.
+ *
+ * Prefix trees: space hack used when inserting into BINTERNAL pages.
+ * Retain only what's needed to distinguish between the new entry and
+ * the LAST entry on the page to its left. If the keys compare equal,
+ * retain the entire key. We ignore overflow keys, and the entire key
+ * must be retained for the next-to-leftmost key on the leftmost page
+ * of each level, or the search will fail. Applicable ONLY to internal
+ * pages that have leaf pages as children. Further reduction of the
+ * key between pairs of internal pages loses too much information.
+ */
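+ /*
+ * For example (hypothetical keys): if the last key on the left child
+ * is "apple" and the first key on the new right child is "grape", one
+ * byte ("g") is enough to separate the two pages, so only "g" needs
+ * to be stored in the new parent entry.
+ */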
+ switch (TYPE(rchild)) {
+ case P_IBTREE:
+ child_bi = GET_BINTERNAL(dbp, rchild, 0);
+ nbytes = BINTERNAL_PSIZE(child_bi->len);
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ child_bk = GET_BKEYDATA(dbp, rchild, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ /*
+ * We set t->bt_prefix to NULL if we have a comparison
+ * callback but no prefix compression callback. But,
+ * if we're splitting in an off-page duplicates tree,
+ * we still have to do some checking. If using the
+ * default off-page duplicates comparison routine we
+ * can use the default prefix compression callback. If
+ * not using the default off-page duplicates comparison
+ * routine, we can't do any kind of prefix compression
+ * as there's no way for an application to specify a
+ * prefix compression callback that corresponds to its
+ * comparison callback.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if (dbp->dup_compare == __bam_defcmp)
+ func = __bam_defpfx;
+ else
+ func = NULL;
+ } else
+ func = t->bt_prefix;
+
+ nbytes = BINTERNAL_PSIZE(child_bk->len);
+ nksize = child_bk->len;
+ if (func == NULL)
+ goto noprefix;
+ if (ppage->prev_pgno == PGNO_INVALID && off <= 1)
+ goto noprefix;
+ tmp_bk = GET_BKEYDATA(dbp, lchild, NUM_ENT(lchild) -
+ (TYPE(lchild) == P_LDUP ? O_INDX : P_INDX));
+ if (B_TYPE(tmp_bk->type) != B_KEYDATA)
+ goto noprefix;
+ memset(&a, 0, sizeof(a));
+ a.size = tmp_bk->len;
+ a.data = tmp_bk->data;
+ memset(&b, 0, sizeof(b));
+ b.size = child_bk->len;
+ b.data = child_bk->data;
+ nksize = (u_int32_t)func(dbp, &a, &b);
+ if ((n = BINTERNAL_PSIZE(nksize)) < nbytes)
+ nbytes = n;
+ else
+noprefix: nksize = child_bk->len;
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = nksize;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk->data;
+ data.size = nksize;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(nksize), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ nbytes = BINTERNAL_PSIZE(BOVERFLOW_SIZE);
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ nbytes = RINTERNAL_PSIZE;
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+ ri.pgno = rchild->pgno;
+ ri.nrecs = nrecs;
+ if ((ret = __db_pitem(dbc,
+ ppage, off, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
+ }
+
+ /*
+ * If a Recno or Btree with record numbers AM page, or an off-page
+ * duplicates tree, adjust the parent page's left page record count.
+ */
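+ /*
+ * E.g. (hypothetical counts): if the old page held 50 records and 20
+ * of them moved to the new right page, nrecs is 20 and the existing
+ * parent entry for the left page drops from 50 to 30; the new entry
+ * inserted above already carries the right page's count of 20.
+ */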
+ if (F_ISSET(cp, C_RECNUM)) {
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(ppage), 0, PGNO(ppage),
+ &LSN(ppage), parent->indx, -(int32_t)nrecs, 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(ppage));
+
+ /* Update the left page count. */
+ if (dbc->dbtype == DB_RECNO)
+ GET_RINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
+ else
+ GET_BINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_psplit --
+ * Do the real work of splitting the page.
+ */
+static int
+__bam_psplit(dbc, cp, lp, rp, splitret)
+ DBC *dbc;
+ EPG *cp;
+ PAGE *lp, *rp;
+ db_indx_t *splitret;
+{
+ DB *dbp;
+ PAGE *pp;
+ db_indx_t half, *inp, nbytes, off, splitp, top;
+ int adjust, cnt, iflag, isbigkey, ret;
+
+ dbp = dbc->dbp;
+ pp = cp->page;
+ inp = P_INP(dbp, pp);
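+ /*
+ * On a btree leaf, items come in key/data pairs, so indices advance
+ * by P_INDX (2); on every other page type they advance singly
+ * (O_INDX, 1).
+ */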
+ adjust = TYPE(pp) == P_LBTREE ? P_INDX : O_INDX;
+
+ /*
+ * If we're splitting the first (last) page on a level because we're
+ * inserting (appending) a key to it, it's likely that the data is
+ * sorted. Moving a single item to the new page is less work and can
+ * push the fill factor higher than normal. This is trivial when we
+ * are splitting a new page before the beginning of the tree; all of
+ * the interesting tests are against values of 0.
+ *
+ * Catching appends to the tree is harder. In a simple append, we're
+ * inserting an item that sorts past the end of the tree; the cursor
+ * will point past the last element on the page. But, in trees with
+ * duplicates, the cursor may point to the last entry on the page --
+ * in this case, the entry will also be the last element of a duplicate
+ * set (the last because the search call specified the S_DUPLAST flag).
+ * The only way to differentiate between an insert immediately before
+ * the last item in a tree or an append after a duplicate set which is
+ * also the last item in the tree is to call the comparison function.
+ * When splitting internal pages during an append, the search code
+ * guarantees the cursor always points to the largest page item less
+ * than the new internal entry. To summarize, we want to catch three
+ * possible index values:
+ *
+ * NUM_ENT(page) Btree/Recno leaf insert past end-of-tree
+ * NUM_ENT(page) - O_INDX Btree or Recno internal insert past EOT
+ * NUM_ENT(page) - P_INDX Btree leaf insert past EOT after a set
+ * of duplicates
+ *
+ * two of which (NUM_ENT(page) - O_INDX or P_INDX) might be an insert
+ * near the end of the tree, and not after the end of the tree at all.
+ * We do a simple test which might be wrong, because calling the
+ * comparison functions is expensive. Regardless, it's not a big deal
+ * if we're wrong; we'll do the split the right way next time.
+ */
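+ /*
+ * A hypothetical illustration: on the last btree leaf of its level,
+ * holding 5 key/data pairs, NUM_ENT(page) is 10 and adjust is P_INDX,
+ * so the test below fires for cursor indices of 8 or more -- a plain
+ * append past the end of the tree (index 10) or an append after a
+ * trailing on-page duplicate set (index 8).
+ */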
+ off = 0;
+ if (NEXT_PGNO(pp) == PGNO_INVALID && cp->indx >= NUM_ENT(pp) - adjust)
+ off = NUM_ENT(pp) - adjust;
+ else if (PREV_PGNO(pp) == PGNO_INVALID && cp->indx == 0)
+ off = adjust;
+ if (off != 0)
+ goto sort;
+
+ /*
+ * Split the data to the left and right pages. Try not to split on
+ * an overflow key. (Overflow keys on internal pages will slow down
+ * searches.) Refuse to split in the middle of a set of duplicates.
+ *
+ * First, find the optimum place to split.
+ *
+ * It's possible to try and split past the last record on the page if
+ * there's a very large record at the end of the page. Make sure this
+ * doesn't happen by bounding the check at the next-to-last entry on
+ * the page.
+ *
+ * Note, we try and split half the data present on the page. This is
+ * because another process may have already split the page and left
+ * it half empty. We don't try and skip the split -- we don't know
+ * how much space we're going to need on the page, and we may need up
+ * to half the page for a big item, so there's no easy test to decide
+ * if we need to split or not. Besides, if two threads are inserting
+ * data into the same place in the database, we're probably going to
+ * need more space soon anyway.
+ */
+ top = NUM_ENT(pp) - adjust;
+ half = (dbp->pgsize - HOFFSET(pp)) / 2;
+ for (nbytes = 0, off = 0; off < top && nbytes < half; ++off)
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA)
+ nbytes += BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, off)->len);
+ else
+ nbytes += BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+
+ ++off;
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes += RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
+ }
+sort: splitp = off;
+
+ /*
+ * Splitp is either at or just past the optimum split point. If the
+ * tree type is such that we're going to promote a key to an internal
+ * page, and our current choice is an overflow key, look for something
+ * close by that's smaller.
+ */
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ iflag = 1;
+ isbigkey =
+ B_TYPE(GET_BINTERNAL(dbp, pp, off)->type) != B_KEYDATA;
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ iflag = 0;
+ isbigkey = B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) !=
+ B_KEYDATA;
+ break;
+ default:
+ iflag = isbigkey = 0;
+ }
+ if (isbigkey)
+ for (cnt = 1; cnt <= 3; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < (db_indx_t)NUM_ENT(pp) &&
+ ((iflag && B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA) ||
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (iflag ? B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA :
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA) {
+ splitp = off;
+ break;
+ }
+ }
+
+ /*
+ * We can't split in the middle of a set of duplicates. We know that
+ * no duplicate set can take up more than about 25% of the page,
+ * because that's the point where we push it off onto a duplicate
+ * page set. So, this loop can't be unbounded.
+ */
+ if (TYPE(pp) == P_LBTREE &&
+ inp[splitp] == inp[splitp - adjust])
+ for (cnt = 1;; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < NUM_ENT(pp) &&
+ inp[splitp] != inp[off]) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (inp[splitp] != inp[off]) {
+ splitp = off + adjust;
+ break;
+ }
+ }
+
+ /* We're going to split at splitp. */
+ if ((ret = __bam_copy(dbp, pp, lp, 0, splitp)) != 0)
+ return (ret);
+ if ((ret = __bam_copy(dbp, pp, rp, splitp, NUM_ENT(pp))) != 0)
+ return (ret);
+
+ *splitret = splitp;
+ return (0);
+}
+
+/*
+ * __bam_copy --
+ * Copy a set of records from one page to another.
+ *
+ * PUBLIC: int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__bam_copy(dbp, pp, cp, nxt, stop)
+ DB *dbp;
+ PAGE *pp, *cp;
+ u_int32_t nxt, stop;
+{
+ db_indx_t *cinp, nbytes, off, *pinp;
+
+ cinp = P_INP(dbp, cp);
+ pinp = P_INP(dbp, pp);
+ /*
+ * nxt indexes the next record to copy from the source page; off is
+ * the index it receives on the target page.
+ */
+ for (off = 0; nxt < stop; ++nxt, ++NUM_ENT(cp), ++off) {
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, nxt)->type) == B_KEYDATA)
+ nbytes = BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, nxt)->len);
+ else
+ nbytes = BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ /*
+ * If we're on a key and it's a duplicate, just copy
+ * the offset.
+ */
+ if (off != 0 && (nxt % P_INDX) == 0 &&
+ pinp[nxt] == pinp[nxt - P_INDX]) {
+ cinp[off] = cinp[off - P_INDX];
+ continue;
+ }
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, nxt)->type) ==
+ B_KEYDATA)
+ nbytes = BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, nxt)->len);
+ else
+ nbytes = BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes = RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
+ }
+ cinp[off] = HOFFSET(cp) -= nbytes;
+ memcpy(P_ENTRY(dbp, cp, off), P_ENTRY(dbp, pp, nxt), nbytes);
+ }
+ return (0);
+}
diff --git a/libdb/btree/bt_stat.c b/libdb/btree/bt_stat.c
new file mode 100644
index 0000000..e48616f
--- /dev/null
+++ b/libdb/btree/bt_stat.c
@@ -0,0 +1,481 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+/*
+ * __bam_stat --
+ * Gather/print the btree statistics
+ *
+ * PUBLIC: int __bam_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__bam_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ DB_BTREE_STAT *sp;
+ DB_LOCK lock, metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret, t_ret, write_meta;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ meta = NULL;
+ t = dbp->bt_internal;
+ sp = NULL;
+ LOCK_INIT(metalock);
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = 0;
+ write_meta = 0;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ DEBUG_LWRITE(dbc, NULL, "bam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ /* Get the metadata page for the entire database. */
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (flags == DB_RECORDCOUNT || flags == DB_CACHED_COUNTS)
+ flags = DB_FAST_STAT;
+ if (flags == DB_FAST_STAT)
+ goto meta_only;
+
+ /* Walk the metadata free list, counting pages. */
+ for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
+ ++sp->bt_free;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ }
+
+ /* Get the root page. */
+ pgno = cp->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /* Get the levels from the root page. */
+ sp->bt_levels = h->level;
+
+ /* Discard the root page. */
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ __LPUT(dbc, lock);
+
+ /* Walk the tree. */
+ if ((ret = __bam_traverse(dbc,
+ DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0)
+ goto err;
+
+ /*
+ * Get the subdatabase metadata page if it's not the same as the
+ * one we already have.
+ */
+ write_meta = !F_ISSET(dbp, DB_AM_RDONLY);
+meta_only:
+ if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) {
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ __LPUT(dbc, metalock);
+
+ if ((ret = __db_lget(dbc,
+ 0, t->bt_meta, write_meta == 0 ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ }
+ if (flags == DB_FAST_STAT) {
+ if (dbp->type == DB_RECNO ||
+ (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) {
+ if ((ret = __db_lget(dbc, 0,
+ cp->root, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &cp->root, 0, (PAGE **)&h)) != 0)
+ goto err;
+
+ sp->bt_nkeys = RE_NREC(h);
+ } else
+ sp->bt_nkeys = meta->dbmeta.key_count;
+ sp->bt_ndata = meta->dbmeta.record_count;
+ }
+
+ /* Get metadata page statistics. */
+ sp->bt_metaflags = meta->dbmeta.flags;
+ sp->bt_maxkey = meta->maxkey;
+ sp->bt_minkey = meta->minkey;
+ sp->bt_re_len = meta->re_len;
+ sp->bt_re_pad = meta->re_pad;
+ sp->bt_pagesize = meta->dbmeta.pagesize;
+ sp->bt_magic = meta->dbmeta.magic;
+ sp->bt_version = meta->dbmeta.version;
+
+ if (write_meta != 0) {
+ meta->dbmeta.key_count = sp->bt_nkeys;
+ meta->dbmeta.record_count = sp->bt_ndata;
+ }
+
+ *(DB_BTREE_STAT **)spp = sp;
+
+err: /* Discard the second page. */
+ __LPUT(dbc, lock);
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the metadata page. */
+ __LPUT(dbc, metalock);
+ if (meta != NULL && (t_ret = mpf->put(
+ mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0 && sp != NULL) {
+ __os_ufree(dbp->dbenv, sp);
+ *(DB_BTREE_STAT **)spp = NULL;
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_traverse --
+ * Walk a Btree database.
+ *
+ * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__bam_traverse(dbc, mode, root_pgno, callback, cookie)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t root_pgno;
+ int (*callback)__P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t indx;
+ int already_put, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ already_put = 0;
+
+ if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0) {
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ ((BOVERFLOW *)bi->data)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if ((ret = __bam_traverse(
+ dbc, mode, bi->pgno, callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_IRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ ri = GET_RINTERNAL(dbp, h, indx);
+ if ((ret = __bam_traverse(
+ dbc, mode, ri->pgno, callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_LBTREE:
+ for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
+ if (B_TYPE(bk->type) == B_DUPLICATE &&
+ (ret = __bam_traverse(dbc, mode,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ }
+
+ ret = callback(dbp, h, cookie, &already_put);
+
+err: if (!already_put && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ __LPUT(dbc, lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_stat_callback --
+ * Statistics callback.
+ *
+ * PUBLIC: int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__bam_stat_callback(dbp, h, cookie, putp)
+ DB *dbp;
+ PAGE *h;
+ void *cookie;
+ int *putp;
+{
+ DB_BTREE_STAT *sp;
+ db_indx_t indx, *inp, top;
+ u_int8_t type;
+
+ sp = cookie;
+ *putp = 0;
+ top = NUM_ENT(h);
+ inp = P_INP(dbp, h);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ ++sp->bt_int_pg;
+ sp->bt_int_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_LBTREE:
+ /* Correct for on-page duplicates and deleted items. */
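+ /*
+ * A key is counted only when the next key slot does not share its
+ * inp offset, so a run of on-page duplicates contributes a single
+ * key; deleted items and off-page duplicate references are not
+ * counted as data items.
+ */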
+ for (indx = 0; indx < top; indx += P_INDX) {
+ if (indx + P_INDX >= top ||
+ inp[indx] != inp[indx + P_INDX])
+ ++sp->bt_nkeys;
+
+ type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type;
+ if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
+ ++sp->bt_ndata;
+ }
+
+ ++sp->bt_leaf_pg;
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_LRECNO:
+ /*
+ * If walking a recno tree, then each of these items is a key.
+ * Otherwise, we're walking an off-page duplicate set.
+ */
+ if (dbp->type == DB_RECNO) {
+ sp->bt_nkeys += top;
+
+ /*
+ * Correct for deleted items in non-renumbering
+ * Recno databases.
+ */
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ sp->bt_ndata += top;
+ else
+ for (indx = 0; indx < top; indx += O_INDX) {
+ type = GET_BKEYDATA(dbp, h, indx)->type;
+ if (!B_DISSET(type))
+ ++sp->bt_ndata;
+ }
+
+ ++sp->bt_leaf_pg;
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
+ } else {
+ sp->bt_ndata += top;
+
+ ++sp->bt_dup_pg;
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
+ }
+ break;
+ case P_LDUP:
+ /* Correct for deleted items. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
+ ++sp->bt_ndata;
+
+ ++sp->bt_dup_pg;
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_OVERFLOW:
+ ++sp->bt_over_pg;
+ sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __bam_key_range --
+ * Return the proportion of keys relative to the given key. The
+ * numbers are slightly skewed due to on-page duplicates.
+ *
+ * PUBLIC: int __bam_key_range __P((DB *,
+ * PUBLIC: DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ */
+int
+__bam_key_range(dbp, txn, dbt, kp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *dbt;
+ DB_KEY_RANGE *kp;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ EPG *sp;
+ double factor;
+ int exact, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->key_range");
+
+ if (flags != 0)
+ return (__db_ferr(dbp->dbenv, "DB->key_range", 0));
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "bam_key_range", NULL, NULL, 0);
+
+ if ((ret = __bam_search(dbc, PGNO_INVALID,
+ dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
+ goto err;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ kp->less = kp->greater = 0.0;
+
+ factor = 1.0;
+ /* Correct the leaf page: leaf entries come in key/data pairs. */
+ cp->csp->entries /= 2;
+ cp->csp->indx /= 2;
+ for (sp = cp->sp; sp <= cp->csp; ++sp) {
+ /*
+ * At each level we know that pages greater than indx contain
+ * keys greater than what we are looking for and those less
+ * than indx are less than. The one pointed to by indx may
+ * have some less, some greater or even equal. If indx is
+ * equal to the number of entries, then the key is out of range
+ * and everything is less.
+ */
+ if (sp->indx == 0)
+ kp->greater += factor * (sp->entries - 1)/sp->entries;
+ else if (sp->indx == sp->entries)
+ kp->less += factor;
+ else {
+ kp->less += factor * sp->indx / sp->entries;
+ kp->greater += factor *
+ (sp->entries - sp->indx - 1) / sp->entries;
+ }
+ factor *= 1.0/sp->entries;
+ }
+
+ /*
+ * If there was an exact match, assign the remaining factor to the key
+ * itself. Otherwise that factor belongs to those greater than the
+ * key, unless the key was out of range.
+ */
+ if (exact)
+ kp->equal = factor;
+ else {
+ if (kp->less != 1)
+ kp->greater += factor;
+ kp->equal = 0;
+ }
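+ /*
+ * Worked example (hypothetical numbers): for a two-level tree whose
+ * root has 4 entries with indx 2, and whose leaf, after halving, has
+ * 10 keys with indx 3, the loop above yields less = 1.0*2/4 +
+ * 0.25*3/10 = 0.575 and greater = 1.0*1/4 + 0.25*6/10 = 0.40, leaving
+ * factor = 0.025 for equal on an exact match; the three sum to 1.
+ */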
+
+ BT_STK_CLR(cp);
+
+err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/libdb/btree/bt_upgrade.c b/libdb/btree/bt_upgrade.c
new file mode 100644
index 0000000..1fc2fa5
--- /dev/null
+++ b/libdb/btree/bt_upgrade.c
@@ -0,0 +1,162 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __bam_30_btreemeta --
+ * Upgrade the metadata pages from version 6 to version 7.
+ *
+ * PUBLIC: int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__bam_30_btreemeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ BTMETA30 *newmeta;
+ BTMETA2X *oldmeta;
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ newmeta = (BTMETA30 *)buf;
+ oldmeta = (BTMETA2X *)buf;
+
+ /*
+ * Move fields from the end of the structure upward, so we do not
+ * overwrite anything. We are going to create a new uid, so the fields
+ * at the end of the structure can be moved first, overwriting the uid.
+ */
+
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ newmeta->dbmeta.free = oldmeta->free;
+ newmeta->dbmeta.flags = oldmeta->flags;
+ newmeta->dbmeta.type = P_BTREEMETA;
+
+ newmeta->dbmeta.version = 7;
+ /* Replace the unique ID. */
+ if ((ret = __os_fileid(dbenv, real_name, 1, buf + 36)) != 0)
+ return (ret);
+
+ newmeta->root = 1;
+
+ return (0);
+}
+
+/*
+ * __bam_31_btreemeta --
+ * Upgrade the database from version 7 to version 8.
+ *
+ * PUBLIC: int __bam_31_btreemeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BTMETA31 *newmeta;
+ BTMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ newmeta = (BTMETA31 *)h;
+ oldmeta = (BTMETA30 *)h;
+
+ /*
+ * Copy the affected fields down the page.
+ * The fields may overlap each other, so we
+ * start at the bottom and use memmove.
+ */
+ newmeta->root = oldmeta->root;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Set the version number. */
+ newmeta->dbmeta.version = 8;
+
+ /* Upgrade the flags. */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, BTM_DUPSORT);
+
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __bam_31_lbtree --
+ * Upgrade the database btree leaf pages.
+ *
+ * PUBLIC: int __bam_31_lbtree
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BKEYDATA *bk;
+ db_pgno_t pgno;
+ db_indx_t indx;
+ int ret;
+
+ ret = 0;
+ for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ pgno = GET_BOVERFLOW(dbp, h, indx)->pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0)
+ break;
+ if (pgno != GET_BOVERFLOW(dbp, h, indx)->pgno) {
+ *dirtyp = 1;
+ GET_BOVERFLOW(dbp, h, indx)->pgno = pgno;
+ }
+ }
+ }
+
+ return (ret);
+}
diff --git a/libdb/btree/bt_verify.c b/libdb/btree/bt_verify.c
new file mode 100644
index 0000000..6977e7e
--- /dev/null
+++ b/libdb/btree/bt_verify.c
@@ -0,0 +1,2387 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+
+static int __bam_safe_getdata __P((DB *, PAGE *, u_int32_t, int, DBT *, int *));
+static int __bam_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+static int __bam_vrfy_treeorder __P((DB *, db_pgno_t, PAGE *, BINTERNAL *,
+ BINTERNAL *, int (*)(DB *, const DBT *, const DBT *), u_int32_t));
+static int __ram_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)
+
+/*
+ * __bam_vrfy_meta --
+ * Verify the btree-specific part of a metadata page.
+ *
+ * PUBLIC: int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, t_ret, ret;
+ db_indx_t ovflsize;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ isbad = 0;
+
+ /*
+ * If VRFY_INCOMPLETE is not set, then we didn't come through
+ * __db_vrfy_pagezero and didn't incompletely
+ * check this page--we haven't checked it at all.
+ * Thus we need to call __db_vrfy_meta and check the common fields.
+ *
+ * If VRFY_INCOMPLETE is set, we've already done all the same work
+ * in __db_vrfy_pagezero, so skip the check.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &meta->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* bt_minkey: must be >= 2; must produce sensible ovflsize */
+
+ /* avoid division by zero */
+ ovflsize = meta->minkey > 0 ?
+ B_MINKEY_TO_OVFLSIZE(dbp, meta->minkey, dbp->pgsize) : 0;
+
+ if (meta->minkey < 2 ||
+ ovflsize > B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
+ pip->bt_minkey = 0;
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical bt_minkey value %lu on metadata page",
+ (u_long)pgno, (u_long)meta->minkey));
+ } else
+ pip->bt_minkey = meta->minkey;
+
+ /* bt_maxkey: no constraints (XXX: right?) */
+ pip->bt_maxkey = meta->maxkey;
+
+ /* re_len: no constraints on this (may be zero or huge--we make rope) */
+ pip->re_len = meta->re_len;
+
+ /*
+ * The root must not be the current page or 0, and it must be within
+ * the database. If this metadata page is the master metadata page
+ * of the file, then the root page had better be page 1.
+ */
+ pip->root = 0;
+ if (meta->root == PGNO_INVALID ||
+ meta->root == pgno || !IS_VALID_PGNO(meta->root) ||
+ (pgno == PGNO_BASE_MD && meta->root != 1)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical root page %lu on metadata page",
+ (u_long)pgno, (u_long)meta->root));
+ } else
+ pip->root = meta->root;
+
+ /* Flags. */
+ if (F_ISSET(&meta->dbmeta, BTM_RENUMBER))
+ F_SET(pip, VRFY_IS_RRECNO);
+
+ if (F_ISSET(&meta->dbmeta, BTM_SUBDB)) {
+ /*
+ * If this is a master db meta page, it had better not have
+ * duplicates.
+ */
+ if (F_ISSET(&meta->dbmeta, BTM_DUP) && pgno == PGNO_BASE_MD) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+"Page %lu: Btree metadata page has both duplicates and multiple databases",
+ (u_long)pgno));
+ }
+ F_SET(pip, VRFY_HAS_SUBDBS);
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&meta->dbmeta, BTM_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ if (F_ISSET(&meta->dbmeta, BTM_RECNUM))
+ F_SET(pip, VRFY_HAS_RECNUMS);
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Btree metadata page illegally has both recnums and dups",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_RECNO)) {
+ F_SET(pip, VRFY_IS_RECNO);
+ dbp->type = DB_RECNO;
+ } else if (F_ISSET(pip, VRFY_IS_RRECNO)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: metadata page has renumber flag set but is not recno",
+ (u_long)pgno));
+ }
+
+ if (F_ISSET(pip, VRFY_IS_RECNO) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno metadata page specifies duplicates",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_FIXEDLEN))
+ F_SET(pip, VRFY_IS_FIXEDLEN);
+ else if (pip->re_len > 0) {
+ /*
+ * It's wrong to have an re_len if it's not a fixed-length
+ * database
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: re_len of %lu in non-fixed-length database",
+ (u_long)pgno, (u_long)pip->re_len));
+ }
+
+ /*
+ * We do not check that the rest of the page is 0, because it may
+ * not be and may still be correct.
+ */
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_leaf --
+ * Verify a recno leaf page.
+ *
+ * PUBLIC: int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t re_len_guess, len;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if ((ret = __db_fchk(dbp->dbenv,
+ "__ram_vrfy_leaf", flags, OKFLAGS)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_LRECNO) {
+ /* We should not have been called. */
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_leaf", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Return immediately if it returns DB_VERIFY_BAD;
+ * further checks are dangerous.
+ */
+ if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Recno database has dups", (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Walk through inp and see if the lengths of all the records are the
+ * same--if so, this may be a fixed-length database, and we want to
+ * save off this value. We know inp to be safe if we've gotten this
+ * far.
+ */
+ re_len_guess = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ bk = GET_BKEYDATA(dbp, h, i);
+ /* KEYEMPTY. Go on. */
+ if (B_DISSET(bk->type))
+ continue;
+ if (bk->type == B_OVERFLOW)
+ len = ((BOVERFLOW *)bk)->tlen;
+ else if (bk->type == B_KEYDATA)
+ len = bk->len;
+ else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical type for item %lu",
+ (u_long)pgno, (u_long)i));
+ continue;
+ }
+ if (re_len_guess == 0)
+ re_len_guess = len;
+
+ /*
+ * Is this item's len the same as the last one's? If not,
+ * reset to 0 and break--we don't have a single re_len.
+ * Otherwise, go on to the next item.
+ */
+ if (re_len_guess != len) {
+ re_len_guess = 0;
+ break;
+ }
+ }
+ pip->re_len = re_len_guess;
+
+ /* Save off record count. */
+ pip->rec_cnt = NUM_ENT(h);
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy --
+ * Verify a btree leaf or internal page.
+ *
+ * PUBLIC: int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * The record count is, on internal pages, stored in an overloaded
+ * next_pgno field. Save it off; we'll verify it when we check
+ * overall database structure. We could overload the field
+ * in VRFY_PAGEINFO, too, but this seems gross, and space
+ * is not at such a premium.
+ */
+ pip->rec_cnt = RE_NREC(h);
+
+ /*
+ * Verify inp[].
+ */
+ if (TYPE(h) == P_IRECNO) {
+ if ((ret = __ram_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+ } else if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item order check unsafe: skipping",
+ (u_long)pgno));
+ } else if (!LF_ISSET(DB_NOORDERCHK) && (ret =
+ __bam_vrfy_itemorder(dbp, vdp, h, pgno, 0, 0, 0, flags)) != 0) {
+ /*
+ * We know that the elements of inp are reasonable.
+ *
+ * Check that elements fall in the proper order.
+ */
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_inp --
+ * Verify that all entries in a P_IRECNO inp[] array are reasonable,
+ * and count them. Note that P_LRECNO uses __bam_vrfy_inp;
+ * P_IRECNOs are a special, and simpler, case, since they have
+ * RINTERNALs rather than BKEYDATA/BINTERNALs.
+ */
+static int
+__ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ RINTERNAL *ri;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+ u_int32_t himark, i, offset, nentries;
+ db_indx_t *inp;
+ u_int8_t *pagelayout, *p;
+
+ isbad = 0;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ nentries = 0;
+ pagelayout = NULL;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if (TYPE(h) != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ himark = dbp->pgsize;
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ inp = P_INP(dbp, h);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if ((u_int8_t *)inp + i >= (u_int8_t *)h + himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: entries listing %lu overlaps data",
+ (u_long)pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ offset = inp[i];
+ /*
+ * Check that the item offset is reasonable: it points
+ * somewhere after the inp array and before the end of the
+ * page.
+ */
+ if (offset <= (u_int32_t)((u_int8_t *)inp + i -
+ (u_int8_t *)h) ||
+ offset > (u_int32_t)(dbp->pgsize - RINTERNAL_SIZE)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad offset %lu at index %lu",
+ (u_long)pgno, (u_long)offset, (u_long)i));
+ continue;
+ }
+
+ /* Update the high-water mark (what HOFFSET should be) */
+ if (offset < himark)
+ himark = offset;
+
+ nentries++;
+
+ /* Make sure this RINTERNAL is not multiply referenced. */
+ ri = GET_RINTERNAL(dbp, h, i);
+ if (pagelayout[offset] == 0) {
+ pagelayout[offset] = 1;
+ child.pgno = ri->pgno;
+ child.type = V_RECNO;
+ child.nrecs = ri->nrecs;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ } else {
+ EPRINT((dbp->dbenv,
+ "Page %lu: RINTERNAL structure at offset %lu referenced twice",
+ (u_long)pgno, (u_long)offset));
+ isbad = 1;
+ }
+ }
+
+ for (p = pagelayout + himark;
+ p < pagelayout + dbp->pgsize;
+ p += RINTERNAL_SIZE)
+ if (*p != 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: gap between items at offset %lu",
+ (u_long)pgno, (u_long)(p - pagelayout)));
+ isbad = 1;
+ }
+
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)(HOFFSET(h)), (u_long)himark));
+ isbad = 1;
+ }
+
+ *nentriesp = nentries;
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagelayout != NULL)
+ __os_free(dbp->dbenv, pagelayout);
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_inp --
+ * Verify that all entries in inp[] array are reasonable;
+ * count them.
+ */
+static int
+__bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int isbad, initem, isdupitem, ret, t_ret;
+ u_int32_t himark, offset; /* These would be db_indx_ts but for alignment. */
+ u_int32_t i, endoff, nentries;
+ u_int8_t *pagelayout;
+
+ isbad = isdupitem = 0;
+ nentries = 0;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ break;
+ default:
+ /*
+ * In the salvager, we might call this from a page which
+ * we merely suspect is a btree page. Otherwise, it
+ * shouldn't get called--if it is, that's a verifier bug.
+ */
+ if (LF_ISSET(DB_SALVAGE))
+ break;
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Loop through inp[], the array of items, until we either
+ * run out of entries or collide with the data. Keep track
+ * of h_offset in himark.
+ *
+ * For each element in inp[i], make sure it references a region
+ * that starts after the end of the inp array (as defined by
+ * NUM_ENT(h)), ends before the end of the page, doesn't
+ * overlap any other regions, and doesn't have a gap between
+ * it and the region immediately after it.
+ */
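+ /*
+ * Roughly: each item's first and last bytes are marked ITEM_BEGIN and
+ * ITEM_END in the pagelayout[] scratch map allocated below, and a
+ * second pass over the map then flags gaps, overlapping items, and
+ * misaligned item starts.
+ */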
+ himark = dbp->pgsize;
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ switch (ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, &offset)) {
+ case 0:
+ break;
+ case DB_VERIFY_BAD:
+ isbad = 1;
+ continue;
+ case DB_VERIFY_FATAL:
+ isbad = 1;
+ goto err;
+ default:
+ DB_ASSERT(ret != 0);
+ break;
+ }
+
+ /*
+ * We now have a plausible beginning for the item, and we know
+ * its length is safe.
+ *
+ * Mark the beginning and end in pagelayout so we can make sure
+ * items have no overlaps or gaps.
+ */
+ bk = GET_BKEYDATA(dbp, h, i);
+#define ITEM_BEGIN 1
+#define ITEM_END 2
+ if (pagelayout[offset] == 0)
+ pagelayout[offset] = ITEM_BEGIN;
+ else if (pagelayout[offset] == ITEM_BEGIN) {
+ /*
+ * Having two inp entries that point at the same patch
+ * of page is legal if and only if the page is
+ * a btree leaf and they're onpage duplicate keys--
+ * that is, if (i % P_INDX) == 0.
+ */
+ if ((i % P_INDX == 0) && (TYPE(h) == P_LBTREE)) {
+ /* Flag for later. */
+ F_SET(pip, VRFY_HAS_DUPS);
+
+ /* Bump up nentries so we don't undercount. */
+ nentries++;
+
+ /*
+ * We'll check to make sure the end is
+ * equal, too.
+ */
+ isdupitem = 1;
+ } else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
+ }
+ }
+
+ /*
+ * Mark the end. Its location varies with the page type
+ * and the item type.
+ *
+ * If the end already has a sign other than 0, do nothing--
+ * it's an overlap that we'll catch later.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ if (TYPE(h) == P_IBTREE)
+ /* It's a BINTERNAL. */
+ endoff = offset + BINTERNAL_SIZE(bk->len) - 1;
+ else
+ endoff = offset + BKEYDATA_SIZE(bk->len) - 1;
+ break;
+ case B_DUPLICATE:
+ /*
+ * Flag that we have dups; we'll check whether
+ * that's okay during the structure check.
+ */
+ F_SET(pip, VRFY_HAS_DUPS);
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ /*
+ * Overflow entries on internal pages are stored
+ * as the _data_ of a BINTERNAL; overflow entries
+ * on leaf pages are stored as the entire entry.
+ */
+ endoff = offset +
+ ((TYPE(h) == P_IBTREE) ?
+ BINTERNAL_SIZE(BOVERFLOW_SIZE) :
+ BOVERFLOW_SIZE) - 1;
+ break;
+ default:
+ /*
+ * We'll complain later; for now, just mark
+ * a minimum.
+ */
+ endoff = offset + BKEYDATA_SIZE(0) - 1;
+ break;
+ }
+
+ /*
+ * If this is an onpage duplicate key we've seen before,
+ * the end had better coincide too.
+ */
+ if (isdupitem && pagelayout[endoff] != ITEM_END) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
+ isbad = 1;
+ } else if (pagelayout[endoff] == 0)
+ pagelayout[endoff] = ITEM_END;
+ isdupitem = 0;
+
+ /*
+ * There should be no deleted items in a quiescent tree,
+ * except in recno.
+ */
+ if (B_DISSET(bk->type) && TYPE(h) != P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu marked deleted",
+ (u_long)pgno, (u_long)i));
+ }
+
+ /*
+ * Check the type and such of bk--make sure it's reasonable
+ * for the pagetype.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ /*
+ * This is a normal, non-overflow BKEYDATA or BINTERNAL.
+ * The only thing to check is the len, and that's
+ * already been done.
+ */
+ break;
+ case B_DUPLICATE:
+ if (TYPE(h) == P_IBTREE) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate page referenced by internal btree page at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ } else if (TYPE(h) == P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate page referenced by recno page at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ bo = (TYPE(h) == P_IBTREE) ?
+ (BOVERFLOW *)(((BINTERNAL *)bk)->data) :
+ (BOVERFLOW *)bk;
+
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ /* Make sure tlen is reasonable. */
+ if (bo->tlen > dbp->pgsize * vdp->last_pgno) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: impossible tlen %lu, item %lu",
+ (u_long)pgno,
+ (u_long)bo->tlen, (u_long)i));
+ /* Don't save as a child. */
+ break;
+ }
+
+ if (!IS_VALID_PGNO(bo->pgno) || bo->pgno == pgno ||
+ bo->pgno == PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pgno, (u_long)i, (u_long)bo->pgno));
+ /* Don't save as a child. */
+ break;
+ }
+
+ child.pgno = bo->pgno;
+ child.type = (B_TYPE(bk->type) == B_OVERFLOW ?
+ V_OVERFLOW : V_DUPLICATE);
+ child.tlen = bo->tlen;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ break;
+ default:
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu of invalid type %lu",
+ (u_long)pgno, (u_long)i, (u_long)B_TYPE(bk->type)));
+ break;
+ }
+ }
+
+ /*
+ * Now, loop through and make sure the items are contiguous and
+ * non-overlapping.
+ */
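+ /*
+ * initem is a simple state flag: it is 0 while the scan is between
+ * items and 1 while it is inside the byte range of an item whose
+ * ITEM_BEGIN marker has already been seen.
+ */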
+ initem = 0;
+ for (i = himark; i < dbp->pgsize; i++)
+ if (initem == 0)
+ switch (pagelayout[i]) {
+ case 0:
+ /* May be just for alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t)))
+ continue;
+
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: gap between items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ /* Find the end of the gap */
+ for ( ; (size_t)(i + 1) < dbp->pgsize &&
+ pagelayout[i + 1] == 0; i++)
+ ;
+ break;
+ case ITEM_BEGIN:
+ /* We've found an item. Check its alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: offset %lu unaligned",
+ (u_long)pgno, (u_long)i));
+ }
+ initem = 1;
+ nentries++;
+ break;
+ case ITEM_END:
+ /*
+ * We've hit the end of an item even though
+ * we don't think we're in one; must
+ * be an overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overlapping items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ default:
+ /* Should be impossible. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ else
+ switch (pagelayout[i]) {
+ case 0:
+ /* In the middle of an item somewhere. Okay. */
+ break;
+ case ITEM_END:
+ /* End of an item; switch to out-of-item mode.*/
+ initem = 0;
+ break;
+ case ITEM_BEGIN:
+ /*
+ * Hit a second item beginning without an
+ * end. Overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overlapping items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+
+ (void)__os_free(dbp->dbenv, pagelayout);
+
+ /* Verify HOFFSET. */
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)HOFFSET(h), (u_long)himark));
+ isbad = 1;
+ }
+
+err: if (nentriesp != NULL)
+ *nentriesp = nentries;
+
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_itemorder --
+ * Make sure the items on a page sort correctly.
+ *
+ * Assumes that NUM_ENT(h) and inp[0]..inp[NUM_ENT(h) - 1] are
+ * reasonable; be sure that __bam_vrfy_inp has been called first.
+ *
+ * If ovflok is set, it also assumes that overflow page chains
+ * hanging off the current page have been sanity-checked, and so we
+ * can use __bam_cmp to verify their ordering. If it is not set,
+ * and we run into an overflow page, carp and return DB_VERIFY_BAD;
+ * we shouldn't be called if any exist.
+ *
+ * PUBLIC: int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t, int, int, u_int32_t));
+ */
+int
+__bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t nentries;
+ int ovflok, hasdups;
+ u_int32_t flags;
+{
+ DBT dbta, dbtb, dup_1, dup_2, *p1, *p2, *tmp;
+ BTREE *bt;
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int cmp, freedup_1, freedup_2, isbad, ret, t_ret;
+ int (*dupfunc) __P((DB *, const DBT *, const DBT *));
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ void *buf1, *buf2, *tmpbuf;
+
+ /*
+ * We need to work in the ORDERCHKONLY environment where we might
+ * not have a pip, but we also may need to work in contexts where
+ * NUM_ENT isn't safe.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ nentries = pip->entries;
+ } else
+ pip = NULL;
+
+ ret = isbad = 0;
+ bo = NULL; /* Shut up compiler. */
+
+ memset(&dbta, 0, sizeof(DBT));
+ F_SET(&dbta, DB_DBT_REALLOC);
+
+ memset(&dbtb, 0, sizeof(DBT));
+ F_SET(&dbtb, DB_DBT_REALLOC);
+
+ buf1 = buf2 = NULL;
+
+ DB_ASSERT(!LF_ISSET(DB_NOORDERCHK));
+
+ dupfunc = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+ if (TYPE(h) == P_LDUP)
+ func = dupfunc;
+ else {
+ func = __bam_defcmp;
+ if (dbp->bt_internal != NULL) {
+ bt = (BTREE *)dbp->bt_internal;
+ if (bt->bt_compare != NULL)
+ func = bt->bt_compare;
+ }
+ }
+
+ /*
+ * We alternate our use of dbta and dbtb so that we can walk
+ * through the page key-by-key without copying a dbt twice.
+ * p1 is always the dbt for index i - 1, and p2 for index i.
+ */
+ p1 = &dbta;
+ p2 = &dbtb;
+
+ /*
+ * Loop through the entries. nentries ought to contain the
+ * actual count, and so is a safe way to terminate the loop; whether
+ * we inc. by one or two depends on whether we're a leaf page--
+ * on a leaf page, we care only about keys. On internal pages
+ * and LDUP pages, we want to check the order of all entries.
+ *
+ * Note that on IBTREE pages, we start with item 1, since item
+ * 0 doesn't get looked at by __bam_cmp.
+ */
+ for (i = (TYPE(h) == P_IBTREE) ? 1 : 0; i < nentries;
+ i += (TYPE(h) == P_LBTREE) ? P_INDX : O_INDX) {
+ /*
+ * Put key i-1, now in p2, into p1, by swapping DBTs and bufs.
+ */
+ tmp = p1;
+ p1 = p2;
+ p2 = tmp;
+ tmpbuf = buf1;
+ buf1 = buf2;
+ buf2 = tmpbuf;
+
+ /*
+ * Get key i into p2.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(dbp, h, i);
+ if (B_TYPE(bi->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)(bi->data);
+ goto overflow;
+ } else {
+ p2->data = bi->data;
+ p2->size = bi->len;
+ }
+
+ /*
+ * The leftmost key on an internal page must be
+ * len 0, since it's just a placeholder and
+ * automatically sorts less than all keys.
+ *
+ * XXX
+ * This criterion does not currently hold!
+ * See todo list item #1686. Meanwhile, it's harmless
+ * to just not check for it.
+ */
+#if 0
+ if (i == 0 && bi->len != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: lowest key on internal page of nonzero length",
+ (u_long)pgno));
+ }
+#endif
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ bk = GET_BKEYDATA(dbp, h, i);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ goto overflow;
+ } else {
+ p2->data = bk->data;
+ p2->size = bk->len;
+ }
+ break;
+ default:
+ /*
+ * This means our caller screwed up and sent us
+ * an inappropriate page.
+ */
+ TYPE_ERR_PRINT(dbp->dbenv,
+ "__bam_vrfy_itemorder", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
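+ /*
+ * The block guarded by if (0) below is never entered by falling
+ * through; it is reached only via the "goto overflow" jumps in
+ * the switch above, when an overflow item is encountered.
+ */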
+ if (0) {
+ /*
+ * If ovflok != 1, we can't safely go chasing
+ * overflow pages with the normal routines now;
+ * they might be unsafe or nonexistent. Mark this
+ * page as incomplete and return.
+ *
+ * Note that we don't need to worry about freeing
+ * buffers, since they can't have been allocated
+ * if overflow items are unsafe.
+ */
+overflow: if (!ovflok) {
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * Overflow items are safe to chase. Do so.
+ * Fetch the overflow item into p2->data,
+ * NULLing it or reallocing it as appropriate.
+ *
+ * (We set p2->data to buf2 before the call
+ * so we're sure to realloc if we can and if p2
+ * was just pointing at a non-overflow item.)
+ */
+ p2->data = buf2;
+ if ((ret = __db_goff(dbp,
+ p2, bo->tlen, bo->pgno, NULL, NULL)) != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: error %lu in fetching overflow item %lu",
+ (u_long)pgno, (u_long)ret, (u_long)i));
+ }
+ /* In case it got realloc'ed and thus changed. */
+ buf2 = p2->data;
+ }
+
+ /* Compare with the last key. */
+ if (p1->data != NULL && p2->data != NULL) {
+ cmp = func(dbp, p1, p2);
+
+ /* comparison succeeded */
+ if (cmp > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: out-of-order key at entry %lu",
+ (u_long)pgno, (u_long)i));
+ /* proceed */
+ } else if (cmp == 0) {
+ /*
+ * If they compared equally, this
+ * had better be a (sub)database with dups.
+ * Mark it so we can check during the
+ * structure check.
+ */
+ if (pip != NULL)
+ F_SET(pip, VRFY_HAS_DUPS);
+ else if (hasdups == 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: database with no duplicates has duplicated keys",
+ (u_long)pgno));
+ }
+
+ /*
+ * If we're a btree leaf, check to see
+ * if the data items of these on-page dups are
+ * in sorted order. If not, flag this, so
+ * that we can make sure during the
+ * structure checks that the DUPSORT flag
+ * is unset.
+ *
+ * At this point i points to a duplicate key.
+ * Compare the datum before it (same key)
+ * to the datum after it, i.e. i-1 to i+1.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ /*
+ * Unsafe; continue and we'll pick
+ * up the bogus nentries later.
+ */
+ if (i + 1 >= (db_indx_t)nentries)
+ continue;
+
+ /*
+ * We don't bother with clever memory
+ * management with on-page dups,
+ * as it's only really a big win
+ * in the overflow case, and overflow
+ * dups are probably (?) rare.
+ */
+ if (((ret = __bam_safe_getdata(dbp,
+ h, i - 1, ovflok, &dup_1,
+ &freedup_1)) != 0) ||
+ ((ret = __bam_safe_getdata(dbp,
+ h, i + 1, ovflok, &dup_2,
+ &freedup_2)) != 0))
+ goto err;
+
+ /*
+ * If either of the data are NULL,
+ * it's because they're overflows and
+ * it's not safe to chase them now.
+ * Mark an incomplete and return.
+ */
+ if (dup_1.data == NULL ||
+ dup_2.data == NULL) {
+ DB_ASSERT(!ovflok);
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * If the dups are out of order,
+ * flag this. It's not an error
+ * until we do the structure check
+ * and see whether DUPSORT is set.
+ */
+ if (dupfunc(dbp, &dup_1, &dup_2) > 0)
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+
+ if (freedup_1)
+ __os_ufree(dbp->dbenv,
+ dup_1.data);
+ if (freedup_2)
+ __os_ufree(dbp->dbenv,
+ dup_2.data);
+ }
+ }
+ }
+ }
+
+err: if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
+ ret = t_ret;
+
+ if (buf1 != NULL)
+ __os_ufree(dbp->dbenv, buf1);
+ if (buf2 != NULL)
+ __os_ufree(dbp->dbenv, buf2);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_structure --
+ * Verify the tree structure of a btree database (including the master
+ * database containing subdbs).
+ *
+ * PUBLIC: int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *mip, *rip;
+ db_pgno_t root, p;
+ int t_ret, ret;
+ u_int32_t nrecs, level, relen, stflags;
+
+ mip = rip = 0;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &mip)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, (int *)&p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree metadata page observed twice",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ goto err;
+
+ root = mip->root;
+
+ if (root == 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree metadata page has no root",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, root, &rip)) != 0)
+ goto err;
+
+ switch (rip->type) {
+ case P_IBTREE:
+ case P_LBTREE:
+ stflags = flags | ST_TOPLEVEL;
+ if (F_ISSET(mip, VRFY_HAS_DUPS))
+ stflags |= ST_DUPOK;
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT))
+ stflags |= ST_DUPSORT;
+ if (F_ISSET(mip, VRFY_HAS_RECNUMS))
+ stflags |= ST_RECNUM;
+ ret = __bam_vrfy_subtree(dbp,
+ vdp, root, NULL, NULL, stflags, NULL, NULL, NULL);
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ stflags = flags | ST_RECNUM | ST_IS_RECNO | ST_TOPLEVEL;
+ if (mip->re_len > 0)
+ stflags |= ST_RELEN;
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ root, NULL, NULL, stflags, &level, &nrecs, &relen)) != 0)
+ goto err;
+ /*
+ * Even if mip->re_len > 0, re_len may come back zero if the
+ * tree is empty. It should be okay to just skip the check in
+ * this case, since if there are any non-deleted keys at all,
+ * that should never happen.
+ */
+ if (mip->re_len > 0 && relen > 0 && mip->re_len != relen) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno database has bad re_len %lu",
+ (u_long)meta_pgno, (u_long)relen));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ ret = 0;
+ break;
+ case P_LDUP:
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate tree referenced from metadata page",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree root of incorrect type %lu on metadata page",
+ (u_long)meta_pgno, (u_long)rip->type));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
+ ret = t_ret;
+ if (rip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, rip)) != 0) && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_vrfy_subtree--
+ * Verify a subtree (or entire) btree with specified root.
+ *
+ * Note that this is public because it must be called to verify
+ * offpage dup trees, including from hash.
+ *
+ * PUBLIC: int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *,
+ * PUBLIC: void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__bam_vrfy_subtree(dbp,
+ vdp, pgno, l, r, flags, levelp, nrecsp, relenp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ void *l, *r;
+ u_int32_t flags, *levelp, *nrecsp, *relenp;
+{
+ BINTERNAL *li, *ri, *lp, *rp;
+ DB *pgset;
+ DB_MPOOLFILE *mpf;
+ DBC *cc;
+ PAGE *h;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ db_pgno_t next_pgno, prev_pgno;
+ db_recno_t child_nrecs, nrecs;
+ u_int32_t child_level, child_relen, level, relen, stflags;
+ u_int8_t leaf_type;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ int isbad, p, ret, t_ret, toplevel;
+
+ mpf = dbp->mpf;
+ ret = isbad = 0;
+ nrecs = 0;
+ h = NULL;
+ relen = 0;
+ leaf_type = P_INVALID;
+ next_pgno = prev_pgno = PGNO_INVALID;
+ rp = (BINTERNAL *)r;
+ lp = (BINTERNAL *)l;
+
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ cc = NULL;
+ level = pip->bt_level;
+
+ toplevel = LF_ISSET(ST_TOPLEVEL) ? 1 : 0;
+ LF_CLR(ST_TOPLEVEL);
+
+ /*
+ * If this is the root, initialize the vdp's prev- and next-pgno
+ * accounting.
+ *
+ * For each leaf page we hit, we'll want to make sure that
+ * vdp->prev_pgno is the same as pip->prev_pgno and vdp->next_pgno is
+ * our page number. Then, we'll set vdp->next_pgno to pip->next_pgno
+ * and vdp->prev_pgno to our page number, and the next leaf page in
+ * line should be able to do the same verification.
+ */
+ if (toplevel) {
+ /*
+ * Cache the values stored in the vdp so that if we're an
+ * auxiliary tree such as an off-page duplicate set, our
+ * caller's leaf page chain doesn't get lost.
+ */
+ prev_pgno = vdp->prev_pgno;
+ next_pgno = vdp->next_pgno;
+ leaf_type = vdp->leaf_type;
+ vdp->next_pgno = vdp->prev_pgno = PGNO_INVALID;
+ vdp->leaf_type = P_INVALID;
+ }
+
+ /*
+ * We are recursively descending a btree, starting from the root
+ * and working our way out to the leaves.
+ *
+ * There are five cases we need to deal with:
+ * 1. pgno is a recno leaf page. Any children are overflows.
+ * 2. pgno is a duplicate leaf page. Any children
+ * are overflow pages; traverse them, and then return
+ * level and nrecs.
+ * 3. pgno is an ordinary leaf page. Check whether dups are
+ * allowed, and if so, traverse any off-page dups or
+ * overflows. Then return nrecs and level.
+ * 4. pgno is a recno internal page. Recursively check any
+ * child pages, making sure their levels are one lower
+ * and their nrecs sum to ours.
+ * 5. pgno is a btree internal page. Same as #4, plus we
+ * must verify that for each pair of BINTERNAL entries
+ * N and N+1, the leftmost item on N's child sorts
+ * greater than N, and the rightmost item on N's child
+ * sorts less than N+1.
+ *
+ * Furthermore, in any sorted page type (P_LDUP, P_LBTREE, P_IBTREE),
+ * we need to verify the internal sort order is correct if,
+ * due to overflow items, we were not able to do so earlier.
+ */
+ switch (pip->type) {
+ case P_LRECNO:
+ case P_LDUP:
+ case P_LBTREE:
+ /*
+ * Cases 1, 2 and 3.
+ *
+ * We're some sort of leaf page; verify
+ * that our linked list of leaves is consistent.
+ */
+ if (vdp->leaf_type == P_INVALID) {
+ /*
+ * First leaf page. Set the type that all its
+ * successors should be, and verify that our prev_pgno
+ * is PGNO_INVALID.
+ */
+ vdp->leaf_type = pip->type;
+ if (pip->prev_pgno != PGNO_INVALID)
+ goto bad_prev;
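+ /*
+ * (bad_prev is a label inside the else branch below;
+ * jumping there simply reuses its "incorrect
+ * prev_pgno" report.)
+ */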
+ } else {
+ /*
+ * Successor leaf page. Check our type, the previous
+ * page's next_pgno, and our prev_pgno.
+ */
+ if (pip->type != vdp->leaf_type) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unexpected page type %lu found in leaf chain (expected %lu)",
+ (u_long)pip->pgno, (u_long)pip->type,
+ (u_long)vdp->leaf_type));
+ isbad = 1;
+ }
+ if (pip->pgno != vdp->next_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect next_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)vdp->prev_pgno,
+ (u_long)vdp->next_pgno, (u_long)pip->pgno));
+ isbad = 1;
+ }
+ if (pip->prev_pgno != vdp->prev_pgno) {
+bad_prev: EPRINT((dbp->dbenv,
+ "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)pip->pgno, (u_long)pip->prev_pgno,
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+ }
+ vdp->prev_pgno = pip->pgno;
+ vdp->next_pgno = pip->next_pgno;
+
+ /*
+ * Overflow pages are common to all three leaf types;
+ * traverse the child list, looking for overflows.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen,
+ flags | ST_OVFL_LEAF)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* Case 1 */
+ if (pip->type == P_LRECNO) {
+ if (!LF_ISSET(ST_IS_RECNO) &&
+ !(LF_ISSET(ST_DUPOK) && !LF_ISSET(ST_DUPSORT))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno leaf page non-recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+ goto leaf;
+ } else if (LF_ISSET(ST_IS_RECNO)) {
+ /*
+ * It's a non-recno leaf. Had better not be a recno
+ * subtree.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: non-recno leaf page in recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+
+ /* Case 2--no more work. */
+ if (pip->type == P_LDUP)
+ goto leaf;
+
+ /* Case 3 */
+
+ /* Check if we have any dups. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ /* If dups aren't allowed in this btree, trouble. */
+ if (!LF_ISSET(ST_DUPOK)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicates in non-dup btree",
+ (u_long)pgno));
+ } else {
+ /*
+ * We correctly have dups. If any are off-page,
+ * traverse those btrees recursively.
+ */
+ if ((ret =
+ __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child);
+ ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child)) {
+ stflags = flags | ST_RECNUM | ST_DUPSET;
+ /* Skip any overflow entries. */
+ if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(
+ dbp, vdp, child->pgno,
+ stflags)) != 0) {
+ isbad = 1;
+ /* Next child. */
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(
+ dbp, vdp, child->pgno, NULL,
+ NULL, stflags | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
+ if (ret !=
+ DB_VERIFY_BAD)
+ goto err;
+ else
+ isbad = 1;
+ }
+ }
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /*
+ * If VRFY_DUPS_UNSORTED is set,
+ * ST_DUPSORT had better not be.
+ */
+ if (F_ISSET(pip, VRFY_DUPS_UNSORTED) &&
+ LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unsorted duplicate set in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ }
+ }
+ goto leaf;
+ case P_IBTREE:
+ case P_IRECNO:
+ /* We handle these below. */
+ break;
+ default:
+ /*
+ * If a P_IBTREE or P_IRECNO contains a reference to an
+ * invalid page, we'll wind up here; handle it gracefully.
+ * Note that the code at the "done" label assumes that the
+ * current page is a btree/recno one of some sort; this
+ * is not the case here, so we goto err.
+ *
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
+ */
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbp->dbenv,
+ pgno, "btree or recno page");
+ else
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree or recno page is of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Cases 4 & 5: This is a btree or recno internal page. For each child,
+ * recurse, keeping a running count of nrecs and making sure the level
+ * is always reasonable.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_RECNO) {
+ if (pip->type != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_subtree",
+ pgno, pip->type);
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, child->pgno,
+ NULL, NULL, flags, &child_level, &child_nrecs,
+ &child_relen)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RELEN)) {
+ if (relen == 0)
+ relen = child_relen;
+ /*
+ * child_relen may be zero if the child subtree
+ * is empty.
+ */
+ else if (child_relen > 0 &&
+ relen != child_relen) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno page returned bad re_len %lu",
+ (u_long)child->pgno,
+ (u_long)child_relen));
+ }
+ if (relenp)
+ *relenp = relen;
+ }
+ if (LF_ISSET(ST_RECNUM))
+ nrecs += child_nrecs;
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: recno level incorrect: got %lu, expected %lu",
+ (u_long)child->pgno, (u_long)child_level,
+ (u_long)(level - 1)));
+ }
+ } else if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* We're done with case 4. */
+ if (pip->type == P_IRECNO)
+ goto done;
+
+ /*
+ * Case 5. Btree internal pages.
+ * As described above, we need to iterate through all the
+ * items on the page and make sure that our children sort appropriately
+ * with respect to them.
+ *
+ * For each entry, li will be the "left-hand" key for the entry
+ * itself, which must sort lower than all entries on its child;
+ * ri will be the key to its right, which must sort greater.
+ */
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ for (i = 0; i < pip->entries; i += O_INDX) {
+ li = GET_BINTERNAL(dbp, h, i);
+ ri = (i + O_INDX < pip->entries) ?
+ GET_BINTERNAL(dbp, h, i + O_INDX) : NULL;
+
+ /*
+ * The leftmost key is forcibly sorted less than all entries,
+ * so don't bother passing it.
+ */
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno,
+ i == 0 ? NULL : li, ri, flags, &child_level,
+ &child_nrecs, NULL)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RECNUM)) {
+ /*
+ * Keep a running tally on the actual record count so
+ * we can return it to our parent (if we have one) or
+ * compare it to the NRECS field if we're a root page.
+ */
+ nrecs += child_nrecs;
+
+ /*
+ * Make sure the actual record count of the child
+ * is equal to the value in the BINTERNAL structure.
+ */
+ if (li->nrecs != child_nrecs) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu has incorrect record count of %lu, should be %lu",
+ (u_long)pgno, (u_long)i, (u_long)li->nrecs,
+ (u_long)child_nrecs));
+ }
+ }
+
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: Btree level incorrect: got %lu, expected %lu",
+ (u_long)li->pgno,
+ (u_long)child_level, (u_long)(level - 1)));
+ }
+ }
+
+ if (0) {
+leaf: level = LEAFLEVEL;
+ if (LF_ISSET(ST_RECNUM))
+ nrecs = pip->rec_cnt;
+
+ /* XXX
+ * We should verify that the record count on a leaf page
+ * is the sum of the number of keys and the number of
+ * records in its off-page dups. This requires looking
+ * at the page again, however, and it may all be changing
+ * soon, so for now we don't bother.
+ */
+
+ if (LF_ISSET(ST_RELEN) && relenp)
+ *relenp = pip->re_len;
+ }
+done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
+ /*
+ * During the page-by-page pass, item order verification was
+ * not finished due to the presence of overflow items. If
+ * isbad == 0, though, it's now safe to do so, as we've
+ * traversed any child overflow pages. Do it.
+ */
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ vdp, h, pgno, 0, 1, 0, flags)) != 0)
+ goto err;
+ F_CLR(pip, VRFY_INCOMPLETE);
+ }
+
+ /*
+ * It's possible to get to this point with a page that has no
+ * items, but without having detected any sort of failure yet.
+ * Having zero items is legal if it's a leaf--it may be the
+ * root page in an empty tree, or the tree may have been
+ * modified with the DB_REVSPLITOFF flag set (there's no way
+ * to tell from what's on disk). For an internal page,
+ * though, having no items is a problem (all internal pages
+ * must have children).
+ */
+ if (isbad == 0 && ret == 0) {
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ if (NUM_ENT(h) == 0 && ISINTERNAL(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: internal page is empty and should not be",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+ }
+ }
+
+ /*
+ * Our parent has sent us BINTERNAL pointers to parent records
+ * so that we can verify our place with respect to them. If it's
+ * appropriate--we have a default sort function--verify this.
+ */
+ if (isbad == 0 && ret == 0 && !LF_ISSET(DB_NOORDERCHK) && lp != NULL) {
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /*
+ * __bam_vrfy_treeorder needs to know what comparison function
+ * to use. If ST_DUPSET is set, we're in a duplicate tree
+ * and we use the duplicate comparison function; otherwise,
+ * use the btree one. If unset, use the default, of course.
+ */
+ func = LF_ISSET(ST_DUPSET) ? dbp->dup_compare :
+ ((BTREE *)dbp->bt_internal)->bt_compare;
+ if (func == NULL)
+ func = __bam_defcmp;
+
+ if ((ret = __bam_vrfy_treeorder(
+ dbp, pgno, h, lp, rp, func, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+
+ /*
+ * This is guaranteed to succeed for leaf pages, but no harm done.
+ *
+ * Internal pages below the top level do not store their own
+ * record numbers, so we skip them.
+ */
+ if (LF_ISSET(ST_RECNUM) && nrecs != pip->rec_cnt && toplevel) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad record count: has %lu records, claims %lu",
+ (u_long)pgno, (u_long)nrecs, (u_long)pip->rec_cnt));
+ }
+
+ if (levelp)
+ *levelp = level;
+ if (nrecsp)
+ *nrecsp = nrecs;
+
+ pgset = vdp->pgset;
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: linked twice", (u_long)pgno));
+ } else if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+ if (toplevel)
+ /*
+ * The last page's next_pgno in the leaf chain should have been
+ * PGNO_INVALID.
+ */
+ if (vdp->next_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv, "Page %lu: unterminated leaf chain",
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+
+err: if (toplevel) {
+ /* Restore our caller's settings. */
+ vdp->next_pgno = next_pgno;
+ vdp->prev_pgno = prev_pgno;
+ vdp->leaf_type = leaf_type;
+ }
+
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_treeorder --
+ * Verify that the lowest key on a page sorts greater than the
+ * BINTERNAL which points to it (lp), and the highest key
+ * sorts less than the BINTERNAL above that (rp).
+ *
+ * If lp is NULL, this means that it was the leftmost key on the
+ * parent, which (regardless of sort function) sorts less than
+ * all keys. No need to check it.
+ *
+ * If rp is NULL, lp was the highest key on the parent, so there's
+ * no higher key we must sort less than.
+ */
+static int
+__bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
+ DB *dbp;
+ db_pgno_t pgno;
+ PAGE *h;
+ BINTERNAL *lp, *rp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t flags;
+{
+ BOVERFLOW *bo;
+ DBT dbt;
+ db_indx_t last;
+ int ret, cmp;
+
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_MALLOC);
+ ret = 0;
+
+ /*
+ * Empty pages are sorted correctly by definition. We check
+ * to see whether they ought to be empty elsewhere; leaf
+ * pages legally may be.
+ */
+ if (NUM_ENT(h) == 0)
+ return (0);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LDUP:
+ last = NUM_ENT(h) - O_INDX;
+ break;
+ case P_LBTREE:
+ last = NUM_ENT(h) - P_INDX;
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv,
+ "__bam_vrfy_treeorder", pgno, TYPE(h));
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ /*
+ * The key on page h, the child page, is more likely to be
+ * an overflow page, so we pass its offset, rather than lp/rp's,
+ * into __bam_cmp. This will take advantage of __db_moff.
+ */
+
+ /*
+ * Skip first-item check if we're an internal page--the first
+ * entry on an internal page is treated specially by __bam_cmp,
+ * so what's on the page shouldn't matter. (Plus, since we're passing
+ * our page and item 0 to __bam_cmp, we'll sort before our
+ * parent and falsely report a failure.)
+ */
+ if (lp != NULL && TYPE(h) != P_IBTREE) {
+ if (lp->type == B_KEYDATA) {
+ dbt.data = lp->data;
+ dbt.size = lp->len;
+ } else if (lp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)lp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
+ return (EINVAL);
+ }
+
+ /* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) {
+ if (cmp > 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first item on page sorted greater than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "Page %lu: first item on page had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != lp->data)
+ __os_ufree(dbp->dbenv, dbt.data);
+ if (ret != 0)
+ return (ret);
+ }
+
+ if (rp != NULL) {
+ if (rp->type == B_KEYDATA) {
+ dbt.data = rp->data;
+ dbt.size = rp->len;
+ } else if (rp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)rp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
+ return (EINVAL);
+ }
+
+ /* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) {
+ if (cmp < 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: last item on page sorted greater than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "Page %lu: last item on page had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != rp->data)
+ __os_ufree(dbp->dbenv, dbt.data);
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * btree leaf page.
+ *
+ * PUBLIC: int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t,
+ * PUBLIC: PAGE *, void *, int (*)(void *, const void *), DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ DBT *key;
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ db_indx_t i, beg, end, *inp;
+ u_int32_t himark;
+ u_int8_t *pgmap;
+ void *ovflbuf;
+ int t_ret, ret, err_ret;
+
+ /* Shut up lint. */
+ COMPQUIET(end, 0);
+
+ ovflbuf = pgmap = NULL;
+ err_ret = ret = 0;
+ inp = P_INP(dbp, h);
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)(strlen("UNKNOWN") + 1);
+ unkdbt.data = "UNKNOWN";
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &ovflbuf)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pgmap)) != 0)
+ goto err;
+ memset(pgmap, 0, dbp->pgsize);
+ }
+
+ /*
+ * Loop through the inp array, spitting out key/data pairs.
+ *
+ * If we're salvaging normally, loop from 0 through NUM_ENT(h).
+ * If we're being aggressive, loop until we hit the end of the page--
+ * NUM_ENT() may be bogus.
+ */
+ himark = dbp->pgsize;
+ for (i = 0;; i += O_INDX) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL) {
+ /*
+ * Don't return DB_VERIFY_FATAL; it's private
+ * and means only that we can't go on with this
+ * page, not with the whole database. It's
+ * not even an error if we've run into it
+ * after NUM_ENT(h).
+ */
+ ret = (i < NUM_ENT(h)) ? DB_VERIFY_BAD : 0;
+ break;
+ }
+
+ /*
+ * If this returned 0, it's safe to print or (carefully)
+ * try to fetch.
+ */
+ if (ret == 0) {
+ /*
+ * We only want to print deleted items if
+ * DB_AGGRESSIVE is set.
+ */
+ bk = GET_BKEYDATA(dbp, h, i);
+ if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type))
+ continue;
+
+ /*
+ * We're going to go try to print the next item. If
+ * key is non-NULL, we're a dup page, so we've got to
+ * print the key first, unless SA_SKIPFIRSTKEY is set
+ * and we're on the first entry.
+ */
+ if (key != NULL &&
+ (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY)))
+ if ((ret = __db_prdbt(key,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+
+ beg = inp[i];
+ switch (B_TYPE(bk->type)) {
+ case B_DUPLICATE:
+ end = beg + BOVERFLOW_SIZE - 1;
+ /*
+ * If we're not on a normal btree leaf page,
+ * there shouldn't be off-page
+ * dup sets. Something's confused; just
+ * drop it, and the code to pick up unlinked
+ * offpage dup sets will print it out
+ * with key "UNKNOWN" later.
+ */
+ if (pgtype != P_LBTREE)
+ break;
+
+ bo = (BOVERFLOW *)bk;
+
+ /*
+ * If the page number is unreasonable, or
+ * if this is supposed to be a key item,
+ * just spit out "UNKNOWN"--the best we
+ * can do is run into the data items in the
+ * unlinked offpage dup pass.
+ */
+ if (!IS_VALID_PGNO(bo->pgno) ||
+ (i % P_INDX == 0)) {
+ /* Not much to do on failure. */
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ }
+
+ if ((ret = __db_salvage_duptree(dbp,
+ vdp, bo->pgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+
+ break;
+ case B_KEYDATA:
+ end =
+ ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1;
+ dbt.data = bk->data;
+ dbt.size = bk->len;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case B_OVERFLOW:
+ end = beg + BOVERFLOW_SIZE - 1;
+ bo = (BOVERFLOW *)bk;
+ if ((ret = __db_safe_goff(dbp, vdp,
+ bo->pgno, &dbt, &ovflbuf, flags)) != 0) {
+ err_ret = ret;
+ /* We care about err_ret more. */
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ default:
+ /*
+ * We should never get here; __db_vrfy_inpitem
+ * should not be returning 0 if bk->type
+ * is unrecognizable.
+ */
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ /*
+ * If we're being aggressive, mark the beginning
+ * and end of the item; we'll come back and print
+ * whatever "junk" is in the gaps in case we had
+ * any bogus inp elements and thereby missed stuff.
+ */
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ pgmap[beg] = ITEM_BEGIN;
+ pgmap[end] = ITEM_END;
+ }
+ }
+ }
+
+ /*
+ * If i is odd and this is a btree leaf, we've printed out a key but not
+ * a datum; fix this imbalance by printing an "UNKNOWN".
+ */
+ if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret =
+ __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0))
+ err_ret = ret;
+
+err: if (pgmap != NULL)
+ __os_free(dbp->dbenv, pgmap);
+ __os_free(dbp->dbenv, ovflbuf);
+
+ /* Mark this page as done. */
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __bam_salvage_walkdupint --
+ * Walk a known-good btree or recno internal page which is part of
+ * a dup tree, calling __db_salvage_duptree on each child page.
+ *
+ * PUBLIC: int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__bam_salvage_walkdupint(dbp, vdp, h, key, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ DBT *key;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ RINTERNAL *ri;
+ BINTERNAL *bi;
+ int ret, t_ret;
+ db_indx_t i;
+
+ ret = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(dbp, h, i);
+ if ((t_ret = __db_salvage_duptree(dbp,
+ vdp, bi->pgno, key, handle, callback, flags)) != 0)
+ ret = t_ret;
+ break;
+ case P_IRECNO:
+ ri = GET_RINTERNAL(dbp, h, i);
+ if ((t_ret = __db_salvage_duptree(dbp,
+ vdp, ri->pgno, key, handle, callback, flags)) != 0)
+ ret = t_ret;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "__bam_salvage_walkdupint called on non-int. page");
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+ /* Pass SA_SKIPFIRSTKEY, if set, on to the 0th child only. */
+ flags &= ~LF_ISSET(SA_SKIPFIRSTKEY);
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_meta2pgset --
+ * Given a known-good meta page, record in the pgset database the
+ * db_pgno_t's corresponding to the pages in the btree.
+ *
+ * We do this by a somewhat sleazy method, to avoid having to traverse the
+ * btree structure neatly: we walk down the left side to the very
+ * first leaf page, then we mark all the pages in the chain of
+ * NEXT_PGNOs (being wary of cycles and invalid ones), then we
+ * consolidate our scratch array into a nice list, and return. This
+ * avoids the memory management hassles of recursion and the
+ * trouble of walking internal pages--they just don't matter, except
+ * for the left branch.
+ *
+ * PUBLIC: int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: u_int32_t, DB *));
+ */
+int
+__bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *btmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ BINTERNAL *bi;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_pgno_t current, p;
+ int err_ret, ret;
+
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = err_ret = 0;
+ DB_ASSERT(pgset != NULL);
+ for (current = btmeta->root;;) {
+ if (!IS_VALID_PGNO(current) || current == PGNO(btmeta)) {
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = mpf->get(mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if ((ret = __bam_vrfy(dbp,
+ vdp, h, current, flags | DB_NOORDERCHK)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+ if (TYPE(h) == P_IBTREE) {
+ bi = GET_BINTERNAL(dbp, h, 0);
+ current = bi->pgno;
+ } else { /* P_IRECNO */
+ ri = GET_RINTERNAL(dbp, h, 0);
+ current = ri->pgno;
+ }
+ break;
+ case P_LBTREE:
+ case P_LRECNO:
+ goto traverse;
+ default:
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+ /*
+ * At this point, current is the pgno of leaf page h, the 0th in the
+ * tree we're concerned with.
+ */
+traverse:
+ while (IS_VALID_PGNO(current) && current != PGNO_INVALID) {
+ if (h == NULL && (ret = mpf->get(mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ break;
+ }
+
+ if ((ret = __db_vrfy_pgset_get(pgset, current, (int *)&p)) != 0)
+ goto err;
+
+ if (p != 0) {
+ /*
+ * We've found a cycle. Return success anyway--
+ * our caller may as well use however much of
+ * the pgset we've come up with.
+ */
+ break;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, current)) != 0)
+ goto err;
+
+ current = NEXT_PGNO(h);
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+err: if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+
+ return (ret == 0 ? err_ret : ret);
+}
+
+/*
+ * __bam_safe_getdata --
+ *
+ * Utility function for __bam_vrfy_itemorder. Safely gets the datum at
+ * index i, page h, and sticks it in DBT dbt. If ovflok is 1 and i's an
+ * overflow item, we use __db_goff to fetch the item and signal that we
+ * need to free dbt->data; if ovflok is 0, we leave the DBT zeroed.
+ */
+static int
+__bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbtp)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t i;
+ int ovflok;
+ DBT *dbt;
+ int *freedbtp;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+
+ memset(dbt, 0, sizeof(DBT));
+ *freedbtp = 0;
+
+ bk = GET_BKEYDATA(dbp, h, i);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ if (!ovflok)
+ return (0);
+
+ bo = (BOVERFLOW *)bk;
+ F_SET(dbt, DB_DBT_MALLOC);
+
+ *freedbtp = 1;
+ return (__db_goff(dbp, dbt, bo->tlen, bo->pgno, NULL, NULL));
+ } else {
+ dbt->data = bk->data;
+ dbt->size = bk->len;
+ }
+
+ return (0);
+}
diff --git a/libdb/btree/btree.src b/libdb/btree/btree.src
new file mode 100644
index 0000000..514c6e6
--- /dev/null
+++ b/libdb/btree/btree.src
@@ -0,0 +1,208 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __bam
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/btree.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * NOTE: pg_alloc and pg_free have been moved to db.src, where they belong.
+ */
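+
+/*
+ * Each BEGIN ... END block below describes one log record type:
+ * BEGIN gives the record's name and its numeric type id, and each
+ * field line gives a field class (ARG, DB, DBT, PGDBT, POINTER,
+ * WRLOCK, ...), the field's name, its C type, and the printf
+ * format used when dumping it. gen_rec.awk expands these
+ * descriptions into the __bam_*_log, _read, _print and _getpgnos
+ * routines in btree_auto.c.
+ */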
+
+/*
+ * BTREE-split: used to log a page split.
+ *
+ * left: the page number for the low-order contents.
+ * llsn: the left page's original LSN.
+ * right: the page number for the high-order contents.
+ * rlsn: the right page's original LSN.
+ * indx: the number of entries that went to the left page.
+ * npgno: the next page number
+ * nlsn: the next page's original LSN (or 0 if no next page).
+ * root_pgno: the root page number
+ * pg: the split page's contents before the split.
+ * opflags: SPL_NRECS: if splitting a tree that maintains a record count.
+ */
+BEGIN split 62
+DB fileid int32_t ld
+WRLOCK left db_pgno_t lu
+POINTER llsn DB_LSN * lu
+WRLOCK right db_pgno_t lu
+POINTER rlsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG npgno db_pgno_t lu
+POINTER nlsn DB_LSN * lu
+WRLOCKNZ root_pgno db_pgno_t lu
+PGDBT pg DBT s
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-rsplit: used to log a reverse-split
+ *
+ * pgno: the page number of the page copied over the root.
+ * pgdbt: the page being copied on the root page.
+ * root_pgno: the root page number.
+ * nrec: the tree's record count.
+ * rootent: last entry on the root page.
+ * rootlsn: the root page's original lsn.
+ */
+BEGIN rsplit 63
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT pgdbt DBT s
+WRLOCK root_pgno db_pgno_t lu
+ARG nrec db_pgno_t lu
+DBT rootent DBT s
+POINTER rootlsn DB_LSN * lu
+END
+
+/*
+ * BTREE-adj: used to log the adjustment of an index.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index adjusted.
+ * indx_copy: the index to copy if inserting.
+ * is_insert: 0 if a delete, 1 if an insert.
+ */
+BEGIN adj 55
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG indx_copy u_int32_t lu
+ARG is_insert u_int32_t lu
+END
+
+/*
+ * BTREE-cadjust: used to adjust the count change in an internal page.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be adjusted.
+ * adjust: the signed adjustment.
+ * opflags: CAD_UPDATEROOT: if root page count was adjusted.
+ */
+BEGIN cadjust 56
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG adjust int32_t ld
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-cdel: used to log the intent-to-delete of a cursor record.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be deleted.
+ */
+BEGIN cdel 57
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+END
+
+/*
+ * BTREE-repl: used to log the replacement of an item.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index of the replaced item.
+ * isdeleted: set if the item was marked deleted.
+ * orig: the original data.
+ * repl: the replacement data.
+ * prefix: the length of the prefix shared by the original and replacement.
+ * suffix: the length of the suffix shared by the original and replacement.
+ */
+BEGIN repl 58
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG isdeleted u_int32_t lu
+DBT orig DBT s
+DBT repl DBT s
+ARG prefix u_int32_t lu
+ARG suffix u_int32_t lu
+END
+
+/*
+ * BTREE-root: log the assignment of a root btree page.
+ */
+BEGIN root 59
+DB fileid int32_t ld
+WRLOCK meta_pgno db_pgno_t lu
+WRLOCK root_pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+END
+
+/*
+ * BTREE-curadj: undo cursor adjustments on txn abort.
+ * Should only be processed during DB_TXN_ABORT.
+ * NOTE: the first_indx field gets used to hold a
+ * signed index adjustment in one case;
+ * care should be taken if its size is changed.
+ */
+BEGIN curadj 64
+/* Fileid of db affected. */
+DB fileid int32_t ld
+/* Which adjustment. */
+ARG mode db_ca_mode ld
+/* Page entry is from. */
+ARG from_pgno db_pgno_t lu
+/* Page entry went to. */
+ARG to_pgno db_pgno_t lu
+/* Left page of root split. */
+ARG left_pgno db_pgno_t lu
+/* First index of dup set. Also used as adjustment. */
+ARG first_indx u_int32_t lu
+/* Index entry is from. */
+ARG from_indx u_int32_t lu
+/* Index where entry went. */
+ARG to_indx u_int32_t lu
+END
+
+/*
+ * BTREE-rcuradj: undo cursor adjustments on txn abort in
+ * renumbering recno trees.
+ * Should only be processed during DB_TXN_ABORT.
+ */
+BEGIN rcuradj 65
+/* Fileid of db affected. */
+DB fileid int32_t ld
+/* Which adjustment. */
+ARG mode ca_recno_arg ld
+/* Root page number. */
+ARG root db_pgno_t ld
+/* Recno of the adjustment. */
+ARG recno db_recno_t ld
+/* Order number of the adjustment. */
+ARG order u_int32_t ld
+END
diff --git a/libdb/btree/btree_auto.c b/libdb/btree/btree_auto.c
new file mode 100644
index 0000000..e94aca2
--- /dev/null
+++ b/libdb/btree/btree_auto.c
@@ -0,0 +1,2649 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/btree.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
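+/*
+ * Each record built by the logging routines below starts with the
+ * common header fields (record type, transaction id, and the
+ * transaction's previous LSN), followed by the per-record fields in
+ * the order they are declared in btree.src; the matching _read
+ * routine unpacks them in the same order.
+ */
+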
+/*
+ * PUBLIC: int __bam_split_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t,
+ * PUBLIC: db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, u_int32_t));
+ */
+int
+__bam_split_log(dbp, txnid, ret_lsnp, flags, left, llsn, right, rlsn, indx,
+ npgno, nlsn, root_pgno, pg, opflags)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t left;
+ DB_LSN * llsn;
+ db_pgno_t right;
+ DB_LSN * rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN * nlsn;
+ db_pgno_t root_pgno;
+ const DBT *pg;
+ u_int32_t opflags;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_split;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*llsn)
+ + sizeof(u_int32_t)
+ + sizeof(*rlsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*nlsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (pg == NULL ? 0 : pg->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)left;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (llsn != NULL)
+ memcpy(bp, llsn, sizeof(*llsn));
+ else
+ memset(bp, 0, sizeof(*llsn));
+ bp += sizeof(*llsn);
+
+ uinttmp = (u_int32_t)right;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (rlsn != NULL)
+ memcpy(bp, rlsn, sizeof(*rlsn));
+ else
+ memset(bp, 0, sizeof(*rlsn));
+ bp += sizeof(*rlsn);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)npgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (nlsn != NULL)
+ memcpy(bp, nlsn, sizeof(*nlsn));
+ else
+ memset(bp, 0, sizeof(*nlsn));
+ bp += sizeof(*nlsn);
+
+ uinttmp = (u_int32_t)root_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pg == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pg->size, sizeof(pg->size));
+ bp += sizeof(pg->size);
+ memcpy(bp, pg->data, pg->size);
+ bp += pg->size;
+ }
+
+ uinttmp = (u_int32_t)opflags;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_split_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_split_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_split_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_split_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_split_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->left;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->right;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ if (argp->root_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->root_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_split_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_split_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_split_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_split: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tleft: %lu\n", (u_long)argp->left);
+ (void)printf("\tllsn: [%lu][%lu]\n",
+ (u_long)argp->llsn.file, (u_long)argp->llsn.offset);
+ (void)printf("\tright: %lu\n", (u_long)argp->right);
+ (void)printf("\trlsn: [%lu][%lu]\n",
+ (u_long)argp->rlsn.file, (u_long)argp->rlsn.offset);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tnpgno: %lu\n", (u_long)argp->npgno);
+ (void)printf("\tnlsn: [%lu][%lu]\n",
+ (u_long)argp->nlsn.file, (u_long)argp->nlsn.offset);
+ (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ (void)printf("\tpg: ");
+ for (i = 0; i < argp->pg.size; i++) {
+ ch = ((u_int8_t *)argp->pg.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\topflags: %lu\n", (u_long)argp->opflags);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **));
+ */
+int
+__bam_split_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_split_args **argpp;
+{
+ __bam_split_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_split_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->left = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->llsn, bp, sizeof(argp->llsn));
+ bp += sizeof(argp->llsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->right = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->rlsn, bp, sizeof(argp->rlsn));
+ bp += sizeof(argp->rlsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->npgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->nlsn, bp, sizeof(argp->nlsn));
+ bp += sizeof(argp->nlsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->root_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->pg, 0, sizeof(argp->pg));
+ memcpy(&argp->pg.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pg.data = bp;
+ bp += argp->pg.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opflags = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_rsplit_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t,
+ * PUBLIC: const DBT *, DB_LSN *));
+ */
+int
+__bam_rsplit_log(dbp, txnid, ret_lsnp, flags, pgno, pgdbt, root_pgno, nrec, rootent,
+ rootlsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ const DBT *pgdbt;
+ db_pgno_t root_pgno;
+ db_pgno_t nrec;
+ const DBT *rootent;
+ DB_LSN * rootlsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_rsplit;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (pgdbt == NULL ? 0 : pgdbt->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (rootent == NULL ? 0 : rootent->size)
+ + sizeof(*rootlsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pgdbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pgdbt->size, sizeof(pgdbt->size));
+ bp += sizeof(pgdbt->size);
+ memcpy(bp, pgdbt->data, pgdbt->size);
+ bp += pgdbt->size;
+ }
+
+ uinttmp = (u_int32_t)root_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)nrec;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (rootent == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &rootent->size, sizeof(rootent->size));
+ bp += sizeof(rootent->size);
+ memcpy(bp, rootent->data, rootent->size);
+ bp += rootent->size;
+ }
+
+ if (rootlsn != NULL)
+ memcpy(bp, rootlsn, sizeof(*rootlsn));
+ else
+ memset(bp, 0, sizeof(*rootlsn));
+ bp += sizeof(*rootlsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_rsplit_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_rsplit_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_rsplit_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_rsplit_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_rsplit_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 2)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->root_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_rsplit_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_rsplit_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_rsplit_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_rsplit: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tpgdbt: ");
+ for (i = 0; i < argp->pgdbt.size; i++) {
+ ch = ((u_int8_t *)argp->pgdbt.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ (void)printf("\tnrec: %lu\n", (u_long)argp->nrec);
+ (void)printf("\trootent: ");
+ for (i = 0; i < argp->rootent.size; i++) {
+ ch = ((u_int8_t *)argp->rootent.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\trootlsn: [%lu][%lu]\n",
+ (u_long)argp->rootlsn.file, (u_long)argp->rootlsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **));
+ */
+int
+__bam_rsplit_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_rsplit_args **argpp;
+{
+ __bam_rsplit_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_rsplit_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->pgdbt, 0, sizeof(argp->pgdbt));
+ memcpy(&argp->pgdbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pgdbt.data = bp;
+ bp += argp->pgdbt.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->root_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->nrec = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->rootent, 0, sizeof(argp->rootent));
+ memcpy(&argp->rootent.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->rootent.data = bp;
+ bp += argp->rootent.size;
+
+ memcpy(&argp->rootlsn, bp, sizeof(argp->rootlsn));
+ bp += sizeof(argp->rootlsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_adj_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_adj_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, indx_copy, is_insert)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ u_int32_t indx_copy;
+ u_int32_t is_insert;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_adj;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx_copy;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)is_insert;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_adj_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_adj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_adj_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_adj_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_adj_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_adj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_adj_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_adj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_adj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tindx_copy: %lu\n", (u_long)argp->indx_copy);
+ (void)printf("\tis_insert: %lu\n", (u_long)argp->is_insert);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **));
+ */
+int
+__bam_adj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_adj_args **argpp;
+{
+ __bam_adj_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_adj_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx_copy = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->is_insert = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_cadjust_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, int32_t, u_int32_t));
+ */
+int
+__bam_cadjust_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, adjust, opflags)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ int32_t adjust;
+ u_int32_t opflags;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_cadjust;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)adjust;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)opflags;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_cadjust_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_cadjust_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_cadjust_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_cadjust_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_cadjust_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_cadjust_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_cadjust_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_cadjust_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_cadjust: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tadjust: %ld\n", (long)argp->adjust);
+ (void)printf("\topflags: %lu\n", (u_long)argp->opflags);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_cadjust_read __P((DB_ENV *, void *,
+ * PUBLIC: __bam_cadjust_args **));
+ */
+int
+__bam_cadjust_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_cadjust_args **argpp;
+{
+ __bam_cadjust_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_cadjust_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->adjust = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opflags = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_cdel_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, u_int32_t));
+ */
+int
+__bam_cdel_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_cdel;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_cdel_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_cdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_cdel_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_cdel_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_cdel_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_cdel_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_cdel_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_cdel_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_cdel: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **));
+ */
+int
+__bam_cdel_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_cdel_args **argpp;
+{
+ __bam_cdel_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_cdel_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_repl_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t,
+ * PUBLIC: const DBT *, const DBT *, u_int32_t, u_int32_t));
+ */
+int
+__bam_repl_log(dbp, txnid, ret_lsnp, flags, pgno, lsn, indx, isdeleted, orig,
+ repl, prefix, suffix)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ u_int32_t indx;
+ u_int32_t isdeleted;
+ const DBT *orig;
+ const DBT *repl;
+ u_int32_t prefix;
+ u_int32_t suffix;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_repl;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (orig == NULL ? 0 : orig->size)
+ + sizeof(u_int32_t) + (repl == NULL ? 0 : repl->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)isdeleted;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (orig == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &orig->size, sizeof(orig->size));
+ bp += sizeof(orig->size);
+ memcpy(bp, orig->data, orig->size);
+ bp += orig->size;
+ }
+
+ if (repl == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &repl->size, sizeof(repl->size));
+ bp += sizeof(repl->size);
+ memcpy(bp, repl->data, repl->size);
+ bp += repl->size;
+ }
+
+ uinttmp = (u_int32_t)prefix;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)suffix;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_repl_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_repl_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_repl_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_repl_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_repl_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_repl_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_repl_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_repl_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_repl: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tisdeleted: %lu\n", (u_long)argp->isdeleted);
+ (void)printf("\torig: ");
+ for (i = 0; i < argp->orig.size; i++) {
+ ch = ((u_int8_t *)argp->orig.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\trepl: ");
+ for (i = 0; i < argp->repl.size; i++) {
+ ch = ((u_int8_t *)argp->repl.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tprefix: %lu\n", (u_long)argp->prefix);
+ (void)printf("\tsuffix: %lu\n", (u_long)argp->suffix);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **));
+ */
+int
+__bam_repl_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_repl_args **argpp;
+{
+ __bam_repl_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_repl_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->isdeleted = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->orig, 0, sizeof(argp->orig));
+ memcpy(&argp->orig.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->orig.data = bp;
+ bp += argp->orig.size;
+
+ memset(&argp->repl, 0, sizeof(argp->repl));
+ memcpy(&argp->repl.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->repl.data = bp;
+ bp += argp->repl.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->prefix = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->suffix = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_root_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, db_pgno_t, DB_LSN *));
+ */
+int
+__bam_root_log(dbp, txnid, ret_lsnp, flags, meta_pgno, root_pgno, meta_lsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t meta_pgno;
+ db_pgno_t root_pgno;
+ DB_LSN * meta_lsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_root;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*meta_lsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)root_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_root_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_root_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_root_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __bam_root_args *argp;
+ int ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __bam_root_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 2)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->meta_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->root_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_root_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_root_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_root_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_root: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\troot_pgno: %lu\n", (u_long)argp->root_pgno);
+ (void)printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **));
+ */
+int
+__bam_root_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_root_args **argpp;
+{
+ __bam_root_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_root_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->root_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_curadj_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_ca_mode, db_pgno_t, db_pgno_t, db_pgno_t,
+ * PUBLIC: u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__bam_curadj_log(dbp, txnid, ret_lsnp, flags, mode, from_pgno, to_pgno, left_pgno, first_indx,
+ from_indx, to_indx)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_ca_mode mode;
+ db_pgno_t from_pgno;
+ db_pgno_t to_pgno;
+ db_pgno_t left_pgno;
+ u_int32_t first_indx;
+ u_int32_t from_indx;
+ u_int32_t to_indx;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_curadj;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)from_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)to_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)left_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)first_indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)from_indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)to_indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_curadj_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_curadj_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_curadj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_curadj_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_curadj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_curadj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmode: %ld\n", (long)argp->mode);
+ (void)printf("\tfrom_pgno: %lu\n", (u_long)argp->from_pgno);
+ (void)printf("\tto_pgno: %lu\n", (u_long)argp->to_pgno);
+ (void)printf("\tleft_pgno: %lu\n", (u_long)argp->left_pgno);
+ (void)printf("\tfirst_indx: %lu\n", (u_long)argp->first_indx);
+ (void)printf("\tfrom_indx: %lu\n", (u_long)argp->from_indx);
+ (void)printf("\tto_indx: %lu\n", (u_long)argp->to_indx);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **));
+ */
+int
+__bam_curadj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_curadj_args **argpp;
+{
+ __bam_curadj_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_curadj_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mode = (db_ca_mode)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->from_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->to_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->left_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->first_indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->from_indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->to_indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_rcuradj_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, ca_recno_arg, db_pgno_t, db_recno_t, u_int32_t));
+ */
+int
+__bam_rcuradj_log(dbp, txnid, ret_lsnp, flags, mode, root, recno, order)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ ca_recno_arg mode;
+ db_pgno_t root;
+ db_recno_t recno;
+ u_int32_t order;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___bam_rcuradj;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0) {
+ /* Don't leak the log record buffer on the error path. */
+ __os_free(dbenv, logrec.data);
+ return (ret);
+ }
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)root;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)recno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)order;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__bam_rcuradj_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __bam_rcuradj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_rcuradj_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__bam_rcuradj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __bam_rcuradj_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __bam_rcuradj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__bam_rcuradj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmode: %ld\n", (long)argp->mode);
+ (void)printf("\troot: %ld\n", (long)argp->root);
+ (void)printf("\trecno: %ld\n", (long)argp->recno);
+ (void)printf("\torder: %ld\n", (long)argp->order);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_rcuradj_read __P((DB_ENV *, void *,
+ * PUBLIC: __bam_rcuradj_args **));
+ */
+int
+__bam_rcuradj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __bam_rcuradj_args **argpp;
+{
+ __bam_rcuradj_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__bam_rcuradj_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mode = (ca_recno_arg)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->root = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->recno = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->order = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__bam_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_split_print, DB___bam_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rsplit_print, DB___bam_rsplit)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_adj_print, DB___bam_adj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cadjust_print, DB___bam_cadjust)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cdel_print, DB___bam_cdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_repl_print, DB___bam_repl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_root_print, DB___bam_root)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_curadj_print, DB___bam_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rcuradj_print, DB___bam_rcuradj)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__bam_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_split_getpgnos, DB___bam_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rsplit_getpgnos, DB___bam_rsplit)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_adj_getpgnos, DB___bam_adj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cadjust_getpgnos, DB___bam_cadjust)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cdel_getpgnos, DB___bam_cdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_repl_getpgnos, DB___bam_repl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_root_getpgnos, DB___bam_root)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_curadj_getpgnos, DB___bam_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rcuradj_getpgnos, DB___bam_rcuradj)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __bam_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__bam_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_split_recover, DB___bam_split)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rsplit_recover, DB___bam_rsplit)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_adj_recover, DB___bam_adj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cadjust_recover, DB___bam_cadjust)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_cdel_recover, DB___bam_cdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_repl_recover, DB___bam_repl)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_root_recover, DB___bam_root)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_curadj_recover, DB___bam_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __bam_rcuradj_recover, DB___bam_rcuradj)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/build_unix/.IGNORE_ME b/libdb/build_unix/.IGNORE_ME
new file mode 100644
index 0000000..558fd49
--- /dev/null
+++ b/libdb/build_unix/.IGNORE_ME
@@ -0,0 +1,3 @@
+Some combinations of the gzip and tar archive exploders found
+on Linux systems ignore directories that don't have any files
+(other than symbolic links) in them. So, here's a file.
diff --git a/libdb/build_vxworks/BerkeleyDB.wpj b/libdb/build_vxworks/BerkeleyDB.wpj
new file mode 100755
index 0000000..45b15a6
--- /dev/null
+++ b/libdb/build_vxworks/BerkeleyDB.wpj
@@ -0,0 +1,3506 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/.. \
+ -DDIAGNOSTIC \
+ -DDEBUG
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/..
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB.out
+
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM_debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM_release PENTIUM_debug
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../btree/bt_compare.c \
+ $(PRJ_DIR)/../btree/bt_conv.c \
+ $(PRJ_DIR)/../btree/bt_curadj.c \
+ $(PRJ_DIR)/../btree/bt_cursor.c \
+ $(PRJ_DIR)/../btree/bt_delete.c \
+ $(PRJ_DIR)/../btree/bt_method.c \
+ $(PRJ_DIR)/../btree/bt_open.c \
+ $(PRJ_DIR)/../btree/bt_put.c \
+ $(PRJ_DIR)/../btree/bt_rec.c \
+ $(PRJ_DIR)/../btree/bt_reclaim.c \
+ $(PRJ_DIR)/../btree/bt_recno.c \
+ $(PRJ_DIR)/../btree/bt_rsearch.c \
+ $(PRJ_DIR)/../btree/bt_search.c \
+ $(PRJ_DIR)/../btree/bt_split.c \
+ $(PRJ_DIR)/../btree/bt_stat.c \
+ $(PRJ_DIR)/../btree/bt_upgrade.c \
+ $(PRJ_DIR)/../btree/bt_verify.c \
+ $(PRJ_DIR)/../btree/btree_auto.c \
+ $(PRJ_DIR)/../clib/getopt.c \
+ $(PRJ_DIR)/../clib/snprintf.c \
+ $(PRJ_DIR)/../clib/strcasecmp.c \
+ $(PRJ_DIR)/../clib/strdup.c \
+ $(PRJ_DIR)/../clib/vsnprintf.c \
+ $(PRJ_DIR)/../common/db_byteorder.c \
+ $(PRJ_DIR)/../common/db_err.c \
+ $(PRJ_DIR)/../common/db_getlong.c \
+ $(PRJ_DIR)/../common/db_idspace.c \
+ $(PRJ_DIR)/../common/db_log2.c \
+ $(PRJ_DIR)/../common/util_arg.c \
+ $(PRJ_DIR)/../common/util_cache.c \
+ $(PRJ_DIR)/../common/util_log.c \
+ $(PRJ_DIR)/../common/util_sig.c \
+ $(PRJ_DIR)/../db/crdel_auto.c \
+ $(PRJ_DIR)/../db/crdel_rec.c \
+ $(PRJ_DIR)/../db/db.c \
+ $(PRJ_DIR)/../db/db_am.c \
+ $(PRJ_DIR)/../db/db_auto.c \
+ $(PRJ_DIR)/../db/db_cam.c \
+ $(PRJ_DIR)/../db/db_conv.c \
+ $(PRJ_DIR)/../db/db_dispatch.c \
+ $(PRJ_DIR)/../db/db_dup.c \
+ $(PRJ_DIR)/../db/db_iface.c \
+ $(PRJ_DIR)/../db/db_join.c \
+ $(PRJ_DIR)/../db/db_meta.c \
+ $(PRJ_DIR)/../db/db_method.c \
+ $(PRJ_DIR)/../db/db_open.c \
+ $(PRJ_DIR)/../db/db_overflow.c \
+ $(PRJ_DIR)/../db/db_pr.c \
+ $(PRJ_DIR)/../db/db_rec.c \
+ $(PRJ_DIR)/../db/db_reclaim.c \
+ $(PRJ_DIR)/../db/db_remove.c \
+ $(PRJ_DIR)/../db/db_rename.c \
+ $(PRJ_DIR)/../db/db_ret.c \
+ $(PRJ_DIR)/../db/db_truncate.c \
+ $(PRJ_DIR)/../db/db_upg.c \
+ $(PRJ_DIR)/../db/db_upg_opd.c \
+ $(PRJ_DIR)/../db/db_vrfy.c \
+ $(PRJ_DIR)/../db/db_vrfyutil.c \
+ $(PRJ_DIR)/../dbreg/dbreg.c \
+ $(PRJ_DIR)/../dbreg/dbreg_auto.c \
+ $(PRJ_DIR)/../dbreg/dbreg_rec.c \
+ $(PRJ_DIR)/../dbreg/dbreg_util.c \
+ $(PRJ_DIR)/../env/db_salloc.c \
+ $(PRJ_DIR)/../env/db_shash.c \
+ $(PRJ_DIR)/../env/env_file.c \
+ $(PRJ_DIR)/../env/env_method.c \
+ $(PRJ_DIR)/../env/env_open.c \
+ $(PRJ_DIR)/../env/env_recover.c \
+ $(PRJ_DIR)/../env/env_region.c \
+ $(PRJ_DIR)/../fileops/fileops_auto.c \
+ $(PRJ_DIR)/../fileops/fop_basic.c \
+ $(PRJ_DIR)/../fileops/fop_rec.c \
+ $(PRJ_DIR)/../fileops/fop_util.c \
+ $(PRJ_DIR)/../hash/hash.c \
+ $(PRJ_DIR)/../hash/hash_auto.c \
+ $(PRJ_DIR)/../hash/hash_conv.c \
+ $(PRJ_DIR)/../hash/hash_dup.c \
+ $(PRJ_DIR)/../hash/hash_func.c \
+ $(PRJ_DIR)/../hash/hash_meta.c \
+ $(PRJ_DIR)/../hash/hash_method.c \
+ $(PRJ_DIR)/../hash/hash_open.c \
+ $(PRJ_DIR)/../hash/hash_page.c \
+ $(PRJ_DIR)/../hash/hash_rec.c \
+ $(PRJ_DIR)/../hash/hash_reclaim.c \
+ $(PRJ_DIR)/../hash/hash_stat.c \
+ $(PRJ_DIR)/../hash/hash_upgrade.c \
+ $(PRJ_DIR)/../hash/hash_verify.c \
+ $(PRJ_DIR)/../hmac/hmac.c \
+ $(PRJ_DIR)/../hmac/sha1.c \
+ $(PRJ_DIR)/../hsearch/hsearch.c \
+ $(PRJ_DIR)/../lock/lock.c \
+ $(PRJ_DIR)/../lock/lock_deadlock.c \
+ $(PRJ_DIR)/../lock/lock_method.c \
+ $(PRJ_DIR)/../lock/lock_region.c \
+ $(PRJ_DIR)/../lock/lock_stat.c \
+ $(PRJ_DIR)/../lock/lock_util.c \
+ $(PRJ_DIR)/../log/log.c \
+ $(PRJ_DIR)/../log/log_archive.c \
+ $(PRJ_DIR)/../log/log_compare.c \
+ $(PRJ_DIR)/../log/log_get.c \
+ $(PRJ_DIR)/../log/log_method.c \
+ $(PRJ_DIR)/../log/log_put.c \
+ $(PRJ_DIR)/../mp/mp_alloc.c \
+ $(PRJ_DIR)/../mp/mp_bh.c \
+ $(PRJ_DIR)/../mp/mp_fget.c \
+ $(PRJ_DIR)/../mp/mp_fopen.c \
+ $(PRJ_DIR)/../mp/mp_fput.c \
+ $(PRJ_DIR)/../mp/mp_fset.c \
+ $(PRJ_DIR)/../mp/mp_method.c \
+ $(PRJ_DIR)/../mp/mp_region.c \
+ $(PRJ_DIR)/../mp/mp_register.c \
+ $(PRJ_DIR)/../mp/mp_stat.c \
+ $(PRJ_DIR)/../mp/mp_sync.c \
+ $(PRJ_DIR)/../mp/mp_trickle.c \
+ $(PRJ_DIR)/../mutex/mut_tas.c \
+ $(PRJ_DIR)/../mutex/mutex.c \
+ $(PRJ_DIR)/../os/os_alloc.c \
+ $(PRJ_DIR)/../os/os_clock.c \
+ $(PRJ_DIR)/../os/os_dir.c \
+ $(PRJ_DIR)/../os/os_errno.c \
+ $(PRJ_DIR)/../os/os_fid.c \
+ $(PRJ_DIR)/../os/os_fsync.c \
+ $(PRJ_DIR)/../os/os_handle.c \
+ $(PRJ_DIR)/../os/os_id.c \
+ $(PRJ_DIR)/../os/os_method.c \
+ $(PRJ_DIR)/../os/os_oflags.c \
+ $(PRJ_DIR)/../os/os_open.c \
+ $(PRJ_DIR)/../os/os_region.c \
+ $(PRJ_DIR)/../os/os_rename.c \
+ $(PRJ_DIR)/../os/os_root.c \
+ $(PRJ_DIR)/../os/os_rpath.c \
+ $(PRJ_DIR)/../os/os_rw.c \
+ $(PRJ_DIR)/../os/os_seek.c \
+ $(PRJ_DIR)/../os/os_sleep.c \
+ $(PRJ_DIR)/../os/os_spin.c \
+ $(PRJ_DIR)/../os/os_stat.c \
+ $(PRJ_DIR)/../os/os_tmpdir.c \
+ $(PRJ_DIR)/../os/os_unlink.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_config.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_map.c \
+ $(PRJ_DIR)/../qam/qam.c \
+ $(PRJ_DIR)/../qam/qam_auto.c \
+ $(PRJ_DIR)/../qam/qam_conv.c \
+ $(PRJ_DIR)/../qam/qam_files.c \
+ $(PRJ_DIR)/../qam/qam_method.c \
+ $(PRJ_DIR)/../qam/qam_open.c \
+ $(PRJ_DIR)/../qam/qam_rec.c \
+ $(PRJ_DIR)/../qam/qam_stat.c \
+ $(PRJ_DIR)/../qam/qam_upgrade.c \
+ $(PRJ_DIR)/../qam/qam_verify.c \
+ $(PRJ_DIR)/../rep/rep_method.c \
+ $(PRJ_DIR)/../rep/rep_record.c \
+ $(PRJ_DIR)/../rep/rep_region.c \
+ $(PRJ_DIR)/../rep/rep_util.c \
+ $(PRJ_DIR)/../rpc_client/client.c \
+ $(PRJ_DIR)/../rpc_client/db_server_clnt.c \
+ $(PRJ_DIR)/../rpc_client/gen_client.c \
+ $(PRJ_DIR)/../rpc_client/gen_client_ret.c \
+ $(PRJ_DIR)/../rpc_server/c/db_server_xdr.c \
+ $(PRJ_DIR)/../txn/txn.c \
+ $(PRJ_DIR)/../txn/txn_auto.c \
+ $(PRJ_DIR)/../txn/txn_method.c \
+ $(PRJ_DIR)/../txn/txn_rec.c \
+ $(PRJ_DIR)/../txn/txn_recover.c \
+ $(PRJ_DIR)/../txn/txn_region.c \
+ $(PRJ_DIR)/../txn/txn_stat.c \
+ $(PRJ_DIR)/../txn/txn_util.c \
+ $(PRJ_DIR)/../xa/xa.c \
+ $(PRJ_DIR)/../xa/xa_db.c \
+ $(PRJ_DIR)/../xa/xa_map.c
+<END>
+
+<BEGIN> userComments
+BerkeleyDB
+<END>
diff --git a/libdb/build_vxworks/BerkeleyDB.wsp b/libdb/build_vxworks/BerkeleyDB.wsp
new file mode 100755
index 0000000..ce2e71b
--- /dev/null
+++ b/libdb/build_vxworks/BerkeleyDB.wsp
@@ -0,0 +1,29 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+Workspace
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> projectList
+$(PRJ_DIR)/BerkeleyDB.wpj \
+ $(PRJ_DIR)/db_archive/db_archive.wpj \
+ $(PRJ_DIR)/db_checkpoint/db_checkpoint.wpj \
+ $(PRJ_DIR)/db_deadlock/db_deadlock.wpj \
+ $(PRJ_DIR)/db_dump/db_dump.wpj \
+ $(PRJ_DIR)/db_load/db_load.wpj \
+ $(PRJ_DIR)/db_printlog/db_printlog.wpj \
+ $(PRJ_DIR)/db_recover/db_recover.wpj \
+ $(PRJ_DIR)/db_stat/db_stat.wpj \
+ $(PRJ_DIR)/db_upgrade/db_upgrade.wpj \
+ $(PRJ_DIR)/db_verify/db_verify.wpj \
+ $(PRJ_DIR)/dbdemo/dbdemo.wpj
+<END>
+
+<BEGIN> userComments
+
+<END>
+
diff --git a/libdb/build_vxworks/BerkeleyDB/Makefile.custom b/libdb/build_vxworks/BerkeleyDB/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/BerkeleyDB/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/BerkeleyDB/component.cdf b/libdb/build_vxworks/BerkeleyDB/component.cdf
new file mode 100755
index 0000000..4b3e6f1
--- /dev/null
+++ b/libdb/build_vxworks/BerkeleyDB/component.cdf
@@ -0,0 +1,1220 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_idspace.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_open.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_remove.o \
+ db_rename.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_truncate.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ dbreg.o \
+ dbreg_auto.o \
+ dbreg_rec.o \
+ dbreg_util.o \
+ env_file.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ fileops_auto.o \
+ fop_basic.o \
+ fop_rec.o \
+ fop_util.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_open.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hmac.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_compare.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_id.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_config.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ sha1.o \
+ snprintf.o \
+ strcasecmp.o \
+ strdup.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ txn_util.o \
+ util_arg.o \
+ util_cache.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module bt_compare.o {
+
+ NAME bt_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_compare.c
+}
+
+Module bt_conv.o {
+
+ NAME bt_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_conv.c
+}
+
+Module bt_curadj.o {
+
+ NAME bt_curadj.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_curadj.c
+}
+
+Module bt_cursor.o {
+
+ NAME bt_cursor.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_cursor.c
+}
+
+Module bt_delete.o {
+
+ NAME bt_delete.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_delete.c
+}
+
+Module bt_method.o {
+
+ NAME bt_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_method.c
+}
+
+Module bt_open.o {
+
+ NAME bt_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_open.c
+}
+
+Module bt_put.o {
+
+ NAME bt_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_put.c
+}
+
+Module bt_rec.o {
+
+ NAME bt_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rec.c
+}
+
+Module bt_reclaim.o {
+
+ NAME bt_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_reclaim.c
+}
+
+Module bt_recno.o {
+
+ NAME bt_recno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_recno.c
+}
+
+Module bt_rsearch.o {
+
+ NAME bt_rsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rsearch.c
+}
+
+Module bt_search.o {
+
+ NAME bt_search.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_search.c
+}
+
+Module bt_split.o {
+
+ NAME bt_split.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_split.c
+}
+
+Module bt_stat.o {
+
+ NAME bt_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_stat.c
+}
+
+Module bt_upgrade.o {
+
+ NAME bt_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_upgrade.c
+}
+
+Module bt_verify.o {
+
+ NAME bt_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_verify.c
+}
+
+Module btree_auto.o {
+
+ NAME btree_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/btree_auto.c
+}
+
+Module getopt.o {
+
+ NAME getopt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/getopt.c
+}
+
+Module snprintf.o {
+
+ NAME snprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/snprintf.c
+}
+
+Module strcasecmp.o {
+
+ NAME strcasecmp.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/strcasecmp.c
+}
+
+Module strdup.o {
+
+ NAME strdup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/strdup.c
+}
+
+Module vsnprintf.o {
+
+ NAME vsnprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/vsnprintf.c
+}
+
+Module db_byteorder.o {
+
+ NAME db_byteorder.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_byteorder.c
+}
+
+Module db_err.o {
+
+ NAME db_err.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_err.c
+}
+
+Module db_getlong.o {
+
+ NAME db_getlong.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_getlong.c
+}
+
+Module db_idspace.o {
+
+ NAME db_idspace.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_idspace.c
+}
+
+Module db_log2.o {
+
+ NAME db_log2.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_log2.c
+}
+
+Module util_arg.o {
+
+ NAME util_arg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_arg.c
+}
+
+Module util_cache.o {
+
+ NAME util_cache.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_cache.c
+}
+
+Module util_log.o {
+
+ NAME util_log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_log.c
+}
+
+Module util_sig.o {
+
+ NAME util_sig.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_sig.c
+}
+
+Module crdel_auto.o {
+
+ NAME crdel_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_auto.c
+}
+
+Module crdel_rec.o {
+
+ NAME crdel_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_rec.c
+}
+
+Module db.o {
+
+ NAME db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db.c
+}
+
+Module db_am.o {
+
+ NAME db_am.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_am.c
+}
+
+Module db_auto.o {
+
+ NAME db_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_auto.c
+}
+
+Module db_cam.o {
+
+ NAME db_cam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_cam.c
+}
+
+Module db_conv.o {
+
+ NAME db_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_conv.c
+}
+
+Module db_dispatch.o {
+
+ NAME db_dispatch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dispatch.c
+}
+
+Module db_dup.o {
+
+ NAME db_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dup.c
+}
+
+Module db_iface.o {
+
+ NAME db_iface.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_iface.c
+}
+
+Module db_join.o {
+
+ NAME db_join.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_join.c
+}
+
+Module db_meta.o {
+
+ NAME db_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_meta.c
+}
+
+Module db_method.o {
+
+ NAME db_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_method.c
+}
+
+Module db_open.o {
+
+ NAME db_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_open.c
+}
+
+Module db_overflow.o {
+
+ NAME db_overflow.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_overflow.c
+}
+
+Module db_pr.o {
+
+ NAME db_pr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_pr.c
+}
+
+Module db_rec.o {
+
+ NAME db_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_rec.c
+}
+
+Module db_reclaim.o {
+
+ NAME db_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_reclaim.c
+}
+
+Module db_remove.o {
+
+ NAME db_remove.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_remove.c
+}
+
+Module db_rename.o {
+
+ NAME db_rename.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_rename.c
+}
+
+Module db_ret.o {
+
+ NAME db_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_ret.c
+}
+
+Module db_truncate.o {
+
+ NAME db_truncate.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_truncate.c
+}
+
+Module db_upg.o {
+
+ NAME db_upg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg.c
+}
+
+Module db_upg_opd.o {
+
+ NAME db_upg_opd.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg_opd.c
+}
+
+Module db_vrfy.o {
+
+ NAME db_vrfy.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfy.c
+}
+
+Module db_vrfyutil.o {
+
+ NAME db_vrfyutil.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfyutil.c
+}
+
+Module dbreg.o {
+
+ NAME dbreg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg.c
+}
+
+Module dbreg_auto.o {
+
+ NAME dbreg_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_auto.c
+}
+
+Module dbreg_rec.o {
+
+ NAME dbreg_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_rec.c
+}
+
+Module dbreg_util.o {
+
+ NAME dbreg_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_util.c
+}
+
+Module db_salloc.o {
+
+ NAME db_salloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_salloc.c
+}
+
+Module db_shash.o {
+
+ NAME db_shash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_shash.c
+}
+
+Module env_file.o {
+
+ NAME env_file.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_file.c
+}
+
+Module env_method.o {
+
+ NAME env_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_method.c
+}
+
+Module env_open.o {
+
+ NAME env_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_open.c
+}
+
+Module env_recover.o {
+
+ NAME env_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_recover.c
+}
+
+Module env_region.o {
+
+ NAME env_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_region.c
+}
+
+Module fileops_auto.o {
+
+ NAME fileops_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fileops_auto.c
+}
+
+Module fop_basic.o {
+
+ NAME fop_basic.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_basic.c
+}
+
+Module fop_rec.o {
+
+ NAME fop_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_rec.c
+}
+
+Module fop_util.o {
+
+ NAME fop_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_util.c
+}
+
+Module hash.o {
+
+ NAME hash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash.c
+}
+
+Module hash_auto.o {
+
+ NAME hash_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_auto.c
+}
+
+Module hash_conv.o {
+
+ NAME hash_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_conv.c
+}
+
+Module hash_dup.o {
+
+ NAME hash_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_dup.c
+}
+
+Module hash_func.o {
+
+ NAME hash_func.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_func.c
+}
+
+Module hash_meta.o {
+
+ NAME hash_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_meta.c
+}
+
+Module hash_method.o {
+
+ NAME hash_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_method.c
+}
+
+Module hash_open.o {
+
+ NAME hash_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_open.c
+}
+
+Module hash_page.o {
+
+ NAME hash_page.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_page.c
+}
+
+Module hash_rec.o {
+
+ NAME hash_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_rec.c
+}
+
+Module hash_reclaim.o {
+
+ NAME hash_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_reclaim.c
+}
+
+Module hash_stat.o {
+
+ NAME hash_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_stat.c
+}
+
+Module hash_upgrade.o {
+
+ NAME hash_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_upgrade.c
+}
+
+Module hash_verify.o {
+
+ NAME hash_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_verify.c
+}
+
+Module hmac.o {
+
+ NAME hmac.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hmac/hmac.c
+}
+
+Module sha1.o {
+
+ NAME sha1.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hmac/sha1.c
+}
+
+Module hsearch.o {
+
+ NAME hsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hsearch/hsearch.c
+}
+
+Module lock.o {
+
+ NAME lock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock.c
+}
+
+Module lock_deadlock.o {
+
+ NAME lock_deadlock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_deadlock.c
+}
+
+Module lock_method.o {
+
+ NAME lock_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_method.c
+}
+
+Module lock_region.o {
+
+ NAME lock_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_region.c
+}
+
+Module lock_stat.o {
+
+ NAME lock_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_stat.c
+}
+
+Module lock_util.o {
+
+ NAME lock_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_util.c
+}
+
+Module log.o {
+
+ NAME log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log.c
+}
+
+Module log_archive.o {
+
+ NAME log_archive.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_archive.c
+}
+
+Module log_compare.o {
+
+ NAME log_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_compare.c
+}
+
+Module log_get.o {
+
+ NAME log_get.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_get.c
+}
+
+Module log_method.o {
+
+ NAME log_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_method.c
+}
+
+Module log_put.o {
+
+ NAME log_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_put.c
+}
+
+Module mp_alloc.o {
+
+ NAME mp_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_alloc.c
+}
+
+Module mp_bh.o {
+
+ NAME mp_bh.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_bh.c
+}
+
+Module mp_fget.o {
+
+ NAME mp_fget.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fget.c
+}
+
+Module mp_fopen.o {
+
+ NAME mp_fopen.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fopen.c
+}
+
+Module mp_fput.o {
+
+ NAME mp_fput.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fput.c
+}
+
+Module mp_fset.o {
+
+ NAME mp_fset.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fset.c
+}
+
+Module mp_method.o {
+
+ NAME mp_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_method.c
+}
+
+Module mp_region.o {
+
+ NAME mp_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_region.c
+}
+
+Module mp_register.o {
+
+ NAME mp_register.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_register.c
+}
+
+Module mp_stat.o {
+
+ NAME mp_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_stat.c
+}
+
+Module mp_sync.o {
+
+ NAME mp_sync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_sync.c
+}
+
+Module mp_trickle.o {
+
+ NAME mp_trickle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_trickle.c
+}
+
+Module mut_tas.o {
+
+ NAME mut_tas.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mut_tas.c
+}
+
+Module mutex.o {
+
+ NAME mutex.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mutex.c
+}
+
+Module os_alloc.o {
+
+ NAME os_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_alloc.c
+}
+
+Module os_clock.o {
+
+ NAME os_clock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_clock.c
+}
+
+Module os_dir.o {
+
+ NAME os_dir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_dir.c
+}
+
+Module os_errno.o {
+
+ NAME os_errno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_errno.c
+}
+
+Module os_fid.o {
+
+ NAME os_fid.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fid.c
+}
+
+Module os_fsync.o {
+
+ NAME os_fsync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fsync.c
+}
+
+Module os_handle.o {
+
+ NAME os_handle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_handle.c
+}
+
+Module os_id.o {
+
+ NAME os_id.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_id.c
+}
+
+Module os_method.o {
+
+ NAME os_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_method.c
+}
+
+Module os_oflags.o {
+
+ NAME os_oflags.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_oflags.c
+}
+
+Module os_open.o {
+
+ NAME os_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_open.c
+}
+
+Module os_region.o {
+
+ NAME os_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_region.c
+}
+
+Module os_rename.o {
+
+ NAME os_rename.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rename.c
+}
+
+Module os_root.o {
+
+ NAME os_root.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_root.c
+}
+
+Module os_rpath.o {
+
+ NAME os_rpath.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rpath.c
+}
+
+Module os_rw.o {
+
+ NAME os_rw.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rw.c
+}
+
+Module os_seek.o {
+
+ NAME os_seek.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_seek.c
+}
+
+Module os_sleep.o {
+
+ NAME os_sleep.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_sleep.c
+}
+
+Module os_spin.o {
+
+ NAME os_spin.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_spin.c
+}
+
+Module os_stat.o {
+
+ NAME os_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_stat.c
+}
+
+Module os_tmpdir.o {
+
+ NAME os_tmpdir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_tmpdir.c
+}
+
+Module os_unlink.o {
+
+ NAME os_unlink.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_unlink.c
+}
+
+Module os_vx_abs.o {
+
+ NAME os_vx_abs.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c
+}
+
+Module os_vx_config.o {
+
+ NAME os_vx_config.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_config.c
+}
+
+Module os_vx_map.o {
+
+ NAME os_vx_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_map.c
+}
+
+Module qam.o {
+
+ NAME qam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam.c
+}
+
+Module qam_auto.o {
+
+ NAME qam_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_auto.c
+}
+
+Module qam_conv.o {
+
+ NAME qam_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_conv.c
+}
+
+Module qam_files.o {
+
+ NAME qam_files.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_files.c
+}
+
+Module qam_method.o {
+
+ NAME qam_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_method.c
+}
+
+Module qam_open.o {
+
+ NAME qam_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_open.c
+}
+
+Module qam_rec.o {
+
+ NAME qam_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_rec.c
+}
+
+Module qam_stat.o {
+
+ NAME qam_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_stat.c
+}
+
+Module qam_upgrade.o {
+
+ NAME qam_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_upgrade.c
+}
+
+Module qam_verify.o {
+
+ NAME qam_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_verify.c
+}
+
+Module rep_method.o {
+
+ NAME rep_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_method.c
+}
+
+Module rep_record.o {
+
+ NAME rep_record.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_record.c
+}
+
+Module rep_region.o {
+
+ NAME rep_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_region.c
+}
+
+Module rep_util.o {
+
+ NAME rep_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_util.c
+}
+
+Module client.o {
+
+ NAME client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/client.c
+}
+
+Module db_server_clnt.o {
+
+ NAME db_server_clnt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/db_server_clnt.c
+}
+
+Module gen_client.o {
+
+ NAME gen_client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client.c
+}
+
+Module gen_client_ret.o {
+
+ NAME gen_client_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client_ret.c
+}
+
+Module db_server_xdr.o {
+
+ NAME db_server_xdr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c
+}
+
+Module txn.o {
+
+ NAME txn.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn.c
+}
+
+Module txn_auto.o {
+
+ NAME txn_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_auto.c
+}
+
+Module txn_method.o {
+
+ NAME txn_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_method.c
+}
+
+Module txn_rec.o {
+
+ NAME txn_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_rec.c
+}
+
+Module txn_recover.o {
+
+ NAME txn_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_recover.c
+}
+
+Module txn_region.o {
+
+ NAME txn_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_region.c
+}
+
+Module txn_stat.o {
+
+ NAME txn_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_stat.c
+}
+
+Module txn_util.o {
+
+ NAME txn_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_util.c
+}
+
+Module xa.o {
+
+ NAME xa.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa.c
+}
+
+Module xa_db.o {
+
+ NAME xa_db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_db.c
+}
+
+Module xa_map.o {
+
+ NAME xa_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_map.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/BerkeleyDB/component.wpj b/libdb/build_vxworks/BerkeleyDB/component.wpj
new file mode 100755
index 0000000..3207bb2
--- /dev/null
+++ b/libdb/build_vxworks/BerkeleyDB/component.wpj
@@ -0,0 +1,6764 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../..
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../../btree/bt_compare.c \
+ $(PRJ_DIR)/../../btree/bt_conv.c \
+ $(PRJ_DIR)/../../btree/bt_curadj.c \
+ $(PRJ_DIR)/../../btree/bt_cursor.c \
+ $(PRJ_DIR)/../../btree/bt_delete.c \
+ $(PRJ_DIR)/../../btree/bt_method.c \
+ $(PRJ_DIR)/../../btree/bt_open.c \
+ $(PRJ_DIR)/../../btree/bt_put.c \
+ $(PRJ_DIR)/../../btree/bt_rec.c \
+ $(PRJ_DIR)/../../btree/bt_reclaim.c \
+ $(PRJ_DIR)/../../btree/bt_recno.c \
+ $(PRJ_DIR)/../../btree/bt_rsearch.c \
+ $(PRJ_DIR)/../../btree/bt_search.c \
+ $(PRJ_DIR)/../../btree/bt_split.c \
+ $(PRJ_DIR)/../../btree/bt_stat.c \
+ $(PRJ_DIR)/../../btree/bt_upgrade.c \
+ $(PRJ_DIR)/../../btree/bt_verify.c \
+ $(PRJ_DIR)/../../btree/btree_auto.c \
+ $(PRJ_DIR)/../../clib/getopt.c \
+ $(PRJ_DIR)/../../clib/snprintf.c \
+ $(PRJ_DIR)/../../clib/strcasecmp.c \
+ $(PRJ_DIR)/../../clib/strdup.c \
+ $(PRJ_DIR)/../../clib/vsnprintf.c \
+ $(PRJ_DIR)/../../common/db_byteorder.c \
+ $(PRJ_DIR)/../../common/db_err.c \
+ $(PRJ_DIR)/../../common/db_getlong.c \
+ $(PRJ_DIR)/../../common/db_idspace.c \
+ $(PRJ_DIR)/../../common/db_log2.c \
+ $(PRJ_DIR)/../../common/util_arg.c \
+ $(PRJ_DIR)/../../common/util_cache.c \
+ $(PRJ_DIR)/../../common/util_log.c \
+ $(PRJ_DIR)/../../common/util_sig.c \
+ $(PRJ_DIR)/../../db/crdel_auto.c \
+ $(PRJ_DIR)/../../db/crdel_rec.c \
+ $(PRJ_DIR)/../../db/db.c \
+ $(PRJ_DIR)/../../db/db_am.c \
+ $(PRJ_DIR)/../../db/db_auto.c \
+ $(PRJ_DIR)/../../db/db_cam.c \
+ $(PRJ_DIR)/../../db/db_conv.c \
+ $(PRJ_DIR)/../../db/db_dispatch.c \
+ $(PRJ_DIR)/../../db/db_dup.c \
+ $(PRJ_DIR)/../../db/db_iface.c \
+ $(PRJ_DIR)/../../db/db_join.c \
+ $(PRJ_DIR)/../../db/db_meta.c \
+ $(PRJ_DIR)/../../db/db_method.c \
+ $(PRJ_DIR)/../../db/db_open.c \
+ $(PRJ_DIR)/../../db/db_overflow.c \
+ $(PRJ_DIR)/../../db/db_pr.c \
+ $(PRJ_DIR)/../../db/db_rec.c \
+ $(PRJ_DIR)/../../db/db_reclaim.c \
+ $(PRJ_DIR)/../../db/db_remove.c \
+ $(PRJ_DIR)/../../db/db_rename.c \
+ $(PRJ_DIR)/../../db/db_ret.c \
+ $(PRJ_DIR)/../../db/db_truncate.c \
+ $(PRJ_DIR)/../../db/db_upg.c \
+ $(PRJ_DIR)/../../db/db_upg_opd.c \
+ $(PRJ_DIR)/../../db/db_vrfy.c \
+ $(PRJ_DIR)/../../db/db_vrfyutil.c \
+ $(PRJ_DIR)/../../dbreg/dbreg.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_auto.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_rec.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_util.c \
+ $(PRJ_DIR)/../../env/db_salloc.c \
+ $(PRJ_DIR)/../../env/db_shash.c \
+ $(PRJ_DIR)/../../env/env_file.c \
+ $(PRJ_DIR)/../../env/env_method.c \
+ $(PRJ_DIR)/../../env/env_open.c \
+ $(PRJ_DIR)/../../env/env_recover.c \
+ $(PRJ_DIR)/../../env/env_region.c \
+ $(PRJ_DIR)/../../fileops/fileops_auto.c \
+ $(PRJ_DIR)/../../fileops/fop_basic.c \
+ $(PRJ_DIR)/../../fileops/fop_rec.c \
+ $(PRJ_DIR)/../../fileops/fop_util.c \
+ $(PRJ_DIR)/../../hash/hash.c \
+ $(PRJ_DIR)/../../hash/hash_auto.c \
+ $(PRJ_DIR)/../../hash/hash_conv.c \
+ $(PRJ_DIR)/../../hash/hash_dup.c \
+ $(PRJ_DIR)/../../hash/hash_func.c \
+ $(PRJ_DIR)/../../hash/hash_meta.c \
+ $(PRJ_DIR)/../../hash/hash_method.c \
+ $(PRJ_DIR)/../../hash/hash_open.c \
+ $(PRJ_DIR)/../../hash/hash_page.c \
+ $(PRJ_DIR)/../../hash/hash_rec.c \
+ $(PRJ_DIR)/../../hash/hash_reclaim.c \
+ $(PRJ_DIR)/../../hash/hash_stat.c \
+ $(PRJ_DIR)/../../hash/hash_upgrade.c \
+ $(PRJ_DIR)/../../hash/hash_verify.c \
+ $(PRJ_DIR)/../../hmac/hmac.c \
+ $(PRJ_DIR)/../../hmac/sha1.c \
+ $(PRJ_DIR)/../../hsearch/hsearch.c \
+ $(PRJ_DIR)/../../lock/lock.c \
+ $(PRJ_DIR)/../../lock/lock_deadlock.c \
+ $(PRJ_DIR)/../../lock/lock_method.c \
+ $(PRJ_DIR)/../../lock/lock_region.c \
+ $(PRJ_DIR)/../../lock/lock_stat.c \
+ $(PRJ_DIR)/../../lock/lock_util.c \
+ $(PRJ_DIR)/../../log/log.c \
+ $(PRJ_DIR)/../../log/log_archive.c \
+ $(PRJ_DIR)/../../log/log_compare.c \
+ $(PRJ_DIR)/../../log/log_get.c \
+ $(PRJ_DIR)/../../log/log_method.c \
+ $(PRJ_DIR)/../../log/log_put.c \
+ $(PRJ_DIR)/../../mp/mp_alloc.c \
+ $(PRJ_DIR)/../../mp/mp_bh.c \
+ $(PRJ_DIR)/../../mp/mp_fget.c \
+ $(PRJ_DIR)/../../mp/mp_fopen.c \
+ $(PRJ_DIR)/../../mp/mp_fput.c \
+ $(PRJ_DIR)/../../mp/mp_fset.c \
+ $(PRJ_DIR)/../../mp/mp_method.c \
+ $(PRJ_DIR)/../../mp/mp_region.c \
+ $(PRJ_DIR)/../../mp/mp_register.c \
+ $(PRJ_DIR)/../../mp/mp_stat.c \
+ $(PRJ_DIR)/../../mp/mp_sync.c \
+ $(PRJ_DIR)/../../mp/mp_trickle.c \
+ $(PRJ_DIR)/../../mutex/mut_tas.c \
+ $(PRJ_DIR)/../../mutex/mutex.c \
+ $(PRJ_DIR)/../../os/os_alloc.c \
+ $(PRJ_DIR)/../../os/os_clock.c \
+ $(PRJ_DIR)/../../os/os_dir.c \
+ $(PRJ_DIR)/../../os/os_errno.c \
+ $(PRJ_DIR)/../../os/os_fid.c \
+ $(PRJ_DIR)/../../os/os_fsync.c \
+ $(PRJ_DIR)/../../os/os_handle.c \
+ $(PRJ_DIR)/../../os/os_id.c \
+ $(PRJ_DIR)/../../os/os_method.c \
+ $(PRJ_DIR)/../../os/os_oflags.c \
+ $(PRJ_DIR)/../../os/os_open.c \
+ $(PRJ_DIR)/../../os/os_region.c \
+ $(PRJ_DIR)/../../os/os_rename.c \
+ $(PRJ_DIR)/../../os/os_root.c \
+ $(PRJ_DIR)/../../os/os_rpath.c \
+ $(PRJ_DIR)/../../os/os_rw.c \
+ $(PRJ_DIR)/../../os/os_seek.c \
+ $(PRJ_DIR)/../../os/os_sleep.c \
+ $(PRJ_DIR)/../../os/os_spin.c \
+ $(PRJ_DIR)/../../os/os_stat.c \
+ $(PRJ_DIR)/../../os/os_tmpdir.c \
+ $(PRJ_DIR)/../../os/os_unlink.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_config.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_map.c \
+ $(PRJ_DIR)/../../qam/qam.c \
+ $(PRJ_DIR)/../../qam/qam_auto.c \
+ $(PRJ_DIR)/../../qam/qam_conv.c \
+ $(PRJ_DIR)/../../qam/qam_files.c \
+ $(PRJ_DIR)/../../qam/qam_method.c \
+ $(PRJ_DIR)/../../qam/qam_open.c \
+ $(PRJ_DIR)/../../qam/qam_rec.c \
+ $(PRJ_DIR)/../../qam/qam_stat.c \
+ $(PRJ_DIR)/../../qam/qam_upgrade.c \
+ $(PRJ_DIR)/../../qam/qam_verify.c \
+ $(PRJ_DIR)/../../rep/rep_method.c \
+ $(PRJ_DIR)/../../rep/rep_record.c \
+ $(PRJ_DIR)/../../rep/rep_region.c \
+ $(PRJ_DIR)/../../rep/rep_util.c \
+ $(PRJ_DIR)/../../rpc_client/client.c \
+ $(PRJ_DIR)/../../rpc_client/db_server_clnt.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client_ret.c \
+ $(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c \
+ $(PRJ_DIR)/../../txn/txn.c \
+ $(PRJ_DIR)/../../txn/txn_auto.c \
+ $(PRJ_DIR)/../../txn/txn_method.c \
+ $(PRJ_DIR)/../../txn/txn_rec.c \
+ $(PRJ_DIR)/../../txn/txn_recover.c \
+ $(PRJ_DIR)/../../txn/txn_region.c \
+ $(PRJ_DIR)/../../txn/txn_stat.c \
+ $(PRJ_DIR)/../../txn/txn_util.c \
+ $(PRJ_DIR)/../../xa/xa.c \
+ $(PRJ_DIR)/../../xa/xa_db.c \
+ $(PRJ_DIR)/../../xa/xa_map.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db.h b/libdb/build_vxworks/db.h
new file mode 100644
index 0000000..fabbd1d
--- /dev/null
+++ b/libdb/build_vxworks/db.h
@@ -0,0 +1,1948 @@
+/* DO NOT EDIT: automatically built by dist/s_vxworks. */
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Tornado 2 does not provide a standard C pre-processor #define. */
+#ifndef __vxworks
+#define __vxworks
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default; you have to specify a command line flag or #pragma to
+ * turn it on.  Don't do that, however, because some of Microsoft's own header
+ * files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
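
A minimal sketch of how the __P wrapper above is meant to be used when declaring prototypes; the function name here is a hypothetical example, not something from the imported header. Under an ANSI compiler the parameter list survives, under a K&R preprocessor it collapses to an empty list.

/* ANSI C: "int my_bt_compare(DB *, const DBT *, const DBT *);"
 * K&R C:  "int my_bt_compare();" */
int my_bt_compare __P((DB *, const DBT *, const DBT *));
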
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR 4
+#define DB_VERSION_MINOR 1
+#define DB_VERSION_PATCH 25
+#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.1.25: (December 19, 2002)"
+
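
As a hedged illustration only, a consumer of this bundled header could pin the release it was written against with a compile-time check on the macros above:

#if DB_VERSION_MAJOR != 4 || DB_VERSION_MINOR != 1
#error "this code expects the bundled Berkeley DB 4.1 headers"
#endif
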
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+typedef unsigned char u_int8_t;
+typedef unsigned short u_int16_t;
+typedef unsigned int u_int32_t;
+#endif
+
+
+/* Basic types that are exported or quasi-exported. */
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef u_int32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are currently limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions.
+ */
+typedef u_int32_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+struct __mutex_t; typedef struct __mutex_t DB_MUTEX;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */
+ u_int32_t flags;
+};
+
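
A hedged usage sketch for the DBT structure above: clear the struct so unused fields and flags start at zero, then ask the library to allocate the returned data with DB_DBT_MALLOC. The handle name dbp and the lookup key are illustrative assumptions; DB->get is one of the access-method calls declared further down in this header.

#include <stdlib.h>
#include <string.h>

static int
lookup_example(DB *dbp)			/* dbp: an already-opened DB handle */
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));	/* start with all fields/flags zeroed */
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = sizeof("fruit");
	data.flags = DB_DBT_MALLOC;	/* library allocates data.data */

	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
		free(data.data);	/* caller owns the malloc'd buffer */
	return (ret);
}
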
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TRUNCATE 0x000080 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x000100 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000200 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000400 /* Use the environment if root. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
+ * DB->remove, DB->rename, DB->truncate
+ * DB_DIRTY_READ:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
+ * DB_ENV->txn_begin
+ *
+ * Shared flags up to 0x000400 */
+#define DB_AUTO_COMMIT 0x00800000 /* Implied transaction. */
+#define DB_DIRTY_READ 0x01000000 /* Dirty Read. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000001 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000001 /* Open in an XA environment. */
+
+/*
+ * Flags private to DB_ENV->open.
+ * Shared flags up to 0x000400 */
+#define DB_INIT_CDB 0x000800 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x001000 /* Initialize locking. */
+#define DB_INIT_LOG 0x002000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x004000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x008000 /* Initialize transactions. */
+#define DB_JOINENV 0x010000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x020000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x040000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x080000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x100000 /* Use system-backed memory. */
+
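
A hedged sketch of how these DB_ENV->open flags are usually combined to bring up a full transactional environment; the home directory path is an illustrative assumption.

static int
open_env_example(DB_ENV **dbenvp)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	/* Create the environment if needed, enable every subsystem,
	 * and run normal recovery on the way up. */
	if ((ret = dbenv->open(dbenv, "/var/tmp/dbhome",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
	    DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}
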
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x000400 */
+#define DB_EXCL 0x000800 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x001000 /* UNDOC: fcntl(2) locking. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x004000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x000400 */
+#define DB_TXN_NOWAIT 0x000800 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x001000 /* Always sync log on commit. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x000400 */
+#define DB_CDB_ALLDB 0x000800 /* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x001000 /* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x002000 /* Don't buffer log files in the OS. */
+#define DB_NOLOCKING 0x004000 /* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x008000 /* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x010000 /* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x020000 /* Set panic state per environment. */
+#define DB_REGION_INIT 0x040000 /* Page-fault regions on open. */
+#define DB_TXN_WRITE_NOSYNC 0x080000 /* Write, don't sync, on txn commit. */
+#define DB_YIELDCPU 0x100000 /* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000001 /* Upgrading. */
+#define DB_VERIFY 0x000002 /* Verifying. */
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x000400 */
+#define DB_DIRECT 0x000800 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x001000 /* UNDOC: dealing with an extent. */
+#define DB_ODDFILESIZE 0x002000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ */
+#define DB_CHKSUM_SHA1 0x000001 /* Use SHA1 checksumming */
+#define DB_DUP 0x000002 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x000004 /* Btree, Hash: sorted duplicate keys. */
+#define DB_ENCRYPT 0x000008 /* Encrypt the database content. */
+#define DB_RECNUM 0x000010 /* Btree: record numbers. */
+#define DB_RENUMBER 0x000020 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x000040 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x000080 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB->stat methods.
+ */
+#define DB_STAT_CLEAR 0x000001 /* Clear stat after returning values. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x000040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->set_rep_transport's send callback.
+ */
+#define DB_REP_PERMANENT 0x0001 /* Important--app. may want to flush. */
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */
+#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */
+#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */
+#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 7 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */
+
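
These mode constants are the values DB_ENV->set_lk_detect accepts; a minimal hedged sketch, assuming dbenv is an existing environment handle:

int ret;

/* Resolve deadlocks automatically, aborting whichever transaction
 * the default policy selects. */
ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT);
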
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_FREE_LOCKER 0x001 /* Internal: Free locker as well. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */
+#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */
+
+/*
+ * Simple R/W lock modes and for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
+ * must be == 4.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_DIRTY=7, /* Dirty Read. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
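
The warning that these enum values are not arbitrary is because they double as indexes into a lock-conflicts matrix; a hedged sketch of that indexing idiom, using an illustrative 3x3 table rather than the library's real conflict arrays:

/* conflicts[requested][held] != 0 means the requested mode must wait. */
static const u_int8_t conflicts[3][3] = {
	/*            NG  READ  WRITE */
	/* NG    */ {  0,   0,    0 },
	/* READ  */ {  0,   0,    1 },
	/* WRITE */ {  0,   1,    1 },
};

static int
must_wait(db_lockmode_t requested, db_lockmode_t held)
{
	return (conflicts[requested][held]);
}
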
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR=2, /* Lock is bad. */
+ DB_LSTAT_EXPIRED=3, /* Lock has expired. */
+ DB_LSTAT_FREE=4, /* Lock is unallocated. */
+ DB_LSTAT_HELD=5, /* Lock is currently held. */
+ DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting
+ * was removed */
+ DB_LSTAT_PENDING=7, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */
+} db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ u_int32_t st_id; /* Last allocated locker ID. */
+ u_int32_t st_cur_maxid; /* Current maximum unused ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum num of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+#define DB_TXN_LOCK 4
+ u_int32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 7 /* Current log version. */
+#define DB_LOGOLDVER 7 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
+
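
Given the file/offset layout just described, ordering two LSNs is a simple lexicographic comparison; a hedged sketch (the library's log_compare routine provides the same ordering):

/* Returns <0, 0 or >0 as lsn0 is earlier than, equal to or later than lsn1. */
static int
lsn_cmp(const DB_LSN *lsn0, const DB_LSN *lsn1)
{
	if (lsn0->file != lsn1->file)
		return (lsn0->file < lsn1->file ? -1 : 1);
	if (lsn0->offset != lsn1->offset)
		return (lsn0->offset < lsn1->offset ? -1 : 1);
	return (0);
}
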
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *c_fh; /* File handle. */
+ DB_LSN c_lsn; /* Cursor: LSN */
+ u_int32_t c_len; /* Cursor: record length */
+ u_int32_t c_prev; /* Cursor: previous record's offset */
+
+ DBT c_dbt; /* Return DBT. */
+
+#define DB_LOGC_BUF_SIZE (32 * 1024)
+ u_int8_t *bp; /* Allocated read buffer. */
+ u_int32_t bp_size; /* Read buffer length in bytes. */
+ u_int32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ u_int32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* Methods. */
+ int (*close) __P((DB_LOGC *, u_int32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */
+ u_int32_t flags;
+};
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_size; /* Log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_disk_file; /* Known on disk log file number. */
+ u_int32_t st_disk_offset; /* Known on disk log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */
+};
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+
+/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Priority values for DB_MPOOLFILE->set_priority. */
+typedef enum {
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH *fhp; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * The pinref and q fields are protected by the region lock, not the
+ * DB_MPOOLFILE structure mutex. We don't use the structure mutex
+ * because then I/O (which is done with the structure lock held because of
+ * the race between the seek and write of the file descriptor) would
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * These fields are not thread-protected because they are initialized
+ * when the file is opened and never modified.
+ */
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* Methods. */
+ int (*close) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ void (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ void (*last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ void (*refcnt) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ void (*set_unlink) __P((DB_MPOOLFILE *, int));
+ int (*sync) __P((DB_MPOOLFILE *));
+
+ /*
+ * MP_OPEN_CALLED and MP_READONLY do not need to be thread protected
+ * because they are initialized when the file is opened, and never
+ * modified.
+ *
+ * MP_FLUSH, MP_UPGRADE and MP_UPGRADE_FAIL are thread protected
+ * because they are potentially read by multiple threads of control.
+ */
+#define MP_FLUSH 0x001 /* Was opened to flush a buffer. */
+#define MP_OPEN_CALLED 0x002 /* File opened. */
+#define MP_READONLY 0x004 /* File is readonly. */
+#define MP_UPGRADE 0x008 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x010 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
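+
+/*
+ * Example (illustrative sketch): typical use of the DB_MPOOLFILE methods
+ * above.  The dbenv handle, file name and page handling are placeholders.
+ *
+ *	DB_MPOOLFILE *mpf;
+ *	db_pgno_t pgno = 0;
+ *	void *addr;
+ *	int ret;
+ *
+ *	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
+ *		return (ret);
+ *	if ((ret = mpf->open(mpf, "file.db", 0, 0, 0)) != 0)
+ *		return (ret);
+ *	if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &addr)) == 0)
+ *		(void)mpf->put(mpf, addr, DB_MPOOL_DIRTY);
+ *	(void)mpf->close(mpf, 0);
+ */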
+
+/*
+ * Mpool statistics structure.
+ */
+struct __db_mpool_stat {
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_pages; /* Total number of pages. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ u_int32_t st_hash_wait; /* Hash lock granted after wait. */
+ u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted with nowait. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_alloc; /* Number of page allocations. */
+ u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ u_int32_t st_alloc_pages; /* Pages checked during allocation. */
+ u_int32_t st_alloc_max_pages; /* Max checked during allocation. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_GETPGNOS=5, /* Internal. */
+ DB_TXN_OPENFILES=6, /* Internal. */
+ DB_TXN_POPENFILES=7, /* Internal. */
+ DB_TXN_PRINT=8 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
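+
+/*
+ * Example (illustrative sketch): an application recovery function of the
+ * type installed with DB_ENV->set_app_dispatch typically branches on the
+ * DB_UNDO/DB_REDO macros above.  Record decoding is application-specific;
+ * undo_my_record/redo_my_record are assumed application helpers.
+ *
+ *	int
+ *	my_dispatch(dbenv, rec, lsn, op)
+ *		DB_ENV *dbenv;
+ *		DBT *rec;
+ *		DB_LSN *lsn;
+ *		db_recops op;
+ *	{
+ *		if (DB_UNDO(op))
+ *			return (undo_my_record(rec));
+ *		if (DB_REDO(op))
+ *			return (redo_my_record(rec));
+ *		return (0);
+ *	}
+ */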
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time this txn expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ /* API-private structure: used by C++ */
+ void *api_internal;
+
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+ /* Methods. */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, u_int32_t));
+ int (*discard) __P((DB_TXN *, u_int32_t));
+ u_int32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, u_int8_t *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_COMPENSATE 0x02 /* Compensating transaction. */
+#define TXN_DIRTY_READ 0x04 /* Transaction does dirty reads. */
+#define TXN_LOCKTIMEOUT 0x08 /* Transaction has a lock timeout. */
+#define TXN_MALLOC 0x10 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x20 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x40 /* Do not wait on locks. */
+#define TXN_SYNC 0x80 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* LSN when transaction began */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * Structure used for two phase commit interface. Berkeley DB support for two
+ * phase commit is compatible with the X/open XA interface. The xa #define
+ * XIDDATASIZE defines the size of a global transaction ID. We have our own
+ * version here which must have the same value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
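+
+/*
+ * Example (illustrative sketch): preparing a transaction with a
+ * DB_XIDDATASIZE-byte global ID and resolving prepared transactions after
+ * recovery.  Filling in the GID is the job of the application's
+ * transaction manager and is not shown.
+ *
+ *	u_int8_t gid[DB_XIDDATASIZE];
+ *	DB_PREPLIST prep[32];
+ *	long count;
+ *	int ret;
+ *
+ *	if ((ret = txn->prepare(txn, gid)) != 0)
+ *		return (ret);
+ *
+ *	After recovery:
+ *	if ((ret = dbenv->txn_recover(dbenv,
+ *	    prep, sizeof(prep) / sizeof(prep[0]), &count, DB_FIRST)) == 0)
+ *		while (count-- > 0)
+ *			(void)prep[count].txn->commit(prep[count].txn, 0);
+ */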
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_start flags values */
+#define DB_REP_CLIENT 0x001
+#define DB_REP_LOGSONLY 0x002
+#define DB_REP_MASTER 0x004
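+
+/*
+ * Example (illustrative sketch): a site joins a replication group by
+ * calling DB_ENV->rep_start with one of the flags above; cdata is an
+ * opaque, application-defined DBT passed to the other sites.
+ *
+ *	DBT cdata;
+ *	int ret;
+ *
+ *	memset(&cdata, 0, sizeof(cdata));
+ *	cdata.data = "site-1";
+ *	cdata.size = sizeof("site-1");
+ *	ret = dbenv->rep_start(dbenv, &cdata, DB_REP_CLIENT);
+ */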
+
+/* Replication statistics. */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ u_int32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+
+ u_int32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ u_int32_t st_gen; /* Current generation number. */
+ u_int32_t st_log_duplicated; /* Log records received multiply.+ */
+ u_int32_t st_log_queued; /* Log records currently queued.+ */
+ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ u_int32_t st_log_records; /* Log records received and put.+ */
+ u_int32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ u_int32_t st_master_changes; /* # of times we've switched masters. */
+ u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ u_int32_t st_msgs_processed; /* Messages received and processed.+ */
+ u_int32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ u_int32_t st_msgs_sent; /* # of successful message sends.+ */
+ u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ u_int32_t st_nthrottles; /* # of times we were throttled. */
+ u_int32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ u_int32_t st_txns_applied; /* # of transactions applied.+ */
+
+ /* Elections generally. */
+ u_int32_t st_elections; /* # of elections held.+ */
+ u_int32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ u_int32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ int st_election_tiebreaker; /* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH=2,
+ DB_RECNO=3,
+ DB_QUEUE=4,
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 8 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_COMMIT 5 /* log_put() (internal) */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURRENT 8 /* c_get(), c_put(), DB_LOGC->get() */
+#define DB_FAST_STAT 9 /* stat() */
+#define DB_FIRST 10 /* c_get(), DB_LOGC->get() */
+#define DB_GET_BOTH 11 /* get(), c_get() */
+#define DB_GET_BOTHC 12 /* c_get() (internal) */
+#define DB_GET_BOTH_RANGE 13 /* get(), c_get() */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT 19 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), DB_LOGC->get() */
+#define DB_PREV_NODUP 28 /* c_get(), DB_LOGC->get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), DB_LOGC->get() */
+#define DB_SET_LOCK_TIMEOUT 31 /* set_timeout() */
+#define DB_SET_RANGE 32 /* c_get() */
+#define DB_SET_RECNO 33 /* get(), c_get() */
+#define DB_SET_TXN_NOW 34 /* set_timeout() (internal) */
+#define DB_SET_TXN_TIMEOUT 35 /* set_timeout() */
+#define DB_UPDATE_SECONDARY 36 /* c_get(), c_del() (internal) */
+#define DB_WRITECURSOR 37 /* cursor() */
+#define DB_WRITELOCK 38 /* cursor() (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+/* DB_DIRTY_READ 0x01000000 Dirty Read. */
+#define DB_FLUSH 0x02000000 /* Flush data to disk. */
+#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */
+#define DB_NOCOPY 0x10000000 /* Don't copy data */
+#define DB_PERMANENT 0x20000000 /* Flag record with REP_PERMANENT. */
+#define DB_RMW 0x40000000 /* Acquire write flag immediately. */
+#define DB_WRNOSYNC 0x80000000 /* Private: write, don't sync log_put */
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30989)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30988)/* There are two masters. */
+#define DB_REP_HOLDELECTION (-30987)/* Time to hold an election. */
+#define DB_REP_NEWMASTER (-30986)/* We have learned of a new master. */
+#define DB_REP_NEWSITE (-30985)/* New site entered system. */
+#define DB_REP_OUTDATED (-30984)/* Site is too far behind master. */
+#define DB_REP_UNAVAIL (-30983)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30982)/* Panic return. */
+#define DB_SECONDARY_BAD (-30981)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30980)/* Verify failed; bad format. */
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_LOCK_NOTEXIST (-30896)/* Object to lock is gone. */
+#define DB_NEEDSPLIT (-30895)/* Page needs to be split. */
+#define DB_SURPRISE_KID (-30894)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30893)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30892)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30891)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30890)/* DB->verify cannot proceed. */
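+
+/*
+ * Example (illustrative sketch): the usual pattern for handling the
+ * public return codes from DB->get.  The key/data DBT setup is omitted.
+ *
+ *	switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ *	case 0:
+ *		break;			(found)
+ *	case DB_NOTFOUND:
+ *		break;			(no such key)
+ *	default:
+ *		dbp->err(dbp, ret, "DB->get");
+ *		break;
+ *	}
+ */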
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+ DB_CACHE_PRIORITY priority; /* Priority in the buffer pool. */
+
+ DB_MUTEX *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ u_int32_t lid; /* Locker id for handle locking. */
+ u_int32_t cur_lid; /* Current handle lock holder. */
+ u_int32_t associate_lid; /* Locker id for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how
+ * many threads are updating this secondary (see __db_c_put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in in the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ u_int32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* Methods. */
+ int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
+ const DBT *, DBT *), u_int32_t));
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_cache_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*stat) __P((DB *, void *, u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *,
+ DB_TXN *, const char *, const char *, DB_LSN *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*stored_close) __P((DB *, u_int32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */
+#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */
+#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */
+#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */
+#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */
+#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */
+#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */
+#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */
+#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00000200 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */
+#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */
+#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */
+#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */
+#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called. */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad. */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly. */
+#define DB_AM_RECNUM 0x00080000 /* DB_RECNUM. */
+#define DB_AM_RECOVER 0x00100000 /* DB opened by recovery routine. */
+#define DB_AM_RENUMBER 0x00200000 /* DB_RENUMBER. */
+#define DB_AM_REVSPLITOFF 0x00400000 /* DB_REVSPLITOFF. */
+#define DB_AM_SECONDARY 0x00800000 /* Database is a secondary index. */
+#define DB_AM_SNAPSHOT 0x01000000 /* DB_SNAPSHOT. */
+#define DB_AM_SUBDB 0x02000000 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x04000000 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x08000000 /* Opened in a transaction. */
+#define DB_AM_VERIFYING 0x10000000 /* DB handle is in the verifier. */
+ u_int32_t flags;
+};
+
+/*
+ * Macros for bulk get. Note that wherever we use a DBT *, we explicitly
+ * cast it; this allows the same macros to work with C++ Dbt *'s, as Dbt
+ * is a subclass of struct DBT in C++.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (u_int8_t *)((DBT *)(dbt))->data + \
+ ((DBT *)(dbt))->ulen - sizeof(u_int32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (u_int8_t *)((DBT *)(dbt))->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retklen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
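+
+/*
+ * Example (illustrative sketch): walking the result of a bulk DBC->c_get
+ * with the macros above.  The data DBT is assumed to point at a
+ * user-supplied buffer (data.ulen set, DB_DBT_USERMEM set) large enough
+ * for at least one page of results; key/data declarations are omitted.
+ *
+ *	void *p, *retdata;
+ *	u_int32_t retdlen;
+ *	int ret;
+ *
+ *	while ((ret = dbc->c_get(dbc,
+ *	    &key, &data, DB_MULTIPLE | DB_NEXT)) == 0) {
+ *		for (DB_MULTIPLE_INIT(p, &data);;) {
+ *			DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+ *			if (p == NULL)
+ *				break;
+ *			(use retdata/retdlen here)
+ *		}
+ *	}
+ */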
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+ /* Private: for secondary indices. */
+ int (*c_real_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */
+#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */
+#define DBC_OPD 0x0008 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0020 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x0100 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */
+#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */
+#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */
+ u_int32_t flags;
+};
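+
+/*
+ * Example (illustrative sketch): a plain forward scan using the public
+ * cursor methods above.  DBT initialization and error reporting omitted.
+ *
+ *	DBC *dbc;
+ *	int ret;
+ *
+ *	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ *		return (ret);
+ *	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
+ *		(process key/data here)
+ *	if (ret == DB_NOTFOUND)
+ *		ret = 0;
+ *	(void)dbc->c_close(dbc);
+ */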
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_extentsize; /* Pages per extent. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries. There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_REPLICATION 0x0008 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_size; /* Log file size. */
+ u_int32_t lg_regionmax; /* Region size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ int rep_eid; /* environment id. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int panic_errval; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+ u_int32_t tas_spins; /* test-and-set spins. */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ DB_MUTEX *dblist_mutexp; /* Mutex. */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* DB_ENV Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, u_int32_t));
+ int (*dbrename) __P((DB_ENV *, DB_TXN *,
+ const char *, const char *, const char *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rpc_server) __P((DB_ENV *,
+ void *, const char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tas_spins) __P((DB_ENV *, u_int32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ void *lg_handle; /* Log handle and methods. */
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+ int (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+
+ void *lk_handle; /* Lock handle and methods. */
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+ int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ int (*lock_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*lock_get) __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, u_int32_t *));
+ int (*lock_id_free) __P((DB_ENV *, u_int32_t));
+ int (*lock_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*lock_downgrade) __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+
+ void *mp_handle; /* Mpool handle and methods. */
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ int (*memp_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int (*memp_nameop) __P((DB_ENV *,
+ u_int8_t *, const char *, const char *, const char *));
+ int (*memp_register) __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+
+ void *rep_handle; /* Replication handle and methods. */
+ int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *));
+ int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ int (*set_rep_election) __P((DB_ENV *,
+ u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+ int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_timeout) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_transport) __P((DB_ENV *, int,
+ int (*) (DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+
+ void *tx_handle; /* Txn handle and methods. */
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ int (*txn_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*txn_recover) __P((DB_ENV *,
+ DB_PREPLIST *, long, long *, u_int32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTSEND 2 /* after REP_ELECT msgnit */
+#define DB_TEST_ELECTVOTE1 3 /* after __rep_send_vote 1 */
+#define DB_TEST_ELECTVOTE2 4 /* after __rep_wait */
+#define DB_TEST_ELECTWAIT1 5 /* after REP_VOTE2 */
+#define DB_TEST_ELECTWAIT2 6 /* after __rep_wait 2 */
+#define DB_TEST_PREDESTROY 7 /* before destroy op */
+#define DB_TEST_PREOPEN 8 /* before __os_open */
+#define DB_TEST_POSTDESTROY 9 /* after destroy op */
+#define DB_TEST_POSTLOG 10 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 11 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 12 /* after __os_open */
+#define DB_TEST_POSTSYNC 13 /* after syncing the log */
+#define DB_TEST_SUBDB_LOCKS 14 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */
+#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */
+#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */
+#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOLOCKING 0x0000200 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x0000400 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x0000800 /* Okay if panic set. */
+#define DB_ENV_OPEN_CALLED 0x0001000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x0002000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x0004000 /* DB_PRIVATE set. */
+#define DB_ENV_REGION_INIT 0x0008000 /* DB_REGION_INIT set. */
+#define DB_ENV_REP_CLIENT 0x0010000 /* Replication client. */
+#define DB_ENV_REP_LOGSONLY 0x0020000 /* Log files only replication site. */
+#define DB_ENV_REP_MASTER 0x0040000 /* Replication master. */
+#define DB_ENV_RPCCLIENT 0x0080000 /* DB_CLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x0800000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */
+ u_int32_t flags;
+};
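+
+/*
+ * Example (illustrative sketch): creating and opening an environment with
+ * the methods above.  The home directory is a placeholder and only
+ * subsystem flags that appear elsewhere in this patch are used.
+ *
+ *	DB_ENV *dbenv;
+ *	int ret;
+ *
+ *	if ((ret = db_env_create(&dbenv, 0)) != 0)
+ *		return (ret);
+ *	dbenv->set_errpfx(dbenv, "myapp");
+ *	if ((ret = dbenv->open(dbenv, "/path/to/env",
+ *	    DB_CREATE | DB_INIT_LOG | DB_INIT_TXN, 0)) != 0) {
+ *		(void)dbenv->close(dbenv, 0);
+ *		return (ret);
+ *	}
+ */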
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+typedef struct __db DBM;
+
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr(a)
+#define dbm_close(a) __db_ndbm_close(a)
+#define dbm_delete(a, b) __db_ndbm_delete(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno(a)
+#define dbm_error(a) __db_ndbm_error(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey(a)
+#define dbm_open(a, b, c) __db_ndbm_open(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly(a)
+#define dbm_store(a, b, c, d) \
+ __db_ndbm_store(a, b, c, d)
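+
+/*
+ * Example (illustrative sketch): the translated ndbm calls are used like
+ * the historic interface; DB appends DBM_SUFFIX to the file name.  The
+ * open flags assume <fcntl.h> is included.
+ *
+ *	DBM *dbm;
+ *	datum key, val;
+ *
+ *	if ((dbm = dbm_open("mydb", O_CREAT | O_RDWR, 0644)) == NULL)
+ *		return (1);
+ *	key.dptr = "fruit";	key.dsize = sizeof("fruit");
+ *	val.dptr = "apple";	val.dsize = sizeof("apple");
+ *	(void)dbm_store(dbm, key, val, DBM_REPLACE);
+ *	val = dbm_fetch(dbm, key);
+ *	dbm_close(dbm);
+ */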
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init(a)
+#define dbmclose __db_dbm_close
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete(a)
+#endif
+#define fetch(a) __db_dbm_fetch(a)
+#define firstkey __db_dbm_firstkey
+#define nextkey(a) __db_dbm_nextkey(a)
+#define store(a, b) __db_dbm_store(a, b)
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+#define hcreate(a) __db_hcreate(a)
+#define hdestroy __db_hdestroy
+#define hsearch(a, b) __db_hsearch(a, b)
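+
+/*
+ * Example (illustrative sketch): the hsearch emulation follows the
+ * historic semantics: hcreate sizes the table, ENTER inserts, FIND
+ * looks up.
+ *
+ *	ENTRY item, *found;
+ *
+ *	if (hcreate(100) == 0)
+ *		return (1);
+ *	item.key = "alpha";
+ *	item.data = "first";
+ *	(void)hsearch(item, ENTER);
+ *	found = hsearch(item, FIND);
+ *	hdestroy();
+ */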
+
+#endif /* DB_DBM_HSEARCH */
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_H_ */
+
+/* DO NOT EDIT: automatically built by dist/s_rpc. */
+#define DB_RPC_SERVERPROG ((unsigned long)(351457))
+#define DB_RPC_SERVERVERS ((unsigned long)(4001))
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_PROT_IN_
+#define _DB_EXT_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int db_create __P((DB **, DB_ENV *, u_int32_t));
+char *db_strerror __P((int));
+int db_env_create __P((DB_ENV **, u_int32_t));
+char *db_version __P((int *, int *, int *));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int txn_abort __P((DB_TXN *));
+int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int txn_commit __P((DB_TXN *, u_int32_t));
+#if DB_DBM_HSEARCH != 0
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
+int __db_dbm_close __P((void));
+int __db_dbm_dbrdonly __P((void));
+int __db_dbm_delete __P((datum));
+int __db_dbm_dirf __P((void));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_pagf __P((void));
+int __db_dbm_store __P((datum, datum));
+#endif
+#if DB_DBM_HSEARCH != 0
+int __db_hcreate __P((size_t));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
+void __db_hdestroy __P((void));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_PROT_IN_ */
diff --git a/libdb/build_vxworks/db_archive/db_archive.c b/libdb/build_vxworks/db_archive/db_archive.c
new file mode 100644
index 0000000..b0c5a4a
--- /dev/null
+++ b/libdb/build_vxworks/db_archive/db_archive.c
@@ -0,0 +1,195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_archive_main __P((int, char *[]));
+int db_archive_usage __P((void));
+int db_archive_version_check __P((const char *));
+
+int
+db_archive(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_archive", args, &argc, &argv);
+ return (db_archive_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
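+
+/*
+ * Illustrative note: the wrapper above takes the whole command line as a
+ * single string, which __db_util_arg splits into argc/argv before calling
+ * db_archive_main.  From a VxWorks shell (syntax assumed) that might look
+ * like:
+ *
+ *	-> db_archive("-h /db/env -l")
+ */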
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_archive_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list, *passwd;
+
+ if ((ret = db_archive_version_check(progname)) != 0)
+ return (ret);
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ah:lP:sVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_archive_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_archive_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ free(list);
+ }
+
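+	/* The shutdown label is reached only from the error paths above. */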
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_archive_usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_archive [-alsVv] [-h home] [-P password]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_archive_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_archive/db_archive.wpj b/libdb/build_vxworks/db_archive/db_archive.wpj
new file mode 100755
index 0000000..06091bb
--- /dev/null
+++ b/libdb/build_vxworks/db_archive/db_archive.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_archive.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_archive.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_archive.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_archive.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_archive.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> FILE_db_archive.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_archive.c
+<END>
+
+<BEGIN> userComments
+db_archive
+<END>
diff --git a/libdb/build_vxworks/db_archive/db_archive/Makefile.custom b/libdb/build_vxworks/db_archive/db_archive/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_archive/db_archive/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_archive/db_archive/component.cdf b/libdb/build_vxworks/db_archive/db_archive/component.cdf
new file mode 100755
index 0000000..cf88762
--- /dev/null
+++ b/libdb/build_vxworks/db_archive/db_archive/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_ARCHIVE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_archive.o
+ NAME db_archive
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_archive.o {
+
+ NAME db_archive.o
+ SRC_PATH_NAME $PRJ_DIR/../db_archive.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_archive/db_archive/component.wpj b/libdb/build_vxworks/db_archive/db_archive/component.wpj
new file mode 100755
index 0000000..e50d915
--- /dev/null
+++ b/libdb/build_vxworks/db_archive/db_archive/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_archive.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_checkpoint/db_checkpoint.c b/libdb/build_vxworks/db_checkpoint/db_checkpoint.c
new file mode 100644
index 0000000..5ccd949
--- /dev/null
+++ b/libdb/build_vxworks/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_checkpoint_main __P((int, char *[]));
+int db_checkpoint_usage __P((void));
+int db_checkpoint_version_check __P((const char *));
+
+int
+db_checkpoint(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_checkpoint", args, &argc, &argv);
+ return (db_checkpoint_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_checkpoint_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile, *passwd;
+
+ if ((ret = db_checkpoint_version_check(progname)) != 0)
+ return (ret);
+
+ /*
+ * !!!
+	 * Don't allow a fully unsigned 32-bit number; some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_checkpoint_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_checkpoint_usage());
+
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "DB_ENV->memp_register: failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv,
+ kbytes, minutes, flags)) != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_checkpoint_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_checkpoint [-1Vv]",
+ "[-h home] [-k kbytes] [-L file] [-P password] [-p min]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_checkpoint_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_checkpoint/db_checkpoint.wpj b/libdb/build_vxworks/db_checkpoint/db_checkpoint.wpj
new file mode 100755
index 0000000..cae4317
--- /dev/null
+++ b/libdb/build_vxworks/db_checkpoint/db_checkpoint.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_checkpoint.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_checkpoint.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_checkpoint.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_checkpoint.c
+<END>
+
+<BEGIN> userComments
+db_checkpoint
+<END>
diff --git a/libdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom b/libdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf b/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
new file mode 100755
index 0000000..ea05c3a
--- /dev/null
+++ b/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_CHECKPOINT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_checkpoint.o
+ NAME db_checkpoint
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
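+			/* The interval is given as "sec[.usec]"; split at the dot. */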
+
+Module db_checkpoint.o {
+
+ NAME db_checkpoint.o
+ SRC_PATH_NAME $PRJ_DIR/../db_checkpoint.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj b/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
new file mode 100755
index 0000000..3b5daa1
--- /dev/null
+++ b/libdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_checkpoint.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_config.h b/libdb/build_vxworks/db_config.h
new file mode 100644
index 0000000..bdc4d6a
--- /dev/null
+++ b/libdb/build_vxworks/db_config.h
@@ -0,0 +1,382 @@
+/* DO NOT EDIT: automatically built by dist/s_vxworks. */
+/* !!!
+ * The CONFIG_TEST option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* !!!
+ * The DEBUG option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* !!!
+ * The DIAGNOSTIC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+#define HAVE_MUTEX_VXWORKS 1
+
+/* Define to 1 to use Windows mutexes. */
+/* #undef HAVE_MUTEX_WIN32 */
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+#define HAVE_SCHED_YIELD 1
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+/* #undef HAVE_SNPRINTF */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+/* #undef HAVE_STRDUP */
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+/* #undef HAVE_SYS_STAT_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #undef HAVE_SYS_TYPES_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#define HAVE_UNLINK_WITH_OPEN_FAILURE 1
+
+/* Define to 1 if you have the `vsnprintf' function. */
+/* #undef HAVE_VSNPRINTF */
+
+/* Define to 1 if building VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+/* #undef HAVE__FSTATI64 */
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB 4.1.25"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-4.1.25"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "4.1.25"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#define optarg __db_Coptarg
+#define opterr __db_Copterr
+#define optind __db_Coptind
+#define optopt __db_Coptopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on VxWorks.
+ */
+#include "vxWorks.h"
diff --git a/libdb/build_vxworks/db_deadlock/db_deadlock.c b/libdb/build_vxworks/db_deadlock/db_deadlock.c
new file mode 100644
index 0000000..509c885
--- /dev/null
+++ b/libdb/build_vxworks/db_deadlock/db_deadlock.c
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_deadlock_main __P((int, char *[]));
+int db_deadlock_usage __P((void));
+int db_deadlock_version_check __P((const char *));
+
+int
+db_deadlock(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_deadlock", args, &argc, &argv);
+ return (db_deadlock_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_deadlock_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
+ u_int32_t atype;
+ time_t now;
+ long secs, usecs;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile, *str;
+
+ if ((ret = db_deadlock_version_check(progname)) != 0)
+ return (ret);
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ secs = usecs = 0;
+ e_close = exitval = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ return (db_deadlock_usage());
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ return (db_deadlock_usage());
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (db_deadlock_usage());
+
+ break;
+
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
+ break;
+ case '?':
+ default:
+ return (db_deadlock_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_deadlock_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_deadlock_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_deadlock [-Vv]",
+ "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_deadlock_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_deadlock/db_deadlock.wpj b/libdb/build_vxworks/db_deadlock/db_deadlock.wpj
new file mode 100755
index 0000000..10cc2dc
--- /dev/null
+++ b/libdb/build_vxworks/db_deadlock/db_deadlock.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_deadlock.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_deadlock.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_deadlock.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> FILE_db_deadlock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_deadlock.c
+<END>
+
+<BEGIN> userComments
+db_deadlock
+<END>
diff --git a/libdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom b/libdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_deadlock/db_deadlock/component.cdf b/libdb/build_vxworks/db_deadlock/db_deadlock/component.cdf
new file mode 100755
index 0000000..efc4984
--- /dev/null
+++ b/libdb/build_vxworks/db_deadlock/db_deadlock/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DEADLOCK {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_deadlock.o
+ NAME db_deadlock
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_deadlock.o {
+
+ NAME db_deadlock.o
+ SRC_PATH_NAME $PRJ_DIR/../db_deadlock.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_deadlock/db_deadlock/component.wpj b/libdb/build_vxworks/db_deadlock/db_deadlock/component.wpj
new file mode 100755
index 0000000..f9a1b82
--- /dev/null
+++ b/libdb/build_vxworks/db_deadlock/db_deadlock/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_deadlock.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_dump/db_dump.c b/libdb/build_vxworks/db_dump/db_dump.c
new file mode 100644
index 0000000..152760c
--- /dev/null
+++ b/libdb/build_vxworks/db_dump/db_dump.c
@@ -0,0 +1,626 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_dump_db_init __P((DB_ENV *, char *, int, u_int32_t, int *));
+int db_dump_dump __P((DB *, int, int));
+int db_dump_dump_sub __P((DB_ENV *, DB *, char *, int, int));
+int db_dump_is_sub __P((DB *, int *));
+int db_dump_main __P((int, char *[]));
+int db_dump_show_subs __P((DB *));
+int db_dump_usage __P((void));
+int db_dump_version_check __P((const char *));
+
+int
+db_dump(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_dump", args, &argc, &argv);
+ return (db_dump_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_dump_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_dump";
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t cache;
+ int ch, d_close;
+ int e_close, exitval, keyflag, lflag, nflag, pflag, private;
+ int ret, Rflag, rflag, resize, subs;
+ char *dopt, *home, *passwd, *subname;
+
+ if ((ret = db_dump_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+ keyflag = 0;
+ cache = MEGABYTE;
+ private = 0;
+ dopt = home = passwd = subname = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF)
+ switch (ch) {
+ case 'd':
+ dopt = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ keyflag = 1;
+ break;
+ case 'l':
+ lflag = 1;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case 's':
+ subname = optarg;
+ break;
+ case 'R':
+ Rflag = 1;
+ /* DB_AGGRESSIVE requires DB_SALVAGE */
+ /* FALLTHROUGH */
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_dump_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_dump_usage());
+
+ if (dopt != NULL && pflag) {
+ fprintf(stderr,
+ "%s: the -d and -p options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ if (lflag && subname != NULL) {
+ fprintf(stderr,
+ "%s: the -l and -s options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ if (keyflag && rflag) {
+ fprintf(stderr, "%s: %s", progname,
+ "the -k and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ if (subname != NULL && rflag) {
+ fprintf(stderr, "%s: %s", progname,
+ "the -s and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto err;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto err;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto err;
+ }
+ }
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto err;
+ }
+
+ /* Initialize the environment. */
+ if (db_dump_db_init(dbenv, home, rflag, cache, &private) != 0)
+ goto err;
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ d_close = 1;
+
+ /*
+ * If we're salvaging, don't do an open; it might not be safe.
+ * Dispatch now into the salvager.
+ */
+ if (rflag) {
+ if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+ DB_SALVAGE |
+ (Rflag ? DB_AGGRESSIVE : 0) |
+ (pflag ? DB_PRINTABLE : 0))) != 0)
+ goto err;
+ exitval = 0;
+ goto done;
+ }
+
+ if ((ret = dbp->open(dbp, NULL,
+ argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", argv[0]);
+ goto err;
+ }
+ if (private != 0) {
+ if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ if (dopt != NULL) {
+ if (__db_dump(dbp, dopt, NULL)) {
+ dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+ goto err;
+ }
+ } else if (lflag) {
+ if (db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs == 0) {
+ dbp->errx(dbp,
+ "%s: does not contain multiple databases", argv[0]);
+ goto err;
+ }
+ if (db_dump_show_subs(dbp))
+ goto err;
+ } else {
+ subs = 0;
+ if (subname == NULL && db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs) {
+ if (db_dump_dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
+ goto err;
+ } else
+ if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag))
+ goto err;
+ }
+
+ if (0) {
+err: exitval = 1;
+ }
+done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_dump_db_init(dbenv, home, is_salvage, cache, is_privatep)
+ DB_ENV *dbenv;
+ char *home;
+ int is_salvage;
+ u_int32_t cache;
+ int *is_privatep;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
+ *
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
+ */
+ *is_privatep = 0;
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ *is_privatep = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * is_sub --
+ * Return if the database contains subdatabases.
+ */
+int
+db_dump_is_sub(dbp, yesno)
+ DB *dbp;
+ int *yesno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_HASH_STAT *hsp;
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ free(btsp);
+ break;
+ case DB_HASH:
+ if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ free(hsp);
+ break;
+ case DB_QUEUE:
+ break;
+ default:
+ dbp->errx(dbp, "unknown database type");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump_sub --
+ * Dump out the records for a DB containing subdatabases.
+ */
+int
+db_dump_dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+ DB_ENV *dbenv;
+ DB *parent_dbp;
+ char *parent_name;
+ int pflag, keyflag;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+ char *subdb;
+
+ /*
+ * Get a cursor and step through the database, dumping out each
+ * subdatabase.
+ */
+ if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ /* Nul terminate the subdatabase name. */
+ if ((subdb = malloc(key.size + 1)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ memcpy(subdb, key.data, key.size);
+ subdb[key.size] = '\0';
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ free(subdb);
+ return (1);
+ }
+ if ((ret = dbp->open(dbp, NULL,
+ parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+ dbp->err(dbp, ret,
+ "DB->open: %s:%s", parent_name, subdb);
+ if (ret == 0 &&
+ (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag)))
+ ret = 1;
+ (void)dbp->close(dbp, 0);
+ free(subdb);
+ if (ret != 0)
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbenv->err(dbenv, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbenv->err(dbenv, ret, "DBcursor->close");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * show_subs --
+ * Display the subdatabases for a database.
+ */
+int
+db_dump_show_subs(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+
+ /*
+ * Get a cursor and step through the database, printing out the key
+ * of each key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ if ((ret = __db_prdbt(&key, 1, NULL, stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump --
+ * Dump out the records for a DB.
+ */
+int
+db_dump_dump(dbp, pflag, keyflag)
+ DB *dbp;
+ int pflag, keyflag;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DBT keyret, dataret;
+ db_recno_t recno;
+ int is_recno, failed, ret;
+ void *pointer;
+
+ /*
+ * Get a cursor and step through the database, printing out each
+ * key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ failed = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ data.data = malloc(1024 * 1024);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = 1024 * 1024;
+ data.flags = DB_DBT_USERMEM;
+ is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+ keyflag = is_recno ? keyflag : 1;
+ if (is_recno) {
+ keyret.data = &recno;
+ keyret.size = sizeof(recno);
+ }
+
+retry:
+ while ((ret =
+ dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+ DB_MULTIPLE_INIT(pointer, &data);
+ for (;;) {
+ if (is_recno)
+ DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+ recno, dataret.data, dataret.size);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, keyret.data,
+ keyret.size, dataret.data, dataret.size);
+
+ if (dataret.data == NULL)
+ break;
+
+ if ((keyflag && (ret = __db_prdbt(&keyret,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&dataret, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ failed = 1;
+ goto err;
+ }
+ }
+ }
+ if (ret == ENOMEM) {
+ data.data = realloc(data.data, data.size);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = data.size;
+ goto retry;
+ }
+
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ failed = 1;
+ }
+
+err: if (data.data != NULL)
+ free(data.data);
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ failed = 1;
+ }
+
+ (void)__db_prfooter(stdout, __db_verify_callback);
+ return (failed);
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_dump_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_dump [-klNprRV]",
+ "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+db_dump_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
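
Note (illustrative sketch, not part of the patch): on VxWorks this utility has
no main(); db_dump() above receives the whole command line as a single string
and splits it with __db_util_arg() before calling db_dump_main(). A minimal
caller might look like the following; the database home and file name are
hypothetical, and a writable buffer is used in case the helper tokenizes the
string in place:

    #include <stdio.h>

    extern int db_dump(char *);   /* entry point from db_dump.c above */

    void
    dump_contacts(void)
    {
        /* Equivalent of running "db_dump -p -h /vxsim/dbhome contacts.db". */
        char args[] = "-p -h /vxsim/dbhome contacts.db";

        if (db_dump(args) != 0)
            printf("db_dump failed\n");
    }
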
diff --git a/libdb/build_vxworks/db_dump/db_dump.wpj b/libdb/build_vxworks/db_dump/db_dump.wpj
new file mode 100755
index 0000000..6813766
--- /dev/null
+++ b/libdb/build_vxworks/db_dump/db_dump.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_dump.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_dump.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_dump.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_dump.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_dump.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> FILE_db_dump.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_dump.c
+<END>
+
+<BEGIN> userComments
+db_dump
+<END>
diff --git a/libdb/build_vxworks/db_dump/db_dump/Makefile.custom b/libdb/build_vxworks/db_dump/db_dump/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_dump/db_dump/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_dump/db_dump/component.cdf b/libdb/build_vxworks/db_dump/db_dump/component.cdf
new file mode 100755
index 0000000..5c1d4cc
--- /dev/null
+++ b/libdb/build_vxworks/db_dump/db_dump/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DUMP {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_dump.o
+ NAME db_dump
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_dump.o {
+
+ NAME db_dump.o
+ SRC_PATH_NAME $PRJ_DIR/../db_dump.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_dump/db_dump/component.wpj b/libdb/build_vxworks/db_dump/db_dump/component.wpj
new file mode 100755
index 0000000..e234641
--- /dev/null
+++ b/libdb/build_vxworks/db_dump/db_dump/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_dump.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_int.h b/libdb/build_vxworks/db_int.h
new file mode 100644
index 0000000..eb6a8ea
--- /dev/null
+++ b/libdb/build_vxworks/db_int.h
@@ -0,0 +1,474 @@
+/* DO NOT EDIT: automatically built by dist/s_vxworks. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * System includes, db.h, a few general DB includes. The DB includes are
+ * here because it's OK if db_int.h includes queue structure declarations.
+ *******************************************************/
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "db.h"
+
+#include "dbinc/queue.h"
+#include "dbinc/shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/* Test for a power-of-two (tests true for zero, which doesn't matter here). */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+/* Test for valid page sizes. */
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+
+/* Minimum number of pages cached, by default. */
+#define DB_MINPAGECACHE 16
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+typedef unsigned long db_align_t;
+typedef unsigned long db_alignp_t;
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/*
+ * Convert a pointer to a small integral value.
+ *
+ * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast
+ * converts the value to an integral type, and the (u_int16_t) cast converts
+ * it to a small integral type so we don't get complaints when we assign the
+ * final result to an integral type smaller than db_alignp_t.
+ */
+#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p))
+#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p))
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0]))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) ((flags) &= ~(f))
+#define LF_ISSET(f) ((flags) & (f))
+#define LF_SET(f) ((flags) |= (f))
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * API return values
+ *******************************************************/
+ /*
+ * Return values that are OK for each different call. Most calls have
+ * a standard 'return of 0 is only OK value', but some, like db->get
+ * have DB_NOTFOUND as a return value, but it really isn't an error.
+ */
+#define DB_RETOK_STD(ret) ((ret) == 0)
+#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBCGET(ret) DB_RETOK_DBGET(ret)
+#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBDEL(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST)
+#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
+#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || (ret) == DB_REP_NEWMASTER || \
+ (ret) == DB_REP_NEWSITE)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "/\\" /* Path separator character(s). */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */
+#define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */
+#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x0008 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x0020 /* Opening a region file. */
+#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x0080 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * CRYPTO_ON Security has been configured.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * RPC_ON RPC has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING: The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Initialization methods are often illegal before/after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \
+ if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 0));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, i, flags));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env((dbp)->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */
+#define FREE_IF_NEEDED(sdbp, dbt) \
+ if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \
+ __os_ufree((sdbp)->dbenv, (dbt)->data); \
+ F_CLR((dbt), DB_DBT_APPMALLOC); \
+ }
+
+/*
+ * Use memory belonging to object "owner" to return the results of
+ * any no-DBT-flag get ops on cursor "dbc".
+ */
+#define SET_RET_MEM(dbc, owner) \
+ do { \
+ (dbc)->rskey = &(owner)->my_rskey; \
+ (dbc)->rkey = &(owner)->my_rkey; \
+ (dbc)->rdata = &(owner)->my_rdata; \
+ } while (0)
+
+/* Use the return-data memory src is currently set to use in dest as well. */
+#define COPY_RET_MEM(src, dest) \
+ do { \
+ (dest)->rskey = (src)->rskey; \
+ (dest)->rkey = (src)->rkey; \
+ (dest)->rdata = (src)->rdata; \
+ } while (0)
+
+/* Reset the returned-memory pointers to their defaults. */
+#define RESET_RET_MEM(dbc) \
+ do { \
+ (dbc)->rskey = &(dbc)->my_rskey; \
+ (dbc)->rkey = &(dbc)->my_rkey; \
+ (dbc)->rdata = &(dbc)->my_rdata; \
+ } while (0)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ u_int32_t flags; /* Some DB_AM flags needed. */
+ DBTYPE type; /* DB type */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0)
+#define INIT_LSN(LSN) do { \
+ (LSN).file = 1; \
+ (LSN).offset = 0; \
+} while (0)
+
+#define MAX_LSN(LSN) do { \
+ (LSN).file = UINT32_T_MAX; \
+ (LSN).offset = UINT32_T_MAX; \
+} while (0)
+#define IS_MAX_LSN(LSN) \
+ ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX)
+
+/* If logging is turned off, smash the lsn. */
+#define LSN_NOT_LOGGED(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 1; \
+} while (0)
+#define IS_NOT_LOGGED_LSN(LSN) \
+ ((LSN).file == 0 && (LSN).offset == 1)
+
+/*
+ * Test if the environment is currently logging changes. If we're in
+ * recovery or we're a replication client, we don't need to log changes
+ * because they're already in the log, even though we have a fully functional
+ * log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !F_ISSET((dbenv), DB_ENV_REP_CLIENT) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. Note that the DBC_RECOVER flag is set
+ * when we're in abort, as well as during recovery; thus DBC_LOGGING may be
+ * false for a particular dbc even when DBENV_LOGGING is true.
+ *
+ * We explicitly use LOGGING_ON/DB_ENV_REP_CLIENT here because we don't
+ * want to have to pull in the log headers, which IS_RECOVERING (and thus
+ * DBENV_LOGGING) rely on, and because DBC_RECOVER should be set anytime
+ * IS_RECOVERING would be true.
+ */
+#define DBC_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET((dbc), DBC_RECOVER) && \
+ !F_ISSET((dbc)->dbp->dbenv, DB_ENV_REP_CLIENT))
+
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Crypto.
+ *******************************************************/
+#define DB_IV_BYTES 16 /* Bytes per IV */
+#define DB_MAC_KEY 20 /* Bytes per MAC checksum */
+
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * Remaining general DB includes.
+ *******************************************************/
+
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
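
Note (illustrative sketch, not part of the patch): the alignment and flag
helpers defined above are used throughout libdb. The structure, flag value,
and numbers below are hypothetical, chosen only to show how ALIGN() and
F_SET()/F_ISSET()/F_CLR() behave, and the snippet assumes db_int.h and its
dbinc headers are on the include path:

    #include <assert.h>
    #include "db_int.h"

    struct my_handle {              /* hypothetical structure with a "flags" word */
        u_int32_t flags;
    };
    #define MY_OPEN 0x01

    int
    main(void)
    {
        struct my_handle h = { 0 };

        /* ALIGN() rounds an integer up to the next multiple of a power of two. */
        assert(ALIGN(7, 4) == 8);
        assert(ALIGN(8, 4) == 8);

        /* F_SET/F_ISSET/F_CLR operate on the structure's "flags" field. */
        F_SET(&h, MY_OPEN);
        assert(F_ISSET(&h, MY_OPEN));
        F_CLR(&h, MY_OPEN);
        assert(!F_ISSET(&h, MY_OPEN));
        return (0);
    }
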
diff --git a/libdb/build_vxworks/db_load/db_load.c b/libdb/build_vxworks/db_load/db_load.c
new file mode 100644
index 0000000..05b4bcd
--- /dev/null
+++ b/libdb/build_vxworks/db_load/db_load.c
@@ -0,0 +1,1247 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ char *hdrbuf; /* Input file header. */
+ u_long lineno; /* Input file line number. */
+ u_long origline; /* Original file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+ char *home; /* Env home. */
+ char *passwd; /* Env passwd. */
+ int private; /* Private env. */
+ u_int32_t cache; /* Env cache size. */
+} LDG;
+
+void db_load_badend __P((DB_ENV *));
+void db_load_badnum __P((DB_ENV *));
+int db_load_configure __P((DB_ENV *, DB *, char **, char **, int *));
+int db_load_convprintable __P((DB_ENV *, char *, char **));
+int db_load_db_init __P((DB_ENV *, char *, u_int32_t, int *));
+int db_load_dbt_rdump __P((DB_ENV *, DBT *));
+int db_load_dbt_rprint __P((DB_ENV *, DBT *));
+int db_load_dbt_rrecno __P((DB_ENV *, DBT *, int));
+int db_load_digitize __P((DB_ENV *, int, int *));
+int db_load_env_create __P((DB_ENV **, LDG *));
+int db_load_load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
+int db_load_main __P((int, char *[]));
+int db_load_rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int db_load_usage __P((void));
+int db_load_version_check __P((const char *));
+
+#define G(f) ((LDG *)dbenv->app_private)->f
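+
+/*
+ * For example, G(lineno) expands to ((LDG *)dbenv->app_private)->lineno;
+ * db_load_env_create() stores the LDG pointer in dbenv->app_private.
+ */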
+
+ /* Flags to the load function. */
+#define LDF_NOHEADER 0x01 /* No dump header. */
+#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */
+#define LDF_PASSWORD 0x04 /* Encrypt created databases. */
+
+int
+db_load(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_load", args, &argc, &argv);
+ return (db_load_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_load_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DBTYPE dbtype;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t ldf;
+ int ch, existed, exitval, ret;
+ char **clist, **clp;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+ ldg.cache = MEGABYTE;
+ ldg.hdrbuf = NULL;
+ ldg.home = NULL;
+ ldg.passwd = NULL;
+
+ if ((ret = db_load_version_check(ldg.progname)) != 0)
+ return (ret);
+
+ ldf = 0;
+ exitval = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
+ }
+
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ ldg.home = optarg;
+ break;
+ case 'n':
+ ldf |= LDF_NOOVERWRITE;
+ break;
+ case 'P':
+ ldg.passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (ldg.passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ ldg.progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ ldf |= LDF_PASSWORD;
+ break;
+ case 'T':
+ ldf |= LDF_NOHEADER;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ return (db_load_usage());
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_load_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_load_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ */
+ if (db_load_env_create(&dbenv, &ldg) != 0)
+ goto shutdown;
+
+ while (!ldg.endofile)
+ if (db_load_load(dbenv, argv[0], dbtype, clist, ldf,
+ &ldg, &existed) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+ free(clist);
+
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ *
+ * Technically, this is wrong: exit statuses other than 0, EXIT_SUCCESS
+ * and EXIT_FAILURE are implementation-defined by the ANSI C standard.
+ * I don't see any good solutions that don't involve API changes.
+ */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a database.
+ */
+int
+db_load_load(dbenv, name, argtype, clist, flags, ldg, existedp)
+ DB_ENV *dbenv;
+ char *name, **clist;
+ DBTYPE argtype;
+ u_int flags;
+ LDG *ldg;
+ int *existedp;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ u_int32_t put_flags;
+ int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval;
+ char *subdb;
+
+ *existedp = 0;
+
+ put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0;
+ G(endodata) = 0;
+
+ subdb = NULL;
+ ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+retry_db:
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (LF_ISSET(LDF_NOHEADER)) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (db_load_rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (G(endofile))
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (db_load_configure(dbenv, dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+ } else if (keyflag == 0) {
+ dbp->err(dbp, EINVAL, "Keys specified in file");
+ goto err;
+ } else
+ keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO &&
+ argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in db_load_rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* If configured with a password, encrypt databases we create. */
+ if (LF_ISSET(LDF_PASSWORD) &&
+ (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
+ dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+ goto err;
+ }
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
+ DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
+ __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+ if (ldg->private != 0) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ dbp->close(dbp, 0);
+ dbp = NULL;
+ dbenv->close(dbenv, 0);
+ if ((ret = db_load_env_create(&dbenv, ldg)) != 0)
+ goto err;
+ goto retry_db;
+ }
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+ }
+ else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, &data))
+ goto err;
+ } else {
+ if (db_load_dbt_rdump(dbenv, &data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rprint(dbenv, &data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (db_load_dbt_rrecno(dbenv, readp, hexkeys))
+ goto err;
+ } else
+ if (db_load_dbt_rdump(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rdump(dbenv, &data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (G(endodata))
+ break;
+ if (readp != writep) {
+ if (sscanf(readp->data, "%ud", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ *existedp = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)ctxn->abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn->abort(txn);
+ }
+
+ /* Close the database. */
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = 1;
+ }
+
+ if (G(hdrbuf) != NULL)
+ free(G(hdrbuf));
+ G(hdrbuf) = NULL;
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_load_db_init(dbenv, home, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t flags;
+ int ret;
+
+ *is_private = 0;
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ *is_private = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ return (1);
+}
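+
+/*
+ * Net effect of the two open attempts in db_load_db_init(): the join
+ * attempt uses DB_USE_ENVIRON | DB_INIT_LOCK | DB_INIT_LOG |
+ * DB_INIT_MPOOL | DB_INIT_TXN, while the private fallback drops the
+ * lock, log and txn subsystems and opens with DB_USE_ENVIRON |
+ * DB_INIT_MPOOL | DB_CREATE | DB_PRIVATE after setting the requested
+ * cache size.
+ */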
+
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ G(progname), name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ db_load_badnum(dbenv); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
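+
+/*
+ * For example, NUMBER(name, value, "db_pagesize", set_pagesize)
+ * expands to:
+ *
+ *	if (strcmp(name, "db_pagesize") == 0) {
+ *		if (__db_getlong(dbp, NULL, value, 1, LONG_MAX, &val) != 0)
+ *			return (1);
+ *		if ((ret = dbp->set_pagesize(dbp, val)) != 0)
+ *			goto nameerr;
+ *		continue;
+ *	}
+ */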
+
+/*
+ * configure --
+ * Handle command-line configuration options.
+ */
+int
+db_load_configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if (*subdbp != NULL)
+ free(*subdbp);
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ */
+int
+db_load_rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int ch, first, hdr, linelen, buflen, ret, start;
+ char *buf, *name, *p, *value;
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+ name = p = NULL;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if (G(hdrbuf) == NULL) {
+ hdr = 0;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+ G(hdrbuf) = buf;
+ G(origline) = G(lineno);
+ } else {
+ hdr = 1;
+ buf = G(hdrbuf);
+ G(lineno) = G(origline);
+ }
+
+ start = 0;
+ for (first = 1;; first = 0) {
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ buf = &G(hdrbuf)[start];
+ if (hdr == 0) {
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
+
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen + start == buflen) {
+ G(hdrbuf) = (char *)realloc(G(hdrbuf),
+ buflen *= 2);
+ if (G(hdrbuf) == NULL)
+ goto memerr;
+ buf = &G(hdrbuf)[start];
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen++] = '\0';
+ } else
+ linelen = strlen(buf) + 1;
+ start += linelen;
+
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ name = NULL;
+ }
+ /* If we don't see the expected information, it's an error. */
+ if ((name = strdup(buf)) == NULL)
+ goto memerr;
+ if ((p = strchr(name, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+
+ value = p--;
+
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ G(version) = atoi(value);
+
+ if (G(version) > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ G(lineno), G(version));
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ goto err;
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((ret = db_load_convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ goto err;
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword \"%s\"",
+ name);
+ goto err;
+ }
+ ret = 0;
+ if (0) {
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ ret = 1;
+ }
+ if (0)
+err: ret = 1;
+ if (0) {
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ ret = 1;
+ }
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ }
+ return (ret);
+}
+
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
+ */
+int
+db_load_convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+ /*
+ * Just malloc a string big enough for the whole input string plus a
+ * terminating nul; the decoded output is never longer than the input.
+ */
+ if ((outstr = (char *)malloc(strlen(instr) + 1)) == NULL)
+ return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ c = db_load_digitize(dbenv, *instr, &e1) << 4;
+ c |= db_load_digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ db_load_badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
+}
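+
+/*
+ * Example: the encoded input "a\2fz" (the five characters 'a', '\',
+ * '2', 'f', 'z') decodes to the three-character string "a/z", since
+ * hex 2f is '/'.  An escaped backslash ('\' followed by '\') decodes
+ * to a single '\'.
+ */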
+
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ */
+int
+db_load_dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ c1 = db_load_digitize(dbenv,
+ c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ */
+int
+db_load_dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = db_load_digitize(dbenv, c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
+ */
+int
+db_load_dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++G(lineno);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ G(endodata) = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * The ASCII codes for '0'-'9' are 0x30-0x39, so the hex dump
+ * alternates between '3' and the digit itself.  If a digit is
+ * something unexpected, __db_getulong will fail; we only need to
+ * catch end-of-string conditions here.
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: db_load_badend(dbenv);
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
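+
+/*
+ * Example: a version 3 dump represents recno key 127 as the line
+ * " 313237" (a leading space, then the hex codes of the ASCII digits
+ * '1', '2', '7').  The loop above strips the leading '3' of each hex
+ * pair in place, leaving "127" for __db_getulong() to parse.
+ */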
+
+/*
+ * digitize --
+ * Convert a character to an integer.
+ */
+int
+db_load_digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message.
+ */
+void
+db_load_badnum(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end to input message.
+ */
+void
+db_load_badend(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_load_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV] [-c name=value] [-f file]",
+ "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+db_load_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+db_load_env_create(dbenvp, ldg)
+ DB_ENV **dbenvp;
+ LDG *ldg;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if ((ret = db_env_create(dbenvp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv = *dbenvp;
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg->progname);
+ if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ ldg->passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ return (ret);
+ }
+ if ((ret = db_load_db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0)
+ return (ret);
+ dbenv->app_private = ldg;
+
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_load/db_load.wpj b/libdb/build_vxworks/db_load/db_load.wpj
new file mode 100755
index 0000000..59e194a
--- /dev/null
+++ b/libdb/build_vxworks/db_load/db_load.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_load.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_load.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_load.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_load.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_load.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> FILE_db_load.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_load.c
+<END>
+
+<BEGIN> userComments
+db_load
+<END>
diff --git a/libdb/build_vxworks/db_load/db_load/Makefile.custom b/libdb/build_vxworks/db_load/db_load/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_load/db_load/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_load/db_load/component.cdf b/libdb/build_vxworks/db_load/db_load/component.cdf
new file mode 100755
index 0000000..7d1d2bc
--- /dev/null
+++ b/libdb/build_vxworks/db_load/db_load/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_LOAD {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_load.o
+ NAME db_load
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_load.o {
+
+ NAME db_load.o
+ SRC_PATH_NAME $PRJ_DIR/../db_load.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_load/db_load/component.wpj b/libdb/build_vxworks/db_load/db_load/component.wpj
new file mode 100755
index 0000000..216e7d9
--- /dev/null
+++ b/libdb/build_vxworks/db_load/db_load/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_load.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_printlog/db_printlog.c b/libdb/build_vxworks/db_printlog/db_printlog.c
new file mode 100644
index 0000000..18e2886
--- /dev/null
+++ b/libdb/build_vxworks/db_printlog/db_printlog.c
@@ -0,0 +1,375 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+int db_printlog_main __P((int, char *[]));
+int db_printlog_usage __P((void));
+int db_printlog_version_check __P((const char *));
+int db_printlog_print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int db_printlog_open_rep_db __P((DB_ENV *, DB **, DBC **));
+
+int
+db_printlog(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_printlog", args, &argc, &argv);
+ return (db_printlog_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_printlog_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_printlog";
+ DB *dbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data, keydbt;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, rflag, ret, repflag;
+ char *home, *passwd;
+
+ if ((ret = db_printlog_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ dbc = NULL;
+ logc = NULL;
+ e_close = exitval = nflag = rflag = repflag = 0;
+ home = passwd = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'R':
+ repflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_printlog_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ return (db_printlog_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Set up an app-specific dispatch function so that we can gracefully
+ * handle app-specific log records.
+ */
+ if ((ret = dbenv->set_app_dispatch(dbenv, db_printlog_print_app_record)) != 0) {
+ dbenv->err(dbenv, ret, "app_dispatch");
+ goto shutdown;
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ * If we are reading the replication database, do not open the env
+ * with logging, because we don't want to log the opens.
+ */
+ if (repflag) {
+ if ((ret = dbenv->open(dbenv, home,
+ DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ } else if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ /* Allocate a log cursor. */
+ if (repflag) {
+ if ((ret = db_printlog_open_rep_db(dbenv, &dbp, &dbc)) != 0)
+ goto shutdown;
+ } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ memset(&keydbt, 0, sizeof(keydbt));
+ while (!__db_util_interrupted()) {
+ if (repflag) {
+ ret = dbc->c_get(dbc,
+ &keydbt, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret == 0)
+ key = ((REP_CONTROL *)keydbt.data)->lsn;
+ } else
+ ret = logc->get(logc,
+ &key, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv,
+ ret, repflag ? "DB_LOGC->get" : "DBC->get");
+ goto shutdown;
+ }
+
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ exitval = 1;
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0)
+ exitval = 1;
+
+ /*
+ * The dtab is allocated by __db_add_recovery (called by *_init_print)
+ * using the library malloc function (__os_malloc). It thus needs to be
+ * freed using the corresponding free (__os_free).
+ */
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_printlog_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_printlog [-NrV] [-h home] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_printlog_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/* Print an unknown, application-specific log record as best we can. */
+int
+db_printlog_print_app_record(dbenv, dbt, lsnp, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsnp;
+ db_recops op;
+{
+ int ch;
+ u_int32_t i, rectype;
+
+ DB_ASSERT(op == DB_TXN_PRINT);
+ COMPQUIET(dbenv, NULL);
+
+ /*
+ * Fetch the rectype, which always must be at the beginning of the
+ * record (if dispatching is to work at all).
+ */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ /*
+ * Applications may wish to customize the output here based on the
+ * rectype. We just print the entire log record in the generic
+ * mixed-hex-and-printable format we use for binary data.
+ */
+ printf("[%lu][%lu]application specific record: rec: %lu\n",
+ (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype);
+ printf("\tdata: ");
+ for (i = 0; i < dbt->size; i++) {
+ ch = ((u_int8_t *)dbt->data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ printf("\n\n");
+
+ return (0);
+}
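+
+/*
+ * The output for such a record looks roughly like (values are
+ * illustrative only):
+ *
+ *	[1][28]application specific record: rec: 10001
+ *		data: ...
+ *
+ * Every byte of the DBT is shown, printable characters as-is and
+ * anything else in %#x form, so the four rectype bytes appear in the
+ * data line as well.
+ */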
+
+int
+db_printlog_open_rep_db(dbenv, dbpp, dbcp)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ DBC **dbcp;
+{
+ int ret;
+
+ DB *dbp;
+ *dbpp = NULL;
+ *dbcp = NULL;
+
+ if ((ret = db_create(dbpp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ret);
+ }
+
+ dbp = *dbpp;
+ if ((ret =
+ dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ return (0);
+
+err: if (*dbpp != NULL)
+ (void)(*dbpp)->close(*dbpp, 0);
+ return (ret);
+}
diff --git a/libdb/build_vxworks/db_printlog/db_printlog.wpj b/libdb/build_vxworks/db_printlog/db_printlog.wpj
new file mode 100755
index 0000000..514122e
--- /dev/null
+++ b/libdb/build_vxworks/db_printlog/db_printlog.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_printlog.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_printlog.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_printlog.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> FILE_db_printlog.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_printlog.c
+<END>
+
+<BEGIN> userComments
+db_printlog
+<END>
diff --git a/libdb/build_vxworks/db_printlog/db_printlog/Makefile.custom b/libdb/build_vxworks/db_printlog/db_printlog/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_printlog/db_printlog/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_printlog/db_printlog/component.cdf b/libdb/build_vxworks/db_printlog/db_printlog/component.cdf
new file mode 100755
index 0000000..57c6452
--- /dev/null
+++ b/libdb/build_vxworks/db_printlog/db_printlog/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_PRINTLOG {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_printlog.o
+ NAME db_printlog
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_printlog.o {
+
+ NAME db_printlog.o
+ SRC_PATH_NAME $PRJ_DIR/../db_printlog.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_printlog/db_printlog/component.wpj b/libdb/build_vxworks/db_printlog/db_printlog/component.wpj
new file mode 100755
index 0000000..81d2447
--- /dev/null
+++ b/libdb/build_vxworks/db_printlog/db_printlog/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_printlog.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_recover/db_recover.c b/libdb/build_vxworks/db_recover/db_recover.c
new file mode 100644
index 0000000..9dd9ca7
--- /dev/null
+++ b/libdb/build_vxworks/db_recover/db_recover.c
@@ -0,0 +1,328 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+int db_recover_main __P((int, char *[]));
+int db_recover_read_timestamp __P((const char *, char *, time_t *));
+int db_recover_usage __P((void));
+int db_recover_version_check __P((const char *));
+
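+/*
+ * VxWorks entry point: the utility is handed its whole command line as a
+ * single string, which __db_util_arg() splits into an argc/argv pair
+ * before the real main routine below runs.
+ */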
+int
+db_recover(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_recover", args, &argc, &argv);
+ return (db_recover_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_recover_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home, *passwd;
+
+ if ((ret = db_recover_version_check(progname)) != 0)
+ return (ret);
+
+ home = passwd = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = retain_env = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'e':
+ retain_env = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if ((ret =
+ db_recover_read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_recover_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_recover_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+	 * Initialize the environment -- we don't actually do anything
+	 * else; that's all that's needed to run recovery.
+ *
+ * Note that unless the caller specified the -e option, we use a
+ * private environment, as we're about to create a region, and we
+	 * don't want to leave it around.  If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
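+/*
+ * ATOI2() yields the value of two ASCII digits and then advances the
+ * pointer as a separate statement, so it is only safe when used as a
+ * simple "x = ATOI2(p);" assignment, as below.
+ */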
+
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+int
+db_recover_read_timestamp(progname, arg, timep)
+ const char *progname;
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+ char *p;
+ /* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
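+	/* For example, "200204011830.45" means Apr  1 18:30:45 2002. */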
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds defaults to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr:	fprintf(stderr,
+	    "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]\n",
+	    progname);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+db_recover_usage()
+{
+ (void)fprintf(stderr, "%s\n",
+"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_recover_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
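A minimal sketch, assuming a placeholder environment directory of "/db/home",
of how the db_recover() wrapper above might be driven from target code:

#include <stdio.h>
#include <stdlib.h>

extern int db_recover(char *args);

void
run_recovery_example(void)
{
	/* Keep the argument string writable; the helper may tokenize it in place. */
	char args[] = "-c -v -h /db/home";

	if (db_recover(args) != EXIT_SUCCESS)
		printf("db_recover failed\n");
}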
diff --git a/libdb/build_vxworks/db_recover/db_recover.wpj b/libdb/build_vxworks/db_recover/db_recover.wpj
new file mode 100755
index 0000000..2df7234
--- /dev/null
+++ b/libdb/build_vxworks/db_recover/db_recover.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_recover.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_recover.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_recover.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_recover.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_recover.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> FILE_db_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_recover.c
+<END>
+
+<BEGIN> userComments
+db_recover
+<END>
diff --git a/libdb/build_vxworks/db_recover/db_recover/Makefile.custom b/libdb/build_vxworks/db_recover/db_recover/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_recover/db_recover/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_recover/db_recover/component.cdf b/libdb/build_vxworks/db_recover/db_recover/component.cdf
new file mode 100755
index 0000000..d322bf4
--- /dev/null
+++ b/libdb/build_vxworks/db_recover/db_recover/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration.
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_RECOVER {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_recover.o
+ NAME db_recover
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_recover.o {
+
+ NAME db_recover.o
+ SRC_PATH_NAME $PRJ_DIR/../db_recover.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_recover/db_recover/component.wpj b/libdb/build_vxworks/db_recover/db_recover/component.wpj
new file mode 100755
index 0000000..0daf9f6
--- /dev/null
+++ b/libdb/build_vxworks/db_recover/db_recover/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_recover.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_stat/db_stat.c b/libdb/build_vxworks/db_stat/db_stat.c
new file mode 100644
index 0000000..4cea8a9
--- /dev/null
+++ b/libdb/build_vxworks/db_stat/db_stat.c
@@ -0,0 +1,1282 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
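+/*
+ * PCT(free, pages, pgsize): percentage of the pages' total bytes that are
+ * in use, i.e. the fill factor reported in the per-page statistics below.
+ */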
+
+typedef enum { T_NOTSET,
+ T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
+
+int db_stat_argcheck __P((char *, const char *));
+int db_stat_btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_stat_db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
+void db_stat_dl __P((const char *, u_long));
+void db_stat_dl_bytes __P((const char *, u_long, u_long, u_long));
+int db_stat_env_stats __P((DB_ENV *, u_int32_t));
+int db_stat_hash_stats __P((DB_ENV *, DB *, int));
+int db_stat_lock_stats __P((DB_ENV *, char *, u_int32_t));
+int db_stat_log_stats __P((DB_ENV *, u_int32_t));
+int db_stat_main __P((int, char *[]));
+int db_stat_mpool_stats __P((DB_ENV *, char *, u_int32_t));
+void db_stat_prflags __P((u_int32_t, const FN *));
+int db_stat_queue_stats __P((DB_ENV *, DB *, int));
+int db_stat_rep_stats __P((DB_ENV *, u_int32_t));
+int db_stat_txn_compare __P((const void *, const void *));
+int db_stat_txn_stats __P((DB_ENV *, u_int32_t));
+int db_stat_usage __P((void));
+int db_stat_version_check __P((const char *));
+
+int
+db_stat(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_stat", args, &argc, &argv);
+ return (db_stat_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_stat_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ u_int32_t cache;
+ int ch, checked, d_close, e_close, exitval, fast, flags;
+ int nflag, private, resize, ret;
+ char *db, *home, *internal, *passwd, *subdb;
+
+ if ((ret = db_stat_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = fast = flags = nflag = private = 0;
+ db = home = internal = passwd = subdb = NULL;
+
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF)
+ switch (ch) {
+ case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ if (!db_stat_argcheck(internal = optarg, "Aclmop"))
+ return (db_stat_usage());
+ break;
+ case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ db = optarg;
+ break;
+ case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_ENV;
+ break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOG;
+ break;
+ case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ if (!db_stat_argcheck(internal = optarg, "Ahm"))
+ return (db_stat_usage());
+ break;
+ case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_REP;
+ break;
+ case 's':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ subdb = optarg;
+ break;
+ case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
+ case '?':
+ default:
+ return (db_stat_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ return (db_stat_usage());
+ break;
+ case T_NOTSET:
+ return (db_stat_usage());
+ /* NOTREACHED */
+ default:
+ if (fast != 0)
+ return (db_stat_usage());
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /* Initialize the environment. */
+ if (db_stat_db_init(dbenv, home, ttype, cache, &private) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if (flags != 0)
+ return (db_stat_usage());
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ d_close = 1;
+
+ if ((ret = dbp->open(dbp,
+ NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", db);
+ goto shutdown;
+ }
+
+ /* Check if cache is too small for this DB's pagesize. */
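+		/*
+		 * If it is, drop both handles and retry the open from the
+		 * top so a suitably sized cache can be set up on the way in.
+		 */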
+ if (private) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto shutdown;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+	 * If it's a master-db then we cannot, so check: if it's
+	 * a btree then it might be.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto shutdown;
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp, NULL,
+ db, subdb, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->open: %s:%s", db, subdb);
+ (void)alt_dbp->close(alt_dbp, 0);
+ goto shutdown;
+ }
+
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (db_stat_btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (db_stat_hash_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (db_stat_queue_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ dbenv->errx(dbenv, "Unknown database type.");
+ goto shutdown;
+ }
+ break;
+ case T_ENV:
+ if (db_stat_env_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_LOCK:
+ if (db_stat_lock_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_LOG:
+ if (db_stat_log_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_MPOOL:
+ if (db_stat_mpool_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_REP:
+ if (db_stat_rep_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_TXN:
+ if (db_stat_txn_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_NOTSET:
+ dbenv->errx(dbenv, "Unknown statistics flag.");
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * env_stats --
+ * Display environment statistics.
+ */
+int
+db_stat_env_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ REGENV renv;
+ REGION *rp, regs[1024];
+ int n, ret;
+ const char *lable;
+
+ n = sizeof(regs) / sizeof(regs[0]);
+ if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+ dbenv->err(dbenv, ret, "__db_e_stat");
+ return (1);
+ }
+
+ printf("%d.%d.%d\tEnvironment version.\n",
+ renv.majver, renv.minver, renv.patch);
+ printf("%lx\tMagic number.\n", (u_long)renv.magic);
+ printf("%d\tPanic value.\n", renv.envpanic);
+
+ /* Adjust the reference count for us... */
+ printf("%d\tReferences.\n", renv.refcnt - 1);
+
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)renv.mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)renv.mutex.mutex_set_wait);
+
+ while (n > 0) {
+ printf("%s\n", DB_LINE);
+ rp = &regs[--n];
+ switch (rp->type) {
+ case REGION_TYPE_ENV:
+ lable = "Environment";
+ break;
+ case REGION_TYPE_LOCK:
+ lable = "Lock";
+ break;
+ case REGION_TYPE_LOG:
+ lable = "Log";
+ break;
+ case REGION_TYPE_MPOOL:
+ lable = "Mpool";
+ break;
+ case REGION_TYPE_MUTEX:
+ lable = "Mutex";
+ break;
+ case REGION_TYPE_TXN:
+ lable = "Txn";
+ break;
+ case INVALID_REGION_TYPE:
+ default:
+ lable = "Invalid";
+ break;
+ }
+ printf("%s Region: %d.\n", lable, rp->id);
+ db_stat_dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+ printf("%ld\tSegment ID.\n", rp->segid);
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)rp->mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)rp->mutex.mutex_set_wait);
+ }
+
+ return (0);
+}
+
+/*
+ * btree_stats --
+ * Display btree/recno statistics.
+ */
+int
+db_stat_btree_stats(dbenv, dbp, msp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DB_BTREE_STAT *msp;
+ int fast;
+{
+ static const FN fn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_FIXEDLEN, "fixed-length" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "record-numbers" },
+ { BTM_RENUMBER, "renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ DB_BTREE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if (msp != NULL)
+ sp = msp;
+ else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+ printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+ db_stat_prflags(sp->bt_metaflags, fn);
+ if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+ db_stat_dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+ db_stat_dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+ }
+ if (dbp->type == DB_RECNO) {
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+ if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+ printf("%c\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ }
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+ db_stat_dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+ db_stat_dl(dbp->type == DB_BTREE ?
+ "Number of unique keys in the tree.\n" :
+ "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+ db_stat_dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+ db_stat_dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+ db_stat_dl("Number of bytes free in tree internal pages",
+ (u_long)sp->bt_int_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+ db_stat_dl("Number of bytes free in tree leaf pages",
+ (u_long)sp->bt_leaf_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+ db_stat_dl("Number of bytes free in tree duplicate pages",
+ (u_long)sp->bt_dup_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+ db_stat_dl("Number of bytes free in tree overflow pages",
+ (u_long)sp->bt_over_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * hash_stats --
+ * Display hash statistics.
+ */
+int
+db_stat_hash_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ static const FN fn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB,"multiple-databases" },
+ { 0, NULL }
+ };
+ DB_HASH_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+ printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+ db_stat_prflags(sp->hash_metaflags, fn);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ db_stat_dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
+ db_stat_dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+ db_stat_dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+ db_stat_dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+ db_stat_dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+ db_stat_dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+ db_stat_dl("Number of bytes free in overflow pages",
+ (u_long)sp->hash_big_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+ db_stat_dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+ db_stat_dl("Number of bytes free in bucket overflow pages",
+ (u_long)sp->hash_ovfl_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+ db_stat_dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+ db_stat_dl("Number of bytes free in duplicate pages",
+ (u_long)sp->hash_dup_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * queue_stats --
+ * Display queue statistics.
+ */
+int
+db_stat_queue_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ DB_QUEUE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+ printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+ if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+ printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ if (sp->qs_extentsize != 0)
+ db_stat_dl("Underlying database extent size.\n",
+ (u_long)sp->qs_extentsize);
+ db_stat_dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+ db_stat_dl("Number of database pages.\n", (u_long)sp->qs_pages);
+ db_stat_dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+ printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+ printf(
+ "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * lock_stats --
+ * Display lock statistics.
+ */
+int
+db_stat_lock_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_LOCK_STAT *sp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ db_stat_dl("Last allocated locker ID.\n", (u_long)sp->st_id);
+ db_stat_dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid);
+ db_stat_dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+ db_stat_dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+ db_stat_dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+ db_stat_dl("Maximum number of lock objects possible.\n",
+ (u_long)sp->st_maxobjects);
+ db_stat_dl("Number of current locks.\n", (u_long)sp->st_nlocks);
+ db_stat_dl("Maximum number of locks at any one time.\n",
+ (u_long)sp->st_maxnlocks);
+ db_stat_dl("Number of current lockers.\n", (u_long)sp->st_nlockers);
+ db_stat_dl("Maximum number of lockers at any one time.\n",
+ (u_long)sp->st_maxnlockers);
+ db_stat_dl("Number of current lock objects.\n", (u_long)sp->st_nobjects);
+ db_stat_dl("Maximum number of lock objects at any one time.\n",
+ (u_long)sp->st_maxnobjects);
+ db_stat_dl("Total number of locks requested.\n", (u_long)sp->st_nrequests);
+ db_stat_dl("Total number of locks released.\n", (u_long)sp->st_nreleases);
+ db_stat_dl(
+ "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n",
+ (u_long)sp->st_nnowaits);
+ db_stat_dl(
+ "Total number of locks not immediately available due to conflicts.\n",
+ (u_long)sp->st_nconflicts);
+ db_stat_dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+ db_stat_dl("Lock timeout value.\n", (u_long)sp->st_locktimeout);
+ db_stat_dl("Number of locks that have timed out.\n",
+ (u_long)sp->st_nlocktimeouts);
+ db_stat_dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout);
+ db_stat_dl("Number of transactions that have timed out.\n",
+ (u_long)sp->st_ntxntimeouts);
+
+ db_stat_dl_bytes("The size of the lock region.",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * log_stats --
+ * Display log statistics.
+ */
+int
+db_stat_log_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_LOG_STAT *sp;
+ int ret;
+
+ if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+ printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+ db_stat_dl_bytes("Log record cache size",
+ (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+ printf("%#o\tLog file mode.\n", sp->st_mode);
+ if (sp->st_lg_size % MEGABYTE == 0)
+ printf("%luMb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / MEGABYTE);
+ else if (sp->st_lg_size % 1024 == 0)
+ printf("%luKb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / 1024);
+ else
+ printf("%lu\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size);
+ db_stat_dl_bytes("Log bytes written",
+ (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+ db_stat_dl_bytes("Log bytes written since last checkpoint",
+ (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+ db_stat_dl("Total log file writes.\n", (u_long)sp->st_wcount);
+ db_stat_dl("Total log file write due to overflow.\n",
+ (u_long)sp->st_wcount_fill);
+ db_stat_dl("Total log file flushes.\n", (u_long)sp->st_scount);
+ printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+ printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+ printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+ printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+ db_stat_dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+ db_stat_dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+
+ db_stat_dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * mpool_stats --
+ * Display mpool statistics.
+ */
+int
+db_stat_mpool_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_MPOOL_FSTAT **fsp;
+ DB_MPOOL_STAT *gsp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ db_stat_dl_bytes("Total cache size",
+ (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+ db_stat_dl("Number of caches.\n", (u_long)gsp->st_ncache);
+ db_stat_dl_bytes("Pool individual cache size",
+ (u_long)0, (u_long)0, (u_long)gsp->st_regsize);
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
+ db_stat_dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+ if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+ (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+ printf(".\n");
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)gsp->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+ db_stat_dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)gsp->st_page_out);
+ db_stat_dl("Clean pages forced from the cache.\n",
+ (u_long)gsp->st_ro_evict);
+ db_stat_dl("Dirty pages forced from the cache.\n",
+ (u_long)gsp->st_rw_evict);
+ db_stat_dl("Dirty pages written by trickle-sync thread.\n",
+ (u_long)gsp->st_page_trickle);
+ db_stat_dl("Current total page count.\n",
+ (u_long)gsp->st_pages);
+ db_stat_dl("Current clean page count.\n",
+ (u_long)gsp->st_page_clean);
+ db_stat_dl("Current dirty page count.\n",
+ (u_long)gsp->st_page_dirty);
+ db_stat_dl("Number of hash buckets used for page location.\n",
+ (u_long)gsp->st_hash_buckets);
+ db_stat_dl("Total number of times hash chains searched for a page.\n",
+ (u_long)gsp->st_hash_searches);
+ db_stat_dl("The longest hash chain searched for a page.\n",
+ (u_long)gsp->st_hash_longest);
+ db_stat_dl("Total number of hash buckets examined for page location.\n",
+ (u_long)gsp->st_hash_examined);
+ db_stat_dl("The number of hash bucket locks granted without waiting.\n",
+ (u_long)gsp->st_hash_nowait);
+ db_stat_dl("The number of hash bucket locks granted after waiting.\n",
+ (u_long)gsp->st_hash_wait);
+ db_stat_dl("The maximum number of times any hash bucket lock was waited for.\n",
+ (u_long)gsp->st_hash_max_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)gsp->st_region_nowait);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)gsp->st_region_wait);
+ db_stat_dl("The number of page allocations.\n",
+ (u_long)gsp->st_alloc);
+	db_stat_dl("The number of hash buckets examined during allocations.\n",
+	    (u_long)gsp->st_alloc_buckets);
+	db_stat_dl("The max number of hash buckets examined for an allocation.\n",
+	    (u_long)gsp->st_alloc_max_buckets);
+	db_stat_dl("The number of pages examined during allocations.\n",
+	    (u_long)gsp->st_alloc_pages);
+	db_stat_dl("The max number of pages examined for an allocation.\n",
+	    (u_long)gsp->st_alloc_max_pages);
+
+ for (; fsp != NULL && *fsp != NULL; ++fsp) {
+ printf("%s\n", DB_LINE);
+ printf("Pool File: %s\n", (*fsp)->file_name);
+ db_stat_dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
+ db_stat_dl("Requested pages found in the cache",
+ (u_long)(*fsp)->st_cache_hit);
+ if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit /
+ ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
+ 100);
+ printf(".\n");
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)(*fsp)->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n",
+ (u_long)(*fsp)->st_page_create);
+ db_stat_dl("Pages read into the cache.\n",
+ (u_long)(*fsp)->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)(*fsp)->st_page_out);
+ }
+
+ free(gsp);
+
+ return (0);
+}
+
+/*
+ * rep_stats --
+ * Display replication statistics.
+ */
+int
+db_stat_rep_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_REP_STAT *sp;
+ int is_client, ret;
+ const char *p;
+
+ if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ is_client = 0;
+ switch (sp->st_status) {
+ case DB_REP_MASTER:
+ printf("Environment configured as a replication master.\n");
+ break;
+ case DB_REP_CLIENT:
+ printf("Environment configured as a replication client.\n");
+ is_client = 1;
+ break;
+ case DB_REP_LOGSONLY:
+ printf("Environment configured as a logs-only replica.\n");
+ is_client = 1;
+ break;
+ default:
+ printf("Environment not configured for replication.\n");
+ break;
+ }
+
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset,
+ is_client ? "Next LSN expected." : "Next LSN to be used.");
+ p = sp->st_waiting_lsn.file == 0 ?
+ "Not waiting for any missed log records." :
+ "LSN of first missed log record being waited for.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset,
+ p);
+
+ db_stat_dl("Number of duplicate master conditions detected.\n",
+ (u_long)sp->st_dupmasters);
+ if (sp->st_env_id != DB_EID_INVALID)
+ db_stat_dl("Current environment ID.\n", (u_long)sp->st_env_id);
+ else
+ printf("No current environment ID.\n");
+ db_stat_dl("Current environment priority.\n", (u_long)sp->st_env_priority);
+ db_stat_dl("Current generation number.\n", (u_long)sp->st_gen);
+ db_stat_dl("Number of duplicate log records received.\n",
+ (u_long)sp->st_log_duplicated);
+ db_stat_dl("Number of log records currently queued.\n",
+ (u_long)sp->st_log_queued);
+ db_stat_dl("Maximum number of log records ever queued at once.\n",
+ (u_long)sp->st_log_queued_max);
+ db_stat_dl("Total number of log records queued.\n",
+ (u_long)sp->st_log_queued_total);
+ db_stat_dl("Number of log records received and appended to the log.\n",
+ (u_long)sp->st_log_records);
+ db_stat_dl("Number of log records missed and requested.\n",
+ (u_long)sp->st_log_requested);
+ if (sp->st_master != DB_EID_INVALID)
+ db_stat_dl("Current master ID.\n", (u_long)sp->st_master);
+ else
+ printf("No current master ID.\n");
+ db_stat_dl("Number of times the master has changed.\n",
+ (u_long)sp->st_master_changes);
+ db_stat_dl("Number of messages received with a bad generation number.\n",
+ (u_long)sp->st_msgs_badgen);
+ db_stat_dl("Number of messages received and processed.\n",
+ (u_long)sp->st_msgs_processed);
+ db_stat_dl("Number of messages ignored due to pending recovery.\n",
+ (u_long)sp->st_msgs_recover);
+ db_stat_dl("Number of failed message sends.\n",
+ (u_long)sp->st_msgs_send_failures);
+ db_stat_dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent);
+ db_stat_dl("Number of new site messages received.\n", (u_long)sp->st_newsites);
+ db_stat_dl("Transmission limited.\n", (u_long)sp->st_nthrottles);
+ db_stat_dl("Number of outdated conditions detected.\n",
+ (u_long)sp->st_outdated);
+ db_stat_dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied);
+
+ db_stat_dl("Number of elections held.\n", (u_long)sp->st_elections);
+ db_stat_dl("Number of elections won.\n", (u_long)sp->st_elections_won);
+
+ if (sp->st_election_status == 0)
+ printf("No election in progress.\n");
+ else {
+ db_stat_dl("Current election phase.\n", (u_long)sp->st_election_status);
+ db_stat_dl("Election winner.\n",
+ (u_long)sp->st_election_cur_winner);
+ db_stat_dl("Election generation number.\n",
+ (u_long)sp->st_election_gen);
+ printf("%lu/%lu\tMaximum LSN of election winner.\n",
+ (u_long)sp->st_election_lsn.file,
+ (u_long)sp->st_election_lsn.offset);
+ db_stat_dl("Number of sites expected to participate in elections.\n",
+ (u_long)sp->st_election_nsites);
+ db_stat_dl("Election priority.\n", (u_long)sp->st_election_priority);
+ db_stat_dl("Election tiebreaker value.\n",
+ (u_long)sp->st_election_tiebreaker);
+ db_stat_dl("Votes received this election round.\n",
+ (u_long)sp->st_election_votes);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * txn_stats --
+ * Display transaction statistics.
+ */
+int
+db_stat_txn_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_TXN_STAT *sp;
+ u_int32_t i;
+ int ret;
+ const char *p;
+
+ if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ p = sp->st_last_ckp.file == 0 ?
+ "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+ if (sp->st_time_ckp == 0)
+ printf("0\tNo checkpoint timestamp.\n");
+ else
+ printf("%.24s\tCheckpoint timestamp.\n",
+ ctime(&sp->st_time_ckp));
+ printf("%lx\tLast transaction ID allocated.\n",
+ (u_long)sp->st_last_txnid);
+ db_stat_dl("Maximum number of active transactions possible.\n",
+ (u_long)sp->st_maxtxns);
+ db_stat_dl("Active transactions.\n", (u_long)sp->st_nactive);
+ db_stat_dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+ db_stat_dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+ db_stat_dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+ db_stat_dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ db_stat_dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
+ db_stat_dl_bytes("Transaction region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ qsort(sp->st_txnarray,
+ sp->st_nactive, sizeof(sp->st_txnarray[0]), db_stat_txn_compare);
+ for (i = 0; i < sp->st_nactive; ++i) {
+ printf("\tid: %lx; begin LSN: file/offset %lu/%lu",
+ (u_long)sp->st_txnarray[i].txnid,
+ (u_long)sp->st_txnarray[i].lsn.file,
+ (u_long)sp->st_txnarray[i].lsn.offset);
+ if (sp->st_txnarray[i].parentid == 0)
+ printf("\n");
+ else
+ printf(" parent: %lx\n",
+ (u_long)sp->st_txnarray[i].parentid);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+int
+db_stat_txn_compare(a1, b1)
+ const void *a1, *b1;
+{
+ const DB_TXN_ACTIVE *a, *b;
+
+ a = a1;
+ b = b1;
+
+ if (a->txnid > b->txnid)
+ return (1);
+ if (a->txnid < b->txnid)
+ return (-1);
+ return (0);
+}
+
+/*
+ * dl --
+ * Display a big value.
+ */
+void
+db_stat_dl(msg, value)
+ const char *msg;
+ u_long value;
+{
+ /*
+	 * Two formats: if the value is less than 10 million, display it as
+	 * the number itself; otherwise display it as ###M (millions).
+ */
+ if (value < 10000000)
+ printf("%lu\t%s", value, msg);
+ else
+ printf("%luM\t%s", value / 1000000, msg);
+}
+
+/*
+ * dl_bytes --
+ * Display a big number of bytes.
+ */
+void
+db_stat_dl_bytes(msg, gbytes, mbytes, bytes)
+ const char *msg;
+ u_long gbytes, mbytes, bytes;
+{
+ const char *sep;
+
+ /* Normalize the values. */
+ while (bytes >= MEGABYTE) {
+ ++mbytes;
+ bytes -= MEGABYTE;
+ }
+ while (mbytes >= GIGABYTE / MEGABYTE) {
+ ++gbytes;
+ mbytes -= GIGABYTE / MEGABYTE;
+ }
+
+ sep = "";
+ if (gbytes > 0) {
+ printf("%luGB", gbytes);
+ sep = " ";
+ }
+ if (mbytes > 0) {
+ printf("%s%luMB", sep, mbytes);
+ sep = " ";
+ }
+ if (bytes >= 1024) {
+ printf("%s%luKB", sep, bytes / 1024);
+ bytes %= 1024;
+ sep = " ";
+ }
+ if (bytes > 0)
+ printf("%s%luB", sep, bytes);
+
+ printf("\t%s.\n", msg);
+}
+
+/*
+ * prflags --
+ * Print out flag values.
+ */
+void
+db_stat_prflags(flags, fnp)
+ u_int32_t flags;
+ const FN *fnp;
+{
+ const char *sep;
+
+ sep = "\t";
+ printf("Flags:");
+ for (; fnp->mask != 0; ++fnp)
+ if (fnp->mask & flags) {
+ printf("%s%s", sep, fnp->name);
+ sep = ", ";
+ }
+ printf("\n");
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_stat_db_init(dbenv, home, ttype, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ test_t ttype;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t oflags;
+ int ret;
+
+ /*
+ * If our environment open fails, and we're trying to look at a
+ * shared region, it's a hard failure.
+ *
+ * We will probably just drop core if the environment we join does
+ * not include a memory pool. This is probably acceptable; trying
+ * to use an existing environment that does not contain a memory
+ * pool to look at a database can be safely construed as operator
+ * error, I think.
+ */
+ *is_private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+ if (ttype != T_DB && ttype != T_LOG) {
+ dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
+ home == NULL ? "" : ": ", home == NULL ? "" : home);
+ return (1);
+ }
+
+ /*
+ * We're looking at a database or set of log files and no environment
+ * exists. Create one, but make it private so no files are actually
+ * created. Declare a reasonably large cache so that we don't fail
+ * when reporting statistics on large databases.
+ *
+ * An environment is required to look at databases because we may be
+ * trying to look at databases in directories other than the current
+ * one.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ *is_private = 1;
+ oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
+ if (ttype == T_DB)
+ oflags |= DB_INIT_MPOOL;
+ if (ttype == T_LOG)
+ oflags |= DB_INIT_LOG;
+ if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * argcheck --
+ * Return if argument flags are okay.
+ */
+int
+db_stat_argcheck(arg, ok_args)
+ char *arg;
+ const char *ok_args;
+{
+ for (; *arg != '\0'; ++arg)
+ if (strchr(ok_args, *arg) == NULL)
+ return (0);
+ return (1);
+}
+
+int
+db_stat_usage()
+{
+ fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_stat [-celmNrtVZ] [-C Aclmop]",
+ "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_stat_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_stat/db_stat.wpj b/libdb/build_vxworks/db_stat/db_stat.wpj
new file mode 100755
index 0000000..ba78c4c
--- /dev/null
+++ b/libdb/build_vxworks/db_stat/db_stat.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_stat.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_stat.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_stat.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_stat.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_stat.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> FILE_db_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_stat.c
+<END>
+
+<BEGIN> userComments
+db_stat
+<END>
diff --git a/libdb/build_vxworks/db_stat/db_stat/Makefile.custom b/libdb/build_vxworks/db_stat/db_stat/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_stat/db_stat/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_stat/db_stat/component.cdf b/libdb/build_vxworks/db_stat/db_stat/component.cdf
new file mode 100755
index 0000000..728544e
--- /dev/null
+++ b/libdb/build_vxworks/db_stat/db_stat/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_STAT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_stat.o
+ NAME db_stat
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_stat.o {
+
+ NAME db_stat.o
+ SRC_PATH_NAME $PRJ_DIR/../db_stat.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_stat/db_stat/component.wpj b/libdb/build_vxworks/db_stat/db_stat/component.wpj
new file mode 100755
index 0000000..2020d71
--- /dev/null
+++ b/libdb/build_vxworks/db_stat/db_stat/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_stat.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_upgrade/db_upgrade.c b/libdb/build_vxworks/db_upgrade/db_upgrade.c
new file mode 100644
index 0000000..b3fc3cc
--- /dev/null
+++ b/libdb/build_vxworks/db_upgrade/db_upgrade.c
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_upgrade_main __P((int, char *[]));
+int db_upgrade_usage __P((void));
+int db_upgrade_version_check __P((const char *));
+
+int
+db_upgrade(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_upgrade", args, &argc, &argv);
+ return (db_upgrade_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_upgrade_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_upgrade";
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, nflag, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = db_upgrade_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ flags = nflag = 0;
+ e_close = exitval = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_DUPSORT);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_upgrade_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_upgrade_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+ dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_upgrade_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+db_upgrade_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_upgrade/db_upgrade.wpj b/libdb/build_vxworks/db_upgrade/db_upgrade.wpj
new file mode 100755
index 0000000..65f834d
--- /dev/null
+++ b/libdb/build_vxworks/db_upgrade/db_upgrade.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_upgrade.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_upgrade.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_upgrade.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> FILE_db_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_upgrade.c
+<END>
+
+<BEGIN> userComments
+db_upgrade
+<END>
diff --git a/libdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom b/libdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_upgrade/db_upgrade/component.cdf b/libdb/build_vxworks/db_upgrade/db_upgrade/component.cdf
new file mode 100755
index 0000000..7bbdebd
--- /dev/null
+++ b/libdb/build_vxworks/db_upgrade/db_upgrade/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_UPGRADE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_upgrade.o
+ NAME db_upgrade
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_upgrade.o {
+
+ NAME db_upgrade.o
+ SRC_PATH_NAME $PRJ_DIR/../db_upgrade.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_upgrade/db_upgrade/component.wpj b/libdb/build_vxworks/db_upgrade/db_upgrade/component.wpj
new file mode 100755
index 0000000..1cc5f30
--- /dev/null
+++ b/libdb/build_vxworks/db_upgrade/db_upgrade/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_upgrade.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/db_verify/db_verify.c b/libdb/build_vxworks/db_verify/db_verify.c
new file mode 100644
index 0000000..3f2fe4e
--- /dev/null
+++ b/libdb/build_vxworks/db_verify/db_verify.c
@@ -0,0 +1,263 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_verify_main __P((int, char *[]));
+int db_verify_usage __P((void));
+int db_verify_version_check __P((const char *));
+
+int
+db_verify(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_verify", args, &argc, &argv);
+ return (db_verify_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_verify_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_verify";
+ DB *dbp, *dbp1;
+ DB_ENV *dbenv;
+ u_int32_t cache;
+ int ch, d_close, e_close, exitval, nflag, oflag, private;
+ int quiet, resize, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = db_verify_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = nflag = oflag = quiet = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'o':
+ oflag = 1;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_verify_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_verify_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
+ */
+ private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ private = 1;
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+ d_close = 1;
+
+		/*
+		 * We create a second DB handle on this database to get its
+		 * page size, because the handle we pass to DB->verify cannot
+		 * be opened first.
+		 */
+ if (private) {
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv->err(
+ dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+
+ if ((ret = dbp1->open(dbp1, NULL,
+ argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: %s", argv[0]);
+ (void)dbp1->close(dbp1, 0);
+ goto shutdown;
+ }
+ /*
+ * If we get here, we can check the cache/page.
+ * !!!
+ * If we have to retry with an env with a larger
+ * cache, we jump out of this loop. However, we
+ * will still be working on the same argv when we
+ * get back into the for-loop.
+ */
+ ret = __db_util_cache(dbenv, dbp1, &cache, &resize);
+ (void)dbp1->close(dbp1, 0);
+ if (ret != 0)
+ goto shutdown;
+
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+ if ((ret = dbp->verify(dbp,
+ argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ d_close = 0;
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_verify_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+db_verify_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/build_vxworks/db_verify/db_verify.wpj b/libdb/build_vxworks/db_verify/db_verify.wpj
new file mode 100755
index 0000000..d807c98
--- /dev/null
+++ b/libdb/build_vxworks/db_verify/db_verify.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_verify.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_verify.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_verify.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_verify.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_verify.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> FILE_db_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_verify.c
+<END>
+
+<BEGIN> userComments
+db_verify
+<END>
diff --git a/libdb/build_vxworks/db_verify/db_verify/Makefile.custom b/libdb/build_vxworks/db_verify/db_verify/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/db_verify/db_verify/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/db_verify/db_verify/component.cdf b/libdb/build_vxworks/db_verify/db_verify/component.cdf
new file mode 100755
index 0000000..f29f824
--- /dev/null
+++ b/libdb/build_vxworks/db_verify/db_verify/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_VERIFY {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_verify.o
+ NAME db_verify
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_verify.o {
+
+ NAME db_verify.o
+ SRC_PATH_NAME $PRJ_DIR/../db_verify.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/db_verify/db_verify/component.wpj b/libdb/build_vxworks/db_verify/db_verify/component.wpj
new file mode 100755
index 0000000..aca3ae8
--- /dev/null
+++ b/libdb/build_vxworks/db_verify/db_verify/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_verify.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_vxworks/dbdemo/README b/libdb/build_vxworks/dbdemo/README
new file mode 100644
index 0000000..1a2c7c7
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/README
@@ -0,0 +1,39 @@
+This README describes the steps needed to run a demo example of BerkeleyDB.
+
+1. Read the pages in the Reference Guide that describe building
+ BerkeleyDB on VxWorks:
+
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/intro.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/notes.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/faq.html
+
+2. Launch Tornado 2.0 and open up the BerkeleyDB project.
+
+3. Add the demo project to that workspace:
+
+   $(WIND_BASE)/target/src/BerkeleyDB/build_vxworks/dbdemo/dbdemo.wpj
+
+4. Build BerkeleyDB as described in the Reference Guide.
+
+5. Build the dbdemo project.
+
+6. Download BerkeleyDB onto the target.
+
+7. Download the dbdemo project onto the target.
+
+8. Open a windsh to the target and run the demo:
+
+ -> dbdemo "<pathname>/<dbname>"
+
+   where <pathname> is the path of an existing directory in which the
+   demo can create its database, and <dbname> is the name to use for
+   the database file. For example:
+
+ -> dbdemo "/tmp/demo.db"
+
+9. The demo program will prompt for input. You can type in any string.
+   For each string you enter, the program adds an entry to the database
+   with that string as the key and the reverse of that string as the
+   data item for that key. It keeps asking for input until you hit ^D
+   or enter "quit" (or "exit"). The program then displays all the keys
+   you have entered and their data items.
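+
+   As an illustration only (the output below is approximate), a session
+   might look like:
+
+   -> dbdemo "/tmp/demo.db"
+   input> hello
+   input> world
+   input> quit
+
+   hello : olleh
+   world : dlrow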
diff --git a/libdb/build_vxworks/dbdemo/dbdemo.c b/libdb/build_vxworks/dbdemo/dbdemo.c
new file mode 100644
index 0000000..887f71c
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/dbdemo.c
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db_config.h>
+#include <db_int.h>
+
+#define DATABASE "access.db"
+int dbdemo_main __P((int, char *[]));
+int dbdemo_usage __P((void));
+
+int
+dbdemo(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("dbdemo", args, &argc, &argv);
+ return (dbdemo_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+dbdemo_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind, __db_getopt_reset;
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ch, ret, rflag;
+ char *database, *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "dbdemo"; /* Program name. */
+
+ rflag = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "r")) != EOF)
+ switch (ch) {
+ case 'r':
+ rflag = 1;
+ break;
+ case '?':
+ default:
+ return (dbdemo_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Accept optional database name. */
+ database = *argv == NULL ? DATABASE : argv[0];
+
+ /* Optionally discard the database. */
+ if (rflag)
+ (void)remove(database);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->set_cachesize(dbp, 0, 32 * 1024, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", database);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if (strcmp(buf, "exit\n") == 0 || strcmp(buf, "quit\n") == 0)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ }
+ printf("\n");
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /* Initialize the key/data pair so the flags aren't set. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ goto err2;
+ }
+
+ /* Close everything down. */
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (EXIT_FAILURE);
+}
+
+int
+dbdemo_usage()
+{
+	(void)fprintf(stderr, "usage: dbdemo [-r] [database]\n");
+ return (EXIT_FAILURE);
+}
diff --git a/libdb/build_vxworks/dbdemo/dbdemo.wpj b/libdb/build_vxworks/dbdemo/dbdemo.wpj
new file mode 100755
index 0000000..52eec5e
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/dbdemo.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+dbdemo.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/dbdemo.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_dbdemo.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_dbdemo.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_dbdemo.c_dependencies
+
+<END>
+
+<BEGIN> FILE_dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> FILE_dbdemo.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/dbdemo.c
+<END>
+
+<BEGIN> userComments
+dbdemo
+<END>
diff --git a/libdb/build_vxworks/dbdemo/dbdemo/Makefile.custom b/libdb/build_vxworks/dbdemo/dbdemo/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/dbdemo/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/build_vxworks/dbdemo/dbdemo/component.cdf b/libdb/build_vxworks/dbdemo/dbdemo/component.cdf
new file mode 100755
index 0000000..188b63b
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/dbdemo/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DBDEMO {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES dbdemo.o
+ NAME dbdemo
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module dbdemo.o {
+
+ NAME dbdemo.o
+ SRC_PATH_NAME $PRJ_DIR/../dbdemo.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/build_vxworks/dbdemo/dbdemo/component.wpj b/libdb/build_vxworks/dbdemo/dbdemo/component.wpj
new file mode 100755
index 0000000..b51ebce
--- /dev/null
+++ b/libdb/build_vxworks/dbdemo/dbdemo/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../dbdemo.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/build_win32/Berkeley_DB.dsw b/libdb/build_win32/Berkeley_DB.dsw
new file mode 100644
index 0000000..95670a3
--- /dev/null
+++ b/libdb/build_win32/Berkeley_DB.dsw
@@ -0,0 +1,540 @@
+Microsoft Developer Studio Workspace File, Format Version 6.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "build_all"=.\build_all.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_archive
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_checkpoint
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_deadlock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_dump
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_load
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_printlog
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_recover
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_stat
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_upgrade
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_verify
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_tpcb
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_tpcb
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+ Begin Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_archive"=.\db_archive.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_deadlock"=.\db_deadlock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_dll"=.\db_dll.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "db_dump"=.\db_dump.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_java"=.\db_java.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_lib"=.\db_lib.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_dll
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_load"=.\db_load.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_perf"=.\db_perf.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_recover"=.\db_recover.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_stat"=.\db_stat.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_static"=.\db_static.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_test"=.\db_test.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name build_all
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_tcl
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_upgrade"=.\db_upgrade.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_verify"=.\db_verify.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_access"=.\ex_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_btrec"=.\ex_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_env"=.\ex_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_lock"=.\ex_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_mpool"=.\ex_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_tpcb"=.\ex_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_access"=.\excxx_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_btrec"=.\excxx_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_env"=.\excxx_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_lock"=.\excxx_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_mpool"=.\excxx_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_tpcb"=.\excxx_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
+
diff --git a/libdb/build_win32/app_dsp.src b/libdb/build_win32/app_dsp.src
new file mode 100644
index 0000000..34f80a3
--- /dev/null
+++ b/libdb/build_win32/app_dsp.src
@@ -0,0 +1,145 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/build_all.dsp b/libdb/build_win32/build_all.dsp
new file mode 100644
index 0000000..7ae1f9b
--- /dev/null
+++ b/libdb/build_win32/build_all.dsp
@@ -0,0 +1,96 @@
+# Microsoft Developer Studio Project File - Name="build_all" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=build_all - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak" CFG="build_all - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "build_all - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Release Static" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug Static" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "build_all - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "echo DB Release version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "echo DB Debug version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Cmd_Line "echo DB Release Static version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Cmd_Line "echo DB Debug Static version built."
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "build_all - Win32 Release"
+# Name "build_all - Win32 Debug"
+# Name "build_all - Win32 Release Static"
+# Name "build_all - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/libdb/build_win32/db.h b/libdb/build_win32/db.h
new file mode 100644
index 0000000..5531f2e
--- /dev/null
+++ b/libdb/build_win32/db.h
@@ -0,0 +1,1956 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default; you must specify a command-line flag or #pragma to
+ * turn it on. Don't do that, however, because some of Microsoft's own
+ * header files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR 4
+#define DB_VERSION_MINOR 1
+#define DB_VERSION_PATCH 25
+#define DB_VERSION_STRING "Sleepycat Software: Berkeley DB 4.1.25: (December 19, 2002)"
+
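+/*
+ * Illustrative note, not part of the generated header: applications can
+ * check at run time that the db.h they compiled against matches the
+ * library they linked with, e.g.
+ *
+ *	int major, minor, patch;
+ *
+ *	(void)db_version(&major, &minor, &patch);
+ *	if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR)
+ *		return (EXIT_FAILURE);
+ */
+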
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+typedef unsigned char u_int8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+#endif
+
+#if !defined(_WINSOCKAPI_)
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#endif
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef int ssize_t;
+#endif
+
+/* Basic types that are exported or quasi-exported. */
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef u_int32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are currently limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions.
+ */
+typedef u_int32_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+struct __mutex_t; typedef struct __mutex_t DB_MUTEX;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */
+ u_int32_t flags;
+};
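+
+/*
+ * Illustrative sketch, with hypothetical variable names: callers normally
+ * zero a DBT and then set only the fields they need.  With DB_DBT_USERMEM
+ * the access methods copy the result into caller-supplied memory:
+ *
+ *	DBT data;
+ *	char buf[1024];
+ *
+ *	memset(&data, 0, sizeof(data));
+ *	data.data = buf;
+ *	data.ulen = sizeof(buf);
+ *	data.flags = DB_DBT_USERMEM;
+ *
+ * and then pass &data as the data argument to DB->get or DBcursor->c_get.
+ */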
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TRUNCATE 0x000080 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x000100 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000200 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000400 /* Use the environment if root. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
+ * DB->remove, DB->rename, DB->truncate
+ * DB_DIRTY_READ:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
+ * DB_ENV->txn_begin
+ *
+ * Shared flags up to 0x000400 */
+#define DB_AUTO_COMMIT 0x00800000 /* Implied transaction. */
+#define DB_DIRTY_READ 0x01000000 /* Dirty Read. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000001 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000001 /* Open in an XA environment. */
+
+/*
+ * Flags private to DB_ENV->open.
+ * Shared flags up to 0x000400 */
+#define DB_INIT_CDB 0x000800 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x001000 /* Initialize locking. */
+#define DB_INIT_LOG 0x002000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x004000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x008000 /* Initialize transactions. */
+#define DB_JOINENV 0x010000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x020000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x040000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x080000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x100000 /* Use system-backed memory. */
+
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x000400 */
+#define DB_EXCL 0x000800 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x001000 /* UNDOC: fcntl(2) locking. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x004000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x000400 */
+#define DB_TXN_NOWAIT 0x000800 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x001000 /* Always sync log on commit. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x000400 */
+#define DB_CDB_ALLDB 0x000800 /* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x001000 /* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x002000 /* Don't buffer log files in the OS. */
+#define DB_NOLOCKING 0x004000 /* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x008000 /* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x010000 /* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x020000 /* Set panic state per environment. */
+#define DB_REGION_INIT 0x040000 /* Page-fault regions on open. */
+#define DB_TXN_WRITE_NOSYNC 0x080000 /* Write, don't sync, on txn commit. */
+#define DB_YIELDCPU 0x100000 /* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000001 /* Upgrading. */
+#define DB_VERIFY 0x000002 /* Verifying. */
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x000400 */
+#define DB_DIRECT 0x000800 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x001000 /* UNDOC: dealing with an extent. */
+#define DB_ODDFILESIZE 0x002000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ */
+#define DB_CHKSUM_SHA1 0x000001 /* Use SHA1 checksumming */
+#define DB_DUP 0x000002 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x000004 /* Btree, Hash: sorted duplicate keys. */
+#define DB_ENCRYPT 0x000008 /* Btree, Hash: encrypted database. */
+#define DB_RECNUM 0x000010 /* Btree: record numbers. */
+#define DB_RENUMBER 0x000020 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x000040 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x000080 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB->stat methods.
+ */
+#define DB_STAT_CLEAR 0x000001 /* Clear stat after returning values. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x000040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->set_rep_transport's send callback.
+ */
+#define DB_REP_PERMANENT 0x0001 /* Important--app. may want to flush. */
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */
+#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */
+#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */
+#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 7 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */
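+
+/*
+ * Illustrative sketch (not from the upstream header): a policy is normally
+ * installed on the environment before it is opened, or the detector is run
+ * on demand (assuming a handle `dbenv', ignoring error handling):
+ *
+ *      int rejected;
+ *
+ *      (void)dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT);
+ *      (void)dbenv->lock_detect(dbenv, 0, DB_LOCK_MINWRITE, &rejected);
+ */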
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_FREE_LOCKER 0x001 /* Internal: Free locker as well. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */
+#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */
+
+/*
+ * Simple R/W lock modes and modes for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 4, and DB_LOCK_IREAD
+ * must be == 5.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_DIRTY=7, /* Dirty Read. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR=2, /* Lock is bad. */
+ DB_LSTAT_EXPIRED=3, /* Lock has expired. */
+ DB_LSTAT_FREE=4, /* Lock is unallocated. */
+ DB_LSTAT_HELD=5, /* Lock is currently held. */
+ DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting
+ * was removed */
+ DB_LSTAT_PENDING=7, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */
+} db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ u_int32_t st_id; /* Last allocated locker ID. */
+ u_int32_t st_cur_maxid; /* Current maximum unused ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum num of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+#define DB_TXN_LOCK 4
+ u_int32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 7 /* Current log version. */
+#define DB_LOGOLDVER 7 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts: a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
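+
+/*
+ * Illustrative sketch (not from the upstream header): LSNs order first by
+ * file and then by offset, so a hypothetical comparison helper -- not a
+ * Berkeley DB API -- could be written as:
+ *
+ *      static int
+ *      lsn_cmp(const DB_LSN *a, const DB_LSN *b)
+ *      {
+ *              if (a->file != b->file)
+ *                      return (a->file < b->file ? -1 : 1);
+ *              if (a->offset != b->offset)
+ *                      return (a->offset < b->offset ? -1 : 1);
+ *              return (0);
+ *      }
+ */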
+
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *c_fh; /* File handle. */
+ DB_LSN c_lsn; /* Cursor: LSN */
+ u_int32_t c_len; /* Cursor: record length */
+ u_int32_t c_prev; /* Cursor: previous record's offset */
+
+ DBT c_dbt; /* Return DBT. */
+
+#define DB_LOGC_BUF_SIZE (32 * 1024)
+ u_int8_t *bp; /* Allocated read buffer. */
+ u_int32_t bp_size; /* Read buffer length in bytes. */
+ u_int32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ u_int32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* Methods. */
+ int (*close) __P((DB_LOGC *, u_int32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn off error messages. */
+ u_int32_t flags;
+};
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_size; /* Log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_disk_file; /* Known on disk log file number. */
+ u_int32_t st_disk_offset; /* Known on disk log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */
+};
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+
+/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Priority values for DB_MPOOLFILE->set_priority. */
+typedef enum {
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH *fhp; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * The pinref and q fields are protected by the region lock, not the
+ * DB_MPOOLFILE structure mutex. We don't use the structure mutex
+ * because then I/O (which is done with the structure lock held because of
+ * the race between the seek and write of the file descriptor) would
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * These fields are not thread-protected because they are initialized
+ * when the file is opened and never modified.
+ */
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* Methods. */
+ int (*close) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ void (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ void (*last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ void (*refcnt) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ void (*set_unlink) __P((DB_MPOOLFILE *, int));
+ int (*sync) __P((DB_MPOOLFILE *));
+
+ /*
+ * MP_OPEN_CALLED and MP_READONLY do not need to be thread protected
+ * because they are initialized when the file is opened, and never
+ * modified.
+ *
+ * MP_FLUSH, MP_UPGRADE and MP_UPGRADE_FAIL are thread protected
+ * because they are potentially read by multiple threads of control.
+ */
+#define MP_FLUSH 0x001 /* Was opened to flush a buffer. */
+#define MP_OPEN_CALLED 0x002 /* File opened. */
+#define MP_READONLY 0x004 /* File is readonly. */
+#define MP_UPGRADE 0x008 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x010 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
+
+/*
+ * Mpool statistics structure.
+ */
+struct __db_mpool_stat {
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_pages; /* Total number of pages. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ u_int32_t st_hash_wait; /* Hash lock granted after wait. */
+ u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted with nowait. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_alloc; /* Number of page allocations. */
+ u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ u_int32_t st_alloc_pages; /* Pages checked during allocation. */
+ u_int32_t st_alloc_max_pages; /* Max checked during allocation. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_GETPGNOS=5, /* Internal. */
+ DB_TXN_OPENFILES=6, /* Internal. */
+ DB_TXN_POPENFILES=7, /* Internal. */
+ DB_TXN_PRINT=8 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time this txn expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ /* API-private structure: used by C++ */
+ void *api_internal;
+
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+ /* Methods. */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, u_int32_t));
+ int (*discard) __P((DB_TXN *, u_int32_t));
+ u_int32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, u_int8_t *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_COMPENSATE 0x02 /* Compensating transaction. */
+#define TXN_DIRTY_READ 0x04 /* Transaction does dirty reads. */
+#define TXN_LOCKTIMEOUT 0x08 /* Transaction has a lock timeout. */
+#define TXN_MALLOC 0x10 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x20 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x40 /* Do not wait on locks. */
+#define TXN_SYNC 0x80 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* LSN when transaction began */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * Structure used for the two-phase commit interface. Berkeley DB support for
+ * two-phase commit is compatible with the X/Open XA interface. The xa #define
+ * XIDDATASIZE defines the size of a global transaction ID. We have our own
+ * version here which must have the same value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
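+
+/*
+ * Illustrative sketch (not from the upstream header): a local transaction is
+ * prepared under a caller-chosen global ID before it is committed (assuming
+ * an open environment `dbenv', a made-up GID, and ignoring error handling):
+ *
+ *      DB_TXN *txn;
+ *      u_int8_t gid[DB_XIDDATASIZE];
+ *
+ *      memset(gid, 0, sizeof(gid));
+ *      gid[0] = 1;
+ *      (void)dbenv->txn_begin(dbenv, NULL, &txn, 0);
+ *      (void)txn->prepare(txn, gid);
+ *      (void)txn->commit(txn, 0);
+ */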
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_start flags values */
+#define DB_REP_CLIENT 0x001
+#define DB_REP_LOGSONLY 0x002
+#define DB_REP_MASTER 0x004
+
+/* Replication statistics. */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ u_int32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+
+ u_int32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ u_int32_t st_gen; /* Current generation number. */
+ u_int32_t st_log_duplicated; /* Log records received multiply.+ */
+ u_int32_t st_log_queued; /* Log records currently queued.+ */
+ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ u_int32_t st_log_records; /* Log records received and put.+ */
+ u_int32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ u_int32_t st_master_changes; /* # of times we've switched masters. */
+ u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ u_int32_t st_msgs_processed; /* Messages received and processed.+ */
+ u_int32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ u_int32_t st_msgs_sent; /* # of successful message sends.+ */
+ u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ u_int32_t st_nthrottles; /* # of times we were throttled. */
+ u_int32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ u_int32_t st_txns_applied; /* # of transactions applied.+ */
+
+ /* Elections generally. */
+ u_int32_t st_elections; /* # of elections held.+ */
+ u_int32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ u_int32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ int st_election_tiebreaker; /* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH=2,
+ DB_RECNO=3,
+ DB_QUEUE=4,
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 8 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_COMMIT 5 /* log_put() (internal) */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURRENT 8 /* c_get(), c_put(), DB_LOGC->get() */
+#define DB_FAST_STAT 9 /* stat() */
+#define DB_FIRST 10 /* c_get(), DB_LOGC->get() */
+#define DB_GET_BOTH 11 /* get(), c_get() */
+#define DB_GET_BOTHC 12 /* c_get() (internal) */
+#define DB_GET_BOTH_RANGE 13 /* get(), c_get() */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT 19 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), DB_LOGC->get() */
+#define DB_PREV_NODUP 28 /* c_get(), DB_LOGC->get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), DB_LOGC->get() */
+#define DB_SET_LOCK_TIMEOUT 31 /* set_timeout() */
+#define DB_SET_RANGE 32 /* c_get() */
+#define DB_SET_RECNO 33 /* get(), c_get() */
+#define DB_SET_TXN_NOW 34 /* set_timeout() (internal) */
+#define DB_SET_TXN_TIMEOUT 35 /* set_timeout() */
+#define DB_UPDATE_SECONDARY 36 /* c_get(), c_del() (internal) */
+#define DB_WRITECURSOR 37 /* cursor() */
+#define DB_WRITELOCK 38 /* cursor() (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+/* DB_DIRTY_READ 0x01000000 Dirty Read. */
+#define DB_FLUSH 0x02000000 /* Flush data to disk. */
+#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */
+#define DB_NOCOPY 0x10000000 /* Don't copy data */
+#define DB_PERMANENT 0x20000000 /* Flag record with REP_PERMANENT. */
+#define DB_RMW 0x40000000 /* Acquire write flag immediately. */
+#define DB_WRNOSYNC 0x80000000 /* Private: write, don't sync log_put */
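+
+/*
+ * Illustrative sketch (not from the upstream header): an operation code is
+ * combined with the flag bits above, e.g. positioning a cursor on an exact
+ * key while taking a write lock (assuming an open cursor `dbc' and
+ * initialized DBTs, ignoring error handling):
+ *
+ *      (void)dbc->c_get(dbc, &key, &data, DB_SET | DB_RMW);
+ */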
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30989)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30988)/* There are two masters. */
+#define DB_REP_HOLDELECTION (-30987)/* Time to hold an election. */
+#define DB_REP_NEWMASTER (-30986)/* We have learned of a new master. */
+#define DB_REP_NEWSITE (-30985)/* New site entered system. */
+#define DB_REP_OUTDATED (-30984)/* Site is too far behind master. */
+#define DB_REP_UNAVAIL (-30983)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30982)/* Panic return. */
+#define DB_SECONDARY_BAD (-30981)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30980)/* Verify failed; bad format. */
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_LOCK_NOTEXIST (-30896)/* Object to lock is gone. */
+#define DB_NEEDSPLIT (-30895)/* Page needs to be split. */
+#define DB_SURPRISE_KID (-30894)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30893)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30892)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30891)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30890)/* DB->verify cannot proceed. */
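+
+/*
+ * Illustrative sketch (not from the upstream header): the public codes above
+ * come back through the same int return as errno values, so callers usually
+ * special-case the expected ones (assuming an open handle `dbp' and
+ * initialized DBTs):
+ *
+ *      int ret;
+ *
+ *      if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) != 0) {
+ *              if (ret == DB_NOTFOUND)
+ *                      ret = 0;
+ *              else
+ *                      dbp->err(dbp, ret, "DB->get");
+ *      }
+ */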
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+ DB_CACHE_PRIORITY priority; /* Priority in the buffer pool. */
+
+ DB_MUTEX *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ u_int32_t lid; /* Locker id for handle locking. */
+ u_int32_t cur_lid; /* Current handle lock holder. */
+ u_int32_t associate_lid; /* Locker id for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how
+ * many threads are updating this secondary (see __db_c_put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in within the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ u_int32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* Methods. */
+ int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
+ const DBT *, DBT *), u_int32_t));
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_cache_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*stat) __P((DB *, void *, u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *,
+ DB_TXN *, const char *, const char *, DB_LSN *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*stored_close) __P((DB *, u_int32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */
+#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */
+#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */
+#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */
+#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */
+#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */
+#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */
+#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */
+#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00000200 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */
+#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */
+#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */
+#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */
+#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called. */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad. */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly. */
+#define DB_AM_RECNUM 0x00080000 /* DB_RECNUM. */
+#define DB_AM_RECOVER 0x00100000 /* DB opened by recovery routine. */
+#define DB_AM_RENUMBER 0x00200000 /* DB_RENUMBER. */
+#define DB_AM_REVSPLITOFF 0x00400000 /* DB_REVSPLITOFF. */
+#define DB_AM_SECONDARY 0x00800000 /* Database is a secondary index. */
+#define DB_AM_SNAPSHOT 0x01000000 /* DB_SNAPSHOT. */
+#define DB_AM_SUBDB 0x02000000 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x04000000 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x08000000 /* Opened in a transaction. */
+#define DB_AM_VERIFYING 0x10000000 /* DB handle is in the verifier. */
+ u_int32_t flags;
+};
+
+/*
+ * Macros for bulk get. Note that wherever we use a DBT *, we explicitly
+ * cast it; this allows the same macros to work with C++ Dbt *'s, as Dbt
+ * is a subclass of struct DBT in C++.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (u_int8_t *)((DBT *)(dbt))->data + \
+ ((DBT *)(dbt))->ulen - sizeof(u_int32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (u_int8_t *)((DBT *)(dbt))->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retklen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
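+
+/*
+ * Illustrative bulk-get sketch (not from the upstream header): the cursor
+ * fills a caller-supplied buffer and the macros above walk the packed
+ * result; here the returned items are simply counted (assuming an open
+ * cursor `dbc', ignoring error handling):
+ *
+ *      DBT key, data;
+ *      void *p, *retdata;
+ *      u_int32_t n, retdlen;
+ *      static u_int8_t buf[64 * 1024];
+ *
+ *      memset(&key, 0, sizeof(key));
+ *      memset(&data, 0, sizeof(data));
+ *      data.data = buf;
+ *      data.ulen = sizeof(buf);
+ *      data.flags = DB_DBT_USERMEM;
+ *      n = 0;
+ *      if (dbc->c_get(dbc, &key, &data, DB_FIRST | DB_MULTIPLE) == 0)
+ *              for (DB_MULTIPLE_INIT(p, &data);;) {
+ *                      DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+ *                      if (p == NULL)
+ *                              break;
+ *                      if (retdata != NULL)
+ *                              n++;
+ *              }
+ */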
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+ /* Private: for secondary indices. */
+ int (*c_real_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */
+#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */
+#define DBC_OPD 0x0008 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0020 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x0100 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */
+#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */
+#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */
+ u_int32_t flags;
+};
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_extentsize; /* Pages per extent. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries. There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_REPLICATION 0x0008 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_size; /* Log file size. */
+ u_int32_t lg_regionmax; /* Region size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ int rep_eid; /* environment id. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int panic_errval; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+ u_int32_t tas_spins; /* test-and-set spins. */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ DB_MUTEX *dblist_mutexp; /* Mutex. */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* DB_ENV Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, u_int32_t));
+ int (*dbrename) __P((DB_ENV *, DB_TXN *,
+ const char *, const char *, const char *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rpc_server) __P((DB_ENV *,
+ void *, const char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tas_spins) __P((DB_ENV *, u_int32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ void *lg_handle; /* Log handle and methods. */
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+ int (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+
+ void *lk_handle; /* Lock handle and methods. */
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+ int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ int (*lock_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*lock_get) __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, u_int32_t *));
+ int (*lock_id_free) __P((DB_ENV *, u_int32_t));
+ int (*lock_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*lock_downgrade) __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+
+ void *mp_handle; /* Mpool handle and methods. */
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ int (*memp_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int (*memp_nameop) __P((DB_ENV *,
+ u_int8_t *, const char *, const char *, const char *));
+ int (*memp_register) __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+
+ void *rep_handle; /* Replication handle and methods. */
+ int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *));
+ int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ int (*set_rep_election) __P((DB_ENV *,
+ u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+ int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_timeout) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_transport) __P((DB_ENV *, int,
+ int (*) (DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+
+ void *tx_handle; /* Txn handle and methods. */
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ int (*txn_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*txn_recover) __P((DB_ENV *,
+ DB_PREPLIST *, long, long *, u_int32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTSEND 2 /* after REP_ELECT msgnit */
+#define DB_TEST_ELECTVOTE1 3 /* after __rep_send_vote 1 */
+#define DB_TEST_ELECTVOTE2 4 /* after __rep_wait */
+#define DB_TEST_ELECTWAIT1 5 /* after REP_VOTE2 */
+#define DB_TEST_ELECTWAIT2 6 /* after __rep_wait 2 */
+#define DB_TEST_PREDESTROY 7 /* before destroy op */
+#define DB_TEST_PREOPEN 8 /* before __os_open */
+#define DB_TEST_POSTDESTROY 9 /* after destroy op */
+#define DB_TEST_POSTLOG 10 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 11 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 12 /* after __os_open */
+#define DB_TEST_POSTSYNC 13 /* after syncing the log */
+#define DB_TEST_SUBDB_LOCKS 14 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */
+#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */
+#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */
+#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOLOCKING 0x0000200 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x0000400 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x0000800 /* Okay if panic set. */
+#define DB_ENV_OPEN_CALLED 0x0001000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x0002000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x0004000 /* DB_PRIVATE set. */
+#define DB_ENV_REGION_INIT 0x0008000 /* DB_REGION_INIT set. */
+#define DB_ENV_REP_CLIENT 0x0010000 /* Replication client. */
+#define DB_ENV_REP_LOGSONLY 0x0020000 /* Log files only replication site. */
+#define DB_ENV_REP_MASTER 0x0040000 /* Replication master. */
+#define DB_ENV_RPCCLIENT 0x0080000 /* DB_CLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x0800000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */
+ u_int32_t flags;
+};
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+typedef struct __db DBM;
+
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr(a)
+#define dbm_close(a) __db_ndbm_close(a)
+#define dbm_delete(a, b) __db_ndbm_delete(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno(a)
+#define dbm_error(a) __db_ndbm_error(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey(a)
+#define dbm_open(a, b, c) __db_ndbm_open(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly(a)
+#define dbm_store(a, b, c, d) \
+ __db_ndbm_store(a, b, c, d)
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init(a)
+#define dbmclose __db_dbm_close
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete(a)
+#endif
+#define fetch(a) __db_dbm_fetch(a)
+#define firstkey __db_dbm_firstkey
+#define nextkey(a) __db_dbm_nextkey(a)
+#define store(a, b) __db_dbm_store(a, b)
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+#define hcreate(a) __db_hcreate(a)
+#define hdestroy __db_hdestroy
+#define hsearch(a, b) __db_hsearch(a, b)
+
+#endif /* DB_DBM_HSEARCH */
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_H_ */
+
+/* DO NOT EDIT: automatically built by dist/s_rpc. */
+#define DB_RPC_SERVERPROG ((unsigned long)(351457))
+#define DB_RPC_SERVERVERS ((unsigned long)(4001))
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_PROT_IN_
+#define _DB_EXT_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int db_create __P((DB **, DB_ENV *, u_int32_t));
+char *db_strerror __P((int));
+int db_env_create __P((DB_ENV **, u_int32_t));
+char *db_version __P((int *, int *, int *));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int txn_abort __P((DB_TXN *));
+int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int txn_commit __P((DB_TXN *, u_int32_t));
+#if DB_DBM_HSEARCH != 0
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
+int __db_dbm_close __P((void));
+int __db_dbm_dbrdonly __P((void));
+int __db_dbm_delete __P((datum));
+int __db_dbm_dirf __P((void));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_pagf __P((void));
+int __db_dbm_store __P((datum, datum));
+#endif
+#if DB_DBM_HSEARCH != 0
+int __db_hcreate __P((size_t));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
+void __db_hdestroy __P((void));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_PROT_IN_ */
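
The DB_ENV structure above is used through its method table together with the handle-creation functions prototyped at the end of the header. The following is a minimal sketch of that usage, not taken from the Berkeley DB sources: the home directory is illustrative, and the DB_CREATE/DB_INIT_* open flags are assumed to be the standard macros defined earlier in db.h (outside this hunk).

#include <stdio.h>
#include "db.h"

int
main(void)
{
	DB_ENV *dbenv;
	DB_TXN *txn;
	int ret;

	/* db_env_create() and db_strerror() are prototyped above. */
	if ((ret = db_env_create(&dbenv, 0)) != 0) {
		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
		return (1);
	}

	/* One 1MB cache region, via DB_ENV->set_cachesize(). */
	(void)dbenv->set_cachesize(dbenv, 0, 1024 * 1024, 1);

	/* Illustrative home directory and open flags. */
	if ((ret = dbenv->open(dbenv, "/tmp/dbenv-example",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
	    0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->open");
		(void)dbenv->close(dbenv, 0);
		return (1);
	}

	/* txn_begin()/txn_commit() match the compatibility prototypes above. */
	if ((ret = txn_begin(dbenv, NULL, &txn, 0)) == 0)
		(void)txn_commit(txn, 0);

	return (dbenv->close(dbenv, 0) == 0 ? 0 : 1);
}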
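The dbm/ndbm translation macros above only take effect when DB_DBM_HSEARCH is defined to a non-zero value before db.h is included; the familiar ndbm(3) calls then resolve to the __db_ndbm_* entry points, and DBM_SUFFIX (".db") is appended to the file name so the original database is never overwritten. A minimal sketch of that compatibility path, again illustrative rather than taken from the Berkeley DB sources:

#define DB_DBM_HSEARCH 1	/* enable the historic dbm/ndbm interface */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include "db.h"

int
main(void)
{
	DBM *db;
	datum key, val, out;

	/* Opens (or creates) "example.db" on disk, per DBM_SUFFIX. */
	if ((db = dbm_open("example", O_CREAT | O_RDWR, 0644)) == NULL)
		return (1);

	key.dptr = "fruit";
	key.dsize = (int)strlen("fruit");
	val.dptr = "apple";
	val.dsize = (int)strlen("apple");

	/* dbm_store()/dbm_fetch() expand to __db_ndbm_store()/__db_ndbm_fetch(). */
	(void)dbm_store(db, key, val, DBM_REPLACE);
	out = dbm_fetch(db, key);
	if (out.dptr != NULL)
		printf("%.*s\n", (int)out.dsize, (char *)out.dptr);

	dbm_close(db);
	return (0);
}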
diff --git a/libdb/build_win32/db_archive.dsp b/libdb/build_win32/db_archive.dsp
new file mode 100644
index 0000000..1f793ac
--- /dev/null
+++ b/libdb/build_win32/db_archive.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_archive" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_archive - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_archive.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_archive.mak" CFG="db_archive - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_archive - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_archive - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_archive - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_archive - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_archive - Win32 Release"
+# Name "db_archive - Win32 Debug"
+# Name "db_archive - Win32 Release Static"
+# Name "db_archive - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_archive\db_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_checkpoint.dsp b/libdb/build_win32/db_checkpoint.dsp
new file mode 100644
index 0000000..d85675d
--- /dev/null
+++ b/libdb/build_win32/db_checkpoint.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_checkpoint" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_checkpoint - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_checkpoint.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_checkpoint.mak" CFG="db_checkpoint - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_checkpoint - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_checkpoint - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_checkpoint - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_checkpoint - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_checkpoint - Win32 Release"
+# Name "db_checkpoint - Win32 Debug"
+# Name "db_checkpoint - Win32 Release Static"
+# Name "db_checkpoint - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_checkpoint\db_checkpoint.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_config.h b/libdb/build_win32/db_config.h
new file mode 100644
index 0000000..497bf37
--- /dev/null
+++ b/libdb/build_win32/db_config.h
@@ -0,0 +1,440 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+#if defined(_DEBUG)
+#if !defined(DEBUG)
+#define DEBUG 1
+#endif
+#endif
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_DIRENT_H */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+/* #undef HAVE_MUTEX_VXWORKS */
+
+/* Define to 1 to use Windows mutexes. */
+#define HAVE_MUTEX_WIN32 1
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+/* #undef HAVE_SCHED_YIELD */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef HAVE_SELECT */
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+#define HAVE_SNPRINTF 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef HAVE_UNISTD_H */
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
+
+/* Define to 1 if you have the `vsnprintf' function. */
+#define HAVE_VSNPRINTF 1
+
+/* Define to 1 if building VxWorks. */
+/* #undef HAVE_VXWORKS */
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+#define HAVE__FSTATI64 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB 4.1.25"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-4.1.25"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "4.1.25"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * All of the necessary includes have been included; ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+/*
+ * Win32 does not define getopt and friends in any header file, so we must.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern int optind;
+extern char *optarg;
+extern int getopt(int, char * const *, const char *);
+#if defined(__cplusplus)
+}
+#endif
+
+/*
+ * We use DB_WIN32 much as one would use _WIN32, to determine that we're
+ * using an operating system environment that supports Win32 calls
+ * and semantics. We don't use _WIN32 because cygwin/gcc also defines
+ * that, even though it closely emulates the Unix environment.
+ */
+#define DB_WIN32 1
+
+/*
+ * This is a grievous hack -- once we've included windows.h, we have no choice
+ * but to use ANSI-style varargs (because it pulls in stdarg.h for us). DB's
+ * code decides which type of varargs to use based on the state of __STDC__.
+ * Sensible. Unfortunately, Microsoft's compiler _doesn't_ define __STDC__
+ * unless you invoke it with arguments turning OFF all vendor extensions. Even
+ * more unfortunately, if we do that, it fails to parse windows.h!!!!! So, we
+ * define __STDC__ here, after windows.h comes in. Note: the compiler knows
+ * we've defined it, and starts enforcing strict ANSI compilance from this point
+ * on.
+ */
+#define __STDC__ 1
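
Because Win32 ships no getopt declaration, db_config.h declares optind, optarg and getopt() itself; the implementation the utilities link against comes from ..\clib\getopt.c, which the .dsp projects above list as a source file. A minimal sketch of the option loop a db_*-style utility runs on Win32, with purely illustrative options (-h home, -v):

#include <stdio.h>
#include <stdlib.h>

#include "db_config.h"	/* supplies the getopt()/optind/optarg declarations */

int
main(int argc, char *argv[])
{
	int ch, verbose;
	char *home;

	verbose = 0;
	home = NULL;
	while ((ch = getopt(argc, argv, "h:v")) != EOF)
		switch (ch) {
		case 'h':
			home = optarg;		/* argument of -h */
			break;
		case 'v':
			verbose = 1;
			break;
		default:
			fprintf(stderr, "usage: example [-v] [-h home]\n");
			return (EXIT_FAILURE);
		}
	argc -= optind;
	argv += optind;

	if (verbose && home != NULL)
		printf("home directory: %s\n", home);
	return (EXIT_SUCCESS);
}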
diff --git a/libdb/build_win32/db_cxx.h b/libdb/build_win32/db_cxx.h
new file mode 100644
index 0000000..4929362
--- /dev/null
+++ b/libdb/build_win32/db_cxx.h
@@ -0,0 +1,796 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <stdarg.h>
+
+#define HAVE_CXX_STDHEADERS 1
+#ifdef HAVE_CXX_STDHEADERS
+#include <iostream>
+#define __DB_OSTREAMCLASS std::ostream
+#else
+#include <iostream.h>
+#define __DB_OSTREAMCLASS ostream
+#endif
+
+#include "db.h"
+#include "cxx_common.h"
+#include "cxx_except.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLogc; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class DbPreplist; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class, that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// being "private", we always use these by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
+#define DEFINE_DB_CLASS(name) \
+ public: class name##Imp* imp() { return (imp_); } \
+ public: const class name##Imp* constimp() const { return (imp_); } \
+ private: class name##Imp* imp_
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see above level 3 unless
+// you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users to define
+// callback functions. For performance and logistical reasons, some
+// callback functions must be declared in extern "C" blocks. For others,
+// we allow you to declare the callbacks in C++ or C (or an extern "C"
+// block) as you wish. See the set methods for the callbacks for
+// the choices.
+//
+extern "C" {
+ typedef void * (*db_malloc_fcn_type)
+ (size_t);
+ typedef void * (*db_realloc_fcn_type)
+ (void *, size_t);
+ typedef void (*db_free_fcn_type)
+ (void *);
+ typedef int (*bt_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef int (*dup_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/
+ (DB *, const void *, u_int32_t);
+ typedef int (*pgin_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
+class _exported DbLock
+{
+ friend class DbEnv;
+
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+
+protected:
+ // We can add data to this class if needed
+ // since its contained class is not allocated by db.
+ // (see comment at top)
+
+ DbLock(DB_LOCK);
+ DB_LOCK lock_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
+class _exported DbLsn : protected DB_LSN
+{
+ friend class DbEnv; // friendship needed to cast to base class
+ friend class DbLogc; // friendship needed to cast to base class
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
+class _exported DbMpoolFile
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbMpoolFile);
+
+public:
+ int close(u_int32_t flags);
+ int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
+ void last_pgno(db_pgno_t *pgnoaddr);
+ int open(const char *file, u_int32_t flags, int mode, size_t pagesize);
+ int put(void *pgaddr, u_int32_t flags);
+ void refcnt(db_pgno_t *pgnoaddr);
+ int set(void *pgaddr, u_int32_t flags);
+ int set_clear_len(u_int32_t len);
+ int set_fileid(u_int8_t *fileid);
+ int set_ftype(int ftype);
+ int set_lsn_offset(int32_t offset);
+ int set_pgcookie(DBT *dbt);
+ void set_unlink(int);
+ int sync();
+
+ virtual DB_MPOOLFILE *get_DB_MPOOLFILE()
+ {
+ return (DB_MPOOLFILE *)imp();
+ }
+
+ virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const
+ {
+ return (const DB_MPOOLFILE *)constimp();
+ }
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile,
+ // and call DbMpoolFile::close() rather than delete to release them.
+ //
+ DbMpoolFile();
+
+ // Shut g++ up.
+protected:
+ virtual ~DbMpoolFile();
+
+private:
+ // no copying
+ DbMpoolFile(const DbMpoolFile &);
+ void operator = (const DbMpoolFile &);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// This is filled in and returned by the DbEnv::txn_recover() method.
+//
+
+class _exported DbPreplist
+{
+public:
+ DbTxn *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbTxn);
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ int discard(u_int32_t flags);
+ u_int32_t id();
+ int prepare(u_int8_t *gid);
+ int set_timeout(db_timeout_t timeout, u_int32_t flags);
+
+ virtual DB_TXN *get_DB_TXN()
+ {
+ return (DB_TXN *)imp();
+ }
+
+ virtual const DB_TXN *get_const_DB_TXN() const
+ {
+ return (const DB_TXN *)constimp();
+ }
+
+ static DbTxn* get_DbTxn(DB_TXN *txn)
+ {
+ return (DbTxn *)txn->api_internal;
+ }
+
+ static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
+ {
+ return (const DbTxn *)txn->api_internal;
+ }
+
+ // For internal use only.
+ static DbTxn* wrap_DB_TXN(DB_TXN *txn);
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+ // and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ // For internal use only.
+ DbTxn(DB_TXN *txn);
+ virtual ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+};
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// Users of this library can use this class as a starting point for
+// developing a DB application: derive their application class from
+// this one and add application control logic.
+//
+// Note that if you use the default constructor, you must explicitly
+// call appinit() before any other db activity (e.g. opening files)
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbEnv);
+
+public:
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ virtual ~DbEnv();
+
+ // These methods match those in the C interface.
+ //
+ virtual int close(u_int32_t);
+ virtual int dbremove(DbTxn *txn, const char *name, const char *subdb,
+ u_int32_t flags);
+ virtual int dbrename(DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual void *get_app_private() const;
+ virtual int open(const char *, u_int32_t, int);
+ virtual int remove(const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_data_dir(const char *);
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_flags(u_int32_t, int);
+ virtual int set_feedback(void (*)(DbEnv *, int, int));
+ virtual int set_lg_bsize(u_int32_t);
+ virtual int set_lg_dir(const char *);
+ virtual int set_lg_max(u_int32_t);
+ virtual int set_lg_regionmax(u_int32_t);
+ virtual int set_lk_conflicts(u_int8_t *, int);
+ virtual int set_lk_detect(u_int32_t);
+ virtual int set_lk_max(u_int32_t);
+ virtual int set_lk_max_lockers(u_int32_t);
+ virtual int set_lk_max_locks(u_int32_t);
+ virtual int set_lk_max_objects(u_int32_t);
+ virtual int set_mp_mmapsize(size_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_rpc_server(void *, char *, long, long, u_int32_t);
+ virtual int set_shm_key(long);
+ virtual int set_timeout(db_timeout_t timeout, u_int32_t flags);
+ virtual int set_tmp_dir(const char *);
+ virtual int set_tas_spins(u_int32_t);
+ virtual int set_tx_max(u_int32_t);
+ virtual int set_app_dispatch(int (*)(DbEnv *,
+ Dbt *, DbLsn *, db_recops));
+ virtual int set_tx_timestamp(time_t *);
+ virtual int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+ static void runtime_error_dbt(const char *caller, Dbt *dbt,
+ int error_policy);
+ static void runtime_error_lock_get(const char *caller, int err,
+ db_lockop_t op, db_lockmode_t mode,
+ const Dbt *obj, DbLock lock, int index,
+ int error_policy);
+
+ // Lock functions
+ //
+ virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ virtual int lock_id(u_int32_t *idp);
+ virtual int lock_id_free(u_int32_t id);
+ virtual int lock_put(DbLock *lock);
+ virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+ virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ virtual int log_archive(char **list[], u_int32_t flags);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ virtual int log_cursor(DbLogc **cursorp, u_int32_t flags);
+ virtual int log_file(DbLsn *lsn, char *namep, size_t len);
+ virtual int log_flush(const DbLsn *lsn);
+ virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+
+ // Mpool functions
+ //
+ virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+ virtual int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+ virtual int memp_stat(DB_MPOOL_STAT
+ **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags);
+ virtual int memp_sync(DbLsn *lsn);
+ virtual int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ virtual int txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags);
+ virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+
+ // Replication functions
+ //
+ virtual int rep_elect(int, int, u_int32_t, int *);
+ virtual int rep_process_message(Dbt *, Dbt *, int *);
+ virtual int rep_start(Dbt *, u_int32_t);
+ virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+ virtual int set_rep_limit(u_int32_t, u_int32_t);
+ virtual int set_rep_transport(u_int32_t,
+ int (*)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t));
+
+ // Conversion functions
+ //
+ virtual DB_ENV *get_DB_ENV()
+ {
+ return (DB_ENV *)imp();
+ }
+
+ virtual const DB_ENV *get_const_DB_ENV() const
+ {
+ return (const DB_ENV *)constimp();
+ }
+
+ static DbEnv* get_DbEnv(DB_ENV *dbenv)
+ {
+ return (DbEnv *)dbenv->api1_internal;
+ }
+
+ static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv)
+ {
+ return (const DbEnv *)dbenv->api1_internal;
+ }
+
+ // For internal use only.
+ static DbEnv* wrap_DB_ENV(DB_ENV *dbenv);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static int _rep_send_intercept(DB_ENV *env,
+ const DBT *cntrl, const DBT *data,
+ int id, u_int32_t flags);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // For internal use only.
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ void (*feedback_callback_)(DbEnv *, int, int);
+ void (*paniccall_callback_)(DbEnv *, int);
+ int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*rep_send_callback_)(DbEnv *,
+ const Dbt *, const Dbt *, int, u_int32_t);
+
+ // class data
+ static __DB_OSTREAMCLASS *error_stream_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(Db);
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ virtual ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ virtual int associate(DbTxn *txn, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+ virtual int close(u_int32_t flags);
+ virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual int fd(int *fdp);
+ virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ virtual void *get_app_private() const;
+ virtual int get_byteswapped(int *);
+ virtual int get_type(DBTYPE *);
+ virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ virtual int open(DbTxn *txnid,
+ const char *, const char *subname, DBTYPE, u_int32_t, int);
+ virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
+ u_int32_t flags);
+ virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ virtual int remove(const char *, const char *, u_int32_t);
+ virtual int rename(const char *, const char *, const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
+ virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_bt_maxkey(u_int32_t);
+ virtual int set_bt_minkey(u_int32_t);
+ virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
+ virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_cache_priority(DB_CACHE_PRIORITY);
+ virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/
+ virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_feedback(void (*)(Db *, int, int));
+ virtual int set_flags(u_int32_t);
+ virtual int set_h_ffactor(u_int32_t);
+ virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/
+ virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t));
+ virtual int set_h_nelem(u_int32_t);
+ virtual int set_lorder(int);
+ virtual int set_pagesize(u_int32_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_re_delim(int);
+ virtual int set_re_len(u_int32_t);
+ virtual int set_re_pad(int);
+ virtual int set_re_source(char *);
+ virtual int set_q_extentsize(u_int32_t);
+ virtual int stat(void *sp, u_int32_t flags);
+ virtual int sync(u_int32_t flags);
+ virtual int truncate(DbTxn *, u_int32_t *, u_int32_t);
+ virtual int upgrade(const char *name, u_int32_t flags);
+ virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, u_int32_t);
+
+ // These additional methods are not in the C interface, and
+ // are only available for C++.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ virtual DB *get_DB()
+ {
+ return (DB *)imp();
+ }
+
+ virtual const DB *get_const_DB() const
+ {
+ return (const DB *)constimp();
+ }
+
+ static Db* get_Db(DB *db)
+ {
+ return (Db *)db->api_internal;
+ }
+
+ static const Db* get_const_Db(const DB *db)
+ {
+ return (const Db *)db->api_internal;
+ }
+
+private:
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+
+public:
+ // These are public only because they need to be called
+ // via C callback functions. They should never be used by
+ // external users of this class.
+ //
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+ int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *);
+ int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *);
+ int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ void (*feedback_callback_)(Db *, int, int);
+ u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t);
+};
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+ friend class DbLogc;
+
+public:
+
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+ // to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
+
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
+
+class _exported DbLogc : protected DB_LOGC
+{
+ friend class DbEnv;
+
+public:
+ int close(u_int32_t _flags);
+ int get(DbLsn *lsn, Dbt *data, u_int32_t _flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use DbEnv::log_cursor() to get pointers to a DbLogc,
+ // and call DbLogc::close() rather than delete to release them.
+ //
+ DbLogc();
+ ~DbLogc();
+
+ // no copying
+ DbLogc(const DbLogc &);
+ DbLogc &operator = (const DbLogc &);
+};
+#endif /* !_DB_CXX_H_ */
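
For orientation, a minimal sketch of how the Db, Dbt and Dbc classes declared above fit together. This is illustrative only and not part of the patch; it assumes the usual Berkeley DB 4.1 constants from db.h (DB_CXX_NO_EXCEPTIONS, DB_CREATE, DB_BTREE, DB_NEXT) and uses only the method signatures shown in the header:

	#include <db_cxx.h>
	#include <iostream>

	int main()
	{
		// No DbEnv, and report errors as return codes rather than exceptions.
		Db db(NULL, DB_CXX_NO_EXCEPTIONS);
		int ret = db.open(NULL, "demo.db", NULL, DB_BTREE, DB_CREATE, 0);
		if (ret != 0)
			return ret;

		char key_buf[] = "fruit", data_buf[] = "apple";
		Dbt key(key_buf, sizeof(key_buf)), data(data_buf, sizeof(data_buf));
		db.put(NULL, &key, &data, 0);		// store one key/value pair

		Dbc *cursor;
		db.cursor(NULL, &cursor, 0);		// Db::cursor() hands back a Dbc
		Dbt k, d;
		while (cursor->get(&k, &d, DB_NEXT) == 0)
			std::cout << (char *)k.get_data() << " -> "
			    << (char *)d.get_data() << std::endl;
		cursor->close();			// close the cursor, do not delete it

		return db.close(0);			// ~Db() does not call close()
	}

Building without DB_CXX_NO_EXCEPTIONS makes most failing calls throw DbException instead of returning an error code; the declarations above support either style.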
diff --git a/libdb/build_win32/db_deadlock.dsp b/libdb/build_win32/db_deadlock.dsp
new file mode 100644
index 0000000..680f1bd
--- /dev/null
+++ b/libdb/build_win32/db_deadlock.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_deadlock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_deadlock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_deadlock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_deadlock.mak" CFG="db_deadlock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_deadlock - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_deadlock - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_deadlock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_deadlock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_deadlock - Win32 Release"
+# Name "db_deadlock - Win32 Debug"
+# Name "db_deadlock - Win32 Release Static"
+# Name "db_deadlock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_deadlock\db_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_dll.dsp b/libdb/build_win32/db_dll.dsp
new file mode 100644
index 0000000..6194003
--- /dev/null
+++ b/libdb/build_win32/db_dll.dsp
@@ -0,0 +1,792 @@
+# Microsoft Developer Studio Project File - Name="db_dll" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_dll - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_dll.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_dll.mak" CFG="db_dll - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_dll - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_dll - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_dll - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb41.dll"
+
+!ELSEIF "$(CFG)" == "db_dll - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb41d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_dll - Win32 Release"
+# Name "db_dll - Win32 Debug"
+# Begin Source File
+
+SOURCE=..\btree\bt_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_curadj.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_cursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_delete.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_recno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_search.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_split.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\btree_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\dllmain.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\libdb.def
+# End Source File
+# Begin Source File
+
+SOURCE=.\libdb.rc
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\strcasecmp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_byteorder.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_err.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_getlong.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_idspace.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_log2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_sig.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_db.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_dbc.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_dbt.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_env.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_except.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_lock.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_logc.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_mpool.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_txn.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_am.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_cam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dispatch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_iface.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_join.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_overflow.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_pr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_remove.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_ret.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_truncate.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg_opd.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfy.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfyutil.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbm\dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_salloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_shash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_file.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fileops_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_basic.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_func.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_page.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hmac\hmac.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hmac\sha1.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hsearch\hsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_get.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_bh.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fget.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fopen.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fput.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fset.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_sync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mut_win32.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mutex.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_id.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_oflags.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_root.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rpath.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_tmpdir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_unlink.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_abs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_clock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_dir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_errno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fid.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fsync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_handle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_map.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rw.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_seek.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_sleep.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_spin.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_type.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_files.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_record.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_map.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_dump.dsp b/libdb/build_win32/db_dump.dsp
new file mode 100644
index 0000000..2f7847b
--- /dev/null
+++ b/libdb/build_win32/db_dump.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_dump" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_dump - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_dump.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_dump.mak" CFG="db_dump - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_dump - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_dump - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_dump - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_dump - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_dump - Win32 Release"
+# Name "db_dump - Win32 Debug"
+# Name "db_dump - Win32 Release Static"
+# Name "db_dump - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_dump\db_dump.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_int.h b/libdb/build_win32/db_int.h
new file mode 100644
index 0000000..c775a12
--- /dev/null
+++ b/libdb/build_win32/db_int.h
@@ -0,0 +1,474 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * System includes, db.h, a few general DB includes. The DB includes are
+ * here because it's OK if db_int.h includes queue structure declarations.
+ *******************************************************/
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "db.h"
+
+#include "dbinc/queue.h"
+#include "dbinc/shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/* Test for a power-of-two (tests true for zero, which doesn't matter here). */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+/* Test for valid page sizes. */
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+
+/* Minimum number of pages cached, by default. */
+#define DB_MINPAGECACHE 16
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+typedef unsigned long db_align_t;
+typedef unsigned long db_alignp_t;
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/*
+ * Convert a pointer to a small integral value.
+ *
+ * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast
+ * converts the value to an integral type, and the (u_int16_t) cast converts
+ * it to a small integral type so we don't get complaints when we assign the
+ * final result to an integral type smaller than db_alignp_t.
+ */
+#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p))
+#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p))
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0]))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) ((flags) &= ~(f))
+#define LF_ISSET(f) ((flags) & (f))
+#define LF_SET(f) ((flags) |= (f))
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * API return values
+ *******************************************************/
+ /*
+ * Return values that are OK for each different call. Most calls treat
+ * 'a return of 0 is the only OK value' as the rule, but some, like
+ * db->get, can return DB_NOTFOUND, which really isn't an error.
+ */
+#define DB_RETOK_STD(ret) ((ret) == 0)
+#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBCGET(ret) DB_RETOK_DBGET(ret)
+#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBDEL(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST)
+#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
+#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || (ret) == DB_REP_NEWMASTER || \
+ (ret) == DB_REP_NEWSITE)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "\\/:" /* Path separator character(s). */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */
+#define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */
+#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x0008 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x0020 /* Opening a region file. */
+#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x0080 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * CRYPTO_ON Security has been configured.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * RPC_ON RPC has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING: The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Initialization methods are often illegal before/after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \
+ if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 0));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, i, flags));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env((dbp)->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */
+#define FREE_IF_NEEDED(sdbp, dbt) \
+ if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \
+ __os_ufree((sdbp)->dbenv, (dbt)->data); \
+ F_CLR((dbt), DB_DBT_APPMALLOC); \
+ }
+
+/*
+ * Use memory belonging to object "owner" to return the results of
+ * any no-DBT-flag get ops on cursor "dbc".
+ */
+#define SET_RET_MEM(dbc, owner) \
+ do { \
+ (dbc)->rskey = &(owner)->my_rskey; \
+ (dbc)->rkey = &(owner)->my_rkey; \
+ (dbc)->rdata = &(owner)->my_rdata; \
+ } while (0)
+
+/* Use the return-data memory src is currently set to use in dest as well. */
+#define COPY_RET_MEM(src, dest) \
+ do { \
+ (dest)->rskey = (src)->rskey; \
+ (dest)->rkey = (src)->rkey; \
+ (dest)->rdata = (src)->rdata; \
+ } while (0)
+
+/* Reset the returned-memory pointers to their defaults. */
+#define RESET_RET_MEM(dbc) \
+ do { \
+ (dbc)->rskey = &(dbc)->my_rskey; \
+ (dbc)->rkey = &(dbc)->my_rkey; \
+ (dbc)->rdata = &(dbc)->my_rdata; \
+ } while (0)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ u_int32_t flags; /* Some DB_AM flags needed. */
+ DBTYPE type; /* DB type */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0)
+#define INIT_LSN(LSN) do { \
+ (LSN).file = 1; \
+ (LSN).offset = 0; \
+} while (0)
+
+#define MAX_LSN(LSN) do { \
+ (LSN).file = UINT32_T_MAX; \
+ (LSN).offset = UINT32_T_MAX; \
+} while (0)
+#define IS_MAX_LSN(LSN) \
+ ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX)
+
+/* If logging is turned off, smash the lsn. */
+#define LSN_NOT_LOGGED(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 1; \
+} while (0)
+#define IS_NOT_LOGGED_LSN(LSN) \
+ ((LSN).file == 0 && (LSN).offset == 1)
+
+/*
+ * Test if the environment is currently logging changes. If we're in
+ * recovery or we're a replication client, we don't need to log changes
+ * because they're already in the log, even though we have a fully functional
+ * log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !F_ISSET((dbenv), DB_ENV_REP_CLIENT) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. Note that the DBC_RECOVER flag is set
+ * when we're in abort, as well as during recovery; thus DBC_LOGGING may be
+ * false for a particular dbc even when DBENV_LOGGING is true.
+ *
+ * We explicitly use LOGGING_ON/DB_ENV_REP_CLIENT here because we don't
+ * want to have to pull in the log headers, which IS_RECOVERING (and thus
+ * DBENV_LOGGING) rely on, and because DBC_RECOVER should be set anytime
+ * IS_RECOVERING would be true.
+ */
+#define DBC_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET((dbc), DBC_RECOVER) && \
+ !F_ISSET((dbc)->dbp->dbenv, DB_ENV_REP_CLIENT))
+
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Crypto.
+ *******************************************************/
+#define DB_IV_BYTES 16 /* Bytes per IV */
+#define DB_MAC_KEY 20 /* Bytes per MAC checksum */
+
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * Remaining general DB includes.
+ *******************************************************/
+
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
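
The alignment and flag-test macros above read most easily with concrete values. Here is a small self-contained C++ sketch, with the macros restated locally so it compiles on its own; my_handle and MY_FLAG_OPEN are invented names used purely for illustration:

	#include <iostream>

	// Restated from db_int.h above.
	typedef unsigned long db_align_t;
	#define ALIGN(v, bound)	(((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
	#define F_SET(p, f)	((p)->flags |= (f))
	#define F_ISSET(p, f)	((p)->flags & (f))
	#define F_CLR(p, f)	((p)->flags &= ~(f))

	#define MY_FLAG_OPEN	0x0001u		// invented flag bit, illustration only
	struct my_handle { unsigned int flags; };

	int main()
	{
		// ALIGN rounds v up to the next multiple of a power-of-two bound.
		std::cout << ALIGN(7, 8) << ' ' << ALIGN(8, 8) << ' '
		    << ALIGN(9, 8) << '\n';	// prints: 8 8 16

		my_handle h = { 0 };
		F_SET(&h, MY_FLAG_OPEN);	// set the bit in h.flags
		std::cout << (F_ISSET(&h, MY_FLAG_OPEN) ? "open" : "closed") << '\n';
		F_CLR(&h, MY_FLAG_OPEN);	// and clear it again
		return 0;
	}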
diff --git a/libdb/build_win32/db_java.dsp b/libdb/build_win32/db_java.dsp
new file mode 100644
index 0000000..19ca888
--- /dev/null
+++ b/libdb/build_win32/db_java.dsp
@@ -0,0 +1,180 @@
+# Microsoft Developer Studio Project File - Name="db_java" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_java - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_java.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_java.mak" CFG="db_java - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_java - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_java - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_java - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java41.dll"
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Release\libdb_java41.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Release\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Release\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "db_java - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java41d.dll" /fixed:no
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Debug\libdb_java41d.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Debug\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Debug\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_java - Win32 Release"
+# Name "db_java - Win32 Debug"
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbEnv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbLock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbLogc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbLsn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbTxn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_DbUtil.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Dbc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_Dbt.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_info.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_locked.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_stat_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\libdb_java\java_util.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_java_xa.dsp b/libdb/build_win32/db_java_xa.dsp
new file mode 100644
index 0000000..9c700ff
--- /dev/null
+++ b/libdb/build_win32/db_java_xa.dsp
@@ -0,0 +1,85 @@
+# Microsoft Developer Studio Project File - Name="db_java_xa" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) External Target" 0x0106
+
+CFG=db_java_xa - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak" CFG="db_java_xa - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_java_xa - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "db_java_xa - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Release/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Release/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Debug/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Debug/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_java_xa - Win32 Release"
+# Name "db_java_xa - Win32 Debug"
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+!ENDIF
+
+# Begin Source File
+
+SOURCE=.\db_java_xaj.mak
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_java_xaj.mak b/libdb/build_win32/db_java_xaj.mak
new file mode 100644
index 0000000..c2dbc92
--- /dev/null
+++ b/libdb/build_win32/db_java_xaj.mak
@@ -0,0 +1,21 @@
+JAVA_XADIR=../java/src/com/sleepycat/db/xa
+
+JAVA_XASRCS=\
+ $(JAVA_XADIR)/DbXAResource.java \
+ $(JAVA_XADIR)/DbXid.java
+
+Release/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Release/classes -classpath "$(CLASSPATH);./Release/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Release\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
+
+Debug/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Debug/classes -classpath "$(CLASSPATH);./Debug/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Debug\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
diff --git a/libdb/build_win32/db_lib.dsp b/libdb/build_win32/db_lib.dsp
new file mode 100644
index 0000000..a7fb415
--- /dev/null
+++ b/libdb/build_win32/db_lib.dsp
@@ -0,0 +1,92 @@
+# Microsoft Developer Studio Project File - Name="db_lib" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=db_lib - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak" CFG="db_lib - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_lib - Win32 Release" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Release Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_lib - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_lib - Win32 Release"
+# Name "db_lib - Win32 Debug"
+# Name "db_lib - Win32 Release Static"
+# Name "db_lib - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_load.dsp b/libdb/build_win32/db_load.dsp
new file mode 100644
index 0000000..95c866a
--- /dev/null
+++ b/libdb/build_win32/db_load.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_load" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_load - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_load.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_load.mak" CFG="db_load - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_load - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_load - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_load - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_load - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_load - Win32 Release"
+# Name "db_load - Win32 Debug"
+# Name "db_load - Win32 Release Static"
+# Name "db_load - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_load\db_load.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_perf.dsp b/libdb/build_win32/db_perf.dsp
new file mode 100644
index 0000000..21b79ed
--- /dev/null
+++ b/libdb/build_win32/db_perf.dsp
@@ -0,0 +1,216 @@
+# Microsoft Developer Studio Project File - Name="db_perf" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_perf - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_perf.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_perf.mak" CFG="db_perf - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_perf - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_perf - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_perf - Win32 Release"
+# Name "db_perf - Win32 Debug"
+# Name "db_perf - Win32 Release Static"
+# Name "db_perf - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\test_perf\db_perf.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_cache_check.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_checkpoint.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_dbs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_debug.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_file.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_key.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_misc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_op.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_parse.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_rand.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_spawn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_thread.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_printlog.dsp b/libdb/build_win32/db_printlog.dsp
new file mode 100644
index 0000000..684b3ec
--- /dev/null
+++ b/libdb/build_win32/db_printlog.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_printlog" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_printlog - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_printlog.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_printlog.mak" CFG="db_printlog - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_printlog - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_printlog - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_printlog - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_printlog - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_printlog - Win32 Release"
+# Name "db_printlog - Win32 Debug"
+# Name "db_printlog - Win32 Release Static"
+# Name "db_printlog - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_printlog\db_printlog.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_recover.dsp b/libdb/build_win32/db_recover.dsp
new file mode 100644
index 0000000..0c39584
--- /dev/null
+++ b/libdb/build_win32/db_recover.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_recover" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_recover - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_recover.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_recover.mak" CFG="db_recover - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_recover - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_recover - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_recover - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_recover - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_recover - Win32 Release"
+# Name "db_recover - Win32 Debug"
+# Name "db_recover - Win32 Release Static"
+# Name "db_recover - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_recover\db_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_stat.dsp b/libdb/build_win32/db_stat.dsp
new file mode 100644
index 0000000..0a86fb3
--- /dev/null
+++ b/libdb/build_win32/db_stat.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_stat" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_stat - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_stat.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_stat.mak" CFG="db_stat - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_stat - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_stat - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_stat - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_stat - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_stat - Win32 Release"
+# Name "db_stat - Win32 Debug"
+# Name "db_stat - Win32 Release Static"
+# Name "db_stat - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_stat\db_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_static.dsp b/libdb/build_win32/db_static.dsp
new file mode 100644
index 0000000..3ee9aa5
--- /dev/null
+++ b/libdb/build_win32/db_static.dsp
@@ -0,0 +1,772 @@
+# Microsoft Developer Studio Project File - Name="db_static" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=db_static - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_static.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_static.mak" CFG="db_static - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_static - Win32 Release Static" (based on "Win32 (x86) Static Library")
+!MESSAGE "db_static - Win32 Debug Static" (based on "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_static - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Release/libdb41s.lib"
+# ADD LIB32 /nologo /out:"Release_static/libdb41s.lib"
+
+!ELSEIF "$(CFG)" == "db_static - Win32 Debug Static"
+
+# PROP BASE Use_MFC 1
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 1
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Debug/libdb41sd.lib"
+# ADD LIB32 /nologo /out:"Debug_static/libdb41sd.lib"
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_static - Win32 Release Static"
+# Name "db_static - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\btree\bt_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_curadj.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_cursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_delete.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_recno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_rsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_search.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_split.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\bt_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\btree\btree_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\strcasecmp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_byteorder.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_err.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_getlong.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_idspace.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\db_log2.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_cache.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\common\util_sig.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_db.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_dbc.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_dbt.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_env.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_except.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_lock.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_logc.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_mpool.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\cxx\cxx_txn.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\crdel_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_am.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_cam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dispatch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_iface.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_join.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_overflow.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_pr.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_remove.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_ret.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_truncate.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_upg_opd.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfy.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\db\db_vrfyutil.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbm\dbm.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\dbreg\dbreg_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_salloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\db_shash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_file.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\env\env_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fileops_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_basic.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\fileops\fop_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_dup.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_func.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_meta.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_page.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_reclaim.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hash\hash_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hmac\hmac.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hmac\sha1.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\hsearch\hsearch.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_deadlock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\lock\lock_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_archive.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_compare.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_get.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\log\log_put.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_bh.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fget.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fopen.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fput.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_fset.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_register.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_sync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mp\mp_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mut_win32.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\mutex\mutex.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_alloc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_id.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_oflags.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_root.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_rpath.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_tmpdir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os\os_unlink.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_abs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_clock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_dir.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_errno.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fid.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_fsync.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_handle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_map.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rename.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_rw.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_seek.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_sleep.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_spin.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\os_win32\os_type.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_conv.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_files.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_open.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\qam\qam_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_record.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\rep\rep_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_auto.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_method.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_rec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_recover.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_region.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_stat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\txn\txn_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\xa\xa_map.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_tcl.dsp b/libdb/build_win32/db_tcl.dsp
new file mode 100644
index 0000000..8a813e9
--- /dev/null
+++ b/libdb/build_win32/db_tcl.dsp
@@ -0,0 +1,144 @@
+# Microsoft Developer Studio Project File - Name="db_tcl" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=db_tcl - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_tcl.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_tcl.mak" CFG="db_tcl - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_tcl - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "db_tcl - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_tcl - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb41.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl41.dll"
+
+!ELSEIF "$(CFG)" == "db_tcl - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl41d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_tcl - Win32 Release"
+# Name "db_tcl - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\libdb_tcl.def
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_compat.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_db.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_db_pkg.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_dbcursor.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_env.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_internal.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_mp.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_rep.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\tcl\tcl_util.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_test.dsp b/libdb/build_win32/db_test.dsp
new file mode 100644
index 0000000..5d936c5
--- /dev/null
+++ b/libdb/build_win32/db_test.dsp
@@ -0,0 +1,100 @@
+# Microsoft Developer Studio Project File - Name="db_test" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_test - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_test.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_test.mak" CFG="db_test - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_test - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_test - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_test - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Release\*.exe .
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "db_test - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Debug\*.exe .
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_test - Win32 Release"
+# Name "db_test - Win32 Debug"
+# Begin Source File
+
+SOURCE=.\dbkill.cpp
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_test.src b/libdb/build_win32/db_test.src
new file mode 100644
index 0000000..73479d3
--- /dev/null
+++ b/libdb/build_win32/db_test.src
@@ -0,0 +1,97 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Release\*.exe .
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Debug\*.exe .
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_upgrade.dsp b/libdb/build_win32/db_upgrade.dsp
new file mode 100644
index 0000000..2a4031f
--- /dev/null
+++ b/libdb/build_win32/db_upgrade.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_upgrade" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_upgrade - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_upgrade.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_upgrade.mak" CFG="db_upgrade - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_upgrade - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_upgrade - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_upgrade - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_upgrade - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_upgrade - Win32 Release"
+# Name "db_upgrade - Win32 Debug"
+# Name "db_upgrade - Win32 Release Static"
+# Name "db_upgrade - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_upgrade\db_upgrade.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/db_verify.dsp b/libdb/build_win32/db_verify.dsp
new file mode 100644
index 0000000..b4f62cb
--- /dev/null
+++ b/libdb/build_win32/db_verify.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="db_verify" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_verify - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_verify.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_verify.mak" CFG="db_verify - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_verify - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_verify - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_verify - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_verify - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_verify - Win32 Release"
+# Name "db_verify - Win32 Debug"
+# Name "db_verify - Win32 Release Static"
+# Name "db_verify - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\db_verify\db_verify.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/dbkill.cpp b/libdb/build_win32/dbkill.cpp
new file mode 100644
index 0000000..e646dfb
--- /dev/null
+++ b/libdb/build_win32/dbkill.cpp
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+/*
+ * Kill -
+ * Simulate Unix kill on Windows/NT and Windows/9X.
+ * This good enough to support the Berkeley DB test suite,
+ * but may be missing some favorite features.
+ *
+ * Would have used MKS kill, but it didn't seem to work well
+ * on Win/9X. Cygnus kill works within the Gnu/Cygnus environment
+ * (where processes are given small pids, with presumably a translation
+ * table between small pids and actual process handles), but our test
+ * environment, via Tcl, does not use the Cygnus environment.
+ *
+ * Compile this and install it as c:/tools/kill.exe (or as indicated
+ * by build_win32/include.tcl ).
+ */
+
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+
+/*
+ * Like atol, with specified base. Would use stdlib, but
+ * strtol("0xFFFF1234", NULL, 16) returns 0x7FFFFFFF and
+ * strtol("4294712487", NULL, 16) returns 0x7FFFFFFF w/ VC++
+ */
+long
+myatol(char *s, int base)
+{
+ long result = 0;
+ char ch;
+ int sign = 1; /* + */
+ if (base == 0)
+ base = 10;
+ if (base != 10 && base != 16)
+ return LONG_MAX;
+ while ((ch = *s++) != '\0') {
+ if (ch == '-') {
+ sign = -sign;
+ }
+ else if (ch >= '0' && ch <= '9') {
+ result = result * base + (ch - '0');
+ }
+ else if (ch == 'x' || ch == 'X') {
+ /* Allow leading 0x..., and switch to base 16 */
+ base = 16;
+ }
+ else if (base == 16 && ch >= 'a' && ch <= 'f') {
+ result = result * base + (ch - 'a' + 10);
+ }
+ else if (base == 16 && ch >= 'A' && ch <= 'F') {
+ result = result * base + (ch - 'A' + 10);
+ }
+ else {
+ if (sign > 1)
+ return LONG_MAX;
+ else
+ return LONG_MIN;
+ }
+ }
+ return sign * result;
+}
+
+void
+usage_exit()
+{
+ fprintf(stderr, "Usage: kill [ -sig ] pid\n");
+ fprintf(stderr, " for win32, sig must be or 0, 15 (TERM)\n");
+ exit(EXIT_FAILURE);
+}
+
+int
+main(int argc, char **argv)
+{
+ HANDLE hProcess ;
+ DWORD accessflag;
+ long pid;
+ int sig = 15;
+
+ if (argc > 2) {
+ if (argv[1][0] != '-')
+ usage_exit();
+
+ if (strcmp(argv[1], "-TERM") == 0)
+ sig = 15;
+ else {
+ /* currently sig is more or less ignored,
+ * we only care if it is zero or not
+ */
+ sig = atoi(&argv[1][1]);
+ if (sig < 0)
+ usage_exit();
+ }
+ argc--;
+ argv++;
+ }
+ if (argc < 2)
+ usage_exit();
+
+ pid = myatol(argv[1], 10);
+ /*printf("pid = %ld (0x%lx) (command line %s)\n", pid, pid, argv[1]);*/
+ if (pid == LONG_MAX || pid == LONG_MIN)
+ usage_exit();
+
+ if (sig == 0)
+ accessflag = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
+ else
+ accessflag = STANDARD_RIGHTS_REQUIRED | PROCESS_TERMINATE;
+ hProcess = OpenProcess(accessflag, FALSE, pid);
+ if (hProcess == NULL) {
+ fprintf(stderr, "dbkill: %s: no such process\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+ if (sig == 0)
+ exit(EXIT_SUCCESS);
+ if (!TerminateProcess(hProcess, 99)) {
+ DWORD err = GetLastError();
+ fprintf(stderr,
+ "dbkill: cannot kill process: error %d (0x%lx)\n", err, err);
+ exit(EXIT_FAILURE);
+ }
+ return EXIT_SUCCESS;
+}
diff --git a/libdb/build_win32/dllmain.c b/libdb/build_win32/dllmain.c
new file mode 100644
index 0000000..1898dbc
--- /dev/null
+++ b/libdb/build_win32/dllmain.c
@@ -0,0 +1,97 @@
+/*
+ * --------------------------------------------------------------------------
+ * Copyright (C) 1997 Netscape Communications Corporation
+ * --------------------------------------------------------------------------
+ *
+ * dllmain.c
+ *
+ * $Id$
+ */
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+static int ProcessesAttached = 0;
+static HINSTANCE Instance; /* Global library instance handle. */
+
+/*
+ * The following declaration is for the VC++ DLL entry point.
+ */
+
+BOOL APIENTRY DllMain (HINSTANCE hInst,
+ DWORD reason, LPVOID reserved);
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllEntryPoint --
+ *
+ * This wrapper function is used by Borland to invoke the
+ * initialization code for Tcl. It simply calls the DllMain
+ * routine.
+ *
+ * Results:
+ * See DllMain.
+ *
+ * Side effects:
+ * See DllMain.
+ *
+ *----------------------------------------------------------------------
+ */
+
+BOOL APIENTRY
+DllEntryPoint(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ return DllMain(hInst, reason, reserved);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllMain --
+ *
+ * This routine is called by the VC++ C run time library init
+ * code, or the DllEntryPoint routine. It is responsible for
+ * initializing various dynamically loaded libraries.
+ *
+ * Results:
+ * TRUE on success, FALSE on failure.
+ *
+ * Side effects:
+ * Establishes the 32-to-16 bit thunk and initializes the sockets library.
+ *
+ *----------------------------------------------------------------------
+ */
+BOOL APIENTRY
+DllMain(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+
+ /*
+ * Registration of UT needs to be done only once, for the first
+ * attaching process. At that time, set the tclWin32s flag
+ * to indicate whether the DLL is executing under Win32s or not.
+ */
+
+ if (ProcessesAttached++) {
+ return FALSE; /* Not the first initialization. */
+ }
+
+ Instance = hInst;
+ return TRUE;
+
+ case DLL_PROCESS_DETACH:
+
+ ProcessesAttached--;
+ break;
+ }
+
+ return TRUE;
+}
diff --git a/libdb/build_win32/dynamic_dsp.src b/libdb/build_win32/dynamic_dsp.src
new file mode 100644
index 0000000..3f9b4d5
--- /dev/null
+++ b/libdb/build_win32/dynamic_dsp.src
@@ -0,0 +1,93 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_access.dsp b/libdb/build_win32/ex_access.dsp
new file mode 100644
index 0000000..d7c06a7
--- /dev/null
+++ b/libdb/build_win32/ex_access.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_access" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_access - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_access.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_access.mak" CFG="ex_access - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_access - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_access - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_access - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_access - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_access - Win32 Release"
+# Name "ex_access - Win32 Debug"
+# Name "ex_access - Win32 Release Static"
+# Name "ex_access - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_access.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_btrec.dsp b/libdb/build_win32/ex_btrec.dsp
new file mode 100644
index 0000000..16c782f
--- /dev/null
+++ b/libdb/build_win32/ex_btrec.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_btrec" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_btrec - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_btrec.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_btrec.mak" CFG="ex_btrec - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_btrec - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_btrec - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_btrec - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_btrec - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_btrec - Win32 Release"
+# Name "ex_btrec - Win32 Debug"
+# Name "ex_btrec - Win32 Release Static"
+# Name "ex_btrec - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_btrec.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_env.dsp b/libdb/build_win32/ex_env.dsp
new file mode 100644
index 0000000..6e38306
--- /dev/null
+++ b/libdb/build_win32/ex_env.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_env" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_env - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_env.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_env.mak" CFG="ex_env - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_env - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_env - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_env - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_env - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_env - Win32 Release"
+# Name "ex_env - Win32 Debug"
+# Name "ex_env - Win32 Release Static"
+# Name "ex_env - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_env.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_lock.dsp b/libdb/build_win32/ex_lock.dsp
new file mode 100644
index 0000000..21df3f2
--- /dev/null
+++ b/libdb/build_win32/ex_lock.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_lock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_lock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_lock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_lock.mak" CFG="ex_lock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_lock - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_lock - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_lock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_lock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_lock - Win32 Release"
+# Name "ex_lock - Win32 Debug"
+# Name "ex_lock - Win32 Release Static"
+# Name "ex_lock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_lock.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_mpool.dsp b/libdb/build_win32/ex_mpool.dsp
new file mode 100644
index 0000000..d506da2
--- /dev/null
+++ b/libdb/build_win32/ex_mpool.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_mpool" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_mpool - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_mpool.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_mpool.mak" CFG="ex_mpool - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_mpool - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_mpool - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_mpool - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_mpool - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_mpool - Win32 Release"
+# Name "ex_mpool - Win32 Debug"
+# Name "ex_mpool - Win32 Release Static"
+# Name "ex_mpool - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_mpool.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/ex_tpcb.dsp b/libdb/build_win32/ex_tpcb.dsp
new file mode 100644
index 0000000..f89bf3a
--- /dev/null
+++ b/libdb/build_win32/ex_tpcb.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="ex_tpcb" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=ex_tpcb - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "ex_tpcb.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "ex_tpcb.mak" CFG="ex_tpcb - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "ex_tpcb - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "ex_tpcb - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "ex_tpcb - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "ex_tpcb - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "ex_tpcb - Win32 Release"
+# Name "ex_tpcb - Win32 Debug"
+# Name "ex_tpcb - Win32 Release Static"
+# Name "ex_tpcb - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_c\ex_tpcb.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_access.dsp b/libdb/build_win32/excxx_access.dsp
new file mode 100644
index 0000000..c68f762
--- /dev/null
+++ b/libdb/build_win32/excxx_access.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_access" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_access - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_access.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_access.mak" CFG="excxx_access - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_access - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_access - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_access - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_access - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_access - Win32 Release"
+# Name "excxx_access - Win32 Debug"
+# Name "excxx_access - Win32 Release Static"
+# Name "excxx_access - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\AccessExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_btrec.dsp b/libdb/build_win32/excxx_btrec.dsp
new file mode 100644
index 0000000..03c4403
--- /dev/null
+++ b/libdb/build_win32/excxx_btrec.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_btrec" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_btrec - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_btrec.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_btrec.mak" CFG="excxx_btrec - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_btrec - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_btrec - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_btrec - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_btrec - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_btrec - Win32 Release"
+# Name "excxx_btrec - Win32 Debug"
+# Name "excxx_btrec - Win32 Release Static"
+# Name "excxx_btrec - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\BtRecExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_env.dsp b/libdb/build_win32/excxx_env.dsp
new file mode 100644
index 0000000..3ea71ad
--- /dev/null
+++ b/libdb/build_win32/excxx_env.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_env" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_env - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_env.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_env.mak" CFG="excxx_env - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_env - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_env - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_env - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_env - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_env - Win32 Release"
+# Name "excxx_env - Win32 Debug"
+# Name "excxx_env - Win32 Release Static"
+# Name "excxx_env - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\EnvExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_lock.dsp b/libdb/build_win32/excxx_lock.dsp
new file mode 100644
index 0000000..ccd49a5
--- /dev/null
+++ b/libdb/build_win32/excxx_lock.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_lock" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_lock - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_lock.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_lock.mak" CFG="excxx_lock - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_lock - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_lock - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_lock - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_lock - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_lock - Win32 Release"
+# Name "excxx_lock - Win32 Debug"
+# Name "excxx_lock - Win32 Release Static"
+# Name "excxx_lock - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\LockExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_mpool.dsp b/libdb/build_win32/excxx_mpool.dsp
new file mode 100644
index 0000000..09ed9fe
--- /dev/null
+++ b/libdb/build_win32/excxx_mpool.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_mpool" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_mpool - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_mpool.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_mpool.mak" CFG="excxx_mpool - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_mpool - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_mpool - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_mpool - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_mpool - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_mpool - Win32 Release"
+# Name "excxx_mpool - Win32 Debug"
+# Name "excxx_mpool - Win32 Release Static"
+# Name "excxx_mpool - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\MpoolExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/excxx_tpcb.dsp b/libdb/build_win32/excxx_tpcb.dsp
new file mode 100644
index 0000000..df1455a
--- /dev/null
+++ b/libdb/build_win32/excxx_tpcb.dsp
@@ -0,0 +1,148 @@
+# Microsoft Developer Studio Project File - Name="excxx_tpcb" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=excxx_tpcb - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_tpcb.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "excxx_tpcb.mak" CFG="excxx_tpcb - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "excxx_tpcb - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "excxx_tpcb - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "excxx_tpcb - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "excxx_tpcb - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "excxx_tpcb - Win32 Release"
+# Name "excxx_tpcb - Win32 Debug"
+# Name "excxx_tpcb - Win32 Release Static"
+# Name "excxx_tpcb - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\examples_cxx\TpcbExample.cpp
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/libdb/build_win32/include.tcl b/libdb/build_win32/include.tcl
new file mode 100644
index 0000000..bbddf24
--- /dev/null
+++ b/libdb/build_win32/include.tcl
@@ -0,0 +1,19 @@
+# Automatically built by dist/s_test; may require local editing.
+
+set tclsh_path SET_YOUR_TCLSH_PATH
+set tcllib ./Debug/libdb_tcl41d.dll
+
+set src_root ..
+set test_path ../test
+
+global testdir
+set testdir ./TESTDIR
+
+global dict
+global util_path
+
+global is_hp_test
+global is_qnx_test
+global is_windows_test
+
+set KILL ./dbkill.exe
diff --git a/libdb/build_win32/java_dsp.src b/libdb/build_win32/java_dsp.src
new file mode 100644
index 0000000..f46b597
--- /dev/null
+++ b/libdb/build_win32/java_dsp.src
@@ -0,0 +1,129 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Release\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Release\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Debug\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Debug\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/libdb/build_win32/libdb.def b/libdb/build_win32/libdb.def
new file mode 100644
index 0000000..afcb092
--- /dev/null
+++ b/libdb/build_win32/libdb.def
@@ -0,0 +1,128 @@
+; DO NOT EDIT: automatically built by dist/s_win32.
+
+DESCRIPTION 'Berkeley DB 4.1 Library'
+
+EXPORTS
+ db_create @1
+ db_env_create @2
+ db_strerror @3
+ db_version @4
+ db_xa_switch @5
+ log_compare @6
+ txn_abort @7
+ txn_begin @8
+ txn_commit @9
+ db_env_set_func_close @10
+ db_env_set_func_dirfree @11
+ db_env_set_func_dirlist @12
+ db_env_set_func_exists @13
+ db_env_set_func_free @14
+ db_env_set_func_fsync @15
+ db_env_set_func_ioinfo @16
+ db_env_set_func_malloc @17
+ db_env_set_func_map @18
+ db_env_set_func_open @19
+ db_env_set_func_read @20
+ db_env_set_func_realloc @21
+ db_env_set_func_rename @22
+ db_env_set_func_seek @23
+ db_env_set_func_sleep @24
+ db_env_set_func_unlink @25
+ db_env_set_func_unmap @26
+ db_env_set_func_write @27
+ db_env_set_func_yield @28
+ __db_add_recovery @29
+ __db_dbm_close @30
+ __db_dbm_delete @31
+ __db_dbm_fetch @32
+ __db_dbm_firstkey @33
+ __db_dbm_init @34
+ __db_dbm_nextkey @35
+ __db_dbm_store @36
+ __db_hcreate @37
+ __db_hdestroy @38
+ __db_hsearch @39
+ __db_loadme @40
+ __db_ndbm_clearerr @41
+ __db_ndbm_close @42
+ __db_ndbm_delete @43
+ __db_ndbm_dirfno @44
+ __db_ndbm_error @45
+ __db_ndbm_fetch @46
+ __db_ndbm_firstkey @47
+ __db_ndbm_nextkey @48
+ __db_ndbm_open @49
+ __db_ndbm_pagfno @50
+ __db_ndbm_rdonly @51
+ __db_ndbm_store @52
+ __db_panic @53
+ __db_r_attach @54
+ __db_r_detach @55
+ __db_win32_mutex_init @56
+ __db_win32_mutex_lock @57
+ __db_win32_mutex_unlock @58
+ __ham_func2 @59
+ __ham_func3 @60
+ __ham_func4 @61
+ __ham_func5 @62
+ __ham_test @63
+ __lock_dump_region @64
+ __memp_dump_region @65
+ __os_calloc @66
+ __os_closehandle @67
+ __os_free @68
+ __os_ioinfo @69
+ __os_malloc @70
+ __os_open @71
+ __os_openhandle @72
+ __os_read @73
+ __os_realloc @74
+ __os_strdup @75
+ __os_umalloc @76
+ __os_write @77
+ __bam_init_print @78
+ __bam_pgin @79
+ __bam_pgout @80
+ __crdel_init_print @81
+ __db_dispatch @82
+ __db_dump @83
+ __db_e_stat @84
+ __db_err @85
+ __db_getlong @86
+ __db_getulong @87
+ __db_global_values @88
+ __db_init_print @89
+ __db_inmemdbflags @90
+ __db_isbigendian @91
+ __db_omode @92
+ __db_overwrite @93
+ __db_pgin @94
+ __db_pgout @95
+ __db_prdbt @96
+ __db_prfooter @97
+ __db_prheader @98
+ __db_rpath @99
+ __db_util_cache @100
+ __db_util_interrupted @101
+ __db_util_logset @102
+ __db_util_siginit @103
+ __db_util_sigresend @104
+ __db_verify_callback @105
+ __db_verify_internal @106
+ __dbreg_init_print @107
+ __fop_init_print @108
+ __ham_get_meta @109
+ __ham_init_print @110
+ __ham_pgin @111
+ __ham_pgout @112
+ __ham_release_meta @113
+ __os_clock @114
+ __os_get_errno @115
+ __os_id @116
+ __os_set_errno @117
+ __os_sleep @118
+ __os_ufree @119
+ __os_yield @120
+ __qam_init_print @121
+ __qam_pgin_out @122
+ __txn_init_print @123
diff --git a/libdb/build_win32/libdb.rc b/libdb/build_win32/libdb.rc
new file mode 100644
index 0000000..1aace86
--- /dev/null
+++ b/libdb/build_win32/libdb.rc
@@ -0,0 +1,33 @@
+1 VERSIONINFO
+ FILEVERSION 4,0,1,25
+ PRODUCTVERSION 4,0,1,25
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "CompanyName", "Sleepycat Software\0"
+ VALUE "FileDescription", "Berkeley DB 3.0 DLL\0"
+ VALUE "FileVersion", "4.1.25\0"
+ VALUE "InternalName", "libdb.dll\0"
+ VALUE "LegalCopyright", "Copyright � Sleepycat Software Inc. 1997-2002\0"
+ VALUE "OriginalFilename", "libdb.dll\0"
+ VALUE "ProductName", "Sleepycat Software libdb\0"
+ VALUE "ProductVersion", "4.1.25\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
diff --git a/libdb/build_win32/libdb_tcl.def b/libdb/build_win32/libdb_tcl.def
new file mode 100644
index 0000000..a2c603e
--- /dev/null
+++ b/libdb/build_win32/libdb_tcl.def
@@ -0,0 +1,6 @@
+; $Id$
+
+DESCRIPTION 'Berkeley DB TCL interface Library'
+EXPORTS
+ Db_tcl_Init
+ _NameToPtr
diff --git a/libdb/build_win32/libdbrc.src b/libdb/build_win32/libdbrc.src
new file mode 100644
index 0000000..3e5d8de
--- /dev/null
+++ b/libdb/build_win32/libdbrc.src
@@ -0,0 +1,33 @@
+1 VERSIONINFO
+ FILEVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "CompanyName", "Sleepycat Software\0"
+ VALUE "FileDescription", "Berkeley DB 3.0 DLL\0"
+ VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ VALUE "InternalName", "libdb.dll\0"
+ VALUE "LegalCopyright", "Copyright � Sleepycat Software Inc. 1997-2002\0"
+ VALUE "OriginalFilename", "libdb.dll\0"
+ VALUE "ProductName", "Sleepycat Software libdb\0"
+ VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
diff --git a/libdb/build_win32/srcfile_dsp.src b/libdb/build_win32/srcfile_dsp.src
new file mode 100644
index 0000000..408a55e
--- /dev/null
+++ b/libdb/build_win32/srcfile_dsp.src
@@ -0,0 +1,4 @@
+# Begin Source File
+
+SOURCE=@srcdir@\@srcfile@
+# End Source File
diff --git a/libdb/build_win32/static_dsp.src b/libdb/build_win32/static_dsp.src
new file mode 100644
index 0000000..72dc77a
--- /dev/null
+++ b/libdb/build_win32/static_dsp.src
@@ -0,0 +1,85 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=@project_name@ - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 1
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 1
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/libdb/build_win32/tcl_dsp.src b/libdb/build_win32/tcl_dsp.src
new file mode 100644
index 0000000..10ce9b8
--- /dev/null
+++ b/libdb/build_win32/tcl_dsp.src
@@ -0,0 +1,93 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/libdb/clib/getcwd.c b/libdb/clib/getcwd.c
new file mode 100644
index 0000000..d2e7410
--- /dev/null
+++ b/libdb/clib/getcwd.c
@@ -0,0 +1,272 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define ISDOT(dp) \
+ (dp->d_name[0] == '.' && (dp->d_name[1] == '\0' || \
+ (dp->d_name[1] == '.' && dp->d_name[2] == '\0')))
+
+#ifndef dirfd
+#define dirfd(dirp) ((dirp)->dd_fd)
+#endif
+
+/*
+ * getcwd --
+ * Get the current working directory.
+ *
+ * PUBLIC: #ifndef HAVE_GETCWD
+ * PUBLIC: char *getcwd __P((char *, size_t));
+ * PUBLIC: #endif
+ */
+char *
+getcwd(pt, size)
+ char *pt;
+ size_t size;
+{
+ register struct dirent *dp;
+ register DIR *dir;
+ register dev_t dev;
+ register ino_t ino;
+ register int first;
+ register char *bpt, *bup;
+ struct stat s;
+ dev_t root_dev;
+ ino_t root_ino;
+ size_t ptsize, upsize;
+ int ret, save_errno;
+ char *ept, *eup, *up;
+
+ /*
+ * If no buffer specified by the user, allocate one as necessary.
+ * If a buffer is specified, the size has to be non-zero. The path
+ * is built from the end of the buffer backwards.
+ */
+ if (pt) {
+ ptsize = 0;
+ if (!size) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ if (size == 1) {
+ __os_set_errno(ERANGE);
+ return (NULL);
+ }
+ ept = pt + size;
+ } else {
+ if ((ret =
+ __os_malloc(NULL, ptsize = 1024 - 4, &pt)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ ept = pt + ptsize;
+ }
+ bpt = ept - 1;
+ *bpt = '\0';
+
+ /*
+ * Allocate bytes (1024 - malloc space) for the string of "../"'s.
+ * Should always be enough (it's 340 levels). If it's not, allocate
+ * as necessary. Special case the first stat, it's ".", not "..".
+ */
+ if ((ret = __os_malloc(NULL, upsize = 1024 - 4, &up)) != 0)
+ goto err;
+ eup = up + 1024;
+ bup = up;
+ up[0] = '.';
+ up[1] = '\0';
+
+ /* Save root values, so know when to stop. */
+ if (stat("/", &s))
+ goto err;
+ root_dev = s.st_dev;
+ root_ino = s.st_ino;
+
+ __os_set_errno(0); /* XXX readdir has no error return. */
+
+ for (first = 1;; first = 0) {
+ /* Stat the current level. */
+ if (lstat(up, &s))
+ goto err;
+
+ /* Save current node values. */
+ ino = s.st_ino;
+ dev = s.st_dev;
+
+ /* Check for reaching root. */
+ if (root_dev == dev && root_ino == ino) {
+ *--bpt = PATH_SEPARATOR[0];
+ /*
+ * It's unclear that it's a requirement to copy the
+ * path to the beginning of the buffer, but it's always
+ * been that way and stuff would probably break.
+ */
+ bcopy(bpt, pt, ept - bpt);
+ __os_free(NULL, up);
+ return (pt);
+ }
+
+ /*
+ * Build pointer to the parent directory, allocating memory
+ * as necessary. Max length is 3 for "../", the largest
+ * possible component name, plus a trailing NULL.
+ */
+ if (bup + 3 + MAXNAMLEN + 1 >= eup) {
+ if (__os_realloc(NULL, upsize *= 2, &up) != 0)
+ goto err;
+ bup = up;
+ eup = up + upsize;
+ }
+ *bup++ = '.';
+ *bup++ = '.';
+ *bup = '\0';
+
+ /* Open and stat parent directory. */
+ if (!(dir = opendir(up)) || fstat(dirfd(dir), &s))
+ goto err;
+
+ /* Add trailing slash for next directory. */
+ *bup++ = PATH_SEPARATOR[0];
+
+ /*
+ * If it's a mount point, have to stat each element because
+ * the inode number in the directory is for the entry in the
+ * parent directory, not the inode number of the mounted file.
+ */
+ save_errno = 0;
+ if (s.st_dev == dev) {
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (dp->d_fileno == ino)
+ break;
+ }
+ } else
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (ISDOT(dp))
+ continue;
+ bcopy(dp->d_name, bup, dp->d_namlen + 1);
+
+ /* Save the first error for later. */
+ if (lstat(up, &s)) {
+ if (save_errno == 0)
+ save_errno = __os_get_errno();
+ __os_set_errno(0);
+ continue;
+ }
+ if (s.st_dev == dev && s.st_ino == ino)
+ break;
+ }
+
+ /*
+ * Check for length of the current name, preceding slash,
+ * leading slash.
+ */
+ if (bpt - pt < dp->d_namlen + (first ? 1 : 2)) {
+ size_t len, off;
+
+ if (!ptsize) {
+ __os_set_errno(ERANGE);
+ goto err;
+ }
+ off = bpt - pt;
+ len = ept - bpt;
+ if (__os_realloc(NULL, ptsize *= 2, &pt) != 0)
+ goto err;
+ bpt = pt + off;
+ ept = pt + ptsize;
+ bcopy(bpt, ept - len, len);
+ bpt = ept - len;
+ }
+ if (!first)
+ *--bpt = PATH_SEPARATOR[0];
+ bpt -= dp->d_namlen;
+ bcopy(dp->d_name, bpt, dp->d_namlen);
+ (void)closedir(dir);
+
+ /* Truncate any file name. */
+ *bup = '\0';
+ }
+
+notfound:
+ /*
+ * If readdir set errno, use it, not any saved error; otherwise,
+ * didn't find the current directory in its parent directory, set
+ * errno to ENOENT.
+ */
+ if (__os_get_errno_ret_zero() == 0)
+ __os_set_errno(save_errno == 0 ? ENOENT : save_errno);
+ /* FALLTHROUGH */
+err:
+ if (ptsize)
+ __os_free(NULL, pt);
+ __os_free(NULL, up);
+ return (NULL);
+}
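
The getcwd replacement above builds the path from the end of the caller's buffer backwards, walking ".." one level at a time until the root device/inode pair is reached. A minimal caller sketch (hypothetical test program, not part of the patch), assuming either the system getcwd or this clib fallback is linked in:

#include <stdio.h>
#include <stddef.h>

char *getcwd(char *, size_t);	/* system version, or clib/getcwd.c when HAVE_GETCWD is undefined */

int
main(void)
{
	char buf[1024];

	/* A caller-supplied buffer must be at least 2 bytes; size 1 yields ERANGE. */
	if (getcwd(buf, sizeof(buf)) == NULL) {
		perror("getcwd");
		return (1);
	}
	printf("cwd: %s\n", buf);
	return (0);
}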
diff --git a/libdb/clib/getopt.c b/libdb/clib/getopt.c
new file mode 100644
index 0000000..f9c0fc8
--- /dev/null
+++ b/libdb/clib/getopt.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1987, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+int __db_getopt_reset; /* global reset for VxWorks. */
+
+int opterr = 1, /* if error message should be printed */
+ optind = 1, /* index into parent argv vector */
+ optopt, /* character checked for validity */
+ optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+
+#undef BADCH
+#define BADCH (int)'?'
+#undef BADARG
+#define BADARG (int)':'
+#undef EMSG
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ *
+ * PUBLIC: #ifndef HAVE_GETOPT
+ * PUBLIC: int getopt __P((int, char * const *, const char *));
+ * PUBLIC: #endif
+ */
+int
+getopt(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ static char *progname;
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+
+ /*
+ * VxWorks needs to be able to repeatedly call getopt from multiple
+ * programs within its global name space.
+ */
+ if (__db_getopt_reset) {
+ __db_getopt_reset = 0;
+
+ opterr = optind = 1;
+ optopt = optreset = 0;
+ optarg = NULL;
+ progname = NULL;
+ place = EMSG;
+ }
+ if (!progname) {
+ if ((progname = __db_rpath(*nargv)) == NULL)
+ progname = *nargv;
+ else
+ ++progname;
+ }
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (EOF);
+ }
+ if (place[1] && *++place == '-') { /* found "--" */
+ ++optind;
+ place = EMSG;
+ return (EOF);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means EOF.
+ */
+ if (optopt == (int)'-')
+ return (EOF);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", progname, optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ }
+ else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if (*ostr == ':')
+ return (BADARG);
+ if (opterr)
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ progname, optopt);
+ return (BADCH);
+ }
+ else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
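
The getopt fallback keeps the classic BSD interface: option characters come back one at a time, optarg holds an option's argument, optind indexes the first non-option argument, and EOF marks the end of options. A minimal caller sketch (hypothetical, not part of the patch):

#include <stdio.h>

extern char *optarg;		/* argument associated with the current option */
extern int optind;		/* index of the first non-option argument */
int getopt(int, char * const *, const char *);

int
main(int argc, char *argv[])
{
	int ch, verbose = 0;
	char *file = NULL;

	/* "vf:" accepts -v and -f <file>; a trailing ':' means "takes an argument". */
	while ((ch = getopt(argc, argv, "vf:")) != EOF)
		switch (ch) {
		case 'v':
			verbose = 1;
			break;
		case 'f':
			file = optarg;
			break;
		default:
			fprintf(stderr, "usage: example [-v] [-f file]\n");
			return (1);
		}
	printf("verbose=%d file=%s, remaining args start at argv[%d]\n",
	    verbose, file == NULL ? "(none)" : file, optind);
	return (0);
}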
diff --git a/libdb/clib/memcmp.c b/libdb/clib/memcmp.c
new file mode 100644
index 0000000..f3c1337
--- /dev/null
+++ b/libdb/clib/memcmp.c
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * memcmp --
+ *
+ * PUBLIC: #ifndef HAVE_MEMCMP
+ * PUBLIC: int memcmp __P((const void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+int
+memcmp(s1, s2, n)
+ char *s1, *s2;
+ size_t n;
+{
+ if (n != 0) {
+ unsigned char *p1 = (unsigned char *)s1,
+ *p2 = (unsigned char *)s2;
+ do {
+ if (*p1++ != *p2++)
+ return (*--p1 - *--p2);
+ } while (--n != 0);
+ }
+ return (0);
+}
diff --git a/libdb/clib/memmove.c b/libdb/clib/memmove.c
new file mode 100644
index 0000000..8cf3c3e
--- /dev/null
+++ b/libdb/clib/memmove.c
@@ -0,0 +1,155 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef int word; /* "word" used for optimal copy speed */
+
+#undef wsize
+#define wsize sizeof(word)
+#undef wmask
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+#ifdef MEMCOPY
+/*
+ * PUBLIC: #ifndef HAVE_MEMCPY
+ * PUBLIC: void *memcpy __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memcpy(dst0, src0, length)
+#else
+#ifdef MEMMOVE
+/*
+ * PUBLIC: #ifndef HAVE_MEMMOVE
+ * PUBLIC: void *memmove __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memmove(dst0, src0, length)
+#else
+void
+bcopy(src0, dst0, length)
+#endif
+#endif
+ void *dst0;
+ const void *src0;
+ register size_t length;
+{
+ register char *dst = dst0;
+ register const char *src = src0;
+ register size_t t;
+
+ if (length == 0 || dst == src) /* nothing to do */
+ goto done;
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#undef TLOOP
+#define TLOOP(s) if (t) TLOOP1(s)
+#undef TLOOP1
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+ t = (int)src; /* only need low bits */
+ if ((t | (int)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (int)dst) & wmask || length < wsize)
+ t = length;
+ else
+ t = wsize - (t & wmask);
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (int)src;
+ if ((t | (int)dst) & wmask) {
+ if ((t ^ (int)dst) & wmask || length <= wsize)
+ t = length;
+ else
+ t &= wmask;
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+#if defined(MEMCOPY) || defined(MEMMOVE)
+ return (dst0);
+#else
+ return;
+#endif
+}
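
The copy loop above picks a forward or backward direction depending on how source and destination overlap, which is what distinguishes memmove/bcopy from a plain memcpy. A small sketch of the behaviour it guarantees (hypothetical, not part of the patch):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[] = "abcdef";

	/*
	 * Shift the tail left by one byte.  Source and destination
	 * overlap, and with dst below src only a forward copy is safe;
	 * copying backward would overwrite bytes before they are read.
	 */
	memmove(buf, buf + 1, strlen(buf + 1) + 1);
	printf("%s\n", buf);		/* prints "bcdef" */
	return (0);
}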
diff --git a/libdb/clib/raise.c b/libdb/clib/raise.c
new file mode 100644
index 0000000..810c0f8
--- /dev/null
+++ b/libdb/clib/raise.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+/*
+ * raise --
+ * Send a signal to the current process.
+ *
+ * PUBLIC: #ifndef HAVE_RAISE
+ * PUBLIC: int raise __P((int));
+ * PUBLIC: #endif
+ */
+int
+raise(s)
+ int s;
+{
+ /*
+ * Do not use __os_id(), as it may not return the process ID -- any
+ * system with kill(3) probably has getpid(3).
+ */
+ return (kill(getpid(), s));
+}
diff --git a/libdb/clib/snprintf.c b/libdb/clib/snprintf.c
new file mode 100644
index 0000000..f269c05
--- /dev/null
+++ b/libdb/clib/snprintf.c
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * snprintf --
+ * Bounded version of sprintf.
+ *
+ * PUBLIC: #ifndef HAVE_SNPRINTF
+ * PUBLIC: int snprintf __P((char *, size_t, const char *, ...));
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_SNPRINTF
+int
+#ifdef __STDC__
+snprintf(char *str, size_t n, const char *fmt, ...)
+#else
+snprintf(str, n, fmt, va_alist)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ static int ret_charpnt = -1;
+ va_list ap;
+ int len;
+
+ COMPQUIET(n, 0);
+
+ /*
+ * Some old versions of sprintf return a pointer to the first argument
+ * instead of a character count. Assume the return value of snprintf,
+ * vsprintf, etc. will be the same as sprintf, and check the easy one.
+ *
+ * We do this test at run-time because it's not a test we can do in a
+ * cross-compilation environment.
+ */
+ if (ret_charpnt == -1) {
+ char buf[10];
+
+ ret_charpnt =
+ sprintf(buf, "123") != 3 ||
+ sprintf(buf, "123456789") != 9 ||
+ sprintf(buf, "1234") != 4;
+ }
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ len = vsprintf(str, fmt, ap);
+ va_end(ap);
+ return (ret_charpnt ? (int)strlen(str) : len);
+}
+#endif
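
The fallback above cannot actually bound the output, so it only papers over a missing snprintf; the interesting part is the one-time run-time probe for pre-ANSI sprintf implementations that return a pointer to the buffer instead of a character count. The same probe in isolation (hypothetical sketch, not part of the patch):

#include <stdio.h>

int
main(void)
{
	char buf[16];
	int ret_charpnt;

	/*
	 * A C89 library's sprintf returns the number of characters
	 * written (3 here); some very old libraries return a pointer
	 * to buf instead, making the comparison non-zero.
	 */
	ret_charpnt = sprintf(buf, "123") != 3;
	printf("sprintf returns a %s\n",
	    ret_charpnt ? "buffer pointer; fall back to strlen()" : "character count");
	return (0);
}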
diff --git a/libdb/clib/strcasecmp.c b/libdb/clib/strcasecmp.c
new file mode 100644
index 0000000..de8f0e7
--- /dev/null
+++ b/libdb/clib/strcasecmp.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 1987, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+/*
+ * This array is designed for mapping upper and lower case letter
+ * together for a case independent comparison. The mappings are
+ * based upon ascii character sequences.
+ */
+static const unsigned char charmap[] = {
+ '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007',
+ '\010', '\011', '\012', '\013', '\014', '\015', '\016', '\017',
+ '\020', '\021', '\022', '\023', '\024', '\025', '\026', '\027',
+ '\030', '\031', '\032', '\033', '\034', '\035', '\036', '\037',
+ '\040', '\041', '\042', '\043', '\044', '\045', '\046', '\047',
+ '\050', '\051', '\052', '\053', '\054', '\055', '\056', '\057',
+ '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067',
+ '\070', '\071', '\072', '\073', '\074', '\075', '\076', '\077',
+ '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\133', '\134', '\135', '\136', '\137',
+ '\140', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\173', '\174', '\175', '\176', '\177',
+ '\200', '\201', '\202', '\203', '\204', '\205', '\206', '\207',
+ '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217',
+ '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227',
+ '\230', '\231', '\232', '\233', '\234', '\235', '\236', '\237',
+ '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247',
+ '\250', '\251', '\252', '\253', '\254', '\255', '\256', '\257',
+ '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267',
+ '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277',
+ '\300', '\301', '\302', '\303', '\304', '\305', '\306', '\307',
+ '\310', '\311', '\312', '\313', '\314', '\315', '\316', '\317',
+ '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327',
+ '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337',
+ '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
+ '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
+ '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367',
+ '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377'
+};
+
+/*
+ * strcasecmp --
+ * Do strcmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: #ifndef HAVE_STRCASECMP
+ * PUBLIC: int strcasecmp __P((const char *, const char *));
+ * PUBLIC: #endif
+ */
+int
+strcasecmp(s1, s2)
+ const char *s1, *s2;
+{
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ while (cm[*us1] == cm[*us2++])
+ if (*us1++ == '\0')
+ return (0);
+ return (cm[*us1] - cm[*--us2]);
+}
+
+/*
+ * strncasecmp --
+ * Do strncmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: #ifndef HAVE_STRCASECMP
+ * PUBLIC: int strncasecmp __P((const char *, const char *, size_t));
+ * PUBLIC: #endif
+ */
+int
+strncasecmp(s1, s2, n)
+ const char *s1, *s2;
+ register size_t n;
+{
+ if (n != 0) {
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ do {
+ if (cm[*us1] != cm[*us2++])
+ return (cm[*us1] - cm[*--us2]);
+ if (*us1++ == '\0')
+ break;
+ } while (--n != 0);
+ }
+ return (0);
+}
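
Both routines fold case through the 256-entry charmap table rather than calling tolower(), so the comparison is locale-independent and ASCII-only. A minimal caller sketch (hypothetical, not part of the patch):

#include <stdio.h>
#include <stddef.h>

int strcasecmp(const char *, const char *);
int strncasecmp(const char *, const char *, size_t);

int
main(void)
{
	/* 0 means "equal ignoring case", following the strcmp/strncmp convention. */
	printf("%d\n", strcasecmp("Berkeley", "BERKELEY"));	/* 0 */
	printf("%d\n", strncasecmp("BTree", "btree_auto", 5));	/* 0: first 5 chars match */
	printf("%d\n", strcasecmp("db", "dbm"));		/* negative: "db" sorts first */
	return (0);
}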
diff --git a/libdb/clib/strdup.c b/libdb/clib/strdup.c
new file mode 100644
index 0000000..3593195
--- /dev/null
+++ b/libdb/clib/strdup.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+/*
+ * strdup --
+ *
+ * PUBLIC: #ifndef HAVE_STRDUP
+ * PUBLIC: char *strdup __P((const char *));
+ * PUBLIC: #endif
+ */
+char *
+strdup(str)
+ const char *str;
+{
+ size_t len;
+ char *copy;
+
+ len = strlen(str) + 1;
+ if (!(copy = malloc((u_int)len)))
+ return (NULL);
+ memcpy(copy, str, len);
+ return (copy);
+}
diff --git a/libdb/clib/strerror.c b/libdb/clib/strerror.c
new file mode 100644
index 0000000..c61a787
--- /dev/null
+++ b/libdb/clib/strerror.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+/*
+ * strerror --
+ * Return the string associated with an errno.
+ *
+ * PUBLIC: #ifndef HAVE_STRERROR
+ * PUBLIC: char *strerror __P((int));
+ * PUBLIC: #endif
+ */
+char *
+strerror(num)
+ int num;
+{
+ extern int sys_nerr;
+ extern char *sys_errlist[];
+#undef UPREFIX
+#define UPREFIX "Unknown error: "
+ static char ebuf[40] = UPREFIX; /* 64-bit number + slop */
+ int errnum;
+ char *p, *t, tmp[40];
+
+ errnum = num; /* convert to unsigned */
+ if (errnum < sys_nerr)
+ return(sys_errlist[errnum]);
+
+ /* Do this by hand, so we don't include stdio(3). */
+ t = tmp;
+ do {
+ *t++ = "0123456789"[errnum % 10];
+ } while (errnum /= 10);
+ for (p = ebuf + sizeof(UPREFIX) - 1;;) {
+ *p++ = *--t;
+ if (t <= tmp)
+ break;
+ }
+ return(ebuf);
+}
diff --git a/libdb/clib/vsnprintf.c b/libdb/clib/vsnprintf.c
new file mode 100644
index 0000000..272e92e
--- /dev/null
+++ b/libdb/clib/vsnprintf.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * vsnprintf --
+ * Bounded version of vsprintf.
+ *
+ * PUBLIC: #ifndef HAVE_VSNPRINTF
+ * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list));
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_VSNPRINTF
+int
+vsnprintf(str, n, fmt, ap)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_list ap;
+{
+ COMPQUIET(n, 0);
+
+#ifdef SPRINTF_RET_CHARPNT
+ (void)vsprintf(str, fmt, ap);
+ return (strlen(str));
+#else
+ return (vsprintf(str, fmt, ap));
+#endif
+}
+#endif
diff --git a/libdb/common/db_byteorder.c b/libdb/common/db_byteorder.c
new file mode 100644
index 0000000..e2f8ab5
--- /dev/null
+++ b/libdb/common/db_byteorder.c
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_isbigendian --
+ * Return 1 if big-endian (Motorola and Sparc), not little-endian
+ * (Intel and Vax). We do this work at run-time, rather than at
+ * configuration time so cross-compilation and general embedded
+ * system support is simpler.
+ *
+ * PUBLIC: int __db_isbigendian __P((void));
+ */
+int
+__db_isbigendian()
+{
+ union { /* From Harbison & Steele. */
+ long l;
+ char c[sizeof(long)];
+ } u;
+
+ u.l = 1;
+ return (u.c[sizeof(long) - 1] == 1);
+}
+
+/*
+ * __db_byteorder --
+ * Return if we need to do byte swapping, checking for illegal
+ * values.
+ *
+ * PUBLIC: int __db_byteorder __P((DB_ENV *, int));
+ */
+int
+__db_byteorder(dbenv, lorder)
+ DB_ENV *dbenv;
+ int lorder;
+{
+ int is_bigendian;
+
+ is_bigendian = __db_isbigendian();
+
+ switch (lorder) {
+ case 0:
+ break;
+ case 1234:
+ if (is_bigendian)
+ return (DB_SWAPBYTES);
+ break;
+ case 4321:
+ if (!is_bigendian)
+ return (DB_SWAPBYTES);
+ break;
+ default:
+ __db_err(dbenv,
+ "unsupported byte order, only big and little-endian supported");
+ return (EINVAL);
+ }
+ return (0);
+}
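
__db_isbigendian decides byte order at run time with a union, so one binary works on either architecture and cross-compilation needs no configure-time test. The same trick in standalone form (hypothetical sketch, not part of the patch):

#include <stdio.h>

int
main(void)
{
	union {			/* same layout __db_isbigendian uses */
		long l;
		char c[sizeof(long)];
	} u;

	u.l = 1;
	/* On a big-endian machine the 1 lands in the last byte of the array. */
	printf("%s-endian\n", u.c[sizeof(long) - 1] == 1 ? "big" : "little");
	return (0);
}

__db_byteorder then compares this result against the caller's requested lorder (1234 or 4321) and returns DB_SWAPBYTES when the two differ.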
diff --git a/libdb/common/db_err.c b/libdb/common/db_err.c
new file mode 100644
index 0000000..afe10a7
--- /dev/null
+++ b/libdb/common/db_err.c
@@ -0,0 +1,579 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * __db_fchk --
+ * General flags checking routine.
+ *
+ * PUBLIC: int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+ */
+int
+__db_fchk(dbenv, name, flags, ok_flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, ok_flags;
+{
+ return (LF_ISSET(~ok_flags) ? __db_ferr(dbenv, name, 0) : 0);
+}
+
+/*
+ * __db_fcchk --
+ * General combination flags checking routine.
+ *
+ * PUBLIC: int __db_fcchk
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__db_fcchk(dbenv, name, flags, flag1, flag2)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, flag1, flag2;
+{
+ return (LF_ISSET(flag1) &&
+ LF_ISSET(flag2) ? __db_ferr(dbenv, name, 1) : 0);
+}
+
+/*
+ * __db_ferr --
+ * Common flag errors.
+ *
+ * PUBLIC: int __db_ferr __P((const DB_ENV *, const char *, int));
+ */
+int
+__db_ferr(dbenv, name, iscombo)
+ const DB_ENV *dbenv;
+ const char *name;
+ int iscombo;
+{
+ __db_err(dbenv, "illegal flag %sspecified to %s",
+ iscombo ? "combination " : "", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_pgerr --
+ * Error when unable to retrieve a specified page.
+ *
+ * PUBLIC: void __db_pgerr __P((DB *, db_pgno_t, int));
+ */
+void
+__db_pgerr(dbp, pgno, errval)
+ DB *dbp;
+ db_pgno_t pgno;
+ int errval;
+{
+ /*
+ * Three things are certain:
+ * Death, taxes, and lost data.
+ * Guess which has occurred.
+ */
+ __db_err(dbp->dbenv,
+ "unable to create/retrieve page %lu", (u_long)pgno);
+ (void)__db_panic(dbp->dbenv, errval);
+}
+
+/*
+ * __db_pgfmt --
+ * Error when a page has the wrong format.
+ *
+ * PUBLIC: int __db_pgfmt __P((DB_ENV *, db_pgno_t));
+ */
+int
+__db_pgfmt(dbenv, pgno)
+ DB_ENV *dbenv;
+ db_pgno_t pgno;
+{
+ __db_err(dbenv, "page %lu: illegal page type or format", (u_long)pgno);
+ return (__db_panic(dbenv, EINVAL));
+}
+
+/*
+ * __db_eopnotsup --
+ * Common operation not supported message.
+ *
+ * PUBLIC: int __db_eopnotsup __P((const DB_ENV *));
+ */
+int
+__db_eopnotsup(dbenv)
+ const DB_ENV *dbenv;
+{
+ __db_err(dbenv, "operation not supported");
+#ifdef EOPNOTSUPP
+ return (EOPNOTSUPP);
+#else
+ return (EINVAL);
+#endif
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __db_assert --
+ * Error when an assertion fails. Only checked if #DIAGNOSTIC defined.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __db_assert __P((const char *, const char *, int));
+ * PUBLIC: #endif
+ */
+void
+__db_assert(failedexpr, file, line)
+ const char *failedexpr, *file;
+ int line;
+{
+ (void)fprintf(stderr,
+ "__db_assert: \"%s\" failed: file \"%s\", line %d\n",
+ failedexpr, file, line);
+ (void)fflush(stderr);
+
+ /* We want a stack trace of how this could possibly happen. */
+ abort();
+
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __db_panic_msg --
+ * Just report that someone else panicked.
+ *
+ * PUBLIC: int __db_panic_msg __P((DB_ENV *));
+ */
+int
+__db_panic_msg(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "fatal region error detected; run recovery");
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * __db_panic --
+ * Lock out the tree due to unrecoverable error.
+ *
+ * PUBLIC: int __db_panic __P((DB_ENV *, int));
+ */
+int
+__db_panic(dbenv, errval)
+ DB_ENV *dbenv;
+ int errval;
+{
+ if (dbenv != NULL) {
+ PANIC_SET(dbenv, 1);
+
+ dbenv->panic_errval = errval;
+
+ __db_err(dbenv, "PANIC: %s", db_strerror(errval));
+
+ if (dbenv->db_paniccall != NULL)
+ dbenv->db_paniccall(dbenv, errval);
+ }
+
+#if defined(DIAGNOSTIC) && !defined(CONFIG_TEST)
+ /*
+ * We want a stack trace of how this could possibly happen.
+ *
+ * Don't drop core if it's the test suite -- it's reasonable for the
+ * test suite to check to make sure that DB_RUNRECOVERY is returned
+ * under certain conditions.
+ */
+ abort();
+#endif
+
+ /*
+ * Chaos reigns within.
+ * Reflect, repent, and reboot.
+ * Order shall return.
+ */
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * db_strerror --
+ * ANSI C strerror(3) for DB.
+ *
+ * EXTERN: char *db_strerror __P((int));
+ */
+char *
+db_strerror(error)
+ int error;
+{
+ if (error == 0)
+ return ("Successful return: 0");
+ if (error > 0)
+ return (strerror(error));
+
+ /*
+ * !!!
+ * The Tcl API requires that some of these return strings be compared
+ * against strings stored in application scripts. So, any of these
+ * errors that do not invariably result in a Tcl exception may not be
+ * altered.
+ */
+ switch (error) {
+ case DB_DONOTINDEX:
+ return ("DB_DONOTINDEX: Secondary index callback returns null");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return
+ ("DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_NOSERVER:
+ return ("DB_NOSERVER: Fatal error, no server");
+ case DB_NOSERVER_HOME:
+ return ("DB_NOSERVER_HOME: Home unrecognized at server");
+ case DB_NOSERVER_ID:
+ return ("DB_NOSERVER_ID: Identifier unrecognized at server");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_OLD_VERSION:
+ return ("DB_OLDVERSION: Database requires a version upgrade");
+ case DB_PAGE_NOTFOUND:
+ return ("DB_PAGE_NOTFOUND: Requested page not found");
+ case DB_REP_DUPMASTER:
+ return ("DB_REP_DUPMASTER: A second master site appeared");
+ case DB_REP_HOLDELECTION:
+ return ("DB_REP_HOLDELECTION: Need to hold an election");
+ case DB_REP_NEWMASTER:
+ return ("DB_REP_NEWMASTER: A new master has declared itself");
+ case DB_REP_NEWSITE:
+ return ("DB_REP_NEWSITE: A new site has entered the system");
+ case DB_REP_OUTDATED:
+ return
+ ("DB_REP_OUTDATED: Insufficient logs on master to recover");
+ case DB_REP_UNAVAIL:
+ return ("DB_REP_UNAVAIL: Unable to elect a master");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ case DB_SECONDARY_BAD:
+ return
+ ("DB_SECONDARY_BAD: Secondary index item missing from primary");
+ case DB_VERIFY_BAD:
+ return ("DB_VERIFY_BAD: Database verification failed");
+ default: {
+ /*
+ * !!!
+ * Room for a 64-bit number + slop. This buffer is only used
+ * if we're given an unknown error, which should never happen.
+ * Note, however, we're no longer thread-safe if it does.
+ */
+ static char ebuf[40];
+
+ (void)snprintf(ebuf, sizeof(ebuf), "Unknown error: %d", error);
+ return (ebuf);
+ }
+ }
+}
+
+/*
+ * __db_err --
+ * Standard DB error routine. The same as errx, except we don't write
+ * to stderr if no output mechanism was specified.
+ *
+ * PUBLIC: void __db_err __P((const DB_ENV *, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_err(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__db_err(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 0, fmt);
+}
+
+/*
+ * __db_errcall --
+ * Do the error message work for callback functions.
+ *
+ * PUBLIC: void __db_errcall
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
+ */
+void
+__db_errcall(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ char *p;
+ char errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ p = errbuf;
+ if (fmt != NULL)
+ p += vsnprintf(errbuf, sizeof(errbuf), fmt, ap);
+ if (error_set)
+ p += snprintf(p,
+ sizeof(errbuf) - (p - errbuf), ": %s", db_strerror(error));
+ /*
+ * !!!
+ * We're potentially manipulating strings handed us by the application,
+ * and on systems without a real snprintf() the sprintf() calls could
+ * have overflowed the buffer. We can't do anything about it now, but
+ * we don't want to return control to the application, we might have
+ * overwritten the stack with a Trojan horse. We're not trying to do
+ * anything recoverable here because systems without snprintf support
+ * are pretty rare anymore.
+ */
+ if ((size_t)(p - errbuf) > sizeof(errbuf)) {
+ (void)fprintf(stderr,
+ "Berkeley DB: error callback interface buffer overflow\n");
+ (void)fflush(stderr);
+
+ abort();
+ /* NOTREACHED */
+ }
+
+ dbenv->db_errcall(dbenv->db_errpfx, errbuf);
+}
+
+/*
+ * __db_errfile --
+ * Do the error message work for FILE *s.
+ *
+ * PUBLIC: void __db_errfile
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
+ */
+void
+__db_errfile(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ FILE *fp;
+
+ fp = dbenv == NULL ||
+ dbenv->db_errfile == NULL ? stderr : dbenv->db_errfile;
+
+ if (dbenv != NULL && dbenv->db_errpfx != NULL)
+ (void)fprintf(fp, "%s: ", dbenv->db_errpfx);
+ if (fmt != NULL) {
+ (void)vfprintf(fp, fmt, ap);
+ if (error_set)
+ (void)fprintf(fp, ": ");
+ }
+ if (error_set)
+ (void)fprintf(fp, "%s", db_strerror(error));
+ (void)fprintf(fp, "\n");
+ (void)fflush(fp);
+}
+
+/*
+ * __db_logmsg --
+ * Write information into the DB log.
+ *
+ * PUBLIC: void __db_logmsg __P((const DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_logmsg(const DB_ENV *dbenv,
+ DB_TXN *txnid, const char *opname, u_int32_t flags, const char *fmt, ...)
+#else
+__db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist)
+ const DB_ENV *dbenv;
+ DB_TXN *txnid;
+ const char *opname, *fmt;
+ u_int32_t flags;
+ va_dcl
+#endif
+{
+ DBT opdbt, msgdbt;
+ DB_LSN lsn;
+ va_list ap;
+ char __logbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ if (!LOGGING_ON(dbenv))
+ return;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ memset(&opdbt, 0, sizeof(opdbt));
+ opdbt.data = (void *)opname;
+ opdbt.size = (u_int32_t)(strlen(opname) + 1);
+
+ memset(&msgdbt, 0, sizeof(msgdbt));
+ msgdbt.data = __logbuf;
+ msgdbt.size = vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap);
+
+ /*
+ * XXX
+ * Explicitly discard the const.  Otherwise, we would have to use
+ * const DB_ENV references throughout the logging subsystem.
+ */
+ __db_debug_log(
+ (DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0);
+
+ va_end(ap);
+}
+
+/*
+ * __db_unknown_flag -- report internal error
+ *
+ * PUBLIC: int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_unknown_flag(dbenv, routine, flag)
+ DB_ENV *dbenv;
+ char *routine;
+ u_int32_t flag;
+{
+ __db_err(dbenv, "%s: Unknown flag: 0x%x", routine, flag);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+/*
+ * __db_unknown_type -- report internal error
+ *
+ * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, DBTYPE));
+ */
+int
+__db_unknown_type(dbenv, routine, type)
+ DB_ENV *dbenv;
+ char *routine;
+ DBTYPE type;
+{
+ __db_err(dbenv, "%s: Unknown db type: 0x%x", routine, type);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+/*
+ * __db_check_txn --
+ * Check for common transaction errors.
+ *
+ * PUBLIC: int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int));
+ */
+int
+__db_check_txn(dbp, txn, assoc_lid, read_op)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t assoc_lid;
+ int read_op;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we are in recovery or aborting a transaction, then we
+ * don't need to enforce the rule that transactional operations
+ * are not allowed on non-transactional dbps (and vice versa).
+ * This happens all the time, as the dbp during
+ * an abort may be transactional, but we undo operations
+ * outside a transaction since we're aborting.
+ */
+ if (IS_RECOVERING(dbenv) || F_ISSET(dbp, DB_AM_RECOVER))
+ return (0);
+
+ /*
+ * Check for common transaction errors:
+ * Failure to pass a transaction handle to a DB operation
+ * Failure to configure the DB handle in a proper environment
+ * Operation on a handle whose open commit hasn't completed.
+ *
+ * Read operations don't require a txn even if we've used one before
+ * with this handle, although if they do have a txn, we'd better be
+ * prepared for it.
+ */
+ if (txn == NULL) {
+ if (!read_op && F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "DB handle previously used in transaction, missing transaction handle");
+ return (EINVAL);
+ }
+
+ if (dbp->cur_lid >= TXN_MINIMUM)
+ goto open_err;
+ } else {
+ if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid)
+ goto open_err;
+
+ if (!TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ if (!F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "Transaction specified for a DB handle opened outside a transaction");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If dbp->associate_lid is not DB_LOCK_INVALIDID, that means we're in
+ * the middle of a DB->associate with DB_CREATE (i.e., a secondary index
+ * creation).
+ *
+ * In addition to the usual transaction rules, we need to lock out
+ * non-transactional updates that aren't part of the associate (and
+ * thus are using some other locker ID).
+ *
+ * Transactional updates should simply block; from the time we
+ * decide to build the secondary until commit, we'll hold a write
+ * lock on all of its pages, so it should be safe to attempt to update
+ * the secondary in another transaction (presumably by updating the
+ * primary).
+ */
+ if (!read_op && dbp->associate_lid != DB_LOCK_INVALIDID &&
+ txn != NULL && dbp->associate_lid != assoc_lid) {
+ __db_err(dbenv,
+ "Operation forbidden while secondary index is being created");
+ return (EINVAL);
+ }
+
+ return (0);
+open_err:
+ __db_err(dbenv,
+ "Transaction that opened the DB handle is still active");
+ return (EINVAL);
+}
+
+/*
+ * __db_not_txn_env --
+ * DB handle must be in an environment that supports transactions.
+ *
+ * PUBLIC: int __db_not_txn_env __P((DB_ENV *));
+ */
+int
+__db_not_txn_env(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "DB environment not configured for transactions");
+ return (EINVAL);
+}
diff --git a/libdb/common/db_getlong.c b/libdb/common/db_getlong.c
new file mode 100644
index 0000000..b298ecb
--- /dev/null
+++ b/libdb/common/db_getlong.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_getlong --
+ * Return a long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getlong
+ * PUBLIC: __P((DB *, const char *, char *, long, long, long *));
+ */
+int
+__db_getlong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ long min, max, *storep;
+{
+ long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtol(p, &end, 10);
+ if ((val == LONG_MIN || val == LONG_MAX) &&
+ __os_get_errno() == ERANGE) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%ld)\n",
+ progname, p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%ld)", p, min);
+ return (1);
+ }
+ if (val > max) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%ld)\n",
+ progname, p, max);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%ld)", p, max);
+ return (1);
+ }
+ *storep = val;
+ return (0);
+}
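+
+/*
+ * Illustrative sketch (not part of the original sources, guarded out of the
+ * build): parsing a numeric argument with __db_getlong.  The program name
+ * "db_example" and the bounds are hypothetical; with a NULL DB handle the
+ * error text goes to stderr.
+ */
+#if 0
+	long nitems;
+	char arg[] = "512";
+
+	/* Accept values in [1, 100000]. */
+	if (__db_getlong(NULL, "db_example", arg, 1, 100000, &nitems) != 0)
+		return (EXIT_FAILURE);
+	/* nitems == 512 */
+#endif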
+
+/*
+ * __db_getulong --
+ * Return an unsigned long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getulong
+ * PUBLIC: __P((DB *, const char *, char *, u_long, u_long, u_long *));
+ */
+int
+__db_getulong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ u_long min, max, *storep;
+{
+#if !defined(HAVE_STRTOUL)
+ COMPQUIET(min, 0);
+
+ return (__db_getlong(dbp, progname, p, 0, max, (long *)storep));
+#else
+ u_long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtoul(p, &end, 10);
+ if (val == ULONG_MAX && __os_get_errno() == ERANGE) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%lu)\n",
+ progname, p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%lu)", p, min);
+ return (1);
+ }
+
+ /*
+ * We allow a 0 to substitute as a max value for ULONG_MAX because
+ * 1) accepting only a 0 value is unlikely to be necessary, and 2)
+ * we don't want callers to have to use ULONG_MAX explicitly, as it
+ * may not exist on all platforms.
+ */
+ if (max != 0 && val > max) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%lu)\n",
+ progname, p, max);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%lu)", p, max);
+ return (1);
+ }
+ *storep = val;
+ return (0);
+#endif /* !defined(HAVE_STRTOUL) */
+}
diff --git a/libdb/common/db_idspace.c b/libdb/common/db_idspace.c
new file mode 100644
index 0000000..1d60fee
--- /dev/null
+++ b/libdb/common/db_idspace.c
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_idcmp __P((const void *, const void *));
+
+static int
+__db_idcmp(a, b)
+ const void *a;
+ const void *b;
+{
+ u_int32_t i, j;
+
+ i = *(u_int32_t *)a;
+ j = *(u_int32_t *)b;
+
+ if (i < j)
+ return (-1);
+ else if (i > j)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * __db_idspace --
+ *
+ * On input, minp and maxp contain the minimum and maximum valid values for
+ * the name space; on return, they contain the minimum and maximum IDs
+ * available (found by locating the biggest gap).
+ *
+ * PUBLIC: void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *));
+ */
+void
+__db_idspace(inuse, n, minp, maxp)
+ u_int32_t *inuse;
+ int n;
+ u_int32_t *minp, *maxp;
+{
+ int i, low;
+ u_int32_t gap, t;
+
+ /* A single locker ID is a special case. */
+ if (n == 1) {
+ /*
+ * If the single item in use is the last one in the range,
+ * we have to wrap around, which means the minimum stays at the
+ * value we came in with, so we leave *minp alone.
+ */
+ if (inuse[0] != *maxp)
+ *minp = inuse[0];
+ *maxp = inuse[0] - 1;
+ return;
+ }
+
+ gap = 0;
+ low = 0;
+ qsort(inuse, n, sizeof(u_int32_t), __db_idcmp);
+ for (i = 0; i < n - 1; i++)
+ if ((t = (inuse[i + 1] - inuse[i])) > gap) {
+ gap = t;
+ low = i;
+ }
+
+ /* Check for largest gap at the end of the space. */
+ if ((*maxp - inuse[n - 1]) + (inuse[0] - *minp) > gap) {
+ /* Do same check as we do in the n == 1 case. */
+ if (inuse[n - 1] != *maxp)
+ *minp = inuse[n - 1];
+ *maxp = inuse[0];
+ } else {
+ *minp = inuse[low];
+ *maxp = inuse[low + 1];
+ }
+}
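+
+/*
+ * Illustrative sketch (not part of the original sources, guarded out of the
+ * build): a worked example of the gap search.  With IDs 5, 10 and 40 in use
+ * in a name space of 1..100, the largest gap is the wrap-around range
+ * 40..100,1..5, so the call leaves lo == 40 and hi == 5.
+ */
+#if 0
+	u_int32_t ids[] = { 5, 10, 40 };
+	u_int32_t lo = 1, hi = 100;
+
+	__db_idspace(ids, 3, &lo, &hi);	/* lo == 40, hi == 5 */
+#endif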
diff --git a/libdb/common/db_log2.c b/libdb/common/db_log2.c
new file mode 100644
index 0000000..fd43cad
--- /dev/null
+++ b/libdb/common/db_log2.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * PUBLIC: u_int32_t __db_log2 __P((u_int32_t));
+ */
+u_int32_t
+__db_log2(num)
+ u_int32_t num;
+{
+ u_int32_t i, limit;
+
+ limit = 1;
+ for (i = 0; limit < num; limit = limit << 1)
+ ++i;
+ return (i);
+}
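+
+/*
+ * Illustrative sketch (not part of the original sources, guarded out of the
+ * build): __db_log2 returns the number of doublings needed to reach num,
+ * i.e. the ceiling of log2(num) for num >= 1.
+ */
+#if 0
+	u_int32_t bits;
+
+	bits = __db_log2(1000);	/* bits == 10, since 2^10 = 1024 >= 1000 */
+#endif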
diff --git a/libdb/common/util_arg.c b/libdb/common/util_arg.c
new file mode 100644
index 0000000..c0a24ee
--- /dev/null
+++ b/libdb/common/util_arg.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+static char *__db_strsep __P((char **, const char *));
+
+/*
+ * __db_util_arg --
+ * Convert a string into an argc/argv pair.
+ *
+ * PUBLIC: int __db_util_arg __P((char *, char *, int *, char ***));
+ */
+int
+__db_util_arg(arg0, str, argcp, argvp)
+ char *arg0, *str, ***argvp;
+ int *argcp;
+{
+ int n, ret;
+ char **ap, **argv;
+
+#define MAXARGS 25
+ if ((ret =
+ __os_malloc(NULL, (MAXARGS + 1) * sizeof(char **), &argv)) != 0)
+ return (ret);
+
+ ap = argv;
+ *ap++ = arg0;
+ for (n = 1; (*ap = __db_strsep(&str, " \t")) != NULL;)
+ if (**ap != '\0') {
+ ++ap;
+ if (++n == MAXARGS)
+ break;
+ }
+ *ap = NULL;
+
+ *argcp = ap - argv;
+ *argvp = argv;
+
+ return (0);
+}
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Get next token from string *stringp, where tokens are possibly-empty
+ * strings separated by characters from delim.
+ *
+ * Writes NULs into the string at *stringp to end tokens.
+ * delim need not remain constant from call to call.
+ * On return, *stringp points past the last NUL written (if there might
+ * be further tokens), or is NULL (if there are definitely no more tokens).
+ *
+ * If *stringp is NULL, strsep returns NULL.
+ */
+static char *
+__db_strsep(stringp, delim)
+ char **stringp;
+ const char *delim;
+{
+ const char *spanp;
+ int c, sc;
+ char *s, *tok;
+
+ if ((s = *stringp) == NULL)
+ return (NULL);
+ for (tok = s;;) {
+ c = *s++;
+ spanp = delim;
+ do {
+ if ((sc = *spanp++) == c) {
+ if (c == 0)
+ s = NULL;
+ else
+ s[-1] = 0;
+ *stringp = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
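+
+/*
+ * Illustrative sketch (not part of the original sources, guarded out of the
+ * build): splitting a writable command string into an argc/argv pair with
+ * __db_util_arg.  The program name "db_example" is hypothetical; the argv
+ * array is allocated with __os_malloc and belongs to the caller.
+ */
+#if 0
+	int argc;
+	char **argv;
+	char cmd[] = "open -h /tmp/env";	/* modified in place */
+
+	if (__db_util_arg("db_example", cmd, &argc, &argv) == 0) {
+		/* argc == 4: "db_example", "open", "-h", "/tmp/env" */
+	}
+#endif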
diff --git a/libdb/common/util_cache.c b/libdb/common/util_cache.c
new file mode 100644
index 0000000..b7954fc
--- /dev/null
+++ b/libdb/common/util_cache.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_util_cache --
+ * Determine whether we have enough cache.
+ *
+ * PUBLIC: int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *));
+ */
+int
+__db_util_cache(dbenv, dbp, cachep, resizep)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t *cachep;
+ int *resizep;
+{
+ DBTYPE type;
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ u_int32_t pgsize;
+ int ret;
+ void *sp;
+
+ /*
+ * The current cache size is in cachep. If it's insufficient, set the
+ * memory referenced by resizep to 1 and set cachep to the minimum
+ * size needed.
+ */
+ if ((ret = dbp->get_type(dbp, &type)) != 0) {
+ dbenv->err(dbenv, ret, "DB->get_type");
+ return (ret);
+ }
+
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbenv->err(dbenv, ret, "DB->stat");
+ return (ret);
+ }
+
+ switch (type) {
+ case DB_QUEUE:
+ qsp = (DB_QUEUE_STAT *)sp;
+ pgsize = qsp->qs_pagesize;
+ break;
+ case DB_HASH:
+ hsp = (DB_HASH_STAT *)sp;
+ pgsize = hsp->hash_pagesize;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ bsp = (DB_BTREE_STAT *)sp;
+ pgsize = bsp->bt_pagesize;
+ break;
+ default:
+ dbenv->err(dbenv, ret, "unknown database type: %d", type);
+ return (EINVAL);
+ }
+ free(sp);
+
+ /*
+ * Make sure our current cache is big enough. We want at least
+ * DB_MINPAGECACHE pages in the cache.
+ */
+ if ((*cachep / pgsize) < DB_MINPAGECACHE) {
+ *resizep = 1;
+ *cachep = pgsize * DB_MINPAGECACHE;
+ } else
+ *resizep = 0;
+
+ return (0);
+}
diff --git a/libdb/common/util_log.c b/libdb/common/util_log.c
new file mode 100644
index 0000000..457505c
--- /dev/null
+++ b/libdb/common/util_log.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_util_logset --
+ * Log that we're running.
+ *
+ * PUBLIC: int __db_util_logset __P((const char *, char *));
+ */
+int
+__db_util_logset(progname, fname)
+ const char *progname;
+ char *fname;
+{
+ FILE *fp;
+ time_t now;
+ u_int32_t id;
+
+ if ((fp = fopen(fname, "w")) == NULL)
+ goto err;
+
+ (void)time(&now);
+ __os_id(&id);
+ fprintf(fp, "%s: %lu %s", progname, (u_long)id, ctime(&now));
+
+ if (fclose(fp) == EOF)
+ goto err;
+
+ return (0);
+
+err: fprintf(stderr, "%s: %s: %s\n", progname, fname, strerror(errno));
+ return (1);
+}
diff --git a/libdb/common/util_sig.c b/libdb/common/util_sig.c
new file mode 100644
index 0000000..b286cdc
--- /dev/null
+++ b/libdb/common/util_sig.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <signal.h>
+#endif
+
+#include "db_int.h"
+
+static int interrupt;
+static void onint __P((int));
+
+/*
+ * onint --
+ * Interrupt signal handler.
+ */
+static void
+onint(signo)
+ int signo;
+{
+ if ((interrupt = signo) == 0)
+ interrupt = SIGINT;
+}
+
+/*
+ * __db_util_siginit --
+ *
+ * PUBLIC: void __db_util_siginit __P((void));
+ */
+void
+__db_util_siginit()
+{
+ /*
+ * Initialize the set of signals for which we want to clean up.
+ * Generally, we try not to leave the shared regions locked if
+ * we can.
+ */
+#ifdef SIGHUP
+ (void)signal(SIGHUP, onint);
+#endif
+ (void)signal(SIGINT, onint);
+#ifdef SIGPIPE
+ (void)signal(SIGPIPE, onint);
+#endif
+ (void)signal(SIGTERM, onint);
+}
+
+/*
+ * __db_util_interrupted --
+ * Return if interrupted.
+ *
+ * PUBLIC: int __db_util_interrupted __P((void));
+ */
+int
+__db_util_interrupted()
+{
+ return (interrupt != 0);
+}
+
+/*
+ * __db_util_sigresend --
+ *
+ * PUBLIC: void __db_util_sigresend __P((void));
+ */
+void
+__db_util_sigresend()
+{
+ /* Resend any caught signal. */
+ if (interrupt != 0) {
+ (void)signal(interrupt, SIG_DFL);
+ (void)raise(interrupt);
+ /* NOTREACHED */
+ }
+}
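+
+/*
+ * Illustrative sketch (not part of the original sources, guarded out of the
+ * build): roughly the loop shape the utilities use with these helpers --
+ * install the handlers, poll for an interrupt between units of work, then
+ * re-raise the caught signal once cleanup is done.
+ */
+#if 0
+	__db_util_siginit();
+	while (!__db_util_interrupted()) {
+		/* ... one unit of work ... */
+	}
+	/* ... release any shared-region locks ... */
+	__db_util_sigresend();
+#endif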
diff --git a/libdb/cxx/cxx_db.cpp b/libdb/cxx/cxx_db.cpp
new file mode 100644
index 0000000..bd99986
--- /dev/null
+++ b/libdb/cxx/cxx_db.cpp
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macros for simple methods that pass through to the underlying
+// C method; the generated wrapper may return an error or raise an
+// exception.  Note these macros expect that the input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DB_METHOD(_name, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_CHECKED(_name, _cleanup, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ if (!db) { \
+ DB_ERROR("Db::" # _name, EINVAL, error_policy()); \
+ return (EINVAL); \
+ } \
+ if (_cleanup) \
+ cleanup(); \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_QUIET(_name, _argspec, _arglist) \
+int Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ return (db->_name _arglist); \
+}
+
+#define DB_METHOD_VOID(_name, _argspec, _arglist) \
+void Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ db->_name _arglist; \
+}
+
+// A truism for the Db object is that there is a valid
+// DB handle from the constructor until close().
+// After the close, the DB handle is invalid and
+// no operations are permitted on the Db (other than
+// destructor). Leaving the Db handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow Db objects to be closed and reopened.
+// This implied always keeping a valid DB object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+Db::Db(DbEnv *env, u_int32_t flags)
+: imp_(0)
+, env_(env)
+, construct_error_(0)
+, flags_(0)
+, construct_flags_(flags)
+, append_recno_callback_(0)
+, associate_callback_(0)
+, bt_compare_callback_(0)
+, bt_prefix_callback_(0)
+, dup_compare_callback_(0)
+, feedback_callback_(0)
+, h_hash_callback_(0)
+{
+ if (env_ == 0)
+ flags_ |= DB_CXX_PRIVATE_ENV;
+
+ if ((construct_error_ = initialize()) != 0)
+ DB_ERROR("Db::Db", construct_error_, error_policy());
+}
+
+// If the DB handle is still open, we close it. This is to make stack
+// allocation of Db objects easier so that they are cleaned up in the error
+// path. If the environment was closed prior to this, it may cause a trap, but
+// an error message is generated during the environment close. Applications
+// should call close explicitly in normal (non-exceptional) cases to check the
+// return value.
+//
+Db::~Db()
+{
+ DB *db;
+
+ db = unwrap(this);
+ if (db != NULL) {
+ cleanup();
+ (void)db->close(db, 0);
+ }
+}
+
+// private method to initialize during constructor.
+// initialize must create a backing DB object,
+// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
+//
+int Db::initialize()
+{
+ DB *db;
+ DB_ENV *cenv = unwrap(env_);
+ int ret;
+ u_int32_t cxx_flags;
+
+ cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
+
+ // Create a new underlying DB object.
+ // We rely on the fact that if a NULL DB_ENV* is given,
+ // one is allocated by DB.
+ //
+ if ((ret = db_create(&db, cenv,
+ construct_flags_ & ~cxx_flags)) != 0)
+ return (ret);
+
+ // Associate the DB with this object
+ imp_ = wrap(db);
+ db->api_internal = this;
+
+ // Create a new DbEnv from a DB_ENV* if it was created locally.
+ // It is deleted in Db::close().
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
+ env_ = new DbEnv(db->dbenv, cxx_flags);
+
+ return (0);
+}
+
+// private method to clean up after the destructor or during close.
+// If the environment was created by this Db object, we optionally
+// delete it, or return it so the caller can delete it after
+// last use.
+//
+void Db::cleanup()
+{
+ DB *db = unwrap(this);
+
+ if (db != NULL) {
+ // extra safety
+ db->api_internal = 0;
+ imp_ = 0;
+
+ // we must dispose of the DbEnv object if
+ // we created it. This will be the case
+ // if a NULL DbEnv was passed into the constructor.
+ // The underlying DB_ENV object will be inaccessible
+ // after the close, so we must clean it up now.
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
+ env_->cleanup();
+ delete env_;
+ env_ = 0;
+ }
+ }
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int Db::error_policy()
+{
+ if (env_ != NULL)
+ return (env_->error_policy());
+ else {
+ // If the env_ is null, that means that the user
+ // did not attach an environment, so the correct error
+ // policy can be deduced from constructor flags
+ // for this Db.
+ //
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+ }
+}
+
+int Db::close(u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ // after a DB->close (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = db->close(db, flags)) != 0)
+ DB_ERROR("Db::close", ret, error_policy());
+
+ return (ret);
+}
+
+// The following cast implies that Dbc can be no larger than DBC
+DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags),
+ (db, unwrap(txnid), (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(del, (DbTxn *txnid, Dbt *key, u_int32_t flags),
+ (db, unwrap(txnid), key, flags),
+ DB_RETOK_DBDEL)
+
+void Db::err(int error, const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, error, 1, 1, format);
+}
+
+void Db::errx(const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, 0, 0, 1, format);
+}
+
+DB_METHOD(fd, (int *fdp),
+ (db, fdp),
+ DB_RETOK_STD)
+
+int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->get(db, unwrap(txnid), key, value, flags);
+
+ if (!DB_RETOK_DBGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT("Db::get", value, error_policy());
+ else
+ DB_ERROR("Db::get", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+int Db::get_byteswapped(int *isswapped)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_byteswapped(db, isswapped));
+}
+
+int Db::get_type(DBTYPE *dbtype)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_type(db, dbtype));
+}
+
+// Dbc is a "compatible" subclass of DBC - that is, no virtual functions
+// or even extra data members, so these casts, although technically
+// non-portable, "should" always be okay.
+DB_METHOD(join, (Dbc **curslist, Dbc **cursorp, u_int32_t flags),
+ (db, (DBC **)curslist, (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(key_range,
+ (DbTxn *txnid, Dbt *key, DB_KEY_RANGE *results, u_int32_t flags),
+ (db, unwrap(txnid), key, results, flags),
+ DB_RETOK_STD)
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int Db::open(DbTxn *txnid, const char *file, const char *database,
+ DBTYPE type, u_int32_t flags, int mode)
+{
+ int ret;
+ DB *db = unwrap(this);
+
+ if (construct_error_ != 0)
+ ret = construct_error_;
+ else
+ ret = db->open(db, unwrap(txnid), file, database, type, flags,
+ mode);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("Db::open", ret, error_policy());
+
+ return (ret);
+}
+
+int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->pget(db, unwrap(txnid), key, pkey, value, flags);
+
+ /* The error handling here is identical to Db::get. */
+ if (!DB_RETOK_DBGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT("Db::pget", value, error_policy());
+ else
+ DB_ERROR("Db::pget", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+DB_METHOD(put,
+ (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags),
+ (db, unwrap(txnid), key, value, flags),
+ DB_RETOK_DBPUT)
+
+DB_METHOD_CHECKED(rename, 1,
+ (const char *file, const char *database, const char *newname,
+ u_int32_t flags),
+ (db, file, database, newname, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(remove, 1,
+ (const char *file, const char *database, u_int32_t flags),
+ (db, file, database, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(truncate, 0,
+ (DbTxn *txnid, u_int32_t *countp, u_int32_t flags),
+ (db, unwrap(txnid), countp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(stat, 0,
+ (void *sp, u_int32_t flags), (db, sp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(sync, 0,
+ (u_int32_t flags), (db, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(upgrade, 0,
+ (const char *name, u_int32_t flags), (db, name, flags), DB_RETOK_STD)
+
+////////////////////////////////////////////////////////////////////////
+//
+// callbacks
+//
+// *_intercept_c are 'glue' functions that must be declared
+// as extern "C" so to be typesafe. Using a C++ method, even
+// a static class method with 'correct' arguments, will not pass
+// the test; some picky compilers do not allow mixing of function
+// pointers to 'C' functions with function pointers to C++ functions.
+//
+// One wart with this scheme is that the *_callback_ method pointer
+// must be declared public to be accessible by the C intercept.
+// It's possible to accomplish the goal without this, and with
+// another public transfer method, but it's just too much overhead.
+// These callbacks are supposed to be *fast*.
+//
+// The DBTs we receive in these callbacks from the C layer may be
+// manufactured there, but we want to treat them as Dbts.
+// Technically speaking, these DBTs were not constructed as Dbts,
+// but it should be safe to cast them as such given that Dbt is a
+// *very* thin extension of the DBT. That is, Dbt has no additional
+// data elements, does not use virtual functions, virtual inheritance,
+// multiple inheritance, RTTI, or any other language feature that
+// causes the structure to grow or be displaced. Although this may
+// sound risky, a design goal of C++ is complete structure
+// compatibility with C, and its philosophy is 'if you don't use it,
+// you shouldn't incur the overhead'. If the C/C++ compilers you're
+// using on a given machine do not have matching struct layouts, then
+// a lot more things will be broken than just this.
+//
+// The alternative, creating a Dbt here in the callback, and populating
+// it from the DBT, is just too slow and cumbersome to be very useful.
+
+// These macros avoid a lot of boilerplate code for callbacks
+
+#define DB_CALLBACK_C_INTERCEPT(_name, _rettype, _cargspec, \
+ _return, _cxxargs) \
+extern "C" _rettype _db_##_name##_intercept_c _cargspec \
+{ \
+ Db *cxxthis; \
+ \
+ DB_ASSERT(cthis != NULL); \
+ cxxthis = (Db *)cthis->api_internal; \
+ DB_ASSERT(cxxthis != NULL); \
+ DB_ASSERT(cxxthis->_name##_callback_ != 0); \
+ \
+ _return (*cxxthis->_name##_callback_) _cxxargs; \
+}
+
+#define DB_SET_CALLBACK(_cxxname, _name, _cxxargspec, _cb) \
+int Db::_cxxname _cxxargspec \
+{ \
+ DB *cthis = unwrap(this); \
+ \
+ _name##_callback_ = _cb; \
+ return ((*(cthis->_cxxname))(cthis, \
+ (_cb) ? _db_##_name##_intercept_c : NULL)); \
+}
+
+/* associate callback - doesn't quite fit the pattern because of the flags */
+DB_CALLBACK_C_INTERCEPT(associate,
+ int, (DB *cthis, const DBT *key, const DBT *data, DBT *retval),
+ return, (cxxthis, Dbt::get_const_Dbt(key), Dbt::get_const_Dbt(data),
+ Dbt::get_Dbt(retval)))
+
+int Db::associate(DbTxn *txn, Db *secondary, int (*callback)(Db *, const Dbt *,
+ const Dbt *, Dbt *), u_int32_t flags)
+{
+ DB *cthis = unwrap(this);
+
+ /* Since the secondary Db is used as the first argument
+ * to the callback, we store the C++ callback on it
+ * rather than on 'this'.
+ */
+ secondary->associate_callback_ = callback;
+ return ((*(cthis->associate))(cthis, unwrap(txn), unwrap(secondary),
+ (callback) ? _db_associate_intercept_c : NULL, flags));
+}
+
+DB_CALLBACK_C_INTERCEPT(feedback,
+ void, (DB *cthis, int opcode, int pct),
+ /* no return */ (void), (cxxthis, opcode, pct))
+
+DB_SET_CALLBACK(set_feedback, feedback,
+ (void (*arg)(Db *cxxthis, int opcode, int pct)), arg)
+
+DB_CALLBACK_C_INTERCEPT(append_recno,
+ int, (DB *cthis, DBT *data, db_recno_t recno),
+ return, (cxxthis, Dbt::get_Dbt(data), recno))
+
+DB_SET_CALLBACK(set_append_recno, append_recno,
+ (int (*arg)(Db *cxxthis, Dbt *data, db_recno_t recno)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_compare, bt_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_prefix,
+ size_t, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_prefix, bt_prefix,
+ (size_t (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(dup_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_dup_compare, dup_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(h_hash,
+ u_int32_t, (DB *cthis, const void *data, u_int32_t len),
+ return, (cxxthis, data, len))
+
+DB_SET_CALLBACK(set_h_hash, h_hash,
+ (u_int32_t (*arg)(Db *cxxthis, const void *data, u_int32_t len)), arg)
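+
+// Illustrative sketch (not part of the original sources, guarded out of the
+// build): a C++ comparison callback registered through the set_bt_compare
+// overload generated by DB_SET_CALLBACK above.  The key layout (a native
+// u_int32_t) and the helper names are hypothetical.
+#if 0
+static int
+compare_u32(Db *, const Dbt *a, const Dbt *b)
+{
+	u_int32_t x, y;
+
+	memcpy(&x, a->get_data(), sizeof(x));
+	memcpy(&y, b->get_data(), sizeof(y));
+	return (x < y ? -1 : x > y ? 1 : 0);
+}
+
+static void
+register_compare(Db &db)
+{
+	db.set_bt_compare(compare_u32);
+}
+#endif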
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _verify_callback_c(void *handle, const void *str_arg)
+{
+ char *str;
+ __DB_OSTREAMCLASS *out;
+
+ str = (char *)str_arg;
+ out = (__DB_OSTREAMCLASS *)handle;
+
+ (*out) << str;
+ if (out->fail())
+ return (EIO);
+
+ return (0);
+}
+
+int Db::verify(const char *name, const char *subdb,
+ __DB_OSTREAMCLASS *ostr, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ if (!db)
+ ret = EINVAL;
+ else
+ ret = __db_verify_internal(db, name, subdb, ostr,
+ _verify_callback_c, flags);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("Db::verify", ret, error_policy());
+
+ return (ret);
+}
+
+DB_METHOD(set_bt_compare, (bt_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_bt_maxkey, (u_int32_t bt_maxkey),
+ (db, bt_maxkey), DB_RETOK_STD)
+DB_METHOD(set_bt_minkey, (u_int32_t bt_minkey),
+ (db, bt_minkey), DB_RETOK_STD)
+DB_METHOD(set_bt_prefix, (bt_prefix_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_dup_compare, (dup_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_encrypt, (const char *passwd, int flags),
+ (db, passwd, flags), DB_RETOK_STD)
+DB_METHOD_VOID(set_errfile, (FILE *errfile), (db, errfile))
+DB_METHOD_VOID(set_errpfx, (const char *errpfx), (db, errpfx))
+DB_METHOD(set_flags, (u_int32_t flags), (db, flags),
+ DB_RETOK_STD)
+DB_METHOD(set_h_ffactor, (u_int32_t h_ffactor),
+ (db, h_ffactor), DB_RETOK_STD)
+DB_METHOD(set_h_hash, (h_hash_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_h_nelem, (u_int32_t h_nelem),
+ (db, h_nelem), DB_RETOK_STD)
+DB_METHOD(set_lorder, (int db_lorder), (db, db_lorder),
+ DB_RETOK_STD)
+DB_METHOD(set_pagesize, (u_int32_t db_pagesize),
+ (db, db_pagesize), DB_RETOK_STD)
+DB_METHOD(set_re_delim, (int re_delim),
+ (db, re_delim), DB_RETOK_STD)
+DB_METHOD(set_re_len, (u_int32_t re_len),
+ (db, re_len), DB_RETOK_STD)
+DB_METHOD(set_re_pad, (int re_pad),
+ (db, re_pad), DB_RETOK_STD)
+DB_METHOD(set_re_source, (char *re_source),
+ (db, re_source), DB_RETOK_STD)
+DB_METHOD(set_q_extentsize, (u_int32_t extentsize),
+ (db, extentsize), DB_RETOK_STD)
+
+DB_METHOD_QUIET(set_alloc, (db_malloc_fcn_type malloc_fcn,
+ db_realloc_fcn_type realloc_fcn, db_free_fcn_type free_fcn),
+ (db, malloc_fcn, realloc_fcn, free_fcn))
+
+void Db::set_errcall(void (*arg)(const char *, char *))
+{
+ env_->set_errcall(arg);
+}
+
+void *Db::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+void Db::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DB_METHOD(set_cachesize, (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (db, gbytes, bytes, ncache), DB_RETOK_STD)
+DB_METHOD(set_cache_priority, (DB_CACHE_PRIORITY priority),
+ (db, priority), DB_RETOK_STD)
+
+int Db::set_paniccall(void (*callback)(DbEnv *, int))
+{
+ return (env_->set_paniccall(callback));
+}
+
+void Db::set_error_stream(__DB_OSTREAMCLASS *error_stream)
+{
+ env_->set_error_stream(error_stream);
+}
diff --git a/libdb/cxx/cxx_dbc.cpp b/libdb/cxx/cxx_dbc.cpp
new file mode 100644
index 0000000..347d0c2
--- /dev/null
+++ b/libdb/cxx/cxx_dbc.cpp
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBC_METHOD(_name, _argspec, _arglist, _retok) \
+int Dbc::_name _argspec \
+{ \
+ int ret; \
+ DBC *dbc = this; \
+ \
+ ret = dbc->c_##_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Dbc::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+// It's private, and should never be called, but VC4.0 needs it resolved
+//
+Dbc::~Dbc()
+{
+}
+
+DBC_METHOD(close, (void), (dbc), DB_RETOK_STD)
+DBC_METHOD(count, (db_recno_t *countp, u_int32_t _flags),
+ (dbc, countp, _flags), DB_RETOK_STD)
+DBC_METHOD(del, (u_int32_t _flags),
+ (dbc, _flags), DB_RETOK_DBCDEL)
+
+int Dbc::dup(Dbc** cursorp, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+ DBC *new_cursor = 0;
+
+ ret = dbc->c_dup(dbc, &new_cursor, _flags);
+
+ if (DB_RETOK_STD(ret))
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)new_cursor;
+ else
+ DB_ERROR("Dbc::dup", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_get(dbc, key, data, _flags);
+
+ if (!DB_RETOK_DBCGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT("Dbc::get", key, ON_ERROR_UNKNOWN);
+ else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("Dbc::get", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("Dbc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
+
+int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_pget(dbc, key, pkey, data, _flags);
+
+ /* The error handling is the same as for Dbc::get. */
+ if (!DB_RETOK_DBCGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT("Dbc::pget", key, ON_ERROR_UNKNOWN);
+ else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("Dbc::pget", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("Dbc::pget", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
+
+DBC_METHOD(put, (Dbt* key, Dbt *data, u_int32_t _flags),
+ (dbc, key, data, _flags), DB_RETOK_DBCPUT)
diff --git a/libdb/cxx/cxx_dbt.cpp b/libdb/cxx/cxx_dbt.cpp
new file mode 100644
index 0000000..5772254
--- /dev/null
+++ b/libdb/cxx/cxx_dbt.cpp
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+Dbt::Dbt()
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+}
+
+Dbt::Dbt(void *data_arg, u_int32_t size_arg)
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+ set_data(data_arg);
+ set_size(size_arg);
+}
+
+Dbt::~Dbt()
+{
+}
+
+Dbt::Dbt(const Dbt &that)
+{
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+}
+
+Dbt &Dbt::operator = (const Dbt &that)
+{
+ if (this != &that) {
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+ }
+ return (*this);
+}
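+
+// Illustrative sketch (not part of the original sources, guarded out of the
+// build): wrapping existing application memory in Dbt objects; the key and
+// payload shown are hypothetical.
+#if 0
+	u_int32_t id = 42;
+	char payload[] = "forty-two";
+
+	Dbt key(&id, sizeof(id));
+	Dbt data(payload, sizeof(payload));
+	// key and data can now be passed to Db::put / Db::get.
+#endif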
diff --git a/libdb/cxx/cxx_env.cpp b/libdb/cxx/cxx_env.cpp
new file mode 100644
index 0000000..b17afd3
--- /dev/null
+++ b/libdb/cxx/cxx_env.cpp
@@ -0,0 +1,802 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <stdio.h> // needed for set_error_stream
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc_auto/common_ext.h"
+
+#ifdef HAVE_CXX_STDHEADERS
+using std::cerr;
+#endif
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. They may return an error or raise an exception.
+// These macros expect that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(dbenv, arg)")
+//
+#define DBENV_METHOD_ERR(_name, _argspec, _arglist, _on_err) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ int ret; \
+ \
+ if ((ret = dbenv->_name _arglist) != 0) { \
+ _on_err; \
+ } \
+ return (ret); \
+}
+
+#define DBENV_METHOD(_name, _argspec, _arglist) \
+ DBENV_METHOD_ERR(_name, _argspec, _arglist, \
+ DB_ERROR("DbEnv::" # _name, ret, error_policy()))
+
+#define DBENV_METHOD_QUIET(_name, _argspec, _arglist) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ return (dbenv->_name _arglist); \
+}
+
+#define DBENV_METHOD_VOID(_name, _argspec, _arglist) \
+void DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ dbenv->_name _arglist; \
+}
+
+// This datatype is needed for picky compilers.
+//
+extern "C" {
+ typedef void (*db_errcall_fcn_type)
+ (const char *, char *);
+};
+
+// The reason for a static variable is that some structures
+// (like Dbts) have no connection to any Db or DbEnv, so when
+// errors occur in their methods, we must have some reasonable
+// way to determine whether to throw or return errors.
+//
+// This variable is set from the constructor flags whenever a DbEnv is constructed.
+// Normally there is only one DbEnv per program, and even if not,
+// there is typically a single policy of throwing or returning.
+//
+static int last_known_error_policy = ON_ERROR_UNKNOWN;
+
+__DB_OSTREAMCLASS *DbEnv::error_stream_ = 0;
+
+// These 'glue' functions are declared as extern "C" so they will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
+{
+ DbEnv::_feedback_intercept(env, opcode, pct);
+}
+
+extern "C"
+void _paniccall_intercept_c(DB_ENV *env, int errval)
+{
+ DbEnv::_paniccall_intercept(env, errval);
+}
+
+extern "C"
+void _stream_error_function_c(const char *prefix, char *message)
+{
+ DbEnv::_stream_error_function(prefix, message);
+}
+
+extern "C"
+int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op));
+}
+
+extern "C"
+int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+ return (DbEnv::_rep_send_intercept(env,
+ cntrl, data, id, flags));
+}
+
+void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxenv->feedback_callback_ == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL,
+ cxxenv->error_policy());
+ return;
+ }
+ (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
+}
+
+void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->paniccall_callback_ == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ cxxenv->error_policy());
+ }
+ (*cxxenv->paniccall_callback_)(cxxenv, errval);
+}
+
+int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxenv->app_dispatch_callback_ == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, cxxenv->error_policy());
+ return (EINVAL);
+ }
+ Dbt *cxxdbt = (Dbt *)dbt;
+ DbLsn *cxxlsn = (DbLsn *)lsn;
+ return ((*cxxenv->app_dispatch_callback_)(cxxenv, cxxdbt, cxxlsn, op));
+}
+
+int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+
+ if (env == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ const Dbt *cxxcntrl = (const Dbt *)cntrl;
+ Dbt *cxxdata = (Dbt *)data;
+ return ((*cxxenv->rep_send_callback_)(cxxenv,
+ cxxcntrl, cxxdata, id, flags));
+}
+
+// A truism for the DbEnv object is that there is a valid
+// DB_ENV handle from the constructor until close().
+// After the close, the DB_ENV handle is invalid and
+// no operations are permitted on the DbEnv (other than
+// destructor). Leaving the DbEnv handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow DbEnv objects to be closed and reopened.
+// This implied always keeping a valid DB_ENV object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+DbEnv::DbEnv(u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(0)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(env)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+// If the DB_ENV handle is still open, we close it. This is to make stack
+// allocation of DbEnv objects easier so that they are cleaned up in the error
+// path. Note that the C layer catches cases where handles are open in the
+// environment at close time and reports an error. Applications should call
+// close explicitly in normal (non-exceptional) cases to check the return
+// value.
+//
+DbEnv::~DbEnv()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ cleanup();
+ (void)env->close(env, 0);
+ }
+}
+
+// called by destructors before the DB_ENV is destroyed.
+void DbEnv::cleanup()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ env->api1_internal = 0;
+ imp_ = 0;
+ }
+}
+
+int DbEnv::close(u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a close (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = env->close(env, flags)) != 0)
+ DB_ERROR("DbEnv::close", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(dbremove,
+ (DbTxn *txn, const char *name, const char *subdb, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, flags))
+DBENV_METHOD(dbrename, (DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, newname, flags))
+
+void DbEnv::err(int error, const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, error, 1, 1, format);
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int DbEnv::error_policy()
+{
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+}
+
+void DbEnv::errx(const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, 0, 0, 1, format);
+}
+
+void *DbEnv::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+// used internally during constructor
+// to associate an existing DB_ENV with this DbEnv,
+// or create a new one.
+//
+int DbEnv::initialize(DB_ENV *env)
+{
+ int ret;
+
+ last_known_error_policy = error_policy();
+
+ if (env == 0) {
+ // Create a new DB_ENV environment.
+ if ((ret = ::db_env_create(&env,
+ construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0)
+ return (ret);
+ }
+ imp_ = wrap(env);
+ env->api1_internal = this; // for DB_ENV* to DbEnv* conversion
+ return (0);
+}
+
+// lock methods
+DBENV_METHOD(lock_detect, (u_int32_t flags, u_int32_t atype, int *aborted),
+ (dbenv, flags, atype, aborted))
+DBENV_METHOD_ERR(lock_get,
+ (u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock),
+ (dbenv, locker, flags, obj, lock_mode, &lock->lock_),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_get", ret,
+ DB_LOCK_GET, lock_mode, obj, *lock,
+ -1, error_policy()))
+DBENV_METHOD(lock_id, (u_int32_t *idp), (dbenv, idp))
+DBENV_METHOD(lock_id_free, (u_int32_t id), (dbenv, id))
+DBENV_METHOD(lock_put, (DbLock *lock), (dbenv, &lock->lock_))
+DBENV_METHOD(lock_stat, (DB_LOCK_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+DBENV_METHOD_ERR(lock_vec,
+ (u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elist_returned),
+ (dbenv, locker, flags, list, nlist, elist_returned),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_vec", ret,
+ (*elist_returned)->op, (*elist_returned)->mode,
+ Dbt::get_Dbt((*elist_returned)->obj), DbLock((*elist_returned)->lock),
+ (*elist_returned) - list, error_policy()))
+// log methods
+DBENV_METHOD(log_archive, (char **list[], u_int32_t flags),
+ (dbenv, list, flags))
+
+int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
+{
+ return (::log_compare(lsn0, lsn1));
+}
+
+// The following cast implies that DbLogc can be no larger than DB_LOGC
+DBENV_METHOD(log_cursor, (DbLogc **cursorp, u_int32_t flags),
+ (dbenv, (DB_LOGC **)cursorp, flags))
+DBENV_METHOD(log_file, (DbLsn *lsn, char *namep, size_t len),
+ (dbenv, lsn, namep, len))
+DBENV_METHOD(log_flush, (const DbLsn *lsn), (dbenv, lsn))
+DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags),
+ (dbenv, lsn, data, flags))
+DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags),
+ (dbenv, spp, flags))
+
+int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int ret;
+ DB_MPOOLFILE *mpf;
+
+ if (env == NULL)
+ ret = EINVAL;
+ else
+ ret = env->memp_fcreate(env, &mpf, flags);
+
+ if (DB_RETOK_STD(ret)) {
+ *dbmfp = new DbMpoolFile();
+ (*dbmfp)->imp_ = wrap(mpf);
+ } else
+ DB_ERROR("DbMpoolFile::f_create", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+DBENV_METHOD(memp_register,
+ (int ftype, pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn),
+ (dbenv, ftype, pgin_fcn, pgout_fcn))
+
+// memory pool methods
+DBENV_METHOD(memp_stat,
+ (DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags),
+ (dbenv, gsp, fsp, flags))
+
+DBENV_METHOD(memp_sync, (DbLsn *sn), (dbenv, sn))
+
+DBENV_METHOD(memp_trickle, (int pct, int *nwrotep), (dbenv, pct, nwrotep))
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB_ENV->open method.
+//
+int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ if (construct_error_ != 0)
+ ret = construct_error_;
+ else
+ ret = env->open(env, db_home, flags, mode);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbEnv::open", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::remove(const char *db_home, u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a remove (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((ret = env->remove(env, db_home, flags)) != 0)
+ DB_ERROR("DbEnv::remove", ret, error_policy());
+
+ return (ret);
+}
+
+// Report an error associated with the DbEnv.
+// error_policy is one of:
+// ON_ERROR_THROW throw an error
+// ON_ERROR_RETURN do nothing here, the caller will return an error
+// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
+//
+void DbEnv::runtime_error(const char *caller, int error, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ switch (error) {
+ case DB_LOCK_DEADLOCK:
+ {
+ DbDeadlockException dl_except(caller);
+ throw dl_except;
+ }
+ break;
+ case DB_RUNRECOVERY:
+ {
+ DbRunRecoveryException rr_except(caller);
+ throw rr_except;
+ }
+ break;
+ default:
+ {
+ DbException except(caller, error);
+ throw except;
+ }
+ break;
+ }
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbMemoryException
+// based on the fact that this Dbt is not large enough.
+void DbEnv::runtime_error_dbt(const char *caller, Dbt *dbt, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbMemoryException except(caller, dbt);
+ throw except;
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbLockNotGrantedException
+// when the error is DB_LOCK_NOTGRANTED; otherwise fall back to the
+// regular runtime_error.
+void DbEnv::runtime_error_lock_get(const char *caller, int error,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj,
+ DbLock lock, int index, int error_policy)
+{
+ if (error != DB_LOCK_NOTGRANTED) {
+ runtime_error(caller, error, error_policy);
+ return;
+ }
+
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbLockNotGrantedException except(caller, op, mode,
+ obj, lock, index);
+ throw except;
+ }
+}
+
+// static method
+char *DbEnv::strerror(int error)
+{
+ return (db_strerror(error));
+}
+
+void DbEnv::_stream_error_function(const char *prefix, char *message)
+{
+ // HP compilers need the extra casts, we don't know why.
+ if (error_stream_) {
+ if (prefix) {
+ (*error_stream_) << prefix << (const char *)": ";
+ }
+ if (message) {
+ (*error_stream_) << (const char *)message;
+ }
+ (*error_stream_) << (const char *)"\n";
+ }
+}
+
+// set methods
+
+DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile))
+DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx))
+
+// We keep these alphabetical by field name,
+// for comparison with Java's list.
+//
+DBENV_METHOD(set_data_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_encrypt, (const char *passwd, int flags),
+ (dbenv, passwd, flags))
+DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize))
+DBENV_METHOD(set_lg_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_lg_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lg_regionmax, (u_int32_t regionmax), (dbenv, regionmax))
+DBENV_METHOD(set_lk_detect, (u_int32_t detect), (dbenv, detect))
+DBENV_METHOD(set_lk_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lk_max_lockers, (u_int32_t max_lockers), (dbenv, max_lockers))
+DBENV_METHOD(set_lk_max_locks, (u_int32_t max_locks), (dbenv, max_locks))
+DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects))
+DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize))
+DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir))
+DBENV_METHOD(set_tx_max, (u_int32_t tx_max), (dbenv, tx_max))
+
+DBENV_METHOD_QUIET(set_alloc,
+ (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn,
+ db_free_fcn_type free_fcn),
+ (dbenv, malloc_fcn, realloc_fcn, free_fcn))
+
+void DbEnv::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DBENV_METHOD(set_cachesize,
+ (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (dbenv, gbytes, bytes, ncache))
+
+void DbEnv::set_errcall(void (*arg)(const char *, char *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ // XXX
+ // We are casting from a function ptr declared with C++
+ // linkage to one (same arg types) declared with C
+ // linkage. It's hard to imagine a pair of C/C++
+ // compilers from the same vendor for which this
+	// won't work.  Unfortunately, we can't use an
+	// intercept function like the others since the
+ // function does not have a (DbEnv*) as one of
+ // the args. If this causes trouble, we can pull
+ // the same trick we use in Java, namely stuffing
+ // a (DbEnv*) pointer into the prefix. We're
+ // avoiding this for the moment because it obfuscates.
+ //
+ (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
+}
+
+// Note: This actually behaves a bit like a static function,
+// since DB_ENV.db_errcall has no information about which
+// db_env triggered the call. A user that has multiple DB_ENVs
+// will simply not be able to have different streams for each one.
+//
+void DbEnv::set_error_stream(__DB_OSTREAMCLASS *stream)
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ error_stream_ = stream;
+ dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
+ _stream_error_function_c);
+}
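+
+// A minimal usage sketch: route error messages to stderr with an
+// application prefix ("myapp" is a placeholder).
+//
+//	env.set_error_stream(&std::cerr);
+//	env.set_errpfx("myapp");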
+
+int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
+}
+
+DBENV_METHOD(set_flags, (u_int32_t flags, int onoff), (dbenv, flags, onoff))
+DBENV_METHOD(set_lk_conflicts, (u_int8_t *lk_conflicts, int lk_max),
+ (dbenv, lk_conflicts, lk_max))
+
+int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ paniccall_callback_ = arg;
+
+ return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
+}
+
+DBENV_METHOD(set_rpc_server,
+ (void *cl, char *host, long tsec, long ssec, u_int32_t flags),
+ (dbenv, cl, host, tsec, ssec, flags))
+DBENV_METHOD(set_shm_key, (long shm_key), (dbenv, shm_key))
+// Note: this changes from last_known_error_policy to error_policy()
+DBENV_METHOD(set_tas_spins, (u_int32_t arg), (dbenv, arg))
+
+int DbEnv::set_app_dispatch
+ (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ app_dispatch_callback_ = arg;
+ if ((ret = (*(dbenv->set_app_dispatch))(dbenv,
+ _app_dispatch_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_app_dispatch", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(set_tx_timestamp, (time_t *timestamp), (dbenv, timestamp))
+DBENV_METHOD(set_verbose, (u_int32_t which, int onoff), (dbenv, which, onoff))
+
+int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ DB_TXN *txn;
+ int ret;
+
+ ret = env->txn_begin(env, unwrap(pid), &txn, flags);
+ if (DB_RETOK_STD(ret))
+ *tid = new DbTxn(txn);
+ else
+ DB_ERROR("DbEnv::txn_begin", ret, error_policy());
+
+ return (ret);
+}
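+
+// A minimal usage sketch: begin a top-level transaction (no parent),
+// do some work with it, and commit.
+//
+//	DbTxn *txn = NULL;
+//	env.txn_begin(NULL, &txn, 0);
+//	/* ... operations passing txn ... */
+//	txn->commit(0);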
+
+DBENV_METHOD(txn_checkpoint, (u_int32_t kbyte, u_int32_t min, u_int32_t flags),
+ (dbenv, kbyte, min, flags))
+
+int DbEnv::txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags)
+{
+ DB_ENV *dbenv = unwrap(this);
+ DB_PREPLIST *c_preplist;
+ long i;
+ int ret;
+
+ /*
+ * We need to allocate some local storage for the
+ * returned preplist, and that requires us to do
+ * our own argument validation.
+ */
+ if (count <= 0)
+ ret = EINVAL;
+ else
+ ret = __os_malloc(dbenv, sizeof(DB_PREPLIST) * count,
+ &c_preplist);
+
+ if (ret != 0) {
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ if ((ret =
+ dbenv->txn_recover(dbenv, c_preplist, count, retp, flags)) != 0) {
+ __os_free(dbenv, c_preplist);
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ for (i = 0; i < *retp; i++) {
+ preplist[i].txn = new DbTxn();
+ preplist[i].txn->imp_ = wrap(c_preplist[i].txn);
+ memcpy(preplist[i].gid, c_preplist[i].gid,
+ sizeof(preplist[i].gid));
+ }
+
+ __os_free(dbenv, c_preplist);
+
+ return (0);
+}
+
+DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+int DbEnv::set_rep_transport(u_int32_t myid,
+ int (*f_send)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ rep_send_callback_ = f_send;
+ if ((ret = dbenv->set_rep_transport(dbenv,
+ myid, _rep_send_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_rep_transport", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_elect,
+ (int nsites, int pri, u_int32_t timeout, int *idp),
+ (dbenv, nsites, pri, timeout, idp))
+
+int DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *idp)
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ ret = dbenv->rep_process_message(dbenv, control, rec, idp);
+ if (!DB_RETOK_REPPMSG(ret))
+ DB_ERROR("DbEnv::rep_process_message", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_start,
+ (Dbt *cookie, u_int32_t flags),
+ (dbenv, (DBT *)cookie, flags))
+
+DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+DBENV_METHOD(set_rep_limit, (u_int32_t gbytes, u_int32_t bytes),
+ (dbenv, gbytes, bytes))
+
+DBENV_METHOD(set_timeout,
+ (db_timeout_t timeout, u_int32_t flags),
+ (dbenv, timeout, flags))
+
+// static method
+char *DbEnv::version(int *major, int *minor, int *patch)
+{
+ return (db_version(major, minor, patch));
+}
+
+// static method
+DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv)
+{
+ DbEnv *wrapped_env = get_DbEnv(dbenv);
+ if (wrapped_env == NULL)
+ wrapped_env = new DbEnv(dbenv, 0);
+ return wrapped_env;
+}
diff --git a/libdb/cxx/cxx_except.cpp b/libdb/cxx/cxx_except.cpp
new file mode 100644
index 0000000..eddb694
--- /dev/null
+++ b/libdb/cxx/cxx_except.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <string.h>
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+// tmpString is used to create strings on the stack
+//
+class tmpString
+{
+public:
+ tmpString(const char *str1,
+ const char *str2 = 0,
+ const char *str3 = 0,
+ const char *str4 = 0,
+ const char *str5 = 0);
+ ~tmpString() { delete [] s_; }
+ operator const char *() { return (s_); }
+
+private:
+ char *s_;
+};
+
+tmpString::tmpString(const char *str1,
+ const char *str2,
+ const char *str3,
+ const char *str4,
+ const char *str5)
+{
+ size_t len = strlen(str1);
+ if (str2)
+ len += strlen(str2);
+ if (str3)
+ len += strlen(str3);
+ if (str4)
+ len += strlen(str4);
+ if (str5)
+ len += strlen(str5);
+
+ s_ = new char[len+1];
+
+ strcpy(s_, str1);
+ if (str2)
+ strcat(s_, str2);
+ if (str3)
+ strcat(s_, str3);
+ if (str4)
+ strcat(s_, str4);
+ if (str5)
+ strcat(s_, str5);
+}
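+
+// A usage sketch: the concatenated buffer lives only as long as the
+// temporary, which is long enough to hand the result to dupString()
+// below, e.g.
+//
+//	what_ = dupString(tmpString(prefix, ": ", db_strerror(err)));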
+
+// Note: would not be needed if we can inherit from exception
+// It does not appear to be possible to inherit from exception
+// with the current Microsoft library (VC5.0).
+//
+static char *dupString(const char *s)
+{
+ char *r = new char[strlen(s)+1];
+ strcpy(r, s);
+ return (r);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbException::~DbException()
+{
+ if (what_)
+ delete [] what_;
+}
+
+DbException::DbException(int err)
+: err_(err)
+{
+ what_ = dupString(db_strerror(err));
+}
+
+DbException::DbException(const char *description)
+: err_(0)
+{
+ what_ = dupString(tmpString(description));
+}
+
+DbException::DbException(const char *prefix, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix, ": ", db_strerror(err)));
+}
+
+DbException::DbException(const char *prefix1, const char *prefix2, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ",
+ db_strerror(err)));
+}
+
+DbException::DbException(const DbException &that)
+: err_(that.err_)
+{
+ what_ = dupString(that.what_);
+}
+
+DbException &DbException::operator = (const DbException &that)
+{
+ if (this != &that) {
+ err_ = that.err_;
+ if (what_)
+ delete [] what_;
+ what_ = 0; // in case new throws exception
+ what_ = dupString(that.what_);
+ }
+ return (*this);
+}
+
+int DbException::get_errno() const
+{
+ return (err_);
+}
+
+const char *DbException::what() const
+{
+ return (what_);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMemoryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+static const char *memory_err_desc = "Dbt not large enough for available data";
+DbMemoryException::~DbMemoryException()
+{
+}
+
+DbMemoryException::DbMemoryException(Dbt *dbt)
+: DbException(memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *description)
+: DbException(description, ENOMEM)
+, dbt_(0)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix, Dbt *dbt)
+: DbException(prefix, memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix1, const char *prefix2,
+ Dbt *dbt)
+: DbException(prefix1, prefix2, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const DbMemoryException &that)
+: DbException(that)
+, dbt_(that.dbt_)
+{
+}
+
+DbMemoryException
+&DbMemoryException::operator =(const DbMemoryException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ dbt_ = that.dbt_;
+ }
+ return (*this);
+}
+
+Dbt *DbMemoryException::get_dbt() const
+{
+ return (dbt_);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbDeadlockException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbDeadlockException::~DbDeadlockException()
+{
+}
+
+DbDeadlockException::DbDeadlockException(const char *description)
+: DbException(description, DB_LOCK_DEADLOCK)
+{
+}
+
+DbDeadlockException::DbDeadlockException(const DbDeadlockException &that)
+: DbException(that)
+{
+}
+
+DbDeadlockException
+&DbDeadlockException::operator =(const DbDeadlockException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLockNotGrantedException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLockNotGrantedException::~DbLockNotGrantedException()
+{
+ delete lock_;
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException(const char *prefix,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj, const DbLock lock,
+ int index)
+: DbException(prefix, DbEnv::strerror(DB_LOCK_NOTGRANTED),
+ DB_LOCK_NOTGRANTED)
+, op_(op)
+, mode_(mode)
+, obj_(obj)
+, index_(index)
+{
+ lock_ = new DbLock(lock);
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException
+ (const DbLockNotGrantedException &that)
+: DbException(that)
+{
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+}
+
+DbLockNotGrantedException
+&DbLockNotGrantedException::operator =(const DbLockNotGrantedException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+ }
+ return (*this);
+}
+
+db_lockop_t DbLockNotGrantedException::get_op() const
+{
+ return op_;
+}
+
+db_lockmode_t DbLockNotGrantedException::get_mode() const
+{
+ return mode_;
+}
+
+const Dbt* DbLockNotGrantedException::get_obj() const
+{
+ return obj_;
+}
+
+DbLock* DbLockNotGrantedException::get_lock() const
+{
+ return lock_;
+}
+
+int DbLockNotGrantedException::get_index() const
+{
+ return index_;
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbRunRecoveryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbRunRecoveryException::~DbRunRecoveryException()
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException(const char *description)
+: DbException(description, DB_RUNRECOVERY)
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException
+ (const DbRunRecoveryException &that)
+: DbException(that)
+{
+}
+
+DbRunRecoveryException
+&DbRunRecoveryException::operator =(const DbRunRecoveryException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
diff --git a/libdb/cxx/cxx_lock.cpp b/libdb/cxx/cxx_lock.cpp
new file mode 100644
index 0000000..31b6603
--- /dev/null
+++ b/libdb/cxx/cxx_lock.cpp
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLock //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLock::DbLock(DB_LOCK value)
+: lock_(value)
+{
+}
+
+DbLock::DbLock()
+{
+ memset(&lock_, 0, sizeof(DB_LOCK));
+}
+
+DbLock::DbLock(const DbLock &that)
+: lock_(that.lock_)
+{
+}
+
+DbLock &DbLock::operator = (const DbLock &that)
+{
+ lock_ = that.lock_;
+ return (*this);
+}
diff --git a/libdb/cxx/cxx_logc.cpp b/libdb/cxx/cxx_logc.cpp
new file mode 100644
index 0000000..b6cd032
--- /dev/null
+++ b/libdb/cxx/cxx_logc.cpp
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// It's private, and should never be called,
+// but some compilers need it resolved
+//
+DbLogc::~DbLogc()
+{
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::close(u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->close(logc, _flags);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbLogc::close", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->get(logc, lsn, data, _flags);
+
+ if (!DB_RETOK_LGGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("DbLogc::get", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("DbLogc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
diff --git a/libdb/cxx/cxx_mpool.cpp b/libdb/cxx/cxx_mpool.cpp
new file mode 100644
index 0000000..045fb96
--- /dev/null
+++ b/libdb/cxx/cxx_mpool.cpp
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(mpf, arg)")
+//
+#define DB_MPOOLFILE_METHOD(_name, _argspec, _arglist, _retok) \
+int DbMpoolFile::_name _argspec \
+{ \
+ int ret; \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ if (mpf == NULL) \
+ ret = EINVAL; \
+ else \
+ ret = mpf->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("DbMpoolFile::"#_name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+#define DB_MPOOLFILE_METHOD_VOID(_name, _argspec, _arglist) \
+void DbMpoolFile::_name _argspec \
+{ \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ mpf->_name _arglist; \
+}
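+
+// For reference, a sketch of what a single instantiation such as
+//	DB_MPOOLFILE_METHOD(set_ftype, (int ftype), (mpf, ftype), DB_RETOK_STD)
+// expands to:
+//
+//	int DbMpoolFile::set_ftype(int ftype)
+//	{
+//		int ret;
+//		DB_MPOOLFILE *mpf = unwrap(this);
+//
+//		if (mpf == NULL)
+//			ret = EINVAL;
+//		else
+//			ret = mpf->set_ftype(mpf, ftype);
+//		if (!DB_RETOK_STD(ret))
+//			DB_ERROR("DbMpoolFile::set_ftype", ret, ON_ERROR_UNKNOWN);
+//		return (ret);
+//	}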
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMpoolFile //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbMpoolFile::DbMpoolFile()
+: imp_(0)
+{
+}
+
+DbMpoolFile::~DbMpoolFile()
+{
+}
+
+int DbMpoolFile::close(u_int32_t flags)
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int ret;
+
+ if (mpf == NULL)
+ ret = EINVAL;
+ else
+ ret = mpf->close(mpf, flags);
+
+ imp_ = 0; // extra safety
+
+ // This may seem weird, but is legal as long as we don't access
+ // any data before returning.
+ delete this;
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbMpoolFile::close", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+DB_MPOOLFILE_METHOD(get, (db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep),
+ (mpf, pgnoaddr, flags, pagep), DB_RETOK_MPGET)
+DB_MPOOLFILE_METHOD_VOID(last_pgno, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(open,
+ (const char *file, u_int32_t flags, int mode, size_t pagesize),
+ (mpf, file, flags, mode, pagesize), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(put, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(refcnt, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(set, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_clear_len, (u_int32_t len),
+ (mpf, len), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_fileid, (u_int8_t *fileid),
+ (mpf, fileid), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_ftype, (int ftype),
+ (mpf, ftype), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_lsn_offset, (int32_t offset),
+ (mpf, offset), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_pgcookie, (DBT *dbt),
+ (mpf, dbt), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(set_unlink, (int ul), (mpf, ul))
+DB_MPOOLFILE_METHOD(sync, (),
+ (mpf), DB_RETOK_STD)
diff --git a/libdb/cxx/cxx_txn.cpp b/libdb/cxx/cxx_txn.cpp
new file mode 100644
index 0000000..35c927f
--- /dev/null
+++ b/libdb/cxx/cxx_txn.cpp
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \
+int DbTxn::_name _argspec \
+{ \
+ int ret; \
+ DB_TXN *txn = unwrap(this); \
+ \
+ ret = txn->_name _arglist; \
+ /* Weird, but safe if we don't access this again. */ \
+ if (_delete) \
+ delete this; \
+ if (!DB_RETOK_STD(ret)) \
+ DB_ERROR("DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
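+
+// For reference, a sketch of what an instantiation such as
+//	DBTXN_METHOD(commit, 1, (u_int32_t flags), (txn, flags))
+// expands to: a DbTxn::commit(u_int32_t) that calls txn->commit(txn,
+// flags), deletes this wrapper (because _delete is 1), and routes any
+// non-zero return through DB_ERROR before returning it.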
+
+// private constructor, never called but needed by some C++ linkers
+DbTxn::DbTxn()
+: imp_(0)
+{
+}
+
+DbTxn::DbTxn(DB_TXN *txn)
+: imp_(wrap(txn))
+{
+ txn->api_internal = this;
+}
+
+DbTxn::~DbTxn()
+{
+}
+
+DBTXN_METHOD(abort, 1, (), (txn))
+DBTXN_METHOD(commit, 1, (u_int32_t flags), (txn, flags))
+DBTXN_METHOD(discard, 1, (u_int32_t flags), (txn, flags))
+
+u_int32_t DbTxn::id()
+{
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ return (txn->id(txn)); // no error
+}
+
+DBTXN_METHOD(prepare, 0, (u_int8_t *gid), (txn, gid))
+DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags),
+ (txn, timeout, flags))
+
+// static method
+DbTxn *DbTxn::wrap_DB_TXN(DB_TXN *txn)
+{
+ DbTxn *wrapped_txn = get_DbTxn(txn);
+ if (wrapped_txn == NULL)
+ wrapped_txn = new DbTxn(txn);
+ return wrapped_txn;
+}
diff --git a/libdb/db/crdel.src b/libdb/db/crdel.src
new file mode 100644
index 0000000..671db29
--- /dev/null
+++ b/libdb/db/crdel.src
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __crdel
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Metasub: log the creation of a subdatabase meta data page.
+ *
+ * fileid: identifies the file being acted upon.
+ * pgno: page number on which to write this meta-data page
+ * page: the actual meta-data page
+ * lsn: lsn of the page.
+ */
+BEGIN metasub 142
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT page DBT s
+POINTER lsn DB_LSN * lu
+END
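+
+/*
+ * For reference, a sketch of the interface gen_rec.awk derives from the
+ * description above (see crdel_auto.c and crdel_rec.c below), e.g.:
+ *
+ *	int __crdel_metasub_log __P((DB *, DB_TXN *, DB_LSN *,
+ *	    u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+ */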
+
diff --git a/libdb/db/crdel_auto.c b/libdb/db/crdel_auto.c
new file mode 100644
index 0000000..ba2e95d
--- /dev/null
+++ b/libdb/db/crdel_auto.c
@@ -0,0 +1,329 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __crdel_metasub_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+ */
+int
+__crdel_metasub_log(dbp, txnid, ret_lsnp, flags, pgno, page, lsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ const DBT *page;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___crdel_metasub;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size)
+ + sizeof(*lsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__crdel_metasub_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __crdel_metasub_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __crdel_metasub_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __crdel_metasub_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__crdel_metasub_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __crdel_metasub_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __crdel_metasub_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__crdel_metasub: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __crdel_metasub_read __P((DB_ENV *, void *,
+ * PUBLIC: __crdel_metasub_args **));
+ */
+int
+__crdel_metasub_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __crdel_metasub_args **argpp;
+{
+ __crdel_metasub_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__crdel_metasub_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__crdel_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __crdel_metasub_print, DB___crdel_metasub)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __crdel_init_getpgnos __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__crdel_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __crdel_metasub_getpgnos, DB___crdel_metasub)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __crdel_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__crdel_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __crdel_metasub_recover, DB___crdel_metasub)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/db/crdel_rec.c b/libdb/db/crdel_rec.c
new file mode 100644
index 0000000..e17e9a2
--- /dev/null
+++ b/libdb/db/crdel_rec.c
@@ -0,0 +1,97 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+
+/*
+ * __crdel_metasub_recover --
+ * Recovery function for metasub.
+ *
+ * PUBLIC: int __crdel_metasub_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metasub_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__crdel_metasub_print);
+ REC_INTRO(__crdel_metasub_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ memcpy(pagep, argp->page.data, argp->page.size);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (DB_UNDO(op)) {
+ /*
+ * We want to undo this page creation. The page creation
+ * happened in two parts. First, we called __bam_new which
+ * was logged separately. Then we wrote the meta-data onto
+ * the page. So long as we restore the LSN, then the recovery
+ * for __bam_new will do everything else.
+ * Don't bother checking the lsn on the page. If we
+ * are rolling back the next thing is that this page
+ * will get freed. Opening the subdb will have reinitialized
+ * the page, but not the lsn.
+ */
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
diff --git a/libdb/db/db.c b/libdb/db/db.c
new file mode 100644
index 0000000..e08f6a2
--- /dev/null
+++ b/libdb/db/db.c
@@ -0,0 +1,1308 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_disassociate __P((DB *));
+#if CONFIG_TEST
+static void __db_makecopy __P((const char *, const char *));
+static int __db_testdocopy __P((DB_ENV *, const char *));
+static int __qam_testdocopy __P((DB *, const char *));
+#endif
+
+/*
+ * DB.C --
+ * This file contains the utility functions for the DBP layer.
+ */
+
+/*
+ * __db_master_open --
+ * Open up a handle on a master database.
+ *
+ * PUBLIC: int __db_master_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, int, DB **));
+ */
+int
+__db_master_open(subdbp, txn, name, flags, mode, dbpp)
+ DB *subdbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ /* Open up a handle on the main database. */
+ if ((ret = db_create(&dbp, subdbp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * It's always a btree.
+ * Run in the transaction we've created.
+ * Set the pagesize in case we're creating a new database.
+ * Flag that we're creating a database with subdatabases.
+ */
+ dbp->type = DB_BTREE;
+ dbp->pgsize = subdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+ F_SET(dbp, F_ISSET(subdbp,
+ DB_AM_RECOVER | DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM));
+
+ /*
+ * If there was a subdb specified, then we only want to apply
+ * DB_EXCL to the subdb, not the actual file. We only got here
+ * because there was a subdb specified.
+ */
+ LF_CLR(DB_EXCL);
+ LF_SET(DB_RDWRMASTER);
+ if ((ret = __db_dbopen(dbp, txn, name, NULL, flags, mode, PGNO_BASE_MD))
+ != 0)
+ goto err;
+
+ /*
+ * Verify that pagesize is the same on both.
+ * The items in dbp were now initialized from the meta
+ * page. The items in dbp were set in __db_dbopen
+ * when we either read or created the master file.
+ * Other items such as checksum and encryption are
+ * checked when we read the meta-page. So we do not
+ * check those here. However, if the meta-page caused
+ * chksumming to be turned on and it wasn't already, set
+ * it here.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ F_SET(subdbp, DB_AM_CHKSUM);
+ if (subdbp->pgsize != 0 && dbp->pgsize != subdbp->pgsize) {
+ ret = EINVAL;
+ __db_err(dbp->dbenv,
+ "Different pagesize specified on existent file");
+ goto err;
+ }
+err:
+ if (ret != 0 && !F_ISSET(dbp, DB_AM_DISCARD))
+ __db_close_i(dbp, txn, 0);
+ else
+ *dbpp = dbp;
+ return (ret);
+}
+
+/*
+ * __db_master_update --
+ * Add/Open/Remove a subdatabase from a master database.
+ *
+ * PUBLIC: int __db_master_update __P((DB *, DB *, DB_TXN *, const char *,
+ * PUBLIC: DBTYPE, mu_action, const char *, u_int32_t));
+ */
+int
+__db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags)
+ DB *mdbp, *sdbp;
+ DB_TXN *txn;
+ const char *subdb;
+ DBTYPE type;
+ mu_action action;
+ const char *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc, *ndbc;
+ DBT key, data, ndata;
+ PAGE *p;
+ db_pgno_t t_pgno;
+ int modify, ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ dbc = ndbc = NULL;
+ p = NULL;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Might we modify the master database? If so, we'll need to lock. */
+ modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0;
+
+ /*
+ * Open up a cursor. If this is CDB and we're creating the database,
+ * make it an update cursor.
+ */
+ if ((ret = mdbp->cursor(mdbp, txn, &dbc,
+ (CDB_LOCKING(dbenv) && modify) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+
+ /*
+ * Point the cursor at the record.
+ *
+ * If we're removing or potentially creating an entry, lock the page
+ * with DB_RMW.
+ *
+ * We do multiple cursor operations with the cursor in some cases and
+ * subsequently access the data DBT information. Set DB_DBT_MALLOC so
+ * we don't risk modification of the data between our uses of it.
+ *
+ * !!!
+ * We don't include the name's nul termination in the database.
+ */
+ key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
+ F_SET(&data, DB_DBT_MALLOC);
+
+ ret = dbc->c_get(dbc, &key, &data,
+ DB_SET | ((STD_LOCKING(dbc) && modify) ? DB_RMW : 0));
+
+ /*
+ * What we do next--whether or not we found a record for the
+ * specified subdatabase--depends on what the specified action is.
+ * Handle ret appropriately as the first statement of each case.
+ */
+ switch (action) {
+ case MU_REMOVE:
+ /*
+ * We should have found something if we're removing it. Note
+ * that in the common case where the DB we're asking to remove
+ * doesn't exist, we won't get this far; __db_subdb_remove
+ * will already have returned an error from __db_open.
+ */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Delete the subdatabase entry first; if this fails,
+ * we don't want to touch the actual subdb pages.
+ */
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We're handling actual data, not on-page meta-data,
+ * so it hasn't been converted to/from opposite
+ * endian architectures. Do it explicitly, now.
+ */
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
+ if ((ret =
+ mdbp->mpf->get(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0)
+ goto err;
+
+ /* Free and put the page. */
+ if ((ret = __db_free(dbc, p)) != 0) {
+ p = NULL;
+ goto err;
+ }
+ p = NULL;
+ break;
+ case MU_RENAME:
+ /* We should have found something if we're renaming it. */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Before we rename, we need to make sure we're not
+ * overwriting another subdatabase, or else this operation
+ * won't be undoable. Open a second cursor and check
+ * for the existence of newname; it shouldn't appear under
+ * us since we hold the metadata lock.
+ */
+ if ((ret = mdbp->cursor(mdbp, txn, &ndbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(newname != NULL);
+ key.data = (void *)newname;
+ key.size = (u_int32_t)strlen(newname);
+
+ /*
+ * We don't actually care what the meta page of the potentially-
+ * overwritten DB is; we just care about existence.
+ */
+ memset(&ndata, 0, sizeof(ndata));
+ F_SET(&ndata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ if ((ret = ndbc->c_get(ndbc, &key, &ndata, DB_SET)) == 0) {
+ /* A subdb called newname exists. Bail. */
+ ret = EEXIST;
+ __db_err(dbenv, "rename: database %s exists", newname);
+ goto err;
+ } else if (ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Now do the put first; we don't want to lose our
+ * sole reference to the subdb. Use the second cursor
+ * so that the first one continues to point to the old record.
+ */
+ if ((ret = ndbc->c_put(ndbc, &key, &data, DB_KEYFIRST)) != 0)
+ goto err;
+ if ((ret = dbc->c_del(dbc, 0)) != 0) {
+ /*
+ * If the delete fails, try to delete the record
+ * we just put, in case we're not txn-protected.
+ */
+ (void)ndbc->c_del(ndbc, 0);
+ goto err;
+ }
+
+ break;
+ case MU_OPEN:
+ /*
+ * Get the subdatabase information. If it already exists,
+ * copy out the page number and we're done.
+ */
+ switch (ret) {
+ case 0:
+ if (LF_ISSET(DB_CREATE) && LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
+ goto done;
+ case DB_NOTFOUND:
+ if (LF_ISSET(DB_CREATE))
+ break;
+ /*
+ * No db_err, it is reasonable to remove a
+ * nonexistent db.
+ */
+ ret = ENOENT;
+ goto err;
+ default:
+ goto err;
+ }
+
+ /*
+ * We need to check against the master lorder here because
+ * we only want to check this if we are creating. In the
+ * case where we don't create we just want to inherit.
+ */
+ if (F_ISSET(mdbp, DB_AM_SWAP) != F_ISSET(sdbp, DB_AM_SWAP)) {
+ ret = EINVAL;
+ __db_err(mdbp->dbenv,
+ "Different lorder specified on existent file");
+ goto err;
+ }
+ /* Create a subdatabase. */
+ if ((ret = __db_new(dbc,
+ type == DB_HASH ? P_HASHMETA : P_BTREEMETA, &p)) != 0)
+ goto err;
+ sdbp->meta_pgno = PGNO(p);
+
+ /*
+ * XXX
+ * We're handling actual data, not on-page meta-data, so it
+ * hasn't been converted to/from opposite endian architectures.
+ * Do it explicitly, now.
+ */
+ t_pgno = PGNO(p);
+ DB_HTONL(&t_pgno);
+ memset(&ndata, 0, sizeof(ndata));
+ ndata.data = &t_pgno;
+ ndata.size = sizeof(db_pgno_t);
+ if ((ret = dbc->c_put(dbc, &key, &ndata, DB_KEYLAST)) != 0)
+ goto err;
+ F_SET(sdbp, DB_AM_CREATED);
+ break;
+ }
+
+err:
+done: /*
+ * If we allocated a page: if we're successful, mark the page dirty
+ * and return it to the cache, otherwise, discard/free it.
+ */
+ if (p != NULL) {
+ if (ret == 0) {
+ if ((t_ret =
+ mdbp->mpf->put(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+ /*
+ * Since we cannot close this file until after
+ * transaction commit, we need to sync the dirty
+ * pages, because we'll read these directly from
+ * disk to open.
+ */
+ if ((t_ret = mdbp->sync(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ } else
+ (void)mdbp->mpf->put(mdbp->mpf, p, 0);
+ }
+
+ /* Discard the cursor(s) and data. */
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ndbc != NULL && (t_ret = ndbc->c_close(ndbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_dbenv_setup --
+ * Set up the underlying environment during a db_open.
+ *
+ * PUBLIC: int __db_dbenv_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, u_int32_t));
+ */
+int
+__db_dbenv_setup(dbp, txn, name, id, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t id;
+ u_int32_t flags;
+{
+ DB *ldbp;
+ DBT pgcookie;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ u_int32_t maxid;
+ int ftype, ret;
+
+ dbenv = dbp->dbenv;
+
+ /* If we don't yet have an environment, it's time to create it. */
+ if (!F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ /* Make sure we have at least DB_MINCACHE pages in our cache. */
+ if (dbenv->mp_gbytes == 0 &&
+ dbenv->mp_bytes < dbp->pgsize * DB_MINPAGECACHE &&
+ (ret = dbenv->set_cachesize(
+ dbenv, 0, dbp->pgsize * DB_MINPAGECACHE, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbenv->open(dbenv, NULL, DB_CREATE |
+ DB_INIT_MPOOL | DB_PRIVATE | LF_ISSET(DB_THREAD), 0)) != 0)
+ return (ret);
+ }
+
+ /* Register DB's pgin/pgout functions. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
+ return (ret);
+
+ /* Create the DB_MPOOLFILE structure. */
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbp->mpf, 0)) != 0)
+ return (ret);
+ mpf = dbp->mpf;
+
+ /* Set the database's cache priority if we've been given one. */
+ if (dbp->priority != 0 &&
+ (ret = mpf->set_priority(mpf, dbp->priority)) != 0)
+ return (ret);
+
+ /*
+ * Open a backing file in the memory pool.
+ *
+ * If we need to pre- or post-process a file's pages on I/O, set the
+ * file type. If it's a hash file, always call the pgin and pgout
+ * routines. This means that hash files can never be mapped into
+ * process memory. If it's a btree file and requires swapping, we
+ * need to page the file in and out. This has to be right -- we can't
+ * mmap files that are being paged in and out.
+ */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
+ break;
+ case DB_HASH:
+ (void)mpf->set_ftype(mpf, DB_FTYPE_SET);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
+ break;
+ case DB_QUEUE:
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_QUEUE_LEN));
+ break;
+ case DB_UNKNOWN:
+ /*
+ * If we're running in the verifier, our database might
+ * be corrupt and we might not know its type--but we may
+ * still want to be able to verify and salvage.
+ *
+ * If we can't identify the type, it's not going to be safe
+ * to call __db_pgin--we pretty much have to give up all
+ * hope of salvaging cross-endianness. Proceed anyway;
+ * at worst, the database will just appear more corrupt
+ * than it actually is, but at best, we may be able
+ * to salvage some data even with no metadata page.
+ */
+ if (F_ISSET(dbp, DB_AM_VERIFYING)) {
+ (void)mpf->set_ftype(mpf, DB_FTYPE_NOTSET);
+ (void)mpf->set_clear_len(mpf, DB_PAGE_DB_LEN);
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (
+ __db_unknown_type(dbenv, "__db_dbenv_setup", dbp->type));
+ }
+
+ (void)mpf->set_fileid(mpf, dbp->fileid);
+ (void)mpf->set_lsn_offset(mpf, 0);
+
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
+ pgcookie.data = &pginfo;
+ pgcookie.size = sizeof(DB_PGINFO);
+ (void)mpf->set_pgcookie(mpf, &pgcookie);
+
+ if ((ret = mpf->open(mpf, name,
+ LF_ISSET(DB_RDONLY | DB_NOMMAP | DB_ODDFILESIZE | DB_TRUNCATE) |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_DIRECT : 0),
+ 0, dbp->pgsize)) != 0)
+ return (ret);
+
+ /*
+ * We may need a per-thread mutex. Allocate it from the mpool
+ * region, there's supposed to be extra space there for that purpose.
+ */
+ if (LF_ISSET(DB_THREAD)) {
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Set up a bookkeeping entry for this database in the log region,
+ * if such a region exists. Note that even if we're in recovery
+ * or a replication client, where we won't log registries, we'll
+ * still need an FNAME struct, so LOGGING_ON is the correct macro.
+ */
+ if (LOGGING_ON(dbenv) &&
+ (ret = __dbreg_setup(dbp, name, id)) != 0)
+ return (ret);
+
+ /*
+ * If we're actively logging and our caller isn't a recovery function
+ * that already did so, assign this dbp a log fileid.
+ */
+ if (DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) &&
+#if !defined(DEBUG_ROP)
+ !F_ISSET(dbp, DB_AM_RDONLY) &&
+#endif
+ (ret = __dbreg_new_id(dbp, txn)) != 0)
+ return (ret);
+
+ /*
+ * Insert ourselves into the DB_ENV's dblist. We allocate a
+ * unique ID to each {fileid, meta page number} pair, and to
+ * each temporary file (since they all have a zero fileid).
+ * This ID gives us something to use to tell which DB handles
+ * go with which databases in all the cursor adjustment
+ * routines, where we don't want to do a lot of ugly and
+ * expensive memcmps.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (maxid = 0, ldbp = LIST_FIRST(&dbenv->dblist);
+	    ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ if (name != NULL &&
+ memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN) == 0 &&
+ ldbp->meta_pgno == dbp->meta_pgno)
+ break;
+ if (ldbp->adj_fileid > maxid)
+ maxid = ldbp->adj_fileid;
+ }
+
+ /*
+ * If ldbp is NULL, we didn't find a match, or we weren't
+ * really looking because name is NULL. Assign the dbp an
+ * adj_fileid one higher than the largest we found, and
+ * insert it at the head of the master dbp list.
+ *
+ * If ldbp is not NULL, it is a match for our dbp. Give dbp
+ * the same ID that ldbp has, and add it after ldbp so they're
+ * together in the list.
+ */
+ if (ldbp == NULL) {
+ dbp->adj_fileid = maxid + 1;
+ LIST_INSERT_HEAD(&dbenv->dblist, dbp, dblistlinks);
+ } else {
+ dbp->adj_fileid = ldbp->adj_fileid;
+ LIST_INSERT_AFTER(ldbp, dbp, dblistlinks);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (0);
+}
+
+/*
+ * __db_close --
+ * DB destructor.
+ *
+ * PUBLIC: int __db_close __P((DB *, u_int32_t));
+ */
+int
+__db_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments, but as a DB handle destructor, we can't fail. */
+ if (flags != 0 && flags != DB_NOSYNC)
+ (void)__db_ferr(dbenv, "DB->close", 0);
+
+ return (__db_close_i(dbp, NULL, flags));
+}
+
+/*
+ * __db_close_i --
+ * Internal DB destructor.
+ *
+ * PUBLIC: int __db_close_i __P((DB *, DB_TXN *, u_int32_t));
+ */
+int
+__db_close_i(dbp, txn, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ /*
+ * Validate arguments, but as a DB handle destructor, we can't fail.
+ *
+ * Check for consistent transaction usage -- ignore errors. Only
+ * internal callers specify transactions, so it's a serious problem
+ * if we get error messages.
+ */
+ if (txn != NULL)
+ (void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0);
+
+ /* Refresh the structure and close any local environment. */
+ if ((t_ret = __db_refresh(dbp, txn, flags)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Call the access specific close function.
+ *
+ * !!!
+ * Because of where these functions are called in the DB handle close
+ * process, these routines can't do anything that would dirty pages or
+ * otherwise affect closing down the database. Specifically, we can't
+ * abort and recover any of the information they control.
+ */
+ if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __qam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ --dbenv->db_ref;
+ if (F_ISSET(dbenv, DB_ENV_DBLOCAL) && dbenv->db_ref == 0 &&
+ (t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Free the database handle. */
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(dbenv, dbp);
+
+ return (ret);
+}
+
+/*
+ * __db_refresh --
+ * Refresh the DB structure, releasing any allocated resources.
+ * This does most of the work of closing files now because refresh
+ * is what is used during abort processing (since we can't destroy
+ * the actual handle) and during abort processing, we may have a
+ * fully opened handle.
+ *
+ * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t));
+ */
+int
+__db_refresh(dbp, txn, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t flags;
+{
+ DB *sdbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCKREQ lreq;
+ DB_MPOOL *dbmp;
+ int ret, t_ret;
+
+ ret = 0;
+
+ dbenv = dbp->dbenv;
+
+ /* If never opened, or not currently open, it's easy. */
+ if (!F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ goto never_opened;
+
+ /*
+ * If we have any secondary indices, disassociate them from us.
+ * We don't bother with the mutex here; it only protects some
+ * of the ops that will make us core-dump mid-close anyway, and
+ * if you're trying to do something with a secondary *while* you're
+ * closing the primary, you deserve what you get. The disassociation
+ * is mostly done just so we can close primaries and secondaries in
+ * any order--but within one thread of control.
+ */
+ for (sdbp = LIST_FIRST(&dbp->s_secondaries);
+ sdbp != NULL; sdbp = LIST_NEXT(sdbp, s_links)) {
+ LIST_REMOVE(sdbp, s_links);
+ if ((t_ret = __db_disassociate(sdbp)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /*
+ * Sync the underlying access method. Do before closing the cursors
+ * because DB->sync allocates cursors in order to write Recno backing
+ * source text files.
+ */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->sync(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine. Note that any failure on a close is considered "really
+ * bad" and we just break out of the loop and force forward.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ /*
+ * Close any outstanding join cursors. Join cursors destroy
+ * themselves on close and have no separate destroy routine.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ /*
+ * Sync the memory pool, even though we've already called DB->sync,
+ * because closing cursors can dirty pages by deleting items they
+ * referenced.
+ */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Close any handle we've been holding since the open. */
+ if (dbp->saved_open_fhp != NULL &&
+ F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbp->saved_open_fhp)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+never_opened:
+ /*
+ * We are not releasing the handle lock here because we're about
+ * to release all locks held by dbp->lid below. There are two
+ * ways that we can get in here with a handle_lock, but not a
+ * dbp->lid. The first is when our lid has been hijacked by a
+ * subdb. The second is when we are a Queue database in the midst
+ * of a rename. If the queue file hasn't actually been opened, we
+ * hijack the main dbp's locker id to do the open so we can get the
+ * extent files. In both cases, we needn't free the handle lock
+ * because it will be freed when the hijacked locker-id is freed.
+ */
+ DB_ASSERT(!LOCK_ISSET(dbp->handle_lock) ||
+ dbp->lid != DB_LOCK_INVALIDID ||
+ dbp->type == DB_QUEUE ||
+ F_ISSET(dbp, DB_AM_SUBDB));
+
+ if (dbp->lid != DB_LOCK_INVALIDID) {
+ /* We may have pending trade operations on this dbp. */
+ if (txn != NULL)
+ __txn_remlock(dbenv, txn, &dbp->handle_lock, dbp->lid);
+
+ /* We may be holding the handle lock; release it. */
+ lreq.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = __lock_vec(dbenv,
+ dbp->lid, 0, &lreq, 1, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, dbp->lid)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
+ }
+
+ /* Discard the locker ID allocated as the fileid. */
+ if (F_ISSET(dbp, DB_AM_INMEM) &&
+ LOCKING_ON(dbenv) && (t_ret = dbenv->lock_id_free(
+ dbenv, *(u_int32_t *)dbp->fileid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->type = DB_UNKNOWN;
+
+ /* Discard the thread mutex. */
+ if (dbp->mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbp->mutexp);
+ dbp->mutexp = NULL;
+ }
+
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
+
+ /* For safety's sake: we may refresh twice. */
+ memset(&dbp->my_rskey, 0, sizeof(DBT));
+ memset(&dbp->my_rkey, 0, sizeof(DBT));
+ memset(&dbp->my_rdata, 0, sizeof(DBT));
+
+ /*
+ * Remove this DB handle from the DB_ENV's dblist, if it's been added.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ if (dbp->dblistlinks.le_prev != NULL)
+ LIST_REMOVE(dbp, dblistlinks);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ dbp->dblistlinks.le_prev = NULL;
+
+ /* Close the memory pool file handle. */
+ if (dbp->mpf != NULL) {
+ if ((t_ret = dbp->mpf->close(dbp->mpf,
+ F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+ dbp->mpf = NULL;
+ }
+
+ if (LOGGING_ON(dbp->dbenv)) {
+ /*
+ * Discard the log file id, if any. We want to log the close
+ * if and only if this is not a recovery dbp.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ (void)__dbreg_revoke_id(dbp, 0);
+ else
+ (void)__dbreg_close_id(dbp, txn);
+
+ /* Discard the log FNAME. */
+ (void)__dbreg_teardown(dbp);
+ }
+
+ /* Clear out fields that normally get set during open. */
+ memset(dbp->fileid, 0, sizeof(dbp->fileid));
+ dbp->adj_fileid = 0;
+ dbp->meta_pgno = 0;
+ dbp->cur_lid = DB_LOCK_INVALIDID;
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+ dbp->cl_id = 0;
+
+ /*
+ * If we are being refreshed with a txn specified, then we need
+ * to make sure that we clear out the lock handle field, because
+ * releasing all the locks for this transaction will release this
+ * lock and we don't want close to stumble upon this handle and
+ * try to close it.
+ */
+ if (txn != NULL)
+ LOCK_INIT(dbp->handle_lock);
+
+ F_CLR(dbp, DB_AM_DBM_ERROR);
+ F_CLR(dbp, DB_AM_DISCARD);
+ F_CLR(dbp, DB_AM_INMEM);
+ F_CLR(dbp, DB_AM_RECOVER);
+ F_CLR(dbp, DB_AM_OPEN_CALLED);
+ F_CLR(dbp, DB_AM_RDONLY);
+ F_CLR(dbp, DB_AM_SWAP);
+
+ return (ret);
+}
+
+/*
+ * __db_log_page
+ * Log a meta-data or root page during a subdatabase create operation.
+ *
+ * PUBLIC: int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *));
+ */
+int
+__db_log_page(dbp, txn, lsn, pgno, page)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_LSN *lsn;
+ db_pgno_t pgno;
+ PAGE *page;
+{
+ DBT page_dbt;
+ DB_LSN new_lsn;
+ int ret;
+
+ if (!LOGGING_ON(dbp->dbenv) || txn == NULL)
+ return (0);
+
+ memset(&page_dbt, 0, sizeof(page_dbt));
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = page;
+
+ ret = __crdel_metasub_log(dbp, txn, &new_lsn, 0, pgno, &page_dbt, lsn);
+
+ if (ret == 0)
+ page->lsn = new_lsn;
+ return (ret);
+}
+
+/*
+ * __db_backup_name
+ * Create the backup file name for a given file.
+ *
+ * PUBLIC: int __db_backup_name __P((DB_ENV *,
+ * PUBLIC: const char *, DB_TXN *, char **));
+ */
+#undef BACKUP_PREFIX
+#define BACKUP_PREFIX "__db."
+
+#undef MAX_LSN_TO_TEXT
+#define MAX_LSN_TO_TEXT 17
+
+int
+__db_backup_name(dbenv, name, txn, backup)
+ DB_ENV *dbenv;
+ const char *name;
+ DB_TXN *txn;
+ char **backup;
+{
+ DB_LSN lsn;
+ size_t len;
+ int plen, ret;
+ char *p, *retp;
+
+ /*
+ * Create the name. Backup file names are in one of two forms:
+ *
+ * In a transactional env: __db.LSN(8).LSN(8)
+ * and
+ * in a non-transactional env: __db.FILENAME.
+ *
+ * If the transaction doesn't have a current LSN, we write
+ * a dummy log record to force it, so that we ensure that
+ * all tmp names are unique.
+ *
+ * In addition, the name passed may contain an env-relative path.
+ * In that case, put the __db. in the right place (in the last
+ * component of the pathname).
+ */
+ if (txn != NULL) {
+ if (IS_ZERO_LSN(txn->last_lsn)) {
+ /*
+ * Write dummy log record. The two choices for
+ * dummy log records are __db_noop_log and
+ * __db_debug_log; unfortunately __db_noop_log requires
+ * a valid dbp, and we aren't guaranteed to be able
+ * to pass one in here.
+ */
+ if ((ret = __db_debug_log(dbenv, txn, &lsn, 0,
+ NULL, 0, NULL, NULL, 0)) != 0)
+ return (ret);
+ } else
+ lsn = txn->last_lsn;
+ }
+
+ /*
+ * Part of the name may be a full path, so we need to make sure that
+ * we allocate enough space for it, even in the case where we don't
+ * use the entire filename for the backup name.
+ */
+ len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT;
+
+ if ((ret = __os_malloc(dbenv, len, &retp)) != 0)
+ return (ret);
+
+ /*
+ * There are four cases here:
+ * 1. simple path w/out transaction
+ * 2. simple path + transaction
+ * 3. multi-component path w/out transaction
+ * 4. multi-component path + transaction
+ */
+ if ((p = __db_rpath(name)) == NULL) {
+ if (txn == NULL) /* case 1 */
+ snprintf(retp, len, "%s%s.", BACKUP_PREFIX, name);
+ else /* case 2 */
+ snprintf(retp, len,
+ "%s%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset);
+ } else {
+ plen = (int)(p - name) + 1;
+ p++;
+ if (txn == NULL) /* case 3 */
+ snprintf(retp, len,
+ "%.*s%s%s.", plen, name, BACKUP_PREFIX, p);
+ else /* case 4 */
+ snprintf(retp, len,
+ "%.*s%x.%x.", plen, name, lsn.file, lsn.offset);
+ }
+
+ *backup = retp;
+ return (0);
+}
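For example, given the formats above: in a non-transactional environment a simple name such as "mydb" yields the backup name "__db.mydb.", while in a transactional environment a last LSN with file 0x12 and offset 0x3456 yields "__db.12.3456". In the multi-component cases the generated component is placed after the directory part of the original path, i.e. in the last path component.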
+
+/*
+ * __dblist_get --
+ * Get the first element of dbenv->dblist with
+ * dbp->adj_fileid matching adjid.
+ *
+ * PUBLIC: DB *__dblist_get __P((DB_ENV *, u_int32_t));
+ */
+DB *
+__dblist_get(dbenv, adjid)
+ DB_ENV *dbenv;
+ u_int32_t adjid;
+{
+ DB *dbp;
+
+ for (dbp = LIST_FIRST(&dbenv->dblist);
+ dbp != NULL && dbp->adj_fileid != adjid;
+ dbp = LIST_NEXT(dbp, dblistlinks))
+ ;
+
+ return (dbp);
+}
+
+/*
+ * __db_disassociate --
+ * Destroy the association between a given secondary and its primary.
+ */
+static int
+__db_disassociate(sdbp)
+ DB *sdbp;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+
+ sdbp->s_callback = NULL;
+ sdbp->s_primary = NULL;
+ sdbp->get = sdbp->stored_get;
+ sdbp->close = sdbp->stored_close;
+
+ /*
+ * Complain, but proceed, if we have any active cursors. (We're in
+ * the middle of a close, so there's really no turning back.)
+ */
+ if (sdbp->s_refcnt != 1 ||
+ TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(sdbp->dbenv,
+ "Closing a primary DB while a secondary DB has active cursors is unsafe");
+ ret = EINVAL;
+ }
+ sdbp->s_refcnt = 0;
+
+ while ((dbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ F_CLR(sdbp, DB_AM_SECONDARY);
+ return (ret);
+}
+
+#if CONFIG_TEST
+/*
+ * __db_testcopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ * PUBLIC: #if CONFIG_TEST
+ * PUBLIC: int __db_testcopy __P((DB_ENV *, DB *, const char *));
+ * PUBLIC: #endif
+ */
+int
+__db_testcopy(dbenv, dbp, name)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+{
+ DB_MPOOLFILE *mpf;
+
+ DB_ASSERT(dbp != NULL || name != NULL);
+
+ if (name == NULL) {
+ mpf = dbp->mpf;
+ name = R_ADDR(mpf->dbmp->reginfo, mpf->mfp->path_off);
+ }
+
+ if (dbp != NULL && dbp->type == DB_QUEUE)
+ return (__qam_testdocopy(dbp, name));
+ else
+ return (__db_testdocopy(dbenv, name));
+}
+
+static int
+__qam_testdocopy(dbp, name)
+ DB *dbp;
+ const char *name;
+{
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[256], *dir;
+ int ret;
+
+ filelist = NULL;
+ if ((ret = __db_testdocopy(dbp->dbenv, name)) != 0)
+ return (ret);
+ if (dbp->mpf != NULL &&
+ (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ return (ret);
+
+ if (filelist == NULL)
+ return (0);
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ if ((ret = __db_testdocopy(dbp->dbenv, buf)) != 0)
+ return (ret);
+ }
+
+ __os_free(dbp->dbenv, filelist);
+ return (0);
+}
+
+/*
+ * __db_testdocopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ */
+static int
+__db_testdocopy(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ size_t len;
+ int dircnt, i, ret;
+ char **namesp, *backup, *copy, *dir, *p, *real_name;
+
+ real_name = NULL;
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ copy = backup = NULL;
+ namesp = NULL;
+
+ /*
+ * Maximum size of file, including adding a ".afterop".
+ */
+ len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9;
+
+ if ((ret = __os_malloc(dbenv, len, &copy)) != 0)
+ goto out;
+
+ if ((ret = __os_malloc(dbenv, len, &backup)) != 0)
+ goto out;
+
+ /*
+ * First copy the file itself.
+ */
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+
+ if ((ret = __os_strdup(dbenv, real_name, &dir)) != 0)
+ goto out;
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ /*
+ * Create the name. Backup file names are of the form:
+ *
+ * __db.name.0x[lsn-file].0x[lsn-offset]
+ *
+ * which guarantees uniqueness. We want to look for the
+ * backup name, followed by a '.0x' (so that if they have
+ * files named, say, 'a' and 'abc' we won't match 'abc' when
+ * looking for 'a').
+ */
+ snprintf(backup, len, "%s%s.0x", BACKUP_PREFIX, name);
+
+ /*
+ * We need the directory path to do the __os_dirlist.
+ */
+ p = __db_rpath(dir);
+ if (p != NULL)
+ *p = '\0';
+ ret = __os_dirlist(dbenv, dir, &namesp, &dircnt);
+#if DIAGNOSTIC
+ /*
+ * XXX
+ * Restore the byte we overwrote: the memory guard code uses strlen, and we
+ * just moved the end of the string somewhere sooner. This causes the
+ * guard code to fail because it looks at one byte past the end of the
+ * string.
+ */
+ *p = '/';
+#endif
+ __os_free(dbenv, dir);
+ if (ret != 0)
+ goto out;
+ for (i = 0; i < dircnt; i++) {
+ /*
+ * Need to check if it is a backup file for this database.
+ * No idea what namesp[i] may be or how long, so we
+ * must use strncmp and not memcmp. We don't want
+ * to use strcmp either because we are only matching
+ * the first part of the real file's name. We don't
+ * know its LSNs.
+ */
+ if (strncmp(namesp[i], backup, strlen(backup)) == 0) {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ namesp[i], 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /*
+ * This should not happen. Check that old
+ * .afterop files aren't around.
+ * If so, just move on.
+ */
+ if (strstr(real_name, ".afterop") != NULL) {
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ continue;
+ }
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ }
+ }
+out:
+ if (backup != NULL)
+ __os_free(dbenv, backup);
+ if (copy != NULL)
+ __os_free(dbenv, copy);
+ if (namesp != NULL)
+ __os_dirfree(dbenv, namesp, dircnt);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+static void
+__db_makecopy(src, dest)
+ const char *src, *dest;
+{
+ DB_FH rfh, wfh;
+ size_t rcnt, wcnt;
+ char *buf;
+
+ memset(&rfh, 0, sizeof(rfh));
+ memset(&wfh, 0, sizeof(wfh));
+
+ if (__os_malloc(NULL, 1024, &buf) != 0)
+ return;
+
+ if (__os_open(NULL,
+ src, DB_OSO_RDONLY, __db_omode("rw----"), &rfh) != 0)
+ goto err;
+ if (__os_open(NULL, dest,
+ DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode("rw----"), &wfh) != 0)
+ goto err;
+
+ for (;;)
+ if (__os_read(NULL, &rfh, buf, 1024, &rcnt) < 0 || rcnt == 0 ||
+ __os_write(NULL, &wfh, buf, rcnt, &wcnt) < 0)
+ break;
+
+err: __os_free(NULL, buf);
+ if (F_ISSET(&rfh, DB_FH_VALID))
+ __os_closehandle(NULL, &rfh);
+ if (F_ISSET(&wfh, DB_FH_VALID))
+ __os_closehandle(NULL, &wfh);
+}
+#endif
diff --git a/libdb/db/db.src b/libdb/db/db.src
new file mode 100644
index 0000000..cec58a6
--- /dev/null
+++ b/libdb/db/db.src
@@ -0,0 +1,195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __db
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * addrem -- Add or remove an entry from a duplicate page.
+ *
+ * opcode: identifies if this is an add or delete.
+ * fileid: file identifier of the file being modified.
+ * pgno: duplicate page number.
+ * indx: location at which to insert or delete.
+ * nbytes: number of bytes added/removed to/from the page.
+ * hdr: header for the data item.
+ * dbt: data that is deleted or is to be added.
+ * pagelsn: former lsn of the page.
+ *
+ * If the hdr was NULL, then the dbt is a regular B_KEYDATA.
+ * If the dbt was NULL, then the hdr is a complete item to be
+ * pasted on the page.
+ */
+BEGIN addrem 41
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG nbytes u_int32_t lu
+PGDBT hdr DBT s
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * big -- Handles addition and deletion of big key/data items.
+ *
+ * opcode: identifies get/put.
+ * fileid: file identifier of the file being modified.
+ * pgno: page onto which data is being added/removed.
+ * prev_pgno: the page before the one we are logging.
+ * next_pgno: the page after the one we are logging.
+ * dbt: data being written onto the page.
+ * pagelsn: former lsn of the orig_page.
+ * prevlsn: former lsn of the prev_pgno.
+ * nextlsn: former lsn of the next_pgno. This is not currently used, but
+ * may be used later if we actually do overwrites of big key/
+ * data items in place.
+ */
+BEGIN big 43
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+WRLOCKNZ prev_pgno db_pgno_t lu
+WRLOCKNZ next_pgno db_pgno_t lu
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+POINTER prevlsn DB_LSN * lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * ovref -- Handles increment/decrement of overflow page reference count.
+ *
+ * fileid: identifies the file being modified.
+ * pgno: page number whose ref count is being incremented/decremented.
+ * adjust: the adjustment being made.
+ * lsn: the page's original lsn.
+ */
+BEGIN ovref 44
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG adjust int32_t ld
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * relink -- Handles relinking around a page.
+ *
+ * opcode: indicates if this is an add page or delete page
+ * pgno: the page being changed.
+ * lsn: the page's original lsn.
+ * prev: the previous page.
+ * lsn_prev: the previous page's original lsn.
+ * next: the next page.
+ * lsn_next: the next page's original lsn.
+ */
+BEGIN relink 45
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+WRLOCKNZ prev db_pgno_t lu
+POINTER lsn_prev DB_LSN * lu
+WRLOCKNZ next db_pgno_t lu
+POINTER lsn_next DB_LSN * lu
+END
+
+/*
+ * Debug -- log an operation upon entering an access method.
+ * op: Operation (cursor, c_close, c_get, c_put, c_del,
+ * get, put, delete).
+ * fileid: identifies the file being acted upon.
+ * key: key parameter
+ * data: data parameter
+ * flags: flags parameter
+ */
+BEGIN debug 47
+DBT op DBT s
+ARG fileid int32_t ld
+DBT key DBT s
+DBT data DBT s
+ARG arg_flags u_int32_t lu
+END
+
+/*
+ * noop -- do nothing, but get an LSN.
+ */
+BEGIN noop 48
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+END
+
+/*
+ * pg_alloc: used to record allocating a new page.
+ *
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno: the meta-data page number.
+ * page_lsn: the allocated page's original lsn.
+ * pgno: the page allocated.
+ * ptype: the type of the page allocated.
+ * next: the next page on the free list.
+ */
+BEGIN pg_alloc 49
+DB fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+POINTER page_lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG ptype u_int32_t lu
+ARG next db_pgno_t lu
+END
+
+/*
+ * pg_free: used to record freeing a page.
+ *
+ * pgno: the page being freed.
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno: the meta-data page number.
+ * header: the header from the free'd page.
+ * next: the previous next pointer on the metadata page.
+ */
+BEGIN pg_free 50
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+PGDBT header DBT s
+ARG next db_pgno_t lu
+END
+
+/*
+ * cksum --
+ * This log record is written when we're unable to checksum a page,
+ * before returning DB_RUNRECOVERY. This log record causes normal
+ * recovery to itself return DB_RUNRECOVERY, as only catastrophic
+ * recovery can fix things.
+ */
+BEGIN cksum 51
+END
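Each BEGIN ... END description above is the input that gen_rec.awk expands into a matching set of logging, read, print, and getpgnos routines. The addrem record, for instance, maps its ARG/DB/WRLOCK/PGDBT/DBT/POINTER fields, in order, onto the parameters of __db_addrem_log(dbp, txnid, ret_lsnp, flags, opcode, pgno, indx, nbytes, hdr, dbt, pagelsn) at the top of the generated db_auto.c further below.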
diff --git a/libdb/db/db_am.c b/libdb/db/db_am.c
new file mode 100644
index 0000000..3f74ff1
--- /dev/null
+++ b/libdb/db/db_am.c
@@ -0,0 +1,1271 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+
+static int __db_append_primary __P((DBC *, DBT *, DBT *));
+static int __db_secondary_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __db_secondary_close __P((DB *, u_int32_t));
+
+#ifdef DEBUG
+static int __db_cprint_item __P((DBC *));
+#endif
+
+/*
+ * __db_cursor --
+ * Allocate and return a cursor.
+ *
+ * PUBLIC: int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ */
+int
+__db_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_lockmode_t mode;
+ u_int32_t op;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor");
+
+ /* Validate arguments. */
+ if ((ret = __db_cursorchk(dbp, flags)) != 0)
+ return (ret);
+
+ /*
+ * Check for consistent transaction usage. For now, assume that
+ * this cursor might be used for read operations only (in which
+ * case it may not require a txn). We'll check more stringently
+ * in c_del and c_put. (Note that this all means that the
+ * read-op txn tests have to be a subset of the write-op ones.)
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, DB_LOCK_INVALIDID, dbcp)) != 0)
+ return (ret);
+ dbc = *dbcp;
+
+ /*
+ * If this is CDB, do all the locking in the interface, which is
+ * right here.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ op = LF_ISSET(DB_OPFLAGS_MASK);
+ mode = (op == DB_WRITELOCK) ? DB_LOCK_WRITE :
+ ((op == DB_WRITECURSOR) ? DB_LOCK_IWRITE : DB_LOCK_READ);
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker, 0,
+ &dbc->lock_dbt, mode, &dbc->mylock)) != 0) {
+ (void)__db_c_close(dbc);
+ return (ret);
+ }
+ if (op == DB_WRITECURSOR)
+ F_SET(dbc, DBC_WRITECURSOR);
+ if (op == DB_WRITELOCK)
+ F_SET(dbc, DBC_WRITER);
+ }
+
+ if (LF_ISSET(DB_DIRTY_READ) ||
+ (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+ F_SET(dbc, DBC_DIRTY_READ);
+ return (0);
+}
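A minimal caller-side sketch of the interface __db_cursor backs; the helper name dump_keys() and the printf formatting are hypothetical, but the dbp->cursor / c_get / c_close calls are the Berkeley DB 4.x C API used throughout these sources.

#include <stdio.h>
#include <string.h>
#include <db.h>

int
dump_keys(DB *dbp, DB_TXN *txn)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	/* Allocate an ordinary read cursor (flags == 0). */
	if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Walk every key/data pair; DB_NOTFOUND ends the walk. */
	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
		printf("%.*s\n", (int)key.size, (char *)key.data);
	if (ret == DB_NOTFOUND)
		ret = 0;

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}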
+
+/*
+ * __db_icursor --
+ * Internal version of __db_cursor. If dbcp is
+ * non-NULL it is assumed to point to an area to
+ * initialize as a cursor.
+ *
+ * PUBLIC: int __db_icursor
+ * PUBLIC: __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **));
+ */
+int
+__db_icursor(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
+ DB *dbp;
+ DB_TXN *txn;
+ DBTYPE dbtype;
+ db_pgno_t root;
+ int is_opd;
+ u_int32_t lockerid;
+ DBC **dbcp;
+{
+ DBC *dbc, *adbc;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ int allocated, ret;
+
+ dbenv = dbp->dbenv;
+ allocated = 0;
+
+ /*
+ * Take one from the free list if it's available. Take only the
+ * right type. With off page dups we may have different kinds
+ * of cursors on the queue for a single database.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if (dbtype == dbc->dbtype) {
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ F_CLR(dbc, ~DBC_OWN_LID);
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (dbc == NULL) {
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0)
+ return (ret);
+ allocated = 1;
+ dbc->flags = 0;
+
+ dbc->dbp = dbp;
+
+ /* Set up locking information. */
+ if (LOCKING_ON(dbenv)) {
+ /*
+ * If we are not threaded, then there is no need to
+ * create new locker ids. We know that no one else
+ * is running concurrently using this DB, so we can
+ * take a peek at any cursors on the active queue.
+ */
+ if (!DB_IS_THREADED(dbp) &&
+ (adbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ dbc->lid = adbc->lid;
+ else {
+ if ((ret =
+ dbenv->lock_id(dbenv, &dbc->lid)) != 0)
+ goto err;
+ F_SET(dbc, DBC_OWN_LID);
+ }
+
+ /*
+ * In CDB, secondary indices should share a lock file
+ * ID with the primary; otherwise we're susceptible to
+ * deadlocks. We also use __db_icursor rather
+ * than sdbp->cursor to create secondary update
+ * cursors in c_put and c_del; these won't
+ * acquire a new lock.
+ *
+ * !!!
+ * Since this is in the one-time cursor allocation
+ * code, we need to be sure to destroy, not just
+ * close, all cursors in the secondary when we
+ * associate.
+ */
+ if (CDB_LOCKING(dbp->dbenv) &&
+ F_ISSET(dbp, DB_AM_SECONDARY))
+ memcpy(dbc->lock.fileid,
+ dbp->s_primary->fileid, DB_FILE_ID_LEN);
+ else
+ memcpy(dbc->lock.fileid,
+ dbp->fileid, DB_FILE_ID_LEN);
+
+ if (CDB_LOCKING(dbenv)) {
+ if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) {
+ /*
+ * If we are doing a single lock per
+ * environment, set up the global
+ * lock object just like we do to
+ * single thread creates.
+ */
+ DB_ASSERT(sizeof(db_pgno_t) ==
+ sizeof(u_int32_t));
+ dbc->lock_dbt.size = sizeof(u_int32_t);
+ dbc->lock_dbt.data = &dbc->lock.pgno;
+ dbc->lock.pgno = 0;
+ } else {
+ dbc->lock_dbt.size = DB_FILE_ID_LEN;
+ dbc->lock_dbt.data = dbc->lock.fileid;
+ }
+ } else {
+ dbc->lock.type = DB_PAGE_LOCK;
+ dbc->lock_dbt.size = sizeof(dbc->lock);
+ dbc->lock_dbt.data = &dbc->lock;
+ }
+ }
+ /* Init the DBC internal structure. */
+ switch (dbtype) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_init(dbc, dbtype)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_c_init(dbc)) != 0)
+ goto err;
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_c_init(dbc)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_icursor", dbtype);
+ goto err;
+ }
+
+ cp = dbc->internal;
+ }
+
+ /* Refresh the DBC structure. */
+ dbc->dbtype = dbtype;
+ RESET_RET_MEM(dbc);
+
+ if ((dbc->txn = txn) == NULL) {
+ /*
+ * There are certain cases in which we want to create a
+ * new cursor with a particular locker ID that is known
+ * to be the same as (and thus not conflict with) an
+ * open cursor.
+ *
+ * The most obvious case is cursor duplication; when we
+ * call DBC->c_dup or __db_c_idup, we want to use the original
+ * cursor's locker ID.
+ *
+ * Another case is when updating secondary indices. Standard
+ * CDB locking would mean that we might block ourself: we need
+ * to open an update cursor in the secondary while an update
+ * cursor in the primary is open, and when the secondary and
+ * primary are subdatabases or we're using env-wide locking,
+ * this is disastrous.
+ *
+ * In these cases, our caller will pass a nonzero locker ID
+ * into this function. Use this locker ID instead of dbc->lid
+ * as the locker ID for our new cursor.
+ */
+ if (lockerid != DB_LOCK_INVALIDID)
+ dbc->locker = lockerid;
+ else
+ dbc->locker = dbc->lid;
+ } else {
+ dbc->locker = txn->txnid;
+ txn->cursors++;
+ }
+
+ /*
+ * These fields change when we are used as a secondary index, so
+ * if the DB is a secondary, make sure they're set properly just
+ * in case we opened some cursors before we were associated.
+ *
+ * __db_c_get is used by all access methods, so this should be safe.
+ */
+ if (F_ISSET(dbp, DB_AM_SECONDARY))
+ dbc->c_get = __db_c_secondary_get;
+
+ if (is_opd)
+ F_SET(dbc, DBC_OPD);
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ F_SET(dbc, DBC_RECOVER);
+ if (F_ISSET(dbp, DB_AM_COMPENSATE))
+ F_SET(dbc, DBC_COMPENSATE);
+
+ /* Refresh the DBC internal structure. */
+ cp = dbc->internal;
+ cp->opd = NULL;
+
+ cp->indx = 0;
+ cp->page = NULL;
+ cp->pgno = PGNO_INVALID;
+ cp->root = root;
+
+ switch (dbtype) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_refresh(dbc)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ case DB_QUEUE:
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv, "__db_icursor", dbp->type);
+ goto err;
+ }
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
+ F_SET(dbc, DBC_ACTIVE);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ *dbcp = dbc;
+ return (0);
+
+err: if (allocated)
+ __os_free(dbp->dbenv, dbc);
+ return (ret);
+}
+
+#ifdef DEBUG
+/*
+ * __db_cprint --
+ * Display the cursor active and free queues.
+ *
+ * PUBLIC: int __db_cprint __P((DB *));
+ */
+int
+__db_cprint(dbp)
+ DB *dbp;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ fprintf(stderr, "Active queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ fprintf(stderr, "Free queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+static int
+__db_cprint_item(dbc)
+ DBC *dbc;
+{
+ static const FN fn[] = {
+ { DBC_ACTIVE, "active" },
+ { DBC_COMPENSATE, "compensate" },
+ { DBC_OPD, "off-page-dup" },
+ { DBC_RECOVER, "recover" },
+ { DBC_RMW, "read-modify-write" },
+ { DBC_TRANSIENT, "transient" },
+ { DBC_WRITECURSOR, "write cursor" },
+ { DBC_WRITEDUP, "internally dup'ed write cursor" },
+ { DBC_WRITER, "short-term write cursor" },
+ { 0, NULL }
+ };
+ DB *dbp;
+ DBC_INTERNAL *cp;
+ const char *s;
+
+ dbp = dbc->dbp;
+ cp = dbc->internal;
+
+ s = __db_dbtype_to_string(dbc->dbtype);
+ if (strcmp(s, "UNKNOWN TYPE") == 0) {
+ DB_ASSERT(0);
+ return (1);
+ }
+ fprintf(stderr, "%s/%#0lx: opd: %#0lx\n",
+ s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd));
+
+ fprintf(stderr, "\ttxn: %#0lx lid: %lu locker: %lu\n",
+ P_TO_ULONG(dbc->txn), (u_long)dbc->lid, (u_long)dbc->locker);
+
+ fprintf(stderr, "\troot: %lu page/index: %lu/%lu",
+ (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx);
+
+ __db_prflags(dbc->flags, fn, stderr);
+ fprintf(stderr, "\n");
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ __bam_cprint(dbc);
+ break;
+ case DB_HASH:
+ __ham_cprint(dbc);
+ break;
+ default:
+ break;
+ }
+ return (0);
+}
+#endif /* DEBUG */
+
+/*
+ * db_fd --
+ * Return a file descriptor for flock'ing.
+ *
+ * PUBLIC: int __db_fd __P((DB *, int *));
+ */
+int
+__db_fd(dbp, fdp)
+ DB *dbp;
+ int *fdp;
+{
+ DB_FH *fhp;
+ int ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->fd");
+
+ /*
+ * XXX
+ * Truly spectacular layering violation.
+ */
+ if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) != 0)
+ return (ret);
+
+ if (F_ISSET(fhp, DB_FH_VALID)) {
+ *fdp = fhp->fd;
+ return (0);
+ } else {
+ *fdp = -1;
+ __db_err(dbp->dbenv, "DB does not have a valid file handle");
+ return (ENOENT);
+ }
+}
+
+/*
+ * __db_get --
+ * Return a key/data pair.
+ *
+ * PUBLIC: int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int mode, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get");
+
+ if ((ret = __db_getchk(dbp, key, data, flags)) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ mode = 0;
+ if (LF_ISSET(DB_DIRTY_READ)) {
+ mode = DB_DIRTY_READ;
+ LF_CLR(DB_DIRTY_READ);
+ } else if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ mode = DB_WRITELOCK;
+ if ((ret = dbp->cursor(dbp, txn, &dbc, mode)) != 0)
+ return (ret);
+
+ DEBUG_LREAD(dbc, txn, "__db_get", key, NULL, flags);
+
+ /*
+ * The DBC_TRANSIENT flag indicates that we're just doing a
+ * single operation with this cursor, and that in case of
+ * error we don't need to restore it to its old position--we're
+ * going to close it right away. Thus, we can perform the get
+ * without duplicating the cursor, saving some cycles in this
+ * common case.
+ *
+ * SET_RET_MEM indicates that if key and/or data have no DBT
+ * flags set and DB manages the returned-data memory, that memory
+ * will belong to this handle, not to the underlying cursor.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+ SET_RET_MEM(dbc, dbp);
+
+ if (LF_ISSET(~(DB_RMW | DB_MULTIPLE)) == 0)
+ LF_SET(DB_SET);
+ ret = dbc->c_get(dbc, key, data, flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
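A small usage sketch, assuming a hypothetical fetch_value() helper: with no DBT flags set, the returned data memory is managed by the DB handle as the SET_RET_MEM comment above describes, so it stays valid only until the next such call on the handle.

#include <string.h>
#include <db.h>

int
fetch_value(DB *dbp, DB_TXN *txn, const char *k, DBT *data)
{
	DBT key;

	memset(&key, 0, sizeof(key));
	key.data = (void *)k;
	key.size = (u_int32_t)strlen(k) + 1;

	/* No DBT flags: DB owns the returned data buffer. */
	memset(data, 0, sizeof(*data));

	return (dbp->get(dbp, txn, &key, data, 0));
}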
+
+/*
+ * __db_put --
+ * Store a key/data pair.
+ *
+ * PUBLIC: int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT tdata;
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put");
+
+ /* Validate arguments. */
+ if ((ret = __db_putchk(dbp, key, data,
+ flags, F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) != 0)
+ return (ret);
+
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_put", key, data, flags);
+
+ SET_RET_MEM(dbc, dbp);
+
+ /*
+ * See the comment in __db_get().
+ *
+ * Note that the c_get in the DB_NOOVERWRITE case is safe to
+ * do with this flag set; if it errors in any way other than
+ * DB_NOTFOUND, we're going to close the cursor without doing
+ * anything else, and if it returns DB_NOTFOUND then it's safe
+ * to do a c_put(DB_KEYLAST) even if an access method moved the
+ * cursor, since that's not position-dependent.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+
+ switch (flags) {
+ case DB_APPEND:
+ /*
+ * If there is an append callback, the value stored in
+ * data->data may be replaced and then freed. To avoid
+ * passing a freed pointer back to the user, just operate
+ * on a copy of the data DBT.
+ */
+ tdata = *data;
+
+ /*
+ * Append isn't a normal put operation; call the appropriate
+ * access method's append function.
+ */
+ switch (dbp->type) {
+ case DB_QUEUE:
+ if ((ret = __qam_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ case DB_RECNO:
+ if ((ret = __ram_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ default:
+ /* The interface should prevent this. */
+ DB_ASSERT(0);
+ ret = __db_ferr(dbenv, "__db_put", flags);
+ goto err;
+ }
+
+ /*
+ * Secondary indices: since we've returned zero from
+ * an append function, we've just put a record, and done
+ * so outside __db_c_put. We know we're not a secondary--
+ * the interface prevents puts on them--but we may be a
+ * primary. If so, update our secondary indices
+ * appropriately.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL)
+ ret = __db_append_primary(dbc, key, &tdata);
+
+ /*
+ * The append callback, if one exists, may have allocated
+ * a new tdata.data buffer. If so, free it.
+ */
+ FREE_IF_NEEDED(dbp, &tdata);
+
+ /* No need for a cursor put; we're done. */
+ goto err;
+ case DB_NOOVERWRITE:
+ flags = 0;
+ /*
+ * Set DB_DBT_USERMEM, as this might be a threaded application and
+ * the flags checking will catch us. We don't want the actual
+ * data, so request a partial of length 0.
+ */
+ memset(&tdata, 0, sizeof(tdata));
+ F_SET(&tdata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If we're doing page-level locking, set the read-modify-write
+ * flag, we're going to overwrite immediately.
+ */
+ if ((ret = dbc->c_get(dbc, key, &tdata,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0))) == 0)
+ ret = DB_KEYEXIST;
+ else if (ret == DB_NOTFOUND || ret == DB_KEYEMPTY)
+ ret = 0;
+ break;
+ default:
+ /* Fall through to normal cursor put. */
+ break;
+ }
+ if (ret == 0)
+ ret = dbc->c_put(dbc,
+ key, data, flags == 0 ? DB_KEYLAST : flags);
+
+err: /* Close the cursor. */
+ if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
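A caller-side sketch of the DB_NOOVERWRITE path above (store_if_absent() is a hypothetical helper): the handle probes for the key first and reports DB_KEYEXIST instead of overwriting.

#include <string.h>
#include <db.h>

int
store_if_absent(DB *dbp, DB_TXN *txn, const char *k, const char *v)
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = (void *)k;
	key.size = (u_int32_t)strlen(k) + 1;
	data.data = (void *)v;
	data.size = (u_int32_t)strlen(v) + 1;

	/* Existing keys are left untouched; DB_KEYEXIST reports them. */
	if ((ret = dbp->put(dbp, txn, &key, &data, DB_NOOVERWRITE)) ==
	    DB_KEYEXIST)
		return (0);
	return (ret);
}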
+
+/*
+ * __db_delete --
+ * Delete the items referenced by a key.
+ *
+ * PUBLIC: int __db_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__db_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT data, lkey;
+ DB_ENV *dbenv;
+ u_int32_t f_init, f_next;
+ int ret, t_ret, txn_local;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
+
+ /* Check for invalid flags. */
+ if ((ret = __db_delchk(dbp, key, flags)) != 0)
+ return (ret);
+
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_delete", key, NULL, flags);
+
+ /*
+ * Walk a cursor through the key/data pairs, deleting as we go. Set
+ * the DB_DBT_USERMEM flag, as this might be a threaded application
+ * and the flags checking will catch us. We don't actually want the
+ * keys or data, so request a partial of length 0.
+ */
+ memset(&lkey, 0, sizeof(lkey));
+ F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If locking (and we haven't already acquired CDB locks), set the
+ * read-modify-write flag.
+ */
+ f_init = DB_SET;
+ f_next = DB_NEXT_DUP;
+ if (STD_LOCKING(dbc)) {
+ f_init |= DB_RMW;
+ f_next |= DB_RMW;
+ }
+
+ /* Walk through the set of key/data pairs, deleting as we go. */
+ if ((ret = dbc->c_get(dbc, key, &data, f_init)) != 0)
+ goto err;
+
+ /*
+ * Hash permits an optimization in DB->del: since on-page
+ * duplicates are stored in a single HKEYDATA structure, it's
+ * possible to delete an entire set of them at once, and as
+ * the HKEYDATA has to be rebuilt and re-put each time it
+ * changes, this is much faster than deleting the duplicates
+ * one by one. Thus, if we're not pointing at an off-page
+ * duplicate set, and we're not using secondary indices (in
+ * which case we'd have to examine the items one by one anyway),
+ * let hash do this "quick delete".
+ *
+ * !!!
+ * Note that this is the only application-executed delete call in
+ * Berkeley DB that does not go through the __db_c_del function.
+ * If anything other than the delete itself (like a secondary index
+ * update) has to happen there in a particular situation, the
+ * conditions here should be modified not to call __ham_quick_delete.
+ * The ordinary AM-independent alternative will work just fine with
+ * a hash; it'll just be slower.
+ */
+ if (dbp->type == DB_HASH) {
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL &&
+ !F_ISSET(dbp, DB_AM_SECONDARY) &&
+ dbc->internal->opd == NULL) {
+ ret = __ham_quick_delete(dbc);
+ goto err;
+ }
+ }
+
+ for (;;) {
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc, &lkey, &data, f_next)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ break;
+ }
+ goto err;
+ }
+ }
+
+err: /* Discard the cursor. */
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
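A matching caller-side sketch (remove_key() is a hypothetical helper): a single DB->del drops every duplicate stored under the key, via the cursor walk or the hash quick delete described above.

#include <string.h>
#include <db.h>

int
remove_key(DB *dbp, DB_TXN *txn, const char *k)
{
	DBT key;

	memset(&key, 0, sizeof(key));
	key.data = (void *)k;
	key.size = (u_int32_t)strlen(k) + 1;

	/* Deletes the key and all of its duplicate data items. */
	return (dbp->del(dbp, txn, &key, 0));
}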
+
+/*
+ * __db_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __db_sync __P((DB *, u_int32_t));
+ */
+int
+__db_sync(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
+
+ if ((ret = __db_syncchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Read-only trees never need to be sync'd. */
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ return (0);
+
+ /* If it's a Recno tree, write the backing source text file. */
+ if (dbp->type == DB_RECNO)
+ ret = __ram_writeback(dbp);
+
+ /* If the tree was never backed by a database file, we're done. */
+ if (F_ISSET(dbp, DB_AM_INMEM))
+ return (0);
+
+ /* Flush any dirty pages from the cache to the backing file. */
+ if ((t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_associate --
+ * Associate another database as a secondary index to this one.
+ *
+ * PUBLIC: int __db_associate __P((DB *, DB_TXN *, DB *,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associate(dbp, txn, sdbp, callback, flags)
+ DB *dbp, *sdbp;
+ DB_TXN *txn;
+ int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *pdbc, *sdbc;
+ DBT skey, key, data;
+ int build, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ txn_local = 0;
+ pdbc = NULL;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&skey, 0, sizeof(DBT));
+
+ if ((ret = __db_associatechk(dbp, sdbp, callback, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create a local transaction as necessary, check for consistent
+ * transaction usage, and, if we have no transaction but do have
+ * locking on, acquire a locker id for the handle lock acquisition.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * Check that if an open transaction is in progress, we're in it,
+ * for other common transaction errors, and for concurrent associates.
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ return (ret);
+
+ sdbp->s_callback = callback;
+ sdbp->s_primary = dbp;
+
+ sdbp->stored_get = sdbp->get;
+ sdbp->get = __db_secondary_get;
+
+ sdbp->stored_close = sdbp->close;
+ sdbp->close = __db_secondary_close;
+
+ /*
+ * Secondary cursors may have the primary's lock file ID, so we
+ * need to make sure that no older cursors are lying around
+ * when we make the transition.
+ */
+ if (TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(dbenv,
+ "Databases may not become secondary indices while cursors are open");
+ ret = EINVAL;
+ goto err;
+ }
+ while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((ret = __db_c_destroy(sdbc)) != 0)
+ goto err;
+
+ F_SET(sdbp, DB_AM_SECONDARY);
+
+ /*
+ * Check to see if the secondary is empty--and thus if we should
+ * build it--before we link it in and risk making it show up in
+ * other threads.
+ */
+ build = 0;
+ if (LF_ISSET(DB_CREATE)) {
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc, 0)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ /*
+ * We don't care about key or data; we're just doing
+ * an existence check.
+ */
+ F_SET(&key, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ F_SET(&data, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = sdbc->c_real_get(sdbc, &key, &data,
+ (STD_LOCKING(sdbc) ? DB_RMW : 0) |
+ DB_FIRST)) == DB_NOTFOUND) {
+ build = 1;
+ ret = 0;
+ }
+
+ /*
+ * Secondary cursors have special refcounting close
+ * methods. Be careful.
+ */
+ if ((t_ret = __db_c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Add the secondary to the list on the primary. Do it here
+ * so that we see any updates that occur while we're walking
+ * the primary.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ /* See __db_s_next for an explanation of secondary refcounting. */
+ DB_ASSERT(sdbp->s_refcnt == 0);
+ sdbp->s_refcnt = 1;
+ LIST_INSERT_HEAD(&dbp->s_secondaries, sdbp, s_links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (build) {
+ /*
+ * We loop through the primary, putting each item we
+ * find into the new secondary.
+ *
+ * If we're using CDB, opening these two cursors puts us
+ * in a bit of a locking tangle: CDB locks are done on the
+ * primary, so that we stay deadlock-free, but that means
+ * that updating the secondary while we have a read cursor
+ * open on the primary will self-block. To get around this,
+ * we force the primary cursor to use the same locker ID
+ * as the secondary, so they won't conflict. This should
+ * be harmless even if we're not using CDB.
+ */
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc,
+ CDB_LOCKING(sdbp->dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /* Lock out other threads, now that we have a locker ID. */
+ dbp->associate_lid = sdbc->locker;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ while ((ret = pdbc->c_get(pdbc, &key, &data, DB_NEXT)) == 0) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = callback(sdbp, &key, &data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbc->c_put(sdbc,
+ &skey, &key, DB_UPDATE_SECONDARY)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ if ((ret = sdbc->c_close(sdbc)) != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
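A sketch of the application side of this call, assuming a hypothetical fixed-layout record and helper names; the callback signature and the DB_DONOTINDEX and DB_CREATE conventions are the ones __db_associate handles above.

#include <string.h>
#include <db.h>

struct record {			/* Hypothetical primary data layout. */
	char first[32];
	char last[32];
};

/* Derive the secondary key (the last name) from a primary key/data pair. */
static int
lastname_callback(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
{
	struct record *rec;

	(void)sdbp;
	(void)pkey;

	if (pdata->size < sizeof(struct record))
		return (DB_DONOTINDEX);	/* Skip records we can't index. */
	rec = pdata->data;

	memset(skey, 0, sizeof(*skey));
	skey->data = rec->last;
	skey->size = (u_int32_t)strlen(rec->last) + 1;
	return (0);
}

/* Attach sdbp as a secondary index of dbp, building it if it is empty. */
int
attach_index(DB *dbp, DB_TXN *txn, DB *sdbp)
{
	return (dbp->associate(dbp, txn, sdbp, lastname_callback, DB_CREATE));
}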
+
+/*
+ * __db_pget --
+ * Return a primary key/data pair given a secondary key.
+ *
+ * PUBLIC: int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_pget(dbp, txn, skey, pkey, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->pget");
+
+ if ((ret = __db_pgetchk(dbp, skey, pkey, data, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ SET_RET_MEM(dbc, dbp);
+
+ /*
+ * The underlying cursor pget will fill in a default DBT for null
+ * pkeys, and use the cursor's returned-key memory internally to
+ * store any intermediate primary keys. However, we've just set
+ * the returned-key memory to the DB handle's key memory, which
+ * is unsafe to use if the DB handle is threaded. If the pkey
+ * argument is NULL, use the DBC-owned returned-key memory
+ * instead; it'll go away when we close the cursor before we
+ * return, but in this case that's just fine, as we're not
+ * returning the primary key.
+ */
+ if (pkey == NULL)
+ dbc->rkey = &dbc->my_rkey;
+
+ DEBUG_LREAD(dbc, txn, "__db_pget", skey, NULL, flags);
+
+ /*
+ * The cursor is just a perfectly ordinary secondary database
+ * cursor. Call its c_pget() method to do the dirty work.
+ */
+ if (flags == 0 || flags == DB_RMW)
+ flags |= DB_SET;
+ ret = dbc->c_pget(dbc, skey, pkey, data, flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
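And the corresponding secondary lookup, again with hypothetical helper and key names: DB->pget on the secondary returns both the primary key and the primary data, relying on the flags == 0 defaulting to DB_SET shown above.

#include <string.h>
#include <db.h>

int
lookup_by_lastname(DB *sdbp, DB_TXN *txn,
    const char *last, DBT *pkey, DBT *data)
{
	DBT skey;

	memset(&skey, 0, sizeof(skey));
	skey.data = (void *)last;
	skey.size = (u_int32_t)strlen(last) + 1;

	memset(pkey, 0, sizeof(*pkey));
	memset(data, 0, sizeof(*data));

	/* Looks up the secondary key and maps it back to the primary. */
	return (sdbp->pget(sdbp, txn, &skey, pkey, data, 0));
}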
+
+/*
+ * __db_secondary_get --
+ * This wrapper function for DB->pget() is the DB->get() function
+ * on a database which has been made into a secondary index.
+ */
+static int
+__db_secondary_get(sdbp, txn, skey, data, flags)
+ DB *sdbp;
+ DB_TXN *txn;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ DB_ASSERT(F_ISSET(sdbp, DB_AM_SECONDARY));
+ return (sdbp->pget(sdbp, txn, skey, NULL, data, flags));
+}
+
+/*
+ * __db_secondary_close --
+ * Wrapper function for DB->close() which we use on secondaries to
+ * manage refcounting and make sure we don't close them underneath
+ * a primary that is updating.
+ */
+static int
+__db_secondary_close(sdbp, flags)
+ DB *sdbp;
+ u_int32_t flags;
+{
+ DB *primary;
+ int doclose;
+
+ doclose = 0;
+ primary = sdbp->s_primary;
+
+ MUTEX_THREAD_LOCK(primary->dbenv, primary->mutexp);
+ /*
+ * Check the refcount--if it was at 1 when we were called, no
+ * thread is currently updating this secondary through the primary,
+ * so it's safe to close it for real.
+ *
+ * If it's not safe to do the close now, we do nothing; the
+ * database will actually be closed when the refcount is decremented,
+ * which can happen in either __db_s_next or __db_s_done.
+ */
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ /* We don't want to call close while the mutex is held. */
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(primary->dbenv, primary->mutexp);
+
+ /*
+ * sdbp->close is this function; call the real one explicitly if
+ * need be.
+ */
+ return (doclose ? __db_close(sdbp, flags) : 0);
+}
+
+/*
+ * __db_append_primary --
+ * Perform the secondary index updates necessary to put(DB_APPEND)
+ * a record to a primary database.
+ */
+static int
+__db_append_primary(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc, *pdbc;
+ DBT oldpkey, pkey, pdata, skey;
+ int cmp, ret, t_ret;
+
+ dbp = dbc->dbp;
+ sdbp = NULL;
+ ret = 0;
+
+ /*
+ * Worrying about partial appends seems a little like worrying
+ * about Linear A character encodings. But we support those
+ * too if your application understands them.
+ */
+ pdbc = NULL;
+ if (F_ISSET(data, DB_DBT_PARTIAL) || F_ISSET(key, DB_DBT_PARTIAL)) {
+ /*
+ * The dbc we were passed is all set to pass things
+ * back to the user; we can't safely do a call on it.
+ * Dup the cursor, grab the real data item (we don't
+ * care what the key is--we've been passed it directly),
+ * and use that instead of the data DBT we were passed.
+ *
+ * Note that we can get away with this simple get because
+ * an appended item is by definition new, and the
+ * correctly-constructed full data item from this partial
+ * put is on the page waiting for us.
+ */
+ if ((ret = __db_c_idup(dbc, &pdbc, DB_POSITIONI)) != 0)
+ return (ret);
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&pdata, 0, sizeof(DBT));
+
+ if ((ret = pdbc->c_get(pdbc, &pkey, &pdata, DB_CURRENT)) != 0)
+ goto err;
+
+ key = &pkey;
+ data = &pdata;
+ }
+
+ /*
+ * Loop through the secondary indices, putting a new item in
+ * each that points to the appended item.
+ *
+ * This is much like the loop in "step 3" in __db_c_put, so
+ * I'm not commenting heavily here; it was unclean to excerpt
+ * just that section into a common function, but the basic
+ * overview is the same here.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, key, data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Since we know we have a new primary key, it can't produce an
+ * identical key/data pair (a "duplicate duplicate") in the
+ * secondary. It can still be a duplicate key in a secondary
+ * that doesn't support duplicates, however, so we need to be
+ * careful to avoid an overwrite (which would corrupt our index).
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc, &skey, &oldpkey,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0));
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, key);
+ /*
+ * XXX
+ * This needs to use the right free function
+ * as soon as this is possible.
+ */
+ __os_ufree(sdbp->dbenv,
+ oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Append results in a non-unique secondary key in",
+ " an index not configured to support duplicates");
+ ret = EINVAL;
+ goto err1;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto err1;
+ }
+
+ ret = sdbc->c_put(sdbc, &skey, key, DB_UPDATE_SECONDARY);
+
+err1: FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/libdb/db/db_auto.c b/libdb/db/db_auto.c
new file mode 100644
index 0000000..6b43b9c
--- /dev/null
+++ b/libdb/db/db_auto.c
@@ -0,0 +1,2583 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __db_addrem_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, u_int32_t, u_int32_t,
+ * PUBLIC: const DBT *, const DBT *, DB_LSN *));
+ */
+int
+__db_addrem_log(dbp, txnid, ret_lsnp, flags,
+ opcode, pgno, indx, nbytes, hdr,
+ dbt, pagelsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t nbytes;
+ const DBT *hdr;
+ const DBT *dbt;
+ DB_LSN * pagelsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_addrem;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (hdr == NULL ? 0 : hdr->size)
+ + sizeof(u_int32_t) + (dbt == NULL ? 0 : dbt->size)
+ + sizeof(*pagelsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)nbytes;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (hdr == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &hdr->size, sizeof(hdr->size));
+ bp += sizeof(hdr->size);
+ memcpy(bp, hdr->data, hdr->size);
+ bp += hdr->size;
+ }
+
+ if (dbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dbt->size, sizeof(dbt->size));
+ bp += sizeof(dbt->size);
+ memcpy(bp, dbt->data, dbt->size);
+ bp += dbt->size;
+ }
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_addrem_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_addrem_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_addrem_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_addrem_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_addrem_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
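+/*
+ * __db_addrem_print --
+ *	Print a human-readable dump of an addrem record: the standard
+ *	header fields, the scalar arguments, and the hdr and dbt DBTs
+ *	(non-printable bytes are shown in hex).
+ */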
+int
+__db_addrem_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_addrem_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_addrem_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_addrem: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tnbytes: %lu\n", (u_long)argp->nbytes);
+ (void)printf("\thdr: ");
+ for (i = 0; i < argp->hdr.size; i++) {
+ ch = ((u_int8_t *)argp->hdr.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tdbt: ");
+ for (i = 0; i < argp->dbt.size; i++) {
+ ch = ((u_int8_t *)argp->dbt.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **));
+ */
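+/*
+ * __db_addrem_read --
+ *	Unmarshal an addrem record from a flat log buffer into a newly
+ *	allocated __db_addrem_args structure.  The DBT fields point back
+ *	into the caller's buffer rather than copying the data; the caller
+ *	releases the structure with __os_free.
+ */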
+int
+__db_addrem_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_addrem_args **argpp;
+{
+ __db_addrem_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_addrem_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->nbytes = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->hdr, 0, sizeof(argp->hdr));
+ memcpy(&argp->hdr.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->hdr.data = bp;
+ bp += argp->hdr.size;
+
+ memset(&argp->dbt, 0, sizeof(argp->dbt));
+ memcpy(&argp->dbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dbt.data = bp;
+ bp += argp->dbt.size;
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_big_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, db_pgno_t, db_pgno_t,
+ * PUBLIC: const DBT *, DB_LSN *, DB_LSN *, DB_LSN *));
+ */
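+/*
+ * __db_big_log --
+ *	Marshal and write a big (overflow-item) record, describing the
+ *	addition or removal of a chunk of an overflow item on page pgno,
+ *	together with the LSNs of that page and its prev/next neighbours.
+ *	The buffer is handed to log_put and, on success, the transaction's
+ *	last_lsn is updated to the new record's LSN.
+ */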
+int
+__db_big_log(dbp, txnid, ret_lsnp, flags,
+ opcode, pgno, prev_pgno, next_pgno, dbt,
+ pagelsn, prevlsn, nextlsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ const DBT *dbt;
+ DB_LSN * pagelsn;
+ DB_LSN * prevlsn;
+ DB_LSN * nextlsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_big;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (dbt == NULL ? 0 : dbt->size)
+ + sizeof(*pagelsn)
+ + sizeof(*prevlsn)
+ + sizeof(*nextlsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)prev_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)next_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (dbt == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dbt->size, sizeof(dbt->size));
+ bp += sizeof(dbt->size);
+ memcpy(bp, dbt->data, dbt->size);
+ bp += dbt->size;
+ }
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_big_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_big_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_big_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_big_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_big_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ if (argp->prev_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->prev_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+ if (argp->next_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->next_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_big_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_big_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_big_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_big: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno);
+ (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ (void)printf("\tdbt: ");
+ for (i = 0; i < argp->dbt.size; i++) {
+ ch = ((u_int8_t *)argp->dbt.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ (void)printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_big_read __P((DB_ENV *, void *, __db_big_args **));
+ */
+int
+__db_big_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_big_args **argpp;
+{
+ __db_big_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_big_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->prev_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->dbt, 0, sizeof(argp->dbt));
+ memcpy(&argp->dbt.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dbt.data = bp;
+ bp += argp->dbt.size;
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_ovref_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, int32_t, DB_LSN *));
+ */
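+/*
+ * __db_ovref_log --
+ *	Marshal and write an ovref record: an adjustment ("adjust") to an
+ *	overflow page's reference count, along with the page's prior LSN.
+ */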
+int
+__db_ovref_log(dbp, txnid, ret_lsnp, flags, pgno, adjust, lsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ int32_t adjust;
+ DB_LSN * lsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_ovref;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)adjust;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_ovref_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_ovref_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_ovref_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_ovref_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_ovref_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_ovref_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_ovref_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_ovref_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_ovref: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tadjust: %ld\n", (long)argp->adjust);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **));
+ */
+int
+__db_ovref_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_ovref_args **argpp;
+{
+ __db_ovref_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_ovref_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->adjust = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_relink_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *,
+ * PUBLIC: db_pgno_t, DB_LSN *));
+ */
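+/*
+ * __db_relink_log --
+ *	Marshal and write a relink record, describing a change to a page's
+ *	position in a prev/next page chain: the page itself and both
+ *	neighbours are recorded with their page numbers and prior LSNs.
+ */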
+int
+__db_relink_log(dbp, txnid, ret_lsnp, flags,
+ opcode, pgno, lsn, prev, lsn_prev,
+ next, lsn_next)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ DB_LSN * lsn;
+ db_pgno_t prev;
+ DB_LSN * lsn_prev;
+ db_pgno_t next;
+ DB_LSN * lsn_next;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_relink;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn_prev)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn_next);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)prev;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn_prev != NULL)
+ memcpy(bp, lsn_prev, sizeof(*lsn_prev));
+ else
+ memset(bp, 0, sizeof(*lsn_prev));
+ bp += sizeof(*lsn_prev);
+
+ uinttmp = (u_int32_t)next;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn_next != NULL)
+ memcpy(bp, lsn_next, sizeof(*lsn_next));
+ else
+ memset(bp, 0, sizeof(*lsn_next));
+ bp += sizeof(*lsn_next);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_relink_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_relink_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_relink_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_relink_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_relink_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ if (argp->prev != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->prev;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+ if (argp->next != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->next;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_relink_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_relink_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_relink_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_relink: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tprev: %lu\n", (u_long)argp->prev);
+ (void)printf("\tlsn_prev: [%lu][%lu]\n",
+ (u_long)argp->lsn_prev.file, (u_long)argp->lsn_prev.offset);
+ (void)printf("\tnext: %lu\n", (u_long)argp->next);
+ (void)printf("\tlsn_next: [%lu][%lu]\n",
+ (u_long)argp->lsn_next.file, (u_long)argp->lsn_next.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **));
+ */
+int
+__db_relink_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_relink_args **argpp;
+{
+ __db_relink_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_relink_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->prev = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn_prev, bp, sizeof(argp->lsn_prev));
+ bp += sizeof(argp->lsn_prev);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn_next, bp, sizeof(argp->lsn_next));
+ bp += sizeof(argp->lsn_next);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, int32_t, const DBT *, const DBT *,
+ * PUBLIC: u_int32_t));
+ */
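+/*
+ * __db_debug_log --
+ *	Marshal and write a debug record.  Unlike the other record types,
+ *	this one takes the environment handle directly (no DB handle or
+ *	file id lookup) and simply captures an operation string, a file id
+ *	and optional key/data DBTs for diagnostic purposes.
+ */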
+int
+__db_debug_log(dbenv, txnid, ret_lsnp, flags,
+ op, fileid, key, data, arg_flags)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *op;
+ int32_t fileid;
+ const DBT *key;
+ const DBT *data;
+ u_int32_t arg_flags;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___db_debug;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (op == NULL ? 0 : op->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (key == NULL ? 0 : key->size)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (op == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &op->size, sizeof(op->size));
+ bp += sizeof(op->size);
+ memcpy(bp, op->data, op->size);
+ bp += op->size;
+ }
+
+ uinttmp = (u_int32_t)fileid;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (key == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &key->size, sizeof(key->size));
+ bp += sizeof(key->size);
+ memcpy(bp, key->data, key->size);
+ bp += key->size;
+ }
+
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+
+ uinttmp = (u_int32_t)arg_flags;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_debug_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_debug_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_debug_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_debug_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_debug_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_debug_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_debug: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\top: ");
+ for (i = 0; i < argp->op.size; i++) {
+ ch = ((u_int8_t *)argp->op.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tkey: ");
+ for (i = 0; i < argp->key.size; i++) {
+ ch = ((u_int8_t *)argp->key.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\targ_flags: %lu\n", (u_long)argp->arg_flags);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **));
+ */
+int
+__db_debug_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_debug_args **argpp;
+{
+ __db_debug_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_debug_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->op, 0, sizeof(argp->op));
+ memcpy(&argp->op.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->op.data = bp;
+ bp += argp->op.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->key, 0, sizeof(argp->key));
+ memcpy(&argp->key.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->key.data = bp;
+ bp += argp->key.size;
+
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->arg_flags = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_noop_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *));
+ */
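+/*
+ * __db_noop_log --
+ *	Marshal and write a noop record: nothing but the file id, a page
+ *	number and the page's previous LSN, written when a page's LSN
+ *	needs to be advanced without logging any real change to the page.
+ */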
+int
+__db_noop_log(dbp, txnid, ret_lsnp, flags, pgno, prevlsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * prevlsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_noop;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*prevlsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_noop_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_noop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_noop_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_noop_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_noop_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_noop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_noop_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_noop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_noop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_noop_read __P((DB_ENV *, void *, __db_noop_args **));
+ */
+int
+__db_noop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_noop_args **argpp;
+{
+ __db_noop_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_noop_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t,
+ * PUBLIC: db_pgno_t));
+ */
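+/*
+ * __db_pg_alloc_log --
+ *	Marshal and write a pg_alloc record, describing the allocation of
+ *	page pgno (to become page type ptype): the metadata page's LSN and
+ *	page number, the allocated page's prior LSN, and the next free
+ *	page number ("next") are all captured for recovery.
+ */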
+int
+__db_pg_alloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, meta_pgno, page_lsn, pgno, ptype,
+ next)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * meta_lsn;
+ db_pgno_t meta_pgno;
+ DB_LSN * page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_pg_alloc;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*meta_lsn)
+ + sizeof(u_int32_t)
+ + sizeof(*page_lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (page_lsn != NULL)
+ memcpy(bp, page_lsn, sizeof(*page_lsn));
+ else
+ memset(bp, 0, sizeof(*page_lsn));
+ bp += sizeof(*page_lsn);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)ptype;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)next;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_pg_alloc_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_pg_alloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_pg_alloc_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_pg_alloc_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_pg_alloc_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 2)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->meta_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_pg_alloc_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_pg_alloc_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_pg_alloc_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_pg_alloc: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\tpage_lsn: [%lu][%lu]\n",
+ (u_long)argp->page_lsn.file, (u_long)argp->page_lsn.offset);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tptype: %lu\n", (u_long)argp->ptype);
+ (void)printf("\tnext: %lu\n", (u_long)argp->next);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_pg_alloc_read __P((DB_ENV *, void *,
+ * PUBLIC: __db_pg_alloc_args **));
+ */
+int
+__db_pg_alloc_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_pg_alloc_args **argpp;
+{
+ __db_pg_alloc_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_pg_alloc_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->page_lsn, bp, sizeof(argp->page_lsn));
+ bp += sizeof(argp->page_lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->ptype = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *,
+ * PUBLIC: db_pgno_t));
+ */
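+/*
+ * __db_pg_free_log --
+ *	Marshal and write a pg_free record, describing the return of page
+ *	pgno to the free list: the metadata page's LSN and page number, a
+ *	copy of the freed page's header, and the old free-list head
+ *	("next") are captured so the operation can be undone or redone.
+ */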
+int
+__db_pg_free_log(dbp, txnid, ret_lsnp, flags, pgno, meta_lsn, meta_pgno, header, next)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * meta_lsn;
+ db_pgno_t meta_pgno;
+ const DBT *header;
+ db_pgno_t next;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___db_pg_free;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*meta_lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (header == NULL ? 0 : header->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (header == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &header->size, sizeof(header->size));
+ bp += sizeof(header->size);
+ memcpy(bp, header->data, header->size);
+ bp += header->size;
+ }
+
+ uinttmp = (u_int32_t)next;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_pg_free_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_pg_free_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_pg_free_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __db_pg_free_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __db_pg_free_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 2)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->meta_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_pg_free_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_pg_free_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_pg_free_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_pg_free: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\theader: ");
+ for (i = 0; i < argp->header.size; i++) {
+ ch = ((u_int8_t *)argp->header.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tnext: %lu\n", (u_long)argp->next);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_pg_free_read __P((DB_ENV *, void *, __db_pg_free_args **));
+ */
+int
+__db_pg_free_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_pg_free_args **argpp;
+{
+ __db_pg_free_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_pg_free_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->header, 0, sizeof(argp->header));
+ memcpy(&argp->header.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->header.data = bp;
+ bp += argp->header.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_cksum_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t));
+ */
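+/*
+ * __db_cksum_log --
+ *	Marshal and write a cksum record.  It carries no payload beyond
+ *	the standard header (record type, transaction id, previous LSN).
+ */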
+int
+__db_cksum_log(dbenv, txnid, ret_lsnp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___db_cksum;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__db_cksum_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __db_cksum_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_cksum_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__db_cksum_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __db_cksum_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__db_cksum: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_cksum_read __P((DB_ENV *, void *, __db_cksum_args **));
+ */
+int
+__db_cksum_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __db_cksum_args **argpp;
+{
+ __db_cksum_args *argp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__db_cksum_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
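+/*
+ * __db_init_print --
+ *	Register the print function for every db log record type in the
+ *	dispatch table via __db_add_recovery.  The companion routines
+ *	below do the same for the getpgnos and recover functions.
+ */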
+int
+__db_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_addrem_print, DB___db_addrem)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_big_print, DB___db_big)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_ovref_print, DB___db_ovref)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_relink_print, DB___db_relink)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_debug_print, DB___db_debug)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_noop_print, DB___db_noop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_alloc_print, DB___db_pg_alloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_free_print, DB___db_pg_free)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_cksum_print, DB___db_cksum)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__db_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_addrem_getpgnos, DB___db_addrem)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_big_getpgnos, DB___db_big)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_ovref_getpgnos, DB___db_ovref)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_relink_getpgnos, DB___db_relink)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_debug_getpgnos, DB___db_debug)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_noop_getpgnos, DB___db_noop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_alloc_getpgnos, DB___db_pg_alloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_free_getpgnos, DB___db_pg_free)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_cksum_getpgnos, DB___db_cksum)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __db_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__db_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_addrem_recover, DB___db_addrem)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_big_recover, DB___db_big)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_ovref_recover, DB___db_ovref)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_relink_recover, DB___db_relink)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_debug_recover, DB___db_debug)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_noop_recover, DB___db_noop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_alloc_recover, DB___db_pg_alloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_pg_free_recover, DB___db_pg_free)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __db_cksum_recover, DB___db_cksum)) != 0)
+ return (ret);
+ return (0);
+}
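+
+/*
+ * Note on the three __db_init_* routines above (a summary, not new code):
+ * each one registers a handler per log-record type through __db_add_recovery,
+ * building a dispatch table in *dtabp that maps each DB___db_* record type to
+ * its handler, so a DB___db_cksum record, for example, is routed to
+ * __db_cksum_recover, __db_cksum_print, or __db_cksum_getpgnos depending on
+ * which table was built.
+ */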
diff --git a/libdb/db/db_cam.c b/libdb/db/db_cam.c
new file mode 100644
index 0000000..9807b9d
--- /dev/null
+++ b/libdb/db/db_cam.c
@@ -0,0 +1,2286 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+static int __db_buildpartial __P((DB *, DBT *, DBT *, DBT *));
+static int __db_c_cleanup __P((DBC *, DBC *, int));
+static int __db_c_del_secondary __P((DBC *));
+static int __db_c_pget_recno __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_wrlock_err __P((DB_ENV *));
+
+#define CDB_LOCKING_INIT(dbp, dbc) \
+ /* \
+ * If we are running CDB, this had better be either a write \
+ * cursor or an immediate writer. If it's a regular writer, \
+ * that means we have an IWRITE lock and we need to upgrade \
+ * it to a write lock. \
+ */ \
+ if (CDB_LOCKING((dbp)->dbenv)) { \
+ if (!F_ISSET(dbc, DBC_WRITECURSOR | DBC_WRITER)) \
+ return (__db_wrlock_err(dbp->dbenv)); \
+ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR) && \
+ (ret = (dbp)->dbenv->lock_get((dbp)->dbenv, \
+ (dbc)->locker, DB_LOCK_UPGRADE, &(dbc)->lock_dbt, \
+ DB_LOCK_WRITE, &(dbc)->mylock)) != 0) \
+ return (ret); \
+ }
+#define CDB_LOCKING_DONE(dbp, dbc) \
+ /* Release the upgraded lock. */ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR)) \
+ (void)__lock_downgrade( \
+ (dbp)->dbenv, &(dbc)->mylock, DB_LOCK_IWRITE, 0);
+/*
+ * Copy the lock info from one cursor to another, so that locking
+ * in CDB can be done in the context of an internally-duplicated
+ * or off-page-duplicate cursor.
+ */
+#define CDB_LOCKING_COPY(dbp, dbc_o, dbc_n) \
+ if (CDB_LOCKING((dbp)->dbenv) && \
+ F_ISSET((dbc_o), DBC_WRITECURSOR | DBC_WRITEDUP)) { \
+ memcpy(&(dbc_n)->mylock, &(dbc_o)->mylock, \
+ sizeof((dbc_o)->mylock)); \
+ /* This lock isn't ours to put--just discard it on close. */ \
+ F_SET((dbc_n), DBC_WRITEDUP); \
+ }
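+
+/*
+ * A minimal usage sketch for the macros above (illustrative only; the real
+ * callers are __db_c_del, __db_c_get and __db_c_put below).  A CDB writer
+ * brackets its update with the INIT/DONE pair so its IWRITE lock is upgraded
+ * to a write lock for the duration of the change and downgraded afterwards:
+ *
+ *	int ret;
+ *
+ *	CDB_LOCKING_INIT(dbp, dbc);
+ *	ret = dbc->c_am_del(dbc);
+ *	CDB_LOCKING_DONE(dbp, dbc);
+ */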
+
+/*
+ * __db_c_close --
+ * Close the cursor.
+ *
+ * PUBLIC: int __db_c_close __P((DBC *));
+ */
+int
+__db_c_close(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * If the cursor is already closed we have a serious problem, and we
+ * assume that the cursor isn't on the active queue. Don't do any of
+ * the remaining cursor close processing.
+ */
+ if (!F_ISSET(dbc, DBC_ACTIVE)) {
+ if (dbp != NULL)
+ __db_err(dbenv, "Closing already-closed cursor");
+
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ cp = dbc->internal;
+ opd = cp->opd;
+
+ /*
+ * Remove the cursor(s) from the active queue. We may be closing two
+ * cursors at once here, a top-level one and a lower-level, off-page
+	 * duplicate one. The access-method specific cursor close routine must
+ * close both of them in a single call.
+ *
+ * !!!
+ * Cursors must be removed from the active queue before calling the
+	 * access-method specific cursor close routine; Btree depends on having that
+ * order of operations.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ if (opd != NULL) {
+ F_CLR(opd, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, opd, links);
+ }
+ F_CLR(dbc, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ /* Call the access specific cursor close routine. */
+ if ((t_ret =
+ dbc->c_am_close(dbc, PGNO_INVALID, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Release the lock after calling the access method specific close
+	 * routine; a Btree cursor may have had pending deletes.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ /*
+ * If DBC_WRITEDUP is set, the cursor is an internally
+ * duplicated write cursor and the lock isn't ours to put.
+ *
+ * Also, be sure not to free anything if mylock.off is
+ * INVALID; in some cases, such as idup'ed read cursors
+ * and secondary update cursors, a cursor in a CDB
+ * environment may not have a lock at all.
+ */
+ if (!F_ISSET(dbc, DBC_WRITEDUP) && LOCK_ISSET(dbc->mylock)) {
+ if ((t_ret = dbenv->lock_put(
+ dbenv, &dbc->mylock)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* For safety's sake, since this is going on the free queue. */
+ memset(&dbc->mylock, 0, sizeof(dbc->mylock));
+ F_CLR(dbc, DBC_WRITEDUP);
+ }
+
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+
+ /* Move the cursor(s) to the free queue. */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ if (opd != NULL) {
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+ TAILQ_INSERT_TAIL(&dbp->free_queue, opd, links);
+ opd = NULL;
+ }
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_c_destroy --
+ * Destroy the cursor, called after DBC->c_close.
+ *
+ * PUBLIC: int __db_c_destroy __P((DBC *));
+ */
+int
+__db_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ /* Remove the cursor from the free queue. */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ /* Free up allocated memory. */
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbenv, dbc->my_rdata.data);
+
+ /* Call the access specific cursor destroy routine. */
+ ret = dbc->c_am_destroy == NULL ? 0 : dbc->c_am_destroy(dbc);
+
+ /*
+ * Release the lock id for this cursor.
+ */
+ if (LOCKING_ON(dbenv) &&
+ F_ISSET(dbc, DBC_OWN_LID) &&
+ (t_ret = dbenv->lock_id_free(dbenv, dbc->lid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_c_count --
+ * Return a count of duplicate data items.
+ *
+ * PUBLIC: int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+ */
+int
+__db_c_count(dbc, recnop, flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are not duplicated and will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_ccountchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ switch (dbc->dbtype) {
+ case DB_QUEUE:
+ case DB_RECNO:
+ *recnop = 1;
+ break;
+ case DB_HASH:
+ if (dbc->internal->opd == NULL) {
+ if ((ret = __ham_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_BTREE:
+ if ((ret = __bam_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__db_c_count", dbp->type));
+ }
+ return (0);
+}
+
+/*
+ * __db_c_del --
+ * Delete using a cursor.
+ *
+ * PUBLIC: int __db_c_del __P((DBC *, u_int32_t));
+ */
+int
+__db_c_del(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *opd;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are not duplicated and will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cdelchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, dbc->txn, "db_c_del", NULL, NULL, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc);
+
+ /*
+ * If we're a secondary index, and DB_UPDATE_SECONDARY isn't set
+ * (which it only is if we're being called from a primary update),
+ * then we need to call through to the primary and delete the item.
+ *
+ * Note that this will delete the current item; we don't need to
+ * delete it ourselves as well, so we can just goto done.
+ */
+ if (flags != DB_UPDATE_SECONDARY && F_ISSET(dbp, DB_AM_SECONDARY)) {
+ ret = __db_c_del_secondary(dbc);
+ goto done;
+ }
+
+ /*
+ * If we are a primary and have secondary indices, go through
+ * and delete any secondary keys that point at the current record.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL &&
+ (ret = __db_c_del_primary(dbc)) != 0)
+ goto done;
+
+ /*
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the del operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ opd = dbc->internal->opd;
+ if (opd == NULL)
+ ret = dbc->c_am_del(dbc);
+ else
+ if ((ret = dbc->c_am_writelock(dbc)) == 0)
+ ret = opd->c_am_del(opd);
+
+done: CDB_LOCKING_DONE(dbp, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_c_dup --
+ * Duplicate a cursor
+ *
+ * PUBLIC: int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__db_c_dup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBC *dbc_n, *dbc_nopd;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbenv = dbp->dbenv;
+ dbc_n = dbc_nopd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /*
+ * We can never have two write cursors open in CDB, so do not
+ * allow duplication of a write cursor.
+ */
+ if (flags != DB_POSITIONI &&
+ F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR)) {
+ __db_err(dbenv, "Cannot duplicate writeable cursor");
+ return (EINVAL);
+ }
+
+ /* Allocate a new cursor and initialize it. */
+ if ((ret = __db_c_idup(dbc_orig, &dbc_n, flags)) != 0)
+ goto err;
+ *dbcp = dbc_n;
+
+ /*
+ * If we're in CDB, and this isn't an internal duplication (in which
+ * case we're explicitly overriding CDB locking), the duplicated
+ * cursor needs its own read lock. (We know it's not a write cursor
+ * because we wouldn't have made it this far; you can't dup them.)
+ */
+ if (CDB_LOCKING(dbenv) && flags != DB_POSITIONI) {
+ DB_ASSERT(!F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR));
+
+ if ((ret = dbenv->lock_get(dbenv, dbc_n->locker, 0,
+ &dbc_n->lock_dbt, DB_LOCK_READ, &dbc_n->mylock)) != 0) {
+ (void)__db_c_close(dbc_n);
+ return (ret);
+ }
+ }
+
+ /*
+ * If the cursor references an off-page duplicate tree, allocate a
+ * new cursor for that tree and initialize it.
+ */
+ if (dbc_orig->internal->opd != NULL) {
+ if ((ret =
+ __db_c_idup(dbc_orig->internal->opd, &dbc_nopd, flags)) != 0)
+ goto err;
+ dbc_n->internal->opd = dbc_nopd;
+ }
+
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
+ return (0);
+
+err: if (dbc_n != NULL)
+ (void)dbc_n->c_close(dbc_n);
+ if (dbc_nopd != NULL)
+ (void)dbc_nopd->c_close(dbc_nopd);
+
+ return (ret);
+}
+
+/*
+ * __db_c_idup --
+ * Internal version of __db_c_dup.
+ *
+ * PUBLIC: int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__db_c_idup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig, **dbcp;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc_n;
+ DBC_INTERNAL *int_n, *int_orig;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbc_n = *dbcp;
+
+ if ((ret = __db_icursor(dbp, dbc_orig->txn, dbc_orig->dbtype,
+ dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD),
+ dbc_orig->locker, &dbc_n)) != 0)
+ return (ret);
+
+ /* If the user wants the cursor positioned, do it here. */
+ if (flags == DB_POSITION || flags == DB_POSITIONI) {
+ int_n = dbc_n->internal;
+ int_orig = dbc_orig->internal;
+
+ dbc_n->flags |= dbc_orig->flags & ~DBC_OWN_LID;
+
+ int_n->indx = int_orig->indx;
+ int_n->pgno = int_orig->pgno;
+ int_n->root = int_orig->root;
+ int_n->lock_mode = int_orig->lock_mode;
+
+ switch (dbc_orig->dbtype) {
+ case DB_QUEUE:
+ if ((ret = __qam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_c_idup", dbc_orig->dbtype);
+ goto err;
+ }
+ }
+
+ /* Now take care of duping the CDB information. */
+ CDB_LOCKING_COPY(dbp, dbc_orig, dbc_n);
+
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
+
+ *dbcp = dbc_n;
+ return (0);
+
+err: (void)dbc_n->c_close(dbc_n);
+ return (ret);
+}
+
+/*
+ * __db_c_newopd --
+ * Create a new off-page duplicate cursor.
+ *
+ * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **));
+ */
+int
+__db_c_newopd(dbc_parent, root, oldopd, dbcp)
+ DBC *dbc_parent;
+ db_pgno_t root;
+ DBC *oldopd;
+ DBC **dbcp;
+{
+ DB *dbp;
+ DBC *opd;
+ DBTYPE dbtype;
+ int ret;
+
+ dbp = dbc_parent->dbp;
+ dbtype = (dbp->dup_compare == NULL) ? DB_RECNO : DB_BTREE;
+
+ /*
+ * On failure, we want to default to returning the old off-page dup
+ * cursor, if any; our caller can't be left with a dangling pointer
+ * to a freed cursor. On error the only allowable behavior is to
+ * close the cursor (and the old OPD cursor it in turn points to), so
+ * this should be safe.
+ */
+ *dbcp = oldopd;
+
+ if ((ret = __db_icursor(dbp,
+ dbc_parent->txn, dbtype, root, 1, dbc_parent->locker, &opd)) != 0)
+ return (ret);
+
+ /* !!!
+ * If the parent is a DBC_WRITER, this won't copy anything. That's
+ * not actually a problem--we only need lock information in an
+ * off-page dup cursor in order to upgrade at cursor close time
+ * if we've done a delete, but WRITERs don't need to upgrade.
+ */
+ CDB_LOCKING_COPY(dbp, dbc_parent, opd);
+
+ *dbcp = opd;
+
+ /*
+ * Check to see if we already have an off-page dup cursor that we've
+ * passed in. If we do, close it. It'd be nice to use it again
+ * if it's a cursor belonging to the right tree, but if we're doing
+ * a cursor-relative operation this might not be safe, so for now
+ * we'll take the easy way out and always close and reopen.
+ *
+ * Note that under no circumstances do we want to close the old
+ * cursor without returning a valid new one; we don't want to
+ * leave the main cursor in our caller with a non-NULL pointer
+ * to a freed off-page dup cursor.
+ */
+ if (oldopd != NULL && (ret = oldopd->c_close(oldopd)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __db_c_get --
+ * Get using a cursor.
+ *
+ * PUBLIC: int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_get(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc, *dbc_n, *opd;
+ DBC_INTERNAL *cp, *cp_n;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t pgno;
+ u_int32_t multi, tmp_dirty, tmp_flags, tmp_rmw;
+ u_int8_t type;
+ int ret, t_ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ mpf = dbp->mpf;
+ dbc_n = NULL;
+ opd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_cgetchk(dbp, key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ tmp_rmw = LF_ISSET(DB_RMW);
+ LF_CLR(DB_RMW);
+
+ tmp_dirty = LF_ISSET(DB_DIRTY_READ);
+ LF_CLR(DB_DIRTY_READ);
+
+ multi = LF_ISSET(DB_MULTIPLE|DB_MULTIPLE_KEY);
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
+
+ DEBUG_LREAD(dbc_arg, dbc_arg->txn, "db_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ /*
+ * Return a cursor's record number. It has nothing to do with the
+ * cursor get code except that it was put into the interface.
+ */
+ if (flags == DB_GET_RECNO) {
+ if (tmp_rmw)
+ F_SET(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+ ret = __bam_c_rget(dbc_arg, data);
+ if (tmp_rmw)
+ F_CLR(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ return (ret);
+ }
+
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the DB_RMW flag was specified and the get
+ * operation is done in an off-page duplicate tree, call the primary
+ * cursor's upgrade routine first.
+ */
+ cp = dbc_arg->internal;
+ if (cp->opd != NULL &&
+ (flags == DB_CURRENT || flags == DB_GET_BOTHC ||
+ flags == DB_NEXT || flags == DB_NEXT_DUP || flags == DB_PREV)) {
+ if (tmp_rmw && (ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_idup(cp->opd, &opd, DB_POSITIONI)) != 0)
+ return (ret);
+
+ switch (ret =
+ opd->c_am_get(opd, key, data, flags, NULL)) {
+ case 0:
+ goto done;
+ case DB_NOTFOUND:
+ /*
+ * Translate DB_NOTFOUND failures for the DB_NEXT and
+ * DB_PREV operations into a subsequent operation on
+ * the parent cursor.
+ */
+ if (flags == DB_NEXT || flags == DB_PREV) {
+ if ((ret = opd->c_close(opd)) != 0)
+ goto err;
+ opd = NULL;
+ break;
+ }
+ goto err;
+ default:
+ goto err;
+ }
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * upgrade the lock as required, and call the underlying function.
+ */
+ switch (flags) {
+ case DB_CURRENT:
+ case DB_GET_BOTHC:
+ case DB_NEXT:
+ case DB_NEXT_DUP:
+ case DB_NEXT_NODUP:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_POSITIONI;
+ break;
+ default:
+ tmp_flags = 0;
+ break;
+ }
+
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else {
+ ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+
+ if (ret != 0)
+ goto err;
+ COPY_RET_MEM(dbc_arg, dbc_n);
+ }
+
+ if (tmp_rmw)
+ F_SET(dbc_n, DBC_RMW);
+
+ switch (multi) {
+ case DB_MULTIPLE:
+ F_SET(dbc_n, DBC_MULTIPLE);
+ break;
+ case DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE_KEY);
+ break;
+ case DB_MULTIPLE | DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
+ break;
+ case 0:
+ break;
+ }
+
+ pgno = PGNO_INVALID;
+ ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
+ if (tmp_rmw)
+ F_CLR(dbc_n, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ F_CLR(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
+ if (ret != 0)
+ goto err;
+
+ cp_n = dbc_n->internal;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ if ((ret = __db_c_newopd(dbc_arg,
+ pgno, cp_n->opd, &cp_n->opd)) != 0)
+ goto err;
+
+ switch (flags) {
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ case DB_SET:
+ case DB_SET_RECNO:
+ case DB_SET_RANGE:
+ tmp_flags = DB_FIRST;
+ break;
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_LAST;
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTHC:
+ case DB_GET_BOTH_RANGE:
+ tmp_flags = flags;
+ break;
+ default:
+ ret =
+ __db_unknown_flag(dbp->dbenv, "__db_c_get", flags);
+ goto err;
+ }
+ if ((ret = cp_n->opd->c_am_get(
+ cp_n->opd, key, data, tmp_flags, NULL)) != 0)
+ goto err;
+ }
+
+done: /*
+ * Return a key/data item. The only exception is that we don't return
+ * a key if the user already gave us one, that is, if the DB_SET flag
+ * was set. The DB_SET flag is necessary. In a Btree, the user's key
+	 * doesn't have to be the same as the key stored in the tree, depending on
+ * the magic performed by the comparison function. As we may not have
+ * done any key-oriented operation here, the page reference may not be
+ * valid. Fill it in as necessary. We don't have to worry about any
+ * locks, the cursor must already be holding appropriate locks.
+ *
+ * XXX
+ * If not a Btree and DB_SET_RANGE is set, we shouldn't return a key
+ * either, should we?
+ */
+ cp_n = dbc_n == NULL ? dbc_arg->internal : dbc_n->internal;
+ if (!F_ISSET(key, DB_DBT_ISSET)) {
+ if (cp_n->page == NULL && (ret =
+ mpf->get(mpf, &cp_n->pgno, 0, &cp_n->page)) != 0)
+ goto err;
+
+ if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx,
+ key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0)
+ goto err;
+ }
+ if (multi != 0) {
+ /*
+ * Even if fetching from the OPD cursor we need a duplicate
+ * primary cursor if we are going after multiple keys.
+ */
+ if (dbc_n == NULL) {
+ /*
+ * Non-"_KEY" DB_MULTIPLE doesn't move the main cursor,
+ * so it's safe to just use dbc_arg, unless dbc_arg
+ * has an open OPD cursor whose state might need to
+ * be preserved.
+ */
+ if ((!(multi & DB_MULTIPLE_KEY) &&
+ dbc_arg->internal->opd == NULL) ||
+ F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else {
+ if ((ret = __db_c_idup(dbc_arg,
+ &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ if ((ret = dbc_n->c_am_get(dbc_n,
+ key, data, DB_CURRENT, &pgno)) != 0)
+ goto err;
+ }
+ cp_n = dbc_n->internal;
+ }
+
+ /*
+ * If opd is set then we dupped the opd that we came in with.
+ * When we return we may have a new opd if we went to another
+ * key.
+ */
+ if (opd != NULL) {
+ DB_ASSERT(cp_n->opd == NULL);
+ cp_n->opd = opd;
+ opd = NULL;
+ }
+
+ /*
+ * Bulk get doesn't use __db_retcopy, so data.size won't
+ * get set up unless there is an error. Assume success
+ * here. This is the only call to c_am_bulk, and it avoids
+ * setting it exactly the same everywhere. If we have an
+ * ENOMEM error, it'll get overwritten with the needed value.
+ */
+ data->size = data->ulen;
+ ret = dbc_n->c_am_bulk(dbc_n, data, flags | multi);
+ } else if (!F_ISSET(data, DB_DBT_ISSET)) {
+ dbc = opd != NULL ? opd : cp_n->opd != NULL ? cp_n->opd : dbc_n;
+ type = TYPE(dbc->internal->page);
+ ret = __db_ret(dbp, dbc->internal->page, dbc->internal->indx +
+ (type == P_LBTREE || type == P_HASH ? O_INDX : 0),
+ data, &dbc_arg->rdata->data, &dbc_arg->rdata->ulen);
+ }
+
+err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */
+ F_CLR(key, DB_DBT_ISSET);
+ F_CLR(data, DB_DBT_ISSET);
+
+ /* Cleanup and cursor resolution. */
+ if (opd != NULL) {
+ if ((t_ret = __db_c_cleanup(
+ dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ }
+
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+ return (ret);
+}
+
+/*
+ * __db_c_put --
+ * Put using a cursor.
+ *
+ * PUBLIC: int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_put(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp, *sdbp;
+ DBC *dbc_n, *oldopd, *opd, *sdbc, *pdbc;
+ DBT olddata, oldpkey, oldskey, newdata, pkey, save_skey, skey, temp;
+ db_pgno_t pgno;
+ int cmp, have_oldrec, ispartial, nodel, re_pad, ret, rmw, t_ret;
+ u_int32_t re_len, size, tmp_flags;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ sdbp = NULL;
+ pdbc = dbc_n = NULL;
+ memset(&newdata, 0, sizeof(DBT));
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cputchk(dbp,
+ key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc_arg->txn, dbc_arg->locker, 0)) != 0)
+ return (ret);
+
+ /*
+ * Putting to secondary indices is forbidden; when we need
+ * to internally update one, we'll call this with a private
+ * synonym for DB_KEYLAST, DB_UPDATE_SECONDARY, which does
+ * the right thing but won't return an error from cputchk().
+ */
+ if (flags == DB_UPDATE_SECONDARY)
+ flags = DB_KEYLAST;
+
+ DEBUG_LWRITE(dbc_arg, dbc_arg->txn, "db_c_put",
+ flags == DB_KEYFIRST || flags == DB_KEYLAST ||
+ flags == DB_NODUPDATA ? key : NULL, data, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * Check to see if we are a primary and have secondary indices.
+ * If we are not, we save ourselves a good bit of trouble and
+ * just skip to the "normal" put.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL)
+ goto skip_s_update;
+
+ /*
+ * We have at least one secondary which we may need to update.
+ *
+ * There is a rather vile locking issue here. Secondary gets
+ * will always involve acquiring a read lock in the secondary,
+ * then acquiring a read lock in the primary. Ideally, we
+ * would likewise perform puts by updating all the secondaries
+ * first, then doing the actual put in the primary, to avoid
+ * deadlock (since having multiple threads doing secondary
+ * gets and puts simultaneously is probably a common case).
+ *
+ * However, if this put is a put-overwrite--and we have no way to
+ * tell in advance whether it will be--we may need to delete
+ * an outdated secondary key. In order to find that old
+ * secondary key, we need to get the record we're overwriting,
+ * before we overwrite it.
+ *
+ * (XXX: It would be nice to avoid this extra get, and have the
+ * underlying put routines somehow pass us the old record
+ * since they need to traverse the tree anyway. I'm saving
+ * this optimization for later, as it's a lot of work, and it
+ * would be hard to fit into this locking paradigm anyway.)
+ *
+ * The simple thing to do would be to go get the old record before
+ * we do anything else. Unfortunately, though, doing so would
+ * violate our "secondary, then primary" lock acquisition
+ * ordering--even in the common case where no old primary record
+ * exists, we'll still acquire and keep a lock on the page where
+ * we're about to do the primary insert.
+ *
+ * To get around this, we do the following gyrations, which
+ * hopefully solve this problem in the common case:
+ *
+ * 1) If this is a c_put(DB_CURRENT), go ahead and get the
+ * old record. We already hold the lock on this page in
+ * the primary, so no harm done, and we'll need the primary
+ * key (which we weren't passed in this case) to do any
+ * secondary puts anyway.
+ *
+ * 2) If we're doing a partial put, we need to perform the
+ * get on the primary key right away, since we don't have
+ * the whole datum that the secondary key is based on.
+ * We may also need to pad out the record if the primary
+ * has a fixed record length.
+ *
+ * 3) Loop through the secondary indices, putting into each a
+ * new secondary key that corresponds to the new record.
+ *
+ * 4) If we haven't done so in (1) or (2), get the old primary
+ * key/data pair. If one does not exist--the common case--we're
+ * done with secondary indices, and can go straight on to the
+ * primary put.
+ *
+ * 5) If we do have an old primary key/data pair, however, we need
+ * to loop through all the secondaries a second time and delete
+ * the old secondary in each.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&olddata, 0, sizeof(DBT));
+ have_oldrec = nodel = 0;
+
+ /*
+ * Primary indices can't have duplicates, so only DB_CURRENT,
+ * DB_KEYFIRST, and DB_KEYLAST make any sense. Other flags
+ * should have been caught by the checking routine, but
+ * add a sprinkling of paranoia.
+ */
+ DB_ASSERT(flags == DB_CURRENT ||
+ flags == DB_KEYFIRST || flags == DB_KEYLAST);
+
+ /*
+ * We'll want to use DB_RMW in a few places, but it's only legal
+ * when locking is on.
+ */
+ rmw = STD_LOCKING(dbc_arg) ? DB_RMW : 0;
+
+ if (flags == DB_CURRENT) { /* Step 1. */
+ /*
+ * This is safe to do on the cursor we already have;
+ * error or no, it won't move.
+ *
+ * We use DB_RMW for all of these gets because we'll be
+ * writing soon enough in the "normal" put code. In
+ * transactional databases we'll hold those write locks
+ * even if we close the cursor we're reading with.
+ */
+ ret = dbc_arg->c_get(dbc_arg,
+ &pkey, &olddata, rmw | DB_CURRENT);
+ if (ret == DB_KEYEMPTY) {
+ nodel = 1; /*
+ * We know we don't need a delete
+ * in the secondary.
+ */
+ have_oldrec = 1; /* We've looked for the old record. */
+ } else if (ret != 0)
+ goto err;
+ else
+ have_oldrec = 1;
+
+ } else {
+ /* So we can just use &pkey everywhere instead of key. */
+ pkey.data = key->data;
+ pkey.size = key->size;
+ }
+
+ /*
+ * Check for partial puts (step 2).
+ */
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (!have_oldrec && !nodel) {
+ /*
+ * We're going to have to search the tree for the
+ * specified key. Dup a cursor (so we have the same
+ * locking info) and do a c_get.
+ */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+
+ /* We should have gotten DB_CURRENT in step 1. */
+ DB_ASSERT(flags != DB_CURRENT);
+
+ ret = pdbc->c_get(pdbc,
+ &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+
+ have_oldrec = 1;
+ }
+
+ /*
+ * Now build the new datum from olddata and the partial
+ * data we were given.
+ */
+ if ((ret =
+ __db_buildpartial(dbp, &olddata, data, &newdata)) != 0)
+ goto err;
+ ispartial = 1;
+ } else
+ ispartial = 0;
+
+ /*
+ * Handle fixed-length records. If the primary database has
+ * fixed-length records, we need to pad out the datum before
+ * we pass it into the callback function; we always index the
+ * "real" record.
+ */
+ if ((dbp->type == DB_RECNO && F_ISSET(dbp, DB_AM_FIXEDLEN)) ||
+ (dbp->type == DB_QUEUE)) {
+ if (dbp->type == DB_QUEUE) {
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ re_pad = ((QUEUE *)dbp->q_internal)->re_pad;
+ } else {
+ re_len = ((BTREE *)dbp->bt_internal)->re_len;
+ re_pad = ((BTREE *)dbp->bt_internal)->re_pad;
+ }
+
+ size = ispartial ? newdata.size : data->size;
+ if (size > re_len) {
+ __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)size);
+ ret = EINVAL;
+ goto err;
+ } else if (size < re_len) {
+ /*
+ * If we're not doing a partial put, copy
+ * data->data into newdata.data, then pad out
+ * newdata.data.
+ *
+ * If we're doing a partial put, the data
+ * we want are already in newdata.data; we
+ * just need to pad.
+ *
+ * Either way, realloc is safe.
+ */
+ if ((ret = __os_realloc(dbp->dbenv, re_len,
+ &newdata.data)) != 0)
+ goto err;
+ if (!ispartial)
+ memcpy(newdata.data, data->data, size);
+ memset((u_int8_t *)newdata.data + size, re_pad,
+ re_len - size);
+ newdata.size = re_len;
+ ispartial = 1;
+ }
+ }
+
+ /*
+ * Loop through the secondaries. (Step 3.)
+ *
+ * Note that __db_s_first and __db_s_next will take care of
+ * thread-locking and refcounting issues.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary, to get the
+ * appropriate secondary key.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--don't
+ * put this key in the secondary. Just
+ * move on to the next one--we'll handle
+ * any necessary deletes in step 5.
+ */
+ continue;
+ else
+ goto err;
+ }
+
+ /*
+ * Save the DBT we just got back from the callback function
+ * off; we want to pass its value into c_get functions
+ * that may stomp on a buffer the callback function
+ * allocated.
+ */
+ memset(&save_skey, 0, sizeof(DBT)); /* Paranoia. */
+ save_skey = skey;
+
+ /*
+ * Open a cursor in this secondary.
+ *
+ * Use the same locker ID as our primary cursor, so that
+ * we're guaranteed that the locks don't conflict (e.g. in CDB
+ * or if we're subdatabases that share and want to lock a
+ * metadata page).
+ */
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+
+ /*
+ * If we're in CDB, updates will fail since the new cursor
+ * isn't a writer. However, we hold the WRITE lock in the
+ * primary and will for as long as our new cursor lasts,
+ * and the primary and secondary share a lock file ID,
+ * so it's safe to consider this a WRITER. The close
+ * routine won't try to put anything because we don't
+ * really have a lock.
+ */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * There are three cases here--
+ * 1) The secondary supports sorted duplicates.
+ * If we attempt to put a secondary/primary pair
+ * that already exists, that's a duplicate duplicate,
+ * and c_put will return DB_KEYEXIST (see __db_duperr).
+ * This will leave us with exactly one copy of the
+ * secondary/primary pair, and this is just right--we'll
+ * avoid deleting it later, as the old and new secondaries
+ * will match (since the old secondary is the dup dup
+ * that's already there).
+ * 2) The secondary supports duplicates, but they're not
+ * sorted. We need to avoid putting a duplicate
+ * duplicate, because the matching old and new secondaries
+ * will prevent us from deleting anything and we'll
+ * wind up with two secondary records that point to the
+ * same primary key. Do a c_get(DB_GET_BOTH); if
+ * that returns 0, skip the put.
+ * 3) The secondary doesn't support duplicates at all.
+ * In this case, secondary keys must be unique; if
+ * another primary key already exists for this
+ * secondary key, we have to either overwrite it or
+ * not put this one, and in either case we've
+ * corrupted the secondary index. Do a c_get(DB_SET).
+ * If the secondary/primary pair already exists, do
+ * nothing; if the secondary exists with a different
+ * primary, return an error; and if the secondary
+ * does not exist, put it.
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ /* Case 3. */
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc,
+ &skey, &oldpkey, rmw | DB_SET);
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, &pkey);
+ __os_ufree(sdbp->dbenv, oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Put results in a non-unique secondary key in an ",
+ "index not configured to support duplicates");
+ ret = EINVAL;
+ goto skipput;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto skipput;
+ } else if (!F_ISSET(sdbp, DB_AM_DUPSORT))
+ /* Case 2. */
+ if ((ret = sdbc->c_real_get(sdbc,
+ &skey, &pkey, rmw | DB_GET_BOTH)) == 0)
+ goto skipput;
+
+ ret = sdbc->c_put(sdbc, &skey, &pkey, DB_UPDATE_SECONDARY);
+
+ /*
+ * We don't know yet whether this was a put-overwrite that
+ * in fact changed nothing. If it was, we may get DB_KEYEXIST.
+ * This is not an error.
+ */
+ if (ret == DB_KEYEXIST)
+ ret = 0;
+
+skipput: FREE_IF_NEEDED(sdbp, &save_skey)
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+ if (ret != 0)
+ goto err;
+
+ /* If still necessary, go get the old primary key/data. (Step 4.) */
+ if (!have_oldrec) {
+ /* See the comments in step 2. This is real familiar. */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(flags != DB_CURRENT);
+ pkey.data = key->data;
+ pkey.size = key->size;
+ ret = pdbc->c_get(pdbc, &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ have_oldrec = 1;
+ }
+
+ /*
+ * If we don't follow this goto, we do in fact have an old record
+ * we may need to go delete. (Step 5).
+ */
+ if (nodel)
+ goto skip_s_update;
+
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary to get the
+ * old secondary key.
+ */
+ memset(&oldskey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, &olddata, &oldskey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--there's
+ * nothing to delete. Go on to the next
+ * secondary.
+ */
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0 &&
+ ret != DB_DONOTINDEX)
+ goto err;
+
+ /*
+ * If there is no new secondary key, or if the old secondary
+ * key is different from the new secondary key, then
+ * we need to delete the old one.
+ *
+ * Note that bt_compare is (and must be) set no matter
+ * what access method we're in.
+ */
+ sdbc = NULL;
+ if (ret == DB_DONOTINDEX ||
+ ((BTREE *)sdbp->bt_internal)->bt_compare(sdbp,
+ &oldskey, &skey) != 0) {
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Don't let c_get(DB_GET_BOTH) stomp on
+ * any secondary key value that the callback
+ * function may have allocated. Use a temp
+ * DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = oldskey.data;
+ temp.size = oldskey.size;
+ if ((ret = sdbc->c_real_get(sdbc,
+ &temp, &pkey, rmw | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ FREE_IF_NEEDED(sdbp, &oldskey);
+ if (sdbc != NULL && (t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Secondary index updates are now done. On to the "real" stuff. */
+
+skip_s_update:
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the put operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ if (dbc_arg->internal->opd != NULL &&
+ (flags == DB_AFTER || flags == DB_BEFORE || flags == DB_CURRENT)) {
+ /*
+ * A special case for hash off-page duplicates. Hash doesn't
+ * support (and is documented not to support) put operations
+ * relative to a cursor which references an already deleted
+ * item. For consistency, apply the same criteria to off-page
+ * duplicates as well.
+ */
+ if (dbc_arg->dbtype == DB_HASH && F_ISSET(
+ ((BTREE_CURSOR *)(dbc_arg->internal->opd->internal)),
+ C_DELETED)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ opd = dbc_n->internal->opd;
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * and call the underlying function.
+ *
+ * XXX: MARGO
+ *
+ tmp_flags = flags == DB_AFTER ||
+ flags == DB_BEFORE || flags == DB_CURRENT ? DB_POSITIONI : 0;
+ */
+ tmp_flags = DB_POSITIONI;
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
+ goto err;
+
+ pgno = PGNO_INVALID;
+ if ((ret = dbc_n->c_am_put(dbc_n, key, data, flags, &pgno)) != 0)
+ goto err;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ oldopd = dbc_n->internal->opd;
+ if ((ret = __db_c_newopd(dbc_arg, pgno, oldopd, &opd)) != 0) {
+ dbc_n->internal->opd = opd;
+ goto err;
+ }
+
+ dbc_n->internal->opd = opd;
+
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ }
+
+done:
+err: /* Cleanup and cursor resolution. */
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* If newdata was used, free its buffer. */
+ if (newdata.data != NULL)
+ __os_free(dbp->dbenv, newdata.data);
+
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0)
+ return (t_ret);
+
+ return (ret);
+}
+
+/*
+ * __db_duperr()
+ * Error message: we don't currently support sorted duplicate duplicates.
+ * PUBLIC: int __db_duperr __P((DB *, u_int32_t));
+ */
+int
+__db_duperr(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+
+ /*
+ * If we run into this error while updating a secondary index,
+ * don't yell--there's no clean way to pass DB_NODUPDATA in along
+ * with DB_UPDATE_SECONDARY, but we may run into this problem
+ * in a normal, non-error course of events.
+ *
+ * !!!
+ * If and when we ever permit duplicate duplicates in sorted-dup
+ * databases, we need to either change the secondary index code
+ * to check for dup dups, or we need to maintain the implicit
+ * "DB_NODUPDATA" behavior for databases with DB_AM_SECONDARY set.
+ */
+ if (flags != DB_NODUPDATA && !F_ISSET(dbp, DB_AM_SECONDARY))
+ __db_err(dbp->dbenv,
+ "Duplicate data items are not supported with sorted data");
+ return (DB_KEYEXIST);
+}
+
+/*
+ * __db_c_cleanup --
+ * Clean up duplicate cursors.
+ */
+static int
+__db_c_cleanup(dbc, dbc_n, failed)
+ DBC *dbc, *dbc_n;
+ int failed;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *internal;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ internal = dbc->internal;
+ ret = 0;
+
+ /* Discard any pages we're holding. */
+ if (internal->page != NULL) {
+ if ((t_ret = mpf->put(mpf, internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ internal->page = NULL;
+ }
+ opd = internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If dbc_n is NULL, there's no internal cursor swapping to be done
+ * and no dbc_n to close--we probably did the entire operation on an
+ * offpage duplicate cursor. Just return.
+ *
+ * If dbc and dbc_n are the same, we're either inside a DB->{put/get}
+ * operation, and as an optimization we performed the operation on
+ * the main cursor rather than on a duplicated one, or we're in a
+ * bulk get that can't have moved the cursor (DB_MULTIPLE with the
+ * initial c_get operation on an off-page dup cursor). Just
+ * return--either we know we didn't move the cursor, or we're going
+ * to close it before we return to application code, so we're sure
+ * not to visibly violate the "cursor stays put on error" rule.
+ */
+ if (dbc_n == NULL || dbc == dbc_n)
+ return (ret);
+
+ if (dbc_n->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, dbc_n->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbc_n->internal->page = NULL;
+ }
+ opd = dbc_n->internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If we didn't fail before entering this routine or just now when
+ * freeing pages, swap the interesting contents of the old and new
+ * cursors.
+ */
+ if (!failed && ret == 0) {
+ dbc->internal = dbc_n->internal;
+ dbc_n->internal = internal;
+ }
+
+ /*
+ * Close the cursor we don't care about anymore. The close can fail,
+ * but we only expect DB_LOCK_DEADLOCK failures. This violates our
+ * "the cursor is unchanged on error" semantics, but since all you can
+ * do with a DB_LOCK_DEADLOCK failure is close the cursor, I believe
+ * that's OK.
+ *
+ * XXX
+ * There's no way to recover from failure to close the old cursor.
+ * All we can do is move to the new position and return an error.
+ *
+ * XXX
+ * We might want to consider adding a flag to the cursor, so that any
+ * subsequent operations other than close just return an error?
+ */
+ if ((t_ret = dbc_n->c_close(dbc_n)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
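+
+/*
+ * A sketch of the duplicate/operate/resolve pattern used by the get and put
+ * paths above (illustrative; see __db_c_get and __db_c_put for the real
+ * thing):
+ *
+ *	if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
+ *		goto err;
+ *	ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
+ * err:	if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ *		ret = t_ret;
+ *
+ * On success __db_c_cleanup swaps the internal state into dbc_arg; either
+ * way it discards held pages and closes the duplicate cursor.
+ */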
+
+/*
+ * __db_c_secondary_get --
+ * This wrapper function for DBC->c_pget() is the DBC->c_get() function
+ * for a secondary index cursor.
+ *
+ * PUBLIC: int __db_c_secondary_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_secondary_get(dbc, skey, data, flags)
+ DBC *dbc;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ DB_ASSERT(F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ return (dbc->c_pget(dbc, skey, NULL, data, flags));
+}
+
+/*
+ * __db_c_pget --
+ * Get a primary key/data pair through a secondary index.
+ *
+ * PUBLIC: int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_pget(dbc, skey, pkey, data, flags)
+ DBC *dbc;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DBC *pdbc;
+ DBT *save_rdata, nullpkey;
+ int pkeymalloc, ret, save_pkey_flags, t_ret;
+
+ sdbp = dbc->dbp;
+ pdbp = sdbp->s_primary;
+ pkeymalloc = t_ret = 0;
+
+ PANIC_CHECK(sdbp->dbenv);
+ if ((ret = __db_cpgetchk(sdbp,
+ skey, pkey, data, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /*
+ * The challenging part of this function is getting the behavior
+ * right for all the various permutations of DBT flags. The
+ * next several blocks handle the various cases we need to
+ * deal with specially.
+ */
+
+ /*
+ * We may be called with a NULL pkey argument, if we've been
+ * wrapped by a 2-DBT get call. If so, we need to use our
+ * own DBT.
+ */
+ if (pkey == NULL) {
+ memset(&nullpkey, 0, sizeof(DBT));
+ pkey = &nullpkey;
+ }
+
+ /*
+ * DB_GET_RECNO is a special case, because we're interested not in
+ * the primary key/data pair, but rather in the primary's record
+ * number.
+ */
+ if ((flags & DB_OPFLAGS_MASK) == DB_GET_RECNO)
+ return (__db_c_pget_recno(dbc, pkey, data, flags));
+
+ /*
+ * If the DBTs we've been passed don't have any of the
+ * user-specified memory management flags set, we want to make sure
+ * we return values using the DBTs dbc->rskey, dbc->rkey, and
+ * dbc->rdata, respectively.
+ *
+ * There are two tricky aspects to this: first, we need to pass
+ * skey and pkey *in* to the initial c_get on the secondary key,
+ * since either or both may be looked at by it (depending on the
+ * get flag). Second, we must not use a normal DB->get call
+ * on the secondary, even though that's what we want to accomplish,
+ * because the DB handle may be free-threaded. Instead,
+ * we open a cursor, then take steps to ensure that we actually use
+ * the rkey/rdata from the *secondary* cursor.
+ *
+ * We accomplish all this by passing in the DBTs we started out
+ * with to the c_get, but having swapped the contents of rskey and
+ * rkey, respectively, into rkey and rdata; __db_ret will treat
+ * them like the normal key/data pair in a c_get call, and will
+ * realloc them as need be (this is "step 1"). Then, for "step 2",
+ * we swap back rskey/rkey/rdata to normal, and do a get on the primary
+ * with the secondary dbc appointed as the owner of the returned-data
+ * memory.
+ *
+ * Note that in step 2, we copy the flags field in case we need to
+ * pass down a DB_DBT_PARTIAL or other flag that is compatible with
+ * letting DB do the memory management.
+ */
+ /* Step 1. */
+ save_rdata = dbc->rdata;
+ dbc->rdata = dbc->rkey;
+ dbc->rkey = dbc->rskey;
+
+ /*
+ * It is correct, though slightly sick, to attempt a partial get
+ * of a primary key. However, if we do so here, we'll never find the
+ * primary record; clear the DB_DBT_PARTIAL field of pkey just
+ * for the duration of the next call.
+ */
+ save_pkey_flags = pkey->flags;
+ F_CLR(pkey, DB_DBT_PARTIAL);
+
+ /*
+ * Now we can go ahead with the meat of this call. First, get the
+ * primary key from the secondary index. (What exactly we get depends
+ * on the flags, but the underlying cursor get will take care of the
+ * dirty work.)
+ */
+ if ((ret = dbc->c_real_get(dbc, skey, pkey, flags)) != 0) {
+ /* Restore rskey/rkey/rdata and return. */
+ pkey->flags = save_pkey_flags;
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+ goto err;
+ }
+
+ /* Restore pkey's flags in case we stomped the PARTIAL flag. */
+ pkey->flags = save_pkey_flags;
+
+ /*
+ * Restore the cursor's rskey, rkey, and rdata DBTs. If DB
+ * is handling the memory management, we now have newly
+ * reallocated buffers and ulens in rkey and rdata which we want
+ * to put in rskey and rkey. save_rdata contains the old value
+ * of dbc->rdata.
+ */
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+
+ /*
+ * Now we're ready for "step 2". If either or both of pkey and
+ * data do not have memory management flags set--that is, if DB is
+ * managing their memory--we need to swap around the rkey/rdata
+ * structures so that we don't wind up trying to use memory managed
+ * by the primary database cursor, which we'll close before we return.
+ *
+ * !!!
+ * If you're carefully following the bouncing ball, you'll note
+ * that in the DB-managed case, the buffer hanging off of pkey is
+ * the same as dbc->rkey->data. This is just fine; we may well
+	 * realloc and stomp on it when we return, if we're doing a
+ * DB_GET_BOTH and need to return a different partial or key
+ * (depending on the comparison function), but this is safe.
+ *
+ * !!!
+ * We need to use __db_icursor here rather than simply calling
+ * pdbp->cursor, because otherwise, if we're in CDB, we'll
+ * allocate a new locker ID and leave ourselves open to deadlocks.
+ * (Even though we're only acquiring read locks, we'll still block
+ * if there are any waiters.)
+ */
+ if ((ret = __db_icursor(pdbp,
+ dbc->txn, pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /*
+ * We're about to use pkey a second time. If DB_DBT_MALLOC
+ * is set on it, we'll leak the memory we allocated the first time.
+ * Thus, set DB_DBT_REALLOC instead so that we reuse that memory
+ * instead of leaking it.
+ *
+ * !!!
+ * This assumes that the user must always specify a compatible
+ * realloc function if a malloc function is specified. I think
+ * this is a reasonable requirement.
+ */
+ if (F_ISSET(pkey, DB_DBT_MALLOC)) {
+ F_CLR(pkey, DB_DBT_MALLOC);
+ F_SET(pkey, DB_DBT_REALLOC);
+ pkeymalloc = 1;
+ }
+
+ /*
+ * Do the actual get. Set DBC_TRANSIENT since we don't care
+ * about preserving the position on error, and it's faster.
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ F_SET(pdbc, DBC_TRANSIENT);
+ SET_RET_MEM(pdbc, dbc);
+ ret = pdbc->c_get(pdbc, pkey, data, DB_SET);
+
+ /*
+ * If the item wasn't found in the primary, this is a bug;
+ * our secondary has somehow gotten corrupted, and contains
+ * elements that don't correspond to anything in the primary.
+ * Complain.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ /* Now close the primary cursor. */
+ t_ret = pdbc->c_close(pdbc);
+
+err: if (pkeymalloc) {
+ /*
+ * If pkey had a MALLOC flag, we need to restore it;
+ * otherwise, if the user frees the buffer but reuses
+ * the DBT without NULL'ing its data field or changing
+ * the flags, we may drop core.
+ */
+ F_CLR(pkey, DB_DBT_REALLOC);
+ F_SET(pkey, DB_DBT_MALLOC);
+ }
+ return (t_ret == 0 ? ret : t_ret);
+}
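+
+/*
+ * In outline (a restatement of the logic above, not an alternate path): a
+ * c_pget is two gets.  The secondary cursor's c_real_get fills in the primary
+ * key for the requested secondary key, and a transient cursor opened on the
+ * s_primary handle with the same locker then does a DB_SET get on that
+ * primary key for the data, with SET_RET_MEM making the secondary DBC own
+ * any returned memory.
+ */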
+
+/*
+ * __db_c_pget_recno --
+ * Perform a DB_GET_RECNO c_pget on a secondary index. Returns
+ * the secondary's record number in the pkey field and the primary's
+ * in the data field.
+ */
+static int
+__db_c_pget_recno(sdbc, pkey, data, flags)
+ DBC *sdbc;
+ DBT *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DB_ENV *dbenv;
+ DBC *pdbc;
+ DBT discardme, primary_key;
+ db_recno_t oob;
+ u_int32_t rmw;
+ int ret, t_ret;
+
+ sdbp = sdbc->dbp;
+ pdbp = sdbp->s_primary;
+ dbenv = sdbp->dbenv;
+ pdbc = NULL;
+ ret = t_ret = 0;
+
+ rmw = LF_ISSET(DB_RMW);
+
+ memset(&discardme, 0, sizeof(DBT));
+ F_SET(&discardme, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ oob = RECNO_OOB;
+
+ /*
+ * If the primary is an rbtree, we want its record number, whether
+ * or not the secondary is one too. Fetch the recno into "data".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "data".
+ */
+ if (F_ISSET(pdbp, DB_AM_RECNUM)) {
+ /*
+ * Get the primary key, so we can find the record number
+ * in the primary. (We're uninterested in the secondary key.)
+ */
+ memset(&primary_key, 0, sizeof(DBT));
+ F_SET(&primary_key, DB_DBT_MALLOC);
+ if ((ret = sdbc->c_real_get(sdbc,
+ &discardme, &primary_key, rmw | DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Open a cursor on the primary, set it to the right record,
+ * and fetch its recno into "data".
+ *
+ * (See __db_c_pget for a comment on the use of __db_icursor.)
+ *
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ if ((ret = __db_icursor(pdbp, sdbc->txn,
+ pdbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto perr;
+ SET_RET_MEM(pdbc, sdbc);
+ if ((ret = pdbc->c_get(pdbc,
+ &primary_key, &discardme, rmw | DB_SET)) != 0)
+ goto perr;
+
+ ret = pdbc->c_get(pdbc, &discardme, data, rmw | DB_GET_RECNO);
+
+perr: __os_ufree(sdbp->dbenv, primary_key.data);
+ if (pdbc != NULL &&
+ (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ } else if ((ret = __db_retcopy(dbenv, data, &oob,
+ sizeof(oob), &sdbc->rkey->data, &sdbc->rkey->ulen)) != 0)
+ return (ret);
+
+ /*
+ * If the secondary is an rbtree, we want its record number, whether
+ * or not the primary is one too. Fetch the recno into "pkey".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "pkey".
+ */
+ if (F_ISSET(sdbp, DB_AM_RECNUM))
+ return (sdbc->c_real_get(sdbc, &discardme, pkey, flags));
+ else
+ return (__db_retcopy(dbenv, pkey, &oob,
+ sizeof(oob), &sdbc->rdata->data, &sdbc->rdata->ulen));
+}
+
+/*
+ * __db_wrlock_err -- do not have a write lock.
+ */
+static int
+__db_wrlock_err(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "Write attempted on read-only cursor");
+ return (EPERM);
+}
+
+/*
+ * __db_c_del_secondary --
+ * Perform a delete operation on a secondary index: call through
+ * to the primary and delete the primary record that this record
+ * points to.
+ *
+ * Note that deleting the primary record will call c_del on all
+ * the secondaries, including this one; thus, it is not necessary
+ * to execute both this function and an actual delete.
+ *
+ */
+static int
+__db_c_del_secondary(dbc)
+ DBC *dbc;
+{
+ DB *pdbp;
+ DBC *pdbc;
+ DBT skey, pkey;
+ int ret, t_ret;
+
+ memset(&skey, 0, sizeof(DBT));
+ memset(&pkey, 0, sizeof(DBT));
+
+ /*
+ * Get the current item that we're pointing at.
+ * We don't actually care about the secondary key, just
+ * the primary.
+ */
+ F_SET(&skey, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = dbc->c_real_get(dbc,
+ &skey, &pkey, DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Create a cursor on the primary with our locker ID,
+ * so that when it calls back, we don't conflict.
+ *
+ * We create a cursor explicitly because the DB->del
+ * interface gives us no way to specify the same locker
+ * ID when we're using locking but not transactions.
+ * This shouldn't be any less efficient anyway.
+ */
+ pdbp = dbc->dbp->s_primary;
+ if ((ret = __db_icursor(pdbp, dbc->txn,
+ pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ return (ret);
+
+ /*
+ * See comment in __db_c_put--if we're in CDB,
+ * we already hold the locks we need, and we need to flag
+ * the cursor as a WRITER so we don't run into errors
+ * when we try to delete.
+ */
+ if (CDB_LOCKING(pdbp->dbenv)) {
+ DB_ASSERT(pdbc->mylock.off == LOCK_INVALID);
+ F_SET(pdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the new cursor to the correct primary key. Then
+ * delete it. We don't really care about the datum;
+ * just reuse our skey DBT.
+ *
+ * If the primary get returns DB_NOTFOUND, something is amiss--
+ * every record in the secondary should correspond to some record
+ * in the primary.
+ */
+ if ((ret = pdbc->c_get(pdbc, &pkey, &skey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_SET)) == 0)
+ ret = pdbc->c_del(pdbc, 0);
+ else if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ if ((t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_c_del_primary --
+ * Perform a delete operation on a primary index. Loop through
+ * all the secondary indices which correspond to this primary
+ * database, and delete any secondary keys that point at the current
+ * record.
+ *
+ * PUBLIC: int __db_c_del_primary __P((DBC *));
+ */
+int
+__db_c_del_primary(dbc)
+ DBC *dbc;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc;
+ DBT data, pkey, skey, temp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+
+ /*
+ * If we're called at all, we have at least one secondary.
+ * (Unfortunately, we can't assert this without grabbing the mutex.)
+ * Get the current record so that we can construct appropriate
+ * secondary keys as needed.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ if ((ret = dbc->c_get(dbc, &pkey, &data, DB_CURRENT)) != 0)
+ return (ret);
+
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Get the secondary key for this secondary and the current
+ * item.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, &pkey, &data, &skey)) != 0) {
+ /*
+ * If the current item isn't in this index, we
+ * have no work to do. Proceed.
+ */
+ if (ret == DB_DONOTINDEX)
+ continue;
+
+ /* We had a substantive error. Bail. */
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto done;
+ }
+
+ /* Open a secondary cursor. */
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0)
+ goto done;
+ /* See comment above and in __db_c_put. */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the secondary cursor to the appropriate item.
+ * Delete it.
+ *
+ * We want to use DB_RMW if locking is on; it's only
+ * legal then, though.
+ *
+ * !!!
+ * Don't stomp on any callback-allocated buffer in skey
+ * when we do a c_get(DB_GET_BOTH); use a temp DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = skey.data;
+ temp.size = skey.size;
+ if ((ret = sdbc->c_real_get(sdbc, &temp, &pkey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+
+ FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 || ret != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ goto done;
+ }
+ }
+
+done: if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_s_first --
+ * Get the first secondary, if any are present, from the primary.
+ *
+ * PUBLIC: DB *__db_s_first __P((DB *));
+ */
+DB *
+__db_s_first(pdbp)
+ DB *pdbp;
+{
+ DB *sdbp;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ sdbp = LIST_FIRST(&pdbp->s_secondaries);
+
+ /* See __db_s_next. */
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (sdbp);
+}
+
+/*
+ * __db_s_next --
+ * Get the next secondary in the list.
+ *
+ * PUBLIC: int __db_s_next __P((DB **));
+ */
+int
+__db_s_next(sdbpp)
+ DB **sdbpp;
+{
+ DB *sdbp, *pdbp, *closeme;
+ int ret;
+
+ /*
+ * Secondary indices are kept in a linked list, s_secondaries,
+ * off each primary DB handle. If a primary is free-threaded,
+ * this list may only be traversed or modified while the primary's
+ * thread mutex is held.
+ *
+ * The tricky part is that we don't want to hold the thread mutex
+ * across the full set of secondary puts necessary for each primary
+ * put, or we'll wind up essentially single-threading all the puts
+ * to the handle; the secondary puts will each take about as
+ * long as the primary does, and may require I/O. So we instead
+ * hold the thread mutex only long enough to follow one link to the
+ * next secondary, and then we release it before performing the
+ * actual secondary put.
+ *
+ * The only danger here is that we might legitimately close a
+ * secondary index in one thread while another thread is performing
+ * a put and trying to update that same secondary index. To
+ * prevent this from happening, we refcount the secondary handles.
+ * If close is called on a secondary index handle while we're putting
+ * to it, it won't really be closed--the refcount will simply drop,
+ * and we'll be responsible for closing it here.
+ */
+ sdbp = *sdbpp;
+ pdbp = sdbp->s_primary;
+ closeme = NULL;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ closeme = sdbp;
+ }
+ sdbp = LIST_NEXT(sdbp, s_links);
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ *sdbpp = sdbp;
+
+ /*
+ * closeme->close() is a wrapper; call __db_close explicitly.
+ */
+ ret = closeme != NULL ? __db_close(closeme, 0) : 0;
+ return (ret);
+}
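+
+/*
+ * Editorial note (illustration, not part of the original Sleepycat source):
+ * the traversal idiom these helpers support, as used by __db_c_del_primary
+ * above, is
+ *
+ *	for (sdbp = __db_s_first(pdbp);
+ *	    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp))
+ *		...;
+ *
+ * with __db_s_done used to drop the reference if the loop exits early.
+ */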
+
+/*
+ * __db_s_done --
+ * Properly decrement the refcount on a secondary database handle we're
+ * using, without calling __db_s_next.
+ *
+ * PUBLIC: int __db_s_done __P((DB *));
+ */
+int
+__db_s_done(sdbp)
+ DB *sdbp;
+{
+ DB *pdbp;
+ int doclose;
+
+ pdbp = sdbp->s_primary;
+ doclose = 0;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (doclose ? __db_close(sdbp, 0) : 0);
+}
+
+/*
+ * __db_buildpartial --
+ * Build the record that will result after a partial put is applied to
+ * an existing record.
+ *
+ * This should probably be merged with __bam_build, but that requires
+ * a little trickery if we plan to keep the overflow-record optimization
+ * in that function.
+ */
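+/*
+ * Editorial note (worked example, not part of the original Sleepycat
+ * source): given a 10-byte record "ABCDEFGHIJ" and a partial DBT with
+ * doff = 3, dlen = 2, size = 4 and data "wxyz", __db_partsize yields
+ * 10 - 2 + 4 = 12 bytes and the rebuilt record is "ABC" (leading old
+ * data) + "wxyz" (the partial data) + "FGHIJ" (old data past
+ * doff + dlen), i.e. "ABCwxyzFGHIJ".
+ */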
+static int
+__db_buildpartial(dbp, oldrec, partial, newrec)
+ DB *dbp;
+ DBT *oldrec, *partial, *newrec;
+{
+ int ret;
+ u_int8_t *buf;
+ u_int32_t len, nbytes;
+
+ DB_ASSERT(F_ISSET(partial, DB_DBT_PARTIAL));
+
+ memset(newrec, 0, sizeof(DBT));
+
+ nbytes = __db_partsize(oldrec->size, partial);
+ newrec->size = nbytes;
+
+ if ((ret = __os_malloc(dbp->dbenv, nbytes, &buf)) != 0)
+ return (ret);
+ newrec->data = buf;
+
+ /* Nul or pad out the buffer, for any part that isn't specified. */
+ memset(buf,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? ((BTREE *)dbp->bt_internal)->re_pad :
+ 0, nbytes);
+
+ /* Copy in any leading data from the original record. */
+ memcpy(buf, oldrec->data,
+ partial->doff > oldrec->size ? oldrec->size : partial->doff);
+
+ /* Copy the data from partial. */
+ memcpy(buf + partial->doff, partial->data, partial->size);
+
+ /* Copy any trailing data from the original record. */
+ len = partial->doff + partial->dlen;
+ if (oldrec->size > len)
+ memcpy(buf + partial->doff + partial->size,
+ (u_int8_t *)oldrec->data + len, oldrec->size - len);
+
+ return (0);
+}
+
+/*
+ * __db_partsize --
+ * Given the number of bytes in an existing record and a DBT that
+ * is about to be partial-put, calculate the size of the record
+ * after the put.
+ *
+ * This code is called from __bam_partsize.
+ *
+ * PUBLIC: u_int32_t __db_partsize __P((u_int32_t, DBT *));
+ */
+u_int32_t
+__db_partsize(nbytes, data)
+ u_int32_t nbytes;
+ DBT *data;
+{
+
+ /*
+ * There are really two cases here:
+ *
+ * Case 1: We are replacing some bytes that do not exist (i.e., they
+ * are past the end of the record). In this case the number of bytes
+ * we are replacing is irrelevant and all we care about is how many
+ * bytes we are going to add from offset. So, the new record length
+ * is going to be the size of the new bytes (size) plus wherever those
+ * new bytes begin (doff).
+ *
+ * Case 2: All the bytes we are replacing exist. Therefore, the new
+ * size is the oldsize (nbytes) minus the bytes we are replacing (dlen)
+ * plus the bytes we are adding (size).
+ */
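+	/*
+	 * Editorial note (worked example, not part of the original
+	 * Sleepycat source): nbytes = 20, doff = 25, dlen = 5, size = 10
+	 * falls into Case 1 (the replaced range starts past the end) and
+	 * yields 25 + 10 = 35; nbytes = 20, doff = 5, dlen = 5, size = 10
+	 * falls into Case 2 and yields 20 + 10 - 5 = 25.
+	 */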
+ if (nbytes < data->doff + data->dlen) /* Case 1 */
+ return (data->doff + data->size);
+
+ return (nbytes + data->size - data->dlen); /* Case 2 */
+}
diff --git a/libdb/db/db_conv.c b/libdb/db/db_conv.c
new file mode 100644
index 0000000..de2710d
--- /dev/null
+++ b/libdb/db/db_conv.c
@@ -0,0 +1,550 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __db_pgin --
+ * Primary page-swap routine.
+ *
+ * PUBLIC: int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgin(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB dummydb, *dbp;
+ DB_PGINFO *pginfo;
+ DB_CIPHER *db_cipher;
+ DB_LSN not_used;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int is_hmac, ret;
+ u_int8_t *chksum, *iv;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
+
+ ret = is_hmac = 0;
+ chksum = iv = NULL;
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * If checksumming is set on the meta-page, we must set
+ * it in the dbp.
+ */
+ if (FLD_ISSET(((DBMETA *)pp)->metaflags, DBMETA_CHKSUM))
+ F_SET(dbp, DB_AM_CHKSUM);
+ if (((DBMETA *)pp)->encrypt_alg != 0 ||
+ F_ISSET(dbp, DB_AM_ENCRYPT))
+ is_hmac = 1;
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ /*
+ * We assume that we've read a file hole if we have
+ * a zero LSN, zero page number and P_INVALID. Otherwise
+ * we have an invalid page that might contain real data.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) && pagep->pgno == PGNO_INVALID) {
+ sum_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ /*
+ * If we are reading in a non-meta page and we have a
+ * db_cipher, then we are using hmac.
+ */
+ is_hmac = CRYPTO_ON(dbenv) ? 1 : 0;
+ break;
+ }
+
+ /*
+ * We expect a checksum error if there was a configuration problem.
+ * If there is no configuration problem and we don't get a match,
+ * it's fatal: panic the system.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM) && sum_len != 0)
+ switch (ret = __db_check_chksum(
+ dbenv, db_cipher, chksum, pp, sum_len, is_hmac)) {
+ case 0:
+ break;
+ case -1:
+ if (DBENV_LOGGING(dbenv))
+ __db_cksum_log(
+ dbenv, NULL, &not_used, DB_FLUSH);
+ __db_err(dbenv,
+ "checksum error: catastrophic recovery required");
+ return (__db_panic(dbenv, DB_RUNRECOVERY));
+ default:
+ return (ret);
+ }
+
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ pagep->pgno == PGNO_INVALID) {
+ pg_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ if (pg_len != 0 && (ret = db_cipher->decrypt(dbenv,
+ db_cipher->data, iv, ((u_int8_t *)pagep) + pg_off,
+ pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ switch (pagep->type) {
+ case P_INVALID:
+ if (pginfo->type == DB_QUEUE)
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ else
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_HASH:
+ case P_HASHMETA:
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ return (__bam_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_QAMMETA:
+ case P_QAMDATA:
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ default:
+ break;
+ }
+ return (__db_pgfmt(dbenv, pg));
+}
+
+/*
+ * __db_pgout --
+ * Primary page-swap routine.
+ *
+ * PUBLIC: int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgout(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB dummydb, *dbp;
+ DB_CIPHER *db_cipher;
+ DB_PGINFO *pginfo;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int ret;
+ u_int8_t *chksum, *iv, *key;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
+
+ chksum = iv = key = NULL;
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ ret = 0;
+ switch (pagep->type) {
+ case P_INVALID:
+ if (pginfo->type == DB_QUEUE)
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
+ else
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_HASH:
+ case P_HASHMETA:
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ ret = __bam_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_QAMMETA:
+ case P_QAMDATA:
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
+ break;
+ default:
+ return (__db_pgfmt(dbenv, pg));
+ }
+ if (ret)
+ return (ret);
+
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ key = db_cipher->mac_key;
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ if ((ret = db_cipher->encrypt(dbenv, db_cipher->data,
+ iv, ((u_int8_t *)pagep) + pg_off, pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ if (F_ISSET(dbp, DB_AM_CHKSUM)) {
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ break;
+ }
+ __db_chksum(pp, sum_len, key, chksum);
+ }
+ return (0);
+}
+
+/*
+ * __db_metaswap --
+ * Byteswap the common part of the meta-data page.
+ *
+ * PUBLIC: void __db_metaswap __P((PAGE *));
+ */
+void
+__db_metaswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ p = (u_int8_t *)pg;
+
+ /* Swap the meta-data information. */
+ SWAP32(p); /* lsn.file */
+ SWAP32(p); /* lsn.offset */
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* magic */
+ SWAP32(p); /* version */
+ SWAP32(p); /* pagesize */
+ p += 4; /* unused, page type, unused, unused */
+ SWAP32(p); /* free */
+ SWAP32(p); /* alloc_lsn part 1 */
+ SWAP32(p); /* alloc_lsn part 2 */
+ SWAP32(p); /* cached key count */
+ SWAP32(p); /* cached record count */
+ SWAP32(p); /* flags */
+}
+
+/*
+ * __db_byteswap --
+ * Byteswap a page.
+ *
+ * PUBLIC: int __db_byteswap
+ * PUBLIC: __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int));
+ */
+int
+__db_byteswap(dbenv, dbp, pg, h, pagesize, pgin)
+ DB_ENV *dbenv;
+ DB *dbp;
+ db_pgno_t pg;
+ PAGE *h;
+ size_t pagesize;
+ int pgin;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ RINTERNAL *ri;
+ db_indx_t i, *inp, len, tmp;
+ u_int8_t *p, *end;
+
+ COMPQUIET(pg, 0);
+
+ inp = P_INP(dbp, h);
+ if (pgin) {
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+
+ switch (h->type) {
+ case P_HASH:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ switch (HPAGE_TYPE(dbp, h, i)) {
+ case H_KEYDATA:
+ break;
+ case H_DUPLICATE:
+ len = LEN_HKEYDATA(dbp, h, pagesize, i);
+ p = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
+ for (end = p + len; p < end;) {
+ if (pgin) {
+ P_16_SWAP(p);
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ } else {
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ SWAP16(p);
+ }
+ p += tmp;
+ SWAP16(p);
+ }
+ break;
+ case H_OFFDUP:
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+ SWAP32(p); /* pgno */
+ break;
+ case H_OFFPAGE:
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* tlen */
+ break;
+ }
+
+ }
+
+ /*
+ * The offsets in the inp array are used to determine
+ * the size of entries on a page; therefore they
+ * cannot be converted until we've done all the
+ * entries.
+ */
+ if (!pgin)
+ for (i = 0; i < NUM_ENT(h); i++)
+ M_16_SWAP(inp[i]);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ /*
+ * In the case of on-page duplicates, key information
+ * should only be swapped once.
+ */
+ if (h->type == P_LBTREE && i > 1) {
+ if (pgin) {
+ if (inp[i] == inp[i - 2])
+ continue;
+ } else {
+ M_16_SWAP(inp[i]);
+ if (inp[i] == inp[i - 2])
+ continue;
+ M_16_SWAP(inp[i]);
+ }
+ }
+
+ bk = GET_BKEYDATA(dbp, h, i);
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ M_16_SWAP(bk->len);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bk;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_IBTREE:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ bi = GET_BINTERNAL(dbp, h, i);
+ M_16_SWAP(bi->len);
+ M_32_SWAP(bi->pgno);
+ M_32_SWAP(bi->nrecs);
+
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bi->data;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_IRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ ri = GET_RINTERNAL(dbp, h, i);
+ M_32_SWAP(ri->pgno);
+ M_32_SWAP(ri->nrecs);
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_OVERFLOW:
+ case P_INVALID:
+ /* Nothing to do. */
+ break;
+ default:
+ return (__db_pgfmt(dbenv, pg));
+ }
+
+ if (!pgin) {
+ /* Swap the header information. */
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+ return (0);
+}
diff --git a/libdb/db/db_dispatch.c b/libdb/db/db_dispatch.c
new file mode 100644
index 0000000..37d4b1d
--- /dev/null
+++ b/libdb/db/db_dispatch.c
@@ -0,0 +1,1404 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __db_limbo_fix __P((DB *,
+ DB_TXN *, DB_TXNLIST *, db_pgno_t *, DBMETA *));
+static int __db_limbo_bucket __P((DB_ENV *, DB_TXN *, DB_TXNLIST *));
+static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *));
+static int __db_lock_move __P((DB_ENV *,
+ u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *));
+static int __db_default_getpgnos __P((DB_ENV *, DB_LSN *lsnp, void *));
+static int __db_txnlist_find_internal __P((DB_ENV *, void *, db_txnlist_type,
+ u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int));
+static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
+ int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+
+/*
+ * __db_dispatch --
+ *
+ * This is the transaction dispatch function used by the db access methods.
+ * It is designed to handle the record format used by all the access
+ * methods (the one automatically generated by the db_{h,log,read}.sh
+ * scripts in the tools directory). An application using a different
+ * recovery paradigm will supply a different dispatch function to txn_open.
+ *
+ * PUBLIC: int __db_dispatch __P((DB_ENV *,
+ * PUBLIC: int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)),
+ * PUBLIC: size_t, DBT *, DB_LSN *, db_recops, void *));
+ */
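+/*
+ * Editorial note (not part of the original Sleepycat source): records whose
+ * rectype is at or above DB_user_BEGIN are routed to the application's
+ * dbenv->app_dispatch callback when one is registered; everything else is
+ * dispatched through dtab[rectype] (see the make_call handling below).
+ */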
+int
+__db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
+ DB_ENV *dbenv; /* The environment. */
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize; /* Size of the dtab. */
+ DBT *db; /* The log record upon which to dispatch. */
+ DB_LSN *lsnp; /* The lsn of the record being dispatched. */
+ db_recops redo; /* Redo this op (or undo it). */
+ void *info;
+{
+ DB_LSN prev_lsn;
+ u_int32_t rectype, txnid;
+ int make_call, ret;
+
+ memcpy(&rectype, db->data, sizeof(rectype));
+ memcpy(&txnid, (u_int8_t *)db->data + sizeof(rectype), sizeof(txnid));
+ make_call = ret = 0;
+
+ /* If we don't have a dispatch table, it's hard to dispatch. */
+ DB_ASSERT(dtab != NULL);
+
+ /*
+ * If we find a record that is in the user's number space and they
+ * have specified a recovery routine, let them handle it. If they
+ * didn't specify a recovery routine, then we expect that they've
+ * followed all our rules and registered new recovery functions.
+ */
+ switch (redo) {
+ case DB_TXN_ABORT:
+ case DB_TXN_APPLY:
+ case DB_TXN_PRINT:
+ make_call = 1;
+ break;
+ case DB_TXN_OPENFILES:
+ /*
+ * We collect all the transactions that have
+ * "begin" records, those with no previous LSN,
+ * so that we do not abort partial transactions.
+ * These are known to be undone; otherwise the
+ * log would not have been freeable.
+ */
+ memcpy(&prev_lsn, (u_int8_t *)db->data +
+ sizeof(rectype) + sizeof(txnid), sizeof(prev_lsn));
+ if (txnid != 0 && prev_lsn.file == 0 && (ret =
+ __db_txnlist_add(dbenv, info, txnid, TXN_OK, NULL)) != 0)
+ return (ret);
+
+ /* FALLTHROUGH */
+ case DB_TXN_POPENFILES:
+ if (rectype == DB___dbreg_register ||
+ rectype == DB___txn_ckp || rectype == DB___txn_recycle)
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
+ break;
+ case DB_TXN_BACKWARD_ROLL:
+ /*
+ * Running full recovery in the backward pass. If we've
+ * seen this txnid before and added it to our commit list,
+ * then we do nothing during this pass, unless this is a child
+ * commit record, in which case we need to process it. If
+ * we've never seen it, then we call the appropriate recovery
+ * routine.
+ *
+ * We need to always undo DB___db_noop records, so that we
+ * properly handle any aborts before the file was closed.
+ */
+ switch(rectype) {
+ case DB___txn_regop:
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
+ case DB___fop_file_remove:
+ case DB___txn_child:
+ make_call = 1;
+ break;
+
+ case DB___dbreg_register:
+ if (txnid == 0) {
+ make_call = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ if (txnid != 0 && (ret =
+ __db_txnlist_find(dbenv,
+ info, txnid)) != TXN_COMMIT && ret != TXN_IGNORE) {
+ /*
+ * If not found, then this is an incomplete
+ * abort.
+ */
+ if (ret == TXN_NOTFOUND)
+ return (__db_txnlist_add(dbenv,
+ info, txnid, TXN_IGNORE, lsnp));
+ make_call = 1;
+ if (ret == TXN_OK &&
+ (ret = __db_txnlist_update(dbenv,
+ info, txnid,
+ rectype == DB___txn_xa_regop ?
+ TXN_PREPARE : TXN_ABORT, NULL)) != 0)
+ return (ret);
+ }
+ }
+ break;
+ case DB_TXN_FORWARD_ROLL:
+ /*
+ * In the forward pass, if we haven't seen the transaction,
+ * do nothing, else recover it.
+ *
+ * We need to always redo DB___db_noop records, so that we
+ * properly handle any commits after the file was closed.
+ */
+ switch(rectype) {
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
+ make_call = 1;
+ break;
+
+ default:
+ if (txnid != 0 && (ret = __db_txnlist_find(dbenv,
+ info, txnid)) == TXN_COMMIT)
+ make_call = 1;
+ else if (ret != TXN_IGNORE &&
+ (rectype == DB___ham_metagroup ||
+ rectype == DB___ham_groupalloc ||
+ rectype == DB___db_pg_alloc)) {
+ /*
+ * Because we cannot undo file extensions,
+ * all allocation records must be reprocessed
+ * during rollforward in case the file was
+ * just created. It may not have been
+ * present during the backward pass.
+ */
+ make_call = 1;
+ redo = DB_TXN_BACKWARD_ALLOC;
+ } else if (rectype == DB___dbreg_register) {
+ /*
+ * This may be a transaction dbreg_register.
+ * If it is, we only make the call on a COMMIT,
+ * which we checked above. If it's not, then we
+ * should always make the call, because we need
+ * the file open information.
+ */
+ if (txnid == 0)
+ make_call = 1;
+ }
+ }
+ break;
+ case DB_TXN_GETPGNOS:
+ /*
+ * If this is one of DB's own log records, we simply
+ * dispatch.
+ */
+ if (rectype < DB_user_BEGIN) {
+ make_call = 1;
+ break;
+ }
+
+ /*
+ * If we're still here, this is a custom record in an
+ * application that's doing app-specific logging. Such a
+ * record doesn't have a getpgno function for the user
+ * dispatch function to call--the getpgnos functions return
+ * which pages replication needs to lock using the TXN_RECS
+ * structure, which is private and not something we want to
+ * document.
+ *
+ * Thus, we leave any necessary locking for the app's
+ * recovery function to do during the upcoming
+ * DB_TXN_APPLY. Fill in default getpgnos info (we need
+ * a stub entry for every log record that will get
+ * DB_TXN_APPLY'd) and return success.
+ */
+ return (__db_default_getpgnos(dbenv, lsnp, info));
+ default:
+ return (__db_unknown_flag(dbenv, "__db_dispatch", redo));
+ }
+ /*
+ * The switch statement uses ret to receive the return value of
+ * __db_txnlist_find, which returns a large number of different
+ * statuses, none of which we will be returning. For safety,
+ * let's reset this here in case we ever do a "return(ret)"
+ * below in the future.
+ */
+ ret = 0;
+
+ if (make_call) {
+ if (rectype >= DB_user_BEGIN && dbenv->app_dispatch != NULL)
+ return (dbenv->app_dispatch(dbenv, db, lsnp, redo));
+ else {
+ /*
+ * The size of the dtab table argument is the same as
+ * the standard table, use the standard table's size
+ * as our sanity check.
+ */
+ if (rectype > dtabsize || dtab[rectype] == NULL) {
+ __db_err(dbenv,
+ "Illegal record type %lu in log",
+ (u_long)rectype);
+ return (EINVAL);
+ }
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __db_add_recovery --
+ *
+ * PUBLIC: int __db_add_recovery __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *,
+ * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ */
+int
+__db_add_recovery(dbenv, dtab, dtabsize, func, ndx)
+ DB_ENV *dbenv;
+ int (***dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsize;
+ int (*func) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ u_int32_t ndx;
+{
+ size_t i, nsize;
+ int ret;
+
+ /* Check if we have to grow the table. */
+ if (ndx >= *dtabsize) {
+ nsize = ndx + 40;
+ if ((ret =
+ __os_realloc(dbenv, nsize * sizeof((*dtab)[0]), dtab)) != 0)
+ return (ret);
+ for (i = *dtabsize; i < nsize; ++i)
+ (*dtab)[i] = NULL;
+ *dtabsize = nsize;
+ }
+
+ (*dtab)[ndx] = func;
+ return (0);
+}
+
+/*
+ * __db_txnlist_init --
+ * Initialize transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_init __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LSN *, void *));
+ */
+int
+__db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
+ DB_ENV *dbenv;
+ u_int32_t low_txn, hi_txn;
+ DB_LSN *trunc_lsn;
+ void *retp;
+{
+ DB_TXNHEAD *headp;
+ u_int32_t tmp;
+ int ret, size;
+
+ /*
+ * Size a hash table.
+ * If low is zero then we are being called during rollback
+ * and we need only one slot.
+ * Hi may be lower than low if we have recycled txnids.
+ * The numbers here are guesses about txn density; we can afford
+ * to look at a few entries in each slot.
+ */
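+	/*
+	 * Editorial note (worked example, not part of the original
+	 * Sleepycat source): a txnid span (hi_txn - low_txn) of 10,000
+	 * gives size = 10000 / 5 = 2000 slots; any span under 500 ids is
+	 * clamped to the 100-slot minimum.
+	 */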
+ if (low_txn == 0)
+ size = 1;
+ else {
+ if (hi_txn < low_txn) {
+ tmp = hi_txn;
+ hi_txn = low_txn;
+ low_txn = tmp;
+ }
+ tmp = hi_txn - low_txn;
+ /* See if we wrapped around. */
+ if (tmp > (TXN_MAXIMUM - TXN_MINIMUM) / 2)
+ tmp = (low_txn - TXN_MINIMUM) + (TXN_MAXIMUM - hi_txn);
+ size = tmp / 5;
+ if (size < 100)
+ size = 100;
+ }
+ if ((ret = __os_malloc(dbenv,
+ sizeof(DB_TXNHEAD) + size * sizeof(headp->head), &headp)) != 0)
+ return (ret);
+
+ memset(headp, 0, sizeof(DB_TXNHEAD) + size * sizeof(headp->head));
+ headp->maxid = hi_txn;
+ headp->generation = 0;
+ headp->nslots = size;
+ headp->gen_alloc = 8;
+ if ((ret = __os_malloc(dbenv, headp->gen_alloc *
+ sizeof(headp->gen_array[0]), &headp->gen_array)) != 0) {
+ __os_free(dbenv, headp);
+ return (ret);
+ }
+ headp->gen_array[0].generation = 0;
+ headp->gen_array[0].txn_min = TXN_MINIMUM;
+ headp->gen_array[0].txn_max = TXN_MAXIMUM;
+ if (trunc_lsn != NULL)
+ headp->trunc_lsn = *trunc_lsn;
+ else
+ ZERO_LSN(headp->trunc_lsn);
+ ZERO_LSN(headp->maxlsn);
+ ZERO_LSN(headp->ckplsn);
+
+ *(void **)retp = headp;
+ return (0);
+}
+
+/*
+ * __db_txnlist_add --
+ * Add an element to our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_add __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, int32_t, DB_LSN *));
+ */
+int
+__db_txnlist_add(dbenv, listp, txnid, status, lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ int32_t status;
+ DB_LSN *lsn;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+ return (ret);
+
+ hp = (DB_TXNHEAD *)listp;
+ LIST_INSERT_HEAD(&hp->head[DB_TXNLIST_MASK(hp, txnid)], elp, links);
+
+ elp->type = TXNLIST_TXNID;
+ elp->u.t.txnid = txnid;
+ elp->u.t.status = status;
+ elp->u.t.generation = hp->generation;
+ if (txnid > hp->maxid)
+ hp->maxid = txnid;
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ DB_ASSERT(lsn == NULL ||
+ status != TXN_COMMIT || log_compare(&hp->maxlsn, lsn) >= 0);
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_remove --
+ * Remove an element from our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
+ */
+int
+__db_txnlist_remove(dbenv, listp, txnid)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ return (__db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid,
+ NULL, &entry, 1) == TXN_NOTFOUND ? TXN_NOTFOUND : TXN_OK);
+}
+
+/*
+ * __db_txnlist_ckp --
+ * Used to record the maximum checkpoint that will be retained
+ * after recovery. Typically this is simply the max checkpoint, but
+ * if we are doing client replication recovery or timestamp-based
+ * recovery, we are going to virtually truncate the log and we need
+ * to retain the last checkpoint before the truncation point.
+ *
+ * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
+ */
+void
+__db_txnlist_ckp(dbenv, listp, ckp_lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ DB_LSN *ckp_lsn;
+{
+ DB_TXNHEAD *hp;
+
+ COMPQUIET(dbenv, NULL);
+
+ hp = (DB_TXNHEAD *)listp;
+
+ if (IS_ZERO_LSN(hp->ckplsn) && !IS_ZERO_LSN(hp->maxlsn) &&
+ log_compare(&hp->maxlsn, ckp_lsn) >= 0)
+ hp->ckplsn = *ckp_lsn;
+}
+
+/*
+ * __db_txnlist_end --
+ * Discard transaction linked list. Print out any error messages
+ * for deleted files.
+ *
+ * PUBLIC: void __db_txnlist_end __P((DB_ENV *, void *));
+ */
+void
+__db_txnlist_end(dbenv, listp)
+ DB_ENV *dbenv;
+ void *listp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int i;
+
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return;
+
+ for (i = 0; i < hp->nslots; i++)
+ while (hp != NULL && (p = LIST_FIRST(&hp->head[i])) != NULL) {
+ LIST_REMOVE(p, links);
+ switch (p->type) {
+ case TXNLIST_LSN:
+ __os_free(dbenv, p->u.l.lsn_array);
+ break;
+ default:
+ /*
+ * Possibly an incomplete DB_TXNLIST; just
+ * free it.
+ */
+ break;
+ }
+ __os_free(dbenv, p);
+ }
+
+ if (hp->gen_array != NULL)
+ __os_free(dbenv, hp->gen_array);
+ __os_free(dbenv, listp);
+}
+
+/*
+ * __db_txnlist_find --
+ * Checks to see if a txnid with the current generation is in the
+ * txnid list. This returns TXN_NOTFOUND if the item isn't in the
+ * list; otherwise it returns (like __db_txnlist_find_internal)
+ * the status of the transaction. A txnid of 0 means the record
+ * was generated while not in a transaction.
+ *
+ * PUBLIC: int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t));
+ */
+int
+__db_txnlist_find(dbenv, listp, txnid)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ if (txnid == 0)
+ return (TXN_NOTFOUND);
+ return (__db_txnlist_find_internal(dbenv, listp,
+ TXNLIST_TXNID, txnid, NULL, &entry, 0));
+}
+
+/*
+ * __db_txnlist_update --
+ * Change the status of an existing transaction entry.
+ * Returns TXN_NOTFOUND if no such entry exists.
+ *
+ * PUBLIC: int __db_txnlist_update __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, u_int32_t, DB_LSN *));
+ */
+int
+__db_txnlist_update(dbenv, listp, txnid, status, lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ u_int32_t status;
+ DB_LSN *lsn;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if (txnid == 0)
+ return (TXN_NOTFOUND);
+ hp = (DB_TXNHEAD *)listp;
+ ret = __db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid, NULL, &elp, 0);
+
+ if (ret == TXN_NOTFOUND)
+ return (ret);
+ elp->u.t.status = status;
+
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ return (ret);
+}
+
+/*
+ * __db_txnlist_find_internal --
+ * Find an entry on the transaction list. If the entry is not there or
+ * the list pointer is not initialized we return TXN_NOTFOUND. If the
+ * item is found, we return the status. Currently we always call this
+ * with an initialized list pointer but checking for NULL keeps it general.
+ */
+static int
+__db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete)
+ DB_ENV *dbenv;
+ void *listp;
+ db_txnlist_type type;
+ u_int32_t txnid;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ DB_TXNLIST **txnlistp;
+ int delete;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int32_t generation;
+ u_int32_t hash;
+ struct __db_headlink *head;
+ int i, ret;
+
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return (TXN_NOTFOUND);
+
+ switch (type) {
+ case TXNLIST_TXNID:
+ hash = txnid;
+ /* Find the most recent generation containing this ID */
+ for (i = 0; i <= hp->generation; i++)
+ /* The range may wrap around the end. */
+ if (hp->gen_array[i].txn_min <
+ hp->gen_array[i].txn_max ?
+ (txnid >= hp->gen_array[i].txn_min &&
+ txnid <= hp->gen_array[i].txn_max) :
+ (txnid >= hp->gen_array[i].txn_min ||
+ txnid <= hp->gen_array[i].txn_max))
+ break;
+ DB_ASSERT(i <= hp->generation);
+ generation = hp->gen_array[i].generation;
+ break;
+ case TXNLIST_PGNO:
+ memcpy(&hash, uid, sizeof(hash));
+ generation = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ head = &hp->head[DB_TXNLIST_MASK(hp, hash)];
+
+ for (p = LIST_FIRST(head); p != NULL; p = LIST_NEXT(p, links)) {
+ if (p->type != type)
+ continue;
+ switch (type) {
+ case TXNLIST_TXNID:
+ if (p->u.t.txnid != txnid ||
+ generation != p->u.t.generation)
+ continue;
+ ret = p->u.t.status;
+ break;
+
+ case TXNLIST_PGNO:
+ if (memcmp(uid, p->u.p.uid, DB_FILE_ID_LEN) != 0)
+ continue;
+
+ ret = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ ret = EINVAL;
+ }
+ if (delete == 1) {
+ LIST_REMOVE(p, links);
+ __os_free(dbenv, p);
+ } else if (p != LIST_FIRST(head)) {
+ /* Move it to head of list. */
+ LIST_REMOVE(p, links);
+ LIST_INSERT_HEAD(head, p, links);
+ }
+ *txnlistp = p;
+ return (ret);
+ }
+
+ return (TXN_NOTFOUND);
+}
+
+/*
+ * __db_txnlist_gen --
+ * Change the current generation number.
+ *
+ * PUBLIC: int __db_txnlist_gen __P((DB_ENV *,
+ * PUBLIC: void *, int, u_int32_t, u_int32_t));
+ */
+int
+__db_txnlist_gen(dbenv, listp, incr, min, max)
+ DB_ENV *dbenv;
+ void *listp;
+ int incr;
+ u_int32_t min, max;
+{
+ DB_TXNHEAD *hp;
+ int ret;
+
+ /*
+ * During recovery generation numbers keep track of "restart"
+ * checkpoints and recycle records. Restart checkpoints occur
+ * whenever we take a checkpoint and there are no outstanding
+ * transactions. When that happens, we can reset transaction IDs
+ * back to TXNID_MINIMUM. Currently we only do the reset
+ * at the end of recovery. Recycle records occur when txnids
+ * are exhausted during runtime. A free range of ids is identified
+ * and logged. This code maintains a stack of ranges. A txnid
+ * is given the generation number of the first range it falls into
+ * in the stack.
+ */
+ hp = (DB_TXNHEAD *)listp;
+ hp->generation += incr;
+ if (incr < 0)
+ memmove(hp->gen_array, &hp->gen_array[1],
+ (hp->generation + 1) * sizeof(hp->gen_array[0]));
+ else {
+ if (hp->generation >= hp->gen_alloc) {
+ hp->gen_alloc *= 2;
+ if ((ret = __os_realloc(dbenv, hp->gen_alloc *
+ sizeof(hp->gen_array[0]), &hp->gen_array)) != 0)
+ return (ret);
+ }
+ memmove(&hp->gen_array[1], &hp->gen_array[0],
+ hp->generation * sizeof(hp->gen_array[0]));
+ hp->gen_array[0].generation = hp->generation;
+ hp->gen_array[0].txn_min = min;
+ hp->gen_array[0].txn_max = max;
+ }
+ return (0);
+}
+
+#define TXN_BUBBLE(AP, MAX) { \
+ int __j; \
+ DB_LSN __tmp; \
+ \
+ for (__j = 0; __j < MAX - 1; __j++) \
+ if (log_compare(&AP[__j], &AP[__j + 1]) < 0) { \
+ __tmp = AP[__j]; \
+ AP[__j] = AP[__j + 1]; \
+ AP[__j + 1] = __tmp; \
+ } \
+}
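+
+/*
+ * Editorial note (not part of the original Sleepycat source): TXN_BUBBLE is
+ * a single bubble-sort pass that orders the LSN array from largest to
+ * smallest, so once sorted, lsn_array[0] always holds the largest (most
+ * recent) LSN, which is what __db_txnlist_lsnadd hands back through *lsnp.
+ */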
+
+/*
+ * __db_txnlist_lsnadd --
+ * Add to or re-sort the transaction list lsn entry. Note that since this
+ * is used during an abort, the __txn_undo code calls into the "recovery"
+ * subsystem explicitly, and there is only a single TXNLIST_LSN entry on
+ * the list.
+ *
+ * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+ */
+int
+__db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
+ DB_ENV *dbenv;
+ void *listp;
+ DB_LSN *lsnp;
+ u_int32_t flags;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int i, ret;
+
+ hp = (DB_TXNHEAD *)listp;
+
+ for (elp = LIST_FIRST(&hp->head[0]);
+ elp != NULL; elp = LIST_NEXT(elp, links))
+ if (elp->type == TXNLIST_LSN)
+ break;
+
+ if (elp == NULL)
+ return (DB_SURPRISE_KID);
+
+ if (LF_ISSET(TXNLIST_NEW)) {
+ if (elp->u.l.ntxns >= elp->u.l.maxn) {
+ if ((ret = __os_realloc(dbenv,
+ 2 * elp->u.l.maxn * sizeof(DB_LSN),
+ &elp->u.l.lsn_array)) != 0)
+ return (ret);
+ elp->u.l.maxn *= 2;
+ }
+ elp->u.l.lsn_array[elp->u.l.ntxns++] = *lsnp;
+ } else
+ /* Simply replace the 0th element. */
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ /*
+ * If we just added a new entry, there may be NULL entries, so we
+ * have to do a complete bubble sort, not just trickle a changed entry
+ * around.
+ */
+ for (i = 0; i < (!LF_ISSET(TXNLIST_NEW) ? 1 : elp->u.l.ntxns); i++)
+ TXN_BUBBLE(elp->u.l.lsn_array, elp->u.l.ntxns);
+
+ *lsnp = elp->u.l.lsn_array[0];
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_lsninit --
+ * Initialize a transaction list with an lsn array entry.
+ *
+ * PUBLIC: int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+ */
+int
+__db_txnlist_lsninit(dbenv, hp, lsnp)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+ DB_LSN *lsnp;
+{
+ DB_TXNLIST *elp;
+ int ret;
+
+ elp = NULL;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+ goto err;
+ LIST_INSERT_HEAD(&hp->head[0], elp, links);
+
+ if ((ret = __os_malloc(dbenv,
+ 12 * sizeof(DB_LSN), &elp->u.l.lsn_array)) != 0)
+ goto err;
+ elp->type = TXNLIST_LSN;
+ elp->u.l.maxn = 12;
+ elp->u.l.ntxns = 1;
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ return (0);
+
+err: __db_txnlist_end(dbenv, hp);
+ return (ret);
+}
+
+/*
+ * __db_add_limbo -- add pages to the limbo list.
+ * Get the file information and call pgnoadd for each page.
+ *
+ * PUBLIC: int __db_add_limbo __P((DB_ENV *,
+ * PUBLIC: void *, int32_t, db_pgno_t, int32_t));
+ */
+int
+__db_add_limbo(dbenv, info, fileid, pgno, count)
+ DB_ENV *dbenv;
+ void *info;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t count;
+{
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ if ((ret = __dbreg_id_to_fname(dblp, fileid, 0, &fnp)) != 0)
+ return (ret);
+
+ do {
+ if ((ret =
+ __db_txnlist_pgnoadd(dbenv, info, fileid, fnp->ufid,
+ R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0)
+ return (ret);
+ pgno++;
+ } while (--count != 0);
+
+ return (0);
+}
+
+/*
+ * __db_do_the_limbo -- move pages from limbo to free.
+ *
+ * Limbo processing is what ensures that we correctly handle and
+ * recover from page allocations. During recovery, for each database,
+ * we process each in-question allocation, link them into the free list
+ * and then write out the new meta-data page that contains the pointer
+ * to the new beginning of the free list. On an abort, we use our
+ * standard __db_free mechanism in a compensating transaction which logs
+ * the specific modifications to the free list.
+ *
+ * If we run out of log space during an abort, then we can't write the
+ * compensating transaction, so we abandon the idea of a compensating
+ * transaction, and go back to processing how we do during recovery.
+ * The reason that this is not the norm is that it's expensive: it requires
+ * that we flush any database with an in-question allocation. Thus if
+ * a compensating transaction fails, we never try to restart it.
+ *
+ * Since files may be open and closed within transactions (in particular,
+ * the master database for subdatabases), we must be prepared to open
+ * files during this process. If there is a compensating transaction, we
+ * can open the files in that transaction. If this was an abort and there
+ * is no compensating transaction, then we've got to perform these opens
+ * in the context of the aborting transaction so that we do not deadlock.
+ * During recovery, there's no locking, so this isn't an issue.
+ *
+ * What you want to keep in mind when reading this is that there are two
+ * algorithms going on here: if ctxn == NULL, then we're either in recovery
+ * or our compensating transaction has failed and we're doing the
+ * "create list and write meta-data page" algorithm. Otherwise, we're in
+ * an abort and doing the "use compensating transaction" algorithm.
+ *
+ * PUBLIC: int __db_do_the_limbo __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN *, DB_TXNHEAD *));
+ */
+int
+__db_do_the_limbo(dbenv, ptxn, txn, hp)
+ DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
+ DB_TXNHEAD *hp;
+{
+ DB_TXNLIST *elp;
+ int h, ret;
+
+ ret = 0;
+ /*
+ * The slots correspond to hash buckets. We've hashed the
+ * fileids into hash buckets and need to pick up all affected
+ * files. (There will only be a single slot for an abort.)
+ */
+ for (h = 0; h < hp->nslots; h++) {
+ if ((elp = LIST_FIRST(&hp->head[h])) == NULL)
+ continue;
+ if (ptxn != NULL) {
+ if ((ret =
+ __db_limbo_move(dbenv, ptxn, txn, elp)) != 0)
+ goto err;
+ } else if ((ret = __db_limbo_bucket(dbenv, txn, elp)) != 0)
+ goto err;
+ }
+
+err: if (ret != 0) {
+ __db_err(dbenv, "Fatal error in abort of an allocation");
+ ret = __db_panic(dbenv, ret);
+ }
+
+ return (ret);
+}
+
+/* Limbo support routines. */
+
+/*
+ * __db_lock_move --
+ * Move a lock from child to parent.
+ */
+static int
+__db_lock_move(dbenv, fileid, pgno, mode, ptxn, txn)
+ DB_ENV *dbenv;
+ u_int8_t *fileid;
+ db_pgno_t pgno;
+ db_lockmode_t mode;
+ DB_TXN *ptxn, *txn;
+{
+ DBT lock_dbt;
+ DB_LOCK lock;
+ DB_LOCK_ILOCK lock_obj;
+ DB_LOCKREQ req;
+ int ret;
+
+ lock_obj.pgno = pgno;
+ memcpy(lock_obj.fileid, fileid, DB_FILE_ID_LEN);
+ lock_obj.type = DB_PAGE_LOCK;
+
+ memset(&lock_dbt, 0, sizeof(lock_dbt));
+ lock_dbt.data = &lock_obj;
+ lock_dbt.size = sizeof(lock_obj);
+
+ if ((ret = dbenv->lock_get(dbenv,
+ txn->txnid, 0, &lock_dbt, mode, &lock)) == 0) {
+ memset(&req, 0, sizeof(req));
+ req.lock = lock;
+ req.op = DB_LOCK_TRADE;
+
+ ret = dbenv->lock_vec(dbenv, ptxn->txnid, 0, &req, 1, NULL);
+ }
+ return (ret);
+}
+
+/*
+ * __db_limbo_move
+ * Move just the metapage lock to the parent.
+ */
+static int
+__db_limbo_move(dbenv, ptxn, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
+ DB_TXNLIST *elp;
+{
+ int ret;
+
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO || elp->u.p.locked == 1)
+ continue;
+ if ((ret = __db_lock_move(dbenv, elp->u.p.uid,
+ PGNO_BASE_MD, DB_LOCK_WRITE, ptxn, txn)) != 0)
+ return (ret);
+ elp->u.p.locked = 1;
+ }
+
+ return (0);
+}
+/*
+ * __db_limbo_bucket
+ * Perform limbo processing for a single hash bucket in the txnlist.
+ * txn is the transaction aborting in the case of an abort and ctxn is the
+ * compensating transaction.
+ */
+
+#define T_RESTORED(txn) ((txn) != NULL && F_ISSET(txn, TXN_RESTORED))
+static int
+__db_limbo_bucket(dbenv, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXNLIST *elp;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ DBMETA *meta;
+ DB_TXN *ctxn, *t;
+ db_pgno_t last_pgno, pgno;
+ int dbp_created, in_retry, ret, t_ret;
+
+ ctxn = NULL;
+ in_retry = 0;
+ meta = NULL;
+ mpf = NULL;
+ ret = 0;
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO)
+ continue;
+retry: dbp_created = 0;
+
+ /*
+ * Pick the transaction in which to potentially
+ * log compensations.
+ */
+ if (!in_retry && !IS_RECOVERING(dbenv) && !T_RESTORED(txn)
+ && (ret = __txn_compensate_begin(dbenv, &ctxn)) != 0)
+ return (ret);
+
+ /*
+ * Either use the compensating transaction or
+ * the one passed in, which will be null if recovering.
+ */
+ t = ctxn == NULL ? txn : ctxn;
+
+ /* First try to get a dbp by fileid. */
+ ret = __dbreg_id_to_db(dbenv, t, &dbp, elp->u.p.fileid, 0);
+
+ /*
+ * File is being destroyed. No need to worry about
+ * dealing with recovery of allocations.
+ */
+ if (ret == DB_DELETED ||
+ (ret == 0 && F_ISSET(dbp, DB_AM_DISCARD)))
+ goto next;
+
+ if (ret != 0) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ /*
+ * This tells the system not to lock, which is always
+ * OK, whether this is an abort or recovery.
+ */
+ F_SET(dbp, DB_AM_COMPENSATE);
+ dbp_created = 1;
+
+ /* It is OK if the file is no longer there. */
+ dbp->type = DB_UNKNOWN;
+ ret = __db_dbopen(dbp, t, elp->u.p.fname, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), PGNO_BASE_MD);
+ if (ret == ENOENT)
+ goto next;
+ }
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if (memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ goto next;
+
+ mpf = dbp->mpf;
+ last_pgno = PGNO_INVALID;
+
+ if (ctxn == NULL) {
+ pgno = PGNO_BASE_MD;
+ if ((ret =
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ last_pgno = meta->free;
+ }
+
+ ret = __db_limbo_fix(dbp, ctxn, elp, &last_pgno, meta);
+ /*
+ * If we were doing compensating transactions, then we are
+ * going to hope this error was due to running out of space.
+ * We'll change modes (into the sync-the-file mode) and keep
+ * trying. If we weren't doing compensating transactions,
+ * then this is a real error and we're sunk.
+ */
+ if (ret != 0) {
+ if (ret == DB_RUNRECOVERY || ctxn == NULL)
+ goto err;
+ in_retry = 1;
+ goto retry;
+ }
+
+ if (ctxn != NULL) {
+ ret = ctxn->commit(ctxn, DB_TXN_NOSYNC);
+ ctxn = NULL;
+ if (ret != 0)
+ goto retry;
+ goto next;
+ }
+
+ /*
+ * This is where we handle the case where we're explicitly
+ * putting together a free list. We need to decide whether
+ * we have to write the meta-data page, and if we do, then
+ * we need to sync it as well.
+ */
+ if (last_pgno == meta->free) {
+ /* No change to page; just put the page back. */
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ } else {
+ /*
+ * These changes are unlogged so we cannot have the
+ * metapage pointing at pages that are not on disk.
+ * Therefore, we flush the new free list, then update
+ * the metapage. We have to put the meta-data page
+ * first so that it isn't pinned when we try to sync.
+ */
+ if (!IS_RECOVERING(dbenv) && !T_RESTORED(txn))
+ __db_err(dbenv, "Flushing free list to disk");
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ dbp->sync(dbp, 0);
+ pgno = PGNO_BASE_MD;
+ if ((ret =
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ meta->free = last_pgno;
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ }
+
+next:
+ /*
+ * If we get here, either we have processed the list
+ * or the db file has been deleted or could not be opened.
+ */
+ if (ctxn != NULL &&
+ (t_ret = ctxn->abort(ctxn)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbp_created &&
+ (t_ret = __db_close_i(dbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp = NULL;
+ __os_free(dbenv, elp->u.p.fname);
+ __os_free(dbenv, elp->u.p.pgno_array);
+ if (ret == ENOENT)
+ ret = 0;
+ else if (ret != 0)
+ goto err;
+ }
+
+err: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+}
+
+/*
+ * __db_limbo_fix --
+ * Process a single limbo entry which describes all the page allocations
+ * for a single file.
+ */
+static int
+__db_limbo_fix(dbp, ctxn, elp, lastp, meta)
+ DB *dbp;
+ DB_TXN *ctxn;
+ DB_TXNLIST *elp;
+ db_pgno_t *lastp;
+ DBMETA *meta;
+{
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *freep, *pagep;
+ db_pgno_t next, pgno;
+ int i, put_page, ret, t_ret;
+
+ /*
+ * Loop through the entries for this txnlist element and
+ * either link them into the free list or write a compensating
+ * record for each.
+ */
+ put_page = 0;
+ ret = 0;
+ mpf = dbp->mpf;
+ dbc = NULL;
+
+ for (i = 0; i < elp->u.p.nentries; i++) {
+ pgno = elp->u.p.pgno_array[i];
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto err;
+ put_page = 1;
+
+ if (IS_ZERO_LSN(LSN(pagep))) {
+ if (ctxn == NULL) {
+ /*
+ * If this is a fatal recovery which
+ * spans a previous crash, this page may
+ * be on the free list already.
+ */
+ for (next = *lastp; next != 0; ) {
+ if (next == pgno)
+ break;
+ if ((ret = mpf->get(mpf,
+ &next, 0, &freep)) != 0)
+ goto err;
+ next = NEXT_PGNO(freep);
+ if ((ret =
+ mpf->put(mpf, freep, 0)) != 0)
+ goto err;
+ }
+
+ if (next != pgno) {
+ P_INIT(pagep, dbp->pgsize, pgno,
+ PGNO_INVALID, *lastp, 0, P_INVALID);
+ LSN(pagep) = LSN(meta);
+ *lastp = pgno;
+ }
+ } else {
+ P_INIT(pagep, dbp->pgsize, pgno,
+ PGNO_INVALID, *lastp, 0, P_INVALID);
+ if (dbc == NULL && (ret =
+ dbp->cursor(dbp, ctxn, &dbc, 0)) != 0)
+ goto err;
+ /*
+ * If the dbp is compensating (because we
+ * opened it), the dbc will automatically be
+ * marked compensating, but in case we didn't
+ * do the open, we have to mark it explicitly.
+ */
+ F_SET(dbc, DBC_COMPENSATE);
+ ret = __db_free(dbc, pagep);
+ put_page = 0;
+ /*
+ * On any error, we hope that the error was
+ * caused by running out of space, and we
+ * switch modes, doing the processing where we
+ * sync out files instead of doing compensating
+ * transactions. If this was a real error and
+ * not out of space, we assume that some other
+ * call will fail real soon.
+ */
+ if (ret != 0) {
+ /* Assume that this is out of space. */
+ (void)dbc->c_close(dbc);
+ dbc = NULL;
+ goto err;
+ }
+ }
+ }
+
+ if (put_page == 1) {
+ ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
+ put_page = 0;
+ }
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (put_page &&
+ (t_ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */
+
+/*
+ * __db_txnlist_pgnoadd --
+ * Find the txnlist entry for a file and add this pgno, or add the list
+ * entry for the file and then add the pgno.
+ */
+static int
+__db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+ int32_t fileid;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ char *fname;
+ db_pgno_t pgno;
+{
+ DB_TXNLIST *elp;
+ u_int32_t hash;
+ int len, ret;
+
+ elp = NULL;
+
+ if (__db_txnlist_find_internal(dbenv, hp,
+ TXNLIST_PGNO, 0, uid, &elp, 0) != 0) {
+ if ((ret =
+ __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+ goto err;
+ memcpy(&hash, uid, sizeof(hash));
+ LIST_INSERT_HEAD(
+ &hp->head[DB_TXNLIST_MASK(hp, hash)], elp, links);
+ elp->u.p.fileid = fileid;
+ memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN);
+
+ len = (int)strlen(fname) + 1;
+ if ((ret = __os_malloc(dbenv, len, &elp->u.p.fname)) != 0)
+ goto err;
+ memcpy(elp->u.p.fname, fname, len);
+
+ elp->u.p.maxentry = 0;
+ elp->u.p.locked = 0;
+ elp->type = TXNLIST_PGNO;
+		if ((ret = __os_malloc(dbenv,
+		    DB_TXNLIST_MAX_PGNO * sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
+ goto err;
+ elp->u.p.maxentry = DB_TXNLIST_MAX_PGNO;
+ elp->u.p.nentries = 0;
+ } else if (elp->u.p.nentries == elp->u.p.maxentry) {
+ elp->u.p.maxentry <<= 1;
+ if ((ret = __os_realloc(dbenv, elp->u.p.maxentry *
+ sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
+ goto err;
+ }
+
+ elp->u.p.pgno_array[elp->u.p.nentries++] = pgno;
+
+ return (0);
+
+err: __db_txnlist_end(dbenv, hp);
+ return (ret);
+}
+
+/*
+ * __db_default_getpgnos --
+ * Fill in default getpgnos information for an application-specific
+ * log record.
+ */
+static int
+__db_default_getpgnos(dbenv, lsnp, summary)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+#ifdef DEBUG
+/*
+ * __db_txnlist_print --
+ * Print out the transaction list.
+ *
+ * PUBLIC: void __db_txnlist_print __P((void *));
+ */
+void
+__db_txnlist_print(listp)
+ void *listp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int i;
+ char *stats[] = { "ok", "commit", "prepare", "abort", "notfound",
+ "ignore", "expected", "unexpected" };
+
+ hp = (DB_TXNHEAD *)listp;
+
+ printf("Maxid: %lu Generation: %lu\n",
+ (u_long)hp->maxid, (u_long)hp->generation);
+ for (i = 0; i < hp->nslots; i++)
+		for (p = LIST_FIRST(&hp->head[i]);
+		    p != NULL; p = LIST_NEXT(p, links)) {
+ switch (p->type) {
+ case TXNLIST_TXNID:
+ printf("TXNID: %lx(%lu): %s\n",
+ (u_long)p->u.t.txnid, (u_long)p->u.t.generation,
+ stats[p->u.t.status]);
+ break;
+ default:
+ printf("Unrecognized type: %d\n", p->type);
+ break;
+ }
+ }
+}
+#endif
diff --git a/libdb/db/db_dup.c b/libdb/db/db_dup.c
new file mode 100644
index 0000000..5e12154
--- /dev/null
+++ b/libdb/db/db_dup.c
@@ -0,0 +1,281 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __db_ditem --
+ * Remove an item from a page.
+ *
+ * PUBLIC: int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__db_ditem(dbc, pagep, indx, nbytes)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t indx, nbytes;
+{
+ DB *dbp;
+ DBT ldbt;
+ db_indx_t cnt, *inp, offset;
+ int ret;
+ u_int8_t *from;
+
+ dbp = dbc->dbp;
+ if (DBC_LOGGING(dbc)) {
+ ldbt.data = P_ENTRY(dbp, pagep, indx);
+ ldbt.size = nbytes;
+ if ((ret = __db_addrem_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_REM_DUP, PGNO(pagep),
+ (u_int32_t)indx, nbytes, &ldbt, NULL, &LSN(pagep))) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+
+ /*
+ * If there's only a single item on the page, we don't have to
+ * work hard.
+ */
+ if (NUM_ENT(pagep) == 1) {
+ NUM_ENT(pagep) = 0;
+ HOFFSET(pagep) = dbp->pgsize;
+ return (0);
+ }
+
+ inp = P_INP(dbp, pagep);
+ /*
+ * Pack the remaining key/data items at the end of the page. Use
+	 * memmove(3), since the regions may overlap.
+ */
+ from = (u_int8_t *)pagep + HOFFSET(pagep);
+ DB_ASSERT((int)inp[indx] - HOFFSET(pagep) >= 0);
+ memmove(from + nbytes, from, inp[indx] - HOFFSET(pagep));
+ HOFFSET(pagep) += nbytes;
+
+ /* Adjust the indices' offsets. */
+ offset = inp[indx];
+ for (cnt = 0; cnt < NUM_ENT(pagep); ++cnt)
+ if (inp[cnt] < offset)
+ inp[cnt] += nbytes;
+
+ /* Shift the indices down. */
+ --NUM_ENT(pagep);
+ if (indx != NUM_ENT(pagep))
+ memmove(&inp[indx], &inp[indx + 1],
+ sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+
+ return (0);
+}
+
+/*
+ * __db_pitem --
+ * Put an item on a page.
+ *
+ * PUBLIC: int __db_pitem
+ * PUBLIC: __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+ */
+int
+__db_pitem(dbc, pagep, indx, nbytes, hdr, data)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t indx;
+ u_int32_t nbytes;
+ DBT *hdr, *data;
+{
+ DB *dbp;
+ BKEYDATA bk;
+ DBT thdr;
+ db_indx_t *inp;
+ int ret;
+ u_int8_t *p;
+
+ dbp = dbc->dbp;
+ if (nbytes > P_FREESPACE(dbp, pagep)) {
+ DB_ASSERT(nbytes <= P_FREESPACE(dbp, pagep));
+ return (EINVAL);
+ }
+ /*
+ * Put a single item onto a page. The logic figuring out where to
+ * insert and whether it fits is handled in the caller. All we do
+ * here is manage the page shuffling. We cheat a little bit in that
+ * we don't want to copy the dbt on a normal put twice. If hdr is
+ * NULL, we create a BKEYDATA structure on the page, otherwise, just
+ * copy the caller's information onto the page.
+ *
+ * This routine is also used to put entries onto the page where the
+ * entry is pre-built, e.g., during recovery. In this case, the hdr
+ * will point to the entry, and the data argument will be NULL.
+ *
+ * !!!
+ * There's a tremendous potential for off-by-one errors here, since
+ * the passed in header sizes must be adjusted for the structure's
+ * placeholder for the trailing variable-length data field.
+ */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_addrem_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_ADD_DUP, PGNO(pagep),
+ (u_int32_t)indx, nbytes, hdr, data, &LSN(pagep))) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+
+ if (hdr == NULL) {
+ B_TSET(bk.type, B_KEYDATA, 0);
+ bk.len = data == NULL ? 0 : data->size;
+
+ thdr.data = &bk;
+ thdr.size = SSZA(BKEYDATA, data);
+ hdr = &thdr;
+ }
+ inp = P_INP(dbp, pagep);
+
+ /* Adjust the index table, then put the item on the page. */
+ if (indx != NUM_ENT(pagep))
+ memmove(&inp[indx + 1], &inp[indx],
+ sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+ HOFFSET(pagep) -= nbytes;
+ inp[indx] = HOFFSET(pagep);
+ ++NUM_ENT(pagep);
+
+ p = P_ENTRY(dbp, pagep, indx);
+ memcpy(p, hdr->data, hdr->size);
+ if (data != NULL)
+ memcpy(p + hdr->size, data->data, data->size);
+
+ return (0);
+}
+
+/*
+ * __db_relink --
+ * Relink around a deleted page.
+ *
+ * PUBLIC: int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+ */
+int
+__db_relink(dbc, add_rem, pagep, new_next, needlock)
+ DBC *dbc;
+ u_int32_t add_rem;
+ PAGE *pagep, **new_next;
+ int needlock;
+{
+ DB *dbp;
+ PAGE *np, *pp;
+ DB_LOCK npl, ppl;
+ DB_LSN *nlsnp, *plsnp, ret_lsn;
+ DB_MPOOLFILE *mpf;
+ int ret;
+
+ dbp = dbc->dbp;
+ np = pp = NULL;
+ LOCK_INIT(npl);
+ LOCK_INIT(ppl);
+ nlsnp = plsnp = NULL;
+ mpf = dbp->mpf;
+ ret = 0;
+
+ /*
+ * Retrieve and lock the one/two pages. For a remove, we may need
+ * two pages (the before and after). For an add, we only need one
+	 * because the split took care of the prev.
+ */
+ if (pagep->next_pgno != PGNO_INVALID) {
+ if (needlock && (ret = __db_lget(dbc,
+ 0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pagep->next_pgno, 0, &np)) != 0) {
+ __db_pgerr(dbp, pagep->next_pgno, ret);
+ goto err;
+ }
+ nlsnp = &np->lsn;
+ }
+ if (add_rem == DB_REM_PAGE && pagep->prev_pgno != PGNO_INVALID) {
+ if (needlock && (ret = __db_lget(dbc,
+ 0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pagep->prev_pgno, 0, &pp)) != 0) {
+			__db_pgerr(dbp, pagep->prev_pgno, ret);
+ goto err;
+ }
+ plsnp = &pp->lsn;
+ }
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_relink_log(dbp, dbc->txn, &ret_lsn, 0, add_rem,
+ pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp,
+ pagep->next_pgno, nlsnp)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(ret_lsn);
+ if (np != NULL)
+ np->lsn = ret_lsn;
+ if (pp != NULL)
+ pp->lsn = ret_lsn;
+ if (add_rem == DB_REM_PAGE)
+ pagep->lsn = ret_lsn;
+
+ /*
+ * Modify and release the two pages.
+ *
+ * !!!
+ * The parameter new_next gets set to the page following the page we
+ * are removing. If there is no following page, then new_next gets
+ * set to NULL.
+ */
+ if (np != NULL) {
+ if (add_rem == DB_ADD_PAGE)
+ np->prev_pgno = pagep->pgno;
+ else
+ np->prev_pgno = pagep->prev_pgno;
+ if (new_next == NULL)
+ ret = mpf->put(mpf, np, DB_MPOOL_DIRTY);
+ else {
+ *new_next = np;
+ ret = mpf->set(mpf, np, DB_MPOOL_DIRTY);
+ }
+ if (ret != 0)
+ goto err;
+ if (needlock)
+ (void)__TLPUT(dbc, npl);
+ } else if (new_next != NULL)
+ *new_next = NULL;
+
+ if (pp != NULL) {
+ pp->next_pgno = pagep->next_pgno;
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ if (needlock)
+ (void)__TLPUT(dbc, ppl);
+ }
+ return (0);
+
+err: if (np != NULL)
+ (void)mpf->put(mpf, np, 0);
+ if (needlock)
+ (void)__TLPUT(dbc, npl);
+ if (pp != NULL)
+ (void)mpf->put(mpf, pp, 0);
+ if (needlock)
+ (void)__TLPUT(dbc, ppl);
+ return (ret);
+}
diff --git a/libdb/db/db_iface.c b/libdb/db/db_iface.c
new file mode 100644
index 0000000..3185e00
--- /dev/null
+++ b/libdb/db/db_iface.c
@@ -0,0 +1,983 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+static int __db_curinval __P((const DB_ENV *));
+static int __db_fnl __P((const DB_ENV *, const char *));
+static int __db_rdonly __P((const DB_ENV *, const char *));
+static int __dbt_ferr __P((const DB *, const char *, const DBT *, int));
+
+/*
+ * A database should be required to be readonly if it's been explicitly
+ * specified as such or if we're a client in a replicated environment and
+ * we don't have the special "client-writer" designation.
+ */
+#define IS_READONLY(dbp) \
+ (F_ISSET(dbp, DB_AM_RDONLY) || \
+ (F_ISSET((dbp)->dbenv, DB_ENV_REP_CLIENT) && \
+ !F_ISSET((dbp), DB_AM_CL_WRITER)))
+
+/*
+ * __db_cursorchk --
+ * Common cursor argument checking routine.
+ *
+ * PUBLIC: int __db_cursorchk __P((const DB *, u_int32_t));
+ */
+int
+__db_cursorchk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* DB_DIRTY_READ is the only valid bit-flag and requires locking. */
+ if (LF_ISSET(DB_DIRTY_READ)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DB->cursor"));
+ LF_CLR(DB_DIRTY_READ);
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_WRITECURSOR:
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+ if (!CDB_LOCKING(dbp->dbenv))
+ return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+ break;
+ case DB_WRITELOCK:
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+ }
+
+ return (0);
+}
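+
+/*
+ * An illustrative, non-authoritative sketch of the CDB case checked above:
+ * under Concurrent Data Store locking, a cursor that will write must be
+ * opened with DB_WRITECURSOR.  The handle "dbp" is an assumption and error
+ * handling is omitted.
+ *
+ *	DBC *dbc;
+ *
+ *	if (dbp->cursor(dbp, NULL, &dbc, DB_WRITECURSOR) == 0) {
+ *		... c_put/c_del calls through dbc go here ...
+ *		(void)dbc->c_close(dbc);
+ *	}
+ */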
+
+/*
+ * __db_ccountchk --
+ * Common cursor count argument checking routine.
+ *
+ * PUBLIC: int __db_ccountchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_ccountchk(dbp, flags, isvalid)
+ const DB *dbp;
+ u_int32_t flags;
+ int isvalid;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_count", 0));
+ }
+
+ /*
+ * The cursor must be initialized, return EINVAL for an invalid cursor,
+ * otherwise 0.
+ */
+ return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cdelchk --
+ * Common cursor delete argument checking routine.
+ *
+ * PUBLIC: int __db_cdelchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_cdelchk(dbp, flags, isvalid)
+ const DB *dbp;
+ u_int32_t flags;
+ int isvalid;
+{
+ /* Check for changes to a read-only tree. */
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "c_del"));
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_UPDATE_SECONDARY:
+ DB_ASSERT(F_ISSET(dbp, DB_AM_SECONDARY));
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_del", 0));
+ }
+
+ /*
+ * The cursor must be initialized, return EINVAL for an invalid cursor,
+ * otherwise 0.
+ */
+ return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cgetchk --
+ * Common cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cgetchk(dbp, key, data, flags, isvalid)
+ const DB *dbp;
+ DBT *key, *data;
+ u_int32_t flags;
+ int isvalid;
+{
+ int dirty, multi, ret;
+
+ /*
+ * Check for read-modify-write validity. DB_RMW doesn't make sense
+ * with CDB cursors since if you're going to write the cursor, you
+ * had to create it with DB_WRITECURSOR. Regardless, we check for
+ * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+ * If this changes, confirm that DB does not itself set the DB_RMW
+ * flag in a path where CDB may have been configured.
+ */
+ dirty = 0;
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+ if (LF_ISSET(DB_DIRTY_READ))
+ dirty = 1;
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
+ }
+
+ multi = 0;
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ multi = 1;
+ if (LF_ISSET(DB_MULTIPLE) && LF_ISSET(DB_MULTIPLE_KEY))
+ goto multi_err;
+ LF_CLR(DB_MULTIPLE | DB_MULTIPLE_KEY);
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ if (dirty) {
+ __db_err(dbp->dbenv,
+ "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+ return (EINVAL);
+ }
+ if (dbp->type != DB_QUEUE)
+ goto err;
+ break;
+ case DB_CURRENT:
+ case DB_FIRST:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ case DB_NEXT:
+ case DB_NEXT_DUP:
+ case DB_NEXT_NODUP:
+ case DB_SET:
+ case DB_SET_RANGE:
+ break;
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ if (multi)
+multi_err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 1));
+ break;
+ case DB_GET_BOTHC:
+ if (dbp->type == DB_QUEUE)
+ goto err;
+ break;
+ case DB_GET_RECNO:
+ /*
+ * The one situation in which this might be legal with a
+ * non-RECNUM dbp is if dbp is a secondary and its primary is
+ * DB_AM_RECNUM.
+ */
+ if (!F_ISSET(dbp, DB_AM_RECNUM) &&
+ (!F_ISSET(dbp, DB_AM_SECONDARY) ||
+ !F_ISSET(dbp->s_primary, DB_AM_RECNUM)))
+ goto err;
+ break;
+ case DB_SET_RECNO:
+ if (!F_ISSET(dbp, DB_AM_RECNUM))
+ goto err;
+ break;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE(_KEY) requires that DB_DBT_USERMEM be set");
+ return (EINVAL);
+ }
+ if (multi &&
+ (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+ return (EINVAL);
+ }
+
+ /*
+ * The cursor must be initialized for DB_CURRENT, DB_GET_RECNO and
+ * DB_NEXT_DUP. Return EINVAL for an invalid cursor, otherwise 0.
+ */
+ if (isvalid || (flags != DB_CURRENT &&
+ flags != DB_GET_RECNO && flags != DB_NEXT_DUP))
+ return (0);
+
+ return (__db_curinval(dbp->dbenv));
+}
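+
+/*
+ * A non-authoritative sketch of the bulk-get convention enforced above:
+ * DB_MULTIPLE_KEY requires a caller-owned DB_DBT_USERMEM buffer, which is
+ * then walked with the DB_MULTIPLE macros.  The cursor "dbc" and the one
+ * megabyte buffer size are assumptions; error handling is omitted.
+ *
+ *	DBT key, data;
+ *	void *p, *retkey, *retdata;
+ *	size_t retklen, retdlen;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	data.ulen = 1024 * 1024;
+ *	data.data = malloc(data.ulen);
+ *	data.flags = DB_DBT_USERMEM;
+ *
+ *	while (dbc->c_get(dbc, &key, &data, DB_MULTIPLE_KEY | DB_NEXT) == 0)
+ *		for (DB_MULTIPLE_INIT(p, &data);;) {
+ *			DB_MULTIPLE_KEY_NEXT(p, &data,
+ *			    retkey, retklen, retdata, retdlen);
+ *			if (p == NULL)
+ *				break;
+ *			... each retkey/retdata pair is one record ...
+ *		}
+ */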
+
+/*
+ * __db_cputchk --
+ * Common cursor put argument checking routine.
+ *
+ * PUBLIC: int __db_cputchk __P((const DB *,
+ * PUBLIC: const DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cputchk(dbp, key, data, flags, isvalid)
+ const DB *dbp;
+ const DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ int isvalid;
+{
+ int key_flags, ret;
+
+ key_flags = 0;
+
+ /* Check for changes to a read-only tree. */
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "c_put"));
+
+ /* Check for puts on a secondary. */
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ if (flags == DB_UPDATE_SECONDARY)
+ flags = DB_KEYLAST;
+ else {
+ __db_err(dbp->dbenv,
+ "DBcursor->c_put forbidden on secondary indices");
+ return (EINVAL);
+ }
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case DB_AFTER:
+ case DB_BEFORE:
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_HASH: /* Only with unsorted duplicates. */
+ if (!F_ISSET(dbp, DB_AM_DUP))
+ goto err;
+ if (dbp->dup_compare != NULL)
+ goto err;
+ break;
+ case DB_QUEUE: /* Not permitted. */
+ goto err;
+ case DB_RECNO: /* Only with mutable record numbers. */
+ if (!F_ISSET(dbp, DB_AM_RENUMBER))
+ goto err;
+ key_flags = 1;
+ break;
+ default:
+ goto err;
+ }
+ break;
+ case DB_CURRENT:
+ /*
+ * If there is a comparison function, doing a DB_CURRENT
+ * must not change the part of the data item that is used
+ * for the comparison.
+ */
+ break;
+ case DB_NODUPDATA:
+ if (!F_ISSET(dbp, DB_AM_DUPSORT))
+ goto err;
+ /* FALLTHROUGH */
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ key_flags = 1;
+ break;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DBcursor->c_put", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if (key_flags && (ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ /*
+ * The cursor must be initialized for anything other than DB_KEYFIRST
+ * and DB_KEYLAST, return EINVAL for an invalid cursor, otherwise 0.
+ */
+ if (isvalid || flags == DB_KEYFIRST ||
+ flags == DB_KEYLAST || flags == DB_NODUPDATA)
+ return (0);
+
+ return (__db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_pgetchk --
+ * DB->pget flag check.
+ *
+ * PUBLIC: int __db_pgetchk __P((const DB *, const DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__db_pgetchk(dbp, skey, pkey, data, flags)
+ const DB *dbp;
+ const DBT *skey;
+ DBT *pkey, *data;
+ u_int32_t flags;
+{
+ int ret;
+ u_int32_t save_flags;
+
+ save_flags = flags;
+
+ if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv,
+ "DB->pget may only be used on secondary indices");
+ return (EINVAL);
+ }
+
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+ return (EINVAL);
+ }
+
+ /* DB_CONSUME makes no sense on a secondary index. */
+ LF_CLR(DB_RMW);
+ switch (flags) {
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ return (__db_ferr(dbp->dbenv, "DB->pget", 0));
+ default:
+ /* __db_getchk will catch the rest. */
+ break;
+ }
+
+ /*
+ * We allow the pkey field to be NULL, so that we can make the
+ * two-DBT get calls into wrappers for the three-DBT ones.
+ */
+ if (pkey != NULL &&
+ (ret = __dbt_ferr(dbp, "primary key", pkey, 1)) != 0)
+ return (ret);
+
+ /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */
+ if (pkey == NULL && flags == DB_GET_BOTH) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH on a secondary index requires a primary key");
+ return (EINVAL);
+ }
+
+ return (__db_getchk(dbp, skey, data, save_flags));
+}
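+
+/*
+ * A hedged sketch of the three-DBT lookup validated above; "sdbp" is assumed
+ * to be an open secondary index and the key contents are made up.  Error
+ * handling is omitted.
+ *
+ *	DBT skey, pkey, pdata;
+ *
+ *	memset(&skey, 0, sizeof(skey));
+ *	memset(&pkey, 0, sizeof(pkey));
+ *	memset(&pdata, 0, sizeof(pdata));
+ *	skey.data = "smith";
+ *	skey.size = sizeof("smith");
+ *
+ *	if (sdbp->pget(sdbp, NULL, &skey, &pkey, &pdata, 0) == 0)
+ *		printf("primary key: %.*s\n",
+ *		    (int)pkey.size, (char *)pkey.data);
+ */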
+
+/*
+ * __db_cpgetchk --
+ * Secondary-index cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cpgetchk __P((const DB *,
+ * PUBLIC: DBT *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cpgetchk(dbp, skey, pkey, data, flags, isvalid)
+ const DB *dbp;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+ int isvalid;
+{
+ int ret;
+ u_int32_t save_flags;
+
+ save_flags = flags;
+
+ if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv,
+ "DBcursor->c_pget may only be used on secondary indices");
+ return (EINVAL);
+ }
+
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+ return (EINVAL);
+ }
+
+ LF_CLR(DB_RMW);
+ switch (flags) {
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ /* DB_CONSUME makes no sense on a secondary index. */
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_pget", 0));
+ case DB_GET_BOTH:
+ /* DB_GET_BOTH is "get both the primary and the secondary". */
+ if (pkey == NULL) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH requires both a secondary and a primary key");
+ return (EINVAL);
+ }
+ break;
+ default:
+ /* __db_cgetchk will catch the rest. */
+ break;
+ }
+
+ /*
+ * We allow the pkey field to be NULL, so that we can make the
+ * two-DBT get calls into wrappers for the three-DBT ones.
+ */
+ if (pkey != NULL &&
+ (ret = __dbt_ferr(dbp, "primary key", pkey, 0)) != 0)
+ return (ret);
+
+ /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */
+ if (pkey == NULL && flags == DB_GET_BOTH) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH on a secondary index requires a primary key");
+ return (EINVAL);
+ }
+
+ return (__db_cgetchk(dbp, skey, data, save_flags, isvalid));
+}
+
+/*
+ * __db_delchk --
+ * Common delete argument checking routine.
+ *
+ * PUBLIC: int __db_delchk __P((const DB *, DBT *, u_int32_t));
+ */
+int
+__db_delchk(dbp, key, flags)
+ const DB *dbp;
+ DBT *key;
+ u_int32_t flags;
+{
+ COMPQUIET(key, NULL);
+
+ /* Check for changes to a read-only tree. */
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "delete"));
+
+ /* Check for invalid function flags. */
+ LF_CLR(DB_AUTO_COMMIT);
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->del", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_getchk --
+ * Common get argument checking routine.
+ *
+ * PUBLIC: int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+ */
+int
+__db_getchk(dbp, key, data, flags)
+ const DB *dbp;
+ const DBT *key;
+ DBT *data;
+ u_int32_t flags;
+{
+ int dirty, multi, ret;
+
+ /*
+ * Check for read-modify-write validity. DB_RMW doesn't make sense
+ * with CDB cursors since if you're going to write the cursor, you
+ * had to create it with DB_WRITECURSOR. Regardless, we check for
+ * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+ * If this changes, confirm that DB does not itself set the DB_RMW
+ * flag in a path where CDB may have been configured.
+ */
+ dirty = 0;
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DB->get"));
+ if (LF_ISSET(DB_DIRTY_READ))
+ dirty = 1;
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
+ }
+
+ multi = 0;
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ if (LF_ISSET(DB_MULTIPLE_KEY))
+ goto multi_err;
+ multi = LF_ISSET(DB_MULTIPLE) ? 1 : 0;
+ LF_CLR(DB_MULTIPLE);
+ }
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_GET_BOTH:
+ break;
+ case DB_SET_RECNO:
+ if (!F_ISSET(dbp, DB_AM_RECNUM))
+ goto err;
+ break;
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ if (dirty) {
+ __db_err(dbp->dbenv,
+ "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+ return (EINVAL);
+ }
+ if (multi)
+multi_err: return (__db_ferr(dbp->dbenv, "DB->get", 1));
+ if (dbp->type == DB_QUEUE)
+ break;
+ /* FALLTHROUGH */
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->get", 0));
+ }
+
+ /*
+ * Check for invalid key/data flags.
+ *
+ * XXX: Dave Krinsky
+ * Remember to modify this when we fix the flag-returning problem.
+ */
+ if ((ret = __dbt_ferr(dbp, "key", key, flags == DB_SET_RECNO)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 1)) != 0)
+ return (ret);
+
+ if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE requires that DB_DBT_USERMEM be set");
+ return (EINVAL);
+ }
+ if (multi &&
+ (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_joinchk --
+ * Common join argument checking routine.
+ *
+ * PUBLIC: int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+ */
+int
+__db_joinchk(dbp, curslist, flags)
+ const DB *dbp;
+ DBC * const *curslist;
+ u_int32_t flags;
+{
+ DB_TXN *txn;
+ int i;
+
+ switch (flags) {
+ case 0:
+ case DB_JOIN_NOSORT:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->join", 0));
+ }
+
+ if (curslist == NULL || curslist[0] == NULL) {
+ __db_err(dbp->dbenv,
+ "At least one secondary cursor must be specified to DB->join");
+ return (EINVAL);
+ }
+
+ txn = curslist[0]->txn;
+ for (i = 1; curslist[i] != NULL; i++)
+ if (curslist[i]->txn != txn) {
+ __db_err(dbp->dbenv,
+ "All secondary cursors must share the same transaction");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_joingetchk --
+ * Common join_get argument checking routine.
+ *
+ * PUBLIC: int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+ */
+int
+__db_joingetchk(dbp, key, flags)
+ const DB *dbp;
+ DBT *key;
+ u_int32_t flags;
+{
+
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
+ }
+
+ switch (flags) {
+ case 0:
+ case DB_JOIN_ITEM:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+ }
+
+ /*
+	 * A partial get of the key of a join cursor doesn't make much sense;
+ * the entire key is necessary to query the primary database
+ * and find the datum, and so regardless of the size of the key
+ * it would not be a performance improvement. Since it would require
+ * special handling, we simply disallow it.
+ *
+ * A partial get of the data, however, potentially makes sense (if
+ * all possible data are a predictable large structure, for instance)
+ * and causes us no headaches, so we permit it.
+ */
+ if (F_ISSET(key, DB_DBT_PARTIAL)) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL may not be set on key during join_get");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_putchk --
+ * Common put argument checking routine.
+ *
+ * PUBLIC: int __db_putchk
+ * PUBLIC: __P((const DB *, DBT *, const DBT *, u_int32_t, int));
+ */
+int
+__db_putchk(dbp, key, data, flags, isdup)
+ const DB *dbp;
+ DBT *key;
+ const DBT *data;
+ u_int32_t flags;
+ int isdup;
+{
+ int ret, returnkey;
+
+ returnkey = 0;
+
+ /* Check for changes to a read-only tree. */
+ if (IS_READONLY(dbp))
+ return (__db_rdonly(dbp->dbenv, "put"));
+
+ /* Check for puts on a secondary. */
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv, "DB->put forbidden on secondary indices");
+ return (EINVAL);
+ }
+
+ /* Check for invalid function flags. */
+ LF_CLR(DB_AUTO_COMMIT);
+ switch (flags) {
+ case 0:
+ case DB_NOOVERWRITE:
+ break;
+ case DB_APPEND:
+ if (dbp->type != DB_RECNO && dbp->type != DB_QUEUE)
+ goto err;
+ returnkey = 1;
+ break;
+ case DB_NODUPDATA:
+ if (F_ISSET(dbp, DB_AM_DUPSORT))
+ break;
+ /* FALLTHROUGH */
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->put", 0));
+ }
+
+ /* Check for invalid key/data flags. */
+ if ((ret = __dbt_ferr(dbp, "key", key, returnkey)) != 0)
+ return (ret);
+ if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+ return (ret);
+
+ /* Check for partial puts in the presence of duplicates. */
+ if (isdup && F_ISSET(data, DB_DBT_PARTIAL)) {
+ __db_err(dbp->dbenv,
+"a partial put in the presence of duplicates requires a cursor operation");
+ return (EINVAL);
+ }
+
+ return (0);
+}
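+
+/*
+ * A hedged sketch of the cursor-based alternative the partial-put check
+ * points callers at: position a cursor on the exact duplicate, then
+ * overwrite dlen bytes at offset doff of that datum.  The cursor "dbc",
+ * the buffer "newbytes" and the offsets are assumptions; error handling
+ * is omitted.
+ *
+ *	DBT key, data;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	... position dbc on the duplicate with c_get(DB_GET_BOTH) ...
+ *	data.flags = DB_DBT_PARTIAL;
+ *	data.doff = 4;
+ *	data.dlen = 8;
+ *	data.data = newbytes;
+ *	data.size = 8;
+ *	(void)dbc->c_put(dbc, &key, &data, DB_CURRENT);
+ */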
+
+/*
+ * __db_statchk --
+ * Common stat argument checking routine.
+ *
+ * PUBLIC: int __db_statchk __P((const DB *, u_int32_t));
+ */
+int
+__db_statchk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ case DB_FAST_STAT:
+ case DB_CACHED_COUNTS: /* Deprecated and undocumented. */
+ break;
+ case DB_RECORDCOUNT: /* Deprecated and undocumented. */
+ if (dbp->type == DB_RECNO)
+ break;
+ if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))
+ break;
+ goto err;
+ default:
+err: return (__db_ferr(dbp->dbenv, "DB->stat", 0));
+ }
+
+ return (0);
+}
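+
+/*
+ * A non-authoritative sketch of a fast-stat call as accepted above (the
+ * btree handle "dbp" is an assumption, and the statistics buffer is
+ * allocated by the library and released with free(); error handling is
+ * omitted):
+ *
+ *	DB_BTREE_STAT *sp;
+ *
+ *	if (dbp->stat(dbp, &sp, DB_FAST_STAT) == 0) {
+ *		printf("keys: %lu\n", (u_long)sp->bt_nkeys);
+ *		free(sp);
+ *	}
+ */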
+
+/*
+ * __db_syncchk --
+ * Common sync argument checking routine.
+ *
+ * PUBLIC: int __db_syncchk __P((const DB *, u_int32_t));
+ */
+int
+__db_syncchk(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ default:
+ return (__db_ferr(dbp->dbenv, "DB->sync", 0));
+ }
+
+ return (0);
+}
+
+/*
+ * __dbt_ferr --
+ * Check a DBT for flag errors.
+ */
+static int
+__dbt_ferr(dbp, name, dbt, check_thread)
+ const DB *dbp;
+ const char *name;
+ const DBT *dbt;
+ int check_thread;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Check for invalid DBT flags. We allow any of the flags to be
+ * specified to any DB or DBcursor call so that applications can
+ * set DB_DBT_MALLOC when retrieving a data item from a secondary
+ * database and then specify that same DBT as a key to a primary
+ * database, without having to clear flags.
+ */
+ if ((ret = __db_fchk(dbenv, name, dbt->flags, DB_DBT_APPMALLOC |
+ DB_DBT_MALLOC | DB_DBT_DUPOK | DB_DBT_REALLOC | DB_DBT_USERMEM |
+ DB_DBT_PARTIAL)) != 0)
+ return (ret);
+ switch (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) {
+ case 0:
+ case DB_DBT_MALLOC:
+ case DB_DBT_REALLOC:
+ case DB_DBT_USERMEM:
+ break;
+ default:
+ return (__db_ferr(dbenv, name, 1));
+ }
+
+ if (check_thread && DB_IS_THREADED(dbp) &&
+ !F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) {
+ __db_err(dbenv,
+ "DB_THREAD mandates memory allocation flag on DBT %s",
+ name);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_rdonly --
+ * Common readonly message.
+ */
+static int
+__db_rdonly(dbenv, name)
+ const DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: attempt to modify a read-only tree", name);
+ return (EACCES);
+}
+
+/*
+ * __db_fnl --
+ * Common flag-needs-locking message.
+ */
+static int
+__db_fnl(dbenv, name)
+ const DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv,
+ "%s: the DB_DIRTY_READ and DB_RMW flags require locking", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_curinval
+ * Report that a cursor is in an invalid state.
+ */
+static int
+__db_curinval(dbenv)
+ const DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "Cursor position must be set before performing this operation");
+ return (EINVAL);
+}
+
+/*
+ * __db_secondary_corrupt --
+ * Report that a secondary index appears corrupt, as it has a record
+ * that does not correspond to a record in the primary.
+ *
+ * PUBLIC: int __db_secondary_corrupt __P((DB *));
+ */
+int
+__db_secondary_corrupt(dbp)
+ DB *dbp;
+{
+
+ __db_err(dbp->dbenv,
+ "Secondary index corrupt: item in secondary not found in primary");
+ return (DB_SECONDARY_BAD);
+}
+
+/*
+ * __db_associatechk --
+ * Argument checking routine for DB->associate().
+ *
+ * PUBLIC: int __db_associatechk __P((DB *, DB *,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associatechk(dbp, sdbp, callback, flags)
+ DB *dbp, *sdbp;
+ int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ if (F_ISSET(sdbp, DB_AM_SECONDARY)) {
+ __db_err(dbenv,
+ "Secondary index handles may not be re-associated");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbenv,
+ "Secondary indices may not be used as primary databases");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "Primary databases may not be configured with duplicates");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_RENUMBER)) {
+ __db_err(dbenv,
+ "Renumbering recno databases may not be used as primary databases");
+ return (EINVAL);
+ }
+ if (callback == NULL &&
+ (!F_ISSET(dbp, DB_AM_RDONLY) || !F_ISSET(sdbp, DB_AM_RDONLY))) {
+ __db_err(dbenv,
+ "Callback function may be NULL only when database handles are read-only");
+ return (EINVAL);
+ }
+
+ return (__db_fchk(dbenv,
+ "DB->associate", flags, DB_CREATE | DB_AUTO_COMMIT));
+}
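+
+/*
+ * A hedged sketch of the associate call vetted above.  "getname" is a
+ * hypothetical callback that pulls the secondary key out of the primary
+ * datum; NAME_OFF and NAME_LEN are made-up offsets, the NULL transaction
+ * argument assumes the 4.1-style associate signature, and error handling
+ * is omitted.
+ *
+ *	int
+ *	getname(sdbp, pkey, pdata, skey)
+ *		DB *sdbp;
+ *		const DBT *pkey, *pdata;
+ *		DBT *skey;
+ *	{
+ *		memset(skey, 0, sizeof(DBT));
+ *		skey->data = (u_int8_t *)pdata->data + NAME_OFF;
+ *		skey->size = NAME_LEN;
+ *		return (0);
+ *	}
+ *
+ *	... with primary and secondary handles open ...
+ *	(void)primary->associate(primary, NULL, secondary, getname, 0);
+ */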
+
+/*
+ * __db_txn_auto --
+ * Handle DB_AUTO_COMMIT initialization.
+ *
+ * PUBLIC: int __db_txn_auto __P((DB *, DB_TXN **));
+ */
+int
+__db_txn_auto(dbp, txnidp)
+ DB *dbp;
+ DB_TXN **txnidp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ if (*txnidp != NULL) {
+ __db_err(dbenv,
+ "DB_AUTO_COMMIT may not be specified along with a transaction handle");
+ return (EINVAL);
+ }
+
+ if (!TXN_ON(dbenv)) {
+ __db_err(dbenv,
+ "DB_AUTO_COMMIT may not be specified in non-transactional environment");
+ return (EINVAL);
+ }
+
+ return (dbenv->txn_begin(dbenv, NULL, txnidp, 0));
+}
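+
+/*
+ * A minimal sketch of the calling convention this routine supports: the
+ * application passes DB_AUTO_COMMIT instead of a transaction handle and the
+ * library wraps the single operation in its own transaction.  The handle
+ * "dbp" and the key/data contents are assumptions; error handling is
+ * omitted.
+ *
+ *	DBT key, data;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	key.data = "fruit";
+ *	key.size = sizeof("fruit");
+ *	data.data = "apple";
+ *	data.size = sizeof("apple");
+ *
+ *	(void)dbp->put(dbp, NULL, &key, &data, DB_AUTO_COMMIT);
+ */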
diff --git a/libdb/db/db_join.c b/libdb/db/db_join.c
new file mode 100644
index 0000000..c5274e4
--- /dev/null
+++ b/libdb/db/db_join.c
@@ -0,0 +1,822 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_join.h"
+#include "dbinc/btree.h"
+
+static int __db_join_close __P((DBC *));
+static int __db_join_cmp __P((const void *, const void *));
+static int __db_join_del __P((DBC *, u_int32_t));
+static int __db_join_get __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+static int __db_join_primget __P((DB *,
+ DB_TXN *, u_int32_t, DBT *, DBT *, u_int32_t));
+static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t));
+
+/*
+ * Check to see if the Nth secondary cursor of join cursor jc is pointing
+ * to a sorted duplicate set.
+ */
+#define SORTED_SET(jc, n) ((jc)->j_curslist[(n)]->dbp->dup_compare != NULL)
+
+/*
+ * This is the duplicate-assisted join functionality. Right now we're
+ * going to write it such that we return one item at a time, although
+ * I think we may need to optimize it to return them all at once.
+ * It should be easier to get it working this way, and I believe that
+ * changing it should be fairly straightforward.
+ *
+ * We optimize the join by sorting cursors from smallest to largest
+ * cardinality. In most cases, this is indeed optimal. However, if
+ * a cursor with large cardinality has very little data in common with the
+ * first cursor, it is possible that the join will be made faster by
+ * putting it earlier in the cursor list. Since we have no way to detect
+ * cases like this, we simply provide a flag, DB_JOIN_NOSORT, which retains
+ * the sort order specified by the caller, who may know more about the
+ * structure of the data.
+ *
+ * The first cursor moves sequentially through the duplicate set while
+ * the others search explicitly for the duplicate in question.
+ *
+ */
+
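+/*
+ * A hedged sketch of how an application drives this machinery; the cursor
+ * array, the "primary" handle and the positioning of each secondary cursor
+ * are assumptions, and error handling is omitted.
+ *
+ *	DBC *carray[3], *join_curs;
+ *	DBT key, data;
+ *
+ *	... open a cursor on each secondary index and position it with
+ *	    c_get(DB_SET) on the secondary key value of interest ...
+ *	carray[2] = NULL;
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *
+ *	if (primary->join(primary, carray, &join_curs, 0) == 0) {
+ *		while (join_curs->c_get(join_curs, &key, &data, 0) == 0)
+ *			;	... each iteration yields one match ...
+ *		(void)join_curs->c_close(join_curs);
+ *	}
+ */
+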
+/*
+ * __db_join --
+ * This is the interface to the duplicate-assisted join functionality.
+ * In the same way that cursors mark a position in a database, a cursor
+ * can mark a position in a join. While most cursors are created by the
+ * cursor method of a DB, join cursors are created through an explicit
+ * call to DB->join.
+ *
+ * The curslist is an array of existing, initialized cursors and primary
+ * is the DB of the primary file. The data item that joins all the
+ * cursors in the curslist is used as the key into the primary and that
+ * key and data are returned. When no more items are left in the join
+ * set, the c_next operation off the join cursor will return DB_NOTFOUND.
+ *
+ * PUBLIC: int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+ */
+int
+__db_join(primary, curslist, dbcp, flags)
+ DB *primary;
+ DBC **curslist, **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ JOIN_CURSOR *jc;
+ int ret;
+ u_int32_t i;
+ size_t ncurs, nslots;
+
+ COMPQUIET(nslots, 0);
+
+ PANIC_CHECK(primary->dbenv);
+
+ if ((ret = __db_joinchk(primary, curslist, flags)) != 0)
+ return (ret);
+
+ dbc = NULL;
+ jc = NULL;
+ dbenv = primary->dbenv;
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0)
+ goto err;
+
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(JOIN_CURSOR), &jc)) != 0)
+ goto err;
+
+ if ((ret = __os_malloc(dbenv, 256, &jc->j_key.data)) != 0)
+ goto err;
+ jc->j_key.ulen = 256;
+ F_SET(&jc->j_key, DB_DBT_USERMEM);
+
+ F_SET(&jc->j_rdata, DB_DBT_REALLOC);
+
+ for (jc->j_curslist = curslist;
+ *jc->j_curslist != NULL; jc->j_curslist++)
+ ;
+
+ /*
+ * The number of cursor slots we allocate is one greater than
+ * the number of cursors involved in the join, because the
+ * list is NULL-terminated.
+ */
+ ncurs = jc->j_curslist - curslist;
+ nslots = ncurs + 1;
+
+ /*
+ * !!! -- A note on the various lists hanging off jc.
+ *
+ * j_curslist is the initial NULL-terminated list of cursors passed
+ * into __db_join. The original cursors are not modified; pristine
+ * copies are required because, in databases with unsorted dups, we
+ * must reset all of the secondary cursors after the first one each
+ * time the first cursor is incremented, or else we will lose data
+ * that happen to be sorted differently in two different cursors.
+ *
+ * j_workcurs is where we put those copies that we're planning to
+ * work with. They're lazily c_dup'ed from j_curslist as we need
+ * them, and closed when the join cursor is closed or when we need
+ * to reset them to their original values (in which case we just
+ * c_dup afresh).
+ *
+ * j_fdupcurs is an array of cursors which point to the first
+ * duplicate in the duplicate set that contains the data value
+ * we're currently interested in. We need this to make
+ * __db_join_get correctly return duplicate duplicates; i.e., if a
+ * given data value occurs twice in the set belonging to cursor #2,
+ * and thrice in the set belonging to cursor #3, and once in all
+ * the other cursors, successive calls to __db_join_get need to
+ * return that data item six times. To make this happen, each time
+ * cursor N is allowed to advance to a new datum, all cursors M
+ * such that M > N have to be reset to the first duplicate with
+ * that datum, so __db_join_get will return all the dup-dups again.
+ * We could just reset them to the original cursor from j_curslist,
+ * but that would be a bit slower in the unsorted case and a LOT
+ * slower in the sorted one.
+ *
+ * j_exhausted is a list of boolean values which represent
+ * whether or not their corresponding cursors are "exhausted",
+ * i.e. whether the datum under the corresponding cursor has
+ * been found not to exist in any unreturned combinations of
+ * later secondary cursors, in which case they are ready to be
+ * incremented.
+ */
+
+ /* We don't want to free regions whose callocs have failed. */
+ jc->j_curslist = NULL;
+ jc->j_workcurs = NULL;
+ jc->j_fdupcurs = NULL;
+ jc->j_exhausted = NULL;
+
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_curslist)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_workcurs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+ &jc->j_fdupcurs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, nslots, sizeof(u_int8_t),
+ &jc->j_exhausted)) != 0)
+ goto err;
+ for (i = 0; curslist[i] != NULL; i++) {
+ jc->j_curslist[i] = curslist[i];
+ jc->j_workcurs[i] = NULL;
+ jc->j_fdupcurs[i] = NULL;
+ jc->j_exhausted[i] = 0;
+ }
+ jc->j_ncurs = (u_int32_t)ncurs;
+
+ /*
+ * If DB_JOIN_NOSORT is not set, optimize secondary cursors by
+ * sorting in order of increasing cardinality.
+ */
+ if (!LF_ISSET(DB_JOIN_NOSORT))
+ qsort(jc->j_curslist, ncurs, sizeof(DBC *), __db_join_cmp);
+
+ /*
+ * We never need to reset the 0th cursor, so there's no
+ * solid reason to use workcurs[0] rather than curslist[0] in
+ * join_get. Nonetheless, it feels cleaner to do it for symmetry,
+ * and this is the most logical place to copy it.
+ *
+ * !!!
+ * There's no need to close the new cursor if we goto err only
+ * because this is the last thing that can fail. Modifier of this
+ * function beware!
+ */
+ if ((ret = jc->j_curslist[0]->c_dup(jc->j_curslist[0], jc->j_workcurs,
+ DB_POSITIONI)) != 0)
+ goto err;
+
+ dbc->c_close = __db_join_close;
+ dbc->c_del = __db_join_del;
+ dbc->c_get = __db_join_get;
+ dbc->c_put = __db_join_put;
+ dbc->internal = (DBC_INTERNAL *) jc;
+ dbc->dbp = primary;
+ jc->j_primary = primary;
+
+ *dbcp = dbc;
+
+ MUTEX_THREAD_LOCK(dbenv, primary->mutexp);
+ TAILQ_INSERT_TAIL(&primary->join_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, primary->mutexp);
+
+ return (0);
+
+err: if (jc != NULL) {
+ if (jc->j_curslist != NULL)
+ __os_free(dbenv, jc->j_curslist);
+ if (jc->j_workcurs != NULL) {
+ if (jc->j_workcurs[0] != NULL)
+ __os_free(dbenv, jc->j_workcurs[0]);
+ __os_free(dbenv, jc->j_workcurs);
+ }
+ if (jc->j_fdupcurs != NULL)
+ __os_free(dbenv, jc->j_fdupcurs);
+ if (jc->j_exhausted != NULL)
+ __os_free(dbenv, jc->j_exhausted);
+ __os_free(dbenv, jc);
+ }
+ if (dbc != NULL)
+ __os_free(dbenv, dbc);
+ return (ret);
+}
+
+static int
+__db_join_put(dbc, key, data, flags)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbc->dbp->dbenv);
+
+ COMPQUIET(key, NULL);
+ COMPQUIET(data, NULL);
+ COMPQUIET(flags, 0);
+ return (EINVAL);
+}
+
+static int
+__db_join_del(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbc->dbp->dbenv);
+
+ COMPQUIET(flags, 0);
+ return (EINVAL);
+}
+
+static int
+__db_join_get(dbc, key_arg, data_arg, flags)
+ DBC *dbc;
+ DBT *key_arg, *data_arg;
+ u_int32_t flags;
+{
+ DBT *key_n, key_n_mem;
+ DB *dbp;
+ DBC *cp;
+ JOIN_CURSOR *jc;
+ int db_manage_data, ret;
+ u_int32_t i, j, operation, opmods;
+
+ dbp = dbc->dbp;
+ jc = (JOIN_CURSOR *)dbc->internal;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ operation = LF_ISSET(DB_OPFLAGS_MASK);
+
+ /* !!!
+ * If the set of flags here changes, check that __db_join_primget
+ * is updated to handle them properly.
+ */
+ opmods = LF_ISSET(DB_RMW | DB_DIRTY_READ);
+
+ if ((ret = __db_joingetchk(dbp, key_arg, flags)) != 0)
+ return (ret);
+
+ /*
+ * Since we are fetching the key as a datum in the secondary indices,
+ * we must be careful of caller-specified DB_DBT_* memory
+ * management flags. If necessary, use a stack-allocated DBT;
+ * we'll appropriately copy and/or allocate the data later.
+ */
+ if (F_ISSET(key_arg, DB_DBT_USERMEM) ||
+ F_ISSET(key_arg, DB_DBT_MALLOC)) {
+ /* We just use the default buffer; no need to go malloc. */
+ key_n = &key_n_mem;
+ memset(key_n, 0, sizeof(DBT));
+ } else {
+ /*
+ * Either DB_DBT_REALLOC or the default buffer will work
+ * fine if we have to reuse it, as we do.
+ */
+ key_n = key_arg;
+ }
+
+ /*
+ * If our last attempt to do a get on the primary key failed,
+ * short-circuit the join and try again with the same key.
+ */
+ if (F_ISSET(jc, JOIN_RETRY))
+ goto samekey;
+ F_CLR(jc, JOIN_RETRY);
+
+retry: ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+ &jc->j_key, key_n,
+ opmods | (jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT));
+
+ if (ret == ENOMEM) {
+ jc->j_key.ulen <<= 1;
+ if ((ret = __os_realloc(dbp->dbenv,
+ jc->j_key.ulen, &jc->j_key.data)) != 0)
+ goto mem_err;
+ goto retry;
+ }
+
+ /*
+ * If ret == DB_NOTFOUND, we're out of elements of the first
+ * secondary cursor. This is how we finally finish the join
+ * if all goes well.
+ */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If jc->j_exhausted[0] == 1, we've just advanced the first cursor,
+ * and we're going to want to advance all the cursors that point to
+ * the first member of a duplicate duplicate set (j_fdupcurs[1..N]).
+ * Close all the cursors in j_fdupcurs; we'll reopen them the
+ * first time through the upcoming loop.
+ */
+ for (i = 1; i < jc->j_ncurs; i++) {
+ if (jc->j_fdupcurs[i] != NULL &&
+ (ret = jc->j_fdupcurs[i]->c_close(jc->j_fdupcurs[i])) != 0)
+ goto err;
+ jc->j_fdupcurs[i] = NULL;
+ }
+
+ /*
+ * If jc->j_curslist[1] == NULL, we have only one cursor in the join.
+ * Thus, we can safely increment that one cursor on each call
+ * to __db_join_get, and we signal this by setting jc->j_exhausted[0]
+ * right away.
+ *
+ * Otherwise, reset jc->j_exhausted[0] to 0, so that we don't
+ * increment it until we know we're ready to.
+ */
+ if (jc->j_curslist[1] == NULL)
+ jc->j_exhausted[0] = 1;
+ else
+ jc->j_exhausted[0] = 0;
+
+ /* We have the first element; now look for it in the other cursors. */
+ for (i = 1; i < jc->j_ncurs; i++) {
+ DB_ASSERT(jc->j_curslist[i] != NULL);
+ if (jc->j_workcurs[i] == NULL)
+ /* If this is NULL, we need to dup curslist into it. */
+ if ((ret = jc->j_curslist[i]->c_dup(
+ jc->j_curslist[i], jc->j_workcurs + i,
+ DB_POSITIONI)) != 0)
+ goto err;
+
+retry2: cp = jc->j_workcurs[i];
+
+ if ((ret = __db_join_getnext(cp, &jc->j_key, key_n,
+ jc->j_exhausted[i], opmods)) == DB_NOTFOUND) {
+ /*
+ * jc->j_workcurs[i] has no more of the datum we're
+ * interested in. Go back one cursor and get
+ * a new dup. We can't just move to a new
+ * element of the outer relation, because that way
+ * we might miss duplicate duplicates in cursor i-1.
+ *
+ * If this takes us back to the first cursor,
+ * -then- we can move to a new element of the outer
+ * relation.
+ */
+ --i;
+ jc->j_exhausted[i] = 1;
+
+ if (i == 0) {
+ for (j = 1; jc->j_workcurs[j] != NULL; j++) {
+ /*
+ * We're moving to a new element of
+ * the first secondary cursor. If
+ * that cursor is sorted, then any
+ * other sorted cursors can be safely
+ * reset to the first duplicate
+ * duplicate in the current set if we
+ * have a pointer to it (we can't just
+ * leave them be, or we'll miss
+ * duplicate duplicates in the outer
+ * relation).
+ *
+ * If the first cursor is unsorted, or
+ * if cursor j is unsorted, we can
+ * make no assumptions about what
+ * we're looking for next or where it
+ * will be, so we reset to the very
+ * beginning (setting workcurs NULL
+ * will achieve this next go-round).
+ *
+ * XXX: This is likely to break
+ * horribly if any two cursors are
+ * both sorted, but have different
+ * specified sort functions. For,
+ * now, we dismiss this as pathology
+ * and let strange things happen--we
+ * can't make rope childproof.
+ */
+ if ((ret = jc->j_workcurs[j]->c_close(
+ jc->j_workcurs[j])) != 0)
+ goto err;
+ if (!SORTED_SET(jc, 0) ||
+ !SORTED_SET(jc, j) ||
+ jc->j_fdupcurs[j] == NULL)
+ /*
+ * Unsafe conditions;
+ * reset fully.
+ */
+ jc->j_workcurs[j] = NULL;
+ else
+ /* Partial reset suffices. */
+					if ((ret = jc->j_fdupcurs[j]->c_dup(
+					    jc->j_fdupcurs[j],
+					    &jc->j_workcurs[j],
+					    DB_POSITIONI)) != 0)
+ goto err;
+ jc->j_exhausted[j] = 0;
+ }
+ goto retry;
+ /* NOTREACHED */
+ }
+
+ /*
+ * We're about to advance the cursor and need to
+ * reset all of the workcurs[j] where j>i, so that
+ * we don't miss any duplicate duplicates.
+ */
+ for (j = i + 1;
+ jc->j_workcurs[j] != NULL;
+ j++) {
+ if ((ret = jc->j_workcurs[j]->c_close(
+ jc->j_workcurs[j])) != 0)
+ goto err;
+ jc->j_exhausted[j] = 0;
+				if (jc->j_fdupcurs[j] == NULL)
+					jc->j_workcurs[j] = NULL;
+				else if ((ret = jc->j_fdupcurs[j]->c_dup(
+				    jc->j_fdupcurs[j], &jc->j_workcurs[j],
+				    DB_POSITIONI)) != 0)
+					goto err;
+ }
+ goto retry2;
+ /* NOTREACHED */
+ }
+
+ if (ret == ENOMEM) {
+ jc->j_key.ulen <<= 1;
+ if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen,
+ &jc->j_key.data)) != 0) {
+mem_err: __db_err(dbp->dbenv,
+ "Allocation failed for join key, len = %lu",
+ (u_long)jc->j_key.ulen);
+ goto err;
+ }
+ goto retry2;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If we made it this far, we've found a matching
+ * datum in cursor i. Mark the current cursor
+ * unexhausted, so we don't miss any duplicate
+ * duplicates the next go-round--unless this is the
+ * very last cursor, in which case there are none to
+ * miss, and we'll need that exhausted flag to finally
+ * get a DB_NOTFOUND and move on to the next datum in
+ * the outermost cursor.
+ */
+ if (i + 1 != jc->j_ncurs)
+ jc->j_exhausted[i] = 0;
+ else
+ jc->j_exhausted[i] = 1;
+
+ /*
+ * If jc->j_fdupcurs[i] is NULL and the ith cursor's dups are
+ * sorted, then we're here for the first time since advancing
+ * cursor 0, and we have a new datum of interest.
+ * jc->j_workcurs[i] points to the beginning of a set of
+ * duplicate duplicates; store this into jc->j_fdupcurs[i].
+ */
+ if (SORTED_SET(jc, i) && jc->j_fdupcurs[i] == NULL && (ret =
+ cp->c_dup(cp, &jc->j_fdupcurs[i], DB_POSITIONI)) != 0)
+ goto err;
+
+ }
+
+err: if (ret != 0)
+ return (ret);
+
+ if (0) {
+samekey: /*
+ * Get the key we tried and failed to return last time;
+ * it should be the current datum of all the secondary cursors.
+ */
+ if ((ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+ &jc->j_key, key_n, DB_CURRENT | opmods)) != 0)
+ return (ret);
+ F_CLR(jc, JOIN_RETRY);
+ }
+
+ /*
+ * ret == 0; we have a key to return.
+ *
+ * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to copy the key
+ * back into the dbt we were given for the key; call __db_retcopy.
+ * Otherwise, assert that we do not need to copy anything and proceed.
+ */
+ DB_ASSERT(F_ISSET(
+ key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) || key_n == key_arg);
+
+ if (F_ISSET(key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) &&
+ (ret = __db_retcopy(dbp->dbenv,
+ key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) {
+ /*
+ * The retcopy failed, most commonly because we have a user
+ * buffer for the key which is too small. Set things up to
+ * retry next time, and return.
+ */
+ F_SET(jc, JOIN_RETRY);
+ return (ret);
+ }
+
+ /*
+ * If DB_JOIN_ITEM is set, we return it; otherwise we do the lookup
+ * in the primary and then return.
+ *
+ * Note that we use key_arg here; it is safe (and appropriate)
+ * to do so.
+ */
+ if (operation == DB_JOIN_ITEM)
+ return (0);
+
+ /*
+ * If data_arg->flags == 0--that is, if DB is managing the
+ * data DBT's memory--it's not safe to just pass the DBT
+ * through to the primary get call, since we don't want that
+ * memory to belong to the primary DB handle (and if the primary
+ * is free-threaded, it can't anyway).
+ *
+ * Instead, use memory that is managed by the join cursor, in
+ * jc->j_rdata.
+ */
+ if (!F_ISSET(data_arg, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM))
+ db_manage_data = 1;
+ else
+ db_manage_data = 0;
+ if ((ret = __db_join_primget(jc->j_primary,
+ jc->j_curslist[0]->txn, jc->j_curslist[0]->locker, key_arg,
+ db_manage_data ? &jc->j_rdata : data_arg, opmods)) != 0) {
+ if (ret == DB_NOTFOUND)
+ /*
+ * If ret == DB_NOTFOUND, the primary and secondary
+ * are out of sync; every item in each secondary
+ * should correspond to something in the primary,
+ * or we shouldn't have done the join this way.
+ * Wail.
+ */
+ ret = __db_secondary_corrupt(jc->j_primary);
+ else
+ /*
+ * The get on the primary failed for some other
+ * reason, most commonly because we're using a user
+ * buffer that's not big enough. Flag our failure
+ * so we can return the same key next time.
+ */
+ F_SET(jc, JOIN_RETRY);
+ }
+ if (db_manage_data && ret == 0) {
+ data_arg->data = jc->j_rdata.data;
+ data_arg->size = jc->j_rdata.size;
+ }
+
+ return (ret);
+}
+
+static int
+__db_join_close(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ JOIN_CURSOR *jc;
+ int ret, t_ret;
+ u_int32_t i;
+
+ jc = (JOIN_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ ret = t_ret = 0;
+
+ /*
+ * Remove from active list of join cursors. Note that this
+ * must happen before any action that can fail and return, or else
+ * __db_close may loop indefinitely.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ TAILQ_REMOVE(&dbp->join_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Close any open scratch cursors. In each case, there may
+ * not be as many outstanding as there are cursors in
+ * curslist, but we want to close whatever's there.
+ *
+ * If any close fails, there's no reason not to close everything else;
+ * we'll just return the error code of the last one to fail. There's
+ * not much the caller can do anyway, since these cursors only exist
+ * hanging off a db-internal data structure that they shouldn't be
+ * mucking with.
+ */
+ for (i = 0; i < jc->j_ncurs; i++) {
+ if (jc->j_workcurs[i] != NULL && (t_ret =
+ jc->j_workcurs[i]->c_close(jc->j_workcurs[i])) != 0)
+ ret = t_ret;
+ if (jc->j_fdupcurs[i] != NULL && (t_ret =
+ jc->j_fdupcurs[i]->c_close(jc->j_fdupcurs[i])) != 0)
+ ret = t_ret;
+ }
+
+ __os_free(dbenv, jc->j_exhausted);
+ __os_free(dbenv, jc->j_curslist);
+ __os_free(dbenv, jc->j_workcurs);
+ __os_free(dbenv, jc->j_fdupcurs);
+ __os_free(dbenv, jc->j_key.data);
+ if (jc->j_rdata.data != NULL)
+ __os_ufree(dbenv, jc->j_rdata.data);
+ __os_free(dbenv, jc);
+ __os_free(dbenv, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_join_getnext --
+ * This function replaces the DBC_CONTINUE and DBC_KEYSET
+ * functionality inside the various cursor get routines.
+ *
+ * If exhausted == 0, we're not done with the current datum;
+ * return it if it matches "matching", otherwise search
+ * using DB_GET_BOTHC (which is faster than iteratively doing
+ * DB_NEXT_DUP) forward until we find one that does.
+ *
+ * If exhausted == 1, we are done with the current datum, so just
+ * leap forward to searching NEXT_DUPs.
+ *
+ * If no matching datum exists, returns DB_NOTFOUND, else 0.
+ */
+static int
+__db_join_getnext(dbc, key, data, exhausted, opmods)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t exhausted, opmods;
+{
+ int ret, cmp;
+ DB *dbp;
+ DBT ldata;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ dbp = dbc->dbp;
+ func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+ switch (exhausted) {
+ case 0:
+ /*
+ * We don't want to step on data->data; use a new
+ * DBT and malloc so we don't step on dbc's rdata memory.
+ */
+ memset(&ldata, 0, sizeof(DBT));
+ F_SET(&ldata, DB_DBT_MALLOC);
+ if ((ret = dbc->c_real_get(dbc,
+ key, &ldata, opmods | DB_CURRENT)) != 0)
+ break;
+ cmp = func(dbp, data, &ldata);
+ if (cmp == 0) {
+ /*
+ * We have to return the real data value. Copy
+ * it into data, then free the buffer we malloc'ed
+ * above.
+ */
+ if ((ret = __db_retcopy(dbp->dbenv, data, ldata.data,
+ ldata.size, &data->data, &data->size)) != 0)
+ return (ret);
+ __os_ufree(dbp->dbenv, ldata.data);
+ return (0);
+ }
+
+ /*
+ * Didn't match--we want to fall through and search future
+ * dups. We just forget about ldata and free
+ * its buffer--data contains the value we're searching for.
+ */
+ __os_ufree(dbp->dbenv, ldata.data);
+ /* FALLTHROUGH */
+ case 1:
+ ret = dbc->c_real_get(dbc, key, data, opmods | DB_GET_BOTHC);
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_join_cmp --
+ * Comparison function for sorting DBCs in cardinality order.
+ */
+static int
+__db_join_cmp(a, b)
+ const void *a, *b;
+{
+ DBC *dbca, *dbcb;
+ db_recno_t counta, countb;
+
+ /* In case c_count fails, pretend cursors are equal. */
+ counta = countb = 0;
+
+ dbca = *((DBC * const *)a);
+ dbcb = *((DBC * const *)b);
+
+ if (dbca->c_count(dbca, &counta, 0) != 0 ||
+ dbcb->c_count(dbcb, &countb, 0) != 0)
+ return (0);
+
+ return (counta - countb);
+}
+
+/*
+ * __db_join_primget --
+ * Perform a DB->get in the primary, being careful not to use a new
+ * locker ID if we're doing CDB locking.
+ */
+static int
+__db_join_primget(dbp, txn, lockerid, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t lockerid;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int dirty, ret, rmw, t_ret;
+
+ /*
+ * The only allowable flags here are the two flags copied into
+ * "opmods" in __db_join_get, DB_RMW and DB_DIRTY_READ. The former
+ * is an op on the c_get call, the latter on the cursor call.
+ * It's a DB bug if we allow any other flags down in here.
+ */
+ rmw = LF_ISSET(DB_RMW);
+ dirty = LF_ISSET(DB_DIRTY_READ);
+ LF_CLR(DB_RMW | DB_DIRTY_READ);
+ DB_ASSERT(flags == 0);
+
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0)
+ return (ret);
+
+ if (dirty ||
+ (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+ F_SET(dbc, DBC_DIRTY_READ);
+ F_SET(dbc, DBC_TRANSIENT);
+
+ /*
+ * This shouldn't be necessary, thanks to the fact that join cursors
+ * swap in their own DB_DBT_REALLOC'ed buffers, but just for form's
+ * sake, we mirror what __db_get does.
+ */
+ SET_RET_MEM(dbc, dbp);
+
+ ret = dbc->c_get(dbc, key, data, DB_SET | rmw);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
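
The join cursor implemented above is driven from the application through DB->join. The following is a minimal sketch of that calling pattern, assuming a primary database plus two secondary indices; the handle names ("by_color", "by_breed") and key values are hypothetical. Each secondary cursor is positioned with DB_SET before the join, and the join cursor then returns only primary key/data pairs present under every positioned cursor.

#include <string.h>
#include <db.h>

static int
join_example(DB *primary, DB *by_color, DB *by_breed)
{
	DBC *curslist[3], *join_curs;
	DBT skey, pkey, data;
	int ret, t_ret;

	memset(&skey, 0, sizeof(skey));
	memset(&pkey, 0, sizeof(pkey));
	memset(&data, 0, sizeof(data));
	curslist[0] = curslist[1] = NULL;
	join_curs = NULL;

	/* Position one cursor per secondary on the value to intersect. */
	if ((ret = by_color->cursor(by_color, NULL, &curslist[0], 0)) != 0)
		goto err;
	skey.data = "red";
	skey.size = sizeof("red");
	if ((ret = curslist[0]->c_get(curslist[0], &skey, &pkey, DB_SET)) != 0)
		goto err;

	if ((ret = by_breed->cursor(by_breed, NULL, &curslist[1], 0)) != 0)
		goto err;
	skey.data = "terrier";
	skey.size = sizeof("terrier");
	if ((ret = curslist[1]->c_get(curslist[1], &skey, &pkey, DB_SET)) != 0)
		goto err;

	/* The cursor list handed to DB->join is NULL-terminated. */
	curslist[2] = NULL;
	if ((ret = primary->join(primary, curslist, &join_curs, 0)) != 0)
		goto err;

	/* Each c_get returns a primary key/data pair matching both indices. */
	while ((ret = join_curs->c_get(join_curs, &pkey, &data, 0)) == 0)
		;
	if (ret == DB_NOTFOUND)
		ret = 0;

err:	if (join_curs != NULL &&
	    (t_ret = join_curs->c_close(join_curs)) != 0 && ret == 0)
		ret = t_ret;
	if (curslist[0] != NULL &&
	    (t_ret = curslist[0]->c_close(curslist[0])) != 0 && ret == 0)
		ret = t_ret;
	if (curslist[1] != NULL &&
	    (t_ret = curslist[1]->c_close(curslist[1])) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}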
diff --git a/libdb/db/db_meta.c b/libdb/db/db_meta.c
new file mode 100644
index 0000000..9c895ce
--- /dev/null
+++ b/libdb/db/db_meta.c
@@ -0,0 +1,452 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
+
+static void __db_init_meta __P((void *, u_int32_t, db_pgno_t, u_int32_t));
+
+/*
+ * __db_init_meta --
+ * Helper function for __db_new that initializes the important fields in
+ * a meta-data page (used instead of P_INIT). We need to make sure that we
+ * retain the page number and LSN of the existing page.
+ */
+static void
+__db_init_meta(p, pgsize, pgno, pgtype)
+ void *p;
+ u_int32_t pgsize;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+{
+ DB_LSN save_lsn;
+ DBMETA *meta;
+
+ meta = (DBMETA *)p;
+ save_lsn = meta->lsn;
+ memset(meta, 0, sizeof(DBMETA));
+ meta->lsn = save_lsn;
+ meta->pagesize = pgsize;
+ meta->pgno = pgno;
+ meta->type = (u_int8_t)pgtype;
+}
+
+/*
+ * __db_new --
+ * Get a new page, preferably from the freelist.
+ *
+ * PUBLIC: int __db_new __P((DBC *, u_int32_t, PAGE **));
+ */
+int
+__db_new(dbc, type, pagepp)
+ DBC *dbc;
+ u_int32_t type;
+ PAGE **pagepp;
+{
+ DBMETA *meta;
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno, newnext;
+ int meta_flags, extend, ret;
+
+ meta = NULL;
+ meta_flags = 0;
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ h = NULL;
+ newnext = PGNO_INVALID;
+
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ if (meta->free == PGNO_INVALID) {
+ pgno = meta->last_pgno + 1;
+ ZERO_LSN(lsn);
+ extend = 1;
+ } else {
+ pgno = meta->free;
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /*
+ * We want to take the first page off the free list and
+		 * then set meta->free to that page's next_pgno, but
+ * we need to log the change first.
+ */
+ newnext = h->next_pgno;
+ lsn = h->lsn;
+ extend = 0;
+ }
+
+ /*
+ * Log the allocation before fetching the new page. If we
+ * don't have room in the log then we don't want to tell
+ * mpool to extend the file.
+ */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_pg_alloc_log(dbp, dbc->txn, &LSN(meta), 0,
+ &LSN(meta), PGNO_BASE_MD, &lsn, pgno,
+ (u_int32_t)type, newnext)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(meta));
+
+ meta_flags = DB_MPOOL_DIRTY;
+ meta->free = newnext;
+
+ if (extend == 1) {
+ meta->last_pgno++;
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &h)) != 0)
+ goto err;
+ ZERO_LSN(h->lsn);
+ h->pgno = pgno;
+ DB_ASSERT(pgno == meta->last_pgno);
+ }
+ LSN(h) = LSN(meta);
+
+ DB_ASSERT(TYPE(h) == P_INVALID);
+
+ if (TYPE(h) != P_INVALID)
+ return (__db_panic(dbp->dbenv, EINVAL));
+
+ (void)mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+ (void)__TLPUT(dbc, metalock);
+
+ switch (type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ __db_init_meta(h, dbp->pgsize, h->pgno, type);
+ break;
+ default:
+ P_INIT(h, dbp->pgsize,
+ h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type);
+ break;
+ }
+
+ /*
+ * If dirty reads are enabled and we are in a transaction, we could
+ * abort this allocation after the page(s) pointing to this
+ * one have their locks downgraded. This would permit dirty readers
+ * to access this page which is ok, but they must be off the
+ * page when we abort. This will also prevent updates happening
+ * to this page until we commit.
+ */
+ if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && dbc->txn != NULL) {
+ if ((ret = __db_lget(dbc, 0,
+ h->pgno, DB_LOCK_WWRITE, 0, &metalock)) != 0)
+ goto err;
+ }
+ *pagepp = h;
+ return (0);
+
+err: if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, meta_flags);
+ (void)__TLPUT(dbc, metalock);
+ return (ret);
+}
+
+/*
+ * __db_free --
+ * Add a page to the head of the freelist.
+ *
+ * PUBLIC: int __db_free __P((DBC *, PAGE *));
+ */
+int
+__db_free(dbc, h)
+ DBC *dbc;
+ PAGE *h;
+{
+ DBMETA *meta;
+ DB *dbp;
+ DBT ldbt;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t pgno;
+ u_int32_t dirty_flag;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ /*
+ * Retrieve the metadata page and insert the page at the head of
+ * the free list. If either the lock get or page get routines
+ * fail, then we need to put the page with which we were called
+ * back because our caller assumes we take care of it.
+ */
+ dirty_flag = 0;
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0) {
+ (void)__TLPUT(dbc, metalock);
+ goto err;
+ }
+
+ DB_ASSERT(h->pgno != meta->free);
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&ldbt, 0, sizeof(ldbt));
+ ldbt.data = h;
+ ldbt.size = P_OVERHEAD(dbp);
+ if ((ret = __db_pg_free_log(dbp,
+ dbc->txn, &LSN(meta), 0, h->pgno,
+ &LSN(meta), PGNO_BASE_MD, &ldbt, meta->free)) != 0) {
+ (void)mpf->put(mpf, (PAGE *)meta, 0);
+ (void)__TLPUT(dbc, metalock);
+ goto err;
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(meta));
+ LSN(h) = LSN(meta);
+
+ P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID);
+
+ meta->free = h->pgno;
+
+ /* Discard the metadata page. */
+ if ((t_ret =
+ mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the caller's page reference. */
+ dirty_flag = DB_MPOOL_DIRTY;
+err: if ((t_ret = mpf->put(mpf, h, dirty_flag)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * XXX
+ * We have to unlock the caller's page in the caller!
+ */
+ return (ret);
+}
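
Taken together, __db_new and __db_free maintain a singly linked freelist threaded through the pages themselves: the meta page holds the head, each free page's next_pgno points at the next free page, allocation pops the head (or extends past last_pgno), and freeing pushes at the head. Below is a stripped-down, in-memory sketch of that discipline using hypothetical model types, not the real DB structures or logging.

#include <stdint.h>

#define PGNO_INVALID	0	/* mirrors DB's convention: pgno 0 is the meta page */

struct meta_model { uint32_t free, last_pgno; };
struct page_model { uint32_t next_pgno; };

/* Allocate: pop the freelist head, or extend the "file". */
static uint32_t
page_alloc(struct meta_model *m, struct page_model pages[])
{
	uint32_t pgno;

	if (m->free != PGNO_INVALID) {
		pgno = m->free;
		m->free = pages[pgno].next_pgno;
	} else
		pgno = ++m->last_pgno;
	return (pgno);
}

/* Free: push the page at the head of the list. */
static void
page_free(struct meta_model *m, struct page_model pages[], uint32_t pgno)
{
	pages[pgno].next_pgno = m->free;
	m->free = pgno;
}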
+
+#ifdef DEBUG
+/*
+ * __db_lprint --
+ * Print out the list of locks currently held by a cursor.
+ *
+ * PUBLIC: int __db_lprint __P((DBC *));
+ */
+int
+__db_lprint(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_LOCKREQ req;
+
+ dbp = dbc->dbp;
+
+ if (LOCKING_ON(dbp->dbenv)) {
+ req.op = DB_LOCK_DUMP;
+ dbp->dbenv->lock_vec(dbp->dbenv, dbc->locker, 0, &req, 1, NULL);
+ }
+ return (0);
+}
+#endif
+
+/*
+ * Implement the rules for transactional locking. We can release the previous
+ * lock if we are not in a transaction or COUPLE_ALWAYS is specified (used in
+ * record locking). If we are doing dirty reads, then we can release read
+ * locks and downgrade write locks.
+ */
+#define DB_PUT_ACTION(dbc, action, lockp) \
+ (((action == LCK_COUPLE || action == LCK_COUPLE_ALWAYS) && \
+ LOCK_ISSET(*lockp)) ? \
+ (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS || \
+ (F_ISSET(dbc, DBC_DIRTY_READ) && \
+ (lockp)->mode == DB_LOCK_DIRTY)) ? LCK_COUPLE : \
+ (F_ISSET((dbc)->dbp, DB_AM_DIRTY) && \
+ (lockp)->mode == DB_LOCK_WRITE) ? LCK_DOWNGRADE : 0 : 0)
+
+/*
+ * __db_lget --
+ * The standard lock get call.
+ *
+ * PUBLIC: int __db_lget __P((DBC *,
+ * PUBLIC: int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *));
+ */
+int
+__db_lget(dbc, action, pgno, mode, lkflags, lockp)
+ DBC *dbc;
+ int action;
+ db_pgno_t pgno;
+ db_lockmode_t mode;
+ u_int32_t lkflags;
+ DB_LOCK *lockp;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_LOCKREQ couple[2], *reqp;
+ DB_TXN *txn;
+ int has_timeout, ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ txn = dbc->txn;
+
+ /*
+ * We do not always check if we're configured for locking before
+ * calling __db_lget to acquire the lock.
+ */
+ if (CDB_LOCKING(dbenv) ||
+ !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE) ||
+ (F_ISSET(dbc, DBC_RECOVER) &&
+ (action != LCK_ROLLBACK || F_ISSET(dbenv, DB_ENV_REP_CLIENT))) ||
+ (action != LCK_ALWAYS && F_ISSET(dbc, DBC_OPD))) {
+ LOCK_INIT(*lockp);
+ return (0);
+ }
+
+ dbc->lock.pgno = pgno;
+ if (lkflags & DB_LOCK_RECORD)
+ dbc->lock.type = DB_RECORD_LOCK;
+ else
+ dbc->lock.type = DB_PAGE_LOCK;
+ lkflags &= ~DB_LOCK_RECORD;
+
+ /*
+ * If the transaction enclosing this cursor has DB_LOCK_NOWAIT set,
+ * pass that along to the lock call.
+ */
+ if (DB_NONBLOCK(dbc))
+ lkflags |= DB_LOCK_NOWAIT;
+
+ if (F_ISSET(dbc, DBC_DIRTY_READ) && mode == DB_LOCK_READ)
+ mode = DB_LOCK_DIRTY;
+
+ has_timeout = txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT);
+
+ switch (DB_PUT_ACTION(dbc, action, lockp)) {
+ case LCK_COUPLE:
+lck_couple: couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET;
+ couple[0].obj = &dbc->lock_dbt;
+ couple[0].mode = mode;
+ if (action == LCK_COUPLE_ALWAYS)
+ action = LCK_COUPLE;
+ UMRW_SET(couple[0].timeout);
+ if (has_timeout)
+ couple[0].timeout = txn->lock_timeout;
+ if (action == LCK_COUPLE) {
+ couple[1].op = DB_LOCK_PUT;
+ couple[1].lock = *lockp;
+ }
+
+ ret = dbenv->lock_vec(dbenv, dbc->locker,
+ lkflags, couple, action == LCK_COUPLE ? 2 : 1, &reqp);
+ if (ret == 0 || reqp == &couple[1])
+ *lockp = couple[0].lock;
+ break;
+ case LCK_DOWNGRADE:
+ if ((ret = dbenv->lock_downgrade(
+ dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0)
+ return (ret);
+ /* FALL THROUGH */
+ default:
+ if (has_timeout)
+ goto lck_couple;
+ ret = dbenv->lock_get(dbenv,
+ dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
+ break;
+ }
+
+ return (ret);
+}
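
The LCK_COUPLE case above boils down to one lock_vec call whose request vector gets the lock on the new object and puts the previously held lock. A minimal sketch of that idiom through the public locking API follows, assuming an environment opened with locking and a locker ID obtained from DB_ENV->lock_id; error handling is abbreviated.

#include <string.h>
#include <db.h>

static int
couple_locks(DB_ENV *dbenv, u_int32_t locker,
    DBT *new_obj, db_lockmode_t mode, DB_LOCK *lockp)
{
	DB_LOCKREQ reqs[2];
	int ret;

	memset(reqs, 0, sizeof(reqs));
	reqs[0].op = DB_LOCK_GET;	/* acquire the lock on the new object... */
	reqs[0].obj = new_obj;
	reqs[0].mode = mode;
	reqs[1].op = DB_LOCK_PUT;	/* ...then release the lock we held */
	reqs[1].lock = *lockp;

	if ((ret = dbenv->lock_vec(dbenv, locker, 0, reqs, 2, NULL)) == 0)
		*lockp = reqs[0].lock;	/* hand the new lock back to the caller */
	return (ret);
}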
+
+/*
+ * __db_lput --
+ * The standard lock put call.
+ *
+ * PUBLIC: int __db_lput __P((DBC *, DB_LOCK *));
+ */
+int
+__db_lput(dbc, lockp)
+ DBC *dbc;
+ DB_LOCK *lockp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbc->dbp->dbenv;
+
+ switch (DB_PUT_ACTION(dbc, LCK_COUPLE, lockp)) {
+ case LCK_COUPLE:
+ ret = dbenv->lock_put(dbenv, lockp);
+ break;
+ case LCK_DOWNGRADE:
+ ret = __lock_downgrade(dbenv, lockp, DB_LOCK_WWRITE, 0);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return (ret);
+}
diff --git a/libdb/db/db_method.c b/libdb/db/db_method.c
new file mode 100644
index 0000000..2cd9003
--- /dev/null
+++ b/libdb/db/db_method.c
@@ -0,0 +1,691 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __db_get_byteswapped __P((DB *, int *));
+static int __db_get_type __P((DB *, DBTYPE *dbtype));
+static int __db_init __P((DB *, u_int32_t));
+static int __db_key_range
+ __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+static int __db_set_alloc __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+static int __db_set_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+static int __db_set_cache_priority __P((DB *, DB_CACHE_PRIORITY));
+static int __db_set_dup_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static int __db_set_encrypt __P((DB *, const char *, u_int32_t));
+static int __db_set_feedback __P((DB *, void (*)(DB *, int, int)));
+static int __db_set_flags __P((DB *, u_int32_t));
+static int __db_set_pagesize __P((DB *, u_int32_t));
+static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int)));
+static void __db_set_errcall __P((DB *, void (*)(const char *, char *)));
+static void __db_set_errfile __P((DB *, FILE *));
+static void __db_set_errpfx __P((DB *, const char *));
+static int __db_stat_fail __P((DB *, void *, u_int32_t));
+static void __dbh_err __P((DB *, int, const char *, ...));
+static void __dbh_errx __P((DB *, const char *, ...));
+
+#ifdef HAVE_RPC
+static int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
+#endif
+
+/*
+ * db_create --
+ * DB constructor.
+ *
+ * EXTERN: int db_create __P((DB **, DB_ENV *, u_int32_t));
+ */
+int
+db_create(dbpp, dbenv, flags)
+ DB **dbpp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_XA_CREATE:
+ if (dbenv != NULL) {
+ __db_err(dbenv,
+ "XA applications may not specify an environment to db_create");
+ return (EINVAL);
+ }
+
+ /*
+ * If it's an XA database, open it within the XA environment,
+ * taken from the global list of environments. (When the XA
+ * transaction manager called our xa_start() routine the
+	 * transaction manager called our xa_start() routine, the
+	 * "current" environment was moved to the start of the list.)
+ dbenv = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ break;
+ default:
+ return (__db_ferr(dbenv, "db_create", 0));
+ }
+
+ /* Allocate the DB. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0)
+ return (ret);
+#ifdef HAVE_RPC
+ if (dbenv != NULL && RPC_ON(dbenv))
+ ret = __dbcl_init(dbp, dbenv, flags);
+ else
+#endif
+ ret = __db_init(dbp, flags);
+ if (ret != 0) {
+ __os_free(dbenv, dbp);
+ return (ret);
+ }
+
+ /* If we don't have an environment yet, allocate a local one. */
+ if (dbenv == NULL) {
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ __os_free(dbenv, dbp);
+ return (ret);
+ }
+ F_SET(dbenv, DB_ENV_DBLOCAL);
+ }
+ ++dbenv->db_ref;
+
+ dbp->dbenv = dbenv;
+
+ *dbpp = dbp;
+ return (0);
+}
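
db_create is the constructor half of the usual two-step handle setup: create the handle, tune it with the set_* methods (which, as the checks below enforce, must happen before open), then call DB->open. A hedged sketch of that sequence, with a hypothetical file name and tuning values:

#include <db.h>

static int
open_btree(DB_ENV *dbenv, const char *file, DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);

	/* Configuration must precede DB->open (DB_ILLEGAL_AFTER_OPEN). */
	if ((ret = dbp->set_pagesize(dbp, 8192)) != 0 ||
	    (ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0 ||
	    (ret = dbp->open(dbp,
	    NULL, file, NULL, DB_BTREE, DB_CREATE, 0644)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}

	*dbpp = dbp;
	return (0);
}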
+
+/*
+ * __db_init --
+ * Initialize a DB structure.
+ */
+static int
+__db_init(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ TAILQ_INIT(&dbp->join_queue);
+ LIST_INIT(&dbp->s_secondaries);
+
+ FLD_SET(dbp->am_ok,
+ DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->associate = __db_associate;
+ dbp->close = __db_close;
+ dbp->cursor = __db_cursor;
+ dbp->del = __db_delete;
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __db_fd;
+ dbp->get = __db_get;
+ dbp->get_byteswapped = __db_get_byteswapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __db_join;
+ dbp->key_range = __db_key_range;
+ dbp->open = __db_open;
+ dbp->pget = __db_pget;
+ dbp->put = __db_put;
+ dbp->remove = __db_remove;
+ dbp->rename = __db_rename;
+ dbp->truncate = __db_truncate;
+ dbp->set_alloc = __db_set_alloc;
+ dbp->set_append_recno = __db_set_append_recno;
+ dbp->set_cachesize = __db_set_cachesize;
+ dbp->set_cache_priority = __db_set_cache_priority;
+ dbp->set_dup_compare = __db_set_dup_compare;
+ dbp->set_encrypt = __db_set_encrypt;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __db_set_feedback;
+ dbp->set_flags = __db_set_flags;
+ dbp->set_lorder = __db_set_lorder;
+ dbp->set_pagesize = __db_set_pagesize;
+ dbp->set_paniccall = __db_set_paniccall;
+ dbp->stat = __db_stat_fail;
+ dbp->sync = __db_sync;
+ dbp->upgrade = __db_upgrade;
+ dbp->verify = __db_verify;
+
+ /* Access method specific. */
+ if ((ret = __bam_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __ham_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __qam_db_create(dbp)) != 0)
+ return (ret);
+
+ /*
+ * XA specific: must be last, as we replace methods set by the
+ * access methods.
+ */
+ if (LF_ISSET(DB_XA_CREATE) && (ret = __db_xa_create(dbp)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __dbh_am_chk --
+ * Error if an unreasonable method is called.
+ *
+ * PUBLIC: int __dbh_am_chk __P((DB *, u_int32_t));
+ */
+int
+__dbh_am_chk(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ /*
+ * We start out allowing any access methods to be called, and as the
+ * application calls the methods the options become restricted. The
+ * idea is to quit as soon as an illegal method combination is called.
+ */
+ if ((LF_ISSET(DB_OK_BTREE) && FLD_ISSET(dbp->am_ok, DB_OK_BTREE)) ||
+ (LF_ISSET(DB_OK_HASH) && FLD_ISSET(dbp->am_ok, DB_OK_HASH)) ||
+ (LF_ISSET(DB_OK_QUEUE) && FLD_ISSET(dbp->am_ok, DB_OK_QUEUE)) ||
+ (LF_ISSET(DB_OK_RECNO) && FLD_ISSET(dbp->am_ok, DB_OK_RECNO))) {
+ FLD_CLR(dbp->am_ok, ~flags);
+ return (0);
+ }
+
+ __db_err(dbp->dbenv,
+ "call implies an access method which is inconsistent with previous calls");
+ return (EINVAL);
+}
+
+/*
+ * __dbh_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbh_err(DB *dbp, int error, const char *fmt, ...)
+#else
+__dbh_err(dbp, error, fmt, va_alist)
+ DB *dbp;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbp->dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbh_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbh_errx(DB *dbp, const char *fmt, ...)
+#else
+__dbh_errx(dbp, fmt, va_alist)
+ DB *dbp;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbp->dbenv, 0, 0, 1, fmt);
+}
+
+/*
+ * __db_get_byteswapped --
+ * Return if database requires byte swapping.
+ */
+static int
+__db_get_byteswapped(dbp, isswapped)
+ DB *dbp;
+ int *isswapped;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_byteswapped");
+
+ *isswapped = F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0;
+ return (0);
+}
+
+/*
+ * __db_get_type --
+ * Return type of underlying database.
+ */
+static int
+__db_get_type(dbp, dbtype)
+ DB *dbp;
+ DBTYPE *dbtype;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_type");
+
+ *dbtype = dbp->type;
+ return (0);
+}
+
+/*
+ * __db_key_range --
+ * Return proportion of keys above and below given key.
+ */
+static int
+__db_key_range(dbp, txn, key, kr, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ DB_KEY_RANGE *kr;
+ u_int32_t flags;
+{
+ COMPQUIET(txn, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(kr, NULL);
+ COMPQUIET(flags, 0);
+
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "key_range");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ return (EINVAL);
+}
+
+/*
+ * __db_set_append_recno --
+ * Set record number append routine.
+ */
+static int
+__db_set_append_recno(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, DBT *, db_recno_t));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_append_recno");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->db_append_recno = func;
+
+ return (0);
+}
+
+/*
+ * __db_set_cachesize --
+ * Set underlying cache size.
+ */
+static int
+__db_set_cachesize(dbp, cache_gbytes, cache_bytes, ncache)
+ DB *dbp;
+ u_int32_t cache_gbytes, cache_bytes;
+ int ncache;
+{
+ DB_ILLEGAL_IN_ENV(dbp, "set_cachesize");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_cachesize");
+
+ return (dbp->dbenv->set_cachesize(
+ dbp->dbenv, cache_gbytes, cache_bytes, ncache));
+}
+
+/*
+ * __db_set_cache_priority --
+ * Set cache priority for pages from this file.
+ */
+static int
+__db_set_cache_priority(dbp, priority)
+ DB *dbp;
+ DB_CACHE_PRIORITY priority;
+{
+ /*
+ * If an underlying DB_MPOOLFILE exists, call it. Otherwise, save
+ * the information away until DB->open is called.
+ */
+ if (dbp->mpf == NULL) {
+ dbp->priority = priority;
+ return (0);
+ }
+ return (dbp->mpf->set_priority(dbp->mpf, priority));
+}
+
+/*
+ * __db_set_dup_compare --
+ * Set duplicate comparison routine.
+ */
+static int
+__db_set_dup_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ int ret;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "dup_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0)
+ return (ret);
+
+ dbp->dup_compare = func;
+
+ return (0);
+}
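
As the code above shows, installing a duplicate comparison function implicitly turns on DB_DUPSORT. A small sketch of a caller-supplied comparator matching the int (*)(DB *, const DBT *, const DBT *) signature, here treating duplicate data items as 32-bit unsigned integers (a hypothetical encoding):

#include <string.h>
#include <db.h>

/* Compare duplicate data items as host-order u_int32_t values. */
static int
compare_u32_dups(DB *dbp, const DBT *a, const DBT *b)
{
	u_int32_t ua, ub;

	memcpy(&ua, a->data, sizeof(ua));
	memcpy(&ub, b->data, sizeof(ub));
	return (ua < ub ? -1 : (ua > ub ? 1 : 0));
}

/* Must be called on a handle that has not yet been opened. */
static int
setup_sorted_dups(DB *dbp)
{
	return (dbp->set_dup_compare(dbp, compare_u32_dups));
}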
+
+/*
+ * __db_set_encrypt --
+ * Set database passwd.
+ */
+static int
+__db_set_encrypt(dbp, passwd, flags)
+ DB *dbp;
+ const char *passwd;
+ u_int32_t flags;
+{
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ DB_ILLEGAL_IN_ENV(dbp, "set_encrypt");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_encrypt");
+
+ if ((ret = dbp->dbenv->set_encrypt(dbp->dbenv, passwd, flags)) != 0)
+ return (ret);
+
+ /*
+ * In a real env, this gets initialized with the region. In a local
+ * env, we must do it here.
+ */
+ db_cipher = (DB_CIPHER *)dbp->dbenv->crypto_handle;
+ if (!F_ISSET(db_cipher, CIPHER_ANY) &&
+ (ret = db_cipher->init(dbp->dbenv, db_cipher)) != 0)
+ return (ret);
+
+ return (dbp->set_flags(dbp, DB_ENCRYPT));
+}
+
+static void
+__db_set_errcall(dbp, errcall)
+ DB *dbp;
+ void (*errcall) __P((const char *, char *));
+{
+ dbp->dbenv->set_errcall(dbp->dbenv, errcall);
+}
+
+static void
+__db_set_errfile(dbp, errfile)
+ DB *dbp;
+ FILE *errfile;
+{
+ dbp->dbenv->set_errfile(dbp->dbenv, errfile);
+}
+
+static void
+__db_set_errpfx(dbp, errpfx)
+ DB *dbp;
+ const char *errpfx;
+{
+ dbp->dbenv->set_errpfx(dbp->dbenv, errpfx);
+}
+
+static int
+__db_set_feedback(dbp, feedback)
+ DB *dbp;
+ void (*feedback) __P((DB *, int, int));
+{
+ dbp->db_feedback = feedback;
+ return (0);
+}
+
+static int
+__db_set_flags(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ /*
+ * !!!
+ * The hash access method only takes two flags: DB_DUP and DB_DUPSORT.
+ * The Btree access method uses them for the same purposes, and so we
+ * resolve them there.
+ *
+ * The queue access method takes no flags.
+ */
+ if (LF_ISSET(DB_ENCRYPT)) {
+ if (!CRYPTO_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "Database environment not configured for encryption");
+ return (EINVAL);
+ }
+ F_SET(dbp, DB_AM_ENCRYPT);
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_ENCRYPT);
+ }
+ if (LF_ISSET(DB_CHKSUM_SHA1)) {
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_CHKSUM_SHA1);
+ }
+
+ if ((ret = __bam_set_flags(dbp, &flags)) != 0)
+ return (ret);
+ if ((ret = __ram_set_flags(dbp, &flags)) != 0)
+ return (ret);
+
+ return (flags == 0 ? 0 : __db_ferr(dbp->dbenv, "DB->set_flags", 0));
+}
+
+/*
+ * __db_set_lorder --
+ * Set whether lorder is swapped or not.
+ *
+ * PUBLIC: int __db_set_lorder __P((DB *, int));
+ */
+int
+__db_set_lorder(dbp, db_lorder)
+ DB *dbp;
+ int db_lorder;
+{
+ int ret;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_lorder");
+
+ /* Flag if the specified byte order requires swapping. */
+ switch (ret = __db_byteorder(dbp->dbenv, db_lorder)) {
+ case 0:
+ F_CLR(dbp, DB_AM_SWAP);
+ break;
+ case DB_SWAPBYTES:
+ F_SET(dbp, DB_AM_SWAP);
+ break;
+ default:
+ return (ret);
+ /* NOTREACHED */
+ }
+ return (0);
+}
+
+static int
+__db_set_alloc(dbp, mal_func, real_func, free_func)
+ DB *dbp;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ DB_ILLEGAL_IN_ENV(dbp, "set_alloc");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_alloc");
+
+ return (dbp->dbenv->set_alloc(dbp->dbenv,
+ mal_func, real_func, free_func));
+}
+
+static int
+__db_set_pagesize(dbp, db_pagesize)
+ DB *dbp;
+ u_int32_t db_pagesize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_pagesize");
+
+ if (db_pagesize < DB_MIN_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be smaller than %lu",
+ (u_long)DB_MIN_PGSIZE);
+ return (EINVAL);
+ }
+ if (db_pagesize > DB_MAX_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be larger than %lu",
+ (u_long)DB_MAX_PGSIZE);
+ return (EINVAL);
+ }
+
+ /*
+ * We don't want anything that's not a power-of-2, as we rely on that
+ * for alignment of various types on the pages.
+ */
+ if (!POWER_OF_TWO(db_pagesize)) {
+ __db_err(dbp->dbenv, "page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+
+ /*
+ * XXX
+ * Should we be checking for a page size that's not a multiple of 512,
+ * so that we never try and write less than a disk sector?
+ */
+ dbp->pgsize = db_pagesize;
+
+ return (0);
+}
+
+static int
+__db_set_paniccall(dbp, paniccall)
+ DB *dbp;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ return (dbp->dbenv->set_paniccall(dbp->dbenv, paniccall));
+}
+
+static int
+__db_stat_fail(dbp, sp, flags)
+ DB *dbp;
+ void *sp;
+ u_int32_t flags;
+{
+ COMPQUIET(sp, NULL);
+ COMPQUIET(flags, 0);
+
+ /*
+ * DB->stat isn't initialized until the actual DB->open call,
+ * but we don't want to core dump.
+ */
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ /* NOTREACHED */
+ return (EINVAL);
+}
+
+#ifdef HAVE_RPC
+/*
+ * __dbcl_init --
+ * Initialize a DB structure on the server.
+ */
+static int
+__dbcl_init(dbp, dbenv, flags)
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+	/*
+	 * !!!
+	 * Note that we don't need to initialize the join_queue; it's
+	 * not used in RPC clients. See the comment in __dbcl_db_join_ret().
+	 */
+
+ dbp->associate = __dbcl_db_associate;
+ dbp->close = __dbcl_db_close;
+ dbp->cursor = __dbcl_db_cursor;
+ dbp->del = __dbcl_db_del;
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __dbcl_db_fd;
+ dbp->get = __dbcl_db_get;
+ dbp->get_byteswapped = __db_get_byteswapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __dbcl_db_join;
+ dbp->key_range = __dbcl_db_key_range;
+ dbp->open = __dbcl_db_open_wrap;
+ dbp->pget = __dbcl_db_pget;
+ dbp->put = __dbcl_db_put;
+ dbp->remove = __dbcl_db_remove;
+ dbp->rename = __dbcl_db_rename;
+ dbp->set_alloc = __dbcl_db_alloc;
+ dbp->set_append_recno = __dbcl_db_set_append_recno;
+ dbp->set_cachesize = __dbcl_db_cachesize;
+ dbp->set_cache_priority = __dbcl_db_cache_priority;
+ dbp->set_dup_compare = __dbcl_db_dup_compare;
+ dbp->set_encrypt = __dbcl_db_encrypt;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __dbcl_db_feedback;
+ dbp->set_flags = __dbcl_db_flags;
+ dbp->set_lorder = __dbcl_db_lorder;
+ dbp->set_pagesize = __dbcl_db_pagesize;
+ dbp->set_paniccall = __dbcl_db_panic;
+ dbp->stat = __dbcl_db_stat;
+ dbp->sync = __dbcl_db_sync;
+ dbp->truncate = __dbcl_db_truncate;
+ dbp->upgrade = __dbcl_db_upgrade;
+ dbp->verify = __dbcl_db_verify;
+
+ /*
+ * Set all the method specific functions to client funcs as well.
+ */
+ dbp->set_bt_compare = __dbcl_db_bt_compare;
+ dbp->set_bt_maxkey = __dbcl_db_bt_maxkey;
+ dbp->set_bt_minkey = __dbcl_db_bt_minkey;
+ dbp->set_bt_prefix = __dbcl_db_bt_prefix;
+ dbp->set_h_ffactor = __dbcl_db_h_ffactor;
+ dbp->set_h_hash = __dbcl_db_h_hash;
+ dbp->set_h_nelem = __dbcl_db_h_nelem;
+ dbp->set_q_extentsize = __dbcl_db_extentsize;
+ dbp->set_re_delim = __dbcl_db_re_delim;
+ dbp->set_re_len = __dbcl_db_re_len;
+ dbp->set_re_pad = __dbcl_db_re_pad;
+ dbp->set_re_source = __dbcl_db_re_source;
+
+ return (__dbcl_db_create(dbp, dbenv, flags));
+}
+#endif
diff --git a/libdb/db/db_open.c b/libdb/db/db_open.c
new file mode 100644
index 0000000..cf3509f
--- /dev/null
+++ b/libdb/db/db_open.c
@@ -0,0 +1,703 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_openchk __P((DB *,
+ DB_TXN *, const char *, const char *, DBTYPE, u_int32_t));
+
+/*
+ * __db_open --
+ * Main library interface to the DB access methods.
+ *
+ * PUBLIC: int __db_open __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int));
+ */
+int
+__db_open(dbp, txn, name, subdb, type, flags, mode)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ DB_ENV *dbenv;
+ int remove_master, remove_me, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ remove_me = remove_master = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __db_openchk(dbp, txn, name, subdb, type, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * If the environment was configured with threads, the DB handle
+ * must also be free-threaded, so we force the DB_THREAD flag on.
+ * (See SR #2033 for why this is a requirement--recovery needs
+ * to be able to grab a dbp using __db_fileid_to_dbp, and it has
+ * no way of knowing which dbp goes with which thread, so whichever
+ * one it finds has to be usable in any of them.)
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ LF_SET(DB_THREAD);
+
+ /* Convert any DB->open flags. */
+ if (LF_ISSET(DB_RDONLY))
+ F_SET(dbp, DB_AM_RDONLY);
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(dbp, DB_AM_DIRTY);
+
+ /* Fill in the type. */
+ dbp->type = type;
+
+ /*
+ * If we're opening a subdatabase, we have to open (and potentially
+ * create) the main database, and then get (and potentially store)
+ * our base page number in that database. Then, we can finally open
+ * the subdatabase.
+ */
+ if ((ret = __db_dbopen(
+ dbp, txn, name, subdb, flags, mode, PGNO_BASE_MD)) != 0)
+ goto err;
+
+ /*
+ * You can open the database that describes the subdatabases in the
+ * rest of the file read-only. The content of each key's data is
+ * unspecified and applications should never be adding new records
+ * or updating existing records. However, during recovery, we need
+ * to open these databases R/W so we can redo/undo changes in them.
+ * Likewise, we need to open master databases read/write during
+ * rename and remove so we can be sure they're fully sync'ed, so
+ * we provide an override flag for the purpose.
+ */
+ if (subdb == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) &&
+ !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "files containing multiple databases may only be opened read-only");
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /* If we were successful, don't discard the file on close. */
+ if (ret == 0)
+ F_CLR(dbp, DB_AM_DISCARD | DB_AM_CREATED | DB_AM_CREATED_MSTR);
+ else {
+ /*
+ * If we are not transactional, we need to remove the
+ * databases/subdatabases. If we are transactional, then
+ * the abort of the child transaction should take care of
+ * cleaning them up.
+ */
+ remove_me = txn == NULL && F_ISSET(dbp, DB_AM_CREATED);
+ remove_master = txn == NULL && F_ISSET(dbp, DB_AM_CREATED_MSTR);
+
+ /*
+ * If we had an error, it may have happened before or after
+ * we actually logged the open. If it happened before, then
+ * abort won't know anything about it and won't close or
+ * refresh the dbp, so we need to do it explicitly.
+ */
+ (void)__db_refresh(dbp, txn, DB_NOSYNC);
+ }
+
+ /* Remove anyone we created. */
+ if (remove_master || (subdb == NULL && remove_me))
+ /* Remove file. */
+ (void)dbenv->dbremove(dbenv, txn, name, NULL, 0);
+ else if (remove_me)
+ /* Remove subdatabase. */
+ (void)dbenv->dbremove(dbenv, txn, name, subdb, 0);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_dbopen --
+ * Open a database. This routine gets called in three different ways.
+ * 1. It can be called to open a file/database. In this case, subdb will
+ * be NULL and meta_pgno will be PGNO_BASE_MD.
+ * 2. It can be called to open a subdatabase during normal operation. In
+ * this case, name and subname will both be non-NULL and meta_pgno will
+ *	be PGNO_BASE_MD (also PGNO_INVALID).
+ * 3. It can be called during recovery to open a subdatabase in which case
+ *	name will be non-NULL, subname may be NULL and meta_pgno will be
+ * a valid pgno (i.e., not PGNO_BASE_MD).
+ *
+ * PUBLIC: int __db_dbopen __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, u_int32_t, int, db_pgno_t));
+ */
+int
+__db_dbopen(dbp, txn, name, subdb, flags, mode, meta_pgno)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+ int mode;
+ db_pgno_t meta_pgno;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t id;
+
+ dbenv = dbp->dbenv;
+ id = TXN_INVALID;
+ if (txn != NULL)
+ F_SET(dbp, DB_AM_TXN);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, name);
+ /*
+ * If name is NULL, it's always a create, so make sure that we
+ * have a type specified. It would be nice if this checking
+ * were done in __db_open where most of the interface checking
+ * is done, but this interface (__db_dbopen) is used by the
+ * recovery and limbo system, so we need to safeguard this
+ * interface as well.
+ */
+ if (name == NULL) {
+ F_SET(dbp, DB_AM_INMEM);
+
+ if (dbp->type == DB_UNKNOWN) {
+ __db_err(dbenv,
+ "DBTYPE of unknown without existing file");
+ return (EINVAL);
+ }
+
+ if (dbp->pgsize == 0)
+ dbp->pgsize = DB_DEF_IOSIZE;
+
+ /*
+ * If the file is a temporary file and we're doing locking,
+ * then we have to create a unique file ID. We can't use our
+ * normal dev/inode pair (or whatever this OS uses in place of
+ * dev/inode pairs) because no backing file will be created
+		 * until the mpool cache is filled, forcing the buffers to disk.
+ * Grab a random locker ID to use as a file ID. The created
+ * ID must never match a potential real file ID -- we know it
+ * won't because real file IDs contain a time stamp after the
+ * dev/inode pair, and we're simply storing a 4-byte value.
+ *
+ * !!!
+ * Store the locker in the file id structure -- we can get it
+ * from there as necessary, and it saves having two copies.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_id(dbenv,
+ (u_int32_t *)dbp->fileid)) != 0)
+ return (ret);
+ } else if (subdb == NULL && meta_pgno == PGNO_BASE_MD) {
+ /* Open/create the underlying file. Acquire locks. */
+ if ((ret =
+ __fop_file_setup(dbp, txn, name, mode, flags, &id)) != 0)
+ return (ret);
+ } else {
+ if ((ret = __fop_subdb_setup(dbp,
+ txn, name, subdb, mode, flags)) != 0)
+ return (ret);
+ meta_pgno = dbp->meta_pgno;
+ }
+
+ /*
+ * If we created the file, set the truncate flag for the mpool. This
+ * isn't for anything we've done, it's protection against stupid user
+ * tricks: if the user deleted a file behind Berkeley DB's back, we
+ * may still have pages in the mpool that match the file's "unique" ID.
+ *
+ * Note that if we're opening a subdatabase, we don't want to set
+ * the TRUNCATE flag even if we just created the file--we already
+ * opened and updated the master using access method interfaces,
+ * so we don't want to get rid of any pages that are in the mpool.
+ * If we created the file when we opened the master, we already hit
+	 * this check in a non-subdb context.
+ */
+ if (subdb == NULL && F_ISSET(dbp, DB_AM_CREATED))
+ LF_SET(DB_TRUNCATE);
+
+ /* Set up the underlying environment. */
+ if ((ret = __db_dbenv_setup(dbp, txn, name, id, flags)) != 0)
+ return (ret);
+
+ /*
+ * Set the open flag. We use it to mean that the dbp has gone
+ * through mpf setup, including dbreg_register. Also, below,
+ * the underlying access method open functions may want to do
+ * things like acquire cursors, so the open flag has to be set
+ * before calling them.
+ */
+ F_SET(dbp, DB_AM_OPEN_CALLED);
+
+ /*
+ * For unnamed files, we need to actually create the file now
+ * that the mpool is open.
+ */
+ if (name == NULL && (ret = __db_new_file(dbp, txn, NULL, NULL)) != 0)
+ return (ret);
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ ret = __bam_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_HASH:
+ ret = __ham_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_RECNO:
+ ret = __ram_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_QUEUE:
+ ret = __qam_open(dbp, txn, name, meta_pgno, mode, flags);
+ break;
+ case DB_UNKNOWN:
+ return (__db_unknown_type(dbenv, "__db_dbopen", dbp->type));
+ }
+ if (ret != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);
+
+ /*
+ * Unnamed files don't need handle locks, so we only have to check
+ * for a handle lock downgrade or lockevent in the case of named
+ * files.
+ */
+ if (!F_ISSET(dbp, DB_AM_RECOVER) &&
+ name != NULL && LOCK_ISSET(dbp->handle_lock)) {
+ if (txn != NULL) {
+ ret = __txn_lockevent(dbenv,
+ txn, dbp, &dbp->handle_lock, dbp->lid);
+ } else if (LOCKING_ON(dbenv))
+ /* Trade write handle lock for read handle lock. */
+ ret = __lock_downgrade(dbenv,
+ &dbp->handle_lock, DB_LOCK_READ, 0);
+ }
+DB_TEST_RECOVERY_LABEL
+err:
+ return (ret);
+}
+
+/*
+ * __db_new_file --
+ * Create a new database file.
+ *
+ * PUBLIC: int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__db_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_HASH:
+ ret = __ham_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_QUEUE:
+ ret = __qam_new_file(dbp, txn, fhp, name);
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "%s: Invalid type %d specified", name, dbp->type);
+ ret = EINVAL;
+ break;
+ }
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name);
+ /* Sync the file in preparation for moving it into place. */
+ if (ret == 0 && fhp != NULL)
+ ret = __os_fsync(dbp->dbenv, fhp);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+ return (ret);
+}
+
+/*
+ * __db_init_subdb --
+ * Initialize the dbp for a subdb.
+ *
+ * PUBLIC: int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *));
+ */
+int
+__db_init_subdb(mdbp, dbp, name, txn)
+ DB *mdbp, *dbp;
+ const char *name;
+ DB_TXN *txn;
+{
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ ret = 0;
+ if (!F_ISSET(dbp, DB_AM_CREATED)) {
+ /* Subdb exists; read meta-data page and initialize. */
+ mpf = mdbp->mpf;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ ret = __db_meta_setup(mdbp->dbenv, dbp, name, meta, 0, 0);
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * If __db_meta_setup found that the meta-page hadn't
+ * been written out during recovery, we can just return.
+ */
+ if (ret == ENOENT)
+ ret = 0;
+ goto err;
+ }
+
+ /* Handle the create case here. */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_HASH:
+ ret = __ham_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_QUEUE:
+ ret = EINVAL;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "Invalid subdatabase type %d specified", dbp->type);
+ return (EINVAL);
+ }
+
+err: return (ret);
+}
+
+/*
+ * __db_chk_meta --
+ * Take a buffer containing a meta-data page and check it for a checksum
+ * (and verify the checksum if necessary) and possibly decrypt it.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int));
+ */
+int
+__db_chk_meta(dbenv, dbp, meta, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBMETA *meta;
+ int do_metachk;
+{
+ int is_hmac, ret;
+ u_int8_t *chksum;
+
+ ret = 0;
+
+ if (FLD_ISSET(meta->metaflags, DBMETA_CHKSUM)) {
+ if (dbp != NULL)
+ F_SET(dbp, DB_AM_CHKSUM);
+
+ is_hmac = meta->encrypt_alg == 0 ? 0 : 1;
+ chksum = ((BTMETA *)meta)->chksum;
+ if (do_metachk && ((ret = __db_check_chksum(dbenv,
+ (DB_CIPHER *)dbenv->crypto_handle, chksum, meta,
+ DBMETASIZE, is_hmac)) != 0))
+ return (ret);
+ }
+
+#ifdef HAVE_CRYPTO
+ ret = __crypto_decrypt_meta(dbenv, dbp, (u_int8_t *)meta, do_metachk);
+#endif
+ return (ret);
+}
+
+/*
+ * __db_meta_setup --
+ *
+ * Take a buffer containing a meta-data page and figure out if it's
+ * valid, and if so, initialize the dbp from the meta-data page.
+ *
+ * PUBLIC: int __db_meta_setup __P((DB_ENV *,
+ * PUBLIC: DB *, const char *, DBMETA *, u_int32_t, int));
+ */
+int
+__db_meta_setup(dbenv, dbp, name, meta, oflags, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+ DBMETA *meta;
+ u_int32_t oflags;
+ int do_metachk;
+{
+ u_int32_t flags, magic;
+ int ret;
+
+ ret = 0;
+
+ /*
+ * Figure out what access method we're dealing with, and then
+ * call access method specific code to check error conditions
+ * based on conflicts between the found file and application
+ * arguments. A found file overrides some user information --
+ * we don't consider it an error, for example, if the user set
+ * an expected byte order and the found file doesn't match it.
+ */
+ F_CLR(dbp, DB_AM_SWAP);
+ magic = meta->magic;
+
+swap_retry:
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ case DB_RENAMEMAGIC:
+ break;
+ case 0:
+ /*
+ * The only time this should be 0 is if we're in the
+ * midst of opening a subdb during recovery and that
+ * subdatabase had its meta-data page allocated, but
+ * not yet initialized.
+ */
+ if (F_ISSET(dbp, DB_AM_SUBDB) && ((IS_RECOVERING(dbenv) &&
+ F_ISSET((DB_LOG *) dbenv->lg_handle, DBLOG_FORCE_OPEN)) ||
+ meta->pgno != PGNO_INVALID))
+ return (ENOENT);
+
+ goto bad_format;
+ default:
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ goto bad_format;
+
+ M_32_SWAP(magic);
+ F_SET(dbp, DB_AM_SWAP);
+ goto swap_retry;
+ }
+
+ /*
+ * We can only check the meta page if we are sure we have a meta page.
+ * If it is random data, then this check can fail. So only now can we
+ * checksum and decrypt. Don't distinguish between configuration and
+ * checksum match errors here, because we haven't opened the database
+ * and even a checksum error isn't a reason to panic the environment.
+ */
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, do_metachk)) != 0) {
+ if (ret == -1)
+ __db_err(dbenv,
+ "%s: metadata page checksum error", name);
+ goto bad_format;
+ }
+
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ flags = meta->flags;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(flags);
+ if (LF_ISSET(BTM_RECNO))
+ dbp->type = DB_RECNO;
+ else
+ dbp->type = DB_BTREE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __bam_metachk(dbp, name, (BTMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_HASHMAGIC:
+ dbp->type = DB_HASH;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __ham_metachk(dbp, name, (HMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_QAMMAGIC:
+ dbp->type = DB_QUEUE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __qam_metachk(dbp, name, (QMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_RENAMEMAGIC:
+ F_SET(dbp, DB_AM_IN_RENAME);
+ break;
+ }
+ return (0);
+
+bad_format:
+ __db_err(dbenv, "%s: unexpected file type or format", name);
+ return (ret == 0 ? EINVAL : ret);
+}
+
+/*
+ * __db_openchk --
+ * Interface error checking for open calls.
+ */
+static int
+__db_openchk(dbp, txn, name, subdb, type, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t ok_flags;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+#define OKFLAGS \
+ (DB_AUTO_COMMIT | DB_CREATE | DB_DIRTY_READ | DB_EXCL | \
+ DB_FCNTL_LOCKING | DB_NOMMAP | DB_RDONLY | DB_RDWRMASTER | \
+ DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN)
+ if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+ if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+
+#ifdef HAVE_VXWORKS
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv, "DB_TRUNCATE unsupported in VxWorks");
+ return (__db_eopnotsup(dbenv));
+ }
+#endif
+ switch (type) {
+ case DB_UNKNOWN:
+ if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
+ name);
+ return (EINVAL);
+ }
+ ok_flags = 0;
+ break;
+ case DB_BTREE:
+ ok_flags = DB_OK_BTREE;
+ break;
+ case DB_HASH:
+ ok_flags = DB_OK_HASH;
+ break;
+ case DB_QUEUE:
+ ok_flags = DB_OK_QUEUE;
+ break;
+ case DB_RECNO:
+ ok_flags = DB_OK_RECNO;
+ break;
+ default:
+ __db_err(dbenv, "unknown type: %lu", (u_long)type);
+ return (EINVAL);
+ }
+ if (ok_flags)
+ DB_ILLEGAL_METHOD(dbp, ok_flags);
+
+ /* The environment may have been created, but never opened. */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) {
+ __db_err(dbenv, "environment not yet opened");
+ return (EINVAL);
+ }
+
+ /*
+ * Historically, you could pass in an environment that didn't have a
+ * mpool, and DB would create a private one behind the scenes. This
+ * no longer works.
+ */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) {
+ __db_err(dbenv, "environment did not include a memory pool");
+ return (EINVAL);
+ }
+
+ /*
+ * You can't specify threads during DB->open if subsystems in the
+ * environment weren't configured with them.
+ */
+ if (LF_ISSET(DB_THREAD) &&
+ !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) {
+ __db_err(dbenv, "environment not created using DB_THREAD");
+ return (EINVAL);
+ }
+
+ /* DB_TRUNCATE is not transaction recoverable. */
+ if (LF_ISSET(DB_TRUNCATE) && txn != NULL) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with transaction specified");
+ return (EINVAL);
+ }
+
+ /* Subdatabase checks. */
+ if (subdb != NULL) {
+ /* Subdatabases must be created in named files. */
+ if (name == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+ return (EINVAL);
+ }
+
+ /* Truncate is a physical file operation */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with multiple databases");
+ return (EINVAL);
+ }
+
+ /* QAM can't be done as a subdatabase. */
+ if (type == DB_QUEUE) {
+ __db_err(dbenv, "Queue databases must be one-per-file");
+ return (EINVAL);
+ }
+ }
+
+ return (0);
+}
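
The subdatabase rules checked above translate into a fairly constrained calling pattern: a named file, a non-queue access method, and no DB_TRUNCATE. A sketch of an open that satisfies them follows, storing a named database inside a shared file and letting DB_AUTO_COMMIT wrap the create in its own transaction; it assumes a transactional environment, and the file and database names are hypothetical.

#include <db.h>

static int
open_subdb(DB_ENV *dbenv, DB **dbpp)
{
	DB *dbp;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);

	/* The non-NULL database name selects a subdatabase in the file. */
	if ((ret = dbp->open(dbp, NULL, "addressbook.db", "by_email",
	    DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0644)) != 0) {
		(void)dbp->close(dbp, 0);
		return (ret);
	}

	*dbpp = dbp;
	return (0);
}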
diff --git a/libdb/db/db_overflow.c b/libdb/db/db_overflow.c
new file mode 100644
index 0000000..36b959a
--- /dev/null
+++ b/libdb/db/db_overflow.c
@@ -0,0 +1,726 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_verify.h"
+
+/*
+ * Big key/data code.
+ *
+ * Big key and data entries are stored on linked lists of pages. The initial
+ * reference is a structure with the total length of the item and the page
+ * number where it begins. Each entry in the linked list contains a pointer
+ * to the next page of data, and so on.
+ */
+
+/*
+ * __db_goff --
+ * Get an offpage item.
+ *
+ * PUBLIC: int __db_goff __P((DB *, DBT *,
+ * PUBLIC: u_int32_t, db_pgno_t, void **, u_int32_t *));
+ */
+int
+__db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
+ DB *dbp;
+ DBT *dbt;
+ u_int32_t tlen;
+ db_pgno_t pgno;
+ void **bpp;
+ u_int32_t *bpsz;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t bytes;
+ u_int32_t curoff, needed, start;
+ u_int8_t *p, *src;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
+ /*
+ * Check if the buffer is big enough; if it is not and we are
+ * allowed to malloc space, then we'll malloc it. If we are
+ * not (DB_DBT_USERMEM), then we'll set the dbt and return
+ * appropriately.
+ */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ start = dbt->doff;
+ if (start > tlen)
+ needed = 0;
+ else if (dbt->dlen > tlen - start)
+ needed = tlen - start;
+ else
+ needed = dbt->dlen;
+ } else {
+ start = 0;
+ needed = tlen;
+ }
+
+ /* Allocate any necessary memory. */
+ if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (needed > dbt->ulen) {
+ dbt->size = needed;
+ return (ENOMEM);
+ }
+ } else if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_umalloc(dbenv, needed, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_urealloc(dbenv, needed, &dbt->data)) != 0)
+ return (ret);
+ } else if (*bpsz == 0 || *bpsz < needed) {
+ if ((ret = __os_realloc(dbenv, needed, bpp)) != 0)
+ return (ret);
+ *bpsz = needed;
+ dbt->data = *bpp;
+ } else
+ dbt->data = *bpp;
+
+ /*
+ * Step through the linked list of pages, copying the data on each
+ * one into the buffer. Never copy more than the total data length.
+ */
+ dbt->size = needed;
+ for (curoff = 0, p = dbt->data; pgno != PGNO_INVALID && needed > 0;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ /* Check if we need any bytes from this page. */
+ if (curoff + OV_LEN(h) >= start) {
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
+ bytes = OV_LEN(h);
+ if (start > curoff) {
+ src += start - curoff;
+ bytes -= start - curoff;
+ }
+ if (bytes > needed)
+ bytes = needed;
+ memcpy(p, src, bytes);
+ p += bytes;
+ needed -= bytes;
+ }
+ curoff += OV_LEN(h);
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ }
+ return (0);
+}
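
The DBT flag handling at the top of __db_goff is what makes partial reads of big items work from the application side: DB_DBT_PARTIAL selects a doff/dlen window and DB_DBT_USERMEM makes DB fill a caller-owned buffer, returning ENOMEM with size set if the buffer is too small. A minimal sketch of such a call, with hypothetical buffer and offset handling:

#include <string.h>
#include <db.h>

/* Read bufsz bytes of a (possibly overflow) record, starting at offset. */
static int
read_slice(DB *dbp, DBT *key, void *buf, u_int32_t bufsz, u_int32_t offset)
{
	DBT data;

	memset(&data, 0, sizeof(data));
	data.data = buf;
	data.ulen = bufsz;			/* caller-owned buffer size */
	data.doff = offset;			/* start of the requested window */
	data.dlen = bufsz;			/* length of the requested window */
	data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;

	/* On ENOMEM, data.size reports how much space was actually needed. */
	return (dbp->get(dbp, NULL, key, &data, 0));
}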
+
+/*
+ * __db_poff --
+ * Put an offpage item.
+ *
+ * PUBLIC: int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+ */
+int
+__db_poff(dbc, dbt, pgnop)
+ DBC *dbc;
+ const DBT *dbt;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBT tmp_dbt;
+ DB_LSN new_lsn, null_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep, *lastp;
+ db_indx_t pagespace;
+ u_int32_t sz;
+ u_int8_t *p;
+ int ret, t_ret;
+
+ /*
+ * Allocate pages and copy the key/data item into them. Calculate the
+ * number of bytes we get for pages we fill completely with a single
+ * item.
+ */
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ pagespace = P_MAXSPACE(dbp, dbp->pgsize);
+
+ ret = 0;
+ lastp = NULL;
+ for (p = dbt->data,
+ sz = dbt->size; sz > 0; p += pagespace, sz -= pagespace) {
+ /*
+ * Reduce pagespace so we terminate the loop correctly and
+ * don't copy too much data.
+ */
+ if (sz < pagespace)
+ pagespace = sz;
+
+ /*
+ * Allocate and initialize a new page and copy all or part of
+ * the item onto the page. If sz is less than pagespace, we
+ * have a partial record.
+ */
+ if ((ret = __db_new(dbc, P_OVERFLOW, &pagep)) != 0)
+ break;
+ if (DBC_LOGGING(dbc)) {
+ tmp_dbt.data = p;
+ tmp_dbt.size = pagespace;
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &new_lsn, 0, DB_ADD_BIG, PGNO(pagep),
+ lastp ? PGNO(lastp) : PGNO_INVALID,
+ PGNO_INVALID, &tmp_dbt, &LSN(pagep),
+ lastp == NULL ? &null_lsn : &LSN(lastp),
+ &null_lsn)) != 0) {
+ if (lastp != NULL)
+ (void)mpf->put(mpf,
+ lastp, DB_MPOOL_DIRTY);
+ lastp = pagep;
+ break;
+ }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move LSN onto page. */
+ if (lastp != NULL)
+ LSN(lastp) = new_lsn;
+ LSN(pagep) = new_lsn;
+
+ P_INIT(pagep, dbp->pgsize,
+ PGNO(pagep), PGNO_INVALID, PGNO_INVALID, 0, P_OVERFLOW);
+ OV_LEN(pagep) = pagespace;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(dbp), p, pagespace);
+
+ /*
+ * If this is the first entry, update the user's info.
+ * Otherwise, update the entry on the last page filled
+ * in and release that page.
+ */
+ if (lastp == NULL)
+ *pgnop = PGNO(pagep);
+ else {
+ lastp->next_pgno = PGNO(pagep);
+ pagep->prev_pgno = PGNO(lastp);
+ (void)mpf->put(mpf, lastp, DB_MPOOL_DIRTY);
+ }
+ lastp = pagep;
+ }
+ if (lastp != NULL &&
+ (t_ret = mpf->put(mpf, lastp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_ovref --
+ * Increment/decrement the reference count on an overflow page.
+ *
+ * PUBLIC: int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+ */
+int
+__db_ovref(dbc, pgno, adjust)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int32_t adjust;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
+ return (ret);
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_ovref_log(dbp,
+ dbc->txn, &LSN(h), 0, h->pgno, adjust, &LSN(h))) != 0) {
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+ OV_REF(h) += adjust;
+
+ (void)mpf->put(mpf, h, DB_MPOOL_DIRTY);
+ return (0);
+}
+
+/*
+ * __db_doff --
+ * Delete an offpage chain of overflow pages.
+ *
+ * PUBLIC: int __db_doff __P((DBC *, db_pgno_t));
+ */
+int
+__db_doff(dbc, pgno)
+ DBC *dbc;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ PAGE *pagep;
+ DB_LSN null_lsn;
+ DB_MPOOLFILE *mpf;
+ DBT tmp_dbt;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ do {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
+ return (ret);
+ }
+
+ DB_ASSERT(TYPE(pagep) == P_OVERFLOW);
+ /*
+ * If it's referenced by more than one key/data item,
+ * decrement the reference count and return.
+ */
+ if (OV_REF(pagep) > 1) {
+ (void)mpf->put(mpf, pagep, 0);
+ return (__db_ovref(dbc, pgno, -1));
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD(dbp);
+ tmp_dbt.size = OV_LEN(pagep);
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_REM_BIG,
+ PGNO(pagep), PREV_PGNO(pagep),
+ NEXT_PGNO(pagep), &tmp_dbt,
+ &LSN(pagep), &null_lsn, &null_lsn)) != 0) {
+ (void)mpf->put(mpf, pagep, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+ pgno = pagep->next_pgno;
+ if ((ret = __db_free(dbc, pagep)) != 0)
+ return (ret);
+ } while (pgno != PGNO_INVALID);
+
+ return (0);
+}
+
+/*
+ * __db_moff --
+ * Match on overflow pages.
+ *
+ * Given a starting page number and a key, set *cmpp to <0, 0 or >0 to
+ * indicate whether the specified key is less than, equal to or greater
+ * than the key stored on the overflow pages.  We optimize this by doing
+ * a chunk-at-a-time comparison unless the user has specified a comparison
+ * function, in which case we need to materialize the entire object and
+ * call their comparison routine.
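+ *
+ * For example, with the default lexicographic ordering, comparing a DBT
+ * holding "abc" against an overflow key "abd" leaves a negative value in
+ * *cmpp ('c' < 'd'), while comparing "abcd" against "abc" leaves 1
+ * because the DBT is longer than the stored key.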
+ *
+ * PUBLIC: int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int (*cmpfunc) __P((DB *, const DBT *, const DBT *)), *cmpp;
+{
+ DBT local_dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ void *buf;
+ u_int32_t bufsize, cmp_bytes, key_left;
+ u_int8_t *p1, *p2;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ /*
+ * If there is a user-specified comparison function, build a
+ * contiguous copy of the key, and call it.
+ */
+ if (cmpfunc != NULL) {
+ memset(&local_dbt, 0, sizeof(local_dbt));
+ buf = NULL;
+ bufsize = 0;
+
+ if ((ret = __db_goff(dbp,
+ &local_dbt, tlen, pgno, &buf, &bufsize)) != 0)
+ return (ret);
+ /* Pass the key as the first argument */
+ *cmpp = cmpfunc(dbp, dbt, &local_dbt);
+ __os_free(dbp->dbenv, buf);
+ return (0);
+ }
+
+	/* Compare while there is key data left and overflow pages remain. */
+ for (*cmpp = 0, p1 = dbt->data,
+ key_left = dbt->size; key_left > 0 && pgno != PGNO_INVALID;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ return (ret);
+
+ cmp_bytes = OV_LEN(pagep) < key_left ? OV_LEN(pagep) : key_left;
+ tlen -= cmp_bytes;
+ key_left -= cmp_bytes;
+ for (p2 = (u_int8_t *)pagep + P_OVERHEAD(dbp);
+ cmp_bytes-- > 0; ++p1, ++p2)
+ if (*p1 != *p2) {
+ *cmpp = (long)*p1 - (long)*p2;
+ break;
+ }
+ pgno = NEXT_PGNO(pagep);
+ if ((ret = mpf->put(mpf, pagep, 0)) != 0)
+ return (ret);
+ if (*cmpp != 0)
+ return (0);
+ }
+ if (key_left > 0) /* DBT is longer than the page key. */
+ *cmpp = 1;
+ else if (tlen > 0) /* DBT is shorter than the page key. */
+ *cmpp = -1;
+ else
+ *cmpp = 0;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_overflow --
+ * Verify overflow page.
+ *
+ * PUBLIC: int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__db_vrfy_overflow(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ pip->refcount = OV_REF(h);
+ if (pip->refcount < 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow page has zero reference count",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /* Just store for now. */
+ pip->olen = HOFFSET(h);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_ovfl_structure --
+ * Walk a list of overflow pages, avoiding cycles and marking
+ * pages seen.
+ *
+ * PUBLIC: int __db_vrfy_ovfl_structure
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t next, prev;
+ int isbad, p, ret, t_ret;
+ u_int32_t refcount;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+ isbad = 0;
+
+ /* This shouldn't happen, but just to be sure. */
+ if (!IS_VALID_PGNO(pgno))
+ return (DB_VERIFY_BAD);
+
+ /*
+ * Check the first prev_pgno; it ought to be PGNO_INVALID,
+ * since there's no prev page.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* The refcount is stored on the first overflow page. */
+ refcount = pip->refcount;
+
+ if (pip->type != P_OVERFLOW) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow page of invalid type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err; /* Unsafe to continue. */
+ }
+
+ prev = pip->prev_pgno;
+ if (prev != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first page in overflow chain has a prev_pgno %lu",
+ (u_long)pgno, (u_long)prev));
+ isbad = 1;
+ }
+
+ for (;;) {
+ /*
+ * This is slightly gross. Btree leaf pages reference
+ * individual overflow trees multiple times if the overflow page
+ * is the key to a duplicate set. The reference count does not
+ * reflect this multiple referencing. Thus, if this is called
+ * during the structure verification of a btree leaf page, we
+ * check to see whether we've seen it from a leaf page before
+ * and, if we have, adjust our count of how often we've seen it
+ * accordingly.
+ *
+ * (This will screw up if it's actually referenced--and
+ * correctly refcounted--from two different leaf pages, but
+ * that's a very unlikely brokenness that we're not checking for
+ * anyway.)
+ */
+
+ if (LF_ISSET(ST_OVFL_LEAF)) {
+ if (F_ISSET(pip, VRFY_OVFL_LEAFSEEN)) {
+ if ((ret =
+ __db_vrfy_pgset_dec(pgset, pgno)) != 0)
+ goto err;
+ } else
+ F_SET(pip, VRFY_OVFL_LEAFSEEN);
+ }
+
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+
+ /*
+ * We may have seen this elsewhere, if the overflow entry
+ * has been promoted to an internal page.
+ */
+ if ((u_int32_t)p > refcount) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: encountered twice in overflow traversal",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+ /* Keep a running tab on how much of the item we've seen. */
+ tlen -= pip->olen;
+
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ next = pip->next_pgno;
+
+ /* Are we there yet? */
+ if (next == PGNO_INVALID)
+ break;
+
+ /*
+ * We've already checked this when we saved it, but just
+ * to be sure...
+ */
+ if (!IS_VALID_PGNO(next)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad next_pgno %lu on overflow page",
+ (u_long)pgno, (u_long)next));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, next, &pip)) != 0)
+ return (ret);
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad prev_pgno %lu on overflow page (should be %lu)",
+ (u_long)next, (u_long)pip->prev_pgno,
+ (u_long)pgno));
+ isbad = 1;
+ /*
+ * It's safe to continue because we have separate
+ * cycle detection.
+ */
+ }
+
+ pgno = next;
+ }
+
+ if (tlen > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow item incomplete", (u_long)pgno));
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_safe_goff --
+ * Get an overflow item, very carefully, from an untrusted database,
+ * in the context of the salvager.
+ *
+ * PUBLIC: int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: DBT *, void **, u_int32_t));
+ */
+int
+__db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ DBT *dbt;
+ void **buf;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+ u_int32_t bytesgot, bytes;
+ u_int8_t *src, *dest;
+
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = t_ret = 0;
+ bytesgot = bytes = 0;
+
+ while ((pgno != PGNO_INVALID) && (IS_VALID_PGNO(pgno))) {
+ /*
+ * Mark that we're looking at this page; if we've seen it
+ * already, quit.
+ */
+ if ((ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ break;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ break;
+
+ /*
+ * Make sure it's really an overflow page, unless we're
+ * being aggressive, in which case we pretend it is.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE) && TYPE(h) != P_OVERFLOW) {
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
+ bytes = OV_LEN(h);
+
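+		/*
+		 * The database is untrusted:  if OV_LEN claims more data than
+		 * fits on a page, clamp the copy to the page size so we never
+		 * read past the end of the page buffer.
+		 */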
+ if (bytes + P_OVERHEAD(dbp) > dbp->pgsize)
+ bytes = dbp->pgsize - P_OVERHEAD(dbp);
+
+ if ((ret = __os_realloc(dbp->dbenv,
+ bytesgot + bytes, buf)) != 0)
+ break;
+
+ dest = (u_int8_t *)*buf + bytesgot;
+ bytesgot += bytes;
+
+ memcpy(dest, src, bytes);
+
+ pgno = NEXT_PGNO(h);
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ break;
+ h = NULL;
+ }
+
+ /*
+ * If we're being aggressive, salvage a partial datum if there
+ * was an error somewhere along the way.
+ */
+ if (ret == 0 || LF_ISSET(DB_AGGRESSIVE)) {
+ dbt->size = bytesgot;
+ dbt->data = *buf;
+ }
+
+ /* If we broke out on error, don't leave pages pinned. */
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/libdb/db/db_pr.c b/libdb/db/db_pr.c
new file mode 100644
index 0000000..4e91dc1
--- /dev/null
+++ b/libdb/db/db_pr.c
@@ -0,0 +1,1294 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_verify.h"
+
+static int __db_bmeta __P((DB *, FILE *, BTMETA *, u_int32_t));
+static int __db_hmeta __P((DB *, FILE *, HMETA *, u_int32_t));
+static void __db_meta __P((DB *, DBMETA *, FILE *, FN const *, u_int32_t));
+static const char *__db_pagetype_to_string __P((u_int32_t));
+static void __db_prdb __P((DB *, FILE *));
+static void __db_proff __P((void *, FILE *));
+static int __db_prtree __P((DB *, FILE *, u_int32_t));
+static int __db_qmeta __P((DB *, FILE *, QMETA *, u_int32_t));
+
+/*
+ * __db_loadme --
+ * A nice place to put a breakpoint.
+ *
+ * PUBLIC: void __db_loadme __P((void));
+ */
+void
+__db_loadme()
+{
+ u_int32_t id;
+
+ __os_id(&id);
+}
+
+/*
+ * __db_dump --
+ * Dump the tree to a file.
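+ *
+ *	The "op" string selects options:  'a' dumps the contents of every
+ *	page (DB_PR_PAGE); 'r' produces recovery-test output
+ *	(DB_PR_RECOVERYTEST), which suppresses the free list and skips
+ *	P_INVALID pages; 'h' is accepted but sets no extra flags, giving
+ *	the default page-header-only dump.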
+ *
+ * PUBLIC: int __db_dump __P((DB *, char *, char *));
+ */
+int
+__db_dump(dbp, op, name)
+ DB *dbp;
+ char *op, *name;
+{
+ FILE *fp;
+ u_int32_t flags;
+ int ret;
+
+ for (flags = 0; *op != '\0'; ++op)
+ switch (*op) {
+ case 'a':
+ LF_SET(DB_PR_PAGE);
+ break;
+ case 'h':
+ break;
+ case 'r':
+ LF_SET(DB_PR_RECOVERYTEST);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (name == NULL)
+ fp = stdout;
+ else {
+ if ((fp = fopen(name, "w")) == NULL)
+ return (__os_get_errno());
+ }
+
+ __db_prdb(dbp, fp);
+
+ fprintf(fp, "%s\n", DB_LINE);
+
+ ret = __db_prtree(dbp, fp, flags);
+
+ fflush(fp);
+ if (name != NULL)
+ fclose(fp);
+
+ return (ret);
+}
+
+/*
+ * __db_inmemdbflags --
+ * Call a callback for printing or other handling of strings associated
+ * with whatever in-memory DB structure flags are set.
+ *
+ * PUBLIC: void __db_inmemdbflags __P((u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *)));
+ */
+void
+__db_inmemdbflags(flags, cookie, callback)
+ u_int32_t flags;
+ void *cookie;
+ void (*callback) __P((u_int32_t, const FN *, void *));
+{
+ static const FN fn[] = {
+ { DB_AM_CHKSUM, "checksumming" },
+ { DB_AM_CL_WRITER, "client replica writer" },
+ { DB_AM_COMPENSATE, "created by compensating transaction" },
+ { DB_AM_CREATED, "database created" },
+ { DB_AM_CREATED_MSTR, "encompassing file created" },
+ { DB_AM_DBM_ERROR, "dbm/ndbm error" },
+ { DB_AM_DELIMITER, "variable length" },
+ { DB_AM_DIRTY, "dirty reads" },
+ { DB_AM_DISCARD, "discard cached pages" },
+ { DB_AM_DUP, "duplicates" },
+ { DB_AM_DUPSORT, "sorted duplicates" },
+ { DB_AM_ENCRYPT, "encrypted" },
+ { DB_AM_FIXEDLEN, "fixed-length records" },
+ { DB_AM_INMEM, "in-memory" },
+ { DB_AM_IN_RENAME, "file is being renamed" },
+ { DB_AM_OPEN_CALLED, "DB->open called" },
+ { DB_AM_PAD, "pad value" },
+ { DB_AM_PGDEF, "default page size" },
+ { DB_AM_RDONLY, "read-only" },
+ { DB_AM_RECNUM, "Btree record numbers" },
+ { DB_AM_RECOVER, "opened for recovery" },
+ { DB_AM_RENUMBER, "renumber" },
+ { DB_AM_REVSPLITOFF, "no reverse splits" },
+ { DB_AM_SECONDARY, "secondary" },
+ { DB_AM_SNAPSHOT, "load on open" },
+ { DB_AM_SUBDB, "subdatabases" },
+ { DB_AM_SWAP, "needswap" },
+ { DB_AM_TXN, "transactional" },
+ { DB_AM_VERIFYING, "verifier" },
+ { 0, NULL }
+ };
+
+ callback(flags, fn, cookie);
+}
+
+/*
+ * __db_prdb --
+ * Print out the DB structure information.
+ */
+static void
+__db_prdb(dbp, fp)
+ DB *dbp;
+ FILE *fp;
+{
+ BTREE *bt;
+ HASH *h;
+ QUEUE *q;
+
+ fprintf(fp,
+ "In-memory DB structure:\n%s: %#lx",
+ __db_dbtype_to_string(dbp->type), (u_long)dbp->flags);
+ __db_inmemdbflags(dbp->flags, fp, __db_prflags);
+ fprintf(fp, "\n");
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ bt = dbp->bt_internal;
+ fprintf(fp, "bt_meta: %lu bt_root: %lu\n",
+ (u_long)bt->bt_meta, (u_long)bt->bt_root);
+ fprintf(fp, "bt_maxkey: %lu bt_minkey: %lu\n",
+ (u_long)bt->bt_maxkey, (u_long)bt->bt_minkey);
+ fprintf(fp, "bt_compare: %#lx bt_prefix: %#lx\n",
+ P_TO_ULONG(bt->bt_compare), P_TO_ULONG(bt->bt_prefix));
+ fprintf(fp, "bt_lpgno: %lu\n", (u_long)bt->bt_lpgno);
+ if (dbp->type == DB_RECNO) {
+ fprintf(fp,
+ "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s\n",
+ (u_long)bt->re_pad, (u_long)bt->re_delim,
+ (u_long)bt->re_len,
+ bt->re_source == NULL ? "" : bt->re_source);
+ fprintf(fp, "re_modified: %d re_eof: %d re_last: %lu\n",
+ bt->re_modified, bt->re_eof, (u_long)bt->re_last);
+ }
+ break;
+ case DB_HASH:
+ h = dbp->h_internal;
+ fprintf(fp, "meta_pgno: %lu\n", (u_long)h->meta_pgno);
+ fprintf(fp, "h_ffactor: %lu\n", (u_long)h->h_ffactor);
+ fprintf(fp, "h_nelem: %lu\n", (u_long)h->h_nelem);
+ fprintf(fp, "h_hash: %#lx\n", P_TO_ULONG(h->h_hash));
+ break;
+ case DB_QUEUE:
+ q = dbp->q_internal;
+ fprintf(fp, "q_meta: %lu\n", (u_long)q->q_meta);
+ fprintf(fp, "q_root: %lu\n", (u_long)q->q_root);
+ fprintf(fp, "re_pad: %#lx re_len: %lu\n",
+ (u_long)q->re_pad, (u_long)q->re_len);
+ fprintf(fp, "rec_page: %lu\n", (u_long)q->rec_page);
+ fprintf(fp, "page_ext: %lu\n", (u_long)q->page_ext);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * __db_prtree --
+ * Print out the entire tree.
+ */
+static int
+__db_prtree(dbp, fp, flags)
+ DB *dbp;
+ FILE *fp;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t i, last;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ if (dbp->type == DB_QUEUE)
+ return (__db_prqueue(dbp, fp, flags));
+
+ /*
+ * Find out the page number of the last page in the database, then
+ * dump each page.
+ */
+ mpf->last_pgno(mpf, &last);
+ for (i = 0; i <= last; ++i) {
+ if ((ret = mpf->get(mpf, &i, 0, &h)) != 0)
+ return (ret);
+ (void)__db_prpage(dbp, h, fp, flags);
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_meta --
+ * Print out common metadata information.
+ */
+static void
+__db_meta(dbp, dbmeta, fp, fn, flags)
+ DB *dbp;
+ DBMETA *dbmeta;
+ FILE *fp;
+ FN const *fn;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int8_t *p;
+ int cnt, ret;
+ const char *sep;
+
+ mpf = dbp->mpf;
+
+ fprintf(fp, "\tmagic: %#lx\n", (u_long)dbmeta->magic);
+ fprintf(fp, "\tversion: %lu\n", (u_long)dbmeta->version);
+ fprintf(fp, "\tpagesize: %lu\n", (u_long)dbmeta->pagesize);
+ fprintf(fp, "\ttype: %lu\n", (u_long)dbmeta->type);
+ fprintf(fp, "\tkeys: %lu\trecords: %lu\n",
+ (u_long)dbmeta->key_count, (u_long)dbmeta->record_count);
+
+ if (!LF_ISSET(DB_PR_RECOVERYTEST)) {
+ /*
+		 * If we're doing recovery testing, don't display the free
+		 * list; it may have changed, and that would make the dump
+		 * diffs fail to match.
+ */
+ fprintf(fp, "\tfree list: %lu", (u_long)dbmeta->free);
+ for (pgno = dbmeta->free,
+ cnt = 0, sep = ", "; pgno != PGNO_INVALID;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ fprintf(fp,
+ "Unable to retrieve free-list page: %lu: %s\n",
+ (u_long)pgno, db_strerror(ret));
+ break;
+ }
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ fprintf(fp, "%s%lu", sep, (u_long)pgno);
+ if (++cnt % 10 == 0) {
+ fprintf(fp, "\n");
+ cnt = 0;
+ sep = "\t";
+ } else
+ sep = ", ";
+ }
+ fprintf(fp, "\n");
+ fprintf(fp, "\tlast_pgno: %lu\n", (u_long)dbmeta->last_pgno);
+ }
+
+ if (fn != NULL) {
+ fprintf(fp, "\tflags: %#lx", (u_long)dbmeta->flags);
+ __db_prflags(dbmeta->flags, fn, fp);
+ fprintf(fp, "\n");
+ }
+
+ fprintf(fp, "\tuid: ");
+ for (p = (u_int8_t *)dbmeta->uid,
+ cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) {
+ fprintf(fp, "%x", *p++);
+ if (cnt < DB_FILE_ID_LEN - 1)
+ fprintf(fp, " ");
+ }
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_bmeta --
+ * Print out the btree meta-data page.
+ */
+static int
+__db_bmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ BTMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "btree:recnum" },
+ { BTM_FIXEDLEN, "recno:fixed-length" },
+ { BTM_RENUMBER, "recno:renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmaxkey: %lu minkey: %lu\n",
+ (u_long)h->maxkey, (u_long)h->minkey);
+ if (dbp->type == DB_RECNO)
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\troot: %lu\n", (u_long)h->root);
+
+ return (0);
+}
+
+/*
+ * __db_hmeta --
+ * Print out the hash meta-data page.
+ */
+static int
+__db_hmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ HMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ int i;
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmax_bucket: %lu\n", (u_long)h->max_bucket);
+ fprintf(fp, "\thigh_mask: %#lx\n", (u_long)h->high_mask);
+ fprintf(fp, "\tlow_mask: %#lx\n", (u_long)h->low_mask);
+ fprintf(fp, "\tffactor: %lu\n", (u_long)h->ffactor);
+ fprintf(fp, "\tnelem: %lu\n", (u_long)h->nelem);
+ fprintf(fp, "\th_charkey: %#lx\n", (u_long)h->h_charkey);
+ fprintf(fp, "\tspare points: ");
+ for (i = 0; i < NCACHED; i++)
+ fprintf(fp, "%lu ", (u_long)h->spares[i]);
+ fprintf(fp, "\n");
+
+ return (0);
+}
+
+/*
+ * __db_qmeta --
+ * Print out the queue meta-data page.
+ */
+static int
+__db_qmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ QMETA *h;
+ u_int32_t flags;
+{
+ __db_meta(dbp, (DBMETA *)h, fp, NULL, flags);
+
+ fprintf(fp, "\tfirst_recno: %lu\n", (u_long)h->first_recno);
+ fprintf(fp, "\tcur_recno: %lu\n", (u_long)h->cur_recno);
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\trec_page: %lu\n", (u_long)h->rec_page);
+ fprintf(fp, "\tpage_ext: %lu\n", (u_long)h->page_ext);
+
+ return (0);
+}
+
+/*
+ * __db_prnpage
+ * -- Print out a specific page.
+ *
+ * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t, FILE *));
+ */
+int
+__db_prnpage(dbp, pgno, fp)
+ DB *dbp;
+ db_pgno_t pgno;
+ FILE *fp;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ ret = __db_prpage(dbp, h, fp, DB_PR_PAGE);
+
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_prpage
+ * -- Print out a page.
+ *
+ * PUBLIC: int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t));
+ */
+int
+__db_prpage(dbp, h, fp, flags)
+ DB *dbp;
+ PAGE *h;
+ FILE *fp;
+ u_int32_t flags;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ HOFFPAGE a_hkd;
+ QAMDATA *qp, *qep;
+ RINTERNAL *ri;
+ db_indx_t dlen, len, i, *inp;
+ db_pgno_t pgno;
+ db_recno_t recno;
+ u_int32_t pagesize, qlen;
+ u_int8_t *ep, *hk, *p;
+ int deleted, ret;
+ const char *s;
+ void *sp;
+
+ /*
+ * If we're doing recovery testing and this page is P_INVALID,
+ * assume it's a page that's on the free list, and don't display it.
+ */
+ if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID)
+ return (0);
+
+ s = __db_pagetype_to_string(TYPE(h));
+ if (s == NULL) {
+ fprintf(fp, "ILLEGAL PAGE TYPE: page: %lu type: %lu\n",
+ (u_long)h->pgno, (u_long)TYPE(h));
+ return (1);
+ }
+
+ /*
+ * !!!
+	 * Find out the page size.  We don't want to do it the "right" way,
+	 * by reading the value from the meta-data page, because that would
+	 * be slow.  Reach down into the mpool region instead.
+ */
+ pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize;
+
+ /* Page number, page type. */
+ fprintf(fp, "page %lu: %s level: %lu",
+ (u_long)h->pgno, s, (u_long)h->level);
+
+ /* Record count. */
+ if (TYPE(h) == P_IBTREE ||
+ TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO &&
+ h->pgno == ((BTREE *)dbp->bt_internal)->bt_root))
+ fprintf(fp, " records: %lu", (u_long)RE_NREC(h));
+
+ /* LSN. */
+ if (!LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ return (__db_bmeta(dbp, fp, (BTMETA *)h, flags));
+ case P_HASHMETA:
+ return (__db_hmeta(dbp, fp, (HMETA *)h, flags));
+ case P_QAMMETA:
+ return (__db_qmeta(dbp, fp, (QMETA *)h, flags));
+ case P_QAMDATA: /* Should be meta->start. */
+ if (!LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ qlen = ((QUEUE *)dbp->q_internal)->re_len;
+ recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1;
+ i = 0;
+ qep = (QAMDATA *)((u_int8_t *)h + pagesize - qlen);
+ for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep;
+ recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) {
+ if (!F_ISSET(qp, QAM_SET))
+ continue;
+
+ fprintf(fp, "%s",
+ F_ISSET(qp, QAM_VALID) ? "\t" : " D");
+ fprintf(fp, "[%03lu] %4lu ", (u_long)recno,
+ (u_long)((u_int8_t *)qp - (u_int8_t *)h));
+ __db_pr(qp->data, qlen, fp);
+ }
+ return (0);
+ }
+
+ /* LSN. */
+ if (LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ s = "\t";
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ fprintf(fp, "%sprev: %4lu next: %4lu",
+ s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h));
+ s = " ";
+ }
+ if (TYPE(h) == P_OVERFLOW) {
+ fprintf(fp, "%sref cnt: %4lu ", s, (u_long)OV_REF(h));
+ __db_pr((u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h), fp);
+ return (0);
+ }
+ fprintf(fp, "%sentries: %4lu", s, (u_long)NUM_ENT(h));
+ fprintf(fp, " offset: %4lu\n", (u_long)HOFFSET(h));
+
+ if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ ret = 0;
+ inp = P_INP(dbp, h);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if ((db_alignp_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) <
+ (db_alignp_t)(P_OVERHEAD(dbp)) ||
+ (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) {
+ fprintf(fp,
+ "ILLEGAL PAGE OFFSET: indx: %lu of %lu\n",
+ (u_long)i, (u_long)inp[i]);
+ ret = EINVAL;
+ continue;
+ }
+ deleted = 0;
+ switch (TYPE(h)) {
+ case P_HASH:
+ case P_IBTREE:
+ case P_IRECNO:
+ sp = P_ENTRY(dbp, h, i);
+ break;
+ case P_LBTREE:
+ sp = P_ENTRY(dbp, h, i);
+ deleted = i % 2 == 0 &&
+ B_DISSET(GET_BKEYDATA(dbp, h, i + O_INDX)->type);
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ sp = P_ENTRY(dbp, h, i);
+ deleted = B_DISSET(GET_BKEYDATA(dbp, h, i)->type);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL PAGE ITEM: %lu\n", (u_long)TYPE(h));
+ ret = EINVAL;
+ continue;
+ }
+ fprintf(fp, "%s", deleted ? " D" : "\t");
+ fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]);
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = sp;
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&pgno,
+ HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ fprintf(fp,
+ "%4lu [offpage dups]\n", (u_long)pgno);
+ break;
+ case H_DUPLICATE:
+ /*
+ * If this is the first item on a page, then
+ * we cannot figure out how long it is, so
+ * we only print the first one in the duplicate
+ * set.
+ */
+ if (i != 0)
+ len = LEN_HKEYDATA(dbp, h, 0, i);
+ else
+ len = 1;
+
+ fprintf(fp, "Duplicates:\n");
+ for (p = HKEYDATA_DATA(hk),
+ ep = p + len; p < ep;) {
+ memcpy(&dlen, p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ fprintf(fp, "\t\t");
+ __db_pr(p, dlen, fp);
+ p += sizeof(db_indx_t) + dlen;
+ }
+ break;
+ case H_KEYDATA:
+ __db_pr(HKEYDATA_DATA(hk),
+ LEN_HKEYDATA(dbp, h, i == 0 ?
+ pagesize : 0, i), fp);
+ break;
+ case H_OFFPAGE:
+ memcpy(&a_hkd, hk, HOFFPAGE_SIZE);
+ fprintf(fp,
+ "overflow: total len: %4lu page: %4lu\n",
+ (u_long)a_hkd.tlen, (u_long)a_hkd.pgno);
+ break;
+ }
+ break;
+ case P_IBTREE:
+ bi = sp;
+ fprintf(fp, "count: %4lu pgno: %4lu type: %4lu",
+ (u_long)bi->nrecs, (u_long)bi->pgno,
+ (u_long)bi->type);
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ __db_pr(bi->data, bi->len, fp);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bi->data, fp);
+ break;
+ default:
+ fprintf(fp, "ILLEGAL BINTERNAL TYPE: %lu\n",
+ (u_long)B_TYPE(bi->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ case P_IRECNO:
+ ri = sp;
+ fprintf(fp, "entries %4lu pgno %4lu\n",
+ (u_long)ri->nrecs, (u_long)ri->pgno);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = sp;
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ __db_pr(bk->data, bk->len, fp);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bk, fp);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu\n",
+ (u_long)B_TYPE(bk->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ }
+ }
+ (void)fflush(fp);
+ return (ret);
+}
+
+/*
+ * __db_pr --
+ * Print out a data element.
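+ *	Prints the length and at most the first 20 bytes of the data;
+ *	non-printable bytes are shown as 0xNN and longer items are
+ *	truncated with "...".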
+ *
+ * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t, FILE *));
+ */
+void
+__db_pr(p, len, fp)
+ u_int8_t *p;
+ u_int32_t len;
+ FILE *fp;
+{
+ u_int lastch;
+ int i;
+
+ fprintf(fp, "len: %3lu", (u_long)len);
+ lastch = '.';
+ if (len != 0) {
+ fprintf(fp, " data: ");
+ for (i = len <= 20 ? len : 20; i > 0; --i, ++p) {
+ lastch = *p;
+ if (isprint((int)*p) || *p == '\n')
+ fprintf(fp, "%c", *p);
+ else
+ fprintf(fp, "0x%.2x", (u_int)*p);
+ }
+ if (len > 20) {
+ fprintf(fp, "...");
+ lastch = '.';
+ }
+ }
+ if (lastch != '\n')
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_prdbt --
+ * Print out a DBT data element.
+ *
+ * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *,
+ * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *));
+ */
+int
+__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
+ DBT *dbtp;
+ int checkprint;
+ const char *prefix;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ int is_recno;
+ VRFY_DBINFO *vdp;
+{
+ static const char hex[] = "0123456789abcdef";
+ db_recno_t recno;
+ u_int32_t len;
+ int ret;
+#define DBTBUFLEN 100
+ char *p, *hp, buf[DBTBUFLEN], hbuf[DBTBUFLEN];
+
+ if (vdp != NULL) {
+ /*
+ * If vdp is non-NULL, we might be the first key in the
+ * "fake" subdatabase used for key/data pairs we can't
+ * associate with a known subdb.
+ *
+ * Check and clear the SALVAGE_PRINTHEADER flag; if
+ * it was set, print a subdatabase header.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTHEADER))
+ (void)__db_prheader(NULL, "__OTHER__", 0, 0,
+ handle, callback, vdp, 0);
+ F_CLR(vdp, SALVAGE_PRINTHEADER);
+ F_SET(vdp, SALVAGE_PRINTFOOTER);
+
+ /*
+ * Even if the printable flag wasn't set by our immediate
+ * caller, it may be set on a salvage-wide basis.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ checkprint = 1;
+ }
+
+ /*
+ * !!!
+ * This routine is the routine that dumps out items in the format
+ * used by db_dump(1) and db_load(1). This means that the format
+ * cannot change.
+ */
+ if (prefix != NULL && (ret = callback(handle, prefix)) != 0)
+ return (ret);
+ if (is_recno) {
+ /*
+ * We're printing a record number, and this has to be done
+ * in a platform-independent way. So we use the numeral in
+ * straight ASCII.
+ */
+ (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno));
+ snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno);
+
+ /* If we're printing data as hex, print keys as hex too. */
+ if (!checkprint) {
+ for (len = (u_int32_t)strlen(buf), p = buf, hp = hbuf;
+ len-- > 0; ++p) {
+ *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4];
+ *hp++ = hex[*p & 0x0f];
+ }
+ *hp = '\0';
+ ret = callback(handle, hbuf);
+ } else
+ ret = callback(handle, buf);
+
+ if (ret != 0)
+ return (ret);
+ } else if (checkprint) {
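+		/*
+		 * Printable dump:  pass printable characters through,
+		 * escaping backslashes, and emit each non-printable byte as
+		 * a backslash followed by two hex digits.
+		 */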
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ if (isprint((int)*p)) {
+ if (*p == '\\' &&
+ (ret = callback(handle, "\\")) != 0)
+ return (ret);
+ snprintf(buf, DBTBUFLEN, "%c", *p);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ } else {
+ snprintf(buf, DBTBUFLEN, "\\%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+ } else
+ for (len = dbtp->size, p = dbtp->data; len--; ++p) {
+ snprintf(buf, DBTBUFLEN, "%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+
+ return (callback(handle, "\n"));
+}
+
+/*
+ * __db_proff --
+ * Print out an off-page element.
+ */
+static void
+__db_proff(vp, fp)
+ void *vp;
+ FILE *fp;
+{
+ BOVERFLOW *bo;
+
+ bo = vp;
+ switch (B_TYPE(bo->type)) {
+ case B_OVERFLOW:
+ fprintf(fp, "overflow: total len: %4lu page: %4lu\n",
+ (u_long)bo->tlen, (u_long)bo->pgno);
+ break;
+ case B_DUPLICATE:
+ fprintf(fp, "duplicate: page: %4lu\n", (u_long)bo->pgno);
+ break;
+ }
+}
+
+/*
+ * __db_prflags --
+ * Print out flags values.
+ *
+ * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, void *));
+ */
+void
+__db_prflags(flags, fn, vfp)
+ u_int32_t flags;
+ FN const *fn;
+ void *vfp;
+{
+ FILE *fp;
+ const FN *fnp;
+ int found;
+ const char *sep;
+
+ /*
+ * We pass the FILE * through a void * so that we can use
+	 * this function as a callback.
+ */
+ fp = (FILE *)vfp;
+
+ sep = " (";
+ for (found = 0, fnp = fn; fnp->mask != 0; ++fnp)
+ if (LF_ISSET(fnp->mask)) {
+ fprintf(fp, "%s%s", sep, fnp->name);
+ sep = ", ";
+ found = 1;
+ }
+ if (found)
+ fprintf(fp, ")");
+}
+
+/*
+ * __db_dbtype_to_string --
+ * Return the name of the database type.
+ * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE));
+ */
+const char *
+__db_dbtype_to_string(type)
+ DBTYPE type;
+{
+ switch (type) {
+ case DB_BTREE:
+ return ("btree");
+ case DB_HASH:
+ return ("hash");
+ case DB_RECNO:
+ return ("recno");
+ case DB_QUEUE:
+ return ("queue");
+ default:
+ return ("UNKNOWN TYPE");
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __db_pagetype_to_string --
+ * Return the name of the specified page type.
+ */
+static const char *
+__db_pagetype_to_string(type)
+ u_int32_t type;
+{
+ char *s;
+
+ s = NULL;
+ switch (type) {
+ case P_BTREEMETA:
+ s = "btree metadata";
+ break;
+ case P_LDUP:
+ s = "duplicate";
+ break;
+ case P_HASH:
+ s = "hash";
+ break;
+ case P_HASHMETA:
+ s = "hash metadata";
+ break;
+ case P_IBTREE:
+ s = "btree internal";
+ break;
+ case P_INVALID:
+ s = "invalid";
+ break;
+ case P_IRECNO:
+ s = "recno internal";
+ break;
+ case P_LBTREE:
+ s = "btree leaf";
+ break;
+ case P_LRECNO:
+ s = "recno leaf";
+ break;
+ case P_OVERFLOW:
+ s = "overflow";
+ break;
+ case P_QAMMETA:
+ s = "queue metadata";
+ break;
+ case P_QAMDATA:
+ s = "queue";
+ break;
+ default:
+ /* Just return a NULL. */
+ break;
+ }
+ return (s);
+}
+
+/*
+ * __db_prheader --
+ * Write out header information in the format expected by db_load.
+ *
+ * PUBLIC: int __db_prheader __P((DB *, char *, int, int, void *,
+ * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
+ DB *dbp;
+ char *subname;
+ int pflag, keyflag;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_ENV *dbenv;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ DBT dbt;
+ VRFY_PAGEINFO *pip;
+ char *buf;
+ int buflen, ret, t_ret;
+ u_int32_t dbtype;
+
+ btsp = NULL;
+ hsp = NULL;
+ qsp = NULL;
+ ret = 0;
+ buf = NULL;
+ COMPQUIET(buflen, 0);
+
+ if (dbp == NULL)
+ dbenv = NULL;
+ else
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we've been passed a verifier statistics object, use
+ * that; we're being called in a context where dbp->stat
+ * is unsafe.
+ *
+ * Also, the verifier may set the pflag on a per-salvage basis.
+ * If so, respect that.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ return (ret);
+
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ pflag = 1;
+ } else
+ pip = NULL;
+
+ /*
+ * If dbp is NULL, we're being called from inside __db_prdbt,
+ * and this is a special subdatabase for "lost" items. Make it a btree.
+ * Otherwise, set dbtype to the appropriate type for the specified
+ * meta page, or the type of the dbp.
+ */
+ if (dbp == NULL)
+ dbtype = DB_BTREE;
+ else if (pip != NULL)
+ switch (pip->type) {
+ case P_BTREEMETA:
+ if (F_ISSET(pip, VRFY_IS_RECNO))
+ dbtype = DB_RECNO;
+ else
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ default:
+ /*
+ * If the meta page is of a bogus type, it's
+ * because we have a badly corrupt database.
+ * (We must be in the verifier for pip to be non-NULL.)
+ * Pretend we're a Btree and salvage what we can.
+ */
+ DB_ASSERT(F_ISSET(dbp, DB_AM_VERIFYING));
+ dbtype = DB_BTREE;
+ break;
+ }
+ else
+ dbtype = dbp->type;
+
+ if ((ret = callback(handle, "VERSION=3\n")) != 0)
+ goto err;
+ if (pflag) {
+ if ((ret = callback(handle, "format=print\n")) != 0)
+ goto err;
+ } else if ((ret = callback(handle, "format=bytevalue\n")) != 0)
+ goto err;
+
+ /*
+ * 64 bytes is long enough, as a minimum bound, for any of the
+ * fields besides subname. Subname uses __db_prdbt and therefore
+ * does not need buffer space here.
+ */
+ buflen = 64;
+ if ((ret = __os_malloc(dbenv, buflen, &buf)) != 0)
+ goto err;
+ if (subname != NULL) {
+ snprintf(buf, buflen, "database=");
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = subname;
+ dbt.size = (u_int32_t)strlen(subname);
+ if ((ret = __db_prdbt(&dbt,
+ 1, NULL, handle, callback, 0, NULL)) != 0)
+ goto err;
+ }
+ switch (dbtype) {
+ case DB_BTREE:
+ if ((ret = callback(handle, "type=btree\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS))
+ if ((ret =
+ callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (pip->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)pip->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->bt_minkey != 0 &&
+ pip->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)pip->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ if ((ret = callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (btsp->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)btsp->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_minkey != 0 && btsp->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)btsp->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = callback(handle, "type=hash\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (pip->h_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)pip->h_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->h_nelem != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)pip->h_nelem);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &hsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (hsp->hash_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)hsp->hash_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (hsp->hash_nkeys != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)hsp->hash_nkeys);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = callback(handle, "type=queue\n")) != 0)
+ goto err;
+ if (vdp != NULL) {
+ snprintf(buf,
+ buflen, "re_len=%lu\n", (u_long)vdp->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &qsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ snprintf(buf, buflen, "re_len=%lu\n", (u_long)qsp->qs_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (qsp->qs_extentsize != 0) {
+ snprintf(buf, buflen,
+ "extentsize=%lu\n", (u_long)qsp->qs_extentsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_RECNO:
+ if ((ret = callback(handle, "type=recno\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_IS_RRECNO))
+ if ((ret =
+ callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (pip->re_len > 0) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)pip->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ if ((ret = callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)btsp->bt_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_re_pad != 0 && btsp->bt_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", btsp->bt_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_UNKNOWN:
+ DB_ASSERT(0); /* Impossible. */
+ __db_err(dbp->dbenv, "Impossible DB type in __db_prheader");
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_DUPS))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(pip, VRFY_HAS_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ /* We should handle page size. XXX */
+ } else {
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ if ((ret = callback(handle, "chksum=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_DUP))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ if (!F_ISSET(dbp, DB_AM_PGDEF)) {
+ snprintf(buf, buflen,
+ "db_pagesize=%lu\n", (u_long)dbp->pgsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ }
+
+ if (keyflag && (ret = callback(handle, "keys=1\n")) != 0)
+ goto err;
+
+ ret = callback(handle, "HEADER=END\n");
+
+err: if (pip != NULL &&
+ (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (btsp != NULL)
+ __os_ufree(dbenv, btsp);
+ if (hsp != NULL)
+ __os_ufree(dbenv, hsp);
+ if (qsp != NULL)
+ __os_ufree(dbenv, qsp);
+ if (buf != NULL)
+ __os_free(dbenv, buf);
+
+ return (ret);
+}
+
+/*
+ * __db_prfooter --
+ * Print the footer that marks the end of a DB dump. This is trivial,
+ * but for consistency's sake we don't want to put its literal contents
+ * in multiple places.
+ *
+ * PUBLIC: int __db_prfooter __P((void *, int (*)(void *, const void *)));
+ */
+int
+__db_prfooter(handle, callback)
+ void *handle;
+ int (*callback) __P((void *, const void *));
+{
+ return (callback(handle, "DATA=END\n"));
+}
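+
+/*
+ * Taken together, __db_prheader, __db_prdbt and __db_prfooter emit the
+ * dump format consumed by db_load(1).  A minimal bytevalue dump of a
+ * btree holding the single pair "key"/"value" looks roughly like the
+ * following, with whatever per-record prefix the caller supplies:
+ *
+ *	VERSION=3
+ *	format=bytevalue
+ *	type=btree
+ *	HEADER=END
+ *	 6b6579
+ *	 76616c7565
+ *	DATA=END
+ */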
diff --git a/libdb/db/db_rec.c b/libdb/db/db_rec.c
new file mode 100644
index 0000000..470df0f
--- /dev/null
+++ b/libdb/db/db_rec.c
@@ -0,0 +1,897 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
+
+/*
+ * PUBLIC: int __db_addrem_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * This log message is generated whenever we add or remove a duplicate
+ * to/from a duplicate page.  On recovery, we just do the opposite.
+ */
+int
+__db_addrem_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addrem_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_addrem_print);
+ REC_INTRO(__db_addrem_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
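+	/*
+	 * cmp_p == 0 means the page still carries the LSN it had before this
+	 * operation was logged, so the change is absent and may need to be
+	 * redone; cmp_n == 0 means the page carries this log record's LSN,
+	 * so the change is present and may need to be undone.
+	 */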
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_DUP)) {
+
+ /* Need to redo an add, or undo a delete. */
+ if ((ret = __db_pitem(dbc, pagep, argp->indx, argp->nbytes,
+ argp->hdr.size == 0 ? NULL : &argp->hdr,
+ argp->dbt.size == 0 ? NULL : &argp->dbt)) != 0)
+ goto out;
+
+ change = DB_MPOOL_DIRTY;
+
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_DUP)) {
+ /* Need to undo an add, or redo a delete. */
+ if ((ret = __db_ditem(dbc,
+ pagep, argp->indx, argp->nbytes)) != 0)
+ goto out;
+ change = DB_MPOOL_DIRTY;
+ }
+
+ if (change) {
+ if (DB_REDO(op))
+ LSN(pagep) = *lsnp;
+ else
+ LSN(pagep) = argp->pagelsn;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * PUBLIC: int __db_big_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_big_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_big_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_big_print);
+ REC_INTRO(__db_big_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * There are three pages we need to check. The one on which we are
+ * adding data, the previous one whose next_pointer may have
+ * been updated, and the next one whose prev_pointer may have
+ * been updated.
+ */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
+ /* We are either redo-ing an add, or undoing a delete. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, argp->prev_pgno,
+ argp->next_pgno, 0, P_OVERFLOW);
+ OV_LEN(pagep) = argp->dbt.size;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(file_dbp), argp->dbt.data,
+ argp->dbt.size);
+ PREV_PGNO(pagep) = argp->prev_pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) {
+ /*
+ * We are either undo-ing an add or redo-ing a delete.
+ * The page is about to be reclaimed in either case, so
+ * there really isn't anything to do here.
+ */
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /*
+	 * We only delete whole chains of overflow pages;
+	 * each page in the chain is handled individually.
+ */
+ if (argp->opcode == DB_REM_BIG)
+ goto done;
+
+ /* Now check the previous page. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto npage;
+ } else
+ if ((ret = mpf->get(mpf, &argp->prev_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+
+ if (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) {
+ /* Redo add, undo delete. */
+ NEXT_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 &&
+ DB_UNDO(op) && argp->opcode == DB_ADD_BIG) {
+ /* Redo delete, undo add. */
+ NEXT_PGNO(pagep) = argp->next_pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ }
+ pagep = NULL;
+
+ /* Now check the next page. Can only be set on a delete. */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = mpf->get(mpf, &argp->next_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ PREV_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ }
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_ovref_recover --
+ * Recovery function for __db_ovref().
+ *
+ * PUBLIC: int __db_ovref_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_ovref_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_ovref_print);
+ REC_INTRO(__db_ovref_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp, &LSN(pagep), &argp->lsn);
+ if (cmp == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ OV_REF(pagep) += argp->adjust;
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ OV_REF(pagep) -= argp->adjust;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_relink_recover --
+ * Recovery function for relink.
+ *
+ * PUBLIC: int __db_relink_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_relink_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_relink_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_relink_print);
+ REC_INTRO(__db_relink_read, 1);
+
+ /*
+ * There are up to three pages we need to check -- the page, and the
+ * previous and next pages, if they existed. For a page add operation,
+ * the current page is the result of a split and is being recovered
+ * elsewhere, so all we need do is recover the next page.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ goto next2;
+ }
+ modified = 0;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto next1;
+
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->next;
+ pagep->prev_pgno = argp->prev;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+next1: if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+next2: if ((ret = mpf->get(mpf, &argp->next, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->next, ret);
+ goto out;
+ }
+ goto prev;
+ }
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_next);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next);
+ if ((argp->opcode == DB_REM_PAGE && cmp_p == 0 && DB_REDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_n == 0 && DB_UNDO(op))) {
+ /* Redo the remove or undo the add. */
+ pagep->prev_pgno = argp->prev;
+
+ modified = 1;
+ } else if ((argp->opcode == DB_REM_PAGE && cmp_n == 0 && DB_UNDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_p == 0 && DB_REDO(op))) {
+ /* Undo the remove or redo the add. */
+ pagep->prev_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_next;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto done;
+
+prev: if ((ret = mpf->get(mpf, &argp->prev, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->prev, ret);
+ goto out;
+ }
+ goto done;
+ }
+ modified = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->next_pgno = argp->next;
+
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_prev;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_debug_recover --
+ * Recovery function for debug.
+ *
+ * PUBLIC: int __db_debug_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_debug_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_debug_args *argp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__db_debug_print);
+ REC_NOOP_INTRO(__db_debug_read);
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __db_noop_recover --
+ * Recovery function for noop.
+ *
+ * PUBLIC: int __db_noop_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_noop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_noop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_noop_print);
+ REC_INTRO(__db_noop_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ change = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ LSN(pagep) = argp->prevlsn;
+ change = DB_MPOOL_DIRTY;
+ }
+ ret = mpf->put(mpf, pagep, change);
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __db_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, created, level, modified, ret;
+
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_alloc_print);
+ REC_INTRO(__db_pg_alloc_read, 0);
+
+ /*
+ * Fix up the allocated page. If we're redoing the operation, we have
+ * to get the page (creating it if it doesn't exist), and update its
+ * LSN. If we're undoing the operation, we have to reset the page's
+ * LSN and put it on the free list.
+ *
+ * Fix up the metadata page. If we're redoing the operation, we have
+ * to get the metadata page and update its LSN and its free pointer.
+ * If we're undoing the operation and the page was ever created, we put
+ * it on the freelist.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ } else
+ goto done;
+ }
+ created = modified = 0;
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ /*
+ * We have to be able to identify if a page was newly
+ * created so we can recover it properly. We cannot simply
+ * look for an empty header, because hash uses a pgin
+ * function that will set the header. Instead, we explicitly
+ * try for the page without CREATE and if that fails, then
+ * create it.
+ */
+ if ((ret =
+ mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ created = modified = 1;
+ }
+
+ /* Fix up the allocated page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->page_lsn);
+
+ /*
+	 * If an initial allocation is aborted and then reallocated
+	 * during an archival restore, the log record will have
+ * an LSN for the page but the page will be empty.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)))
+ cmp_p = 0;
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
+ /*
+	 * If we rolled back this allocation previously during an
+ * archive restore, the page may have the LSN of the meta page
+ * at the point of the roll back. This will be no more
+ * than the LSN of the metadata page at the time of this allocation.
+ * Another special case we have to handle is if we ended up with a
+ * page of all 0's which can happen if we abort between allocating a
+ * page in mpool and initializing it. In that case, even if we're
+ * undoing, we need to re-initialize the page.
+ */
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(argp->page_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ switch (argp->ptype) {
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ level = LEAFLEVEL;
+ break;
+ default:
+ level = 0;
+ break;
+ }
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype);
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (DB_UNDO(op) && (cmp_n == 0 || created)) {
+ /*
+ * This is where we handle the case of a 0'd page (pagep->pgno
+ * is equal to PGNO_INVALID).
+ * Undo the allocation, reinitialize the page and
+ * link its next pointer to the free list.
+ */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+
+ pagep->lsn = argp->page_lsn;
+ modified = 1;
+ }
+
+ /*
+ * If the page was newly created, put it on the limbo list.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) {
+ /* Put the page in limbo.*/
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->pgno, 1)) != 0)
+ goto out;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Fix up the metadata page. */
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ LSN(meta) = *lsnp;
+ meta->free = argp->next;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ LSN(meta) = argp->meta_lsn;
+
+ /*
+		 * If the page has a zero LSN then it's newly created
+		 * and will go into limbo rather than directly onto the
+ * free list.
+ */
+ if (!IS_ZERO_LSN(argp->page_lsn))
+ meta->free = argp->pgno;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+ /*
+ * This could be the metapage from a subdb which is read from disk
+ * to recover its creation.
+ */
+ if (F_ISSET(file_dbp, DB_AM_SUBDB))
+ switch (argp->type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ file_dbp->sync(file_dbp, 0);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
+}
+
+/*
+ * __db_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __db_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_free_print);
+ REC_INTRO(__db_pg_free_read, 1);
+
+ /*
+ * Fix up the freed page. If we're redoing the operation we get the
+ * page and explicitly discard its contents, then update its LSN. If
+ * we're undoing the operation, we get the page and restore its header.
+	 * Create the page if necessary; we may be freeing an aborted
+ * create.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ modified = 0;
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(copy_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+ pagep->lsn = *lsnp;
+
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->header.data, argp->header.size);
+
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /*
+ * Fix up the metadata page. If we're redoing or undoing the operation
+ * we get the page and update its LSN and free pointer.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist. */
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo the deallocation. */
+ meta->free = argp->pgno;
+ LSN(meta) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo the deallocation. */
+ meta->free = argp->next;
+ LSN(meta) = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_cksum_recover --
+ * Recovery function for checksum failure log record.
+ *
+ * PUBLIC: int __db_cksum_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_cksum_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_cksum_args *argp;
+
+ int ret;
+
+ COMPQUIET(info, NULL);
+ COMPQUIET(lsnp, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
+
+ REC_PRINT(__db_cksum_print);
+
+ if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * We had a checksum failure -- the only option is to run catastrophic
+ * recovery.
+ */
+ if (F_ISSET(dbenv, DB_ENV_FATAL))
+ ret = 0;
+ else {
+ __db_err(dbenv,
+ "Checksum failure requires catastrophic recovery");
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+
+ __os_free(dbenv, argp);
+ return (ret);
+}
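
All of the recovery functions above follow the same LSN-comparison skeleton: a redo pass re-applies the change when the page still carries the LSN that was logged before the operation, an undo pass reverts it when the page carries the log record's own LSN, and otherwise the page is left alone. A minimal, self-contained sketch of that decision, using hypothetical simplified types rather than the libdb DB_LSN/log_compare machinery:

	#include <stdio.h>

	/* Hypothetical stand-ins for DB_LSN and log_compare(); not the libdb types. */
	struct lsn { unsigned long file, offset; };

	static int
	lsn_cmp(const struct lsn *a, const struct lsn *b)
	{
		if (a->file != b->file)
			return (a->file < b->file ? -1 : 1);
		if (a->offset != b->offset)
			return (a->offset < b->offset ? -1 : 1);
		return (0);
	}

	/*
	 * Decide what a recovery pass does with one page:
	 * redo pass -> apply the change if the page LSN still equals the LSN
	 *              logged before the operation (prev_lsn);
	 * undo pass -> revert the change if the page LSN equals the record's
	 *              own LSN (rec_lsn); otherwise leave the page alone.
	 */
	static const char *
	recover_action(int redo, const struct lsn *rec_lsn,
	    const struct lsn *prev_lsn, const struct lsn *page_lsn)
	{
		if (redo && lsn_cmp(page_lsn, prev_lsn) == 0)
			return ("redo: apply change, stamp page with the record LSN");
		if (!redo && lsn_cmp(page_lsn, rec_lsn) == 0)
			return ("undo: revert change, restore the previous LSN");
		return ("none: page already reflects the desired state");
	}

	int
	main(void)
	{
		struct lsn rec = {1, 500}, prev = {1, 100}, page = {1, 100};

		printf("%s\n", recover_action(1, &rec, &prev, &page));
		page = rec;
		printf("%s\n", recover_action(0, &rec, &prev, &page));
		return (0);
	}
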
diff --git a/libdb/db/db_reclaim.c b/libdb/db/db_reclaim.c
new file mode 100644
index 0000000..ef055cd
--- /dev/null
+++ b/libdb/db/db_reclaim.c
@@ -0,0 +1,248 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __db_traverse_big
+ * Traverse a chain of overflow pages and call the callback routine
+ * on each one. The calling convention for the callback is:
+ * callback(dbp, page, cookie, did_put),
+ * where did_put is a return value indicating if the page in question has
+ * already been returned to the mpool.
+ *
+ * PUBLIC: int __db_traverse_big __P((DB *,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__db_traverse_big(dbp, pgno, callback, cookie)
+ DB *dbp;
+ db_pgno_t pgno;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *p;
+ int did_put, ret;
+
+ mpf = dbp->mpf;
+
+ do {
+ did_put = 0;
+ if ((ret = mpf->get(mpf, &pgno, 0, &p)) != 0)
+ return (ret);
+ pgno = NEXT_PGNO(p);
+ if ((ret = callback(dbp, p, cookie, &did_put)) == 0 &&
+ !did_put)
+ ret = mpf->put(mpf, p, 0);
+ } while (ret == 0 && pgno != PGNO_INVALID);
+
+ return (ret);
+}
+
+/*
+ * __db_reclaim_callback
+ * This is the callback routine used during a delete of a subdatabase.
+ * We are traversing a btree or hash table and trying to free all the
+ * pages.  Since they share common code for duplicates and overflow
+ * items, we traverse them identically and use this routine to do the
+ * actual free.  The reason that this is a callback is that hash uses
+ * the same traversal code for statistics gathering.
+ *
+ * PUBLIC: int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_reclaim_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+
+ if ((ret = __db_free(cookie, p)) != 0)
+ return (ret);
+ *putp = 1;
+
+ return (0);
+}
+
+/*
+ * __db_truncate_callback
+ * This is the callback routine used during a truncate.
+ * We are traversing a btree or hash table and trying to free all the
+ * pages.
+ *
+ * PUBLIC: int __db_truncate_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_truncate_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ DBMETA *meta;
+ DBT ldbt;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ db_indx_t indx, len, off, tlen, top;
+ db_pgno_t pgno;
+ db_trunc_param *param;
+ u_int8_t *hk, type;
+ int ret;
+
+ top = NUM_ENT(p);
+ mpf = dbp->mpf;
+ param = cookie;
+ *putp = 1;
+
+ switch (TYPE(p)) {
+ case P_LBTREE:
+ /* Skip for off-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ type = GET_BKEYDATA(dbp, p, indx + O_INDX)->type;
+ if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
+ ++param->count;
+ }
+ /* FALLTHROUGH */
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_INVALID:
+ if (dbp->type != DB_HASH &&
+ ((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE;
+ goto reinit;
+ }
+ break;
+ case P_OVERFLOW:
+ if (DBC_LOGGING(param->dbc)) {
+ if ((ret = __db_ovref_log(dbp, param->dbc->txn,
+ &LSN(p), 0, p->pgno, -1, &LSN(p))) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+ if (--OV_REF(p) != 0)
+ *putp = 0;
+ break;
+ case P_LRECNO:
+ param->count += top;
+ if (((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = P_LRECNO;
+ goto reinit;
+ }
+ break;
+ case P_LDUP:
+ /* Correct for deleted items. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, p, indx)->type))
+ ++param->count;
+
+ break;
+ case P_HASH:
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(dbp, p, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ ++param->count;
+ break;
+ case H_DUPLICATE:
+ tlen = LEN_HDATA(dbp, p, 0, indx);
+ hk = H_PAIRDATA(dbp, p, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ ++param->count;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ /* Don't free the head of the bucket. */
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ type = P_HASH;
+
+reinit: *putp = 0;
+ if (DBC_LOGGING(param->dbc)) {
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(param->dbc, LCK_ALWAYS,
+ pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf,
+ &pgno, 0, (PAGE **)&meta)) != 0) {
+ goto err;
+ }
+ memset(&ldbt, 0, sizeof(ldbt));
+ ldbt.data = p;
+ ldbt.size = P_OVERHEAD(dbp);
+ if ((ret = __db_pg_free_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ p->pgno, &LSN(meta),
+ PGNO_BASE_MD, &ldbt, meta->free)) != 0)
+ goto err;
+ LSN(p) = LSN(meta);
+
+ if ((ret =
+ __db_pg_alloc_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ &LSN(meta), PGNO_BASE_MD,
+ &p->lsn, p->pgno, type, meta->free)) != 0) {
+err: (void)mpf->put(mpf, (PAGE *)meta, 0);
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ LSN(p) = LSN(meta);
+
+ if ((ret = mpf->put(mpf,
+ (PAGE *)meta, DB_MPOOL_DIRTY)) != 0) {
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ if ((ret = __TLPUT(param->dbc, metalock)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+
+ P_INIT(p, dbp->pgsize, PGNO(p), PGNO_INVALID,
+ PGNO_INVALID, type == P_HASH ? 0 : 1, type);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, p->pgno));
+ }
+
+ if (*putp == 1) {
+ if ((ret = __db_free(param->dbc, p)) != 0)
+ return (ret);
+ } else {
+ if ((ret = mpf->put(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ *putp = 1;
+ }
+
+ return (0);
+}
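
The comment on __db_traverse_big above fixes the callback convention as callback(dbp, page, cookie, did_put), with *did_put telling the traversal whether the callback already returned the page to mpool. A hedged sketch of a hypothetical callback that merely counts the pages of an overflow chain, written in the same style as this file (it assumes the libdb internal headers; count_ovfl_pages and the u_int32_t cookie are illustrative, not part of libdb):

	/* Hypothetical counting callback for __db_traverse_big. */
	static int
	count_ovfl_pages(dbp, p, cookie, did_putp)
		DB *dbp;
		PAGE *p;
		void *cookie;
		int *did_putp;
	{
		COMPQUIET(dbp, NULL);
		COMPQUIET(p, NULL);

		*did_putp = 0;		/* The traversal still owns the page. */
		++*(u_int32_t *)cookie;
		return (0);
	}

	/*
	 * Usage sketch:
	 *	u_int32_t n = 0;
	 *	ret = __db_traverse_big(dbp, first_pgno, count_ovfl_pages, &n);
	 */
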
diff --git a/libdb/db/db_remove.c b/libdb/db/db_remove.c
new file mode 100644
index 0000000..739571e
--- /dev/null
+++ b/libdb/db/db_remove.c
@@ -0,0 +1,318 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __db_subdb_remove __P((DB *, DB_TXN *, const char *, const char *));
+static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *));
+
+/*
+ * __dbenv_dbremove
+ * Remove method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbremove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbremove(dbenv, txn, name, subdb, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbremove");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_remove_i(dbp, txn, name, subdb);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ /*
+		 * We created the DBP here and, when we committed/aborted,
+		 * we released all the transactional locks, which include
+		 * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove
+ * Remove method for DB.
+ *
+ * PUBLIC: int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+ */
+int
+__db_remove(dbp, name, subdb, flags)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->remove", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Remove the file. */
+ ret = __db_remove_i(dbp, NULL, name, subdb);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove_i
+ * Internal remove method for DB.
+ *
+ * PUBLIC: int __db_remove_i __P((DB *, DB_TXN *, const char *, const char *));
+ */
+int
+__db_remove_i(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ /* Handle subdatabase removes separately. */
+ if (subdb != NULL)
+ return (__db_subdb_remove(dbp, txn, name, subdb));
+
+ /* Handle transactional file removes separately. */
+ if (txn != NULL)
+ return (__db_dbtxn_remove(dbp, txn, name));
+
+ /*
+ * The remaining case is a non-transactional file remove.
+ *
+ * Find the real name of the file.
+ */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ if ((ret = __fop_remove_setup(dbp, NULL, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, NULL, name, subdb, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA);
+
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_remove --
+ * Remove a subdatabase.
+ */
+static int
+__db_subdb_remove(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB *mdbp, *sdbp;
+ int ret, t_ret;
+
+ mdbp = sdbp = NULL;
+
+ /* Open the subdatabase. */
+ if ((ret = db_create(&sdbp, dbp->dbenv, 0)) != 0)
+ goto err;
+ if ((ret = __db_open(sdbp,
+ txn, name, subdb, DB_UNKNOWN, DB_WRITEOPEN, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_PREDESTROY, ret, name);
+
+ /* Free up the pages in the subdatabase. */
+ switch (sdbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ sdbp->dbenv, "__db_subdb_remove", sdbp->type);
+ goto err;
+ }
+
+ /*
+ * Remove the entry from the main database and free the subdatabase
+ * metadata page.
+ */
+ if ((ret = __db_master_open(sdbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(
+ mdbp, sdbp, txn, subdb, sdbp->type, MU_REMOVE, NULL, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Close the main and subdatabases. */
+ if ((t_ret = __db_close_i(sdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static int
+__db_dbtxn_remove(dbp, txn, name)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *tmpname;
+
+ dbenv = dbp->dbenv;
+ tmpname = NULL;
+
+ /*
+	 * This is a transactional remove, so we have to keep the name
+	 * of the file locked until the transaction commits.  As a result,
+	 * we implement remove by renaming the file to some other name
+	 * (which creates a dummy named file as a placeholder for the
+	 * file being renamed/removed) and then deleting that file as
+	 * a delayed remove at commit.
+ */
+ if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ return (ret);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if ((ret = __db_rename_i(dbp, txn, name, NULL, tmpname)) != 0)
+ goto err;
+
+ /* The internal removes will also translate into delayed removes. */
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (tmpname != NULL)
+ __os_free(dbenv, tmpname);
+
+ return (ret);
+}
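
__dbenv_dbremove above is the implementation behind the public DB_ENV->dbremove method. A hedged, self-contained sketch of how an application would typically call it (the environment path and file name are illustrative; error reporting is trimmed):

	#include <db.h>

	int
	main(void)
	{
		DB_ENV *dbenv;
		int ret;

		if ((ret = db_env_create(&dbenv, 0)) != 0)
			return (1);
		if ((ret = dbenv->open(dbenv, "/tmp/env", DB_CREATE | DB_INIT_LOCK |
		    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
			goto err;

		/* Remove the whole file; DB_AUTO_COMMIT wraps the remove in its own txn. */
		ret = dbenv->dbremove(dbenv, NULL, "customers.db", NULL, DB_AUTO_COMMIT);

	err:	(void)dbenv->close(dbenv, 0);
		return (ret == 0 ? 0 : 1);
	}
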
diff --git a/libdb/db/db_rename.c b/libdb/db/db_rename.c
new file mode 100644
index 0000000..ee03938
--- /dev/null
+++ b/libdb/db/db_rename.c
@@ -0,0 +1,297 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+static int __db_subdb_rename __P(( DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+/*
+ * __dbenv_dbrename
+ * Rename method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbrename __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbrename(dbenv, txn, name, subdb, newname, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbrename");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_rename_i(dbp, txn, name, subdb, newname);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+
+ /*
+		 * We created the DBP here and, when we committed/aborted,
+		 * we released all the transactional locks, which include
+		 * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename
+ * Rename method for DB.
+ *
+ * PUBLIC: int __db_rename __P((DB *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__db_rename(dbp, name, subdb, newname, flags)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->rename", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Rename the file. */
+ ret = __db_rename_i(dbp, NULL, name, subdb, newname);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename_i
+ * Internal rename method for DB.
+ *
+ * PUBLIC: int __db_rename_i __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, const char *));
+ */
+int
+__db_rename_i(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if (subdb != NULL) {
+ ret = __db_subdb_rename(dbp, txn, name, subdb, newname);
+ goto err;
+ }
+
+ /* From here on down, this pertains to files. */
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if ((ret = __fop_remove_setup(dbp, txn, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_rename != NULL &&
+ (ret = dbp->db_am_rename(dbp, txn, name, subdb, newname)) != 0)
+ goto err;
+
+ /*
+ * The transactional case and non-transactional case are
+ * quite different. In the non-transactional case, we simply
+ * do the rename. In the transactional case, since we need
+ * the ability to back out and maintain locking, we have to
+ * create a temporary object as a placeholder. This is all
+ * taken care of in the fop layer.
+ */
+ if (txn != NULL) {
+ if ((ret = __fop_dummy(dbp, txn, name, newname, 0)) != 0)
+ goto err;
+ } else {
+ if ((ret = __fop_dbrename(dbp, name, newname)) != 0)
+ goto err;
+ }
+
+ /*
+ * I am pretty sure that we haven't gotten a dbreg id, so calling
+ * dbreg_filelist_update is not necessary.
+ */
+ DB_ASSERT(dbp->log_filename == NULL ||
+ dbp->log_filename->id == DB_LOGFILEID_INVALID);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, newname);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_rename --
+ * Rename a subdatabase.
+ */
+static int
+__db_subdb_rename(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ PAGE *meta;
+ int ret, t_ret;
+
+ mdbp = NULL;
+ meta = NULL;
+ dbenv = dbp->dbenv;
+
+ /*
+ * We have not opened this dbp so it isn't marked as a subdb,
+ * but it ought to be.
+ */
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /*
+ * Rename the entry in the main database. We need to first
+ * get the meta-data page number (via MU_OPEN) so that we can
+ * read the meta-data page and obtain a handle lock. Once we've
+ * done that, we can proceed to do the rename in the master.
+ */
+ if ((ret = __db_master_open(dbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn, subdb, dbp->type,
+ MU_OPEN, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = mdbp->mpf->get(mdbp->mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ memcpy(&dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, mdbp->lid, DB_LOCK_WRITE, NULL, 0)) != 0)
+ goto err;
+
+ ret = mdbp->mpf->put(mdbp->mpf, meta, 0);
+ meta = NULL;
+ if (ret != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn,
+ subdb, dbp->type, MU_RENAME, newname, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (meta != NULL &&
+ (t_ret = mdbp->mpf->put(mdbp->mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
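
The comment in __db_rename_i above distinguishes the transactional rename (which goes through a placeholder object so it can be backed out) from the plain filesystem rename. At the public API level the only visible difference is whether a transaction handle is supplied; a hedged fragment showing the transactional form for a subdatabase (handles are assumed already open, names are illustrative):

	DB_TXN *txn;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);

	/* Rename subdatabase "old" to "new" inside the file "data.db". */
	if ((ret = dbenv->dbrename(dbenv, txn, "data.db", "old", "new", 0)) != 0)
		(void)txn->abort(txn);
	else
		ret = txn->commit(txn, 0);
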
diff --git a/libdb/db/db_ret.c b/libdb/db/db_ret.c
new file mode 100644
index 0000000..2e4466e
--- /dev/null
+++ b/libdb/db/db_ret.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __db_ret --
+ * Build return DBT.
+ *
+ * PUBLIC: int __db_ret __P((DB *,
+ * PUBLIC: PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+ */
+int
+__db_ret(dbp, h, indx, dbt, memp, memsize)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *dbt;
+ void **memp;
+ u_int32_t *memsize;
+{
+ BKEYDATA *bk;
+ HOFFPAGE ho;
+ BOVERFLOW *bo;
+ u_int32_t len;
+ u_int8_t *hk;
+ void *data;
+
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = P_ENTRY(dbp, h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ memcpy(&ho, hk, sizeof(HOFFPAGE));
+ return (__db_goff(dbp, dbt,
+ ho.tlen, ho.pgno, memp, memsize));
+ }
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, indx);
+ data = HKEYDATA_DATA(hk);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ return (__db_goff(dbp, dbt,
+ bo->tlen, bo->pgno, memp, memsize));
+ }
+ len = bk->len;
+ data = bk->data;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+
+ return (__db_retcopy(dbp->dbenv, dbt, data, len, memp, memsize));
+}
+
+/*
+ * __db_retcopy --
+ * Copy the returned data into the user's DBT, handling special flags.
+ *
+ * PUBLIC: int __db_retcopy __P((DB_ENV *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__db_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ int ret;
+
+ /* If returning a partial record, reset the length. */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ data = (u_int8_t *)data + dbt->doff;
+ if (len > dbt->doff) {
+ len -= dbt->doff;
+ if (len > dbt->dlen)
+ len = dbt->dlen;
+ } else
+ len = 0;
+ }
+
+ /*
+ * Return the length of the returned record in the DBT size field.
+ * This satisfies the requirement that if we're using user memory
+ * and insufficient memory was provided, return the amount necessary
+ * in the size field.
+ */
+ dbt->size = len;
+
+ /*
+ * Allocate memory to be owned by the application: DB_DBT_MALLOC,
+ * DB_DBT_REALLOC.
+ *
+ * !!!
+ * We always allocate memory, even if we're copying out 0 bytes. This
+ * guarantees consistency, i.e., the application can always free memory
+ * without concern as to how many bytes of the record were requested.
+ *
+ * Use the memory specified by the application: DB_DBT_USERMEM.
+ *
+ * !!!
+ * If the length we're going to copy is 0, the application-supplied
+ * memory pointer is allowed to be NULL.
+ */
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_umalloc(dbenv, len, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_urealloc(dbenv, len, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
+ return (ENOMEM);
+ } else if (memp == NULL || memsize == NULL) {
+ return (EINVAL);
+ } else {
+ if (len != 0 && (*memsize == 0 || *memsize < len)) {
+ if ((ret = __os_realloc(dbenv, len, memp)) != 0) {
+ *memsize = 0;
+ return (ret);
+ }
+ *memsize = len;
+ }
+ dbt->data = *memp;
+ }
+
+ if (len != 0)
+ memcpy(dbt->data, data, len);
+ return (0);
+}
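
__db_retcopy above defines the memory-ownership contract for returned records: with DB_DBT_USERMEM it fails with ENOMEM when the caller's buffer (ulen) is too small, but the required length is still reported through the DBT's size field. A hedged application-side sketch of the usual grow-and-retry loop against the public DBT interface (dbp and key are assumed to be set up already):

	DBT key, data;		/* key is assumed to be filled in. */
	void *p;
	int ret;

	memset(&data, 0, sizeof(data));
	data.flags = DB_DBT_USERMEM;

	while ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == ENOMEM) {
		/* data.size now holds the length the record actually needs. */
		if ((p = realloc(data.data, data.size)) == NULL) {
			free(data.data);
			return (ENOMEM);
		}
		data.data = p;
		data.ulen = data.size;
	}
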
diff --git a/libdb/db/db_truncate.c b/libdb/db/db_truncate.c
new file mode 100644
index 0000000..c71d431
--- /dev/null
+++ b/libdb/db/db_truncate.c
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+
+/*
+ * __db_truncate
+ *	Truncate method for DB.
+ *
+ * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ */
+int
+__db_truncate(dbp, txn, countp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp, flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ ret = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_fchk(dbenv, "DB->truncate", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL);
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ dbenv, "__db_truncate", dbp->type);
+ goto err;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, NULL);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
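
__db_truncate above backs the public DB->truncate method, which discards every record in the database and reports how many were dropped. A hedged usage fragment (the handle is assumed open; DB_AUTO_COMMIT is only meaningful in a transactional environment):

	u_int32_t count;
	int ret;

	if ((ret = dbp->truncate(dbp, NULL, &count, DB_AUTO_COMMIT)) == 0)
		printf("discarded %lu records\n", (unsigned long)count);
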
diff --git a/libdb/db/db_upg.c b/libdb/db/db_upg.c
new file mode 100644
index 0000000..463e67c
--- /dev/null
+++ b/libdb/db/db_upg.c
@@ -0,0 +1,341 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+
+static int (* const func_31_list[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)) = {
+ NULL, /* P_INVALID */
+ NULL, /* __P_DUPLICATE */
+ __ham_31_hash, /* P_HASH */
+ NULL, /* P_IBTREE */
+ NULL, /* P_IRECNO */
+ __bam_31_lbtree, /* P_LBTREE */
+ NULL, /* P_LRECNO */
+ NULL, /* P_OVERFLOW */
+ __ham_31_hashmeta, /* P_HASHMETA */
+ __bam_31_btreemeta, /* P_BTREEMETA */
+};
+
+static int __db_page_pass __P((DB *, char *, u_int32_t, int (* const [])
+ (DB *, char *, u_int32_t, DB_FH *, PAGE *, int *), DB_FH *));
+
+/*
+ * __db_upgrade --
+ * Upgrade an existing database.
+ *
+ * PUBLIC: int __db_upgrade __P((DB *, const char *, u_int32_t));
+ */
+int
+__db_upgrade(dbp, fname, flags)
+ DB *dbp;
+ const char *fname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ size_t n;
+ int ret, t_ret;
+ u_int8_t mbuf[256];
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->upgrade", flags, DB_DUPSORT)) != 0)
+ return (ret);
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, fname, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ /* Open the file. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ return (ret);
+ }
+
+ /* Initialize the feedback. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 0);
+
+ /*
+ * Read the metadata page. We read 256 bytes, which is larger than
+ * any access method's metadata page and smaller than any disk sector.
+ */
+ if ((ret = __os_read(dbenv, &fh, mbuf, sizeof(mbuf), &n)) != 0)
+ goto err;
+
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 6:
+ /*
+ * Before V7 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __bam_30_btreemeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 8:
+ case 9:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported btree version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_HASHMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 4:
+ case 5:
+ /*
+ * Before V6 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __ham_30_hashmeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+
+ /*
+ * Before V6, we created hash pages one by one as they
+ * were needed, using hashhdr.ovfl_point to reserve
+ * a block of page numbers for them. A consequence
+ * of this was that, if no overflow pages had been
+ * created, the current doubling might extend past
+ * the end of the database file.
+ *
+ * In DB 3.X, we now create all the hash pages
+			 * belonging to a doubling atomically; it's not
+ * safe to just save them for later, because when
+ * we create an overflow page we'll just create
+ * a new last page (whatever that may be). Grow
+ * the database to the end of the current doubling.
+ */
+ if ((ret =
+ __ham_30_sizefix(dbp, &fh, real_name, mbuf)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 6:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ case 8:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported hash version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_QAMMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 1:
+ /*
+ * If we're in a Queue database, the only page that
+			 * needs upgrading is the meta-database page; don't
+ * bother with a full pass.
+ */
+ if ((ret = __qam_31_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case 2:
+ if ((ret = __qam_32_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 3:
+ case 4:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported queue version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ default:
+ M_32_SWAP(((DBMETA *)mbuf)->magic);
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ __db_err(dbenv,
+ "%s: DB->upgrade only supported on native byte-order systems",
+ real_name);
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unrecognized file type", real_name);
+ break;
+ }
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __os_fsync(dbenv, &fh);
+
+err: if ((t_ret = __os_closehandle(dbenv, &fh)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, real_name);
+
+ /* We're done. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 100);
+
+ return (ret);
+}
+
+/*
+ * __db_page_pass --
+ * Walk the pages of the database, upgrading whatever needs it.
+ */
+static int
+__db_page_pass(dbp, real_name, flags, fl, fhp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ int (* const fl[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ DB_FH *fhp;
+{
+ DB_ENV *dbenv;
+ PAGE *page;
+ db_pgno_t i, pgno_last;
+ size_t n;
+ int dirty, ret;
+
+ dbenv = dbp->dbenv;
+
+ /* Determine the last page of the file. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ return (ret);
+
+ /* Allocate memory for a single page. */
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &page)) != 0)
+ return (ret);
+
+ /* Walk the file, calling the underlying conversion functions. */
+ for (i = 0; i < pgno_last; ++i) {
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, (i * 100)/pgno_last);
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_read(dbenv, fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ dirty = 0;
+ if (fl[TYPE(page)] != NULL && (ret = fl[TYPE(page)]
+ (dbp, real_name, flags, fhp, page, &dirty)) != 0)
+ break;
+ if (dirty) {
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_write(dbenv,
+ fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ }
+ }
+
+ __os_free(dbp->dbenv, page);
+ return (ret);
+}
+
+/*
+ * __db_lastpgno --
+ * Return the current last page number of the file.
+ *
+ * PUBLIC: int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+ */
+int
+__db_lastpgno(dbp, real_name, fhp, pgno_lastp)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ db_pgno_t *pgno_lastp;
+{
+ DB_ENV *dbenv;
+ db_pgno_t pgno_last;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __os_ioinfo(dbenv,
+ real_name, fhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ return (ret);
+ }
+
+ /* Page sizes have to be a power-of-two. */
+ if (bytes % dbp->pgsize != 0) {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize", real_name);
+ return (EINVAL);
+ }
+ pgno_last = mbytes * (MEGABYTE / dbp->pgsize);
+ pgno_last += bytes / dbp->pgsize;
+
+ *pgno_lastp = pgno_last;
+ return (0);
+}
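
__db_lastpgno above converts the file size reported by __os_ioinfo (whole megabytes plus leftover bytes) into a page count. For example, with a 4096-byte page size, 2 megabytes plus 524288 leftover bytes give 2 * (1048576 / 4096) + 524288 / 4096 = 512 + 128 = 640 pages. A self-contained sketch of the same arithmetic (the values are illustrative, and MEGABYTE is assumed to match the 1048576 used by libdb):

	#include <stdio.h>

	#define	MEGABYTE	1048576

	int
	main(void)
	{
		unsigned long mbytes = 2, bytes = 524288, pgsize = 4096;
		unsigned long pgno_last;

		pgno_last = mbytes * (MEGABYTE / pgsize) + bytes / pgsize;
		printf("last pgno = %lu\n", pgno_last);		/* 640 */
		return (0);
	}
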
diff --git a/libdb/db/db_upg_opd.c b/libdb/db/db_upg_opd.c
new file mode 100644
index 0000000..aba2028
--- /dev/null
+++ b/libdb/db/db_upg_opd.c
@@ -0,0 +1,352 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __db_build_bi __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_build_ri __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_up_ovref __P((DB *, DB_FH *, db_pgno_t));
+
+#define GET_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_read(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
+#define PUT_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_write(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
+
+/*
+ * __db_31_offdup --
+ * Convert 3.0 off-page duplicates to 3.1 off-page duplicates.
+ *
+ * PUBLIC: int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+ */
+int
+__db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ int sorted;
+ db_pgno_t *pgnop;
+{
+ PAGE *ipage, *page;
+ db_indx_t indx;
+ db_pgno_t cur_cnt, i, next_cnt, pgno, *pgno_cur, pgno_last;
+ db_pgno_t *pgno_next, pgno_max, *tmp;
+ db_recno_t nrecs;
+ size_t n;
+ int level, nomem, ret;
+
+ ipage = page = NULL;
+ pgno_cur = pgno_next = NULL;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
+ goto err;
+
+ /*
+ * Walk the chain of 3.0 off-page duplicates. Each one is converted
+ * in place to a 3.1 off-page duplicate page. If the duplicates are
+ * sorted, they are converted to a Btree leaf page, otherwise to a
+ * Recno leaf page.
+ */
+ for (nrecs = 0, cur_cnt = pgno_max = 0,
+ pgno = *pgnop; pgno != PGNO_INVALID;) {
+ if (pgno_max == cur_cnt) {
+ pgno_max += 20;
+ if ((ret = __os_realloc(dbp->dbenv, pgno_max *
+ sizeof(db_pgno_t), &pgno_cur)) != 0)
+ goto err;
+ }
+ pgno_cur[cur_cnt++] = pgno;
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ nrecs += NUM_ENT(page);
+ LEVEL(page) = LEAFLEVEL;
+ TYPE(page) = sorted ? P_LDUP : P_LRECNO;
+ /*
+ * !!!
+		 * DB didn't zero the LSNs on off-page duplicate pages.
+ */
+ ZERO_LSN(LSN(page));
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+ pgno = NEXT_PGNO(page);
+ }
+
+ /* If we only have a single page, it's easy. */
+ if (cur_cnt > 1) {
+ /*
+ * pgno_cur is the list of pages we just converted. We're
+ * going to walk that list, but we'll need to create a new
+ * list while we do so.
+ */
+ if ((ret = __os_malloc(dbp->dbenv,
+ cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0)
+ goto err;
+
+ /* Figure out where we can start allocating new pages. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ goto err;
+
+ /* Allocate room for an internal page. */
+ if ((ret = __os_malloc(dbp->dbenv,
+ dbp->pgsize, &ipage)) != 0)
+ goto err;
+ PGNO(ipage) = PGNO_INVALID;
+ }
+
+ /*
+ * Repeatedly walk the list of pages, building internal pages, until
+ * there's only one page at a level.
+ */
+ for (level = LEAFLEVEL + 1; cur_cnt > 1; ++level) {
+ for (indx = 0, i = next_cnt = 0; i < cur_cnt;) {
+ if (indx == 0) {
+ P_INIT(ipage, dbp->pgsize, pgno_last,
+ PGNO_INVALID, PGNO_INVALID,
+ level, sorted ? P_IBTREE : P_IRECNO);
+ ZERO_LSN(LSN(ipage));
+
+ pgno_next[next_cnt++] = pgno_last++;
+ }
+
+ GET_PAGE(dbp, fhp, pgno_cur[i], page);
+
+ /*
+ * If the duplicates are sorted, put the first item on
+ * the lower-level page onto a Btree internal page. If
+ * the duplicates are not sorted, create an internal
+ * Recno structure on the page. If either case doesn't
+ * fit, push out the current page and start a new one.
+ */
+ nomem = 0;
+ if (sorted) {
+ if ((ret = __db_build_bi(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ } else
+ if ((ret = __db_build_ri(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ if (nomem) {
+ indx = 0;
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+ } else {
+ ++indx;
+ ++NUM_ENT(ipage);
+ ++i;
+ }
+ }
+
+ /*
+ * Push out the last internal page. Set the top-level record
+ * count if we've reached the top.
+ */
+ if (next_cnt == 1)
+ RE_NREC_SET(ipage, nrecs);
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+
+ /* Swap the current and next page number arrays. */
+ cur_cnt = next_cnt;
+ tmp = pgno_cur;
+ pgno_cur = pgno_next;
+ pgno_next = tmp;
+ }
+
+ *pgnop = pgno_cur[0];
+
+err: if (pgno_cur != NULL)
+ __os_free(dbp->dbenv, pgno_cur);
+ if (pgno_next != NULL)
+ __os_free(dbp->dbenv, pgno_next);
+ if (ipage != NULL)
+ __os_free(dbp->dbenv, ipage);
+ if (page != NULL)
+ __os_free(dbp->dbenv, page);
+
+ return (ret);
+}
+
+/*
+ * __db_build_bi --
+ * Build a BINTERNAL entry for a parent page.
+ */
+static int
+__db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ u_int8_t *p;
+ int ret;
+ db_indx_t *inp;
+
+ inp = P_INP(dbp, ipage);
+ switch (TYPE(page)) {
+ case P_IBTREE:
+ child_bi = GET_BINTERNAL(dbp, page, 0);
+ if (P_FREESPACE(dbp, ipage) < BINTERNAL_PSIZE(child_bi->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bi->data, child_bi->len);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)(child_bi->data))->pgno)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ child_bk = GET_BKEYDATA(dbp, page, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ if (P_FREESPACE(dbp, ipage) <
+ BINTERNAL_PSIZE(child_bk->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bk->len);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk->data, child_bk->len);
+ break;
+ case B_OVERFLOW:
+ if (P_FREESPACE(dbp, ipage) <
+ BINTERNAL_PSIZE(BOVERFLOW_SIZE)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk, BOVERFLOW_SIZE);
+
+ /* Increment the overflow ref count. */
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)child_bk)->pgno)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_build_ri --
+ * Build a RINTERNAL entry for an internal parent page.
+ */
+static int
+__db_build_ri(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ RINTERNAL ri;
+ db_indx_t *inp;
+
+ COMPQUIET(fhp, NULL);
+ inp = P_INP(dbp, ipage);
+ if (P_FREESPACE(dbp, ipage) < RINTERNAL_PSIZE) {
+ *nomemp = 1;
+ return (0);
+ }
+
+ ri.pgno = PGNO(page);
+ ri.nrecs = __bam_total(dbp, page);
+ inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE;
+ memcpy(P_ENTRY(dbp, ipage, indx), &ri, RINTERNAL_SIZE);
+
+ return (0);
+}
+
+/*
+ * __db_up_ovref --
+ *	Increment the reference count on an overflow page.
+ */
+static int
+__db_up_ovref(dbp, fhp, pgno)
+ DB *dbp;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+{
+ PAGE *page;
+ size_t n;
+ int ret;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
+ return (ret);
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ ++OV_REF(page);
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+err: __os_free(dbp->dbenv, page);
+
+ return (ret);
+}
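
The loop in __db_31_offdup above keeps grouping the current list of leaf pages under newly allocated internal pages, level by level, until a single root remains (the real code starts a new parent whenever the next child entry no longer fits on the page). A toy, self-contained model of that shrink-until-one loop with a fixed fan-out (FANOUT is purely illustrative):

	#include <stdio.h>

	#define	FANOUT	4	/* Illustrative; libdb stops a parent when the page is full. */

	int
	main(void)
	{
		unsigned long cur_cnt = 11, next_cnt, level;

		for (level = 2; cur_cnt > 1; ++level) {
			next_cnt = (cur_cnt + FANOUT - 1) / FANOUT;
			printf("level %lu: %lu parent(s) over %lu child page(s)\n",
			    level, next_cnt, cur_cnt);
			cur_cnt = next_cnt;
		}
		return (0);
	}
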
diff --git a/libdb/db/db_vrfy.c b/libdb/db/db_vrfy.c
new file mode 100644
index 0000000..d05556b
--- /dev/null
+++ b/libdb/db/db_vrfy.c
@@ -0,0 +1,2462 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_guesspgsize __P((DB_ENV *, DB_FH *));
+static int __db_is_valid_magicno __P((u_int32_t, DBTYPE *));
+static int __db_is_valid_pagetype __P((u_int32_t));
+static int __db_meta2pgset
+ __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, DB *));
+static int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+static int __db_salvage_subdbpg __P((DB *, VRFY_DBINFO *,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+static int __db_salvage_subdbs
+ __P((DB *, VRFY_DBINFO *, void *,
+ int(*)(void *, const void *), u_int32_t, int *));
+static int __db_salvage_unknowns
+ __P((DB *, VRFY_DBINFO *, void *,
+ int (*)(void *, const void *), u_int32_t));
+static int __db_vrfy_common
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_freelist __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+static int __db_vrfy_invalid
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_orderchkonly __P((DB *,
+ VRFY_DBINFO *, const char *, const char *, u_int32_t));
+static int __db_vrfy_pagezero __P((DB *, VRFY_DBINFO *, DB_FH *, u_int32_t));
+static int __db_vrfy_subdbs
+ __P((DB *, VRFY_DBINFO *, const char *, u_int32_t));
+static int __db_vrfy_structure
+ __P((DB *, VRFY_DBINFO *, const char *, db_pgno_t, u_int32_t));
+static int __db_vrfy_walkpages
+ __P((DB *, VRFY_DBINFO *, void *, int (*)(void *, const void *),
+ u_int32_t));
+
+/*
+ * This is the code for DB->verify, the DB database consistency checker.
+ * For now, it checks all subdatabases in a database, and verifies
+ * everything it knows how to (i.e. it's all-or-nothing, and one can't
+ * check only for a subset of possible problems).
+ */
+
+/*
+ * __db_verify --
+ * Walk the entire file page-by-page, either verifying with or without
+ * dumping in db_dump -d format, or DB_SALVAGE-ing whatever key/data
+ * pairs can be found and dumping them in standard (db_load-ready)
+ * dump format.
+ *
+ * (Salvaging isn't really a verification operation, but we put it
+ * here anyway because it requires essentially identical top-level
+ * code.)
+ *
+ * flags may be 0, DB_NOORDERCHK, DB_ORDERCHKONLY, or DB_SALVAGE
+ * (and optionally DB_AGGRESSIVE).
+ *
+ * __db_verify itself is simply a wrapper to __db_verify_internal,
+ * which lets us pass appropriate equivalents to FILE * in from the
+ * non-C APIs.
+ *
+ * PUBLIC: int __db_verify
+ * PUBLIC: __P((DB *, const char *, const char *, FILE *, u_int32_t));
+ */
+int
+__db_verify(dbp, file, database, outfile, flags)
+ DB *dbp;
+ const char *file, *database;
+ FILE *outfile;
+ u_int32_t flags;
+{
+
+ return (__db_verify_internal(dbp,
+ file, database, outfile, __db_verify_callback, flags));
+}
+
+/*
+ * __db_verify_callback --
+ * Callback function for using pr_* functions from C.
+ *
+ * PUBLIC: int __db_verify_callback __P((void *, const void *));
+ */
+int
+__db_verify_callback(handle, str_arg)
+ void *handle;
+ const void *str_arg;
+{
+ char *str;
+ FILE *f;
+
+ str = (char *)str_arg;
+ f = (FILE *)handle;
+
+ if (fprintf(f, "%s", str) != (int)strlen(str))
+ return (EIO);
+
+ return (0);
+}
+
+/*
+ * __db_verify_internal --
+ * Inner meat of __db_verify.
+ *
+ * PUBLIC: int __db_verify_internal __P((DB *, const char *,
+ * PUBLIC: const char *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
+ DB *dbp_orig;
+ const char *name, *subdb;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ VRFY_DBINFO *vdp;
+ int has, ret, isbad;
+ char *real_name;
+
+ dbenv = dbp_orig->dbenv;
+ vdp = NULL;
+ real_name = NULL;
+ ret = isbad = 0;
+
+ memset(&fh, 0, sizeof(fh));
+ fhp = &fh;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_AFTER_OPEN(dbp_orig, "verify");
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | \
+ DB_PRINTABLE | DB_SALVAGE)
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ /*
+ * DB_SALVAGE is mutually exclusive with the other flags except
+ * DB_AGGRESSIVE and DB_PRINTABLE.
+ */
+ if (LF_ISSET(DB_SALVAGE) &&
+ (flags & ~DB_AGGRESSIVE & ~DB_PRINTABLE) != DB_SALVAGE)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ /* DB_AGGRESSIVE and DB_PRINTABLE are only meaningful when salvaging. */
+ if ((LF_ISSET(DB_AGGRESSIVE) || LF_ISSET(DB_PRINTABLE)) &&
+ !LF_ISSET(DB_SALVAGE))
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && flags != DB_ORDERCHKONLY)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && subdb == NULL) {
+ __db_err(dbenv, "DB_ORDERCHKONLY requires a database name");
+ return (EINVAL);
+ }
+
+ /*
+ * Forbid working in an environment that uses transactions or
+ * locking; we're going to be looking at the file freely,
+ * and while we're not going to modify it, we aren't obeying
+ * locking conventions either.
+ */
+ if (TXN_ON(dbenv) || LOCKING_ON(dbenv) || LOGGING_ON(dbenv)) {
+ dbp_orig->errx(dbp_orig,
+ "verify may not be used with transactions, logging, or locking");
+ return (EINVAL);
+ /* NOTREACHED */
+ }
+
+ /* Create a dbp to use internally, which we can close at our leisure. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_VERIFYING);
+
+ /* Copy the supplied pagesize, which we use if the file one is bogus. */
+ if (dbp_orig->pgsize >= DB_MIN_PGSIZE &&
+ dbp_orig->pgsize <= DB_MAX_PGSIZE)
+ dbp->set_pagesize(dbp, dbp_orig->pgsize);
+
+ /* Copy the feedback function, if present, and initialize it. */
+ if (!LF_ISSET(DB_SALVAGE) && dbp_orig->db_feedback != NULL) {
+ dbp->set_feedback(dbp, dbp_orig->db_feedback);
+ dbp->db_feedback(dbp, DB_VERIFY, 0);
+ }
+
+ /*
+ * Copy the comparison and hashing functions. Note that
+ * even if the database is not a hash or btree, the respective
+ * internal structures will have been initialized.
+ */
+ if (dbp_orig->dup_compare != NULL &&
+ (ret = dbp->set_dup_compare(dbp, dbp_orig->dup_compare)) != 0)
+ goto err;
+ if (((BTREE *)dbp_orig->bt_internal)->bt_compare != NULL &&
+ (ret = dbp->set_bt_compare(dbp,
+ ((BTREE *)dbp_orig->bt_internal)->bt_compare)) != 0)
+ goto err;
+ if (((HASH *)dbp_orig->h_internal)->h_hash != NULL &&
+ (ret = dbp->set_h_hash(dbp,
+ ((HASH *)dbp_orig->h_internal)->h_hash)) != 0)
+ goto err;
+
+ /*
+ * We don't know how large the cache is, and if the database
+ * in question uses a small page size--which we don't know
+ * yet!--it may be uncomfortably small for the default page
+ * size [#2143]. However, the things we need temporary
+ * databases for in dbinfo are largely tiny, so using a
+ * 1024-byte pagesize is probably not going to be a big hit,
+ * and will make us fit better into small spaces.
+ */
+ if ((ret = __db_vrfy_dbinfo_create(dbenv, 1024, &vdp)) != 0)
+ goto err;
+
+ /*
+ * Note whether the user has requested that we use printable
+ * chars where possible. We won't get here with this flag if
+ * we're not salvaging.
+ */
+ if (LF_ISSET(DB_PRINTABLE))
+ F_SET(vdp, SALVAGE_PRINTABLE);
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /*
+ * Our first order of business is to verify page 0, which is
+ * the metadata page for the master database of subdatabases
+ * or of the only database in the file. We want to do this by hand
+ * rather than just calling __db_open in case it's corrupt--various
+ * things in __db_open might act funny.
+ *
+ * Once we know the metadata page is healthy, I believe that it's
+ * safe to open the database normally and then use the page swapping
+ * code, which makes life easier.
+ */
+ if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0444, fhp)) != 0)
+ goto err;
+
+ /* Verify the metadata page 0; set pagesize and type. */
+ if ((ret = __db_vrfy_pagezero(dbp, vdp, fhp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * We can assume at this point that dbp->pagesize and dbp->type are
+ * set correctly, or at least as well as they can be, and that
+ * locking, logging, and txns are not in use. Thus we can trust
+ * the memp code not to look at the page, and thus to be safe
+ * enough to use.
+ *
+ * The dbp is not open, but the file is open in the fhp, and we
+ * cannot assume that __db_open is safe. Call __db_dbenv_setup,
+ * the [safe] part of __db_open that initializes the environment--
+ * and the mpool--manually.
+ */
+ if ((ret = __db_dbenv_setup(dbp, NULL,
+ name, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0)
+ return (ret);
+
+ /* Mark the dbp as opened, so that we correctly handle its close. */
+ F_SET(dbp, DB_AM_OPEN_CALLED);
+
+ /* Find out the page number of the last page in the database. */
+ dbp->mpf->last_pgno(dbp->mpf, &vdp->last_pgno);
+
+ /*
+ * DB_ORDERCHKONLY is a special case; our file consists of
+ * several subdatabases, which use different hash, bt_compare,
+ * and/or dup_compare functions. Consequently, we couldn't verify
+ * sorting and hashing simply by calling DB->verify() on the file.
+ * DB_ORDERCHKONLY allows us to come back and check those things; it
+ * requires a subdatabase, and assumes that everything but that
+ * database's sorting/hashing is correct.
+ */
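+ /*
+ * (Editorial example, hedged: a typical sequence would be to verify the
+ * file as a whole with DB_NOORDERCHK, then re-verify each subdatabase
+ * with DB_ORDERCHKONLY on a handle that has the appropriate
+ * bt_compare/dup_compare/h_hash functions set.)
+ */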
+ if (LF_ISSET(DB_ORDERCHKONLY)) {
+ ret = __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags);
+ goto done;
+ }
+
+ /*
+ * When salvaging, we use a db to keep track of whether we've seen a
+ * given overflow or dup page in the course of traversing normal data.
+ * If in the end we have not, we assume its key got lost and print it
+ * with key "UNKNOWN".
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_init(vdp)) != 0)
+ return (ret);
+
+ /*
+ * If we're not being aggressive, attempt to crack subdbs.
+ * "has" will indicate whether the attempt has succeeded
+ * (even in part), meaning that we have some semblance of
+ * subdbs; on the walkpages pass, we print out
+ * whichever data pages we have not seen.
+ */
+ has = 0;
+ if (!LF_ISSET(DB_AGGRESSIVE) && (__db_salvage_subdbs(dbp,
+ vdp, handle, callback, flags, &has)) != 0)
+ isbad = 1;
+
+ /*
+ * If we have subdatabases, we need to signal that if
+ * any keys are found that don't belong to a subdatabase,
+ * they'll need to have an "__OTHER__" subdatabase header
+ * printed first. Flag this. Else, print a header for
+ * the normal, non-subdb database.
+ */
+ if (has == 1)
+ F_SET(vdp, SALVAGE_PRINTHEADER);
+ else if ((ret = __db_prheader(dbp,
+ NULL, 0, 0, handle, callback, vdp, PGNO_BASE_MD)) != 0)
+ goto err;
+ }
+
+ if ((ret =
+ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /* If we're verifying, verify inter-page structure. */
+ if (!LF_ISSET(DB_SALVAGE) && isbad == 0)
+ if ((ret =
+ __db_vrfy_structure(dbp, vdp, name, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * If we're salvaging, output with key UNKNOWN any overflow or dup pages
+ * we haven't been able to put in context. Then destroy the salvager's
+ * state-saving database.
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_unknowns(dbp,
+ vdp, handle, callback, flags)) != 0)
+ isbad = 1;
+ /* No return value, since there's little we can do. */
+ __db_salvage_destroy(vdp);
+ }
+
+ if (0) {
+ /* Don't try to strerror() DB_VERIFY_FATAL; it's private. */
+err: if (ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+ (void)__db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ }
+
+ if (LF_ISSET(DB_SALVAGE) &&
+ (has == 0 || F_ISSET(vdp, SALVAGE_PRINTFOOTER)))
+ (void)__db_prfooter(handle, callback);
+
+ /* Send feedback that we're done. */
+done: if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY, 100);
+
+ if (F_ISSET(fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, fhp);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ if (vdp)
+ (void)__db_vrfy_dbinfo_destroy(dbenv, vdp);
+ if (real_name)
+ __os_free(dbenv, real_name);
+
+ if ((ret == 0 && isbad == 1) || ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_pagezero --
+ * Verify the master metadata page. Use seek, read, and a local buffer
+ * rather than the DB paging code, for safety.
+ *
+ * Must correctly (or best-guess) set dbp->type and dbp->pagesize.
+ */
+static int
+__db_vrfy_pagezero(dbp, vdp, fhp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DB_FH *fhp;
+ u_int32_t flags;
+{
+ DBMETA *meta;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t freelist;
+ size_t nr;
+ int isbad, ret, swapped;
+ u_int8_t mbuf[DBMETASIZE];
+
+ isbad = ret = swapped = 0;
+ freelist = 0;
+ dbenv = dbp->dbenv;
+ meta = (DBMETA *)mbuf;
+ dbp->type = DB_UNKNOWN;
+
+ /*
+ * Seek to the metadata page.
+ * Note that if we're just starting a verification, dbp->pgsize
+ * may be zero; this is okay, as we want page zero anyway and
+ * 0*0 == 0.
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, &nr)) != 0) {
+ __db_err(dbenv,
+ "Metadata page %lu cannot be read: %s",
+ (u_long)PGNO_BASE_MD, db_strerror(ret));
+ return (ret);
+ }
+
+ if (nr != DBMETASIZE) {
+ EPRINT((dbenv,
+ "Page %lu: Incomplete metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
+
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, 1)) != 0) {
+ EPRINT((dbenv,
+ "Page %lu: metadata page corrupted", (u_long)PGNO_BASE_MD));
+ isbad = 1;
+ if (ret != -1) {
+ EPRINT((dbenv,
+ "Page %lu: could not check metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
+ }
+
+ /*
+ * Check all of the fields that we can.
+ *
+ * 08-11: Current page number. Must == pgno.
+ * Note that endianness doesn't matter--it's zero.
+ */
+ if (meta->pgno != PGNO_BASE_MD) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: pgno incorrectly set to %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pgno));
+ }
+
+ /* 12-15: Magic number. Must be one of valid set. */
+ if (__db_is_valid_magicno(meta->magic, &dbp->type))
+ swapped = 0;
+ else {
+ M_32_SWAP(meta->magic);
+ if (__db_is_valid_magicno(meta->magic,
+ &dbp->type))
+ swapped = 1;
+ else {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: bad magic number %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->magic));
+ }
+ }
+
+ /*
+ * 16-19: Version. Must be current; for now, we
+ * don't support verification of old versions.
+ */
+ if (swapped)
+ M_32_SWAP(meta->version);
+ if ((dbp->type == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbp->type == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbp->type == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: unsupported DB version %lu; extraneous errors may result",
+ (u_long)PGNO_BASE_MD, (u_long)meta->version));
+ }
+
+ /*
+ * 20-23: Pagesize. Must be power of two,
+ * greater than 512, and less than 64K.
+ */
+ if (swapped)
+ M_32_SWAP(meta->pagesize);
+ if (IS_VALID_PAGESIZE(meta->pagesize))
+ dbp->pgsize = meta->pagesize;
+ else {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page size %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pagesize));
+
+ /*
+ * Now try to settle on a pagesize to use.
+ * If the user-supplied one is reasonable,
+ * use it; else, guess.
+ */
+ if (!IS_VALID_PAGESIZE(dbp->pgsize))
+ dbp->pgsize = __db_guesspgsize(dbenv, fhp);
+ }
+
+ /*
+ * 25: Page type. Must be correct for dbp->type,
+ * which is by now set as well as it can be.
+ */
+ /* Needs no swapping--only one byte! */
+ if ((dbp->type == DB_BTREE && meta->type != P_BTREEMETA) ||
+ (dbp->type == DB_HASH && meta->type != P_HASHMETA) ||
+ (dbp->type == DB_QUEUE && meta->type != P_QAMMETA)) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->type));
+ }
+
+ /*
+ * 28-31: Free list page number.
+ * We'll verify its sensibility when we do inter-page
+ * verification later; for now, just store it.
+ */
+ if (swapped)
+ M_32_SWAP(meta->free);
+ freelist = meta->free;
+
+ /*
+ * Initialize vdp->pages to fit a single pageinfo structure for
+ * this one page. We'll realloc later when we know how many
+ * pages there are.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+ pip->pgno = PGNO_BASE_MD;
+ pip->type = meta->type;
+
+ /*
+ * Signal that we still have to check the info specific to
+ * a given type of meta page.
+ */
+ F_SET(pip, VRFY_INCOMPLETE);
+
+ pip->free = freelist;
+
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+
+ /* Set up the dbp's fileid. We don't use the regular open path. */
+ memcpy(dbp->fileid, meta->uid, DB_FILE_ID_LEN);
+
+ if (swapped == 1)
+ F_SET(dbp, DB_AM_SWAP);
+
+ return (isbad ? DB_VERIFY_BAD : 0);
+}
+
+/*
+ * __db_vrfy_walkpages --
+ * Main loop of the verifier/salvager. Walks through,
+ * page by page, and verifies all pages and/or prints all data pages.
+ */
+static int
+__db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t i;
+ int ret, t_ret, isbad;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ ret = isbad = t_ret = 0;
+
+ if ((ret = __db_fchk(dbenv,
+ "__db_vrfy_walkpages", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ for (i = 0; i <= vdp->last_pgno; i++) {
+ /*
+ * If DB_SALVAGE is set, we inspect our database of
+ * completed pages, and skip any we've already printed in
+ * the subdb pass.
+ */
+ if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0))
+ continue;
+
+ /*
+ * If an individual page get fails, keep going if and only
+ * if we're salvaging.
+ */
+ if ((t_ret = mpf->get(mpf, &i, 0, &h)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ if (LF_ISSET(DB_SALVAGE))
+ continue;
+ else
+ return (ret);
+ }
+
+ if (LF_ISSET(DB_SALVAGE)) {
+ /*
+ * We pretty much don't want to quit unless a
+ * bomb hits. May as well return that something
+ * was screwy, however.
+ */
+ if ((t_ret = __db_salvage(dbp,
+ vdp, i, h, handle, callback, flags)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ isbad = 1;
+ }
+ } else {
+ /*
+ * If we are not salvaging, and we get any error
+ * other than DB_VERIFY_BAD, return immediately;
+ * it may not be safe to proceed. If we get
+ * DB_VERIFY_BAD, keep going; listing more errors
+ * may make it easier to diagnose problems and
+ * determine the magnitude of the corruption.
+ */
+
+ /*
+ * Verify info common to all page
+ * types.
+ */
+ if (i != PGNO_BASE_MD) {
+ ret = __db_vrfy_common(dbp, vdp, h, i, flags);
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ switch (TYPE(h)) {
+ case P_INVALID:
+ ret = __db_vrfy_invalid(dbp, vdp, h, i, flags);
+ break;
+ case __P_DUPLICATE:
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: old-style duplicate page",
+ (u_long)i));
+ break;
+ case P_HASH:
+ ret = __ham_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ ret = __bam_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_LRECNO:
+ ret = __ram_vrfy_leaf(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_OVERFLOW:
+ ret = __db_vrfy_overflow(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_HASHMETA:
+ ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)h, i, flags);
+ break;
+ case P_BTREEMETA:
+ ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)h, i, flags);
+ break;
+ case P_QAMMETA:
+ ret = __qam_vrfy_meta(dbp,
+ vdp, (QMETA *)h, i, flags);
+ break;
+ case P_QAMDATA:
+ ret = __qam_vrfy_data(dbp,
+ vdp, (QPAGE *)h, i, flags);
+ break;
+ default:
+ EPRINT((dbenv,
+ "Page %lu: unknown page type %lu",
+ (u_long)i, (u_long)TYPE(h)));
+ isbad = 1;
+ break;
+ }
+
+ /*
+ * Set up error return.
+ */
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+
+ /*
+ * Provide feedback to the application about our
+ * progress. The range 0-50% comes from the fact
+ * that this is the first of two passes through the
+ * database (front-to-back, then top-to-bottom).
+ */
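+ /*
+ * (Worked example, editorial: with last_pgno == 99, i.e. 100 pages,
+ * finishing page i == 49 reports (49 + 1) * 50 / 100 == 25%, and
+ * finishing the last page reports exactly 50%.)
+ */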
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY,
+ (i + 1) * 50 / (vdp->last_pgno + 1));
+ }
+
+ /*
+ * Just as with the page get, bail if and only if we're
+ * not salvaging.
+ */
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ if (!LF_ISSET(DB_SALVAGE))
+ return (ret);
+ }
+ }
+
+ if (0) {
+err: if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret == 0 ? t_ret : ret);
+ }
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_structure--
+ * After a beginning-to-end walk through the database has been
+ * completed, put together the information that has been collected
+ * to verify the overall database structure.
+ *
+ * Should only be called if we want to do a database verification,
+ * i.e. if DB_SALVAGE is not set.
+ */
+static int
+__db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad, hassubs, p;
+
+ isbad = 0;
+ pip = NULL;
+ dbenv = dbp->dbenv;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_SALVAGE)) {
+ __db_err(dbenv, "__db_vrfy_structure called with DB_SALVAGE");
+ return (EINVAL);
+ }
+
+ /*
+ * Providing feedback here is tricky; in most situations,
+ * we fetch each page one more time, but we do so in a top-down
+ * order that depends on the access method. Worse, we do this
+ * recursively in btree, such that on any call where we're traversing
+ * a subtree we don't know where that subtree is in the whole database;
+ * worse still, any given database may be one of several subdbs.
+ *
+ * The solution is to decrement a counter vdp->pgs_remaining each time
+ * we verify (and call feedback on) a page. We may over- or
+ * under-count, but the structure feedback function will ensure that we
+ * never give a percentage under 50 or over 100. (The first pass
+ * covered the range 0-50%.)
+ */
+ if (dbp->db_feedback != NULL)
+ vdp->pgs_remaining = vdp->last_pgno + 1;
+
+ /*
+ * Call the appropriate function to downwards-traverse the db type.
+ */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * If we have subdatabases and we know that the database is,
+ * thus far, sound, it's safe to walk the tree of subdatabases.
+ * Do so, and verify the structure of the databases within.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) != 0)
+ goto err;
+ hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS) ? 1 : 0;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+
+ if (isbad == 0 && hassubs)
+ if ((ret =
+ __db_vrfy_subdbs(dbp, vdp, dbname, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = __ham_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_vrfy_structure(dbp, vdp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ }
+
+ /*
+ * Queue pages may be unreferenced and totally zeroed, if
+ * they're empty; queue doesn't have much structure, so
+ * this is unlikely to be wrong in any troublesome sense.
+ * Skip to "err".
+ */
+ goto err;
+ /* NOTREACHED */
+ default:
+ /* This should only happen if the verifier is somehow broken. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ /* NOTREACHED */
+ }
+
+ /* Walk free list. */
+ if ((ret =
+ __db_vrfy_freelist(dbp, vdp, meta_pgno, flags)) == DB_VERIFY_BAD)
+ isbad = 1;
+
+ /*
+ * If structure checks up until now have failed, it's likely that
+ * checking what pages have been missed will result in oodles of
+ * extraneous error messages being EPRINTed. Skip to the end
+ * if this is the case; we're going to be printing at least one
+ * error anyway, and probably all the more salient ones.
+ */
+ if (ret != 0 || isbad == 1)
+ goto err;
+
+ /*
+ * Make sure no page has been missed and that no page is still marked
+ * "all zeroes" (only certain hash pages can be, and they're unmarked
+ * in __ham_vrfy_structure).
+ */
+ for (i = 0; i < vdp->last_pgno + 1; i++) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ goto err;
+ if ((ret = __db_vrfy_pgset_get(pgset, i, &p)) != 0)
+ goto err;
+ if (p == 0) {
+ EPRINT((dbenv,
+ "Page %lu: unreferenced page", (u_long)i));
+ isbad = 1;
+ }
+
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES)) {
+ EPRINT((dbenv,
+ "Page %lu: totally zeroed page", (u_long)i));
+ isbad = 1;
+ }
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+ pip = NULL;
+ }
+
+err: if (pip != NULL)
+ (void)__db_vrfy_putpageinfo(dbenv, vdp, pip);
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_is_valid_pagetype
+ */
+static int
+__db_is_valid_pagetype(type)
+ u_int32_t type;
+{
+ switch (type) {
+ case P_INVALID: /* Order matches ordinal value. */
+ case P_HASH:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ case P_QAMDATA:
+ case P_LDUP:
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * __db_is_valid_magicno
+ */
+static int
+__db_is_valid_magicno(magic, typep)
+ u_int32_t magic;
+ DBTYPE *typep;
+{
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ *typep = DB_BTREE;
+ return (1);
+ case DB_HASHMAGIC:
+ *typep = DB_HASH;
+ return (1);
+ case DB_QAMMAGIC:
+ *typep = DB_QUEUE;
+ return (1);
+ }
+ *typep = DB_UNKNOWN;
+ return (0);
+}
+
+/*
+ * __db_vrfy_common --
+ * Verify info common to all page types.
+ */
+static int
+__db_vrfy_common(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret;
+ u_int8_t *p;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ pip->pgno = pgno;
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /*
+ * Hash expands the table by leaving some pages between the
+ * old last and the new last totally zeroed. Its pgin function
+ * should fix things, but we might not be using that (e.g. if
+ * we're a subdatabase).
+ *
+ * Queue will create sparse files if sparse record numbers are used.
+ */
+ if (pgno != 0 && PGNO(h) == 0) {
+ for (p = (u_int8_t *)h; p < (u_int8_t *)h + dbp->pgsize; p++)
+ if (*p != 0) {
+ EPRINT((dbenv,
+ "Page %lu: partially zeroed page",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ /*
+ * It's totally zeroed; mark it as a hash, and we'll
+ * check that that makes sense structurally later.
+ * (The queue verification doesn't care, since queues
+ * don't really have much in the way of structure.)
+ */
+ pip->type = P_HASH;
+ F_SET(pip, VRFY_IS_ALLZEROES);
+ ret = 0;
+ goto err; /* well, not really an err. */
+ }
+
+ if (PGNO(h) != pgno) {
+ EPRINT((dbenv, "Page %lu: bad page number %lu",
+ (u_long)pgno, (u_long)h->pgno));
+ ret = DB_VERIFY_BAD;
+ }
+
+ if (!__db_is_valid_pagetype(h->type)) {
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)pgno, (u_long)h->type));
+ ret = DB_VERIFY_BAD;
+ }
+ pip->type = h->type;
+
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_invalid --
+ * Verify P_INVALID page.
+ * (Yes, there's not much to do here.)
+ */
+static int
+__db_vrfy_invalid(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ pip->next_pgno = pip->prev_pgno = 0;
+
+ if (!IS_VALID_PGNO(NEXT_PGNO(h))) {
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
+ (u_long)pgno, (u_long)NEXT_PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ } else
+ pip->next_pgno = NEXT_PGNO(h);
+
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_datapage --
+ * Verify elements common to data pages (P_HASH, P_LBTREE,
+ * P_IBTREE, P_IRECNO, P_LRECNO, P_OVERFLOW, P_DUPLICATE)--i.e.,
+ * those defined in the PAGE structure.
+ *
+ * Called from each of the per-page routines, after the
+ * all-page-type-common elements of pip have been verified and filled
+ * in.
+ *
+ * PUBLIC: int __db_vrfy_datapage
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_datapage(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+ * prev_pgno and next_pgno: store for inter-page checks,
+ * verify that they point to actual pages and not to self.
+ *
+ * !!!
+ * Internal btree pages do not maintain these fields (indeed,
+ * they overload them). Skip.
+ */
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ if (!IS_VALID_PGNO(PREV_PGNO(h)) || PREV_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid prev_pgno %lu",
+ (u_long)pip->pgno, (u_long)PREV_PGNO(h)));
+ }
+ if (!IS_VALID_PGNO(NEXT_PGNO(h)) || NEXT_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
+ (u_long)pip->pgno, (u_long)NEXT_PGNO(h)));
+ }
+ pip->prev_pgno = PREV_PGNO(h);
+ pip->next_pgno = NEXT_PGNO(h);
+ }
+
+ /*
+ * Verify the number of entries on the page.
+ * There is no good way to determine if this is accurate; the
+ * best we can do is verify that it's not more than can, in theory,
+ * fit on the page. Then, we make sure there are at least
+ * this many valid elements in inp[], and hope that this catches
+ * most cases.
+ */
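+ /*
+ * (Editorial note, hedged: BKEYDATA_PSIZE(0) is the smallest on-page
+ * footprint a single item can have, so a page claiming more than
+ * pgsize / BKEYDATA_PSIZE(0) entries cannot be telling the truth.)
+ */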
+ if (TYPE(h) != P_OVERFLOW) {
+ if (BKEYDATA_PSIZE(0) * NUM_ENT(h) > dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: too many entries: %lu",
+ (u_long)pgno, (u_long)NUM_ENT(h)));
+ }
+ pip->entries = NUM_ENT(h);
+ }
+
+ /*
+ * btree level. Should be zero unless we're a btree;
+ * if we are a btree, should be between LEAFLEVEL and MAXBTREELEVEL,
+ * and we need to save it off.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if (LEVEL(h) < LEAFLEVEL + 1 || LEVEL(h) > MAXBTREELEVEL) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad btree level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ pip->bt_level = LEVEL(h);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ if (LEVEL(h) != LEAFLEVEL) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: btree leaf page has incorrect level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ break;
+ default:
+ if (LEVEL(h) != 0) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonzero level %lu in non-btree database",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ break;
+ }
+
+ /*
+ * Even though inp[] occurs in all PAGEs, we look at it in the
+ * access-method-specific code, since btree and hash treat
+ * item lengths very differently, and one of the most important
+ * things we want to verify is that the data--as specified
+ * by offset and length--cover the right part of the page
+ * without overlaps, gaps, or violations of the page boundary.
+ */
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_meta--
+ * Verify the access-method common parts of a meta page, using
+ * normal mpool routines.
+ *
+ * PUBLIC: int __db_vrfy_meta
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DBMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBTYPE dbtype, magtype;
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* type plausible for a meta page */
+ switch (meta->type) {
+ case P_BTREEMETA:
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ case P_QAMMETA:
+ dbtype = DB_QUEUE;
+ break;
+ default:
+ /* The verifier should never let us get here. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* magic number valid */
+ if (!__db_is_valid_magicno(meta->magic, &magtype)) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: invalid magic number", (u_long)pgno));
+ }
+ if (magtype != dbtype) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: magic number does not match database type",
+ (u_long)pgno));
+ }
+
+ /* version */
+ if ((dbtype == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbtype == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbtype == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: unsupported database version %lu; extraneous errors may result",
+ (u_long)pgno, (u_long)meta->version));
+ }
+
+ /* pagesize */
+ if (meta->pagesize != dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid pagesize %lu",
+ (u_long)pgno, (u_long)meta->pagesize));
+ }
+
+ /* free list */
+ /*
+ * If this is not the main, master-database meta page, it
+ * should not have a free list.
+ */
+ if (pgno != PGNO_BASE_MD && meta->free != PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonempty free list on subdatabase metadata page",
+ (u_long)pgno));
+ }
+
+ /* Can correctly be PGNO_INVALID--that's just the end of the list. */
+ if (meta->free != PGNO_INVALID && IS_VALID_PGNO(meta->free))
+ pip->free = meta->free;
+ else if (!IS_VALID_PGNO(meta->free)) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonsensical free list pgno %lu",
+ (u_long)pgno, (u_long)meta->free));
+ }
+
+ /*
+ * We have now verified the common fields of the metadata page.
+ * Clear the flag that told us they had been incompletely checked.
+ */
+ F_CLR(pip, VRFY_INCOMPLETE);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_freelist --
+ * Walk free list, checking off pages and verifying absence of
+ * loops.
+ */
+static int
+__db_vrfy_freelist(dbp, vdp, meta, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t cur_pgno, next_pgno;
+ int p, ret, t_ret;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0)
+ return (ret);
+ for (next_pgno = pip->free;
+ next_pgno != PGNO_INVALID; next_pgno = pip->next_pgno) {
+ cur_pgno = pip->pgno;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+
+ /* This shouldn't happen, but just in case. */
+ if (!IS_VALID_PGNO(next_pgno)) {
+ EPRINT((dbenv,
+ "Page %lu: invalid next_pgno %lu on free list page",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ return (DB_VERIFY_BAD);
+ }
+
+ /* Detect cycles. */
+ if ((ret = __db_vrfy_pgset_get(pgset, next_pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbenv,
+ "Page %lu: page %lu encountered a second time on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, next_pgno)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_INVALID) {
+ EPRINT((dbenv,
+ "Page %lu: non-invalid page %lu on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ ret = DB_VERIFY_BAD; /* unsafe to continue */
+ break;
+ }
+ }
+
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_subdbs --
+ * Walk the known-safe master database of subdbs with a cursor,
+ * verifying the structure of each subdatabase we encounter.
+ */
+static int
+__db_vrfy_subdbs(dbp, vdp, dbname, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DBC *dbc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t meta_pgno;
+ int ret, t_ret, isbad;
+ u_int8_t type;
+
+ isbad = 0;
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+
+ if ((ret =
+ __db_master_open(dbp, NULL, dbname, DB_RDONLY, 0, &mdbp)) != 0)
+ return (ret);
+
+ if ((ret = __db_icursor(mdbp,
+ NULL, DB_BTREE, PGNO_INVALID, 0, DB_LOCK_INVALIDID, &dbc)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbenv,
+ "Subdatabase entry not page-number size"));
+ isbad = 1;
+ goto err;
+ }
+ memcpy(&meta_pgno, data.data, data.size);
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
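+ /*
+ * (Editorial note: DB_NTOHL converts from network (big-endian) byte
+ * order to host order in place, so it byte-swaps on little-endian
+ * hosts and is a no-op on big-endian ones.)
+ */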
+ if (meta_pgno == PGNO_INVALID || meta_pgno > vdp->last_pgno) {
+ EPRINT((dbenv,
+ "Subdatabase entry references invalid page %lu",
+ (u_long)meta_pgno));
+ isbad = 1;
+ goto err;
+ }
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ goto err;
+ type = pip->type;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+ switch (type) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_QAMMETA:
+ default:
+ EPRINT((dbenv,
+ "Subdatabase entry references page %lu of invalid type %lu",
+ (u_long)meta_pgno, (u_long)type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ /* NOTREACHED */
+ }
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_struct_feedback --
+ * Provide feedback during top-down database structure traversal.
+ * (See comment at the beginning of __db_vrfy_structure.)
+ *
+ * PUBLIC: void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+ */
+void
+__db_vrfy_struct_feedback(dbp, vdp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+{
+ int progress;
+
+ if (dbp->db_feedback == NULL)
+ return;
+
+ if (vdp->pgs_remaining > 0)
+ vdp->pgs_remaining--;
+
+ /* Don't allow a feedback call of 100 until we're really done. */
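+ /*
+ * (Worked example, editorial: with last_pgno + 1 == 100 and 40 pages
+ * still to go, progress is 100 - (40 * 50 / 100) == 80; once
+ * pgs_remaining reaches 0 the formula yields 100, which is reported as
+ * 99 until the verifier truly finishes.)
+ */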
+ progress = 100 - (vdp->pgs_remaining * 50 / (vdp->last_pgno + 1));
+ dbp->db_feedback(dbp, DB_VERIFY, progress == 100 ? 99 : progress);
+}
+
+/*
+ * __db_vrfy_orderchkonly --
+ * Do a sort-order/hashing check on a known-otherwise-good subdb.
+ */
+static int
+__db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ BTMETA *btmeta;
+ DB *mdbp, *pgset;
+ DBC *pgsc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ HASH *h_internal;
+ HMETA *hmeta;
+ PAGE *h, *currpg;
+ db_pgno_t meta_pgno, p, pgno;
+ u_int32_t bucket;
+ int t_ret, ret;
+
+ pgset = NULL;
+ pgsc = NULL;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ currpg = h = NULL;
+
+ LF_CLR(DB_NOORDERCHK);
+
+ /* Open the master database and get the meta_pgno for the subdb. */
+ if ((ret = db_create(&mdbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = __db_master_open(dbp, NULL, name, DB_RDONLY, 0, &mdbp)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
+ memset(&data, 0, sizeof(data));
+ if ((ret = mdbp->get(mdbp, NULL, &key, &data, 0)) != 0)
+ goto err;
+
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbenv, "Subdatabase entry of invalid size"));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ memcpy(&meta_pgno, data.data, data.size);
+
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
+ goto err;
+
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
+ goto err;
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ btmeta = (BTMETA *)h;
+ if (F_ISSET(&btmeta->dbmeta, BTM_RECNO)) {
+ /* Recnos have no order to check. */
+ ret = 0;
+ goto err;
+ }
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, meta_pgno, flags, pgset)) != 0)
+ goto err;
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ NULL, currpg, p, NUM_ENT(currpg), 1,
+ F_ISSET(&btmeta->dbmeta, BTM_DUP), flags)) != 0)
+ goto err;
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+
+ /*
+ * The normal exit condition for the loop above is DB_NOTFOUND.
+ * If we see that, zero it and continue on to cleanup.
+ * Otherwise, it's a real error and will be returned.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ break;
+ case P_HASHMETA:
+ hmeta = (HMETA *)h;
+ h_internal = (HASH *)dbp->h_internal;
+ /*
+ * Make sure h_charkey is right.
+ */
+ if (h_internal == NULL) {
+ EPRINT((dbenv,
+ "Page %lu: DB->h_internal field is NULL",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if (h_internal->h_hash == NULL)
+ h_internal->h_hash = hmeta->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ if (hmeta->h_charkey !=
+ h_internal->h_hash(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbenv,
+ "Page %lu: incorrect hash function for database",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * For each bucket, verify hashing on each page in the
+ * corresponding chain of pages.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+ while (pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf,
+ &pgno, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __ham_vrfy_hashing(dbp,
+ NUM_ENT(currpg), hmeta, bucket, pgno,
+ flags, h_internal->h_hash)) != 0)
+ goto err;
+ pgno = NEXT_PGNO(currpg);
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+ }
+ break;
+ default:
+ EPRINT((dbenv, "Page %lu: database metapage of bad type %lu",
+ (u_long)meta_pgno, (u_long)TYPE(h)));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if (pgsc != NULL && (t_ret = pgsc->c_close(pgsc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pgset != NULL &&
+ (t_ret = pgset->close(pgset, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
+ ret = t_ret;
+ if (currpg != NULL && (t_ret = mpf->put(mpf, currpg, 0)) != 0)
+ ret = t_ret;
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_salvage --
+ * Walk through a page, salvaging all likely or plausible (w/
+ * DB_AGGRESSIVE) key/data pairs.
+ */
+static int
+__db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ASSERT(LF_ISSET(DB_SALVAGE));
+
+ /* If we got this page in the subdb pass, we can safely skip it. */
+ if (__db_salvage_isdone(vdp, pgno))
+ return (0);
+
+ switch (TYPE(h)) {
+ case P_HASH:
+ return (__ham_salvage(dbp,
+ vdp, pgno, h, handle, callback, flags));
+ /* NOTREACHED */
+ case P_LBTREE:
+ return (__bam_salvage(dbp,
+ vdp, pgno, P_LBTREE, h, handle, callback, NULL, flags));
+ /* NOTREACHED */
+ case P_LDUP:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LDUP));
+ /* NOTREACHED */
+ case P_OVERFLOW:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_OVERFLOW));
+ /* NOTREACHED */
+ case P_LRECNO:
+ /*
+ * Recnos are tricky -- they may represent dup pages, or
+ * they may be subdatabase/regular database pages in their
+ * own right. If the former, they need to be printed with a
+ * key, preferably when we hit the corresponding datum in
+ * a btree/hash page. If the latter, there is no key.
+ *
+ * If a database is sufficiently frotzed, we're not going
+ * to be able to get this right, so we best-guess: just
+ * mark it needed now, and if we're really a normal recno
+ * database page, the "unknowns" pass will pick us up.
+ */
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LRECNO));
+ /* NOTREACHED */
+ case P_IBTREE:
+ case P_INVALID:
+ case P_IRECNO:
+ case __P_DUPLICATE:
+ default:
+ /* XXX: Should we be more aggressive here? */
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __db_salvage_unknowns --
+ * Walk through the salvager database, printing with key "UNKNOWN"
+ * any pages we haven't dealt with.
+ */
+static int
+__db_salvage_unknowns(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT unkdbt, key, *dbt;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ int ret, err_ret;
+ void *ovflbuf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0)
+ return (ret);
+
+ err_ret = 0;
+ while ((ret = __db_salvage_getnext(vdp, &pgno, &pgtype)) == 0) {
+ dbt = NULL;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ switch (pgtype) {
+ case SALVAGE_LDUP:
+ case SALVAGE_LRECNODUP:
+ dbt = &unkdbt;
+ /* FALLTHROUGH */
+ case SALVAGE_LBTREE:
+ case SALVAGE_LRECNO:
+ if ((ret = __bam_salvage(dbp, vdp, pgno, pgtype,
+ h, handle, callback, dbt, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_OVERFLOW:
+ /*
+ * XXX:
+ * This may generate multiple "UNKNOWN" keys in
+ * a database with no dups. What to do?
+ */
+ if ((ret = __db_safe_goff(dbp,
+ vdp, pgno, &key, &ovflbuf, flags)) != 0 ||
+ (ret = __db_prdbt(&key,
+ 0, " ", handle, callback, 0, vdp)) != 0 ||
+ (ret = __db_prdbt(&unkdbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_HASH:
+ if ((ret = __ham_salvage(
+ dbp, vdp, pgno, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_INVALID:
+ case SALVAGE_IGNORE:
+ default:
+ /*
+ * Shouldn't happen, but if it does, just do what the
+ * nice man says.
+ */
+ DB_ASSERT(0);
+ break;
+ }
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ __os_free(dbenv, ovflbuf);
+
+ if (err_ret != 0 && ret == 0)
+ ret = err_ret;
+
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
+
+/*
+ * Offset of the ith inp array entry, which we can compare to the offset
+ * the entry stores.
+ */
+#define INP_OFFSET(dbp, h, i) \
+ ((db_indx_t)((u_int8_t *)((P_INP(dbp,(h))) + (i)) - (u_int8_t *)(h)))
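+
+/*
+ * (Editorial example, hedged: INP_OFFSET(dbp, h, i) is the byte offset of
+ * inp[i] itself within the page -- the page header size plus
+ * i * sizeof(db_indx_t).  A stored item offset must lie strictly beyond
+ * its own slot and within the page, which is exactly what
+ * __db_vrfy_inpitem checks below.)
+ */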
+
+/*
+ * __db_vrfy_inpitem --
+ * Verify that a single entry in the inp array is sane, and update
+ * the high water mark and current item offset. (The former of these is
+ * used for state information between calls, and is required; it must
+ * be initialized to the pagesize before the first call.)
+ *
+ * Returns DB_VERIFY_FATAL if inp has collided with the data,
+ * since verification can't continue from there; returns DB_VERIFY_BAD
+ * if anything else is wrong.
+ *
+ * PUBLIC: int __db_vrfy_inpitem __P((DB *, PAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
+ */
+int
+__db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
+ DB *dbp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t i;
+ int is_btree;
+ u_int32_t flags, *himarkp, *offsetp;
+{
+ BKEYDATA *bk;
+ DB_ENV *dbenv;
+ db_indx_t *inp, offset, len;
+
+ dbenv = dbp->dbenv;
+
+ DB_ASSERT(himarkp != NULL);
+ inp = P_INP(dbp, h);
+
+ /*
+ * Check that the inp array, which grows from the beginning of the
+ * page forward, has not collided with the data, which grow from the
+ * end of the page backward.
+ */
+ if (inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) {
+ /* We've collided with the data. We need to bail. */
+ EPRINT((dbenv, "Page %lu: entries listing %lu overlaps data",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_FATAL);
+ }
+
+ offset = inp[i];
+
+ /*
+ * Check that the item offset is reasonable: it points somewhere
+ * after the inp array and before the end of the page.
+ */
+ if (offset <= INP_OFFSET(dbp, h, i) || offset > dbp->pgsize) {
+ EPRINT((dbenv, "Page %lu: bad offset %lu at page index %lu",
+ (u_long)pgno, (u_long)offset, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+
+ /* Update the high-water mark (what HOFFSET should be) */
+ if (offset < *himarkp)
+ *himarkp = offset;
+
+ if (is_btree) {
+ /*
+ * Check that the item length remains on-page.
+ */
+ bk = GET_BKEYDATA(dbp, h, i);
+
+ /*
+ * We need to verify the type of the item here;
+ * we can't simply assume that it will be one of the
+ * expected three. If it's not a recognizable type,
+ * it can't be considered to have a verifiable
+ * length, so it's not possible to certify it as safe.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ len = bk->len;
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ len = BOVERFLOW_SIZE;
+ break;
+ default:
+ EPRINT((dbenv,
+ "Page %lu: item %lu of unrecognizable type",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+
+ if ((size_t)(offset + len) > dbp->pgsize) {
+ EPRINT((dbenv,
+ "Page %lu: item %lu extends past page boundary",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+ }
+
+ if (offsetp != NULL)
+ *offsetp = offset;
+ return (0);
+}
+
+/*
+ * __db_vrfy_duptype--
+ * Given a page number and a set of flags to __bam_vrfy_subtree,
+ * verify that the dup tree type is correct--i.e., it's a recno
+ * if DUPSORT is not set and a btree if it is.
+ *
+ * PUBLIC: int __db_vrfy_duptype
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_duptype(dbp, vdp, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int ret, isbad;
+
+ dbenv = dbp->dbenv;
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (pip->type) {
+ case P_IBTREE:
+ case P_LDUP:
+ if (!LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbenv,
+ "Page %lu: sorted duplicate set in unsorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ if (LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbenv,
+ "Page %lu: unsorted duplicate set in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ default:
+ /*
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
+ */
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbenv, pgno, "duplicate page");
+ else
+ EPRINT((dbenv,
+ "Page %lu: duplicate page of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ isbad = 1;
+ break;
+ }
+
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
+
+/*
+ * __db_salvage_duptree --
+ * Attempt to salvage a given duplicate tree, given its alleged root.
+ *
+ * The key that corresponds to this dup set has been passed to us
+ * in DBT *key. Because data items follow keys, though, it has been
+ * printed once already.
+ *
+ * The basic idea here is that pgno ought to be a P_LDUP, a P_LRECNO, a
+ * P_IBTREE, or a P_IRECNO. If it's an internal page, use the verifier
+ * functions to make sure it's safe; if it's not, we simply bail and the
+ * data will have to be printed with no key later on. if it is safe,
+ * recurse on each of its children.
+ *
+ * Whether or not it's safe, if it's a leaf page, __bam_salvage it.
+ *
+ * At all times, use the DB hanging off vdp to mark and check what we've
+ * done, so each page gets printed exactly once and we don't get caught
+ * in any cycles.
+ *
+ * PUBLIC: int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ DBT *key;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ if (pgno == PGNO_INVALID || !IS_VALID_PGNO(pgno))
+ return (DB_VERIFY_BAD);
+
+ /* We have a plausible page. Try it. */
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if ((ret = __db_vrfy_common(dbp, vdp, h, pgno, flags)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy(dbp,
+ vdp, h, pgno, flags | DB_NOORDERCHK)) != 0 ||
+ (ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ goto err;
+ /*
+ * We have a known-healthy internal page. Walk it.
+ */
+ if ((ret = __bam_salvage_walkdupint(dbp, vdp, h, key,
+ handle, callback, flags)) != 0)
+ goto err;
+ break;
+ case P_LRECNO:
+ case P_LDUP:
+ if ((ret = __bam_salvage(dbp,
+ vdp, pgno, TYPE(h), h, handle, callback, key, flags)) != 0)
+ goto err;
+ break;
+ default:
+ ret = DB_VERIFY_BAD;
+ goto err;
+ /* NOTREACHED */
+ }
+
+err: if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_salvage_subdbs --
+ * Check and see if this database has subdbs; if so, try to salvage
+ * them independently.
+ */
+static int
+__db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+ int *hassubsp;
+{
+ BTMETA *btmeta;
+ DB *pgset;
+ DBC *pgsc;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t p, meta_pgno;
+ int ret, err_ret;
+
+ pgset = NULL;
+ pgsc = NULL;
+ mpf = dbp->mpf;
+ err_ret = 0;
+
+ meta_pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
+ return (ret);
+
+ if (TYPE(h) == P_BTREEMETA)
+ btmeta = (BTMETA *)h;
+ else {
+ /* Not a btree metadata, ergo no subdbs, so just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* If it's not a safe page, bail on the attempt. */
+ if ((ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) != 0 ||
+ (ret = __bam_vrfy_meta(dbp, vdp, btmeta, PGNO_BASE_MD, flags)) != 0)
+ goto err;
+
+ if (!F_ISSET(&btmeta->dbmeta, BTM_SUBDB)) {
+ /* No subdbs, just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* We think we've got subdbs. Mark it so. */
+ *hassubsp = 1;
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+
+ /*
+ * We have subdbs. Try to crack them.
+ *
+ * To do so, get a set of leaf pages in the master
+ * database, and then walk each of the valid ones, salvaging
+ * subdbs as we go. If any prove invalid, just drop them; we'll
+ * pick them up on a later pass.
+ */
+ if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0)
+ goto err;
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_vrfy_common(dbp, vdp, h, p, flags)) != 0 ||
+ (ret = __bam_vrfy(dbp,
+ vdp, h, p, flags | DB_NOORDERCHK)) != 0)
+ goto nextpg;
+ if (TYPE(h) != P_LBTREE)
+ goto nextpg;
+ else if ((ret = __db_salvage_subdbpg(
+ dbp, vdp, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+nextpg: if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+
+ ret = pgset->close(pgset, 0);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+
+ /* NOTREACHED */
+
+err: if (pgsc != NULL)
+ (void)pgsc->c_close(pgsc);
+ if (pgset != NULL)
+ (void)pgset->close(pgset, 0);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_subdbpg --
+ * Given a known-good leaf page in the master database, salvage all
+ * leaf pages corresponding to each subdb.
+ */
+static int
+__db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *master;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ BKEYDATA *bkkey, *bkdata;
+ BOVERFLOW *bo;
+ DB *pgset;
+ DBC *pgsc;
+ DBT key;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *subpg;
+ db_indx_t i;
+ db_pgno_t meta_pgno, p;
+ int ret, err_ret, t_ret;
+ char *subdbname;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ ret = err_ret = 0;
+ subdbname = NULL;
+
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+
+ /*
+ * For each entry, get and salvage the set of pages
+ * corresponding to that entry.
+ */
+ for (i = 0; i < NUM_ENT(master); i += P_INDX) {
+ bkkey = GET_BKEYDATA(dbp, master, i);
+ bkdata = GET_BKEYDATA(dbp, master, i + O_INDX);
+
+ /* Get the subdatabase name. */
+ if (B_TYPE(bkkey->type) == B_OVERFLOW) {
+ /*
+ * We can, in principle anyway, have a subdb
+ * name so long it overflows. Ick.
+ */
+ bo = (BOVERFLOW *)bkkey;
+ if ((ret = __db_safe_goff(dbp, vdp, bo->pgno, &key,
+ (void **)&subdbname, flags)) != 0) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Nul-terminate it. */
+ if ((ret = __os_realloc(dbenv,
+ key.size + 1, &subdbname)) != 0)
+ goto err;
+ subdbname[key.size] = '\0';
+ } else if (B_TYPE(bkkey->type) == B_KEYDATA) {
+ if ((ret = __os_realloc(dbenv,
+ bkkey->len + 1, &subdbname)) != 0)
+ goto err;
+ memcpy(subdbname, bkkey->data, bkkey->len);
+ subdbname[bkkey->len] = '\0';
+ }
+
+ /* Get the corresponding pgno. */
+ if (bkdata->len != sizeof(db_pgno_t)) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&meta_pgno, bkdata->data, sizeof(db_pgno_t));
+
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
+ /* If we can't get the subdb meta page, just skip the subdb. */
+ if (!IS_VALID_PGNO(meta_pgno) ||
+ (ret = mpf->get(mpf, &meta_pgno, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /*
+ * Verify the subdatabase meta page. This has two functions.
+ * First, if it's bad, we have no choice but to skip the subdb
+ * and let the pages just get printed on a later pass. Second,
+ * the access-method-specific meta verification routines record
+ * the various state info (such as the presence of dups)
+ * that we need for __db_prheader().
+ */
+ if ((ret =
+ __db_vrfy_common(dbp, vdp, subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ switch (TYPE(subpg)) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ break;
+ default:
+ /* This isn't an appropriate page; skip this subdb. */
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ /* NOTREACHED */
+ }
+
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /* Print a subdatabase header. */
+ if ((ret = __db_prheader(dbp,
+ subdbname, 0, 0, handle, callback, vdp, meta_pgno)) != 0)
+ goto err;
+
+ if ((ret = __db_meta2pgset(dbp, vdp, meta_pgno,
+ flags, pgset)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_salvage(dbp, vdp, p, subpg,
+ handle, callback, flags)) != 0)
+ err_ret = ret;
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+ if ((ret = __db_prfooter(handle, callback)) != 0)
+ goto err;
+ }
+err: if (subdbname)
+ __os_free(dbenv, subdbname);
+
+ if ((t_ret = pgset->close(pgset, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = __db_salvage_markdone(vdp, PGNO(master))) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __db_meta2pgset --
+ * Given a known-safe meta page number, return the set of pages
+ * corresponding to the database it represents. Return DB_VERIFY_BAD if
+ * it's not a suitable meta page or is invalid.
+ */
+static int
+__db_meta2pgset(dbp, vdp, pgno, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ DB *pgset;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ ret = __bam_meta2pgset(dbp, vdp, (BTMETA *)h, flags, pgset);
+ break;
+ case P_HASHMETA:
+ ret = __ham_meta2pgset(dbp, vdp, (HMETA *)h, flags, pgset);
+ break;
+ default:
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_guesspgsize --
+ * Try to guess what the pagesize is if the one on the meta page
+ * and the one in the db are invalid.
+ */
+static int
+__db_guesspgsize(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ db_pgno_t i;
+ size_t nr;
+ u_int32_t guess;
+ u_int8_t type;
+
+ for (guess = DB_MAX_PGSIZE; guess >= DB_MIN_PGSIZE; guess >>= 1) {
+ /*
+ * We try to read three pages ahead after the first one
+ * and make sure we have plausible types for all of them.
+ * If the seeks fail, continue with a smaller size;
+ * we're probably just looking past the end of the database.
+ * If they succeed and the types are reasonable, also continue
+ * with a size smaller; we may be looking at pages N,
+ * 2N, and 3N for some N > 1.
+ *
+ * As soon as we hit an invalid type, we stop and return
+ * our previous guess; that last one was probably the page size.
+ */
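+ /*
+ * (Editorial worked example, hedged: if the true page size is 4096,
+ * guesses of 65536 down to 4096 typically land the probes on real
+ * page boundaries and see plausible type bytes, so the outer loop
+ * keeps halving; at a guess of 2048 the probe lands mid-page, most
+ * likely hits an invalid type byte, and we return 2048 << 1 == 4096.)
+ */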
+ for (i = 1; i <= 3; i++) {
+ if (__os_seek(dbenv, fhp, guess,
+ i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET) != 0)
+ break;
+ if (__os_read(dbenv,
+ fhp, &type, 1, &nr) != 0 || nr == 0)
+ break;
+ if (type == P_INVALID || type >= P_PAGETYPE_MAX)
+ return (guess << 1);
+ }
+ }
+
+ /*
+ * If we're just totally confused--the corruption takes up most of the
+ * beginning pages of the database--go with the default size.
+ */
+ return (DB_DEF_IOSIZE);
+}
diff --git a/libdb/db/db_vrfyutil.c b/libdb/db/db_vrfyutil.c
new file mode 100644
index 0000000..e7fc70a
--- /dev/null
+++ b/libdb/db/db_vrfyutil.c
@@ -0,0 +1,872 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/db_am.h"
+
+static int __db_vrfy_pageinfo_create __P((DB_ENV *, VRFY_PAGEINFO **));
+static int __db_vrfy_pgset_iinc __P((DB *, db_pgno_t, int));
+
+/*
+ * __db_vrfy_dbinfo_create --
+ * Allocate and initialize a VRFY_DBINFO structure.
+ *
+ * PUBLIC: int __db_vrfy_dbinfo_create
+ * PUBLIC: __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
+ */
+int
+__db_vrfy_dbinfo_create(dbenv, pgsize, vdpp)
+ DB_ENV *dbenv;
+ u_int32_t pgsize;
+ VRFY_DBINFO **vdpp;
+{
+ DB *cdbp, *pgdbp, *pgset;
+ VRFY_DBINFO *vdp;
+ int ret;
+
+ vdp = NULL;
+ cdbp = pgdbp = pgset = NULL;
+
+ if ((ret = __os_calloc(NULL,
+ 1, sizeof(VRFY_DBINFO), (void **)&vdp)) != 0)
+ goto err;
+
+ if ((ret = db_create(&cdbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = cdbp->set_flags(cdbp, DB_DUP)) != 0)
+ goto err;
+
+ if ((ret = cdbp->set_pagesize(cdbp, pgsize)) != 0)
+ goto err;
+
+ if ((ret =
+ cdbp->open(cdbp, NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ goto err;
+
+ if ((ret = db_create(&pgdbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = pgdbp->set_pagesize(pgdbp, pgsize)) != 0)
+ goto err;
+
+ if ((ret = pgdbp->open(pgdbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ goto err;
+
+ if ((ret = __db_vrfy_pgset(dbenv, pgsize, &pgset)) != 0)
+ goto err;
+
+ LIST_INIT(&vdp->subdbs);
+ LIST_INIT(&vdp->activepips);
+
+ vdp->cdbp = cdbp;
+ vdp->pgdbp = pgdbp;
+ vdp->pgset = pgset;
+ *vdpp = vdp;
+ return (0);
+
+err: if (cdbp != NULL)
+ (void)cdbp->close(cdbp, 0);
+ if (pgdbp != NULL)
+ (void)pgdbp->close(pgdbp, 0);
+ if (vdp != NULL)
+ __os_free(dbenv, vdp);
+ return (ret);
+}
+
+/*
+ * __db_vrfy_dbinfo_destroy --
+ * Destructor for VRFY_DBINFO. Destroys VRFY_PAGEINFOs and deallocates
+ * structure.
+ *
+ * PUBLIC: int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *));
+ */
+int
+__db_vrfy_dbinfo_destroy(dbenv, vdp)
+ DB_ENV *dbenv;
+ VRFY_DBINFO *vdp;
+{
+ VRFY_CHILDINFO *c, *d;
+ int t_ret, ret;
+
+ ret = 0;
+
+ for (c = LIST_FIRST(&vdp->subdbs); c != NULL; c = d) {
+ d = LIST_NEXT(c, links);
+ __os_free(NULL, c);
+ }
+
+ if ((t_ret = vdp->pgdbp->close(vdp->pgdbp, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->cdbp->close(vdp->cdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->pgset->close(vdp->pgset, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ DB_ASSERT(LIST_FIRST(&vdp->activepips) == NULL);
+
+ __os_free(dbenv, vdp);
+ return (ret);
+}
+
+/*
+ * __db_vrfy_getpageinfo --
+ * Get a PAGEINFO structure for a given page, creating it if necessary.
+ *
+ * PUBLIC: int __db_vrfy_getpageinfo
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+ */
+int
+__db_vrfy_getpageinfo(vdp, pgno, pipp)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_PAGEINFO **pipp;
+{
+ DBT key, data;
+ DB *pgdbp;
+ VRFY_PAGEINFO *pip;
+ int ret;
+
+ /*
+ * We want a page info struct. There are three places to get it from,
+ * in decreasing order of preference:
+ *
+ * 1. vdp->activepips. If it's already "checked out", we're
+ * already using it, we return the same exact structure with a
+ * bumped refcount. This is necessary because this code is
+ * replacing array accesses, and it's common for f() to make some
+ * changes to a pip, and then call g() and h() which each make
+ * changes to the same pip. vdps are never shared between threads
+ * (they're never returned to the application), so this is safe.
+ * 2. The pgdbp. It's not in memory, but it's in the database, so
+ * get it, give it a refcount of 1, and stick it on activepips.
+ * 3. malloc. It doesn't exist yet; create it, then stick it on
+ * activepips. We'll put it in the database when we putpageinfo
+ * later.
+ */
+
+ /* Case 1. */
+ for (pip = LIST_FIRST(&vdp->activepips); pip != NULL;
+ pip = LIST_NEXT(pip, links))
+ if (pip->pgno == pgno)
+ /* Found it. */
+ goto found;
+
+ /* Case 2. */
+ pgdbp = vdp->pgdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ F_SET(&data, DB_DBT_MALLOC);
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = pgdbp->get(pgdbp, NULL, &key, &data, 0)) == 0) {
+ /* Found it. */
+		DB_ASSERT(data.size == sizeof(VRFY_PAGEINFO));
+ pip = data.data;
+ DB_ASSERT(pip->pi_refcount == 0);
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+ goto found;
+ } else if (ret != DB_NOTFOUND) /* Something nasty happened. */
+ return (ret);
+
+ /* Case 3 */
+ if ((ret = __db_vrfy_pageinfo_create(pgdbp->dbenv, &pip)) != 0)
+ return (ret);
+
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+found: pip->pi_refcount++;
+
+ *pipp = pip;
+
+ DB_ASSERT(pip->pi_refcount > 0);
+ return (0);
+}
+
+/*
+ * __db_vrfy_putpageinfo --
+ * Put back a VRFY_PAGEINFO that we're done with.
+ *
+ * PUBLIC: int __db_vrfy_putpageinfo __P((DB_ENV *,
+ * PUBLIC: VRFY_DBINFO *, VRFY_PAGEINFO *));
+ */
+int
+__db_vrfy_putpageinfo(dbenv, vdp, pip)
+ DB_ENV *dbenv;
+ VRFY_DBINFO *vdp;
+ VRFY_PAGEINFO *pip;
+{
+ DBT key, data;
+ DB *pgdbp;
+ VRFY_PAGEINFO *p;
+ int ret;
+#ifdef DIAGNOSTIC
+ int found;
+
+ found = 0;
+#endif
+
+ if (--pip->pi_refcount > 0)
+ return (0);
+
+ pgdbp = vdp->pgdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pip->pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = pip;
+ data.size = sizeof(VRFY_PAGEINFO);
+
+ if ((ret = pgdbp->put(pgdbp, NULL, &key, &data, 0)) != 0)
+ return (ret);
+
+ for (p = LIST_FIRST(&vdp->activepips); p != NULL;
+ p = LIST_NEXT(p, links))
+ if (p == pip) {
+#ifdef DIAGNOSTIC
+ found++;
+#endif
+ DB_ASSERT(p->pi_refcount == 0);
+ LIST_REMOVE(p, links);
+ break;
+ }
+#ifdef DIAGNOSTIC
+ DB_ASSERT(found == 1);
+#endif
+
+ DB_ASSERT(pip->pi_refcount == 0);
+ __os_ufree(dbenv, pip);
+ return (0);
+}
+
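+/*
+ * For illustration, a caller of the two routines above typically brackets
+ * its use of the structure with a get/put pair, along these lines:
+ *
+ *	VRFY_PAGEINFO *pip;
+ *
+ *	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ *		return (ret);
+ *	(inspect or update pip here)
+ *	if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ *		return (ret);
+ *
+ * so that the reference count bumped in getpageinfo is always released and
+ * the updated pageinfo finds its way back into the pgdbp database.
+ */
+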
+/*
+ * __db_vrfy_pgset --
+ * Create a temporary database for the storing of sets of page numbers.
+ * (A mapping from page number to int, used by the *_meta2pgset functions,
+ * as well as for keeping track of which pages the verifier has seen.)
+ *
+ * PUBLIC: int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+ */
+int
+__db_vrfy_pgset(dbenv, pgsize, dbpp)
+ DB_ENV *dbenv;
+ u_int32_t pgsize;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->set_pagesize(dbp, pgsize)) != 0)
+ goto err;
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) == 0)
+ *dbpp = dbp;
+ else
+err: (void)dbp->close(dbp, 0);
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_pgset_get --
+ * Get the value associated in a page set with a given pgno. Return
+ * a 0 value (and succeed) if we've never heard of this page.
+ *
+ * PUBLIC: int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+ */
+int
+__db_vrfy_pgset_get(dbp, pgno, valp)
+ DB *dbp;
+ db_pgno_t pgno;
+ int *valp;
+{
+ DBT key, data;
+ int ret, val;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = &val;
+ data.ulen = sizeof(int);
+ F_SET(&data, DB_DBT_USERMEM);
+
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+		DB_ASSERT(data.size == sizeof(int));
+ memcpy(&val, data.data, sizeof(int));
+ } else if (ret == DB_NOTFOUND)
+ val = 0;
+ else
+ return (ret);
+
+ *valp = val;
+ return (0);
+}
+
+/*
+ * __db_vrfy_pgset_inc --
+ * Increment the value associated with a pgno by 1.
+ *
+ * PUBLIC: int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+ */
+int
+__db_vrfy_pgset_inc(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+
+ return (__db_vrfy_pgset_iinc(dbp, pgno, 1));
+}
+
+/*
+ * __db_vrfy_pgset_dec --
+ *	Decrement the value associated with a pgno by 1.
+ *
+ * PUBLIC: int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
+ */
+int
+__db_vrfy_pgset_dec(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+
+ return (__db_vrfy_pgset_iinc(dbp, pgno, -1));
+}
+
+/*
+ * __db_vrfy_pgset_iinc --
+ * Increment the value associated with a pgno by i.
+ *
+ */
+static int
+__db_vrfy_pgset_iinc(dbp, pgno, i)
+ DB *dbp;
+ db_pgno_t pgno;
+ int i;
+{
+ DBT key, data;
+ int ret;
+ int val;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ val = 0;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = &val;
+ data.ulen = sizeof(int);
+ F_SET(&data, DB_DBT_USERMEM);
+
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+ DB_ASSERT(data.size == sizeof(int));
+ memcpy(&val, data.data, sizeof(int));
+ } else if (ret != DB_NOTFOUND)
+ return (ret);
+
+ data.size = sizeof(int);
+ val += i;
+
+ return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_vrfy_pgset_next --
+ * Given a cursor open in a pgset database, get the next page in the
+ * set.
+ *
+ * PUBLIC: int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+ */
+int
+__db_vrfy_pgset_next(dbc, pgnop)
+ DBC *dbc;
+ db_pgno_t *pgnop;
+{
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ /* We don't care about the data, just the keys. */
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ F_SET(&key, DB_DBT_USERMEM);
+ key.data = &pgno;
+ key.ulen = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) != 0)
+ return (ret);
+
+ DB_ASSERT(key.size == sizeof(db_pgno_t));
+ *pgnop = pgno;
+
+ return (0);
+}
+
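+/*
+ * For illustration, the pgset routines above are normally used together:
+ * pages are counted into the set with __db_vrfy_pgset_inc(), and the
+ * resulting set is later walked with an ordinary cursor, roughly:
+ *
+ *	DBC *pgsc;
+ *	db_pgno_t p;
+ *
+ *	if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ *		return (ret);
+ *	while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0)
+ *		(process page p)
+ *	if (ret != DB_NOTFOUND)
+ *		(handle the real error)
+ *	(void)pgsc->c_close(pgsc);
+ */
+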
+/*
+ * __db_vrfy_childcursor --
+ *	Create a cursor on the child database to walk the child list with.
+ *
+ * PUBLIC: int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+ */
+int
+__db_vrfy_childcursor(vdp, dbcp)
+ VRFY_DBINFO *vdp;
+ DBC **dbcp;
+{
+ DB *cdbp;
+ DBC *dbc;
+ int ret;
+
+ cdbp = vdp->cdbp;
+
+ if ((ret = cdbp->cursor(cdbp, NULL, &dbc, 0)) == 0)
+ *dbcp = dbc;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_childput --
+ * Add a child structure to the set for a given page.
+ *
+ * PUBLIC: int __db_vrfy_childput
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+ */
+int
+__db_vrfy_childput(vdp, pgno, cip)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO *cip;
+{
+ DB *cdbp;
+ DBC *cc;
+ DBT key, data;
+ VRFY_CHILDINFO *oldcip;
+ int ret;
+
+ cdbp = vdp->cdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * We want to avoid adding multiple entries for a single child page;
+ * we only need to verify each child once, even if a child (such
+ * as an overflow key) is multiply referenced.
+ *
+ * However, we also need to make sure that when walking the list
+ * of children, we encounter them in the order they're referenced
+ * on a page. (This permits us, for example, to verify the
+ * prev_pgno/next_pgno chain of Btree leaf pages.)
+ *
+ * Check the child database to make sure that this page isn't
+ * already a child of the specified page number. If it's not,
+ * put it at the end of the duplicate set.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ return (ret);
+ for (ret = __db_vrfy_ccset(cc, pgno, &oldcip); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &oldcip))
+ if (oldcip->pgno == cip->pgno) {
+ /*
+ * Found a matching child. Return without
+ * putting it again.
+ */
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+ return (0);
+ }
+ if (ret != DB_NOTFOUND) {
+ (void)__db_vrfy_ccclose(cc);
+ return (ret);
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+
+ data.data = cip;
+ data.size = sizeof(VRFY_CHILDINFO);
+
+ return (cdbp->put(cdbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_vrfy_ccset --
+ * Sets a cursor created with __db_vrfy_childcursor to the first
+ * child of the given pgno, and returns it in the third arg.
+ *
+ * PUBLIC: int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccset(dbc, pgno, cipp)
+ DBC *dbc;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_SET)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccnext --
+ * Gets the next child of the given cursor created with
+ * __db_vrfy_childcursor, and returns it in the memory provided in the
+ * second arg.
+ *
+ * PUBLIC: int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccnext(dbc, cipp)
+ DBC *dbc;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT_DUP)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccclose --
+ * Closes the cursor created with __db_vrfy_childcursor.
+ *
+ * This doesn't actually do anything interesting now, but it's
+ * not inconceivable that we might change the internal database usage
+ * and keep the interfaces the same, and a function call here or there
+ * seldom hurts anyone.
+ *
+ * PUBLIC: int __db_vrfy_ccclose __P((DBC *));
+ */
+int
+__db_vrfy_ccclose(dbc)
+ DBC *dbc;
+{
+
+ return (dbc->c_close(dbc));
+}
+
+/*
+ * __db_vrfy_pageinfo_create --
+ * Constructor for VRFY_PAGEINFO; allocates and initializes.
+ */
+static int
+__db_vrfy_pageinfo_create(dbenv, pgipp)
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO **pgipp;
+{
+ VRFY_PAGEINFO *pgip;
+ int ret;
+
+ /*
+ * pageinfo structs are sometimes allocated here and sometimes
+ * allocated by fetching them from a database with DB_DBT_MALLOC.
+ * There's no easy way for the destructor to tell which was
+ * used, and so we always allocate with __os_umalloc so we can free
+ * with __os_ufree.
+ */
+ if ((ret = __os_umalloc(dbenv,
+ sizeof(VRFY_PAGEINFO), (void **)&pgip)) != 0)
+ return (ret);
+ memset(pgip, 0, sizeof(VRFY_PAGEINFO));
+
+ DB_ASSERT(pgip->pi_refcount == 0);
+
+ *pgipp = pgip;
+ return (0);
+}
+
+/*
+ * __db_salvage_init --
+ * Set up salvager database.
+ *
+ * PUBLIC: int __db_salvage_init __P((VRFY_DBINFO *));
+ */
+int
+__db_salvage_init(vdp)
+ VRFY_DBINFO *vdp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0)
+ goto err;
+
+ vdp->salvage_pages = dbp;
+ return (0);
+
+err: (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_destroy --
+ * Close salvager database.
+ * PUBLIC: void __db_salvage_destroy __P((VRFY_DBINFO *));
+ */
+void
+__db_salvage_destroy(vdp)
+ VRFY_DBINFO *vdp;
+{
+ (void)vdp->salvage_pages->close(vdp->salvage_pages, 0);
+}
+
+/*
+ * __db_salvage_getnext --
+ * Get the next (first) unprinted page in the database of pages we need to
+ * print still. Delete entries for any already-printed pages we encounter
+ * in this search, as well as the page we're returning.
+ *
+ * PUBLIC: int __db_salvage_getnext
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+ */
+int
+__db_salvage_getnext(vdp, pgnop, pgtypep)
+ VRFY_DBINFO *vdp;
+ db_pgno_t *pgnop;
+ u_int32_t *pgtypep;
+{
+ DB *dbp;
+ DBC *dbc;
+ DBT key, data;
+ int ret;
+ u_int32_t pgtype;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ DB_ASSERT(data.size == sizeof(u_int32_t));
+ memcpy(&pgtype, data.data, sizeof(pgtype));
+
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if (pgtype != SALVAGE_IGNORE)
+ goto found;
+ }
+
+ /* No more entries--ret probably equals DB_NOTFOUND. */
+
+ if (0) {
+found: DB_ASSERT(key.size == sizeof(db_pgno_t));
+ DB_ASSERT(data.size == sizeof(u_int32_t));
+
+ *pgnop = *(db_pgno_t *)key.data;
+ *pgtypep = *(u_int32_t *)data.data;
+ }
+
+err: (void)dbc->c_close(dbc);
+ return (ret);
+}
+
+/*
+ * __db_salvage_isdone --
+ * Return whether or not the given pgno is already marked
+ * SALVAGE_IGNORE (meaning that we don't need to print it again).
+ *
+ * Returns DB_KEYEXIST if it is marked, 0 if not, or another error on
+ * error.
+ *
+ * PUBLIC: int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_isdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int ret;
+ u_int32_t currtype;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+	/*
+	 * Look up the entry for this page, keyed by pgno with the page type
+	 * as data.  If it's present and already marked SALVAGE_IGNORE, the
+	 * page has been printed; any other marking (or no entry at all)
+	 * means it still needs to be dealt with.
+	 */
+ ret = dbp->get(dbp, NULL, &key, &data, 0);
+ if (ret == 0) {
+ /*
+ * The key's already here. Check and see if it's already
+ * marked done. If it is, return DB_KEYEXIST. If it's not,
+ * return 0.
+ */
+ if (currtype == SALVAGE_IGNORE)
+ return (DB_KEYEXIST);
+ else
+ return (0);
+ } else if (ret != DB_NOTFOUND)
+ return (ret);
+
+ /* The pgno is not yet marked anything; return 0. */
+ return (0);
+}
+
+/*
+ * __db_salvage_markdone --
+ * Mark as done a given page.
+ *
+ * PUBLIC: int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_markdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int pgtype, ret;
+ u_int32_t currtype;
+
+ pgtype = SALVAGE_IGNORE;
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there and is marked done.
+ * If it's there and is marked anything else, that's fine--we
+	 * want to mark it done, but __db_salvage_isdone only lets
+ * us know if it's marked IGNORE.
+ *
+ * We don't want to return DB_KEYEXIST, though; this will
+ * likely get passed up all the way and make no sense to the
+ * application. Instead, use DB_VERIFY_BAD to indicate that
+ * we've seen this page already--it probably indicates a
+ * multiply-linked page.
+ */
+ if ((ret = __db_salvage_isdone(vdp, pgno)) != 0)
+ return (ret == DB_KEYEXIST ? DB_VERIFY_BAD : ret);
+
+ data.size = sizeof(u_int32_t);
+ data.data = &pgtype;
+
+ return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_salvage_markneeded --
+ * If it has not yet been printed, make note of the fact that a page
+ * must be dealt with later.
+ *
+ * PUBLIC: int __db_salvage_markneeded
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_salvage_markneeded(vdp, pgno, pgtype)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ data.data = &pgtype;
+ data.size = sizeof(u_int32_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there, in which case it's presumably
+ * already been marked done.
+ */
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ return (ret == DB_KEYEXIST ? 0 : ret);
+}
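+
+/*
+ * For illustration, the salvage-tracking routines above form a simple work
+ * list: pages discovered during the walk are queued with
+ * __db_salvage_markneeded(vdp, pgno, pgtype), pages that have been printed
+ * are retired with __db_salvage_markdone(vdp, pgno), and a final sweep of
+ * the form
+ *
+ *	while (__db_salvage_getnext(vdp, &pgno, &pgtype) == 0)
+ *		(print page pgno as a page of type pgtype)
+ *
+ * picks up anything that was queued but never reached; the loop ends when
+ * getnext returns DB_NOTFOUND.
+ */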
diff --git a/libdb/db185/db185.c b/libdb/db185/db185.c
new file mode 100644
index 0000000..90358d3
--- /dev/null
+++ b/libdb/db185/db185.c
@@ -0,0 +1,594 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db185_int.h"
+
+static int db185_close __P((DB185 *));
+static int db185_compare __P((DB *, const DBT *, const DBT *));
+static int db185_del __P((const DB185 *, const DBT185 *, u_int));
+static int db185_fd __P((const DB185 *));
+static int db185_get __P((const DB185 *, const DBT185 *, DBT185 *, u_int));
+static u_int32_t
+ db185_hash __P((DB *, const void *, u_int32_t));
+static void db185_openstderr __P((DB_FH *));
+static size_t db185_prefix __P((DB *, const DBT *, const DBT *));
+static int db185_put __P((const DB185 *, DBT185 *, const DBT185 *, u_int));
+static int db185_seq __P((const DB185 *, DBT185 *, DBT185 *, u_int));
+static int db185_sync __P((const DB185 *, u_int));
+
+/*
+ * EXTERN: #ifdef _DB185_INT_H_
+ * EXTERN: DB185 *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #else
+ * EXTERN: DB *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #endif
+ */
+DB185 *
+__db185_open(file, oflags, mode, type, openinfo)
+ const char *file;
+ int oflags, mode;
+ DBTYPE type;
+ const void *openinfo;
+{
+ const BTREEINFO *bi;
+ const HASHINFO *hi;
+ const RECNOINFO *ri;
+ DB *dbp;
+ DB185 *db185p;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = NULL;
+ db185p = NULL;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(DB185), &db185p)) != 0)
+ goto err;
+
+ /*
+ * !!!
+ * The DBTYPE enum wasn't initialized in DB 185, so it's off-by-one
+ * from DB 2.0.
+ */
+ switch (type) {
+ case 0: /* DB_BTREE */
+ type = DB_BTREE;
+ if ((bi = openinfo) != NULL) {
+ if (bi->flags & ~R_DUP)
+ goto einval;
+ if (bi->flags & R_DUP)
+ (void)dbp->set_flags(dbp, DB_DUP);
+ if (bi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, bi->cachesize, 0);
+ if (bi->minkeypage != 0)
+ (void)dbp->set_bt_minkey(dbp, bi->minkeypage);
+ if (bi->psize != 0)
+ (void)dbp->set_pagesize(dbp, bi->psize);
+ /*
+ * !!!
+ * Comparisons and prefix calls work because the DBT
+ * structures in 1.85 and 2.0 have the same initial
+ * fields.
+ */
+ if (bi->prefix != NULL) {
+ db185p->prefix = bi->prefix;
+ dbp->set_bt_prefix(dbp, db185_prefix);
+ }
+ if (bi->compare != NULL) {
+ db185p->compare = bi->compare;
+ dbp->set_bt_compare(dbp, db185_compare);
+ }
+ if (bi->lorder != 0)
+ dbp->set_lorder(dbp, bi->lorder);
+ }
+ break;
+ case 1: /* DB_HASH */
+ type = DB_HASH;
+ if ((hi = openinfo) != NULL) {
+ if (hi->bsize != 0)
+ (void)dbp->set_pagesize(dbp, hi->bsize);
+ if (hi->ffactor != 0)
+ (void)dbp->set_h_ffactor(dbp, hi->ffactor);
+ if (hi->nelem != 0)
+ (void)dbp->set_h_nelem(dbp, hi->nelem);
+ if (hi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, hi->cachesize, 0);
+ if (hi->hash != NULL) {
+ db185p->hash = hi->hash;
+ (void)dbp->set_h_hash(dbp, db185_hash);
+ }
+ if (hi->lorder != 0)
+ dbp->set_lorder(dbp, hi->lorder);
+ }
+
+ break;
+ case 2: /* DB_RECNO */
+ type = DB_RECNO;
+
+ /* DB 1.85 did renumbering by default. */
+ (void)dbp->set_flags(dbp, DB_RENUMBER);
+
+ /*
+ * !!!
+ * The file name given to DB 1.85 recno is the name of the DB
+ * 2.0 backing file. If the file doesn't exist, create it if
+ * the user has the O_CREAT flag set, DB 1.85 did it for you,
+ * and DB 2.0 doesn't.
+ *
+ * !!!
+ * Setting the file name to NULL specifies that we're creating
+ * a temporary backing file, in DB 2.X. If we're opening the
+ * DB file read-only, change the flags to read-write, because
+ * temporary backing files cannot be opened read-only, and DB
+ * 2.X will return an error. We are cheating here -- if the
+ * application does a put on the database, it will succeed --
+ * although that would be a stupid thing for the application
+ * to do.
+ *
+ * !!!
+ * Note, the file name in DB 1.85 was a const -- we don't do
+ * that in DB 2.0, so do that cast.
+ */
+ if (file != NULL) {
+ if (oflags & O_CREAT && __os_exists(file, NULL) != 0)
+ if (__os_openhandle(NULL, file,
+ oflags, mode, &fh) == 0)
+ (void)__os_closehandle(NULL, &fh);
+ (void)dbp->set_re_source(dbp, file);
+
+ if (O_RDONLY)
+ oflags &= ~O_RDONLY;
+ oflags |= O_RDWR;
+ file = NULL;
+ }
+
+ if ((ri = openinfo) != NULL) {
+ /*
+ * !!!
+ * We can't support the bfname field.
+ */
+#define BFMSG "DB: DB 1.85's recno bfname field is not supported.\n"
+ if (ri->bfname != NULL) {
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh,
+ BFMSG, sizeof(BFMSG) - 1, &nw);
+ goto einval;
+ }
+
+ if (ri->flags & ~(R_FIXEDLEN | R_NOKEY | R_SNAPSHOT))
+ goto einval;
+ if (ri->flags & R_FIXEDLEN) {
+ if (ri->bval != 0)
+ (void)dbp->set_re_pad(dbp, ri->bval);
+ if (ri->reclen != 0)
+ (void)dbp->set_re_len(dbp, ri->reclen);
+ } else
+ if (ri->bval != 0)
+ (void)dbp->set_re_delim(dbp, ri->bval);
+
+ /*
+ * !!!
+ * We ignore the R_NOKEY flag, but that's okay, it was
+ * only an optimization that was never implemented.
+ */
+ if (ri->flags & R_SNAPSHOT)
+ (void)dbp->set_flags(dbp, DB_SNAPSHOT);
+
+ if (ri->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, ri->cachesize, 0);
+ if (ri->psize != 0)
+ (void)dbp->set_pagesize(dbp, ri->psize);
+ if (ri->lorder != 0)
+ dbp->set_lorder(dbp, ri->lorder);
+ }
+ break;
+ default:
+ goto einval;
+ }
+
+ db185p->close = db185_close;
+ db185p->del = db185_del;
+ db185p->fd = db185_fd;
+ db185p->get = db185_get;
+ db185p->put = db185_put;
+ db185p->seq = db185_seq;
+ db185p->sync = db185_sync;
+
+ /*
+ * Store a reference so we can indirect from the DB 1.85 structure
+ * to the underlying DB structure, and vice-versa. This has to be
+ * done BEFORE the DB::open method call because the hash callback
+	 * is exercised as part of hash database initialization.
+ */
+ db185p->dbp = dbp;
+ dbp->api_internal = db185p;
+
+ /* Open the database. */
+ if ((ret = dbp->open(dbp, NULL,
+ file, NULL, type, __db_oflags(oflags), mode)) != 0)
+ goto err;
+
+ /* Create the cursor used for sequential ops. */
+ if ((ret = dbp->cursor(dbp, NULL, &((DB185 *)db185p)->dbc, 0)) != 0)
+ goto err;
+
+ return (db185p);
+
+err: if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ if (db185p != NULL)
+ __os_free(NULL, db185p);
+ if (dbp != NULL)
+ (void)dbp->close(dbp, 0);
+
+ __os_set_errno(ret);
+ return (NULL);
+}
+
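+/*
+ * For illustration, a DB 1.85-style caller of this compatibility layer
+ * looks roughly like the following (type 0 selects a btree, per the
+ * off-by-one mapping above; errors are reported through errno, 1.85-style):
+ *
+ *	DB185 *db;
+ *	DBT185 key, data;
+ *
+ *	if ((db = __db185_open(
+ *	    "file.db", O_CREAT | O_RDWR, 0644, 0, NULL)) == NULL)
+ *		(errno describes the failure)
+ *	key.data = "fruit";  key.size = 5;
+ *	data.data = "apple"; data.size = 5;
+ *	if (db->put(db, &key, &data, 0) != 0)
+ *		(handle the error)
+ *	(void)db->close(db);
+ */
+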
+static int
+db185_close(db185p)
+ DB185 *db185p;
+{
+ DB *dbp;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ ret = dbp->close(dbp, 0);
+
+ __os_free(NULL, db185p);
+
+ if (ret == 0)
+ return (0);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_del(db185p, key185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+
+ if (flags & ~R_CURSOR)
+ goto einval;
+ if (flags & R_CURSOR)
+ ret = db185p->dbc->c_del(db185p->dbc, 0);
+ else
+ ret = dbp->del(dbp, NULL, &key, 0);
+
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_fd(db185p)
+ const DB185 *db185p;
+{
+ DB *dbp;
+ int fd, ret;
+
+ dbp = db185p->dbp;
+
+ if ((ret = dbp->fd(dbp, &fd)) == 0)
+ return (fd);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_get(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ if (flags)
+ goto einval;
+
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_put(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185;
+ const DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBC *dbcp_put;
+ DBT key, data;
+ int ret, t_ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case 0:
+ ret = dbp->put(dbp, NULL, &key, &data, 0);
+ break;
+ case R_CURSOR:
+ ret = db185p->dbc->c_put(db185p->dbc, &key, &data, DB_CURRENT);
+ break;
+ case R_IAFTER:
+ case R_IBEFORE:
+ if (dbp->type != DB_RECNO)
+ goto einval;
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0)
+ break;
+ if ((ret =
+ dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) == 0) {
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+ ret = dbcp_put->c_put(dbcp_put, &key, &data,
+ flags == R_IAFTER ? DB_AFTER : DB_BEFORE);
+ }
+ if ((t_ret = dbcp_put->c_close(dbcp_put)) != 0 && ret == 0)
+ ret = t_ret;
+ break;
+ case R_NOOVERWRITE:
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ break;
+ case R_SETCURSOR:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+
+ if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
+ break;
+ ret =
+ db185p->dbc->c_get(db185p->dbc, &key, &data, DB_SET_RANGE);
+ break;
+ default:
+ goto einval;
+ }
+
+ switch (ret) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ return (0);
+ case DB_KEYEXIST:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_seq(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185, *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case R_CURSOR:
+ flags = DB_SET_RANGE;
+ break;
+ case R_FIRST:
+ flags = DB_FIRST;
+ break;
+ case R_LAST:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_LAST;
+ break;
+ case R_NEXT:
+ flags = DB_NEXT;
+ break;
+ case R_PREV:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_PREV;
+ break;
+ default:
+ goto einval;
+ }
+ switch (ret = db185p->dbc->c_get(db185p->dbc, &key, &data, flags)) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_sync(db185p, flags)
+ const DB185 *db185p;
+ u_int flags;
+{
+ DB *dbp;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ switch (flags) {
+ case 0:
+ break;
+ case R_RECNOSYNC:
+ /*
+ * !!!
+ * We can't support the R_RECNOSYNC flag.
+ */
+#define RSMSG "DB: DB 1.85's R_RECNOSYNC sync flag is not supported.\n"
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh, RSMSG, sizeof(RSMSG) - 1, &nw);
+ goto einval;
+ default:
+ goto einval;
+ }
+
+ if ((ret = dbp->sync(dbp, 0)) == 0)
+ return (0);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static void
+db185_openstderr(fhp)
+ DB_FH *fhp;
+{
+ /* Dummy up the results of an __os_openhandle() on stderr. */
+ memset(fhp, 0, sizeof(*fhp));
+ F_SET(fhp, DB_FH_VALID);
+
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ fhp->fd = STDERR_FILENO;
+}
+
+/*
+ * db185_compare --
+ * Cutout routine to call the user's Btree comparison function.
+ */
+static int
+db185_compare(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->api_internal)->compare(a, b));
+}
+
+/*
+ * db185_prefix --
+ * Cutout routine to call the user's Btree prefix function.
+ */
+static size_t
+db185_prefix(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->api_internal)->prefix(a, b));
+}
+
+/*
+ * db185_hash --
+ * Cutout routine to call the user's hash function.
+ */
+static u_int32_t
+db185_hash(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ return (((DB185 *)dbp->api_internal)->hash(key, (size_t)len));
+}
diff --git a/libdb/db185/db185_int.in b/libdb/db185/db185_int.in
new file mode 100644
index 0000000..3b00742
--- /dev/null
+++ b/libdb/db185/db185_int.in
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _DB185_INT_H_
+#define _DB185_INT_H_
+
+/* Routine flags. */
+#define R_CURSOR 1 /* del, put, seq */
+#define __R_UNUSED 2 /* UNUSED */
+#define R_FIRST 3 /* seq */
+#define R_IAFTER 4 /* put (RECNO) */
+#define R_IBEFORE 5 /* put (RECNO) */
+#define R_LAST 6 /* seq (BTREE, RECNO) */
+#define R_NEXT 7 /* seq */
+#define R_NOOVERWRITE 8 /* put */
+#define R_PREV 9 /* seq (BTREE, RECNO) */
+#define R_SETCURSOR 10 /* put (RECNO) */
+#define R_RECNOSYNC 11 /* sync (RECNO) */
+
+typedef struct {
+ void *data; /* data */
+ size_t size; /* data length */
+} DBT185;
+
+/* Access method description structure. */
+typedef struct __db185 {
+ DBTYPE type; /* Underlying db type. */
+ int (*close) __P((struct __db185 *));
+ int (*del) __P((const struct __db185 *, const DBT185 *, u_int));
+ int (*get)
+ __P((const struct __db185 *, const DBT185 *, DBT185 *, u_int));
+ int (*put)
+ __P((const struct __db185 *, DBT185 *, const DBT185 *, u_int));
+ int (*seq)
+ __P((const struct __db185 *, DBT185 *, DBT185 *, u_int));
+ int (*sync) __P((const struct __db185 *, u_int));
+ DB *dbp; /* DB structure. Was void *internal. */
+ int (*fd) __P((const struct __db185 *));
+
+ /*
+ * !!!
+ * The following elements added to the end of the DB 1.85 DB
+ * structure.
+ */
+ DBC *dbc; /* DB cursor. */
+ /* Various callback functions. */
+ int (*compare) __P((const DBT *, const DBT *));
+ size_t (*prefix) __P((const DBT *, const DBT *));
+ u_int32_t (*hash) __P((const void *, size_t));
+} DB185;
+
+/* Structure used to pass parameters to the btree routines. */
+typedef struct {
+#define R_DUP 0x01 /* duplicate keys */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t maxkeypage; /* maximum keys per page */
+ u_int32_t minkeypage; /* minimum keys per page */
+ u_int32_t psize; /* page size */
+ int (*compare) /* comparison function */
+ __P((const DBT *, const DBT *));
+ size_t (*prefix) /* prefix function */
+ __P((const DBT *, const DBT *));
+ int lorder; /* byte order */
+} BTREEINFO;
+
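+/*
+ * For illustration, a 1.85-style caller normally zeroes this structure and
+ * fills in only the fields it cares about before passing it as the
+ * openinfo argument of the open call; fields left at zero keep their
+ * defaults:
+ *
+ *	BTREEINFO bi;
+ *
+ *	memset(&bi, 0, sizeof(bi));
+ *	bi.cachesize = 64 * 1024;
+ *	bi.psize = 4096;
+ */
+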
+/* Structure used to pass parameters to the hashing routines. */
+typedef struct {
+ u_int32_t bsize; /* bucket size */
+ u_int32_t ffactor; /* fill factor */
+ u_int32_t nelem; /* number of elements */
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t /* hash function */
+ (*hash) __P((const void *, size_t));
+ int lorder; /* byte order */
+} HASHINFO;
+
+/* Structure used to pass parameters to the record routines. */
+typedef struct {
+#define R_FIXEDLEN 0x01 /* fixed-length records */
+#define R_NOKEY 0x02 /* key not required */
+#define R_SNAPSHOT 0x04 /* snapshot the input */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t psize; /* page size */
+ int lorder; /* byte order */
+ size_t reclen; /* record length (fixed-length records) */
+	u_char bval;		/* delimiting byte (variable-length records) */
+ char *bfname; /* btree file name */
+} RECNOINFO;
+#endif /* !_DB185_INT_H_ */
diff --git a/libdb/db_archive/db_archive.c b/libdb/db_archive/db_archive.c
new file mode 100644
index 0000000..6ef6ccf
--- /dev/null
+++ b/libdb/db_archive/db_archive.c
@@ -0,0 +1,180 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "ah:lP:sVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ free(list);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_archive [-alsVv] [-h home] [-P password]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_checkpoint/db_checkpoint.c b/libdb/db_checkpoint/db_checkpoint.c
new file mode 100644
index 0000000..7ae7d7e
--- /dev/null
+++ b/libdb/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,243 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ /*
+ * !!!
+ * Don't allow a fully unsigned 32-bit number, some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = passwd = NULL;
+ while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "DB_ENV->memp_register: failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
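+	/*
+	 * For example, "-p 5" with no "-k" sleeps 300 seconds between
+	 * checkpoint attempts, while any "-k" value wakes up every 30
+	 * seconds and lets txn_checkpoint decide whether enough log has
+	 * accumulated.
+	 */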
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv,
+ kbytes, minutes, flags)) != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_checkpoint [-1Vv]",
+ "[-h home] [-k kbytes] [-L file] [-P password] [-p min]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_deadlock/db_deadlock.c b/libdb/db_deadlock/db_deadlock.c
new file mode 100644
index 0000000..71faf14
--- /dev/null
+++ b/libdb/db_deadlock/db_deadlock.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
+ u_int32_t atype;
+ time_t now;
+ long secs, usecs;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile, *str;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ secs = usecs = 0;
+ e_close = exitval = verbose = 0;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ return (usage());
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ return (usage());
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
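+			/*
+			 * For example, "-t 1.250000" yields secs == 1 and
+			 * usecs == 250000; the digits after the dot are read
+			 * as a literal microsecond count, not as a decimal
+			 * fraction.
+			 */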
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (usage());
+
+ break;
+
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_deadlock [-Vv]",
+ "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_dump/db_dump.c b/libdb/db_dump/db_dump.c
new file mode 100644
index 0000000..7f0d783
--- /dev/null
+++ b/libdb/db_dump/db_dump.c
@@ -0,0 +1,611 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_init __P((DB_ENV *, char *, int, u_int32_t, int *));
+int dump __P((DB *, int, int));
+int dump_sub __P((DB_ENV *, DB *, char *, int, int));
+int is_sub __P((DB *, int *));
+int main __P((int, char *[]));
+int show_subs __P((DB *));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_dump";
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t cache;
+ int ch, d_close;
+ int e_close, exitval, keyflag, lflag, nflag, pflag, private;
+ int ret, Rflag, rflag, resize, subs;
+ char *dopt, *home, *passwd, *subname;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+ keyflag = 0;
+ cache = MEGABYTE;
+ private = 0;
+ dopt = home = passwd = subname = NULL;
+ while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF)
+ switch (ch) {
+ case 'd':
+ dopt = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ keyflag = 1;
+ break;
+ case 'l':
+ lflag = 1;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case 's':
+ subname = optarg;
+ break;
+ case 'R':
+ Rflag = 1;
+ /* DB_AGGRESSIVE requires DB_SALVAGE */
+ /* FALLTHROUGH */
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (usage());
+
+ if (dopt != NULL && pflag) {
+ fprintf(stderr,
+ "%s: the -d and -p options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ if (lflag && subname != NULL) {
+ fprintf(stderr,
+ "%s: the -l and -s options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ if (keyflag && rflag) {
+		fprintf(stderr,
+		    "%s: the -k and -r or -R options may not both be specified\n",
+		    progname);
+ return (EXIT_FAILURE);
+ }
+
+ if (subname != NULL && rflag) {
+		fprintf(stderr,
+		    "%s: the -s and -r or -R options may not both be specified\n",
+		    progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto err;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto err;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto err;
+ }
+ }
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto err;
+ }
+
+ /* Initialize the environment. */
+ if (db_init(dbenv, home, rflag, cache, &private) != 0)
+ goto err;
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ d_close = 1;
+
+ /*
+ * If we're salvaging, don't do an open; it might not be safe.
+ * Dispatch now into the salvager.
+ */
+ if (rflag) {
+ if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+ DB_SALVAGE |
+ (Rflag ? DB_AGGRESSIVE : 0) |
+ (pflag ? DB_PRINTABLE : 0))) != 0)
+ goto err;
+ exitval = 0;
+ goto done;
+ }
+
+ if ((ret = dbp->open(dbp, NULL,
+ argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", argv[0]);
+ goto err;
+ }
+ if (private != 0) {
+ if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ if (dopt != NULL) {
+		if ((ret = __db_dump(dbp, dopt, NULL)) != 0) {
+ dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+ goto err;
+ }
+ } else if (lflag) {
+ if (is_sub(dbp, &subs))
+ goto err;
+ if (subs == 0) {
+ dbp->errx(dbp,
+ "%s: does not contain multiple databases", argv[0]);
+ goto err;
+ }
+ if (show_subs(dbp))
+ goto err;
+ } else {
+ subs = 0;
+ if (subname == NULL && is_sub(dbp, &subs))
+ goto err;
+ if (subs) {
+ if (dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
+ goto err;
+ } else
+ if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ dump(dbp, pflag, keyflag))
+ goto err;
+ }
+
+ if (0) {
+err: exitval = 1;
+ }
+done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(dbenv, home, is_salvage, cache, is_privatep)
+ DB_ENV *dbenv;
+ char *home;
+ int is_salvage;
+ u_int32_t cache;
+ int *is_privatep;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
+ *
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
+ */
+ *is_privatep = 0;
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ *is_privatep = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * is_sub --
+ * Return if the database contains subdatabases.
+ */
+int
+is_sub(dbp, yesno)
+ DB *dbp;
+ int *yesno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_HASH_STAT *hsp;
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ free(btsp);
+ break;
+ case DB_HASH:
+ if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ free(hsp);
+ break;
+ case DB_QUEUE:
+ break;
+ default:
+ dbp->errx(dbp, "unknown database type");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump_sub --
+ * Dump out the records for a DB containing subdatabases.
+ */
+int
+dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+ DB_ENV *dbenv;
+ DB *parent_dbp;
+ char *parent_name;
+ int pflag, keyflag;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+ char *subdb;
+
+ /*
+ * Get a cursor and step through the database, dumping out each
+ * subdatabase.
+ */
+ if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ /* Nul terminate the subdatabase name. */
+ if ((subdb = malloc(key.size + 1)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ memcpy(subdb, key.data, key.size);
+ subdb[key.size] = '\0';
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ free(subdb);
+ return (1);
+ }
+ if ((ret = dbp->open(dbp, NULL,
+ parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+ dbp->err(dbp, ret,
+ "DB->open: %s:%s", parent_name, subdb);
+ if (ret == 0 &&
+ (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ dump(dbp, pflag, keyflag)))
+ ret = 1;
+ (void)dbp->close(dbp, 0);
+ free(subdb);
+ if (ret != 0)
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * show_subs --
+ * Display the subdatabases for a database.
+ */
+int
+show_subs(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+
+ /*
+ * Get a cursor and step through the database, printing out the key
+ * of each key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ if ((ret = __db_prdbt(&key, 1, NULL, stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump --
+ * Dump out the records for a DB.
+ */
+int
+dump(dbp, pflag, keyflag)
+ DB *dbp;
+ int pflag, keyflag;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DBT keyret, dataret;
+ db_recno_t recno;
+ int is_recno, failed, ret;
+ void *pointer;
+
+ /*
+ * Get a cursor and step through the database, printing out each
+ * key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ failed = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
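+	/*
+	 * Start with a 1MB user-owned buffer for bulk retrieval.  If a
+	 * single item doesn't fit, the DB_MULTIPLE_KEY get below returns
+	 * ENOMEM and the buffer is grown to data.size and the get retried.
+	 */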
+ data.data = malloc(1024 * 1024);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = 1024 * 1024;
+ data.flags = DB_DBT_USERMEM;
+ is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+ keyflag = is_recno ? keyflag : 1;
+ if (is_recno) {
+ keyret.data = &recno;
+ keyret.size = sizeof(recno);
+ }
+
+retry:
+ while ((ret =
+ dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+ DB_MULTIPLE_INIT(pointer, &data);
+ for (;;) {
+ if (is_recno)
+ DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+ recno, dataret.data, dataret.size);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, keyret.data,
+ keyret.size, dataret.data, dataret.size);
+
+ if (dataret.data == NULL)
+ break;
+
+ if ((keyflag && (ret = __db_prdbt(&keyret,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&dataret, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ failed = 1;
+ goto err;
+ }
+ }
+ }
+ if (ret == ENOMEM) {
+ data.data = realloc(data.data, data.size);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = data.size;
+ goto retry;
+ }
+
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ failed = 1;
+ }
+
+err: if (data.data != NULL)
+ free(data.data);
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ failed = 1;
+ }
+
+ (void)__db_prfooter(stdout, __db_verify_callback);
+ return (failed);
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_dump [-klNprRV]",
+ "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_dump185/db_dump185.c b/libdb/db_dump185/db_dump185.c
new file mode 100644
index 0000000..7ae3288
--- /dev/null
+++ b/libdb/db_dump185/db_dump185.c
@@ -0,0 +1,355 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef lint
+static char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static char revid[] =
+ "$Id$";
+#endif
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+/* Hash Table Information */
+typedef struct hashhdr185 { /* Disk resident portion */
+ int magic; /* Magic NO for hash tables */
+ int version; /* Version ID */
+ u_int32_t lorder; /* Byte Order */
+ int bsize; /* Bucket/Page Size */
+ int bshift; /* Bucket shift */
+ int dsize; /* Directory Size */
+ int ssize; /* Segment Size */
+ int sshift; /* Segment shift */
+ int ovfl_point; /* Where overflow pages are being
+ * allocated */
+ int last_freed; /* Last overflow page freed */
+ int max_bucket; /* ID of Maximum bucket in use */
+ int high_mask; /* Mask to modulo into entire table */
+ int low_mask; /* Mask to modulo into lower half of
+ * table */
+ int ffactor; /* Fill factor */
+ int nkeys; /* Number of keys in hash table */
+} HASHHDR185;
+typedef struct htab185 { /* Memory resident data structure */
+ HASHHDR185 hdr; /* Header */
+} HTAB185;
+
+/* Hash Table Information */
+typedef struct hashhdr186 { /* Disk resident portion */
+ int32_t magic; /* Magic NO for hash tables */
+ int32_t version; /* Version ID */
+ int32_t lorder; /* Byte Order */
+ int32_t bsize; /* Bucket/Page Size */
+ int32_t bshift; /* Bucket shift */
+ int32_t ovfl_point; /* Where overflow pages are being allocated */
+ int32_t last_freed; /* Last overflow page freed */
+ int32_t max_bucket; /* ID of Maximum bucket in use */
+ int32_t high_mask; /* Mask to modulo into entire table */
+ int32_t low_mask; /* Mask to modulo into lower half of table */
+ int32_t ffactor; /* Fill factor */
+ int32_t nkeys; /* Number of keys in hash table */
+ int32_t hdrpages; /* Size of table header */
+ int32_t h_charkey; /* value of hash(CHARKEY) */
+#define NCACHED 32 /* number of bit maps and spare points */
+ int32_t spares[NCACHED];/* spare pages for overflow */
+ /* address of overflow page bitmaps */
+ u_int16_t bitmaps[NCACHED];
+} HASHHDR186;
+typedef struct htab186 { /* Memory resident data structure */
+ void *unused[2];
+ HASHHDR186 hdr; /* Header */
+} HTAB186;
+
+typedef struct _epgno {
+ u_int32_t pgno; /* the page number */
+ u_int16_t index; /* the index on the page */
+} EPGNO;
+
+typedef struct _epg {
+ void *page; /* the (pinned) page */
+ u_int16_t index; /* the index on the page */
+} EPG;
+
+typedef struct _cursor {
+ EPGNO pg; /* B: Saved tree reference. */
+ DBT key; /* B: Saved key, or key.data == NULL. */
+ u_int32_t rcursor; /* R: recno cursor (1-based) */
+
+#define CURS_ACQUIRE 0x01 /* B: Cursor needs to be reacquired. */
+#define CURS_AFTER 0x02 /* B: Unreturned cursor after key. */
+#define CURS_BEFORE 0x04 /* B: Unreturned cursor before key. */
+#define CURS_INIT 0x08 /* RB: Cursor initialized. */
+ u_int8_t flags;
+} CURSOR;
+
+/* The in-memory btree/recno data structure. */
+typedef struct _btree {
+ void *bt_mp; /* memory pool cookie */
+
+ void *bt_dbp; /* pointer to enclosing DB */
+
+ EPG bt_cur; /* current (pinned) page */
+ void *bt_pinned; /* page pinned across calls */
+
+ CURSOR bt_cursor; /* cursor */
+
+ EPGNO bt_stack[50]; /* stack of parent pages */
+ EPGNO *bt_sp; /* current stack pointer */
+
+ DBT bt_rkey; /* returned key */
+ DBT bt_rdata; /* returned data */
+
+ int bt_fd; /* tree file descriptor */
+
+ u_int32_t bt_free; /* next free page */
+ u_int32_t bt_psize; /* page size */
+ u_int16_t bt_ovflsize; /* cut-off for key/data overflow */
+ int bt_lorder; /* byte order */
+ /* sorted order */
+ enum { NOT, BACK, FORWARD } bt_order;
+ EPGNO bt_last; /* last insert */
+
+ /* B: key comparison function */
+ int (*bt_cmp) __P((DBT *, DBT *));
+ /* B: prefix comparison function */
+ size_t (*bt_pfx) __P((DBT *, DBT *));
+ /* R: recno input function */
+ int (*bt_irec) __P((struct _btree *, u_int32_t));
+
+ FILE *bt_rfp; /* R: record FILE pointer */
+ int bt_rfd; /* R: record file descriptor */
+
+ void *bt_cmap; /* R: current point in mapped space */
+ void *bt_smap; /* R: start of mapped space */
+ void *bt_emap; /* R: end of mapped space */
+ size_t bt_msize; /* R: size of mapped region. */
+
+ u_int32_t bt_nrecs; /* R: number of records */
+ size_t bt_reclen; /* R: fixed record length */
+ u_char bt_bval; /* R: delimiting byte/pad character */
+
+/*
+ * NB:
+ * B_NODUPS and R_RECNO are stored on disk, and may not be changed.
+ */
+#define B_INMEM 0x00001 /* in-memory tree */
+#define B_METADIRTY 0x00002 /* need to write metadata */
+#define B_MODIFIED 0x00004 /* tree modified */
+#define B_NEEDSWAP 0x00008 /* if byte order requires swapping */
+#define B_RDONLY 0x00010 /* read-only tree */
+
+#define B_NODUPS 0x00020 /* no duplicate keys permitted */
+#define R_RECNO 0x00080 /* record oriented tree */
+
+#define R_CLOSEFP 0x00040 /* opened a file pointer */
+#define R_EOF 0x00100 /* end of input file reached. */
+#define R_FIXLEN 0x00200 /* fixed length records */
+#define R_MEMMAPPED 0x00400 /* memory mapped file. */
+#define R_INMEM 0x00800 /* in-memory file */
+#define R_MODIFIED 0x01000 /* modified file */
+#define R_RDONLY 0x02000 /* read-only file */
+
+#define B_DB_LOCK 0x04000 /* DB_LOCK specified. */
+#define B_DB_SHMEM 0x08000 /* DB_SHMEM specified. */
+#define B_DB_TXN 0x10000 /* DB_TXN specified. */
+ u_int32_t flags;
+} BTREE;
+
+void db_btree __P((DB *, int));
+void db_hash __P((DB *, int));
+void dbt_dump __P((DBT *));
+void dbt_print __P((DBT *));
+int main __P((int, char *[]));
+int usage __P((void));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DBT key, data;
+ int ch, pflag, rval;
+
+ pflag = 0;
+ while ((ch = getopt(argc, argv, "f:p")) != EOF)
+ switch (ch) {
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "db_dump185: %s: %s\n",
+ optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (usage());
+
+ if ((dbp = dbopen(argv[0], O_RDONLY, 0, DB_BTREE, NULL)) == NULL) {
+ if ((dbp =
+ dbopen(argv[0], O_RDONLY, 0, DB_HASH, NULL)) == NULL) {
+ fprintf(stderr,
+ "db_dump185: %s: %s\n", argv[0], strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ db_hash(dbp, pflag);
+ } else
+ db_btree(dbp, pflag);
+
+ /*
+ * !!!
+ * DB 1.85 DBTs are a subset of DB 2.0 DBTs, so we just use the
+ * new dump/print routines.
+ */
+ if (pflag)
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_print(&key);
+ dbt_print(&data);
+ }
+ else
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_dump(&key);
+ dbt_dump(&data);
+ }
+
+ if (rval == -1) {
+ fprintf(stderr, "db_dump185: seq: %s\n", strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+}
+
+/*
+ * db_hash --
+ * Dump out hash header information.
+ */
+void
+db_hash(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ HTAB185 *hash185p;
+ HTAB186 *hash186p;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=hash\n");
+
+ /* DB 1.85 was version 2, DB 1.86 was version 3. */
+ hash185p = dbp->internal;
+ if (hash185p->hdr.version > 2) {
+ hash186p = dbp->internal;
+ printf("h_ffactor=%lu\n", (u_long)hash186p->hdr.ffactor);
+ if (hash186p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash186p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash186p->hdr.bsize);
+ } else {
+ printf("h_ffactor=%lu\n", (u_long)hash185p->hdr.ffactor);
+ if (hash185p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash185p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash185p->hdr.bsize);
+ }
+ printf("HEADER=END\n");
+}
+
+/*
+ * db_btree --
+ * Dump out btree header information.
+ */
+void
+db_btree(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ BTREE *btp;
+
+ btp = dbp->internal;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=btree\n");
+#ifdef NOT_AVAILABLE_IN_185
+ printf("bt_minkey=%lu\n", (u_long)XXX);
+ printf("bt_maxkey=%lu\n", (u_long)XXX);
+#endif
+ if (btp->bt_lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)btp->bt_lorder);
+ printf("db_pagesize=%lu\n", (u_long)btp->bt_psize);
+ if (!(btp->flags & B_NODUPS))
+ printf("duplicates=1\n");
+ printf("HEADER=END\n");
+}
+
+static char hex[] = "0123456789abcdef";
+
+/*
+ * dbt_dump --
+ * Write out a key or data item using byte values.
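+ *	For example, the three bytes "abc" are written as "616263".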
+ */
+void
+dbt_dump(dbtp)
+ DBT *dbtp;
+{
+ size_t len;
+ u_int8_t *p;
+
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ (void)printf("%c%c",
+ hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
+ printf("\n");
+}
+
+/*
+ * dbt_print --
+ * Write out a key or data item using printable characters.
+ */
+void
+dbt_print(dbtp)
+ DBT *dbtp;
+{
+ size_t len;
+ u_int8_t *p;
+
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ if (isprint((int)*p)) {
+ if (*p == '\\')
+ (void)printf("\\");
+ (void)printf("%c", *p);
+ } else
+ (void)printf("\\%c%c",
+ hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
+ printf("\n");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+usage()
+{
+ (void)fprintf(stderr, "usage: db_dump185 [-p] [-f file] db_file\n");
+ return (EXIT_FAILURE);
+}
diff --git a/libdb/db_load/db_load.c b/libdb/db_load/db_load.c
new file mode 100644
index 0000000..eaa8845
--- /dev/null
+++ b/libdb/db_load/db_load.c
@@ -0,0 +1,1232 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ char *hdrbuf; /* Input file header. */
+ u_long lineno; /* Input file line number. */
+ u_long origline; /* Original file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+ char *home; /* Env home. */
+ char *passwd; /* Env passwd. */
+ int private; /* Private env. */
+ u_int32_t cache; /* Env cache size. */
+} LDG;
+
+void badend __P((DB_ENV *));
+void badnum __P((DB_ENV *));
+int configure __P((DB_ENV *, DB *, char **, char **, int *));
+int convprintable __P((DB_ENV *, char *, char **));
+int db_init __P((DB_ENV *, char *, u_int32_t, int *));
+int dbt_rdump __P((DB_ENV *, DBT *));
+int dbt_rprint __P((DB_ENV *, DBT *));
+int dbt_rrecno __P((DB_ENV *, DBT *, int));
+int digitize __P((DB_ENV *, int, int *));
+int env_create __P((DB_ENV **, LDG *));
+int load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
+int main __P((int, char *[]));
+int rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int usage __P((void));
+int version_check __P((const char *));
+
+#define G(f) ((LDG *)dbenv->app_private)->f
+
+ /* Flags to the load function. */
+#define LDF_NOHEADER 0x01 /* No dump header. */
+#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */
+#define LDF_PASSWORD 0x04 /* Encrypt created databases. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBTYPE dbtype;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t ldf;
+ int ch, existed, exitval, ret;
+ char **clist, **clp;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+ ldg.cache = MEGABYTE;
+ ldg.hdrbuf = NULL;
+ ldg.home = NULL;
+ ldg.passwd = NULL;
+
+ if ((ret = version_check(ldg.progname)) != 0)
+ return (ret);
+
+ ldf = 0;
+ exitval = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
+ }
+
+ while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ ldg.home = optarg;
+ break;
+ case 'n':
+ ldf |= LDF_NOOVERWRITE;
+ break;
+ case 'P':
+ ldg.passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (ldg.passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ ldg.progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ ldf |= LDF_PASSWORD;
+ break;
+ case 'T':
+ ldf |= LDF_NOHEADER;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ return (usage());
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ */
+ if (env_create(&dbenv, &ldg) != 0)
+ goto shutdown;
+
+ while (!ldg.endofile)
+ if (load(dbenv, argv[0], dbtype, clist, ldf,
+ &ldg, &existed) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+ free(clist);
+
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ *
+ * Technically, this is wrong, because exit of anything other than
+ * 0 is implementation-defined by the ANSI C standard. I don't see
+ * any good solutions that don't involve API changes.
+ */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a database.
+ */
+int
+load(dbenv, name, argtype, clist, flags, ldg, existedp)
+ DB_ENV *dbenv;
+ char *name, **clist;
+ DBTYPE argtype;
+ u_int flags;
+ LDG *ldg;
+ int *existedp;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ u_int32_t put_flags;
+ int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval;
+ char *subdb;
+
+ *existedp = 0;
+
+ put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0;
+ G(endodata) = 0;
+
+	dbp = NULL;
+	subdb = NULL;
+	ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+retry_db:
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (LF_ISSET(LDF_NOHEADER)) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (G(endofile))
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (configure(dbenv, dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+	} else if (keyflag == 0) {
+		dbp->err(dbp, EINVAL, "Keys specified in file");
+		goto err;
+	} else
+		keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO &&
+ argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* If configured with a password, encrypt databases we create. */
+ if (LF_ISSET(LDF_PASSWORD) &&
+ (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
+ dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+ goto err;
+ }
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
+ DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
+ __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+ if (ldg->private != 0) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ dbp->close(dbp, 0);
+ dbp = NULL;
+ dbenv->close(dbenv, 0);
+ if ((ret = env_create(&dbenv, ldg)) != 0)
+ goto err;
+ goto retry_db;
+ }
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+		} else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (dbt_rprint(dbenv, &data))
+ goto err;
+ } else {
+ if (dbt_rdump(dbenv, &data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (dbt_rprint(dbenv, readp))
+ goto err;
+ if (!G(endodata) && dbt_rprint(dbenv, &data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (dbt_rrecno(dbenv, readp, hexkeys))
+ goto err;
+ } else
+ if (dbt_rdump(dbenv, readp))
+ goto err;
+ if (!G(endodata) && dbt_rdump(dbenv, &data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (G(endodata))
+ break;
+ if (readp != writep) {
+			if (sscanf(readp->data, "%u", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ *existedp = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)ctxn->abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn->abort(txn);
+ }
+
+ /* Close the database. */
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = 1;
+ }
+
+ if (G(hdrbuf) != NULL)
+ free(G(hdrbuf));
+ G(hdrbuf) = NULL;
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(dbenv, home, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t flags;
+ int ret;
+
+ *is_private = 0;
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ *is_private = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ return (1);
+}
+
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ G(progname), name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ badnum(dbenv); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+
+/*
+ * configure --
+ * Handle command-line configuration options.
+ */
+int
+configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if (*subdbp != NULL)
+ free(*subdbp);
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ */
+int
+rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int ch, first, hdr, linelen, buflen, ret, start;
+ char *buf, *name, *p, *value;
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+ name = p = NULL;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if (G(hdrbuf) == NULL) {
+ hdr = 0;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+ G(hdrbuf) = buf;
+ G(origline) = G(lineno);
+ } else {
+ hdr = 1;
+ buf = G(hdrbuf);
+ G(lineno) = G(origline);
+ }
+
+ start = 0;
+ for (first = 1;; first = 0) {
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ buf = &G(hdrbuf)[start];
+ if (hdr == 0) {
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
+
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen + start == buflen) {
+ G(hdrbuf) = (char *)realloc(G(hdrbuf),
+ buflen *= 2);
+ if (G(hdrbuf) == NULL)
+ goto memerr;
+ buf = &G(hdrbuf)[start];
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen++] = '\0';
+ } else
+ linelen = strlen(buf) + 1;
+ start += linelen;
+
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ name = NULL;
+ }
+ /* If we don't see the expected information, it's an error. */
+ if ((name = strdup(buf)) == NULL)
+ goto memerr;
+ if ((p = strchr(name, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+
+ value = p--;
+
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ G(version) = atoi(value);
+
+ if (G(version) > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ G(lineno), G(version));
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ goto err;
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((ret = convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum(dbenv);
+ goto err;
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword \"%s\"",
+ name);
+ goto err;
+ }
+ ret = 0;
+ if (0) {
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ ret = 1;
+ }
+ if (0)
+err: ret = 1;
+ if (0) {
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ ret = 1;
+ }
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ }
+ return (ret);
+}
+
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
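+ *
+ * Each "\" followed by two hex digits decodes to a single byte (for
+ * example, "\20" becomes a space), and "\\" decodes to one backslash.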
+ */
+int
+convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+	/*
+	 * Malloc a buffer big enough for the whole input string plus a
+	 * terminating nul; the decoded output is never longer than the input.
+	 */
+	if ((outstr = (char *)malloc(strlen(instr) + 1)) == NULL)
+		return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ c = digitize(dbenv, *instr, &e1) << 4;
+ c |= digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
+}
+
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ */
+int
+dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ badend(dbenv);
+ return (1);
+ }
+ c1 = digitize(dbenv,
+ c1, &e) << 4 | digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ */
+int
+dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if ((c2 = getchar()) == EOF) {
+ badend(dbenv);
+ return (1);
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = digitize(dbenv, c1, &e) << 4 | digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
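+/* The LDG "globals" are stored in dbenv->app_private by env_create(). */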
+ */
+int
+dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++G(lineno);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ G(endodata) = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * 0-9 in hex are 0x30-0x39, so this is easy.
+ * We should alternate between 3's and [0-9], and
+ * if the [0-9] are something unexpected,
+ * __db_getulong will fail, so we only need to catch
+ * end-of-string conditions.
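+			 * For example, a dumped recno key line " 31" is
+			 * rewritten so that buf + 1 holds "1" before
+			 * __db_getulong is called.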
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: badend(dbenv);
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
+
+/*
+ * digitize --
+ * Convert a character to an integer.
+ */
+int
+digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message.
+ */
+void
+badnum(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end to input message.
+ */
+void
+badend(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV] [-c name=value] [-f file]",
+ "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+env_create(dbenvp, ldg)
+ DB_ENV **dbenvp;
+ LDG *ldg;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if ((ret = db_env_create(dbenvp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv = *dbenvp;
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg->progname);
+ if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ ldg->passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ return (ret);
+ }
+ if ((ret = db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0)
+ return (ret);
+ dbenv->app_private = ldg;
+
+ return (0);
+}
diff --git a/libdb/db_printlog/README b/libdb/db_printlog/README
new file mode 100644
index 0000000..7f7d81a
--- /dev/null
+++ b/libdb/db_printlog/README
@@ -0,0 +1,34 @@
+# $Id$
+
+Berkeley DB log dump utility. This utility dumps out a DB log in human
+readable form, a record at a time, to assist in recovery and transaction
+abort debugging.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+commit.awk Output transaction ID of committed transactions.
+
+count.awk Print out the number of log records for transactions
+ that we encountered.
+
+dbname.awk Take a comma-separated list of database names and spit
+ out all the log records that affect those databases.
+
+fileid.awk Take a comma-separated list of file numbers and spit out
+ all the log records that affect those file numbers.
+
+logstat.awk Display log record count/size statistics.
+
+pgno.awk Take a comma-separated list of page numbers and spit
+ out all the log records that affect those page numbers.
+
+range.awk Print out a range of the log.
+
+rectype.awk Print out a range of the log -- command line should
+		set RECTYPE to a comma-separated list of the
+ rectypes (or partial strings of rectypes) sought.
+
+status.awk Read through db_printlog output and list the transactions
+		encountered, and whether they committed or aborted.
+
+txn.awk Print out all the records for a comma-separated list of
+ transaction IDs.
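+
+These scripts read db_printlog output on standard input.  A typical
+invocation (illustrative only; substitute your own environment path and
+awk variables) looks like:
+
+	db_printlog -h /path/to/env | awk -f commit.awk
+	db_printlog -h /path/to/env | awk -v DBNAME=mydb.db -f dbname.awk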
diff --git a/libdb/db_printlog/commit.awk b/libdb/db_printlog/commit.awk
new file mode 100644
index 0000000..569d238
--- /dev/null
+++ b/libdb/db_printlog/commit.awk
@@ -0,0 +1,7 @@
+# $Id$
+#
+# Output tid of committed transactions.
+
+/txn_regop/ {
+ print $5
+}
diff --git a/libdb/db_printlog/count.awk b/libdb/db_printlog/count.awk
new file mode 100644
index 0000000..a5e87cc
--- /dev/null
+++ b/libdb/db_printlog/count.awk
@@ -0,0 +1,9 @@
+# $Id$
+#
+# Print out the number of log records for transactions that we
+# encountered.
+
+/^\[/{
+ if ($5 != 0)
+ print $5
+}
diff --git a/libdb/db_printlog/db_printlog.c b/libdb/db_printlog/db_printlog.c
new file mode 100644
index 0000000..23d7d38
--- /dev/null
+++ b/libdb/db_printlog/db_printlog.c
@@ -0,0 +1,360 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int open_rep_db __P((DB_ENV *, DB **, DBC **));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_printlog";
+ DB *dbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data, keydbt;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, rflag, ret, repflag;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ dbc = NULL;
+ logc = NULL;
+ e_close = exitval = nflag = rflag = repflag = 0;
+ home = passwd = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'R':
+ repflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Set up an app-specific dispatch function so that we can gracefully
+ * handle app-specific log records.
+ */
+ if ((ret = dbenv->set_app_dispatch(dbenv, print_app_record)) != 0) {
+ dbenv->err(dbenv, ret, "app_dispatch");
+ goto shutdown;
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ * If we are reading the replication database, do not open the env
+ * with logging, because we don't want to log the opens.
+ */
+ if (repflag) {
+ if ((ret = dbenv->open(dbenv, home,
+ DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ } else if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ /* Allocate a log cursor. */
+ if (repflag) {
+ if ((ret = open_rep_db(dbenv, &dbp, &dbc)) != 0)
+ goto shutdown;
+ } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ memset(&keydbt, 0, sizeof(keydbt));
+ while (!__db_util_interrupted()) {
+ if (repflag) {
+ ret = dbc->c_get(dbc,
+ &keydbt, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret == 0)
+ key = ((REP_CONTROL *)keydbt.data)->lsn;
+ } else
+ ret = logc->get(logc,
+ &key, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv,
+ ret, repflag ? "DB_LOGC->get" : "DBC->get");
+ goto shutdown;
+ }
+
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ exitval = 1;
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0)
+ exitval = 1;
+
+ /*
+ * The dtab is allocated by __db_add_recovery (called by *_init_print)
+ * using the library malloc function (__os_malloc). It thus needs to be
+ * freed using the corresponding free (__os_free).
+ */
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n",
+	    "usage: db_printlog [-NrRV] [-h home] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/* Print an unknown, application-specific log record as best we can. */
+int
+print_app_record(dbenv, dbt, lsnp, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsnp;
+ db_recops op;
+{
+ int ch;
+ u_int32_t i, rectype;
+
+ DB_ASSERT(op == DB_TXN_PRINT);
+ COMPQUIET(dbenv, NULL);
+
+ /*
+ * Fetch the rectype, which always must be at the beginning of the
+ * record (if dispatching is to work at all).
+ */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ /*
+ * Applications may wish to customize the output here based on the
+ * rectype. We just print the entire log record in the generic
+ * mixed-hex-and-printable format we use for binary data.
+ */
+ printf("[%lu][%lu]application specific record: rec: %lu\n",
+ (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype);
+ printf("\tdata: ");
+ for (i = 0; i < dbt->size; i++) {
+ ch = ((u_int8_t *)dbt->data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ printf("\n\n");
+
+ return (0);
+}
+
+int
+open_rep_db(dbenv, dbpp, dbcp)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ DBC **dbcp;
+{
+	DB *dbp;
+	int ret;
+
+	*dbpp = NULL;
+ *dbcp = NULL;
+
+ if ((ret = db_create(dbpp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ret);
+ }
+
+ dbp = *dbpp;
+ if ((ret =
+ dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ return (0);
+
+err: if (*dbpp != NULL)
+ (void)(*dbpp)->close(*dbpp, 0);
+ return (ret);
+}
diff --git a/libdb/db_printlog/dbname.awk b/libdb/db_printlog/dbname.awk
new file mode 100644
index 0000000..064c50c
--- /dev/null
+++ b/libdb/db_printlog/dbname.awk
@@ -0,0 +1,79 @@
+# $Id$
+#
+# Take a comma-separated list of database names and spit out all the
+# log records that affect those databases.
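+#
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | awk -v DBNAME=a.db,b.db -f dbname.awk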
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(DBNAME, ",")) != 0) {
+ filenames[nfiles] = substr(DBNAME, 1, ndx - 1) 0;
+ DBNAME = substr(DBNAME, ndx + 1, length(DBNAME) - ndx);
+ files[nfiles] = -1
+ nfiles++
+ }
+ filenames[nfiles] = DBNAME 0;
+ files[nfiles] = -1
+ myfile = -1;
+}
+
+/^\[.*dbreg_register/ {
+ register = 1;
+}
+/opcode:/ {
+ if (register == 1) {
+ if ($2 == 1)
+ register = 3;
+ else
+ register = $2;
+ }
+}
+/name:/ {
+ if (register >= 2) {
+ for (i = 0; i <= nfiles; i++) {
+ if ($2 == filenames[i]) {
+ if (register == 2) {
+ printme = 0;
+ myfile = -1;
+ files[i] = -1;
+ } else {
+ myfile = i;
+ }
+ break;
+ }
+ }
+ }
+ register = 0;
+}
+/fileid:/{
+ if (myfile != -1) {
+ files[myfile] = $2;
+ printme = 1;
+ register = 0;
+ myfile = -1;
+ } else
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i]) {
+ printme = 1
+ break;
+ }
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+
+TXN == 1 && /txn_regop/ {printme = 1}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/libdb/db_printlog/fileid.awk b/libdb/db_printlog/fileid.awk
new file mode 100644
index 0000000..fdad274
--- /dev/null
+++ b/libdb/db_printlog/fileid.awk
@@ -0,0 +1,37 @@
+# $Id$
+#
+# Take a comma-separated list of file numbers and spit out all the
+# log records that affect those file numbers.
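+#
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | awk -v FILEID=1,3 -f fileid.awk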
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(FILEID, ",")) != 0) {
+ files[nfiles] = substr(FILEID, 1, ndx - 1);
+ FILEID = substr(FILEID, ndx + 1, length(FILEID) - ndx);
+ nfiles++
+ }
+ files[nfiles] = FILEID;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/fileid/{
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/libdb/db_printlog/logstat.awk b/libdb/db_printlog/logstat.awk
new file mode 100644
index 0000000..b22a3f1
--- /dev/null
+++ b/libdb/db_printlog/logstat.awk
@@ -0,0 +1,36 @@
+# $Id$
+#
+# Output accumulated log record count/size statistics.
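+#
+# Example (assumed invocation; reads db_printlog output on stdin):
+#	db_printlog -h <env_dir> | awk -f logstat.awk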
+BEGIN {
+ l_file = 0;
+ l_offset = 0;
+}
+
+/^\[/{
+ gsub("[][: ]", " ", $1)
+ split($1, a)
+
+ if (a[1] == l_file) {
+ l[a[3]] += a[2] - l_offset
+ ++n[a[3]]
+ } else
+ ++s[a[3]]
+
+ l_file = a[1]
+ l_offset = a[2]
+}
+
+END {
+	# We can't figure out the size of the first record in each log file,
+	# so use the average of the other records we found as an estimate.
+ for (i in s)
+ if (s[i] != 0 && n[i] != 0) {
+ l[i] += s[i] * (l[i]/n[i])
+ n[i] += s[i]
+ delete s[i]
+ }
+ for (i in l)
+ printf "%s: %d (n: %d, avg: %.2f)\n", i, l[i], n[i], l[i]/n[i]
+ for (i in s)
+ printf "%s: unknown (n: %d, unknown)\n", i, s[i]
+}
diff --git a/libdb/db_printlog/pgno.awk b/libdb/db_printlog/pgno.awk
new file mode 100644
index 0000000..f5bbdfb
--- /dev/null
+++ b/libdb/db_printlog/pgno.awk
@@ -0,0 +1,47 @@
+# $Id$
+#
+# Take a comma-separated list of page numbers and spit out all the
+# log records that affect those page numbers.
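+#
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | awk -v PGNO=3,12,19 -f pgno.awk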
+
+NR == 1 {
+ npages = 0
+ while ((ndx = index(PGNO, ",")) != 0) {
+ pgno[npages] = substr(PGNO, 1, ndx - 1);
+ PGNO = substr(PGNO, ndx + 1, length(PGNO) - ndx);
+ npages++
+ }
+ pgno[npages] = PGNO;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/pgno/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/right/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/left/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/libdb/db_printlog/range.awk b/libdb/db_printlog/range.awk
new file mode 100644
index 0000000..e2c0112
--- /dev/null
+++ b/libdb/db_printlog/range.awk
@@ -0,0 +1,27 @@
+# $Id$
+#
+# Print out a range of the log
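+#
+# Command line should set START_FILE, START_OFFSET, END_FILE and END_OFFSET.
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | \
+#	    awk -v START_FILE=1 -v START_OFFSET=28 \
+#		-v END_FILE=2 -v END_OFFSET=500000 -f range.awk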
+
+/^\[/{
+ l = length($1) - 1;
+ i = index($1, "]");
+ file = substr($1, 2, i - 2);
+ file += 0;
+ start = i + 2;
+ offset = substr($1, start, l - start + 1);
+ i = index(offset, "]");
+ offset = substr($1, start, i - 1);
+ offset += 0;
+
+ if ((file == START_FILE && offset >= START_OFFSET || file > START_FILE)\
+ && (file < END_FILE || (file == END_FILE && offset < END_OFFSET)))
+ printme = 1
+ else if (file == END_FILE && offset > END_OFFSET || file > END_FILE)
+ exit
+ else
+ printme = 0
+}
+{
+ if (printme == 1)
+ print $0
+}
diff --git a/libdb/db_printlog/rectype.awk b/libdb/db_printlog/rectype.awk
new file mode 100644
index 0000000..ebe46b1
--- /dev/null
+++ b/libdb/db_printlog/rectype.awk
@@ -0,0 +1,27 @@
+# $Id$
+#
+# Print out log records whose record type matches.
+# Command line should set RECTYPE to a comma-separated list
+# of the rectypes (or partial strings of rectypes) sought.
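+#
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | awk -v RECTYPE=txn_regop,dbreg_register -f rectype.awk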
+NR == 1 {
+ ntypes = 0
+ while ((ndx = index(RECTYPE, ",")) != 0) {
+ types[ntypes] = substr(RECTYPE, 1, ndx - 1);
+ RECTYPE = substr(RECTYPE, ndx + 1, length(RECTYPE) - ndx);
+ ntypes++
+ }
+ types[ntypes] = RECTYPE;
+}
+
+/^\[/{
+ printme = 0
+ for (i = 0; i <= ntypes; i++)
+ if (index($1, types[i]) != 0) {
+ printme = 1
+ break;
+ }
+}
+{
+ if (printme == 1)
+ print $0
+}
diff --git a/libdb/db_printlog/status.awk b/libdb/db_printlog/status.awk
new file mode 100644
index 0000000..71376b4
--- /dev/null
+++ b/libdb/db_printlog/status.awk
@@ -0,0 +1,46 @@
+# $Id$
+#
+# Read through db_printlog output and list all the transactions encountered
+# and whether they committed or aborted.
+#
+# 1 = started
+# 2 = committed
+# 3 = explicitly aborted
+# 4 = other
+BEGIN {
+ cur_txn = 0
+}
+/^\[/{
+ in_regop = 0
+ if (status[$5] == 0) {
+ status[$5] = 1;
+ txns[cur_txn] = $5;
+ cur_txn++;
+ }
+}
+/txn_regop/ {
+ txnid = $5
+ in_regop = 1
+}
+/opcode:/ {
+ if (in_regop == 1) {
+ if ($2 == 1)
+ status[txnid] = 2
+ else if ($2 == 3)
+ status[txnid] = 3
+ else
+ status[txnid] = 4
+ }
+}
+END {
+ for (i = 0; i < cur_txn; i++) {
+ if (status[txns[i]] == 1)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 2)
+ printf("%s\tCOMMIT\n", txns[i]);
+ if (status[txns[i]] == 3)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 4)
+ printf("%s\tOTHER\n", txns[i]);
+ }
+}
diff --git a/libdb/db_printlog/txn.awk b/libdb/db_printlog/txn.awk
new file mode 100644
index 0000000..914db18
--- /dev/null
+++ b/libdb/db_printlog/txn.awk
@@ -0,0 +1,34 @@
+# $Id$
+#
+# Print out all the records for a comma-separated list of transaction ids.
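+#
+# Example (assumed invocation; pipe db_printlog output into awk):
+#	db_printlog -h <env_dir> | awk -v TXN=80000001,80000002 -f txn.awk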
+NR == 1 {
+ ntxns = 0
+ while ((ndx = index(TXN, ",")) != 0) {
+ txn[ntxns] = substr(TXN, 1, ndx - 1);
+ TXN = substr(TXN, ndx + 1, length(TXN) - ndx);
+ ntxns++
+ }
+ txn[ntxns] = TXN;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ for (i = 0; i <= ntxns; i++)
+ if (txn[i] == $5) {
+ rec = $0
+ printme = 1
+ }
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/libdb/db_recover/db_recover.c b/libdb/db_recover/db_recover.c
new file mode 100644
index 0000000..6432f4e
--- /dev/null
+++ b/libdb/db_recover/db_recover.c
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+int main __P((int, char *[]));
+int read_timestamp __P((const char *, char *, time_t *));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ home = passwd = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = retain_env = verbose = 0;
+ while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'e':
+ retain_env = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if ((ret =
+ read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+	 * Initialize the environment -- we don't actually do anything
+	 * else; that's all that's needed to run recovery.
+	 *
+	 * Note that unless the caller specified the -e option, we use a
+	 * private environment, as we're about to create a region, and we
+	 * don't want to leave it around. If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
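+/*
+ * ATOI2 converts two adjacent ASCII digit characters into an int and
+ * advances the pointer past them; it performs no validation of its own,
+ * so callers must pass strings of the expected length and content.
+ */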
+
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+int
+read_timestamp(progname, arg, timep)
+ const char *progname;
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+	char *p;
+
+	/* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds defaults to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr: fprintf(stderr,
+ "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n",
+"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_stat/db_stat.c b/libdb/db_stat/db_stat.c
new file mode 100644
index 0000000..4676e8a
--- /dev/null
+++ b/libdb/db_stat/db_stat.c
@@ -0,0 +1,1267 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
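+/*
+ * PCT computes the percentage of space in use ("fill factor") given f bytes
+ * free across t pages of pgsize bytes each; it guards against a zero page
+ * count to avoid dividing by zero.
+ */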
+
+typedef enum { T_NOTSET,
+ T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
+
+int argcheck __P((char *, const char *));
+int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
+void dl __P((const char *, u_long));
+void dl_bytes __P((const char *, u_long, u_long, u_long));
+int env_stats __P((DB_ENV *, u_int32_t));
+int hash_stats __P((DB_ENV *, DB *, int));
+int lock_stats __P((DB_ENV *, char *, u_int32_t));
+int log_stats __P((DB_ENV *, u_int32_t));
+int main __P((int, char *[]));
+int mpool_stats __P((DB_ENV *, char *, u_int32_t));
+void prflags __P((u_int32_t, const FN *));
+int queue_stats __P((DB_ENV *, DB *, int));
+int rep_stats __P((DB_ENV *, u_int32_t));
+int txn_compare __P((const void *, const void *));
+int txn_stats __P((DB_ENV *, u_int32_t));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ u_int32_t cache;
+ int ch, checked, d_close, e_close, exitval, fast, flags;
+ int nflag, private, resize, ret;
+ char *db, *home, *internal, *passwd, *subdb;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = fast = flags = nflag = private = 0;
+ db = home = internal = passwd = subdb = NULL;
+
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF)
+ switch (ch) {
+ case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ if (!argcheck(internal = optarg, "Aclmop"))
+ return (usage());
+ break;
+ case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ db = optarg;
+ break;
+ case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_ENV;
+ break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOG;
+ break;
+ case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ if (!argcheck(internal = optarg, "Ahm"))
+ return (usage());
+ break;
+ case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_REP;
+ break;
+ case 's':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ subdb = optarg;
+ break;
+ case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ return (usage());
+ break;
+ case T_NOTSET:
+ return (usage());
+ /* NOTREACHED */
+ default:
+ if (fast != 0)
+ return (usage());
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /* Initialize the environment. */
+ if (db_init(dbenv, home, ttype, cache, &private) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if (flags != 0)
+ return (usage());
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ d_close = 1;
+
+ if ((ret = dbp->open(dbp,
+ NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", db);
+ goto shutdown;
+ }
+
+ /* Check if cache is too small for this DB's pagesize. */
+ if (private) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto shutdown;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+		 * If it is a master database we cannot, so check: if it
+		 * is a btree it might be possible.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto shutdown;
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp, NULL,
+ db, subdb, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->open: %s:%s", db, subdb);
+ (void)alt_dbp->close(alt_dbp, 0);
+ goto shutdown;
+ }
+
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (hash_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (queue_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ dbenv->errx(dbenv, "Unknown database type.");
+ goto shutdown;
+ }
+ break;
+ case T_ENV:
+ if (env_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_LOCK:
+ if (lock_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_LOG:
+ if (log_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_MPOOL:
+ if (mpool_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_REP:
+ if (rep_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_TXN:
+ if (txn_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_NOTSET:
+ dbenv->errx(dbenv, "Unknown statistics flag.");
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * env_stats --
+ * Display environment statistics.
+ */
+int
+env_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ REGENV renv;
+ REGION *rp, regs[1024];
+ int n, ret;
+ const char *lable;
+
+ n = sizeof(regs) / sizeof(regs[0]);
+ if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+ dbenv->err(dbenv, ret, "__db_e_stat");
+ return (1);
+ }
+
+ printf("%d.%d.%d\tEnvironment version.\n",
+ renv.majver, renv.minver, renv.patch);
+ printf("%lx\tMagic number.\n", (u_long)renv.magic);
+ printf("%d\tPanic value.\n", renv.envpanic);
+
+ /* Adjust the reference count for us... */
+ printf("%d\tReferences.\n", renv.refcnt - 1);
+
+ dl("Locks granted without waiting.\n",
+ (u_long)renv.mutex.mutex_set_nowait);
+ dl("Locks granted after waiting.\n",
+ (u_long)renv.mutex.mutex_set_wait);
+
+ while (n > 0) {
+ printf("%s\n", DB_LINE);
+ rp = &regs[--n];
+ switch (rp->type) {
+ case REGION_TYPE_ENV:
+ lable = "Environment";
+ break;
+ case REGION_TYPE_LOCK:
+ lable = "Lock";
+ break;
+ case REGION_TYPE_LOG:
+ lable = "Log";
+ break;
+ case REGION_TYPE_MPOOL:
+ lable = "Mpool";
+ break;
+ case REGION_TYPE_MUTEX:
+ lable = "Mutex";
+ break;
+ case REGION_TYPE_TXN:
+ lable = "Txn";
+ break;
+ case INVALID_REGION_TYPE:
+ default:
+ lable = "Invalid";
+ break;
+ }
+ printf("%s Region: %d.\n", lable, rp->id);
+ dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+ printf("%ld\tSegment ID.\n", rp->segid);
+ dl("Locks granted without waiting.\n",
+ (u_long)rp->mutex.mutex_set_nowait);
+ dl("Locks granted after waiting.\n",
+ (u_long)rp->mutex.mutex_set_wait);
+ }
+
+ return (0);
+}
+
+/*
+ * btree_stats --
+ * Display btree/recno statistics.
+ */
+int
+btree_stats(dbenv, dbp, msp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DB_BTREE_STAT *msp;
+ int fast;
+{
+ static const FN fn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_FIXEDLEN, "fixed-length" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "record-numbers" },
+ { BTM_RENUMBER, "renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ DB_BTREE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if (msp != NULL)
+ sp = msp;
+ else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+ printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+ prflags(sp->bt_metaflags, fn);
+ if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+ dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+ dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+ }
+ if (dbp->type == DB_RECNO) {
+ dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+ if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+ printf("%c\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ }
+ dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+ dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+ dl(dbp->type == DB_BTREE ?
+ "Number of unique keys in the tree.\n" :
+ "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+ dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+ dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+ dl("Number of bytes free in tree internal pages",
+ (u_long)sp->bt_int_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+ dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+ dl("Number of bytes free in tree leaf pages",
+ (u_long)sp->bt_leaf_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+ dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+ dl("Number of bytes free in tree duplicate pages",
+ (u_long)sp->bt_dup_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+ dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+ dl("Number of bytes free in tree overflow pages",
+ (u_long)sp->bt_over_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+ dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * hash_stats --
+ * Display hash statistics.
+ */
+int
+hash_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ static const FN fn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB,"multiple-databases" },
+ { 0, NULL }
+ };
+ DB_HASH_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+ printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+ prflags(sp->hash_metaflags, fn);
+ dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
+ dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+ dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+ dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+ dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+ dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+ dl("Number of bytes free in overflow pages",
+ (u_long)sp->hash_big_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+ dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+ dl("Number of bytes free in bucket overflow pages",
+ (u_long)sp->hash_ovfl_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+ dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+ dl("Number of bytes free in duplicate pages",
+ (u_long)sp->hash_dup_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+ dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * queue_stats --
+ * Display queue statistics.
+ */
+int
+queue_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ DB_QUEUE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+ printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+ dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+ if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+ printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ if (sp->qs_extentsize != 0)
+ dl("Underlying database extent size.\n",
+ (u_long)sp->qs_extentsize);
+ dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+ dl("Number of database pages.\n", (u_long)sp->qs_pages);
+ dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+ printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+ printf(
+ "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * lock_stats --
+ * Display lock statistics.
+ */
+int
+lock_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_LOCK_STAT *sp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ dl("Last allocated locker ID.\n", (u_long)sp->st_id);
+ dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid);
+ dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+ dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+ dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+ dl("Maximum number of lock objects possible.\n",
+ (u_long)sp->st_maxobjects);
+ dl("Number of current locks.\n", (u_long)sp->st_nlocks);
+ dl("Maximum number of locks at any one time.\n",
+ (u_long)sp->st_maxnlocks);
+ dl("Number of current lockers.\n", (u_long)sp->st_nlockers);
+ dl("Maximum number of lockers at any one time.\n",
+ (u_long)sp->st_maxnlockers);
+ dl("Number of current lock objects.\n", (u_long)sp->st_nobjects);
+ dl("Maximum number of lock objects at any one time.\n",
+ (u_long)sp->st_maxnobjects);
+ dl("Total number of locks requested.\n", (u_long)sp->st_nrequests);
+ dl("Total number of locks released.\n", (u_long)sp->st_nreleases);
+ dl(
+ "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n",
+ (u_long)sp->st_nnowaits);
+ dl(
+ "Total number of locks not immediately available due to conflicts.\n",
+ (u_long)sp->st_nconflicts);
+ dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+ dl("Lock timeout value.\n", (u_long)sp->st_locktimeout);
+ dl("Number of locks that have timed out.\n",
+ (u_long)sp->st_nlocktimeouts);
+ dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout);
+ dl("Number of transactions that have timed out.\n",
+ (u_long)sp->st_ntxntimeouts);
+
+ dl_bytes("The size of the lock region.",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * log_stats --
+ * Display log statistics.
+ */
+int
+log_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_LOG_STAT *sp;
+ int ret;
+
+ if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+ printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+ dl_bytes("Log record cache size",
+ (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+ printf("%#o\tLog file mode.\n", sp->st_mode);
+ if (sp->st_lg_size % MEGABYTE == 0)
+ printf("%luMb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / MEGABYTE);
+ else if (sp->st_lg_size % 1024 == 0)
+ printf("%luKb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / 1024);
+ else
+ printf("%lu\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size);
+ dl_bytes("Log bytes written",
+ (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+ dl_bytes("Log bytes written since last checkpoint",
+ (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+ dl("Total log file writes.\n", (u_long)sp->st_wcount);
+ dl("Total log file write due to overflow.\n",
+ (u_long)sp->st_wcount_fill);
+ dl("Total log file flushes.\n", (u_long)sp->st_scount);
+ printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+ printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+ printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+ printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+ dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+ dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+
+ dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * mpool_stats --
+ * Display mpool statistics.
+ */
+int
+mpool_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_MPOOL_FSTAT **fsp;
+ DB_MPOOL_STAT *gsp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ dl_bytes("Total cache size",
+ (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+ dl("Number of caches.\n", (u_long)gsp->st_ncache);
+ dl_bytes("Pool individual cache size",
+ (u_long)0, (u_long)0, (u_long)gsp->st_regsize);
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
+ dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+ if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+ (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+ printf(".\n");
+ dl("Requested pages not found in the cache.\n",
+ (u_long)gsp->st_cache_miss);
+ dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+ dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+ dl("Pages written from the cache to the backing file.\n",
+ (u_long)gsp->st_page_out);
+ dl("Clean pages forced from the cache.\n",
+ (u_long)gsp->st_ro_evict);
+ dl("Dirty pages forced from the cache.\n",
+ (u_long)gsp->st_rw_evict);
+ dl("Dirty pages written by trickle-sync thread.\n",
+ (u_long)gsp->st_page_trickle);
+ dl("Current total page count.\n",
+ (u_long)gsp->st_pages);
+ dl("Current clean page count.\n",
+ (u_long)gsp->st_page_clean);
+ dl("Current dirty page count.\n",
+ (u_long)gsp->st_page_dirty);
+ dl("Number of hash buckets used for page location.\n",
+ (u_long)gsp->st_hash_buckets);
+ dl("Total number of times hash chains searched for a page.\n",
+ (u_long)gsp->st_hash_searches);
+ dl("The longest hash chain searched for a page.\n",
+ (u_long)gsp->st_hash_longest);
+ dl("Total number of hash buckets examined for page location.\n",
+ (u_long)gsp->st_hash_examined);
+ dl("The number of hash bucket locks granted without waiting.\n",
+ (u_long)gsp->st_hash_nowait);
+ dl("The number of hash bucket locks granted after waiting.\n",
+ (u_long)gsp->st_hash_wait);
+ dl("The maximum number of times any hash bucket lock was waited for.\n",
+ (u_long)gsp->st_hash_max_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)gsp->st_region_nowait);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)gsp->st_region_wait);
+ dl("The number of page allocations.\n",
+ (u_long)gsp->st_alloc);
+ dl("The number of hash buckets examined during allocations\n",
+ (u_long)gsp->st_alloc_buckets);
+ dl("The max number of hash buckets examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_buckets);
+ dl("The number of pages examined during allocations\n",
+ (u_long)gsp->st_alloc_pages);
+ dl("The max number of pages examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_pages);
+
+ for (; fsp != NULL && *fsp != NULL; ++fsp) {
+ printf("%s\n", DB_LINE);
+ printf("Pool File: %s\n", (*fsp)->file_name);
+ dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
+ dl("Requested pages found in the cache",
+ (u_long)(*fsp)->st_cache_hit);
+ if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit /
+ ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
+ 100);
+ printf(".\n");
+ dl("Requested pages not found in the cache.\n",
+ (u_long)(*fsp)->st_cache_miss);
+ dl("Pages created in the cache.\n",
+ (u_long)(*fsp)->st_page_create);
+ dl("Pages read into the cache.\n",
+ (u_long)(*fsp)->st_page_in);
+ dl("Pages written from the cache to the backing file.\n",
+ (u_long)(*fsp)->st_page_out);
+ }
+
+ free(gsp);
+
+ return (0);
+}
+
+/*
+ * rep_stats --
+ * Display replication statistics.
+ */
+int
+rep_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_REP_STAT *sp;
+ int is_client, ret;
+ const char *p;
+
+ if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ is_client = 0;
+ switch (sp->st_status) {
+ case DB_REP_MASTER:
+ printf("Environment configured as a replication master.\n");
+ break;
+ case DB_REP_CLIENT:
+ printf("Environment configured as a replication client.\n");
+ is_client = 1;
+ break;
+ case DB_REP_LOGSONLY:
+ printf("Environment configured as a logs-only replica.\n");
+ is_client = 1;
+ break;
+ default:
+ printf("Environment not configured for replication.\n");
+ break;
+ }
+
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset,
+ is_client ? "Next LSN expected." : "Next LSN to be used.");
+ p = sp->st_waiting_lsn.file == 0 ?
+ "Not waiting for any missed log records." :
+ "LSN of first missed log record being waited for.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset,
+ p);
+
+ dl("Number of duplicate master conditions detected.\n",
+ (u_long)sp->st_dupmasters);
+ if (sp->st_env_id != DB_EID_INVALID)
+ dl("Current environment ID.\n", (u_long)sp->st_env_id);
+ else
+ printf("No current environment ID.\n");
+ dl("Current environment priority.\n", (u_long)sp->st_env_priority);
+ dl("Current generation number.\n", (u_long)sp->st_gen);
+ dl("Number of duplicate log records received.\n",
+ (u_long)sp->st_log_duplicated);
+ dl("Number of log records currently queued.\n",
+ (u_long)sp->st_log_queued);
+ dl("Maximum number of log records ever queued at once.\n",
+ (u_long)sp->st_log_queued_max);
+ dl("Total number of log records queued.\n",
+ (u_long)sp->st_log_queued_total);
+ dl("Number of log records received and appended to the log.\n",
+ (u_long)sp->st_log_records);
+ dl("Number of log records missed and requested.\n",
+ (u_long)sp->st_log_requested);
+ if (sp->st_master != DB_EID_INVALID)
+ dl("Current master ID.\n", (u_long)sp->st_master);
+ else
+ printf("No current master ID.\n");
+ dl("Number of times the master has changed.\n",
+ (u_long)sp->st_master_changes);
+ dl("Number of messages received with a bad generation number.\n",
+ (u_long)sp->st_msgs_badgen);
+ dl("Number of messages received and processed.\n",
+ (u_long)sp->st_msgs_processed);
+ dl("Number of messages ignored due to pending recovery.\n",
+ (u_long)sp->st_msgs_recover);
+ dl("Number of failed message sends.\n",
+ (u_long)sp->st_msgs_send_failures);
+ dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent);
+ dl("Number of new site messages received.\n", (u_long)sp->st_newsites);
+ dl("Transmission limited.\n", (u_long)sp->st_nthrottles);
+ dl("Number of outdated conditions detected.\n",
+ (u_long)sp->st_outdated);
+ dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied);
+
+ dl("Number of elections held.\n", (u_long)sp->st_elections);
+ dl("Number of elections won.\n", (u_long)sp->st_elections_won);
+
+ if (sp->st_election_status == 0)
+ printf("No election in progress.\n");
+ else {
+ dl("Current election phase.\n", (u_long)sp->st_election_status);
+ dl("Election winner.\n",
+ (u_long)sp->st_election_cur_winner);
+ dl("Election generation number.\n",
+ (u_long)sp->st_election_gen);
+ printf("%lu/%lu\tMaximum LSN of election winner.\n",
+ (u_long)sp->st_election_lsn.file,
+ (u_long)sp->st_election_lsn.offset);
+ dl("Number of sites expected to participate in elections.\n",
+ (u_long)sp->st_election_nsites);
+ dl("Election priority.\n", (u_long)sp->st_election_priority);
+ dl("Election tiebreaker value.\n",
+ (u_long)sp->st_election_tiebreaker);
+ dl("Votes received this election round.\n",
+ (u_long)sp->st_election_votes);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * txn_stats --
+ * Display transaction statistics.
+ */
+int
+txn_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_TXN_STAT *sp;
+ u_int32_t i;
+ int ret;
+ const char *p;
+
+ if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ p = sp->st_last_ckp.file == 0 ?
+ "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+ if (sp->st_time_ckp == 0)
+ printf("0\tNo checkpoint timestamp.\n");
+ else
+ printf("%.24s\tCheckpoint timestamp.\n",
+ ctime(&sp->st_time_ckp));
+ printf("%lx\tLast transaction ID allocated.\n",
+ (u_long)sp->st_last_txnid);
+ dl("Maximum number of active transactions possible.\n",
+ (u_long)sp->st_maxtxns);
+ dl("Active transactions.\n", (u_long)sp->st_nactive);
+ dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+ dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+ dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+ dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
+ dl_bytes("Transaction region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ qsort(sp->st_txnarray,
+ sp->st_nactive, sizeof(sp->st_txnarray[0]), txn_compare);
+ for (i = 0; i < sp->st_nactive; ++i) {
+ printf("\tid: %lx; begin LSN: file/offset %lu/%lu",
+ (u_long)sp->st_txnarray[i].txnid,
+ (u_long)sp->st_txnarray[i].lsn.file,
+ (u_long)sp->st_txnarray[i].lsn.offset);
+ if (sp->st_txnarray[i].parentid == 0)
+ printf("\n");
+ else
+ printf(" parent: %lx\n",
+ (u_long)sp->st_txnarray[i].parentid);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+int
+txn_compare(a1, b1)
+ const void *a1, *b1;
+{
+ const DB_TXN_ACTIVE *a, *b;
+
+ a = a1;
+ b = b1;
+
+ if (a->txnid > b->txnid)
+ return (1);
+ if (a->txnid < b->txnid)
+ return (-1);
+ return (0);
+}
+
+/*
+ * dl --
+ * Display a big value.
+ */
+void
+dl(msg, value)
+ const char *msg;
+ u_long value;
+{
+ /*
+	 * Two formats: if less than 10 million, display the number itself;
+	 * otherwise display it in millions as ###M.
+ */
+ if (value < 10000000)
+ printf("%lu\t%s", value, msg);
+ else
+ printf("%luM\t%s", value / 1000000, msg);
+}
+
+/*
+ * dl_bytes --
+ * Display a big number of bytes.
+ */
+void
+dl_bytes(msg, gbytes, mbytes, bytes)
+ const char *msg;
+ u_long gbytes, mbytes, bytes;
+{
+ const char *sep;
+
+ /* Normalize the values. */
+ while (bytes >= MEGABYTE) {
+ ++mbytes;
+ bytes -= MEGABYTE;
+ }
+ while (mbytes >= GIGABYTE / MEGABYTE) {
+ ++gbytes;
+ mbytes -= GIGABYTE / MEGABYTE;
+ }
+
+ sep = "";
+ if (gbytes > 0) {
+ printf("%luGB", gbytes);
+ sep = " ";
+ }
+ if (mbytes > 0) {
+ printf("%s%luMB", sep, mbytes);
+ sep = " ";
+ }
+ if (bytes >= 1024) {
+ printf("%s%luKB", sep, bytes / 1024);
+ bytes %= 1024;
+ sep = " ";
+ }
+ if (bytes > 0)
+ printf("%s%luB", sep, bytes);
+
+ printf("\t%s.\n", msg);
+}
+
+/*
+ * prflags --
+ * Print out flag values.
+ */
+void
+prflags(flags, fnp)
+ u_int32_t flags;
+ const FN *fnp;
+{
+ const char *sep;
+
+ sep = "\t";
+ printf("Flags:");
+ for (; fnp->mask != 0; ++fnp)
+ if (fnp->mask & flags) {
+ printf("%s%s", sep, fnp->name);
+ sep = ", ";
+ }
+ printf("\n");
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(dbenv, home, ttype, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ test_t ttype;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t oflags;
+ int ret;
+
+ /*
+ * If our environment open fails, and we're trying to look at a
+ * shared region, it's a hard failure.
+ *
+ * We will probably just drop core if the environment we join does
+ * not include a memory pool. This is probably acceptable; trying
+ * to use an existing environment that does not contain a memory
+ * pool to look at a database can be safely construed as operator
+ * error, I think.
+ */
+ *is_private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+ if (ttype != T_DB && ttype != T_LOG) {
+ dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
+ home == NULL ? "" : ": ", home == NULL ? "" : home);
+ return (1);
+ }
+
+ /*
+ * We're looking at a database or set of log files and no environment
+ * exists. Create one, but make it private so no files are actually
+ * created. Declare a reasonably large cache so that we don't fail
+ * when reporting statistics on large databases.
+ *
+ * An environment is required to look at databases because we may be
+ * trying to look at databases in directories other than the current
+ * one.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ *is_private = 1;
+ oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
+ if (ttype == T_DB)
+ oflags |= DB_INIT_MPOOL;
+ if (ttype == T_LOG)
+ oflags |= DB_INIT_LOG;
+ if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * argcheck --
+ * Return if argument flags are okay.
+ */
+int
+argcheck(arg, ok_args)
+ char *arg;
+ const char *ok_args;
+{
+ for (; *arg != '\0'; ++arg)
+ if (strchr(ok_args, *arg) == NULL)
+ return (0);
+ return (1);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_stat [-celmNrtVZ] [-C Aclmop]",
+ "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_upgrade/db_upgrade.c b/libdb/db_upgrade/db_upgrade.c
new file mode 100644
index 0000000..c29584f
--- /dev/null
+++ b/libdb/db_upgrade/db_upgrade.c
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_upgrade";
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, nflag, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ flags = nflag = 0;
+ e_close = exitval = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_DUPSORT);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+ dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/libdb/db_verify/db_verify.c b/libdb/db_verify/db_verify.c
new file mode 100644
index 0000000..bfe56ba
--- /dev/null
+++ b/libdb/db_verify/db_verify.c
@@ -0,0 +1,248 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_verify";
+ DB *dbp, *dbp1;
+ DB_ENV *dbenv;
+ u_int32_t cache;
+ int ch, d_close, e_close, exitval, nflag, oflag, private;
+ int quiet, resize, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = nflag = oflag = quiet = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'o':
+ oflag = 1;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
+ */
+ private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ private = 1;
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+ d_close = 1;
+
+ /*
+ * We create a second DB handle on this database to get its page size,
+ * because the handle we're using for verify cannot be opened.
+ */
+ if (private) {
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv->err(
+ dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+
+ if ((ret = dbp1->open(dbp1, NULL,
+ argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: %s", argv[0]);
+ (void)dbp1->close(dbp1, 0);
+ goto shutdown;
+ }
+ /*
+ * If we get here, we can check the cache/page.
+ * !!!
+ * If we have to retry with an env with a larger
+ * cache, we jump out of this loop. However, we
+ * will still be working on the same argv when we
+ * get back into the for-loop.
+ */
+ ret = __db_util_cache(dbenv, dbp1, &cache, &resize);
+ (void)dbp1->close(dbp1, 0);
+ if (ret != 0)
+ goto shutdown;
+
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+ if ((ret = dbp->verify(dbp,
+ argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ d_close = 0;
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
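+
+/*
+ * A minimal sketch of calling the verifier from application code, under
+ * the same assumptions as above (no pre-existing environment, a
+ * caller-supplied file name):
+ *
+ *	DB *dbp;
+ *	int ret;
+ *
+ *	if ((ret = db_create(&dbp, NULL, 0)) == 0) {
+ *		ret = dbp->verify(dbp, "file.db", NULL, NULL, 0);
+ *		(void)dbp->close(dbp, 0);
+ *	}
+ *
+ * Passing a FILE * as the fourth argument together with DB_SALVAGE in
+ * the flags writes out whatever key/data pairs can be recovered instead
+ * of only checking the file's structure.
+ */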
diff --git a/libdb/dbinc/btree.h b/libdb/dbinc/btree.h
new file mode 100644
index 0000000..47b9a88
--- /dev/null
+++ b/libdb/dbinc/btree.h
@@ -0,0 +1,320 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#ifndef _DB_BTREE_H_
+#define _DB_BTREE_H_
+
+/* Forward structure declarations. */
+struct __btree; typedef struct __btree BTREE;
+struct __cursor; typedef struct __cursor BTREE_CURSOR;
+struct __epg; typedef struct __epg EPG;
+struct __recno; typedef struct __recno RECNO;
+
+#define DEFMINKEYPAGE (2)
+
+/*
+ * A recno order of 0 indicates that we don't have an order, not that we've
+ * an order less than 1.
+ */
+#define INVALID_ORDER 0
+
+#define ISINTERNAL(p) (TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO)
+#define ISLEAF(p) (TYPE(p) == P_LBTREE || \
+ TYPE(p) == P_LRECNO || TYPE(p) == P_LDUP)
+
+/* Flags for __bam_cadjust_log(). */
+#define CAD_UPDATEROOT 0x01 /* Root page count was updated. */
+
+/* Flags for __bam_split_log(). */
+#define SPL_NRECS 0x01 /* Split tree has record count. */
+
+/* Flags for __bam_iitem(). */
+#define BI_DELETED 0x01 /* Key/data pair only placeholder. */
+
+/* Flags for __bam_stkrel(). */
+#define STK_CLRDBC 0x01 /* Clear dbc->page reference. */
+#define STK_NOLOCK 0x02 /* Don't retain locks. */
+
+/* Flags for __ram_ca(). These get logged, so make the values explicit. */
+typedef enum {
+ CA_DELETE = 0, /* Delete the current record. */
+ CA_IAFTER = 1, /* Insert after the current record. */
+ CA_IBEFORE = 2, /* Insert before the current record. */
+ CA_ICURRENT = 3 /* Overwrite the current record. */
+} ca_recno_arg;
+
+/*
+ * Flags for __bam_search() and __bam_rsearch().
+ *
+ * Note, internal page searches must find the largest record less than key in
+ * the tree so that descents work. Leaf page searches must find the smallest
+ * record greater than key so that the returned index is the record's correct
+ * position for insertion.
+ *
+ * The flags parameter to the search routines describes three aspects of the
+ * search: the type of locking required (including if we're locking a pair of
+ * pages), the item to return in the presence of duplicates and whether or not
+ * to return deleted entries. To simplify both the mnemonic representation
+ * and the code that checks for various cases, we construct a set of bitmasks.
+ */
+#define S_READ 0x00001 /* Read locks. */
+#define S_WRITE 0x00002 /* Write locks. */
+
+#define S_APPEND 0x00040 /* Append to the tree. */
+#define S_DELNO 0x00080 /* Don't return deleted items. */
+#define S_DUPFIRST 0x00100 /* Return first duplicate. */
+#define S_DUPLAST 0x00200 /* Return last duplicate. */
+#define S_EXACT 0x00400 /* Exact items only. */
+#define S_PARENT 0x00800 /* Lock page pair. */
+#define S_STACK 0x01000 /* Need a complete stack. */
+#define S_PAST_EOF 0x02000 /* If doing insert search (or keyfirst
+ * or keylast operations), or a split
+ * on behalf of an insert, it's okay to
+ * return an entry one past end-of-page.
+ */
+#define S_STK_ONLY 0x04000 /* Just return info in the stack */
+
+#define S_DELETE (S_WRITE | S_DUPFIRST | S_DELNO | S_EXACT | S_STACK)
+#define S_FIND (S_READ | S_DUPFIRST | S_DELNO)
+#define S_FIND_WR (S_WRITE | S_DUPFIRST | S_DELNO)
+#define S_INSERT (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_KEYFIRST (S_WRITE | S_DUPFIRST | S_PAST_EOF | S_STACK)
+#define S_KEYLAST (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_WRPAIR (S_WRITE | S_DUPLAST | S_PAST_EOF | S_PARENT)
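+
+/*
+ * A sketch of how the composite masks above are consumed: a caller
+ * passes one of the S_* composites and the search code tests the
+ * individual bits, for example
+ *
+ *	if (LF_ISSET(S_WRITE))
+ *		lock_mode = DB_LOCK_WRITE;
+ *	else
+ *		lock_mode = DB_LOCK_READ;
+ *
+ * where LF_ISSET() is the library's flag-test macro; a plain
+ * "(flags & S_WRITE) != 0" test is equivalent.
+ */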
+
+/*
+ * Various routines pass around page references. A page reference is
+ * a pointer to the page, and the indx indicates an item on the page.
+ * Each page reference may include a lock.
+ */
+struct __epg {
+ PAGE *page; /* The page. */
+ db_indx_t indx; /* The index on the page. */
+ db_indx_t entries; /* The number of entries on page */
+ DB_LOCK lock; /* The page's lock. */
+ db_lockmode_t lock_mode; /* The lock mode. */
+};
+
+/*
+ * We maintain a stack of the pages that we're locking in the tree. Grow
+ * the stack as necessary.
+ *
+ * XXX
+ * Temporary fix for #3243 -- clear the page and lock from the stack entry.
+ * The correct fix is to never release a stack that doesn't hold items.
+ */
+#define BT_STK_CLR(c) do { \
+ (c)->csp = (c)->sp; \
+ (c)->csp->page = NULL; \
+ LOCK_INIT((c)->csp->lock); \
+} while (0)
+
+#define BT_STK_ENTER(dbenv, c, pagep, page_indx, l, mode, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = pagep; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ (c)->csp->lock = l; \
+ (c)->csp->lock_mode = mode; \
+ } \
+} while (0)
+
+#define BT_STK_PUSH(dbenv, c, pagep, page_indx, lock, mode, ret) do { \
+ BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_NUM(dbenv, c, pagep, page_indx, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = NULL; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ LOCK_INIT((c)->csp->lock); \
+ (c)->csp->lock_mode = DB_LOCK_NG; \
+ } \
+} while (0)
+
+#define BT_STK_NUMPUSH(dbenv, c, pagep, page_indx, ret) do { \
+ BT_STK_NUM(dbenv, c, pagep, page_indx, ret); \
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_POP(c) \
+ ((c)->csp == (c)->sp ? NULL : --(c)->csp)
+
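+/*
+ * A sketch of the usual stack discipline, assuming a BTREE_CURSOR "cp"
+ * whose sp/csp/esp fields have already been initialized: pages are
+ * pushed as a search descends and popped on the way back up (or the
+ * whole stack is released via __bam_stkrel()).
+ *
+ *	BT_STK_PUSH(dbenv, cp, h, indx, lock, lock_mode, ret);
+ *	if (ret != 0)
+ *		goto err;
+ *	...
+ *	for (epg = BT_STK_POP(cp); epg != NULL; epg = BT_STK_POP(cp))
+ *		...
+ */
+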
+/* Btree/Recno cursor. */
+struct __cursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* btree private part */
+ EPG *sp; /* Stack pointer. */
+ EPG *csp; /* Current stack entry. */
+ EPG *esp; /* End stack pointer. */
+ EPG stack[5];
+
+ db_indx_t ovflsize; /* Maximum key/data on-page size. */
+
+ db_recno_t recno; /* Current record number. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+ /*
+ * Btree:
+ * We set a flag in the cursor structure if the underlying object has
+ * been deleted. It's not strictly necessary, we could get the same
+ * information by looking at the page itself, but this method doesn't
+ * require us to retrieve the page on cursor delete.
+ *
+ * Recno:
+ * When renumbering recno databases during deletes, cursors referencing
+ * "deleted" records end up positioned between two records, and so must
+ * be specially adjusted on the next operation.
+ */
+#define C_DELETED 0x0001 /* Record was deleted. */
+ /*
+ * There are three tree types that require maintaining record numbers:
+ * Recno AM trees, Btree AM trees for which the DB_RECNUM flag was set,
+ * and Btree off-page duplicate trees.
+ */
+#define C_RECNUM 0x0002 /* Tree requires record counts. */
+ /*
+ * Recno trees have immutable record numbers by default, but optionally
+ * support mutable record numbers. Off-page duplicate Recno trees have
+ * mutable record numbers. All Btrees with record numbers (including
+ * off-page duplicate trees) are mutable by design, no flag is needed.
+ */
+#define C_RENUMBER 0x0004 /* Tree records are mutable. */
+ u_int32_t flags;
+};
+
+/*
+ * Threshold value, as a function of bt_minkey, of the number of
+ * bytes a key/data pair can use before being placed on an overflow
+ * page. Assume every item requires the maximum alignment for
+ * padding, out of sheer paranoia.
+ */
+#define B_MINKEY_TO_OVFLSIZE(dbp, minkey, pgsize) \
+ ((u_int16_t)(((pgsize) - P_OVERHEAD(dbp)) / ((minkey) * P_INDX) -\
+ (BKEYDATA_PSIZE(0) + ALIGN(1, sizeof(int32_t)))))
+
+/*
+ * The maximum space that a single item can ever take up on one page.
+ * Used by __bam_split to determine whether a split is still necessary.
+ */
+#define B_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define B_MAXSIZEONPAGE(ovflsize) \
+ (B_MAX(BOVERFLOW_PSIZE, BKEYDATA_PSIZE(ovflsize)))
+
+/*
+ * The in-memory, per-tree btree/recno data structure.
+ */
+struct __btree { /* Btree access method. */
+ /*
+ * !!!
+ * These fields are write-once (when the structure is created) and
+ * so are ignored as far as multi-threading is concerned.
+ */
+ db_pgno_t bt_meta; /* Database meta-data page. */
+ db_pgno_t bt_root; /* Database root page. */
+
+ u_int32_t bt_maxkey; /* Maximum keys per page. */
+ u_int32_t bt_minkey; /* Minimum keys per page. */
+
+ /* Btree comparison function. */
+ int (*bt_compare) __P((DB *, const DBT *, const DBT *));
+ /* Btree prefix function. */
+ size_t (*bt_prefix) __P((DB *, const DBT *, const DBT *));
+
+ /* Recno access method. */
+ int re_pad; /* Fixed-length padding byte. */
+ int re_delim; /* Variable-length delimiting byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ char *re_source; /* Source file name. */
+
+ /*
+ * !!!
+ * The bt_lpgno field is NOT protected by any mutex, and for this
+ * reason must be advisory only, so, while it is read/written by
+ * multiple threads, DB is completely indifferent to the quality
+ * of its information.
+ */
+ db_pgno_t bt_lpgno; /* Last insert location. */
+
+ /*
+ * !!!
+ * The re_modified field is NOT protected by any mutex, and for this
+ * reason cannot be anything more complicated than a zero/non-zero
+ * value. The actual writing of the backing source file cannot be
+ * threaded, so clearing the flag isn't a problem.
+ */
+ int re_modified; /* If the tree was modified. */
+
+ /*
+ * !!!
+ * These fields are ignored as far as multi-threading is concerned.
+ * There are no transaction semantics associated with backing files,
+ * nor is there any thread protection.
+ */
+ FILE *re_fp; /* Source file handle. */
+ int re_eof; /* Backing source file EOF reached. */
+ db_recno_t re_last; /* Last record number read. */
+};
+
+/*
+ * Modes for the __bam_curadj recovery records (btree_curadj).
+ * These appear in log records, so we wire the values and
+ * do not leave it up to the compiler.
+ */
+typedef enum {
+ DB_CA_DI = 1,
+ DB_CA_DUP = 2,
+ DB_CA_RSPLIT = 3,
+ DB_CA_SPLIT = 4
+} db_ca_mode;
+
+#include "dbinc_auto/btree_auto.h"
+#include "dbinc_auto/btree_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_BTREE_H_ */
diff --git a/libdb/dbinc/crypto.h b/libdb/dbinc/crypto.h
new file mode 100644
index 0000000..108ac75
--- /dev/null
+++ b/libdb/dbinc/crypto.h
@@ -0,0 +1,78 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_CRYPTO_H_
+#define _DB_CRYPTO_H_
+
+/*
+ * !!!
+ * These are the internal representations of the algorithm flags.
+ * They are used in both the DB_CIPHER structure and the CIPHER
+ * structure so we can tell if users specified both passwd and alg
+ * correctly.
+ *
+ * CIPHER_ANY is used when an app joins an existing env but doesn't
+ * know the algorithm originally used. This is only valid in the
+ * DB_CIPHER structure until we open and can set the alg.
+ */
+/*
+ * We store the algorithm in an 8-bit field on the meta-page. So we
+ * use a numeric value, not bit fields.
+ * As bit fields we would be limited to 8 algorithms before needing
+ * numeric values anyway; that should be plenty. It is okay for the
+ * CIPHER_ANY flag to go beyond that since it is never stored on disk.
+ */
+
+/*
+ * This structure is per-process, not in shared memory.
+ */
+struct __db_cipher {
+ int (*adj_size) __P((size_t));
+ int (*close) __P((DB_ENV *, void *));
+ int (*decrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*encrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*init) __P((DB_ENV *, DB_CIPHER *));
+
+ u_int8_t mac_key[DB_MAC_KEY]; /* MAC key. */
+ void *data; /* Algorithm-specific information */
+
+#define CIPHER_AES 1 /* AES algorithm */
+ u_int8_t alg; /* Algorithm used - See above */
+ u_int8_t spare[3]; /* Spares */
+
+#define CIPHER_ANY 0x00000001 /* Only for DB_CIPHER */
+ u_int32_t flags; /* Other flags */
+};
+
+#ifdef HAVE_CRYPTO
+
+#include "crypto/rijndael/rijndael-api-fst.h"
+
+/*
+ * Shared ciphering structure
+ * No DB_MUTEX needed because all information is read-only after creation.
+ */
+typedef struct __cipher {
+ roff_t passwd; /* Offset to shared passwd */
+ size_t passwd_len; /* Length of passwd */
+ u_int32_t flags; /* Algorithm used - see above */
+} CIPHER;
+
+#define DB_AES_KEYLEN 128 /* AES key length */
+#define DB_AES_CHUNK 16 /* AES byte unit size */
+
+typedef struct __aes_cipher {
+ keyInstance decrypt_ki; /* Decryption key instance */
+ keyInstance encrypt_ki; /* Encryption key instance */
+ u_int32_t flags; /* AES-specific flags */
+} AES_CIPHER;
+
+#include "dbinc_auto/crypto_ext.h"
+#endif /* HAVE_CRYPTO */
+#endif /* !_DB_CRYPTO_H_ */
diff --git a/libdb/dbinc/cxx_common.h b/libdb/dbinc/cxx_common.h
new file mode 100644
index 0000000..b7c073d
--- /dev/null
+++ b/libdb/dbinc/cxx_common.h
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _CXX_COMMON_H_
+#define _CXX_COMMON_H_
+
+//
+// Common definitions used by all of Berkeley DB's C++ include files.
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Mechanisms for declaring classes
+//
+
+//
+// Every class defined in this file has an _exported next to the class name.
+// This is needed for WinTel machines so that the class methods can
+// be exported or imported in a DLL as appropriate. Users of the DLL
+// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL
+// must be defined.
+//
+#if defined(_MSC_VER)
+
+# if defined(DB_CREATE_DLL)
+# define _exported __declspec(dllexport) // creator of dll
+# elif defined(DB_USE_DLL)
+# define _exported __declspec(dllimport) // user of dll
+# else
+# define _exported // static lib creator or user
+# endif
+
+#else /* _MSC_VER */
+
+# define _exported
+
+#endif /* _MSC_VER */
+#endif /* !_CXX_COMMON_H_ */
diff --git a/libdb/dbinc/cxx_except.h b/libdb/dbinc/cxx_except.h
new file mode 100644
index 0000000..ee65df4
--- /dev/null
+++ b/libdb/dbinc/cxx_except.h
@@ -0,0 +1,141 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _CXX_EXCEPT_H_
+#define _CXX_EXCEPT_H_
+
+#include "cxx_common.h"
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+class DbDeadlockException; // forward
+class DbException; // forward
+class DbLockNotGrantedException; // forward
+class DbLock; // forward
+class DbMemoryException; // forward
+class DbRunRecoveryException; // forward
+class Dbt; // forward
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Exception classes
+//
+
+// Almost any error in the DB library throws a DbException.
+// Every exception should be considered an abnormality
+// (e.g. bug, misuse of DB, file system error).
+//
+// NOTE: We would like to inherit from class exception and
+// let it handle what(), but there are
+// MSVC++ problems when <exception> is included.
+//
+class _exported DbException
+{
+public:
+ virtual ~DbException();
+ DbException(int err);
+ DbException(const char *description);
+ DbException(const char *prefix, int err);
+ DbException(const char *prefix1, const char *prefix2, int err);
+ int get_errno() const;
+ virtual const char *what() const;
+
+ DbException(const DbException &);
+ DbException &operator = (const DbException &);
+
+private:
+ char *what_;
+ int err_; // errno
+};
+
+//
+// A specific sort of exception that occurs when
+// an operation is aborted to resolve a deadlock.
+//
+class _exported DbDeadlockException : public DbException
+{
+public:
+ virtual ~DbDeadlockException();
+ DbDeadlockException(const char *description);
+
+ DbDeadlockException(const DbDeadlockException &);
+ DbDeadlockException &operator = (const DbDeadlockException &);
+};
+
+//
+// A specific sort of exception that occurs when
+// a lock is not granted, e.g. by lock_get or lock_vec.
+// Note that the Dbt is only live as long as the Dbt used
+// in the offending call.
+//
+class _exported DbLockNotGrantedException : public DbException
+{
+public:
+ virtual ~DbLockNotGrantedException();
+ DbLockNotGrantedException(const char *prefix, db_lockop_t op,
+ db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index);
+ DbLockNotGrantedException(const DbLockNotGrantedException &);
+ DbLockNotGrantedException &operator =
+ (const DbLockNotGrantedException &);
+
+ db_lockop_t get_op() const;
+ db_lockmode_t get_mode() const;
+ const Dbt* get_obj() const;
+ DbLock *get_lock() const;
+ int get_index() const;
+
+private:
+ db_lockop_t op_;
+ db_lockmode_t mode_;
+ const Dbt *obj_;
+ DbLock *lock_;
+ int index_;
+};
+
+//
+// A specific sort of exception that occurs when
+// user declared memory is insufficient in a Dbt.
+//
+class _exported DbMemoryException : public DbException
+{
+public:
+ virtual ~DbMemoryException();
+ DbMemoryException(Dbt *dbt);
+ DbMemoryException(const char *description);
+ DbMemoryException(const char *prefix, Dbt *dbt);
+ DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt);
+ Dbt *get_dbt() const;
+
+ DbMemoryException(const DbMemoryException &);
+ DbMemoryException &operator = (const DbMemoryException &);
+
+private:
+ Dbt *dbt_;
+};
+
+//
+// A specific sort of exception that occurs when
+// recovery is required before continuing DB activity.
+//
+class _exported DbRunRecoveryException : public DbException
+{
+public:
+ virtual ~DbRunRecoveryException();
+ DbRunRecoveryException(const char *description);
+
+ DbRunRecoveryException(const DbRunRecoveryException &);
+ DbRunRecoveryException &operator = (const DbRunRecoveryException &);
+};
+
+#endif /* !_CXX_EXCEPT_H_ */
diff --git a/libdb/dbinc/cxx_int.h b/libdb/dbinc/cxx_int.h
new file mode 100644
index 0000000..f8169f7
--- /dev/null
+++ b/libdb/dbinc/cxx_int.h
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _CXX_INT_H_
+#define _CXX_INT_H_
+
+// private data structures known to the implementation only
+
+//
+// Using FooImp classes will allow the implementation to change in the
+// future without any modification to user code or even to header files
+// that the user includes. FooImp * is just like void * except that it
+// provides a little extra protection, since you cannot randomly assign
+// any old pointer to a FooImp* as you can with void *. Currently, a
+// pointer to such an opaque class is always just a pointer to the
+// appropriate underlying implementation struct. These are converted
+// back and forth using the various overloaded wrap()/unwrap() methods.
+// This is essentially a use of the "Bridge" Design Pattern.
+//
+// WRAPPED_CLASS implements the appropriate wrap() and unwrap() methods
+// for a wrapper class that has an underlying pointer representation.
+//
+#define WRAPPED_CLASS(_WRAPPER_CLASS, _IMP_CLASS, _WRAPPED_TYPE) \
+ \
+ class _IMP_CLASS {}; \
+ \
+ inline _WRAPPED_TYPE unwrap(_WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((_WRAPPED_TYPE)((void *)(val->imp()))); \
+ } \
+ \
+ inline const _WRAPPED_TYPE unwrapConst(const _WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((const _WRAPPED_TYPE)((void *)(val->constimp()))); \
+ } \
+ \
+ inline _IMP_CLASS *wrap(_WRAPPED_TYPE val) \
+ { \
+ return ((_IMP_CLASS*)((void *)val)); \
+ }
+
+WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE*)
+WRAPPED_CLASS(Db, DbImp, DB*)
+WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV*)
+WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN*)
+
+// A tristate integer value used by the DB_ERROR macro below.
+// We chose not to make this an enumerated type so it can
+// be kept private, even though methods that return the
+// tristate int can be declared in db_cxx.h .
+//
+#define ON_ERROR_THROW 1
+#define ON_ERROR_RETURN 0
+#define ON_ERROR_UNKNOWN (-1)
+
+// Macros that handle detected errors, in case we want to
+// change the default behavior. The 'policy' is one of
+// the tristate values given above. If UNKNOWN is specified,
+// the behavior is taken from the last initialized DbEnv.
+//
+#define DB_ERROR(caller, ecode, policy) \
+ DbEnv::runtime_error(caller, ecode, policy)
+
+#define DB_ERROR_DBT(caller, dbt, policy) \
+ DbEnv::runtime_error_dbt(caller, dbt, policy)
+
+#define DB_OVERFLOWED_DBT(dbt) \
+ (F_ISSET(dbt, DB_DBT_USERMEM) && dbt->size > dbt->ulen)
+
+/* values for Db::flags_ */
+#define DB_CXX_PRIVATE_ENV 0x00000001
+
+#endif /* !_CXX_INT_H_ */
diff --git a/libdb/dbinc/db.in b/libdb/dbinc/db.in
new file mode 100644
index 0000000..2b73093
--- /dev/null
+++ b/libdb/dbinc/db.in
@@ -0,0 +1,1875 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default; you must specify a command line flag or #pragma to turn
+ * it on. Don't do that, however, because some of Microsoft's own header
+ * files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@
+#define DB_VERSION_MINOR @DB_VERSION_MINOR@
+#define DB_VERSION_PATCH @DB_VERSION_PATCH@
+#define DB_VERSION_STRING @DB_VERSION_STRING@
+
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+@u_char_decl@
+@u_short_decl@
+@u_int_decl@
+@u_long_decl@
+@ssize_t_decl@
+
+/* Basic types that are exported or quasi-exported. */
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef u_int32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are currently limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions.
+ */
+typedef u_int32_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+struct __mutex_t; typedef struct __mutex_t DB_MUTEX;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */
+ u_int32_t flags;
+};
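+
+/*
+ * A sketch of filling in DBTs for a simple DB->get, assuming the key
+ * bytes are owned by the caller; any fields not explicitly set must be
+ * zeroed first:
+ *
+ *	DBT key, data;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	key.data = "fruit";
+ *	key.size = sizeof("fruit");
+ *	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+ *		printf("%.*s\n", (int)data.size, (char *)data.data);
+ */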
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TRUNCATE 0x000080 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x000100 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000200 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000400 /* Use the environment if root. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
+ * DB->remove, DB->rename, DB->truncate
+ * DB_DIRTY_READ:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
+ * DB_ENV->txn_begin
+ *
+ * Shared flags up to 0x000400 */
+#define DB_AUTO_COMMIT 0x00800000 /* Implied transaction. */
+#define DB_DIRTY_READ 0x01000000 /* Dirty Read. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000001 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000001 /* Open in an XA environment. */
+
+/*
+ * Flags private to DB_ENV->open.
+ * Shared flags up to 0x000400 */
+#define DB_INIT_CDB 0x000800 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x001000 /* Initialize locking. */
+#define DB_INIT_LOG 0x002000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x004000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x008000 /* Initialize transactions. */
+#define DB_JOINENV 0x010000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x020000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x040000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x080000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x100000 /* Use system-backed memory. */
+
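+/*
+ * A sketch of a common fully-transactional combination of the flags
+ * above, assuming "home" names an existing environment directory:
+ *
+ *	if ((ret = dbenv->open(dbenv, home,
+ *	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ *	    DB_INIT_TXN | DB_RECOVER, 0)) != 0)
+ *		dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+ */
+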
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x000400 */
+#define DB_EXCL 0x000800 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x001000 /* UNDOC: fcntl(2) locking. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x004000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x000400 */
+#define DB_TXN_NOWAIT 0x000800 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x001000 /* Always sync log on commit. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x000400 */
+#define DB_CDB_ALLDB 0x000800 /* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x001000 /* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x002000 /* Don't buffer log files in the OS. */
+#define DB_NOLOCKING 0x004000 /* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x008000 /* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x010000 /* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x020000 /* Set panic state per environment. */
+#define DB_REGION_INIT 0x040000 /* Page-fault regions on open. */
+#define DB_TXN_WRITE_NOSYNC 0x080000 /* Write, don't sync, on txn commit. */
+#define DB_YIELDCPU 0x100000 /* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000001 /* Upgrading. */
+#define DB_VERIFY 0x000002 /* Verifying. */
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x000400 */
+#define DB_DIRECT 0x000800 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x001000 /* UNDOC: dealing with an extent. */
+#define DB_ODDFILESIZE 0x002000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ */
+#define DB_CHKSUM_SHA1 0x000001 /* Use SHA1 checksumming */
+#define DB_DUP 0x000002 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x000004 /* Btree, Hash: duplicate keys. */
+#define DB_ENCRYPT 0x000008 /* Encrypt the database. */
+#define DB_RECNUM 0x000010 /* Btree: record numbers. */
+#define DB_RENUMBER 0x000020 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x000040 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x000080 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB->stat methods.
+ */
+#define DB_STAT_CLEAR 0x000001 /* Clear stat after returning values. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x000040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->set_rep_transport's send callback.
+ */
+#define DB_REP_PERMANENT 0x0001 /* Important--app. may want to flush. */
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */
+#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */
+#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */
+#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 7 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_FREE_LOCKER 0x001 /* Internal: Free locker as well. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */
+#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */
+
+/*
+ * Simple R/W lock modes and for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
+ * must be == 4.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_DIRTY=7, /* Dirty Read. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR=2, /* Lock is bad. */
+ DB_LSTAT_EXPIRED=3, /* Lock has expired. */
+ DB_LSTAT_FREE=4, /* Lock is unallocated. */
+ DB_LSTAT_HELD=5, /* Lock is currently held. */
+ DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting
+ * was removed */
+ DB_LSTAT_PENDING=7, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */
+}db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ u_int32_t st_id; /* Last allocated locker ID. */
+ u_int32_t st_cur_maxid; /* Current maximum unused ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum num of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+#define DB_TXN_LOCK 4
+ u_int32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
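+
+/*
+ * A sketch of acquiring and releasing a single lock with the structures
+ * above, assuming "locker" came from DB_ENV->lock_id() and "obj" is a
+ * DBT describing the object being locked:
+ *
+ *	DB_LOCK lock;
+ *
+ *	if ((ret = dbenv->lock_get(dbenv,
+ *	    locker, 0, &obj, DB_LOCK_WRITE, &lock)) == 0)
+ *		(void)dbenv->lock_put(dbenv, &lock);
+ *
+ * DB_LOCKREQ and DB_ENV->lock_vec() batch several such requests into a
+ * single call.
+ */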
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 7 /* Current log version. */
+#define DB_LOGOLDVER 7 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
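+
+/*
+ * LSNs order first by file and then by offset; the library's
+ * log_compare() returns -1, 0 or 1 accordingly, so "does a precede b"
+ * is simply:
+ *
+ *	if (log_compare(&a, &b) < 0)
+ *		...
+ */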
+
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *c_fh; /* File handle. */
+ DB_LSN c_lsn; /* Cursor: LSN */
+ u_int32_t c_len; /* Cursor: record length */
+ u_int32_t c_prev; /* Cursor: previous record's offset */
+
+ DBT c_dbt; /* Return DBT. */
+
+#define DB_LOGC_BUF_SIZE (32 * 1024)
+ u_int8_t *bp; /* Allocated read buffer. */
+ u_int32_t bp_size; /* Read buffer length in bytes. */
+ u_int32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ u_int32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* Methods. */
+ int (*close) __P((DB_LOGC *, u_int32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */
+ u_int32_t flags;
+};
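+
+/*
+ * A sketch of walking the log with this cursor, assuming a transactional
+ * environment handle "dbenv"; DB_FIRST/DB_NEXT are the usual positioning
+ * flags, and each successful get() leaves the record in rec.data/rec.size:
+ *
+ *	DB_LOGC *logc;
+ *	DB_LSN lsn;
+ *	DBT rec;
+ *
+ *	memset(&rec, 0, sizeof(rec));
+ *	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) == 0) {
+ *		while ((ret = logc->get(logc, &lsn, &rec, DB_NEXT)) == 0)
+ *			;
+ *		(void)logc->close(logc, 0);
+ *	}
+ */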
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_size; /* Log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_disk_file; /* Known on disk log file number. */
+ u_int32_t st_disk_offset; /* Known on disk log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */
+};
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+
+/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Priority values for DB_MPOOLFILE->set_priority. */
+typedef enum {
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH *fhp; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * The pinref and q fields are protected by the region lock, not the
+ * DB_MPOOLFILE structure mutex. We don't use the structure mutex
+ * because then I/O (which is done with the structure lock held because of
+ * the race between the seek and write of the file descriptor) would
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * These fields are not thread-protected because they are initialized
+ * when the file is opened and never modified.
+ */
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* Methods. */
+ int (*close) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ void (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ void (*last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ void (*refcnt) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ void (*set_unlink) __P((DB_MPOOLFILE *, int));
+ int (*sync) __P((DB_MPOOLFILE *));
+
+ /*
+ * MP_OPEN_CALLED and MP_READONLY do not need to be thread protected
+ * because they are initialized when the file is opened, and never
+ * modified.
+ *
+ * MP_FLUSH, MP_UPGRADE and MP_UPGRADE_FAIL are thread protected
+ * because they are potentially read by multiple threads of control.
+ */
+#define MP_FLUSH 0x001 /* Was opened to flush a buffer. */
+#define MP_OPEN_CALLED 0x002 /* File opened. */
+#define MP_READONLY 0x004 /* File is readonly. */
+#define MP_UPGRADE 0x008 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x010 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
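+
+/*
+ * A sketch of the usual pin/unpin cycle with these methods, assuming the
+ * handle came from DB_ENV->memp_fcreate() and "pgsize" matches the
+ * file's page size:
+ *
+ *	db_pgno_t pgno = 0;
+ *	void *addr;
+ *
+ *	if ((ret = mpf->open(mpf, "file", 0, 0, pgsize)) == 0) {
+ *		if ((ret = mpf->get(mpf, &pgno, 0, &addr)) == 0)
+ *			(void)mpf->put(mpf, addr, 0);
+ *		(void)mpf->close(mpf, 0);
+ *	}
+ *
+ * Every page returned by get() must be returned via put() (or set())
+ * before the file is closed.
+ */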
+
+/*
+ * Mpool statistics structure.
+ */
+struct __db_mpool_stat {
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_pages; /* Total number of pages. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ u_int32_t st_hash_wait; /* Hash lock granted after wait. */
+ u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted with nowait. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_alloc; /* Number of page allocations. */
+ u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ u_int32_t st_alloc_pages; /* Pages checked during allocation. */
+ u_int32_t st_alloc_max_pages; /* Max checked during allocation. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_GETPGNOS=5, /* Internal. */
+ DB_TXN_OPENFILES=6, /* Internal. */
+ DB_TXN_POPENFILES=7, /* Internal. */
+ DB_TXN_PRINT=8 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
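+
+/*
+ * A sketch of how recovery functions typically dispatch on these values;
+ * redo_it()/undo_it() stand in for the operation-specific work:
+ *
+ *	if (DB_REDO(op))
+ *		redo_it();
+ *	else if (DB_UNDO(op))
+ *		undo_it();
+ */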
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time this txn expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ /* API-private structure: used by C++ */
+ void *api_internal;
+
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+ /* Methods. */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, u_int32_t));
+ int (*discard) __P((DB_TXN *, u_int32_t));
+ u_int32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, u_int8_t *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_COMPENSATE 0x02 /* Compensating transaction. */
+#define TXN_DIRTY_READ 0x04 /* Transaction does dirty reads. */
+#define TXN_LOCKTIMEOUT 0x08 /* Transaction has a lock timeout. */
+#define TXN_MALLOC 0x10 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x20 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x40 /* Do not wait on locks. */
+#define TXN_SYNC 0x80 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* LSN when transaction began */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * Structure used for the two-phase commit interface. Berkeley DB support for
+ * two-phase commit is compatible with the X/Open XA interface. The XA #define
+ * XIDDATASIZE defines the size of a global transaction ID. We have our own
+ * version here, which must have the same value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
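+
+/*
+ * A minimal sketch of resolving transactions left prepared by an XA
+ * transaction manager after a crash, assuming an already-recovered
+ * DB_ENV handle named dbenv; the array size and the choice to abort
+ * every transaction are placeholders for an application's own policy:
+ *
+ *	DB_PREPLIST preplist[64];
+ *	long count, i;
+ *	int ret;
+ *
+ *	ret = dbenv->txn_recover(dbenv, preplist, 64, &count, DB_FIRST);
+ *	while (ret == 0 && count > 0) {
+ *		for (i = 0; i < count; i++)
+ *			(void)preplist[i].txn->abort(preplist[i].txn);
+ *		ret = dbenv->txn_recover(dbenv,
+ *		    preplist, 64, &count, DB_NEXT);
+ *	}
+ */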
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_start flags values */
+#define DB_REP_CLIENT 0x001
+#define DB_REP_LOGSONLY 0x002
+#define DB_REP_MASTER 0x004
+
+/* Replication statistics. */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ u_int32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+
+ u_int32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ u_int32_t st_gen; /* Current generation number. */
+ u_int32_t st_log_duplicated; /* Log records received multiply.+ */
+ u_int32_t st_log_queued; /* Log records currently queued.+ */
+ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ u_int32_t st_log_records; /* Log records received and put.+ */
+ u_int32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ u_int32_t st_master_changes; /* # of times we've switched masters. */
+ u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ u_int32_t st_msgs_processed; /* Messages received and processed.+ */
+ u_int32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ u_int32_t st_msgs_sent; /* # of successful message sends.+ */
+ u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ u_int32_t st_nthrottles; /* # of times we were throttled. */
+ u_int32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ u_int32_t st_txns_applied; /* # of transactions applied.+ */
+
+ /* Elections generally. */
+ u_int32_t st_elections; /* # of elections held.+ */
+ u_int32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ u_int32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ int st_election_tiebreaker; /* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH=2,
+ DB_RECNO=3,
+ DB_QUEUE=4,
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 8 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_COMMIT 5 /* log_put() (internal) */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURRENT 8 /* c_get(), c_put(), DB_LOGC->get() */
+#define DB_FAST_STAT 9 /* stat() */
+#define DB_FIRST 10 /* c_get(), DB_LOGC->get() */
+#define DB_GET_BOTH 11 /* get(), c_get() */
+#define DB_GET_BOTHC 12 /* c_get() (internal) */
+#define DB_GET_BOTH_RANGE 13 /* get(), c_get() */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT 19 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), DB_LOGC->get() */
+#define DB_PREV_NODUP 28 /* c_get(), DB_LOGC->get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), DB_LOGC->get() */
+#define DB_SET_LOCK_TIMEOUT 31 /* set_timeout() */
+#define DB_SET_RANGE 32 /* c_get() */
+#define DB_SET_RECNO 33 /* get(), c_get() */
+#define DB_SET_TXN_NOW 34 /* set_timeout() (internal) */
+#define DB_SET_TXN_TIMEOUT 35 /* set_timeout() */
+#define DB_UPDATE_SECONDARY 36 /* c_get(), c_del() (internal) */
+#define DB_WRITECURSOR 37 /* cursor() */
+#define DB_WRITELOCK 38 /* cursor() (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+/* DB_DIRTY_READ 0x01000000 Dirty Read. */
+#define DB_FLUSH 0x02000000 /* Flush data to disk. */
+#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */
+#define DB_NOCOPY 0x10000000 /* Don't copy data */
+#define DB_PERMANENT 0x20000000 /* Flag record with REP_PERMANENT. */
+#define DB_RMW 0x40000000 /* Acquire write flag immediately. */
+#define DB_WRNOSYNC 0x80000000 /* Private: write, don't sync log_put */
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30989)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30988)/* There are two masters. */
+#define DB_REP_HOLDELECTION (-30987)/* Time to hold an election. */
+#define DB_REP_NEWMASTER (-30986)/* We have learned of a new master. */
+#define DB_REP_NEWSITE (-30985)/* New site entered system. */
+#define DB_REP_OUTDATED (-30984)/* Site is too far behind master. */
+#define DB_REP_UNAVAIL (-30983)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30982)/* Panic return. */
+#define DB_SECONDARY_BAD (-30981)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30980)/* Verify failed; bad format. */
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_LOCK_NOTEXIST (-30896)/* Object to lock is gone. */
+#define DB_NEEDSPLIT (-30895)/* Page needs to be split. */
+#define DB_SURPRISE_KID (-30894)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30893)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30892)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30891)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30890)/* DB->verify cannot proceed. */
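+
+/*
+ * A minimal sketch of the retry loop most transactional applications
+ * wrap around an update, assuming open DB_ENV and DB handles named
+ * dbenv and dbp and already-initialized key/data DBTs; only
+ * DB_LOCK_DEADLOCK is retried, any other error is reported:
+ *
+ *	DB_TXN *txn;
+ *	int ret;
+ *
+ *	for (;;) {
+ *		if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ *			break;
+ *		if ((ret = dbp->put(dbp, txn, &key, &data, 0)) == 0) {
+ *			ret = txn->commit(txn, 0);
+ *			break;
+ *		}
+ *		(void)txn->abort(txn);
+ *		if (ret != DB_LOCK_DEADLOCK)
+ *			break;
+ *	}
+ *	if (ret != 0)
+ *		dbenv->err(dbenv, ret, "update failed");
+ */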
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+ DB_CACHE_PRIORITY priority; /* Priority in the buffer pool. */
+
+ DB_MUTEX *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ u_int32_t lid; /* Locker id for handle locking. */
+ u_int32_t cur_lid; /* Current handle lock holder. */
+ u_int32_t associate_lid; /* Locker id for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how
+ * many threads are updating this secondary (see __db_c_put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in in the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ u_int32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* Methods. */
+ int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
+ const DBT *, DBT *), u_int32_t));
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_cache_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*stat) __P((DB *, void *, u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *,
+ DB_TXN *, const char *, const char *, DB_LSN *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*stored_close) __P((DB *, u_int32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */
+#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */
+#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */
+#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */
+#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */
+#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */
+#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */
+#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */
+#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00000200 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */
+#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */
+#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */
+#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */
+#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called. */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad. */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly. */
+#define DB_AM_RECNUM 0x00080000 /* DB_RECNUM. */
+#define DB_AM_RECOVER 0x00100000 /* DB opened by recovery routine. */
+#define DB_AM_RENUMBER 0x00200000 /* DB_RENUMBER. */
+#define DB_AM_REVSPLITOFF 0x00400000 /* DB_REVSPLITOFF. */
+#define DB_AM_SECONDARY 0x00800000 /* Database is a secondary index. */
+#define DB_AM_SNAPSHOT 0x01000000 /* DB_SNAPSHOT. */
+#define DB_AM_SUBDB 0x02000000 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x04000000 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x08000000 /* Opened in a transaction. */
+#define DB_AM_VERIFYING 0x10000000 /* DB handle is in the verifier. */
+ u_int32_t flags;
+};
+
+/*
+ * Macros for bulk get. Note that wherever we use a DBT *, we explicitly
+ * cast it; this allows the same macros to work with C++ Dbt *'s, as Dbt
+ * is a subclass of struct DBT in C++.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (u_int8_t *)((DBT *)(dbt))->data + \
+ ((DBT *)(dbt))->ulen - sizeof(u_int32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (u_int8_t *)((DBT *)(dbt))->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retklen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
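+
+/*
+ * A minimal sketch of a bulk read using the macros above, assuming an
+ * open DB handle named dbp, a key DBT already set to the key being
+ * looked up, and a user-owned buffer large enough to hold at least one
+ * page of results (the buffer size here is arbitrary):
+ *
+ *	u_int8_t buf[64 * 1024];
+ *	DBT data;
+ *	void *p, *retdata;
+ *	size_t retdlen;
+ *
+ *	memset(&data, 0, sizeof(data));
+ *	data.data = buf;
+ *	data.ulen = sizeof(buf);
+ *	data.flags = DB_DBT_USERMEM;
+ *
+ *	if (dbp->get(dbp, NULL, &key, &data, DB_MULTIPLE) == 0)
+ *		for (DB_MULTIPLE_INIT(p, &data);;) {
+ *			DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+ *			if (retdata == NULL)
+ *				break;
+ *			(each retdata/retdlen pair is one returned item)
+ *		}
+ */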
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+ /* Private: for secondary indices. */
+ int (*c_real_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */
+#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */
+#define DBC_OPD 0x0008 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0020 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x0100 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */
+#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */
+#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */
+ u_int32_t flags;
+};
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_extentsize; /* Pages per extent. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries. There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_REPLICATION 0x0008 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_size; /* Log file size. */
+ u_int32_t lg_regionmax; /* Region size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ int rep_eid; /* environment id. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int panic_errval; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+ u_int32_t tas_spins; /* test-and-set spins. */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ DB_MUTEX *dblist_mutexp; /* Mutex. */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* DB_ENV Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, u_int32_t));
+ int (*dbrename) __P((DB_ENV *, DB_TXN *,
+ const char *, const char *, const char *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rpc_server) __P((DB_ENV *,
+ void *, const char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tas_spins) __P((DB_ENV *, u_int32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ void *lg_handle; /* Log handle and methods. */
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+ int (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+
+ void *lk_handle; /* Lock handle and methods. */
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+ int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ int (*lock_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*lock_get) __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, u_int32_t *));
+ int (*lock_id_free) __P((DB_ENV *, u_int32_t));
+ int (*lock_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*lock_downgrade) __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+
+ void *mp_handle; /* Mpool handle and methods. */
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ int (*memp_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int (*memp_nameop) __P((DB_ENV *,
+ u_int8_t *, const char *, const char *, const char *));
+ int (*memp_register) __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+
+ void *rep_handle; /* Replication handle and methods. */
+ int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *));
+ int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ int (*set_rep_election) __P((DB_ENV *,
+ u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+ int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_timeout) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_transport) __P((DB_ENV *, int,
+ int (*) (DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+
+ void *tx_handle; /* Txn handle and methods. */
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ int (*txn_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*txn_recover) __P((DB_ENV *,
+ DB_PREPLIST *, long, long *, u_int32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTSEND 2 /* after REP_ELECT msgnit */
+#define DB_TEST_ELECTVOTE1 3 /* after __rep_send_vote 1 */
+#define DB_TEST_ELECTVOTE2 4 /* after __rep_wait */
+#define DB_TEST_ELECTWAIT1 5 /* after REP_VOTE2 */
+#define DB_TEST_ELECTWAIT2 6 /* after __rep_wait 2 */
+#define DB_TEST_PREDESTROY 7 /* before destroy op */
+#define DB_TEST_PREOPEN 8 /* before __os_open */
+#define DB_TEST_POSTDESTROY 9 /* after destroy op */
+#define DB_TEST_POSTLOG 10 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 11 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 12 /* after __os_open */
+#define DB_TEST_POSTSYNC 13 /* after syncing the log */
+#define DB_TEST_SUBDB_LOCKS 14 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */
+#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */
+#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */
+#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOLOCKING 0x0000200 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x0000400 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x0000800 /* Okay if panic set. */
+#define DB_ENV_OPEN_CALLED 0x0001000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x0002000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x0004000 /* DB_PRIVATE set. */
+#define DB_ENV_REGION_INIT 0x0008000 /* DB_REGION_INIT set. */
+#define DB_ENV_REP_CLIENT 0x0010000 /* Replication client. */
+#define DB_ENV_REP_LOGSONLY 0x0020000 /* Log files only replication site. */
+#define DB_ENV_REP_MASTER 0x0040000 /* Replication master. */
+#define DB_ENV_RPCCLIENT 0x0080000 /* DB_CLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x0800000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */
+ u_int32_t flags;
+};
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+typedef struct __db DBM;
+
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_close(a) __db_ndbm_close@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_delete(a, b) __db_ndbm_delete@DB_VERSION_UNIQUE_NAME@(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_error(a) __db_ndbm_error@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_open(a, b, c) __db_ndbm_open@DB_VERSION_UNIQUE_NAME@(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_store(a, b, c, d) \
+ __db_ndbm_store@DB_VERSION_UNIQUE_NAME@(a, b, c, d)
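+
+/*
+ * A minimal sketch of the emulated ndbm interface, assuming <fcntl.h>
+ * has been included for the open flags; the file name and contents are
+ * placeholders, and DB will actually operate on "example.db" because
+ * of the DBM_SUFFIX appended above:
+ *
+ *	DBM *dbm;
+ *	datum key, val;
+ *
+ *	if ((dbm = dbm_open("example", O_CREAT | O_RDWR, 0644)) != NULL) {
+ *		key.dptr = "fruit";
+ *		key.dsize = 5;
+ *		val.dptr = "apple";
+ *		val.dsize = 5;
+ *		(void)dbm_store(dbm, key, val, DBM_INSERT);
+ *		val = dbm_fetch(dbm, key);
+ *		dbm_close(dbm);
+ *	}
+ */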
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init@DB_VERSION_UNIQUE_NAME@(a)
+#define dbmclose __db_dbm_close@DB_VERSION_UNIQUE_NAME@
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete@DB_VERSION_UNIQUE_NAME@(a)
+#endif
+#define fetch(a) __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@(a)
+#define firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define nextkey(a) __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
+#define store(a, b) __db_dbm_store@DB_VERSION_UNIQUE_NAME@(a, b)
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+#define hcreate(a) __db_hcreate@DB_VERSION_UNIQUE_NAME@(a)
+#define hdestroy __db_hdestroy@DB_VERSION_UNIQUE_NAME@
+#define hsearch(a, b) __db_hsearch@DB_VERSION_UNIQUE_NAME@(a, b)
+
+#endif /* DB_DBM_HSEARCH */
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_H_ */
diff --git a/libdb/dbinc/db_185.in b/libdb/dbinc/db_185.in
new file mode 100644
index 0000000..a03ebb5
--- /dev/null
+++ b/libdb/dbinc/db_185.in
@@ -0,0 +1,169 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_185_H_
+#define _DB_185_H_
+
+#include <sys/types.h>
+
+#include <limits.h>
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+#define RET_ERROR -1 /* Return values. */
+#define RET_SUCCESS 0
+#define RET_SPECIAL 1
+
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+/*
+ * XXX
+ * SGI/IRIX already has a pgno_t.
+ */
+#ifdef sgi
+#define pgno_t db_pgno_t
+#endif
+
+#define MAX_PAGE_NUMBER 0xffffffff /* >= # of pages in a file */
+typedef u_int32_t pgno_t;
+#define MAX_PAGE_OFFSET 65535 /* >= # of bytes in a page */
+typedef u_int16_t indx_t;
+#define MAX_REC_NUMBER 0xffffffff /* >= # of records in a tree */
+typedef u_int32_t recno_t;
+
+/* Key/data structure -- a Data-Base Thang. */
+typedef struct {
+ void *data; /* data */
+ size_t size; /* data length */
+} DBT;
+
+/* Routine flags. */
+#define R_CURSOR 1 /* del, put, seq */
+#define __R_UNUSED 2 /* UNUSED */
+#define R_FIRST 3 /* seq */
+#define R_IAFTER 4 /* put (RECNO) */
+#define R_IBEFORE 5 /* put (RECNO) */
+#define R_LAST 6 /* seq (BTREE, RECNO) */
+#define R_NEXT 7 /* seq */
+#define R_NOOVERWRITE 8 /* put */
+#define R_PREV 9 /* seq (BTREE, RECNO) */
+#define R_SETCURSOR 10 /* put (RECNO) */
+#define R_RECNOSYNC 11 /* sync (RECNO) */
+
+typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+
+/* Access method description structure. */
+typedef struct __db {
+ DBTYPE type; /* Underlying db type. */
+ int (*close) __P((struct __db *));
+ int (*del) __P((const struct __db *, const DBT *, u_int));
+ int (*get) __P((const struct __db *, const DBT *, DBT *, u_int));
+ int (*put) __P((const struct __db *, DBT *, const DBT *, u_int));
+ int (*seq) __P((const struct __db *, DBT *, DBT *, u_int));
+ int (*sync) __P((const struct __db *, u_int));
+ void *internal; /* Access method private. */
+ int (*fd) __P((const struct __db *));
+} DB;
+
+#define BTREEMAGIC 0x053162
+#define BTREEVERSION 3
+
+/* Structure used to pass parameters to the btree routines. */
+typedef struct {
+#define R_DUP 0x01 /* duplicate keys */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t maxkeypage; /* maximum keys per page */
+ u_int32_t minkeypage; /* minimum keys per page */
+ u_int32_t psize; /* page size */
+ int (*compare) /* comparison function */
+ __P((const DBT *, const DBT *));
+ size_t (*prefix) /* prefix function */
+ __P((const DBT *, const DBT *));
+ int lorder; /* byte order */
+} BTREEINFO;
+
+#define HASHMAGIC 0x061561
+#define HASHVERSION 2
+
+/* Structure used to pass parameters to the hashing routines. */
+typedef struct {
+ u_int32_t bsize; /* bucket size */
+ u_int32_t ffactor; /* fill factor */
+ u_int32_t nelem; /* number of elements */
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t /* hash function */
+ (*hash) __P((const void *, size_t));
+ int lorder; /* byte order */
+} HASHINFO;
+
+/* Structure used to pass parameters to the record routines. */
+typedef struct {
+#define R_FIXEDLEN 0x01 /* fixed-length records */
+#define R_NOKEY 0x02 /* key not required */
+#define R_SNAPSHOT 0x04 /* snapshot the input */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t psize; /* page size */
+ int lorder; /* byte order */
+ size_t reclen; /* record length (fixed-length records) */
+ u_char bval; /* delimiting byte (variable-length records) */
+ char *bfname; /* btree file name */
+} RECNOINFO;
+
+/* Re-define the user's dbopen calls. */
+#define dbopen __db185_open@DB_VERSION_UNIQUE_NAME@
+
+#endif /* !_DB_185_H_ */
diff --git a/libdb/dbinc/db_am.h b/libdb/dbinc/db_am.h
new file mode 100644
index 0000000..12d191d
--- /dev/null
+++ b/libdb/dbinc/db_am.h
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+#ifndef _DB_AM_H_
+#define _DB_AM_H_
+
+/*
+ * IS_AUTO_COMMIT --
+ * Test for local auto-commit flag or global flag with no local DbTxn
+ * handle.
+ */
+#define IS_AUTO_COMMIT(dbenv, txn, flags) \
+ (LF_ISSET(DB_AUTO_COMMIT) || \
+ ((txn) == NULL && F_ISSET((dbenv), DB_ENV_AUTO_COMMIT)))
+
+/* DB recovery operation codes. */
+#define DB_ADD_DUP 1
+#define DB_REM_DUP 2
+#define DB_ADD_BIG 3
+#define DB_REM_BIG 4
+#define DB_ADD_PAGE 5
+#define DB_REM_PAGE 6
+
+/*
+ * Standard initialization and shutdown macros for all recovery functions.
+ */
+#define REC_INTRO(func, inc_count) { \
+ argp = NULL; \
+ dbc = NULL; \
+ file_dbp = NULL; \
+ mpf = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ goto out; \
+ if ((ret = __dbreg_id_to_db(dbenv, argp->txnid, \
+ &file_dbp, argp->fileid, inc_count)) != 0) { \
+ if (ret == DB_DELETED) { \
+ ret = 0; \
+ goto done; \
+ } \
+ goto out; \
+ } \
+ if ((ret = file_dbp->cursor(file_dbp, NULL, &dbc, 0)) != 0) \
+ goto out; \
+ F_SET(dbc, DBC_RECOVER); \
+ mpf = file_dbp->mpf; \
+}
+
+#define REC_CLOSE { \
+ int __t_ret; \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ if (dbc != NULL && \
+ (__t_ret = dbc->c_close(dbc)) != 0 && ret == 0) \
+ ret = __t_ret; \
+ return (ret); \
+}
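+
+/*
+ * A minimal sketch of the shape these macros expect a recovery function
+ * to take; __foo_recover, __foo_print, __foo_read and the argp fields
+ * are placeholders for the names a real log record type would generate:
+ *
+ *	int
+ *	__foo_recover(DB_ENV *dbenv,
+ *	    DBT *dbtp, DB_LSN *lsnp, db_recops op, void *info)
+ *	{
+ *		__foo_args *argp;
+ *		DB *file_dbp;
+ *		DBC *dbc;
+ *		DB_MPOOLFILE *mpf;
+ *		int ret;
+ *
+ *		REC_PRINT(__foo_print);
+ *		REC_INTRO(__foo_read, 1);
+ *
+ *		if (DB_REDO(op)) {
+ *			(reapply the update described by argp)
+ *		} else if (DB_UNDO(op)) {
+ *			(back the update out)
+ *		}
+ *
+ *	done:	*lsnp = argp->prev_lsn;
+ *		ret = 0;
+ *
+ *	out:	REC_CLOSE;
+ *	}
+ */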
+
+/*
+ * No-op versions of the same macros.
+ */
+#define REC_NOOP_INTRO(func) { \
+ argp = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ return (ret); \
+}
+#define REC_NOOP_CLOSE \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ return (ret); \
+
+/*
+ * Standard debugging macro for all recovery functions.
+ */
+#ifdef DEBUG_RECOVER
+#define REC_PRINT(func) \
+ (void)func(dbenv, dbtp, lsnp, op, info);
+#else
+#define REC_PRINT(func)
+#endif
+
+/*
+ * Actions to __db_lget
+ */
+#define LCK_ALWAYS 1 /* Lock even for off page dup cursors */
+#define LCK_COUPLE 2 /* Lock Couple */
+#define LCK_COUPLE_ALWAYS 3 /* Lock Couple even in txn. */
+#define LCK_DOWNGRADE 4 /* Downgrade the lock. (internal) */
+#define LCK_ROLLBACK 5 /* Lock even if in rollback */
+
+/*
+ * If doing transactions we have to hold the locks associated with a data item
+ * from a page for the entire transaction. However, we don't have to hold the
+ * locks associated with walking the tree. Distinguish between the two so that
+ * we don't tie up the internal pages of the tree longer than necessary.
+ */
+#define __LPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? \
+ (dbc)->dbp->dbenv->lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
+
+/*
+ * __TLPUT -- transactional lock put
+ * If the lock is valid then
+ * If we are not in a transaction put the lock.
+ * Else if the cursor is doing dirty reads and this was a read then
+ * put the lock.
+ * Else if the db is supporting dirty reads and this is a write then
+ * downgrade it.
+ * Else do nothing.
+ */
+#define __TLPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? __db_lput(dbc, &(lock)) : 0)
+
+typedef struct {
+ DBC *dbc;
+ int count;
+} db_trunc_param;
+
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+#endif /* !_DB_AM_H_ */
diff --git a/libdb/dbinc/db_cxx.in b/libdb/dbinc/db_cxx.in
new file mode 100644
index 0000000..abd9b58
--- /dev/null
+++ b/libdb/dbinc/db_cxx.in
@@ -0,0 +1,795 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <stdarg.h>
+
+@cxx_have_stdheaders@
+#ifdef HAVE_CXX_STDHEADERS
+#include <iostream>
+#define __DB_OSTREAMCLASS std::ostream
+#else
+#include <iostream.h>
+#define __DB_OSTREAMCLASS ostream
+#endif
+
+#include "db.h"
+#include "cxx_common.h"
+#include "cxx_except.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLogc; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class DbPreplist; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class, that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// being "private", we always use these by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
+#define DEFINE_DB_CLASS(name) \
+ public: class name##Imp* imp() { return (imp_); } \
+ public: const class name##Imp* constimp() const { return (imp_); } \
+ private: class name##Imp* imp_
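+
+// For illustration only (not part of the original macro documentation),
+// DEFINE_DB_CLASS(Db) expands to roughly:
+//
+//	public: class DbImp* imp() { return (imp_); }
+//	public: const class DbImp* constimp() const { return (imp_); }
+//	private: class DbImp* imp_
+//
+// so each wrapper object carries a single opaque implementation pointer.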
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see above level 3 unless
+// you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users to define
+// callback functions. For performance and logistical reasons, some
+// callback functions must be declared in extern "C" blocks. For others,
+// we allow you to declare the callbacks in C++ or C (or an extern "C"
+// block) as you wish. See the set methods for the callbacks for
+// the choices.
+//
+extern "C" {
+ typedef void * (*db_malloc_fcn_type)
+ (size_t);
+ typedef void * (*db_realloc_fcn_type)
+ (void *, size_t);
+ typedef void (*db_free_fcn_type)
+ (void *);
+ typedef int (*bt_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef int (*dup_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/
+ (DB *, const void *, u_int32_t);
+ typedef int (*pgin_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
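+
+// A hedged sketch (not part of the original header): a callback with the
+// shape described by bt_compare_fcn_type above, comparing keys that hold a
+// native u_int32_t.  The function name is hypothetical, and <string.h> is
+// assumed to be available for memcpy().
+//
+//	extern "C" int
+//	example_compare_uint32(DB *dbp, const DBT *a, const DBT *b)
+//	{
+//		u_int32_t ai, bi;
+//
+//		/* DBT data may be unaligned, so copy before comparing. */
+//		memcpy(&ai, a->data, sizeof(u_int32_t));
+//		memcpy(&bi, b->data, sizeof(u_int32_t));
+//		return (ai < bi ? -1 : (ai > bi ? 1 : 0));
+//	}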
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
+class _exported DbLock
+{
+ friend class DbEnv;
+
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+
+protected:
+ // We can add data to this class if needed
+ // since its contained class is not allocated by db.
+ // (see comment at top)
+
+ DbLock(DB_LOCK);
+ DB_LOCK lock_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
+class _exported DbLsn : protected DB_LSN
+{
+ friend class DbEnv; // friendship needed to cast to base class
+ friend class DbLogc; // friendship needed to cast to base class
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
+class _exported DbMpoolFile
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbMpoolFile);
+
+public:
+ int close(u_int32_t flags);
+ int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
+ void last_pgno(db_pgno_t *pgnoaddr);
+ int open(const char *file, u_int32_t flags, int mode, size_t pagesize);
+ int put(void *pgaddr, u_int32_t flags);
+ void refcnt(db_pgno_t *pgnoaddr);
+ int set(void *pgaddr, u_int32_t flags);
+ int set_clear_len(u_int32_t len);
+ int set_fileid(u_int8_t *fileid);
+ int set_ftype(int ftype);
+ int set_lsn_offset(int32_t offset);
+ int set_pgcookie(DBT *dbt);
+ void set_unlink(int);
+ int sync();
+
+ virtual DB_MPOOLFILE *get_DB_MPOOLFILE()
+ {
+ return (DB_MPOOLFILE *)imp();
+ }
+
+ virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const
+ {
+ return (const DB_MPOOLFILE *)constimp();
+ }
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile,
+ // and call DbMpoolFile::close() rather than delete to release them.
+ //
+ DbMpoolFile();
+
+ // Shut g++ up.
+protected:
+ virtual ~DbMpoolFile();
+
+private:
+ // no copying
+ DbMpoolFile(const DbMpoolFile &);
+ void operator = (const DbMpoolFile &);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// This is filled in and returned by the DbEnv::txn_recover() method.
+//
+
+class _exported DbPreplist
+{
+public:
+ DbTxn *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbTxn);
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ int discard(u_int32_t flags);
+ u_int32_t id();
+ int prepare(u_int8_t *gid);
+ int set_timeout(db_timeout_t timeout, u_int32_t flags);
+
+ virtual DB_TXN *get_DB_TXN()
+ {
+ return (DB_TXN *)imp();
+ }
+
+ virtual const DB_TXN *get_const_DB_TXN() const
+ {
+ return (const DB_TXN *)constimp();
+ }
+
+ static DbTxn* get_DbTxn(DB_TXN *txn)
+ {
+ return (DbTxn *)txn->api_internal;
+ }
+
+ static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
+ {
+ return (const DbTxn *)txn->api_internal;
+ }
+
+ // For internal use only.
+ static DbTxn* wrap_DB_TXN(DB_TXN *txn);
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+	// and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ // For internal use only.
+ DbTxn(DB_TXN *txn);
+ virtual ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+};
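+
+// A hedged usage sketch (not part of the original header), following the
+// note above: transactions are obtained from DbEnv::txn_begin() and are
+// released by commit() or abort(), never by delete.  Assumes `env' is a
+// pointer to an open, transactional DbEnv and exceptions are disabled.
+//
+//	DbTxn *txn = NULL;
+//	if (env->txn_begin(NULL, &txn, 0) == 0) {
+//		/* ... transactional reads/writes against Db handles ... */
+//		(void)txn->commit(0);
+//	}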
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// Users of this library can use this class as a starting point for
+// developing a DB application: derive their application class from
+// this one and add application control logic.
+//
+// Note that if you use the default constructor, you must explicitly
+// call appinit() before any other db activity (e.g. opening files)
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbEnv);
+
+public:
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ virtual ~DbEnv();
+
+ // These methods match those in the C interface.
+ //
+ virtual int close(u_int32_t);
+ virtual int dbremove(DbTxn *txn, const char *name, const char *subdb,
+ u_int32_t flags);
+ virtual int dbrename(DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual void *get_app_private() const;
+ virtual int open(const char *, u_int32_t, int);
+ virtual int remove(const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_data_dir(const char *);
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_flags(u_int32_t, int);
+ virtual int set_feedback(void (*)(DbEnv *, int, int));
+ virtual int set_lg_bsize(u_int32_t);
+ virtual int set_lg_dir(const char *);
+ virtual int set_lg_max(u_int32_t);
+ virtual int set_lg_regionmax(u_int32_t);
+ virtual int set_lk_conflicts(u_int8_t *, int);
+ virtual int set_lk_detect(u_int32_t);
+ virtual int set_lk_max(u_int32_t);
+ virtual int set_lk_max_lockers(u_int32_t);
+ virtual int set_lk_max_locks(u_int32_t);
+ virtual int set_lk_max_objects(u_int32_t);
+ virtual int set_mp_mmapsize(size_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_rpc_server(void *, char *, long, long, u_int32_t);
+ virtual int set_shm_key(long);
+ virtual int set_timeout(db_timeout_t timeout, u_int32_t flags);
+ virtual int set_tmp_dir(const char *);
+ virtual int set_tas_spins(u_int32_t);
+ virtual int set_tx_max(u_int32_t);
+ virtual int set_app_dispatch(int (*)(DbEnv *,
+ Dbt *, DbLsn *, db_recops));
+ virtual int set_tx_timestamp(time_t *);
+ virtual int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+ static void runtime_error_dbt(const char *caller, Dbt *dbt,
+ int error_policy);
+ static void runtime_error_lock_get(const char *caller, int err,
+ db_lockop_t op, db_lockmode_t mode,
+ const Dbt *obj, DbLock lock, int index,
+ int error_policy);
+
+ // Lock functions
+ //
+ virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ virtual int lock_id(u_int32_t *idp);
+ virtual int lock_id_free(u_int32_t id);
+ virtual int lock_put(DbLock *lock);
+ virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+ virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ virtual int log_archive(char **list[], u_int32_t flags);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ virtual int log_cursor(DbLogc **cursorp, u_int32_t flags);
+ virtual int log_file(DbLsn *lsn, char *namep, size_t len);
+ virtual int log_flush(const DbLsn *lsn);
+ virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+
+ // Mpool functions
+ //
+ virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+ virtual int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+ virtual int memp_stat(DB_MPOOL_STAT
+ **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags);
+ virtual int memp_sync(DbLsn *lsn);
+ virtual int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ virtual int txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags);
+ virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+
+ // Replication functions
+ //
+ virtual int rep_elect(int, int, u_int32_t, int *);
+ virtual int rep_process_message(Dbt *, Dbt *, int *);
+ virtual int rep_start(Dbt *, u_int32_t);
+ virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+ virtual int set_rep_limit(u_int32_t, u_int32_t);
+ virtual int set_rep_transport(u_int32_t,
+ int (*)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t));
+
+ // Conversion functions
+ //
+ virtual DB_ENV *get_DB_ENV()
+ {
+ return (DB_ENV *)imp();
+ }
+
+ virtual const DB_ENV *get_const_DB_ENV() const
+ {
+ return (const DB_ENV *)constimp();
+ }
+
+ static DbEnv* get_DbEnv(DB_ENV *dbenv)
+ {
+ return (DbEnv *)dbenv->api1_internal;
+ }
+
+ static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv)
+ {
+ return (const DbEnv *)dbenv->api1_internal;
+ }
+
+ // For internal use only.
+ static DbEnv* wrap_DB_ENV(DB_ENV *dbenv);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static int _rep_send_intercept(DB_ENV *env,
+ const DBT *cntrl, const DBT *data,
+ int id, u_int32_t flags);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // For internal use only.
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ void (*feedback_callback_)(DbEnv *, int, int);
+ void (*paniccall_callback_)(DbEnv *, int);
+ int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*rep_send_callback_)(DbEnv *,
+ const Dbt *, const Dbt *, int, u_int32_t);
+
+ // class data
+ static __DB_OSTREAMCLASS *error_stream_;
+};
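+
+// A hedged usage sketch (not part of the original header): construct the
+// environment, configure it with set_* methods, then open(), as the
+// constructor comment above describes.  The home directory, cache size and
+// flag choices are illustrative only; error handling is elided.
+//
+//	DbEnv env(0);
+//	env.set_errpfx("example");
+//	(void)env.set_cachesize(0, 1024 * 1024, 1);
+//	(void)env.open("/tmp/example-env",
+//	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
+//	    0);
+//	/* ... create Db handles, run transactions ... */
+//	(void)env.close(0);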
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(Db);
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ virtual ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ virtual int associate(DbTxn *txn, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+ virtual int close(u_int32_t flags);
+ virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual int fd(int *fdp);
+ virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ virtual void *get_app_private() const;
+ virtual int get_byteswapped(int *);
+ virtual int get_type(DBTYPE *);
+ virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ virtual int open(DbTxn *txnid,
+ const char *, const char *subname, DBTYPE, u_int32_t, int);
+ virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
+ u_int32_t flags);
+ virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ virtual int remove(const char *, const char *, u_int32_t);
+ virtual int rename(const char *, const char *, const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
+ virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_bt_maxkey(u_int32_t);
+ virtual int set_bt_minkey(u_int32_t);
+ virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
+ virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_cache_priority(DB_CACHE_PRIORITY);
+ virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/
+ virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_feedback(void (*)(Db *, int, int));
+ virtual int set_flags(u_int32_t);
+ virtual int set_h_ffactor(u_int32_t);
+ virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/
+ virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t));
+ virtual int set_h_nelem(u_int32_t);
+ virtual int set_lorder(int);
+ virtual int set_pagesize(u_int32_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_re_delim(int);
+ virtual int set_re_len(u_int32_t);
+ virtual int set_re_pad(int);
+ virtual int set_re_source(char *);
+ virtual int set_q_extentsize(u_int32_t);
+ virtual int stat(void *sp, u_int32_t flags);
+ virtual int sync(u_int32_t flags);
+ virtual int truncate(DbTxn *, u_int32_t *, u_int32_t);
+ virtual int upgrade(const char *name, u_int32_t flags);
+ virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, u_int32_t);
+
+ // These additional methods are not in the C interface, and
+ // are only available for C++.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ virtual DB *get_DB()
+ {
+ return (DB *)imp();
+ }
+
+ virtual const DB *get_const_DB() const
+ {
+ return (const DB *)constimp();
+ }
+
+ static Db* get_Db(DB *db)
+ {
+ return (Db *)db->api_internal;
+ }
+
+ static const Db* get_const_Db(const DB *db)
+ {
+ return (const Db *)db->api_internal;
+ }
+
+private:
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+
+public:
+ // These are public only because they need to be called
+ // via C callback functions. They should never be used by
+ // external users of this class.
+ //
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+ int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *);
+ int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *);
+ int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ void (*feedback_callback_)(Db *, int, int);
+ u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t);
+};
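+
+// A hedged usage sketch (not part of the original header): create a Db
+// handle against an open DbEnv, open a btree database, and store a single
+// key/data pair.  The file name, mode and contents are illustrative only,
+// and exceptions are assumed to be disabled.
+//
+//	Db db(&env, 0);
+//	if (db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0644) == 0) {
+//		Dbt key((void *)"fruit", 5), data((void *)"apple", 5);
+//		(void)db.put(NULL, &key, &data, 0);
+//		(void)db.close(0);
+//	}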
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+ friend class DbLogc;
+
+public:
+
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+	// to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
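+
+// A hedged sketch (not part of the original header): a Dbt configured so
+// that a get() fills a caller-owned buffer rather than memory allocated by
+// DB.  The buffer size is illustrative only.
+//
+//	char buf[256];
+//	Dbt data;
+//	data.set_data(buf);
+//	data.set_ulen(sizeof(buf));
+//	data.set_flags(DB_DBT_USERMEM);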
+
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
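+
+// A hedged usage sketch (not part of the original header), following the
+// note above: cursors come from Db::cursor() and are released with
+// Dbc::close().  Assumes `db' is an open Db handle and exceptions are
+// disabled.
+//
+//	Dbc *dbc = NULL;
+//	if (db.cursor(NULL, &dbc, 0) == 0) {
+//		Dbt key, data;
+//		while (dbc->get(&key, &data, DB_NEXT) == 0) {
+//			/* ... examine key and data ... */
+//		}
+//		(void)dbc->close();
+//	}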
+
+class _exported DbLogc : protected DB_LOGC
+{
+ friend class DbEnv;
+
+public:
+ int close(u_int32_t _flags);
+ int get(DbLsn *lsn, Dbt *data, u_int32_t _flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+	// Note: use DbEnv::log_cursor() to get pointers to a DbLogc,
+	// and call DbLogc::close() rather than delete to release them.
+ //
+ DbLogc();
+ ~DbLogc();
+
+ // no copying
+	DbLogc(const DbLogc &);
+	DbLogc &operator = (const DbLogc &);
+};
+#endif /* !_DB_CXX_H_ */
diff --git a/libdb/dbinc/db_dispatch.h b/libdb/dbinc/db_dispatch.h
new file mode 100644
index 0000000..d01b607
--- /dev/null
+++ b/libdb/dbinc/db_dispatch.h
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_DISPATCH_H_
+#define _DB_DISPATCH_H_
+
+/*
+ * Declarations and typedefs for the list of transaction IDs used during
+ * recovery. This is a generic list used to pass along whatever information
+ * we need during recovery.
+ */
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_PGNO,
+ TXNLIST_TXNID
+} db_txnlist_type;
+
+#define DB_TXNLIST_MASK(hp, n) (n % hp->nslots)
+struct __db_txnhead {
+ u_int32_t maxid; /* Maximum transaction id. */
+ DB_LSN maxlsn; /* Maximum commit lsn. */
+ DB_LSN ckplsn; /* LSN of last retained checkpoint. */
+ DB_LSN trunc_lsn; /* Lsn to which we are going to truncate;
+ * make sure we abort anyone after this. */
+ int32_t generation; /* Current generation number. */
+ int32_t gen_alloc; /* Number of generations allocated. */
+ struct {
+ int32_t generation;
+ u_int32_t txn_min;
+ u_int32_t txn_max;
+	} *gen_array;		/* Array of txnids associated with a gen. */
+ int nslots;
+ LIST_HEAD(__db_headlink, __db_txnlist) head[1];
+};
+
+struct __db_txnlist {
+ db_txnlist_type type;
+ LIST_ENTRY(__db_txnlist) links;
+ union {
+ struct {
+ u_int32_t txnid;
+ int32_t generation;
+ int32_t status;
+ } t;
+ struct {
+ int32_t ntxns;
+ int32_t maxn;
+ DB_LSN *lsn_array;
+ } l;
+ struct {
+ int32_t nentries;
+ int32_t maxentry;
+ int32_t locked;
+ char *fname;
+ int32_t fileid;
+ db_pgno_t *pgno_array;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ } p;
+ } u;
+};
+
+/*
+ * Flag value for __db_txnlist_lsnadd. Distinguish whether we are replacing
+ * an entry in the transaction list or adding a new one.
+ */
+#define TXNLIST_NEW 0x1
+
+#define DB_user_BEGIN 10000
+
+#endif /* !_DB_DISPATCH_H_ */
diff --git a/libdb/dbinc/db_int.in b/libdb/dbinc/db_int.in
new file mode 100644
index 0000000..ec92a21
--- /dev/null
+++ b/libdb/dbinc/db_int.in
@@ -0,0 +1,477 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * System includes, db.h, a few general DB includes. The DB includes are
+ * here because it's OK if db_int.h includes queue structure declarations.
+ *******************************************************/
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "db.h"
+
+#include "dbinc/queue.h"
+#include "dbinc/shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/* Test for a power-of-two (tests true for zero, which doesn't matter here). */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+/* Test for valid page sizes. */
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+
+/* Minimum number of pages cached, by default. */
+#define DB_MINPAGECACHE 16
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+@db_align_t_decl@
+@db_alignp_t_decl@
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
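+
+/*
+ * Worked example (illustration only, not part of the original header):
+ * ALIGN(13, 8) evaluates to 16, and ALIGN(16, 8) stays 16 -- values already
+ * on the boundary are unchanged.
+ */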
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/*
+ * Convert a pointer to a small integral value.
+ *
+ * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast
+ * converts the value to an integral type, and the (u_int16_t) cast converts
+ * it to a small integral type so we don't get complaints when we assign the
+ * final result to an integral type smaller than db_alignp_t.
+ */
+#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p))
+#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p))
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0]))
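+
+/*
+ * Illustration only (not part of the original header): SSZ behaves like a
+ * 16-bit offsetof.  For example, SSZ(PAGE, pgno) is the byte offset of the
+ * pgno field within a PAGE (8, per the layout comments in db_page.h), and
+ * SSZA(PG_CRYPTO, chksum) is the offset of the first checksum byte.
+ */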
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) ((flags) &= ~(f))
+#define LF_ISSET(f) ((flags) & (f))
+#define LF_SET(f) ((flags) |= (f))
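+
+/*
+ * Illustration only (not part of the original header): the F_* forms act on
+ * a structure's "flags" member, the FLD_* forms on an arbitrary field, and
+ * the LF_* forms on a local variable literally named "flags", e.g.:
+ *
+ *	F_SET(dbp, DB_AM_DUP);		expands to  (dbp)->flags |= DB_AM_DUP
+ *	if (LF_ISSET(DB_CREATE))	expands to  if ((flags) & DB_CREATE)
+ */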
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * API return values
+ *******************************************************/
+ /*
+ * Return values that are OK for each different call. Most calls have
+ * a standard 'return of 0 is the only OK value', but some, like db->get,
+ * can return DB_NOTFOUND, which really isn't an error.
+ */
+#define DB_RETOK_STD(ret) ((ret) == 0)
+#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBCGET(ret) DB_RETOK_DBGET(ret)
+#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBDEL(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST)
+#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
+#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || (ret) == DB_REP_NEWMASTER || \
+ (ret) == DB_REP_NEWSITE)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#ifdef _WIN32
+#define PATH_SEPARATOR "\\/:" /* Path separator character(s). */
+#else
+#define PATH_SEPARATOR "/" /* Path separator character(s). */
+#endif
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */
+#define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */
+#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x0008 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x0020 /* Opening a region file. */
+#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x0080 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * CRYPTO_ON Security has been configured.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * RPC_ON RPC has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING: The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Initialization methods are often illegal before/after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \
+ if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 0));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, i, flags));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env((dbp)->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */
+#define FREE_IF_NEEDED(sdbp, dbt) \
+ if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \
+ __os_ufree((sdbp)->dbenv, (dbt)->data); \
+ F_CLR((dbt), DB_DBT_APPMALLOC); \
+ }
+
+/*
+ * Use memory belonging to object "owner" to return the results of
+ * any no-DBT-flag get ops on cursor "dbc".
+ */
+#define SET_RET_MEM(dbc, owner) \
+ do { \
+ (dbc)->rskey = &(owner)->my_rskey; \
+ (dbc)->rkey = &(owner)->my_rkey; \
+ (dbc)->rdata = &(owner)->my_rdata; \
+ } while (0)
+
+/* Use the return-data memory src is currently set to use in dest as well. */
+#define COPY_RET_MEM(src, dest) \
+ do { \
+ (dest)->rskey = (src)->rskey; \
+ (dest)->rkey = (src)->rkey; \
+ (dest)->rdata = (src)->rdata; \
+ } while (0)
+
+/* Reset the returned-memory pointers to their defaults. */
+#define RESET_RET_MEM(dbc) \
+ do { \
+ (dbc)->rskey = &(dbc)->my_rskey; \
+ (dbc)->rkey = &(dbc)->my_rkey; \
+ (dbc)->rdata = &(dbc)->my_rdata; \
+ } while (0)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ u_int32_t flags; /* Some DB_AM flags needed. */
+ DBTYPE type; /* DB type */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0)
+#define INIT_LSN(LSN) do { \
+ (LSN).file = 1; \
+ (LSN).offset = 0; \
+} while (0)
+
+#define MAX_LSN(LSN) do { \
+ (LSN).file = UINT32_T_MAX; \
+ (LSN).offset = UINT32_T_MAX; \
+} while (0)
+#define IS_MAX_LSN(LSN) \
+ ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX)
+
+/* If logging is turned off, smash the lsn. */
+#define LSN_NOT_LOGGED(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 1; \
+} while (0)
+#define IS_NOT_LOGGED_LSN(LSN) \
+ ((LSN).file == 0 && (LSN).offset == 1)
+
+/*
+ * Test if the environment is currently logging changes. If we're in
+ * recovery or we're a replication client, we don't need to log changes
+ * because they're already in the log, even though we have a fully functional
+ * log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !F_ISSET((dbenv), DB_ENV_REP_CLIENT) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. Note that the DBC_RECOVER flag is set
+ * when we're in abort, as well as during recovery; thus DBC_LOGGING may be
+ * false for a particular dbc even when DBENV_LOGGING is true.
+ *
+ * We explicitly use LOGGING_ON/DB_ENV_REP_CLIENT here because we don't
+ * want to have to pull in the log headers, which IS_RECOVERING (and thus
+ * DBENV_LOGGING) rely on, and because DBC_RECOVER should be set anytime
+ * IS_RECOVERING would be true.
+ */
+#define DBC_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET((dbc), DBC_RECOVER) && \
+ !F_ISSET((dbc)->dbp->dbenv, DB_ENV_REP_CLIENT))
+
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Crypto.
+ *******************************************************/
+#define DB_IV_BYTES 16 /* Bytes per IV */
+#define DB_MAC_KEY 20 /* Bytes per MAC checksum */
+
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * Remaining general DB includes.
+ *******************************************************/
+@db_int_def@
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
diff --git a/libdb/dbinc/db_join.h b/libdb/dbinc/db_join.h
new file mode 100644
index 0000000..487ce3e
--- /dev/null
+++ b/libdb/dbinc/db_join.h
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * @(#)db_join.h 11.1 (Sleepycat) 7/25/99
+ */
+
+#ifndef _DB_JOIN_H_
+#define _DB_JOIN_H_
+
+/*
+ * Joins use a join cursor that is similar to a regular DB cursor except
+ * that it only supports c_get and c_close functionality. Also, it does
+ * not support the full range of flags for get.
+ */
+typedef struct __join_cursor {
+ u_int8_t *j_exhausted; /* Array of flags; is cursor i exhausted? */
+ DBC **j_curslist; /* Array of cursors in the join: constant. */
+	DBC **j_fdupcurs;	/* Cursors w/ first instances of current dup. */
+ DBC **j_workcurs; /* Scratch cursor copies to muck with. */
+ DB *j_primary; /* Primary dbp. */
+ DBT j_key; /* Used to do lookups. */
+ DBT j_rdata; /* Memory used for data return. */
+ u_int32_t j_ncurs; /* How many cursors do we have? */
+#define JOIN_RETRY 0x01 /* Error on primary get; re-return same key. */
+ u_int32_t flags;
+} JOIN_CURSOR;
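+
+/*
+ * A hedged usage sketch (illustration only, not part of the original
+ * header): a join cursor is created with DB->join() over a NULL-terminated
+ * array of secondary-index cursors, and, as described above, only c_get and
+ * c_close are meaningful on it.  `primary', `c0', `c1' and handle_match()
+ * are assumed/hypothetical names.
+ *
+ *	DBC *curslist[3], *join_curs;
+ *	DBT key, data;
+ *
+ *	curslist[0] = c0;
+ *	curslist[1] = c1;
+ *	curslist[2] = NULL;
+ *	if (primary->join(primary, curslist, &join_curs, 0) == 0) {
+ *		memset(&key, 0, sizeof(key));
+ *		memset(&data, 0, sizeof(data));
+ *		while (join_curs->c_get(join_curs, &key, &data, 0) == 0)
+ *			handle_match(&key, &data);
+ *		(void)join_curs->c_close(join_curs);
+ *	}
+ */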
+
+#endif /* !_DB_JOIN_H_ */
diff --git a/libdb/dbinc/db_page.h b/libdb/dbinc/db_page.h
new file mode 100644
index 0000000..cc6e2ef
--- /dev/null
+++ b/libdb/dbinc/db_page.h
@@ -0,0 +1,651 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_PAGE_H_
+#define _DB_PAGE_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * DB page formats.
+ *
+ * !!!
+ * This implementation requires that values within the following structures
+ * NOT be padded -- note, ANSI C permits random padding within structures.
+ * If your compiler pads randomly you can just forget ever making DB run on
+ * your system. In addition, no data type can require larger alignment than
+ * its own size, e.g., a 4-byte data element may not require 8-byte alignment.
+ *
+ * Note that key/data lengths are often stored in db_indx_t's -- this is
+ * not accidental, nor does it limit the key/data size. If the key/data
+ * item fits on a page, it's guaranteed to be small enough to fit into a
+ * db_indx_t, and storing it in one saves space.
+ */
+
+#define PGNO_INVALID 0 /* Invalid page number in any database. */
+#define PGNO_BASE_MD 0 /* Base database: metadata page number. */
+
+/* Page types. */
+#define P_INVALID 0 /* Invalid page type. */
+#define __P_DUPLICATE 1 /* Duplicate. DEPRECATED in 3.1 */
+#define P_HASH 2 /* Hash. */
+#define P_IBTREE 3 /* Btree internal. */
+#define P_IRECNO 4 /* Recno internal. */
+#define P_LBTREE 5 /* Btree leaf. */
+#define P_LRECNO 6 /* Recno leaf. */
+#define P_OVERFLOW 7 /* Overflow. */
+#define P_HASHMETA 8 /* Hash metadata page. */
+#define P_BTREEMETA 9 /* Btree metadata page. */
+#define P_QAMMETA 10 /* Queue metadata page. */
+#define P_QAMDATA 11 /* Queue data page. */
+#define P_LDUP 12 /* Off-page duplicate leaf. */
+#define P_PAGETYPE_MAX 13
+
+/*
+ * When we create pages in mpool, we ask mpool to clear some number of bytes
+ * in the header. This number must be at least as big as the regular page
+ * headers and cover enough of the btree and hash meta-data pages to obliterate
+ * the page type.
+ */
+#define DB_PAGE_DB_LEN 32
+#define DB_PAGE_QUEUE_LEN 0
+
+/************************************************************************
+ GENERIC METADATA PAGE HEADER
+ *
+ * !!!
+ * The magic and version numbers have to be in the same place in all versions
+ * of the metadata page as the application may not have upgraded the database.
+ ************************************************************************/
+typedef struct _dbmeta33 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t encrypt_alg; /* 24: Encryption algorithm. */
+ u_int8_t type; /* 25: Page type. */
+#define DBMETA_CHKSUM 0x01
+ u_int8_t metaflags; /* 26: Meta-only flags */
+ u_int8_t unused1; /* 27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ db_pgno_t last_pgno; /* 32-35: Page number of last page in db. */
+ u_int32_t unused3; /* 36-39: Unused. */
+ u_int32_t key_count; /* 40-43: Cached key count. */
+ u_int32_t record_count; /* 44-47: Cached record count. */
+ u_int32_t flags; /* 48-51: Flags: unique to each AM. */
+ /* 52-71: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA33, DBMETA;
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta33 {
+#define BTM_DUP 0x001 /* Duplicates. */
+#define BTM_RECNO 0x002 /* Recno tree. */
+#define BTM_RECNUM 0x004 /* Btree: maintain record count. */
+#define BTM_FIXEDLEN 0x008 /* Recno: fixed length records. */
+#define BTM_RENUMBER 0x010 /* Recno: renumber on insert/delete. */
+#define BTM_SUBDB 0x020 /* Subdatabases. */
+#define BTM_DUPSORT 0x040 /* Duplicates are sorted. */
+#define BTM_MASK 0x07f
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-91: Root page. */
+ u_int32_t unused[92]; /* 92-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+
+ /*
+ * Minimum page size is 512.
+ */
+} BTMETA33, BTMETA;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta33 {
+#define DB_HASH_DUP 0x01 /* Duplicates. */
+#define DB_HASH_SUBDB 0x02 /* Subdatabases. */
+#define DB_HASH_DUPSORT 0x04 /* Duplicates are sorted. */
+ DBMETA dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+ u_int32_t unused[59]; /* 224-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+
+ /*
+ * Minimum page size is 512.
+ */
+} HMETA33, HMETA;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta33 {
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Next recno to be allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ u_int32_t unused[91]; /* 96-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+ /*
+ * Minimum page size is 512.
+ */
+} QMETA33, QMETA;
+
+/*
+ * DBMETASIZE is a constant used by __db_file_setup and DB->verify
+ * as a buffer which is guaranteed to be larger than any possible
+ * metadata page size and smaller than any disk sector.
+ */
+#define DBMETASIZE 512
+
+/************************************************************************
+ BTREE/HASH MAIN PAGE LAYOUT
+ ************************************************************************/
+/*
+ * +-----------------------------------+
+ * | lsn | pgno | prev pgno |
+ * +-----------------------------------+
+ * | next pgno | entries | hf offset |
+ * +-----------------------------------+
+ * | level | type | chksum |
+ * +-----------------------------------+
+ * | iv | index | free --> |
+ * +-----------+-----------------------+
+ * | F R E E A R E A |
+ * +-----------------------------------+
+ * | <-- free | item |
+ * +-----------------------------------+
+ * | item | item | item |
+ * +-----------------------------------+
+ *
+ * sizeof(PAGE) == 26 bytes + possibly 20 bytes of checksum and possibly
+ * 16 bytes of IV (+ 2 bytes for alignment), and the following indices
+ * are guaranteed to be two-byte aligned. If we aren't doing crypto or
+ * checksumming the bytes are reclaimed for data storage.
+ *
+ * For hash and btree leaf pages, index items are paired, e.g., inp[0] is the
+ * key for inp[1]'s data. All other types of pages only contain single items.
+ */
+typedef struct __pg_chksum {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[4]; /* 28-31: Checksum */
+} PG_CHKSUM;
+
+typedef struct __pg_crypto {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
+ /* !!!
+ * Must be 16-byte aligned for crypto
+ */
+} PG_CRYPTO;
+
+typedef struct _db_page {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ db_pgno_t prev_pgno; /* 12-15: Previous page number. */
+ db_pgno_t next_pgno; /* 16-19: Next page number. */
+ db_indx_t entries; /* 20-21: Number of items on the page. */
+ db_indx_t hf_offset; /* 22-23: High free byte page offset. */
+
+ /*
+ * The btree levels are numbered from the leaf to the root, starting
+ * with 1, so the leaf is level 1, its parent is level 2, and so on.
+ * We maintain this level on all btree pages, but the only place that
+ * we actually need it is on the root page. It would not be difficult
+ * to hide the byte on the root page once it becomes an internal page,
+ * so we could get this byte back if we needed it for something else.
+ */
+#define LEAFLEVEL 1
+#define MAXBTREELEVEL 255
+ u_int8_t level; /* 24: Btree tree level. */
+ u_int8_t type; /* 25: Page type. */
+} PAGE;
+
+#define SIZEOF_PAGE 26
+/*
+ * !!!
+ * DB_AM_ENCRYPT always implies DB_AM_CHKSUM so that must come first.
+ */
+#define P_INP(dbp, pg) \
+ ((db_indx_t *)((u_int8_t *)(pg) + SIZEOF_PAGE + \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? sizeof(PG_CRYPTO) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? sizeof(PG_CHKSUM) : 0))))
+
+#define P_IV(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, iv)) \
+ : NULL)
+
+#define P_CHKSUM(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, chksum)) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum)) \
+ : NULL))
+
+/* PAGE element macros. */
+#define LSN(p) (((PAGE *)p)->lsn)
+#define PGNO(p) (((PAGE *)p)->pgno)
+#define PREV_PGNO(p) (((PAGE *)p)->prev_pgno)
+#define NEXT_PGNO(p) (((PAGE *)p)->next_pgno)
+#define NUM_ENT(p) (((PAGE *)p)->entries)
+#define HOFFSET(p) (((PAGE *)p)->hf_offset)
+#define LEVEL(p) (((PAGE *)p)->level)
+#define TYPE(p) (((PAGE *)p)->type)
+
+/************************************************************************
+ QUEUE MAIN PAGE LAYOUT
+ ************************************************************************/
+/*
+ * Sizes of page below. Used to reclaim space if not doing
+ * crypto or checksumming. If you change the QPAGE below you
+ * MUST adjust this too.
+ */
+#define QPAGE_NORMAL 28
+#define QPAGE_CHKSUM 48
+#define QPAGE_SEC 64
+
+typedef struct _qpage {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t unused0[3]; /* 12-23: Unused. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
+} QPAGE;
+
+#define QPAGE_SZ(dbp) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? QPAGE_SEC : \
+ F_ISSET((dbp), DB_AM_CHKSUM) ? QPAGE_CHKSUM : QPAGE_NORMAL)
+/*
+ * !!!
+ * The next_pgno and prev_pgno fields are not maintained for btree and recno
+ * internal pages. Doing so only provides a minor performance improvement,
+ * it's hard to do when deleting internal pages, and it increases the chance
+ * of deadlock during deletes and splits because we have to re-link pages at
+ * more than the leaf level.
+ *
+ * !!!
+ * The btree/recno access method needs db_recno_t bytes of space on the root
+ * page to specify how many records are stored in the tree. (The alternative
+ * is to store the number of records in the meta-data page, which will create
+ * a second hot spot in trees being actively modified, or recalculate it from
+ * the BINTERNAL fields on each access.) Overload the PREV_PGNO field.
+ */
+#define RE_NREC(p) \
+ ((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? PREV_PGNO(p) : \
+ (db_pgno_t)(TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p)))
+#define RE_NREC_ADJ(p, adj) \
+ PREV_PGNO(p) += adj;
+#define RE_NREC_SET(p, num) \
+ PREV_PGNO(p) = num;
+
+/*
+ * Initialize a page.
+ *
+ * !!!
+ * Don't modify the page's LSN, code depends on it being unchanged after a
+ * P_INIT call.
+ */
+#define P_INIT(pg, pg_size, n, pg_prev, pg_next, btl, pg_type) do { \
+ PGNO(pg) = n; \
+ PREV_PGNO(pg) = pg_prev; \
+ NEXT_PGNO(pg) = pg_next; \
+ NUM_ENT(pg) = 0; \
+ HOFFSET(pg) = pg_size; \
+ LEVEL(pg) = btl; \
+ TYPE(pg) = pg_type; \
+} while (0)
+
+/* Page header length (offset to first index). */
+#define P_OVERHEAD(dbp) P_TO_UINT16(P_INP(dbp, 0))
+
+/* First free byte. */
+#define LOFFSET(dbp, pg) \
+ (P_OVERHEAD(dbp) + NUM_ENT(pg) * sizeof(db_indx_t))
+
+/* Free space on a regular page. */
+#define P_FREESPACE(dbp, pg) (HOFFSET(pg) - LOFFSET(dbp, pg))
+
+/* Get a pointer to the bytes at a specific index. */
+#define P_ENTRY(dbp, pg, indx) ((u_int8_t *)pg + P_INP(dbp, pg)[indx])
+
+/************************************************************************
+ OVERFLOW PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Overflow items are referenced by HOFFPAGE and BOVERFLOW structures, which
+ * store a page number (the first page of the overflow item) and a length
+ * (the total length of the overflow item). The overflow item consists of
+ * some number of overflow pages, linked by the next_pgno field of the page.
+ * A next_pgno field of PGNO_INVALID flags the end of the overflow item.
+ *
+ * Overflow page overloads:
+ * The amount of overflow data stored on each page is stored in the
+ * hf_offset field.
+ *
+ * The implementation reference counts overflow items as it's possible
+ * for them to be promoted onto btree internal pages. The reference
+ * count is stored in the entries field.
+ */
+#define OV_LEN(p) (((PAGE *)p)->hf_offset)
+#define OV_REF(p) (((PAGE *)p)->entries)
+
+/* Maximum number of bytes that you can put on an overflow page. */
+#define P_MAXSPACE(dbp, psize) ((psize) - P_OVERHEAD(dbp))
+
+/* Free space on an overflow page. */
+#define P_OVFLSPACE(dbp, psize, pg) (P_MAXSPACE(dbp, psize) - HOFFSET(pg))
+
+/************************************************************************
+ HASH PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define H_KEYDATA 1 /* Key/data item. */
+#define H_DUPLICATE 2 /* Duplicate key/data item. */
+#define H_OFFPAGE 3 /* Overflow key/data item. */
+#define H_OFFDUP 4 /* Overflow page of duplicates. */
+
+/*
+ * !!!
+ * Items on hash pages are (potentially) unaligned, so we can never cast the
+ * (page + offset) pointer to an HKEYDATA, HOFFPAGE or HOFFDUP structure, as
+ * we do with B+tree on-page structures.  Because we frequently want the type
+ * field, because it requires no alignment, and because it's in the same
+ * location in all three structures, there's a pair of macros to fetch it.
+ */
+#define HPAGE_PTYPE(p) (*(u_int8_t *)p)
+#define HPAGE_TYPE(dbp, pg, indx) (*P_ENTRY(dbp, pg, indx))
+
+/*
+ * The first and second types are H_KEYDATA and H_DUPLICATE, represented
+ * by the HKEYDATA structure:
+ *
+ * +-----------------------------------+
+ * | type | key/data ... |
+ * +-----------------------------------+
+ *
+ * For duplicates, the data field encodes duplicate elements in the data
+ * field:
+ *
+ * +---------------------------------------------------------------+
+ * | type | len1 | element1 | len1 | len2 | element2 | len2 |
+ * +---------------------------------------------------------------+
+ *
+ * Thus, by keeping track of the offset in the element, we can do both
+ * backward and forward traversal.
+ */
+typedef struct _hkeydata {
+ u_int8_t type; /* 00: Page type. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} HKEYDATA;
+#define HKEYDATA_DATA(p) (((u_int8_t *)p) + SSZA(HKEYDATA, data))
+
+/*
+ * The length of any HKEYDATA item. Note that indx is an element index,
+ * not a PAIR index.
+ */
+#define LEN_HITEM(dbp, pg, pgsize, indx) \
+ (((indx) == 0 ? pgsize : \
+ (P_INP(dbp, pg)[indx - 1])) - (P_INP(dbp, pg)[indx]))
+
+#define LEN_HKEYDATA(dbp, pg, psize, indx) \
+ (db_indx_t)(LEN_HITEM(dbp, pg, psize, indx) - HKEYDATA_SIZE(0))
+
+/*
+ * Page space required to add a new HKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define HKEYDATA_SIZE(len) \
+ ((len) + SSZA(HKEYDATA, data))
+#define HKEYDATA_PSIZE(len) \
+ (HKEYDATA_SIZE(len) + sizeof(db_indx_t))
+
+/* Put a HKEYDATA item at the location referenced by a page entry. */
+#define PUT_HKEYDATA(pe, kd, len, type) { \
+ ((HKEYDATA *)pe)->type = type; \
+ memcpy((u_int8_t *)pe + sizeof(u_int8_t), kd, len); \
+}
+
+/*
+ * Macros that describe the page layout in terms of key-data pairs.
+ */
+#define H_NUMPAIRS(pg) (NUM_ENT(pg) / 2)
+#define H_KEYINDEX(indx) (indx)
+#define H_DATAINDEX(indx) ((indx) + 1)
+#define H_PAIRKEY(dbp, pg, indx) P_ENTRY(dbp, pg, H_KEYINDEX(indx))
+#define H_PAIRDATA(dbp, pg, indx) P_ENTRY(dbp, pg, H_DATAINDEX(indx))
+#define H_PAIRSIZE(dbp, pg, psize, indx) \
+ (LEN_HITEM(dbp, pg, psize, H_KEYINDEX(indx)) + \
+ LEN_HITEM(dbp, pg, psize, H_DATAINDEX(indx)))
+#define LEN_HDATA(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_DATAINDEX(indx))
+#define LEN_HKEY(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_KEYINDEX(indx))
+
+/*
+ * The third type is the H_OFFPAGE, represented by the HOFFPAGE structure:
+ */
+typedef struct _hoffpage {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} HOFFPAGE;
+
+#define HOFFPAGE_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, pgno))
+#define HOFFPAGE_TLEN(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, tlen))
+
+/*
+ * Page space required to add a new HOFFPAGE item to the page, with and
+ * without the index value.
+ */
+#define HOFFPAGE_SIZE (sizeof(HOFFPAGE))
+#define HOFFPAGE_PSIZE (HOFFPAGE_SIZE + sizeof(db_indx_t))
+
+/*
+ * The fourth type is H_OFFDUP represented by the HOFFDUP structure:
+ */
+typedef struct _hoffdup {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+} HOFFDUP;
+#define HOFFDUP_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFDUP, pgno))
+
+/*
+ * Page space required to add a new HOFFDUP item to the page, with and
+ * without the index value.
+ */
+#define HOFFDUP_SIZE (sizeof(HOFFDUP))
+
+/************************************************************************
+ BTREE PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define B_KEYDATA 1 /* Key/data item. */
+#define B_DUPLICATE 2 /* Duplicate key/data item. */
+#define B_OVERFLOW 3 /* Overflow key/data item. */
+
+/*
+ * We have to store a deleted entry flag in the page. The reason is complex,
+ * but the simple version is that we can't delete on-page items referenced by
+ * a cursor -- the return order of subsequent insertions might be wrong. The
+ * delete flag is an overload of the top bit of the type byte.
+ */
+#define B_DELETE (0x80)
+#define B_DCLR(t) (t) &= ~B_DELETE
+#define B_DSET(t) (t) |= B_DELETE
+#define B_DISSET(t) ((t) & B_DELETE)
+
+#define B_TYPE(t) ((t) & ~B_DELETE)
+#define B_TSET(t, type, deleted) { \
+ (t) = (type); \
+ if (deleted) \
+ B_DSET(t); \
+}
+
+/*
+ * The first type is B_KEYDATA, represented by the BKEYDATA structure:
+ */
+typedef struct _bkeydata {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} BKEYDATA;
+
+/* Get a BKEYDATA item for a specific index. */
+#define GET_BKEYDATA(dbp, pg, indx) \
+ ((BKEYDATA *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define BKEYDATA_SIZE(len) \
+ ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t))
+#define BKEYDATA_PSIZE(len) \
+ (BKEYDATA_SIZE(len) + sizeof(db_indx_t))
+
+/*
+ * The second and third types are B_DUPLICATE and B_OVERFLOW, represented
+ * by the BOVERFLOW structure.
+ */
+typedef struct _boverflow {
+ db_indx_t unused1; /* 00-01: Padding, unused. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused2; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Next page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} BOVERFLOW;
+
+/* Get a BOVERFLOW item for a specific index. */
+#define GET_BOVERFLOW(dbp, pg, indx) \
+ ((BOVERFLOW *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BOVERFLOW item to the page, with and
+ * without the index value. The (u_int16_t) cast avoids warnings: ALIGN
+ * casts to db_align_t, the cast converts it to a small integral type so
+ * we don't get complaints when we assign the final result to an integral
+ * type smaller than db_align_t.
+ */
+#define BOVERFLOW_SIZE \
+ ((u_int16_t)ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t)))
+#define BOVERFLOW_PSIZE \
+ (BOVERFLOW_SIZE + sizeof(db_indx_t))
+
+/*
+ * Btree leaf and hash page layouts group indices in sets of two, one for the
+ * key and one for the data. Everything else does it in sets of one to save
+ * space. Use the following macros so that it's real obvious what's going on.
+ */
+#define O_INDX 1
+#define P_INDX 2
+
+/************************************************************************
+ BTREE INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree internal entry.
+ */
+typedef struct _binternal {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Page number of referenced page. */
+ db_recno_t nrecs; /* 08-11: Subtree record count. */
+ u_int8_t data[1]; /* Variable length key item. */
+} BINTERNAL;
+
+/* Get a BINTERNAL item for a specific index. */
+#define GET_BINTERNAL(dbp, pg, indx) \
+ ((BINTERNAL *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define BINTERNAL_SIZE(len) \
+ ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t))
+#define BINTERNAL_PSIZE(len) \
+ (BINTERNAL_SIZE(len) + sizeof(db_indx_t))
+
+/************************************************************************
+ RECNO INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * The recno internal entry.
+ */
+typedef struct _rinternal {
+ db_pgno_t pgno; /* 00-03: Page number of referenced page. */
+ db_recno_t nrecs; /* 04-07: Subtree record count. */
+} RINTERNAL;
+
+/* Get a RINTERNAL item for a specific index. */
+#define GET_RINTERNAL(dbp, pg, indx) \
+ ((RINTERNAL *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new RINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define RINTERNAL_SIZE \
+ ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t))
+#define RINTERNAL_PSIZE \
+ (RINTERNAL_SIZE + sizeof(db_indx_t))
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* !_DB_PAGE_H_ */
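
The delete-flag overload described in the BTREE PAGE LAYOUT section above is easy to see in isolation: B_DELETE steals the top bit of the one-byte type field, so an item can be marked deleted without losing its type. The following is a minimal standalone C sketch of that bit manipulation; it restates the macros with local definitions for illustration and does not use the library's own headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define B_KEYDATA   1            /* Key/data item. */
#define B_DELETE    (0x80)       /* Delete flag: top bit of the type byte. */
#define B_DSET(t)   ((t) |= B_DELETE)
#define B_DCLR(t)   ((t) &= ~B_DELETE)
#define B_DISSET(t) ((t) & B_DELETE)
#define B_TYPE(t)   ((t) & ~B_DELETE)

int
main(void)
{
        uint8_t type = B_KEYDATA;

        B_DSET(type);                       /* Mark the item deleted... */
        assert(B_DISSET(type));             /* ...the flag is visible... */
        assert(B_TYPE(type) == B_KEYDATA);  /* ...but the type is unchanged. */

        B_DCLR(type);                       /* Clearing restores the raw type byte. */
        assert(type == B_KEYDATA);

        printf("type byte round-trips through the delete flag\n");
        return (0);
}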
diff --git a/libdb/dbinc/db_server_int.h b/libdb/dbinc/db_server_int.h
new file mode 100644
index 0000000..022a60f
--- /dev/null
+++ b/libdb/dbinc/db_server_int.h
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_SERVER_INT_H_
+#define _DB_SERVER_INT_H_
+
+#define DB_SERVER_TIMEOUT 300 /* 5 minutes */
+#define DB_SERVER_MAXTIMEOUT 1200 /* 20 minutes */
+#define DB_SERVER_IDLETIMEOUT 86400 /* 1 day */
+
+/*
+ * Ignore/mask off the following env->open flags:
+ * Most are illegal for a client to specify as they would control
+ * server resource usage. We will just ignore them.
+ * DB_LOCKDOWN
+ * DB_PRIVATE
+ * DB_RECOVER
+ * DB_RECOVER_FATAL
+ * DB_SYSTEM_MEM
+ * DB_USE_ENVIRON, DB_USE_ENVIRON_ROOT - handled on client
+ */
+#define DB_SERVER_FLAGMASK ( \
+DB_LOCKDOWN | DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | \
+DB_SYSTEM_MEM | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
+
+#define CT_CURSOR 0x001 /* Cursor */
+#define CT_DB 0x002 /* Database */
+#define CT_ENV 0x004 /* Env */
+#define CT_TXN 0x008 /* Txn */
+
+#define CT_JOIN 0x10000000 /* Join cursor component */
+#define CT_JOINCUR 0x20000000 /* Join cursor */
+
+typedef struct home_entry home_entry;
+struct home_entry {
+ LIST_ENTRY(home_entry) entries;
+ char *home;
+ char *dir;
+ char *name;
+ char *passwd;
+};
+
+/*
+ * Data needed for sharing handles.
+ * To share an env handle, on the open call, they must have matching
+ * env flags, and matching set_flags.
+ *
+ * To share a db handle on the open call, the db, subdb and flags must
+ * all be the same.
+ */
+#define DB_SERVER_ENVFLAGS ( \
+DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \
+DB_INIT_TXN | DB_JOINENV)
+
+#define DB_SERVER_DBFLAGS (DB_DIRTY_READ | DB_NOMMAP | DB_RDONLY)
+#define DB_SERVER_DBNOSHARE (DB_EXCL | DB_TRUNCATE)
+
+typedef struct ct_envdata ct_envdata;
+typedef struct ct_dbdata ct_dbdata;
+struct ct_envdata {
+ u_int32_t envflags;
+ u_int32_t onflags;
+ u_int32_t offflags;
+ home_entry *home;
+};
+
+struct ct_dbdata {
+ u_int32_t dbflags;
+ u_int32_t setflags;
+ char *db;
+ char *subdb;
+ DBTYPE type;
+};
+
+/*
+ * We maintain an activity timestamp for each handle.  The ct_activep
+ * pointer may refer either to the handle's own ct_active field or to
+ * the ct_active field of a parent.  In the case of nested transactions,
+ * and for any cursors within transactions, it must point to the
+ * ct_active field of the ultimate parent of the transaction, no matter
+ * how deeply it is nested.
+ */
+typedef struct ct_entry ct_entry;
+struct ct_entry {
+ LIST_ENTRY(ct_entry) entries; /* List of entries */
+ union {
+#ifdef __cplusplus
+ DbEnv *envp; /* H_ENV */
+ DbTxn *txnp; /* H_TXN */
+ Db *dbp; /* H_DB */
+ Dbc *dbc; /* H_CURSOR */
+#else
+ DB_ENV *envp; /* H_ENV */
+ DB_TXN *txnp; /* H_TXN */
+ DB *dbp; /* H_DB */
+ DBC *dbc; /* H_CURSOR */
+#endif
+ void *anyp;
+ } handle_u;
+ union { /* Private data per type */
+ ct_envdata envdp; /* Env info */
+ ct_dbdata dbdp; /* Db info */
+ } private_u;
+ long ct_id; /* Client ID */
+ long *ct_activep; /* Activity timestamp pointer*/
+ long *ct_origp; /* Original timestamp pointer*/
+ long ct_active; /* Activity timestamp */
+ long ct_timeout; /* Resource timeout */
+ long ct_idle; /* Idle timeout */
+ u_int32_t ct_refcount; /* Ref count for sharing */
+ u_int32_t ct_type; /* This entry's type */
+ struct ct_entry *ct_parent; /* Its parent */
+ struct ct_entry *ct_envparent; /* Its environment */
+};
+
+#define ct_envp handle_u.envp
+#define ct_txnp handle_u.txnp
+#define ct_dbp handle_u.dbp
+#define ct_dbc handle_u.dbc
+#define ct_anyp handle_u.anyp
+
+#define ct_envdp private_u.envdp
+#define ct_dbdp private_u.dbdp
+
+extern int __dbsrv_verbose;
+
+/*
+ * Get ctp and activate it.
+ * Assumes local variable 'replyp'.
+ * NOTE: May 'return' from macro.
+ */
+#define ACTIVATE_CTP(ctp, id, type) { \
+ (ctp) = get_tableent(id); \
+ if ((ctp) == NULL) { \
+ replyp->status = DB_NOSERVER_ID;\
+ return; \
+ } \
+ DB_ASSERT((ctp)->ct_type & (type)); \
+ __dbsrv_active(ctp); \
+}
+
+#endif /* !_DB_SERVER_INT_H_ */
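
DB_SERVER_FLAGMASK above works by simply clearing, on the server side, any environment-open flags a client must not control. A minimal standalone sketch of that masking pattern follows; the X_* flag values are made up for illustration and are not the real DB_* constants.

#include <assert.h>
#include <stdint.h>

/* Hypothetical flag values, for illustration only. */
#define X_CREATE          0x0001
#define X_LOCKDOWN        0x0002
#define X_PRIVATE         0x0004
#define X_RECOVER         0x0008

/* Flags the server ignores, in the style of DB_SERVER_FLAGMASK. */
#define X_SERVER_FLAGMASK (X_LOCKDOWN | X_PRIVATE | X_RECOVER)

int
main(void)
{
        uint32_t client_flags = X_CREATE | X_PRIVATE | X_RECOVER;

        /* Drop the flags the client is not allowed to set. */
        client_flags &= ~X_SERVER_FLAGMASK;

        assert(client_flags == X_CREATE);
        return (0);
}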
diff --git a/libdb/dbinc/db_shash.h b/libdb/dbinc/db_shash.h
new file mode 100644
index 0000000..de68908
--- /dev/null
+++ b/libdb/dbinc/db_shash.h
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_SHASH_H_
+#define _DB_SHASH_H_
+
+/* Hash Headers */
+typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB;
+
+/*
+ * HASHLOOKUP --
+ *
+ * Look up something in a shared memory hash table. The "elt" argument
+ * should be a key, and the "cmp" function must know how to compare a key to
+ * whatever structure it is that appears in the hash table.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into table for this item.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item for which we are searching in the hash table.
+ * res: the variable into which we'll store the element if we find it.
+ * cmp: called as: cmp(lookup_elt, table_elt).
+ *
+ * If the element is not in the hash table, this macro exits with res set
+ * to NULL.
+ */
+#define HASHLOOKUP(begin, ndx, type, field, elt, res, cmp) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ for (res = SH_TAILQ_FIRST(__bucket, type); \
+ res != NULL; res = SH_TAILQ_NEXT(res, field, type)) \
+ if (cmp(elt, res)) \
+ break; \
+} while (0)
+
+/*
+ * HASHINSERT --
+ *
+ * Insert a new entry into the hash table. This assumes that you already
+ * have the bucket locked and that lookup has failed; don't call it if you
+ * haven't already called HASHLOOKUP. If you do, you could get duplicate
+ * entries.
+ *
+ * begin: the beginning address of the hash table.
+ * ndx: the index for this element.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item to be inserted.
+ */
+#define HASHINSERT(begin, ndx, type, field, elt) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_INSERT_HEAD(__bucket, elt, field, type); \
+} while (0)
+
+/*
+ * HASHREMOVE_EL --
+ * Given the object "obj" in the table, remove it.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into hash table of where this element belongs.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * obj: the object in the table that we wish to delete.
+ */
+#define HASHREMOVE_EL(begin, ndx, type, field, obj) { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_REMOVE(__bucket, obj, field, type); \
+}
+#endif /* !_DB_SHASH_H_ */
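
HASHLOOKUP above is just a per-bucket scan with a caller-supplied comparison, stopping at the first match or at NULL. Below is a self-contained sketch of the same pattern, using an ordinary singly-linked list and strcmp() in place of the shared-memory SH_TAILQ queues so that it compiles on its own; it is an illustration, not the library macro.

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct elem {
        const char  *key;
        struct elem *next;
};

#define NBUCKETS 4

/* Same shape as HASHLOOKUP: pick the bucket, scan it, stop on a match. */
static struct elem *
lookup(struct elem *table[], size_t ndx, const char *key)
{
        struct elem *res;

        for (res = table[ndx]; res != NULL; res = res->next)
                if (strcmp(key, res->key) == 0)
                        break;
        return (res);
}

int
main(void)
{
        struct elem a = { "apple", NULL }, b = { "pear", &a };
        struct elem *table[NBUCKETS] = { NULL, &b, NULL, NULL };

        assert(lookup(table, 1, "apple") == &a);
        assert(lookup(table, 1, "plum") == NULL);
        return (0);
}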
diff --git a/libdb/dbinc/db_swap.h b/libdb/dbinc/db_swap.h
new file mode 100644
index 0000000..e7db66a
--- /dev/null
+++ b/libdb/dbinc/db_swap.h
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_SWAP_H_
+#define _DB_SWAP_H_
+
+/*
+ * Little endian <==> big endian 32-bit swap macros.
+ * M_32_SWAP swap a memory location
+ * P_32_COPY copy potentially unaligned 4 byte quantities
+ * P_32_SWAP swap a referenced memory location
+ */
+#define M_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ _tmp = a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_32_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+ ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \
+ ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \
+}
+#define P_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ P_32_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+/*
+ * Little endian <==> big endian 16-bit swap macros.
+ * M_16_SWAP swap a memory location
+ * P_16_COPY copy potentially unaligned 2 byte quantities
+ * P_16_SWAP swap a referenced memory location
+ */
+#define M_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ _tmp = (u_int16_t)a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_16_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+}
+#define P_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ P_16_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+#define SWAP32(p) { \
+ P_32_SWAP(p); \
+ (p) += sizeof(u_int32_t); \
+}
+#define SWAP16(p) { \
+ P_16_SWAP(p); \
+ (p) += sizeof(u_int16_t); \
+}
+
+/*
+ * Berkeley DB has local versions of htonl() and ntohl() that operate on
+ * pointers to the right size memory locations; the portability magic for
+ * finding the real system functions isn't worth the effort.
+ */
+#define DB_HTONL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
+#define DB_NTOHL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
+
+#endif /* !_DB_SWAP_H_ */
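
The 32-bit swap macros above reverse the in-memory byte order, which yields the byte-swapped value regardless of host endianness. A standalone sketch restating the same shuffle as a function (illustration only, not the library macro):

#include <assert.h>
#include <stdint.h>

/* Same byte shuffle as M_32_SWAP, written as a function for clarity. */
static uint32_t
swap32(uint32_t a)
{
        uint32_t tmp = a;
        uint8_t *dst = (uint8_t *)&a, *src = (uint8_t *)&tmp;

        dst[0] = src[3];
        dst[1] = src[2];
        dst[2] = src[1];
        dst[3] = src[0];
        return (a);
}

int
main(void)
{
        assert(swap32(0x01020304) == 0x04030201);       /* bytes reversed */
        assert(swap32(swap32(0xdeadbeefU)) == 0xdeadbeefU); /* round trip */
        return (0);
}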
diff --git a/libdb/dbinc/db_upgrade.h b/libdb/dbinc/db_upgrade.h
new file mode 100644
index 0000000..f6b305f
--- /dev/null
+++ b/libdb/dbinc/db_upgrade.h
@@ -0,0 +1,242 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_UPGRADE_H_
+#define _DB_UPGRADE_H_
+
+/*
+ * This file defines the metadata pages from the previous release.
+ * These structures are only used to upgrade old versions of databases.
+ */
+
+/* Structures from the 3.1 release */
+typedef struct _dbmeta31 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ DB_LSN unused3; /* 36-39: Unused. */
+ u_int32_t key_count; /* 40-43: Cached key count. */
+ u_int32_t record_count; /* 44-47: Cached record count. */
+ u_int32_t flags; /* 48-51: Flags: unique to each AM. */
+ /* 52-71: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA31;
+
+typedef struct _btmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-91: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA31;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA31;
+
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t start; /* 72-75: Start offset. */
+ u_int32_t first_recno; /* 76-79: First not deleted record. */
+ u_int32_t cur_recno; /* 80-83: Last recno allocated. */
+ u_int32_t re_len; /* 84-87: Fixed-length record length. */
+ u_int32_t re_pad; /* 88-91: Fixed-length record pad. */
+ u_int32_t rec_page; /* 92-95: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA31;
+/* Structures from the 3.2 release */
+typedef struct _qmeta32 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Last recno allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA32;
+
+/* Structures from the 3.0 release */
+
+typedef struct _dbmeta30 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ u_int32_t flags; /* 32-35: Flags: unique to each AM. */
+ /* 36-55: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA30;
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 56-59: Btree: Maxkey. */
+ u_int32_t minkey; /* 60-63: Btree: Minkey. */
+ u_int32_t re_len; /* 64-67: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 68-71: Recno: fixed-length record pad. */
+ u_int32_t root; /* 72-75: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA30;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 56-59: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 60-63: Modulo mask into table */
+ u_int32_t low_mask; /* 64-67: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 68-71: Fill factor */
+ u_int32_t nelem; /* 72-75: Number of keys in hash table */
+ u_int32_t h_charkey; /* 76-79: Value of hash(CHARKEY) */
+#define NCACHED30 32 /* number of spare points */
+ /* 80-207: Spare pages for overflow */
+ u_int32_t spares[NCACHED30];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA30;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t start; /* 56-59: Start offset. */
+ u_int32_t first_recno; /* 60-63: First not deleted record. */
+ u_int32_t cur_recno; /* 64-67: Last recno allocated. */
+ u_int32_t re_len; /* 68-71: Fixed-length record length. */
+ u_int32_t re_pad; /* 72-75: Fixed-length record pad. */
+ u_int32_t rec_page; /* 76-79: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA30;
+
+/* Structures from Release 2.x */
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree metadata page layout:
+ */
+typedef struct _btmeta2X {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int32_t maxkey; /* 24-27: Btree: Maxkey. */
+ u_int32_t minkey; /* 28-31: Btree: Minkey. */
+ u_int32_t free; /* 32-35: Free list page number. */
+ u_int32_t flags; /* 36-39: Flags. */
+ u_int32_t re_len; /* 40-43: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 44-47: Recno: fixed-length record pad. */
+ /* 48-67: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} BTMETA2X;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Hash metadata page layout:
+ */
+/* Hash Table Information */
+typedef struct hashhdr { /* Disk resident portion */
+ DB_LSN lsn; /* 00-07: LSN of the header page */
+ db_pgno_t pgno; /* 08-11: Page number (btree compatibility). */
+ u_int32_t magic; /* 12-15: Magic NO for hash tables */
+ u_int32_t version; /* 16-19: Version ID */
+ u_int32_t pagesize; /* 20-23: Bucket/Page Size */
+ u_int32_t ovfl_point; /* 24-27: Overflow page allocation location */
+ u_int32_t last_freed; /* 28-31: Last freed overflow page pgno */
+ u_int32_t max_bucket; /* 32-35: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 36-39: Modulo mask into table */
+ u_int32_t low_mask; /* 40-43: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 44-47: Fill factor */
+ u_int32_t nelem; /* 48-51: Number of keys in hash table */
+ u_int32_t h_charkey; /* 52-55: Value of hash(CHARKEY) */
+ u_int32_t flags; /* 56-59: Allow duplicates. */
+#define NCACHED2X 32 /* number of spare points */
+ /* 60-187: Spare pages for overflow */
+ u_int32_t spares[NCACHED2X];
+ /* 188-207: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HASHHDR;
+
+#endif /* !_DB_UPGRADE_H_ */
diff --git a/libdb/dbinc/db_verify.h b/libdb/dbinc/db_verify.h
new file mode 100644
index 0000000..55858ee
--- /dev/null
+++ b/libdb/dbinc/db_verify.h
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_VERIFY_H_
+#define _DB_VERIFY_H_
+
+/*
+ * Structures and macros for the storage and retrieval of all information
+ * needed for inter-page verification of a database.
+ */
+
+/*
+ * EPRINT is the macro for error printing. Takes as an arg the arg set
+ * for DB->err.
+ */
+#define EPRINT(x) \
+ do { \
+ if (!LF_ISSET(DB_SALVAGE)) \
+ __db_err x; \
+ } while (0)
+
+/* For fatal type errors--i.e., verifier bugs. */
+#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \
+ EPRINT(((dbenv), "Page %lu: %s called on nonsensical page of type %lu", \
+ (u_long)(pgno), (func), (u_long)(ptype)));
+
+/* Complain about a totally zeroed page where we don't expect one. */
+#define ZEROPG_ERR_PRINT(dbenv, pgno, str) \
+ do { \
+ EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \
+ (u_long)(pgno), str, (u_long)P_INVALID)); \
+ EPRINT(((dbenv), "Page %lu: totally zeroed page", \
+ (u_long)(pgno))); \
+ } while (0)
+
+/*
+ * Note that 0 is, in general, a valid pgno, despite equalling PGNO_INVALID;
+ * we have to test it separately where it's not appropriate.
+ */
+#define IS_VALID_PGNO(x) ((x) <= vdp->last_pgno)
+
+/*
+ * Flags understood by the btree structure checks (esp. __bam_vrfy_subtree).
+ * These share the same space as the global flags to __db_verify, and must not
+ * dip below 0x00010000.
+ */
+#define ST_DUPOK 0x00010000 /* Duplicates are acceptable. */
+#define ST_DUPSET 0x00020000 /* Subtree is in a duplicate tree. */
+#define ST_DUPSORT 0x00040000 /* Duplicates are sorted. */
+#define ST_IS_RECNO 0x00080000 /* Subtree is a recno. */
+#define ST_OVFL_LEAF 0x00100000 /* Overflow reffed from leaf page. */
+#define ST_RECNUM 0x00200000 /* Subtree has record numbering on. */
+#define ST_RELEN 0x00400000 /* Subtree has fixed-length records. */
+#define ST_TOPLEVEL 0x00800000 /* Subtree == entire tree */
+
+/*
+ * Flags understood by __bam_salvage and __db_salvage. These need not share
+ * the same space with the __bam_vrfy_subtree flags, but must share with
+ * __db_verify.
+ */
+#define SA_SKIPFIRSTKEY 0x00080000
+
+/*
+ * VRFY_DBINFO is the fundamental structure; it either represents the database
+ * of subdatabases, or the sole database if there are no subdatabases.
+ */
+struct __vrfy_dbinfo {
+ /* Info about this database in particular. */
+ DBTYPE type;
+
+ /* List of subdatabase meta pages, if any. */
+ LIST_HEAD(__subdbs, __vrfy_childinfo) subdbs;
+
+ /* File-global info--stores VRFY_PAGEINFOs for each page. */
+ DB *pgdbp;
+
+ /* Child database--stores VRFY_CHILDINFOs of each page. */
+ DB *cdbp;
+
+ /* Page info structures currently in use. */
+ LIST_HEAD(__activepips, __vrfy_pageinfo) activepips;
+
+ /*
+ * DB we use to keep track of which pages are linked somehow
+ * during verification. 0 is the default, "unseen"; 1 is seen.
+ */
+ DB *pgset;
+
+ /*
+ * This is a database we use during salvaging to keep track of which
+ * overflow and dup pages we need to come back to at the end and print
+ * with key "UNKNOWN". Pages which print with a good key get set
+ * to SALVAGE_IGNORE; others get set, as appropriate, to SALVAGE_LDUP,
+ * SALVAGE_LRECNODUP, SALVAGE_OVERFLOW for normal db overflow pages,
+ * and SALVAGE_BTREE, SALVAGE_LRECNO, and SALVAGE_HASH for subdb
+ * pages.
+ */
+#define SALVAGE_INVALID 0
+#define SALVAGE_IGNORE 1
+#define SALVAGE_LDUP 2
+#define SALVAGE_LRECNODUP 3
+#define SALVAGE_OVERFLOW 4
+#define SALVAGE_LBTREE 5
+#define SALVAGE_HASH 6
+#define SALVAGE_LRECNO 7
+ DB *salvage_pages;
+
+ db_pgno_t last_pgno;
+ db_pgno_t pgs_remaining; /* For dbp->db_feedback(). */
+
+ /*
+ * These are used during __bam_vrfy_subtree to keep track, while
+ * walking up and down the Btree structure, of the prev- and next-page
+ * chain of leaf pages and verify that it's intact. Also, make sure
+ * that this chain contains pages of only one type.
+ */
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ u_int8_t leaf_type;
+
+ /* Queue needs these to verify data pages in the first pass. */
+ u_int32_t re_len;
+ u_int32_t rec_page;
+
+#define SALVAGE_PRINTABLE 0x01 /* Output printable chars literally. */
+#define SALVAGE_PRINTHEADER 0x02 /* Print the unknown-key header. */
+#define SALVAGE_PRINTFOOTER 0x04 /* Print the unknown-key footer. */
+ u_int32_t flags;
+}; /* VRFY_DBINFO */
+
+/*
+ * The amount of state information we need per-page is small enough that
+ * it's not worth the trouble to define separate structures for each
+ * possible type of page, and since we're doing verification with these we
+ * have to be open to the possibility that page N will be of a completely
+ * unexpected type anyway. So we define one structure here with all the
+ * info we need for inter-page verification.
+ */
+struct __vrfy_pageinfo {
+ u_int8_t type;
+ u_int8_t bt_level;
+ u_int8_t unused1;
+ u_int8_t unused2;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+
+ /* meta pages */
+ db_pgno_t root;
+ db_pgno_t free; /* Free list head. */
+
+ db_indx_t entries; /* Actual number of entries. */
+ u_int16_t unused;
+ db_recno_t rec_cnt; /* Record count. */
+ u_int32_t re_len; /* Record length. */
+ u_int32_t bt_minkey;
+ u_int32_t bt_maxkey;
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+
+ /* overflow pages */
+ /*
+ * Note that refcount is the refcount for an overflow page; pi_refcount
+ * is this structure's own refcount!
+ */
+ u_int32_t refcount;
+ u_int32_t olen;
+
+#define VRFY_DUPS_UNSORTED 0x0001 /* Have to flag the negative! */
+#define VRFY_HAS_DUPS 0x0002
+#define VRFY_HAS_DUPSORT 0x0004 /* Has the flag set. */
+#define VRFY_HAS_SUBDBS 0x0008
+#define VRFY_HAS_RECNUMS 0x0010
+#define VRFY_INCOMPLETE 0x0020 /* Meta or item order checks incomp. */
+#define VRFY_IS_ALLZEROES 0x0040 /* Hash page we haven't touched? */
+#define VRFY_IS_FIXEDLEN 0x0080
+#define VRFY_IS_RECNO 0x0100
+#define VRFY_IS_RRECNO 0x0200
+#define VRFY_OVFL_LEAFSEEN 0x0400
+ u_int32_t flags;
+
+ LIST_ENTRY(__vrfy_pageinfo) links;
+ u_int32_t pi_refcount;
+}; /* VRFY_PAGEINFO */
+
+struct __vrfy_childinfo {
+ db_pgno_t pgno;
+
+#define V_DUPLICATE 1 /* off-page dup metadata */
+#define V_OVERFLOW 2 /* overflow page */
+#define V_RECNO 3 /* btree internal or leaf page */
+ u_int32_t type;
+ db_recno_t nrecs; /* record count on a btree subtree */
+ u_int32_t tlen; /* ovfl. item total size */
+
+ LIST_ENTRY(__vrfy_childinfo) links;
+}; /* VRFY_CHILDINFO */
+
+#endif /* !_DB_VERIFY_H_ */
diff --git a/libdb/dbinc/debug.h b/libdb/dbinc/debug.h
new file mode 100644
index 0000000..55a5c5d
--- /dev/null
+++ b/libdb/dbinc/debug.h
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_DEBUG_H_
+#define _DB_DEBUG_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * When running with DIAGNOSTIC defined, we smash memory and do memory
+ * guarding with a special byte value.
+ */
+#define CLEAR_BYTE 0xdb
+#define GUARD_BYTE 0xdc
+
+/*
+ * DB assertions.
+ */
+#if defined(DIAGNOSTIC) && defined(__STDC__)
+#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__))
+#else
+#define DB_ASSERT(e)
+#endif
+
+/*
+ * Purify and other run-time tools complain about uninitialized reads/writes
+ * of structure fields whose only purpose is padding, as well as when heap
+ * memory that was never initialized is written to disk.
+ */
+#ifdef UMRW
+#define UMRW_SET(v) (v) = 0
+#else
+#define UMRW_SET(v)
+#endif
+
+/*
+ * Error message handling. Use a macro instead of a function because va_list
+ * references to variadic arguments cannot be reset to the beginning of the
+ * variadic argument list (and then rescanned), by functions other than the
+ * original routine that took the variadic list of arguments.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap, fmt); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#else
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#endif
+
+/*
+ * Debugging macro to log operations.
+ * If DEBUG_WOP is defined, log operations that modify the database.
+ * If DEBUG_ROP is defined, log operations that read the database.
+ *
+ * C cursor (its dbp and dbenv are used for logging)
+ * T txn
+ * O operation (string)
+ * K key
+ * A data
+ * F flags
+ */
+#define LOG_OP(C, T, O, K, A, F) { \
+ DB_LSN __lsn; \
+ DBT __op; \
+ if (DBC_LOGGING((C))) { \
+ memset(&__op, 0, sizeof(__op)); \
+ __op.data = O; \
+ __op.size = strlen(O) + 1; \
+ (void)__db_debug_log((C)->dbp->dbenv, T, &__lsn, 0, \
+ &__op, (C)->dbp->log_filename->id, K, A, F); \
+ } \
+}
+#ifdef DEBUG_ROP
+#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LREAD(C, T, O, K, A, F)
+#endif
+#ifdef DEBUG_WOP
+#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LWRITE(C, T, O, K, A, F)
+#endif
+
+/*
+ * Hook for testing recovery at various places in the create/delete paths.
+ * Hook for testing subdb locks.
+ */
+#if CONFIG_TEST
+#define DB_TEST_SUBLOCKS(env, flags) \
+do { \
+ if ((env)->test_abort == DB_TEST_SUBDB_LOCKS) \
+ (flags) |= DB_LOCK_NOWAIT; \
+} while (0)
+
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((env)); \
+ if ((env)->test_copy == (val)) { \
+ /* COPY the FILE */ \
+ if ((__ret = __db_testcopy((env), NULL, (name))) != 0) \
+ (ret) = __db_panic((env), __ret); \
+ } \
+ if ((env)->test_abort == (val)) { \
+ /* ABORT the TXN */ \
+ (env)->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY(dbp, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((dbp)->dbenv); \
+ if ((dbp)->dbenv->test_copy == (val)) { \
+ /* Copy the file. */ \
+ if (F_ISSET((dbp), \
+ DB_AM_OPEN_CALLED) && (dbp)->mpf != NULL) \
+ (void)(dbp)->sync((dbp), 0); \
+ if ((__ret = \
+ __db_testcopy((dbp)->dbenv, (dbp), (name))) != 0) \
+ (ret) = __db_panic((dbp)->dbenv, __ret); \
+ } \
+ if ((dbp)->dbenv->test_abort == (val)) { \
+ /* Abort the transaction. */ \
+ (dbp)->dbenv->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY_LABEL db_tr_err:
+#else
+#define DB_TEST_SUBLOCKS(env, flags)
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name)
+#define DB_TEST_RECOVERY(dbp, val, ret, name)
+#define DB_TEST_RECOVERY_LABEL
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_DEBUG_H_ */
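
DB_ASSERT above expands to nothing unless DIAGNOSTIC is defined, so the checks cost nothing in production builds. Here is a standalone sketch of the same compile-away pattern; the failure handler is a plain abort() stand-in, not the library's __db_assert routine.

#include <stdio.h>
#include <stdlib.h>

/* Debug-only assertion in the style of DB_ASSERT. */
#ifdef DIAGNOSTIC
static void
my_assert_fail(const char *expr, const char *file, int line)
{
        fprintf(stderr, "assert failure: %s:%d: \"%s\"\n", file, line, expr);
        abort();
}
#define MY_ASSERT(e) \
        ((e) ? (void)0 : my_assert_fail(#e, __FILE__, __LINE__))
#else
#define MY_ASSERT(e)            /* Expands to nothing in production builds. */
#endif

int
main(void)
{
        int nentries = 1;

        (void)nentries;          /* Silence "unused" warnings when disabled. */
        MY_ASSERT(nentries > 0); /* Checked only when DIAGNOSTIC is defined. */
        return (0);
}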
diff --git a/libdb/dbinc/fop.h b/libdb/dbinc/fop.h
new file mode 100644
index 0000000..fe118fc
--- /dev/null
+++ b/libdb/dbinc/fop.h
@@ -0,0 +1,16 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _FOP_H_
+#define _FOP_H_
+
+#include "dbinc_auto/fileops_auto.h"
+#include "dbinc_auto/fileops_ext.h"
+
+#endif /* !_FOP_H_ */
diff --git a/libdb/dbinc/globals.h b/libdb/dbinc/globals.h
new file mode 100644
index 0000000..4454cab
--- /dev/null
+++ b/libdb/dbinc/globals.h
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*******************************************************
+ * Global variables.
+ *
+ * Held in a single structure to minimize the name-space pollution.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+typedef struct __db_globals {
+ u_int32_t no_write_errors; /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+
+ int (*j_close) __P((int)); /* Underlying OS interface jump table.*/
+ void (*j_dirfree) __P((char **, int));
+ int (*j_dirlist) __P((const char *, char ***, int *));
+ int (*j_exists) __P((const char *, int *));
+ void (*j_free) __P((void *));
+ int (*j_fsync) __P((int));
+ int (*j_ioinfo) __P((const char *,
+ int, u_int32_t *, u_int32_t *, u_int32_t *));
+ void *(*j_malloc) __P((size_t));
+ int (*j_map) __P((char *, size_t, int, int, void **));
+ int (*j_open) __P((const char *, int, ...));
+ ssize_t (*j_read) __P((int, void *, size_t));
+ void *(*j_realloc) __P((void *, size_t));
+ int (*j_rename) __P((const char *, const char *));
+ int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+ int (*j_sleep) __P((u_long, u_long));
+ int (*j_unlink) __P((const char *));
+ int (*j_unmap) __P((void *, size_t));
+ ssize_t (*j_write) __P((int, const void *, size_t));
+ int (*j_yield) __P((void));
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ 0, /* VxWorks: initialized */
+ NULL, /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ {NULL, &__db_global_values.db_envq.tqh_first},
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+
+#define DB_GLOBAL(v) __db_global_values.v
diff --git a/libdb/dbinc/hash.h b/libdb/dbinc/hash.h
new file mode 100644
index 0000000..218ad13
--- /dev/null
+++ b/libdb/dbinc/hash.h
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_HASH_H_
+#define _DB_HASH_H_
+
+/* Hash internal structure. */
+typedef struct hash_t {
+ db_pgno_t meta_pgno; /* Page number of the meta data page. */
+ u_int32_t h_ffactor; /* Fill factor. */
+ u_int32_t h_nelem; /* Number of elements. */
+ /* Hash function. */
+ u_int32_t (*h_hash) __P((DB *, const void *, u_int32_t));
+} HASH;
+
+/* Cursor structure definitions. */
+typedef struct cursor_t {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Hash private part */
+
+ /* Per-thread information */
+ DB_LOCK hlock; /* Metadata page lock. */
+ HMETA *hdr; /* Pointer to meta-data page. */
+ PAGE *split_buf; /* Temporary buffer for splits. */
+
+ /* Hash cursor information */
+ db_pgno_t bucket; /* Bucket we are traversing. */
+ db_pgno_t lbucket; /* Bucket for which we are locked. */
+ db_indx_t dup_off; /* Offset within a duplicate set. */
+ db_indx_t dup_len; /* Length of current duplicate. */
+ db_indx_t dup_tlen; /* Total length of duplicate entry. */
+ u_int32_t seek_size; /* Number of bytes we need for add. */
+ db_pgno_t seek_found_page;/* Page on which we can insert. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+#define H_CONTINUE 0x0001 /* Join--search strictly fwd for data */
+#define H_DELETED 0x0002 /* Cursor item is deleted. */
+#define H_DIRTY 0x0004 /* Meta-data page needs to be written */
+#define H_DUPONLY 0x0008 /* Dups only; do not change key. */
+#define H_EXPAND 0x0010 /* Table expanded. */
+#define H_ISDUP 0x0020 /* Cursor is within duplicate set. */
+#define H_NEXT_NODUP 0x0040 /* Get next non-dup entry. */
+#define H_NOMORE 0x0080 /* No more entries in bucket. */
+#define H_OK 0x0100 /* Request succeeded. */
+ u_int32_t flags;
+} HASH_CURSOR;
+
+/* Test string. */
+#define CHARKEY "%$sniglet^&"
+
+/* Overflow management */
+/*
+ * The spares table indicates the page number at which each doubling begins.
+ * The number of buckets already allocated is subtracted out when each entry
+ * is stored, so translating a bucket to its page is a simple addition.
+ */
+#define BS_TO_PAGE(bucket, spares) \
+ ((bucket) + (spares)[__db_log2((bucket) + 1)])
+#define BUCKET_TO_PAGE(I, B) (BS_TO_PAGE((B), (I)->hdr->spares))
+
+/* Constraints about how much data goes on a page. */
+
+#define MINFILL 4
+#define ISBIG(I, N) (((N) > ((I)->hdr->dbmeta.pagesize / MINFILL)) ? 1 : 0)
+
+/* Shorthands for accessing structure */
+#define NDX_INVALID 0xFFFF
+#define BUCKET_INVALID 0xFFFFFFFF
+
+/* On page duplicates are stored as a string of size-data-size triples. */
+#define DUP_SIZE(len) ((len) + 2 * sizeof(db_indx_t))
+
+/* Log messages types (these are subtypes within a record type) */
+#define PAIR_KEYMASK 0x1
+#define PAIR_DATAMASK 0x2
+#define PAIR_DUPMASK 0x4
+#define PAIR_MASK 0xf
+#define PAIR_ISKEYBIG(N) (N & PAIR_KEYMASK)
+#define PAIR_ISDATABIG(N) (N & PAIR_DATAMASK)
+#define PAIR_ISDATADUP(N) (N & PAIR_DUPMASK)
+#define OPCODE_OF(N) (N & ~PAIR_MASK)
+
+#define PUTPAIR 0x20
+#define DELPAIR 0x30
+#define PUTOVFL 0x40
+#define DELOVFL 0x50
+#define HASH_UNUSED1 0x60
+#define HASH_UNUSED2 0x70
+#define SPLITOLD 0x80
+#define SPLITNEW 0x90
+
+typedef enum {
+ DB_HAM_CHGPG = 1,
+ DB_HAM_DELFIRSTPG = 2,
+ DB_HAM_DELMIDPG = 3,
+ DB_HAM_DELLASTPG = 4,
+ DB_HAM_DUP = 5,
+ DB_HAM_SPLIT = 6
+} db_ham_mode;
+
+#include "dbinc_auto/hash_auto.h"
+#include "dbinc_auto/hash_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_HASH_H_ */
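
The spares-table comment above boils down to: page = bucket + spares[log2(bucket + 1)], where each spares entry already has the earlier buckets subtracted out. A self-contained numeric sketch of that arithmetic follows; the spares values are invented for illustration, and log2_ceil stands in for the library's __db_log2.

#include <assert.h>
#include <stdint.h>

/* Smallest power p with (1 << p) >= num, like the library's __db_log2. */
static uint32_t
log2_ceil(uint32_t num)
{
        uint32_t p;

        for (p = 0; (uint32_t)(1 << p) < num; ++p)
                ;
        return (p);
}

/* Same formula as BS_TO_PAGE: page = bucket + spares[log2(bucket + 1)]. */
static uint32_t
bucket_to_page(uint32_t bucket, const uint32_t *spares)
{
        return (bucket + spares[log2_ceil(bucket + 1)]);
}

int
main(void)
{
        /*
         * Hypothetical spares table: doublings 0 and 1 start right after a
         * single meta page; doubling 2 was pushed two pages later by
         * intervening overflow pages.
         */
        uint32_t spares[] = { 1, 1, 3, 3 };

        assert(bucket_to_page(0, spares) == 1);   /* doubling 0 */
        assert(bucket_to_page(1, spares) == 2);   /* doubling 1 */
        assert(bucket_to_page(2, spares) == 5);   /* doubling 2 */
        assert(bucket_to_page(3, spares) == 6);   /* doubling 2 */
        return (0);
}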
diff --git a/libdb/dbinc/hmac.h b/libdb/dbinc/hmac.h
new file mode 100644
index 0000000..8b15e6c
--- /dev/null
+++ b/libdb/dbinc/hmac.h
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_HMAC_H_
+#define _DB_HMAC_H_
+
+/*
+ * Algorithm specific information.
+ */
+/*
+ * SHA1 checksumming
+ */
+typedef struct {
+ u_int32_t state[5];
+ u_int32_t count[2];
+ unsigned char buffer[64];
+} SHA1_CTX;
+
+/*
+ * AES assumes the SHA1 checksumming (also called MAC)
+ */
+#define DB_MAC_MAGIC "mac derivation key magic value"
+#define DB_ENC_MAGIC "encryption and decryption key value magic"
+
+#include "dbinc_auto/hmac_ext.h"
+#endif /* !_DB_HMAC_H_ */
diff --git a/libdb/dbinc/lock.h b/libdb/dbinc/lock.h
new file mode 100644
index 0000000..0027faf
--- /dev/null
+++ b/libdb/dbinc/lock.h
@@ -0,0 +1,212 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_LOCK_H_
+#define _DB_LOCK_H_
+
+#define DB_LOCK_DEFAULT_N 1000 /* Default # of locks in region. */
+
+/*
+ * The locker id space is divided between the transaction manager and the lock
+ * manager. Lock IDs start at 1 and go to DB_LOCK_MAXID. Txn IDs start at
+ * DB_LOCK_MAXID + 1 and go up to TXN_MAXIMUM.
+ */
+#define DB_LOCK_INVALIDID 0
+#define DB_LOCK_MAXID 0x7fffffff
+
+/*
+ * Out of band value for a lock. Locks contain an offset into a lock region,
+ * so we use an invalid region offset to indicate an invalid or unset lock.
+ */
+#define LOCK_INVALID INVALID_ROFF
+#define LOCK_ISSET(lock) ((lock).off != LOCK_INVALID)
+#define LOCK_INIT(lock) ((lock).off = LOCK_INVALID)
+
+/*
+ * Macro to identify a write lock for the purpose of counting locks
+ * for the NUMWRITES option to deadlock detection.
+ */
+#define IS_WRITELOCK(m) \
+ ((m) == DB_LOCK_WRITE || (m) == DB_LOCK_IWRITE || (m) == DB_LOCK_IWR)
+
+/*
+ * Lock timers.
+ */
+typedef struct {
+ u_int32_t tv_sec; /* Seconds. */
+ u_int32_t tv_usec; /* Microseconds. */
+} db_timeval_t;
+
+#define LOCK_TIME_ISVALID(time) ((time)->tv_sec != 0)
+#define LOCK_SET_TIME_INVALID(time) ((time)->tv_sec = 0)
+#define LOCK_TIME_EQUAL(t1, t2) \
+ ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec == (t2)->tv_usec)
+
+/*
+ * DB_LOCKREGION --
+ * The lock shared region.
+ */
+typedef struct __db_lockregion {
+ u_int32_t need_dd; /* flag for deadlock detector */
+ u_int32_t detect; /* run dd on every conflict */
+ /* free lock header */
+ SH_TAILQ_HEAD(__flock) free_locks;
+ /* free obj header */
+ SH_TAILQ_HEAD(__fobj) free_objs;
+ /* free locker header */
+ SH_TAILQ_HEAD(__flocker) free_lockers;
+ SH_TAILQ_HEAD(__dobj) dd_objs; /* objects with waiters */
+ SH_TAILQ_HEAD(__lkrs) lockers; /* list of lockers */
+
+ db_timeout_t lk_timeout; /* timeout for locks. */
+ db_timeout_t tx_timeout; /* timeout for txns. */
+
+ u_int32_t locker_t_size; /* size of locker hash table */
+ u_int32_t object_t_size; /* size of object hash table */
+
+ roff_t conf_off; /* offset of conflicts array */
+ roff_t obj_off; /* offset of object hash table */
+ roff_t osynch_off; /* offset of the object mutex table */
+ roff_t locker_off; /* offset of locker hash table */
+ roff_t lsynch_off; /* offset of the locker mutex table */
+
+ DB_LOCK_STAT stat; /* stats about locking. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+} DB_LOCKREGION;
+
+/*
+ * Since we will store DBTs in shared memory, we need the equivalent of a
+ * DBT that will work in shared memory.
+ */
+typedef struct __sh_dbt {
+ u_int32_t size; /* Byte length. */
+ ssize_t off; /* Region offset. */
+} SH_DBT;
+
+#define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) + (p)->off))
+
+/*
+ * Object structures; these live in the object hash table.
+ */
+typedef struct __db_lockobj {
+ SH_DBT lockobj; /* Identifies object locked. */
+ SH_TAILQ_ENTRY links; /* Links for free list or hash list. */
+ SH_TAILQ_ENTRY dd_links; /* Links for dd list. */
+ SH_TAILQ_HEAD(__wait) waiters; /* List of waiting locks. */
+ SH_TAILQ_HEAD(__hold) holders; /* List of held locks. */
+ /* Declare room in the object to hold
+ * typical DB lock structures so that
+ * we do not have to allocate them from
+ * shalloc at run-time. */
+ u_int8_t objdata[sizeof(struct __db_ilock)];
+} DB_LOCKOBJ;
+
+/*
+ * Locker structures; these live in the locker hash table.
+ */
+typedef struct __db_locker {
+ u_int32_t id; /* Locker id. */
+ u_int32_t dd_id; /* Deadlock detector id. */
+ u_int32_t nlocks; /* Number of locks held. */
+ u_int32_t nwrites; /* Number of write locks held. */
+ size_t master_locker; /* Locker of master transaction. */
+ size_t parent_locker; /* Parent of this child. */
+ SH_LIST_HEAD(_child) child_locker; /* List of descendant txns;
+ only used in a "master"
+ txn. */
+ SH_LIST_ENTRY child_link; /* Links transactions in the family;
+ elements of the child_locker
+ list. */
+ SH_TAILQ_ENTRY links; /* Links for free and hash list. */
+ SH_TAILQ_ENTRY ulinks; /* Links in-use list. */
+ SH_LIST_HEAD(_held) heldby; /* Locks held by this locker. */
+ db_timeval_t lk_expire; /* When current lock expires. */
+ db_timeval_t tx_expire; /* When this txn expires. */
+ db_timeout_t lk_timeout; /* How long do we let locks live. */
+
+#define DB_LOCKER_DELETED 0x0001
+#define DB_LOCKER_DIRTY 0x0002
+#define DB_LOCKER_INABORT 0x0004
+#define DB_LOCKER_TIMEOUT 0x0008
+ u_int32_t flags;
+} DB_LOCKER;
+
+/*
+ * DB_LOCKTAB --
+ * The primary library lock data structure (i.e., the one referenced
+ * by the environment, as opposed to the internal one laid out in the region.)
+ */
+typedef struct __db_locktab {
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+ u_int8_t *conflicts; /* Pointer to conflict matrix. */
+ DB_HASHTAB *obj_tab; /* Beginning of object hash table. */
+ DB_HASHTAB *locker_tab; /* Beginning of locker hash table. */
+} DB_LOCKTAB;
+
+/* Test for conflicts. */
+#define CONFLICTS(T, R, HELD, WANTED) \
+ (T)->conflicts[(HELD) * (R)->stat.st_nmodes + (WANTED)]
+
+#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1)
+
+struct __db_lock {
+ /*
+ * Wait on mutex to wait on lock. You reference your own mutex with
+ * ID 0 and others reference your mutex with ID 1.
+ */
+ DB_MUTEX mutex;
+
+ u_int32_t holder; /* Who holds this lock. */
+ u_int32_t gen; /* Generation count. */
+ SH_TAILQ_ENTRY links; /* Free or holder/waiter list. */
+ SH_LIST_ENTRY locker_links; /* List of locks held by a locker. */
+ u_int32_t refcount; /* Reference count the lock. */
+ db_lockmode_t mode; /* What sort of lock. */
+ ssize_t obj; /* Relative offset of object struct. */
+ db_status_t status; /* Status of this lock. */
+};
+
+/*
+ * Flag values for __lock_put_internal:
+ * DB_LOCK_DOALL: Unlock all references in this lock (instead of only 1).
+ * DB_LOCK_FREE: Free the lock (used in checklocker).
+ * DB_LOCK_IGNOREDEL: Remove from the locker hash table even if already
+ *	deleted (used in checklocker).
+ * DB_LOCK_NOPROMOTE: Don't bother running promotion when releasing locks
+ * (used by __lock_put_internal).
+ * DB_LOCK_UNLINK: Remove from the locker links (used in checklocker).
+ * Make sure that these do not conflict with the interface flags because
+ * we pass some of those around (i.e., DB_LOCK_REMOVE).
+ */
+#define DB_LOCK_DOALL 0x010000
+#define DB_LOCK_FREE 0x020000
+#define DB_LOCK_IGNOREDEL 0x040000
+#define DB_LOCK_NOPROMOTE 0x080000
+#define DB_LOCK_UNLINK 0x100000
+#define DB_LOCK_NOWAITERS 0x200000
+
+/*
+ * Macros to get/release different types of mutexes.
+ */
+#define OBJECT_LOCK(lt, reg, obj, ndx) \
+ ndx = __lock_ohash(obj) % (reg)->object_t_size
+#define SHOBJECT_LOCK(lt, reg, shobj, ndx) \
+ ndx = __lock_lhash(shobj) % (reg)->object_t_size
+#define LOCKER_LOCK(lt, reg, locker, ndx) \
+ ndx = __lock_locker_hash(locker) % (reg)->locker_t_size;
+
+#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(lt)->reginfo)
+#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &(lt)->reginfo)
+
+#include "dbinc_auto/lock_ext.h"
+#endif /* !_DB_LOCK_H_ */
diff --git a/libdb/dbinc/log.h b/libdb/dbinc/log.h
new file mode 100644
index 0000000..4c8d204
--- /dev/null
+++ b/libdb/dbinc/log.h
@@ -0,0 +1,273 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _LOG_H_
+#define _LOG_H_
+
+struct __db_log; typedef struct __db_log DB_LOG;
+struct __hdr; typedef struct __hdr HDR;
+struct __log; typedef struct __log LOG;
+struct __log_persist; typedef struct __log_persist LOGP;
+
+#define LFPREFIX "log." /* Log file name prefix. */
+#define LFNAME "log.%010d" /* Log file name template. */
+#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */
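+/*
+ * Editorial sketch (assumed usage, not part of the original source): the
+ * templates above are meant for printf-style formatting, so the 37th log
+ * file would be named "log.0000000037":
+ *
+ *	char name[sizeof(LFPREFIX) + 10];
+ *	(void)snprintf(name, sizeof(name), LFNAME, 37);
+ */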
+
+#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */
+#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */
+#define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */
+
+/*
+ * The per-process table that maps log file-id's to DB structures.
+ */
+typedef struct __db_entry {
+ DB *dbp; /* Open dbp for this file id. */
+ int deleted; /* File was not found during open. */
+} DB_ENTRY;
+
+/*
+ * DB_LOG
+ * Per-process log structure.
+ */
+struct __db_log {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ DB_MUTEX *mutexp; /* Mutex for thread protection. */
+
+ DB_ENTRY *dbentry; /* Recovery file-id mapping. */
+#define DB_GROW_SIZE 64
+ int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */
+
+/*
+ * These fields are always accessed while the region lock is held, so they do
+ * not have to be protected by the thread lock as well, OR, they are only used
+ * when threads are not being used, i.e. most cursor operations are disallowed
+ * on threaded logs.
+ */
+ u_int32_t lfname; /* Log file "name". */
+ DB_FH lfh; /* Log file handle. */
+
+ u_int8_t *bufp; /* Region buffer. */
+
+/* These fields are not protected. */
+ DB_ENV *dbenv; /* Reference to error information. */
+ REGINFO reginfo; /* Region information. */
+
+#define DBLOG_RECOVER 0x01 /* We are in recovery. */
+#define DBLOG_FORCE_OPEN 0x02 /* Force the DB open even if it appears
+ * to be deleted.
+ */
+ u_int32_t flags;
+};
+
+/*
+ * HDR --
+ * Log record header.
+ */
+struct __hdr {
+ u_int32_t prev; /* Previous offset. */
+ u_int32_t len; /* Current length. */
+ u_int8_t chksum[DB_MAC_KEY]; /* Current checksum. */
+ u_int8_t iv[DB_IV_BYTES]; /* IV */
+ u_int32_t orig_size; /* Original size of log record */
+ /* !!! - 'size' is not written to log, must be last in hdr */
+ size_t size; /* Size of header to use */
+};
+
+/*
+ * We use HDR internally, and then when we write out, we write out
+ * prev, len, and then a 4-byte checksum if normal operation or
+ * a crypto-checksum and IV and original size if running in crypto
+ * mode. We must store the original size in case we pad. Set the
+ * size when we set up the header. We compute a DB_MAC_KEY size
+ * checksum regardless, but we can safely just use the first 4 bytes.
+ */
+#define HDR_NORMAL_SZ 12
+#define HDR_CRYPTO_SZ 12 + DB_MAC_KEY + DB_IV_BYTES
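+/*
+ * Editorial note: HDR_NORMAL_SZ is 12 because the normal on-disk header is
+ * prev (4 bytes) + len (4 bytes) + a 4-byte checksum; the crypto header is
+ * prev + len + orig_size (12 bytes in total) plus the full DB_MAC_KEY-byte
+ * checksum and the DB_IV_BYTES-byte IV.
+ */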
+
+struct __log_persist {
+ u_int32_t magic; /* DB_LOGMAGIC */
+ u_int32_t version; /* DB_LOGVERSION */
+
+ u_int32_t log_size; /* Log file size. */
+ u_int32_t mode; /* Log file mode. */
+};
+
+/*
+ * LOG --
+ * Shared log region. One of these is allocated in shared memory,
+ * and describes the log.
+ */
+struct __log {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * flush_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX fq_mutex; /* Mutex guarding file name list. */
+
+ LOGP persist; /* Persistent information. */
+
+ SH_TAILQ_HEAD(__fq1) fq; /* List of file names. */
+ int32_t fid_max; /* Max fid allocated. */
+ roff_t free_fid_stack; /* Stack of free file ids. */
+ int free_fids; /* Height of free fid stack. */
+ int free_fids_alloced; /* Number of free fid slots alloc'ed. */
+
+ /*
+ * The lsn LSN is the file offset that we're about to write and which
+ * we will return to the user.
+ */
+ DB_LSN lsn; /* LSN at current file offset. */
+
+ /*
+ * The f_lsn LSN is the LSN (returned to the user) that "owns" the
+ * first byte of the buffer. If the record associated with the LSN
+ * spans buffers, it may not reflect the physical file location of
+ * the first byte of the buffer.
+ */
+ DB_LSN f_lsn; /* LSN of first byte in the buffer. */
+ size_t b_off; /* Current offset in the buffer. */
+ u_int32_t w_off; /* Current write offset in the file. */
+ u_int32_t len; /* Length of the last record. */
+
+ /*
+ * The s_lsn LSN is the last LSN that we know is on disk, not just
+ * written, but synced. This field is protected by the flush mutex
+ * rather than by the region mutex.
+ */
+ int in_flush; /* Log flush in progress. */
+ roff_t flush_mutex_off; /* Mutex guarding flushing. */
+ DB_LSN s_lsn; /* LSN of the last sync. */
+
+ DB_LOG_STAT stat; /* Log statistics. */
+
+ /*
+ * The waiting_lsn is used by the replication system. It is the
+ * first LSN that we are holding without putting in the log, because
+ * we received one or more log records out of order. Associated with
+ * the waiting_lsn is the number of log records that we still have to
+ * receive before we decide that we should request it again.
+ */
+ DB_LSN waiting_lsn; /* First log record after a gap. */
+ DB_LSN verify_lsn; /* LSN we are waiting to verify. */
+ u_int32_t wait_recs; /* Records to wait before requesting. */
+ u_int32_t rcvd_recs; /* Records received while waiting. */
+
+ /*
+ * The ready_lsn is also used by the replication system. It is the
+ * next LSN we expect to receive. It's normally equal to "lsn",
+ * except at the beginning of a log file, at which point it's set
+ * to the LSN of the first record of the new file (after the
+ * header), rather than to 0.
+ */
+ DB_LSN ready_lsn;
+
+ /*
+ * During initialization, the log system walks forward through the
+ * last log file to find its end. If it runs into a checkpoint
+ * while it's doing so, it caches it here so that the transaction
+ * system doesn't need to walk through the file again on its
+ * initialization.
+ */
+ DB_LSN cached_ckp_lsn;
+
+ roff_t buffer_off; /* Log buffer offset in the region. */
+ u_int32_t buffer_size; /* Log buffer size. */
+
+ u_int32_t log_size; /* Log file's size. */
+ u_int32_t log_nsize; /* Next log file's size. */
+
+ u_int32_t ncommit; /* Number of txns waiting to commit. */
+
+ DB_LSN t_lsn; /* LSN of first commit */
+ SH_TAILQ_HEAD(__commit) commits;/* list of txns waiting to commit. */
+ SH_TAILQ_HEAD(__free) free_commits;/* free list of commit structs. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define LG_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
+
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
+
+/*
+ * __db_commit structure --
+ * One of these is allocated for each transaction waiting
+ * to commit.
+ */
+struct __db_commit {
+ DB_MUTEX mutex; /* Mutex for txn to wait on. */
+ DB_LSN lsn; /* LSN of commit record. */
+ SH_TAILQ_ENTRY links; /* Either on free or waiting list. */
+
+#define DB_COMMIT_FLUSH 0x0001 /* Flush the log when you wake up. */
+ u_int32_t flags;
+};
+
+/*
+ * FNAME --
+ * File name and id.
+ */
+struct __fname {
+ SH_TAILQ_ENTRY q; /* File name queue. */
+
+ int32_t id; /* Logging file id. */
+ DBTYPE s_type; /* Saved DB type. */
+
+ roff_t name_off; /* Name offset. */
+ db_pgno_t meta_pgno; /* Page number of the meta page. */
+ u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */
+
+ u_int32_t create_txnid; /*
+ * Txn ID of the DB create, stored so
+ * we can log it at register time.
+ */
+};
+
+/* File open/close register log record opcodes. */
+#define LOG_CHECKPOINT 1 /* Checkpoint: file name/id dump. */
+#define LOG_CLOSE 2 /* File close. */
+#define LOG_OPEN 3 /* File open. */
+#define LOG_RCLOSE 4 /* File close after recovery. */
+
+#define CHECK_LSN(redo, cmp, lsn, prev) \
+ DB_ASSERT(!DB_REDO(redo) || \
+ (cmp) >= 0 || IS_NOT_LOGGED_LSN(*lsn)); \
+ if (DB_REDO(redo) && (cmp) < 0 && !IS_NOT_LOGGED_LSN(*(lsn))) { \
+ __db_err(dbenv, \
+ "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", \
+ (u_long)(lsn)->file, (u_long)(lsn)->offset, \
+ (u_long)(prev)->file, (u_long)(prev)->offset); \
+ goto out; \
+ }
+
+/*
+ * Status codes indicating the validity of a log file examined by
+ * __log_valid().
+ */
+typedef enum {
+ DB_LV_INCOMPLETE,
+ DB_LV_NONEXISTENT,
+ DB_LV_NORMAL,
+ DB_LV_OLD_READABLE,
+ DB_LV_OLD_UNREADABLE
+} logfile_validity;
+
+#include "dbinc_auto/dbreg_auto.h"
+#include "dbinc_auto/dbreg_ext.h"
+#include "dbinc_auto/log_ext.h"
+#endif /* !_LOG_H_ */
diff --git a/libdb/dbinc/mp.h b/libdb/dbinc/mp.h
new file mode 100644
index 0000000..f99f735
--- /dev/null
+++ b/libdb/dbinc/mp.h
@@ -0,0 +1,293 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_MP_H_
+#define _DB_MP_H_
+
+struct __bh; typedef struct __bh BH;
+struct __db_mpool_hash; typedef struct __db_mpool_hash DB_MPOOL_HASH;
+struct __db_mpreg; typedef struct __db_mpreg DB_MPREG;
+struct __mpool; typedef struct __mpool MPOOL;
+
+ /* We require at least 20KB of cache. */
+#define DB_CACHESIZE_MIN (20 * 1024)
+
+typedef enum {
+ DB_SYNC_ALLOC, /* Flush for allocation. */
+ DB_SYNC_CACHE, /* Checkpoint or flush entire cache. */
+ DB_SYNC_FILE, /* Flush file. */
+ DB_SYNC_TRICKLE /* Trickle sync. */
+} db_sync_op;
+
+/*
+ * DB_MPOOL --
+ * Per-process memory pool structure.
+ */
+struct __db_mpool {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ /* List of pgin/pgout routines. */
+ LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
+
+ /* List of DB_MPOOLFILE's. */
+ TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;
+
+ /*
+ * The dbenv, nreg and reginfo fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ DB_ENV *dbenv; /* Enclosing environment. */
+
+ u_int32_t nreg; /* N underlying cache regions. */
+ REGINFO *reginfo; /* Underlying cache regions. */
+};
+
+/*
+ * DB_MPREG --
+ * DB_MPOOL registry of pgin/pgout functions.
+ */
+struct __db_mpreg {
+ LIST_ENTRY(__db_mpreg) q; /* Linked list. */
+
+ int32_t ftype; /* File type. */
+ /* Pgin, pgout routines. */
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+};
+
+/*
+ * NCACHE --
+ * Select a cache based on the file and the page number. Assumes accesses
+ * are uniform across pages, which is probably OK. What we really want to
+ * avoid is anything that puts all pages from any single file in the same
+ * cache, as we expect that file access will be bursty, and to avoid
+ * putting all page number N pages in the same cache as we expect access
+ * to the metapages (page 0) and the root of a btree (page 1) to be much
+ * more frequent than a random data page.
+ */
+#define NCACHE(mp, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) >> 3)) % ((MPOOL *)mp)->nreg)
+
+/*
+ * NBUCKET --
+ * We make the assumption that early pages of the file are more likely
+ * to be retrieved than the later pages, which means the top bits will
+ * be more interesting for hashing as they're less likely to collide.
+ * That said, 512 8KB pages represent a 4MB file, so only reasonably
+ * large files will have page numbers with anything other than the bottom 9
+ * bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the
+ * page, since that should also be unique for the page. We don't want
+ * to do anything very fancy -- speed is more important to us than using
+ * good hashing.
+ */
+#define NBUCKET(mc, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
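+/*
+ * Editorial worked example (values chosen for illustration): with 3 cache
+ * regions, page 10 of a file whose MPOOLFILE lives at region offset 0x1200
+ * is placed in cache NCACHE = (10 ^ (0x1200 >> 3)) % 3 = 586 % 3 = 1, and is
+ * then hashed into a bucket of that cache with NBUCKET.
+ */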
+
+/*
+ * MPOOL --
+ * Shared memory pool region.
+ */
+struct __mpool {
+ /*
+ * The memory pool can be broken up into individual pieces/files.
+ * Not what we would have liked, but on Solaris you can allocate
+ * only a little more than 2GB of memory in a contiguous chunk,
+ * and I expect to see more systems with similar issues.
+ *
+ * While this structure is duplicated in each piece of the cache,
+ * the first of these pieces/files describes the entire pool, while the
+ * remaining ones each describe only their own piece of the cache.
+ */
+
+ /*
+ * The lsn field and list of underlying MPOOLFILEs are thread protected
+ * by the region lock.
+ */
+ DB_LSN lsn; /* Maximum checkpoint LSN. */
+
+ SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */
+
+ /*
+ * The nreg, regids and maint_off fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ u_int32_t nreg; /* Number of underlying REGIONS. */
+ roff_t regids; /* Array of underlying REGION Ids. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* Maintenance information offset */
+#endif
+
+ /*
+ * The following structure fields only describe the per-cache portion
+ * of the region.
+ *
+ * The htab and htab_buckets fields are not thread protected as they
+ * are initialized during mpool creation, and not modified again.
+ *
+ * The last_checked and lru_count fields are thread protected by
+ * the region lock.
+ */
+ int htab_buckets; /* Number of hash table entries. */
+ roff_t htab; /* Hash table offset. */
+ u_int32_t last_checked; /* Last bucket checked for free. */
+ u_int32_t lru_count; /* Counter for buffer LRU */
+
+ /*
+ * The stat fields are generally not thread protected, and cannot be
+ * trusted. Note that st_pages is an exception, and is always updated
+ * inside a region lock (although it is sometimes read outside of the
+ * region lock).
+ */
+ DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */
+};
+
+struct __db_mpool_hash {
+ DB_MUTEX hash_mutex; /* Per-bucket mutex. */
+
+ DB_HASHTAB hash_bucket; /* Head of bucket. */
+
+ u_int32_t hash_page_dirty;/* Count of dirty pages. */
+ u_int32_t hash_priority; /* Minimum priority of bucket buffer. */
+};
+
+/*
+ * The base mpool priority is 1/4th of the name space, or just under 2^30.
+ * When the LRU counter wraps, we shift everybody down to a base-relative
+ * value.
+ */
+#define MPOOL_BASE_DECREMENT (UINT32_T_MAX - (UINT32_T_MAX / 4))
+
+/*
+ * Mpool priorities from low to high. Defined in terms of fractions of the
+ * buffers in the pool.
+ */
+#define MPOOL_PRI_VERY_LOW -1 /* Dead duck. Check and set to 0. */
+#define MPOOL_PRI_LOW -2 /* Low. */
+#define MPOOL_PRI_DEFAULT 0 /* No adjustment -- special case.*/
+#define MPOOL_PRI_HIGH 10 /* With the dirty buffers. */
+#define MPOOL_PRI_DIRTY 10 /* Dirty gets a 10% boost. */
+#define MPOOL_PRI_VERY_HIGH 1 /* Add number of buffers in pool. */
+
+/*
+ * MPOOLFILE_IGNORE --
+ * Discard an MPOOLFILE and any buffers it references: update the flags
+ * so we never try to write buffers associated with the file, nor can we
+ * find it when looking for files to join. In addition, clear the ftype
+ * field, there's no reason to post-process pages, they can be discarded
+ * by any thread.
+ *
+ * Expects the MPOOLFILE mutex to be held.
+ */
+#define MPOOLFILE_IGNORE(mfp) { \
+ (mfp)->ftype = 0; \
+ F_SET(mfp, MP_DEADFILE); \
+}
+
+/*
+ * MPOOLFILE --
+ * Shared DB_MPOOLFILE information.
+ */
+struct __mpoolfile {
+ DB_MUTEX mutex;
+
+ /* Protected by MPOOLFILE mutex. */
+ u_int32_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */
+ u_int32_t block_cnt; /* Ref count: blocks in cache. */
+
+ roff_t path_off; /* File name location. */
+
+ /* Protected by mpool cache 0 region lock. */
+ SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */
+ db_pgno_t last_pgno; /* Last page in the file. */
+ db_pgno_t orig_last_pgno; /* Original last page in the file. */
+
+ /*
+ * None of the following fields are thread protected.
+ *
+ * There are potential races with the ftype field because it's read
+ * without holding a lock. However, it has to be set before adding
+ * any buffers to the cache that depend on it being set, so there
+ * would need to be incorrect operation ordering to have a problem.
+ *
+ * There are potential races with the priority field because it's read
+ * without holding a lock. However, a collision is unlikely and if it
+ * happens is of little consequence.
+ *
+ * We do not protect the statistics in "stat" because of the cost of
+ * the mutex in the get/put routines. There is a chance that a count
+ * will get lost.
+ *
+ * The remaining fields are initialized at open and never subsequently
+ * modified, except for the MP_DEADFILE, which is only set and never
+ * unset. (If there was more than one flag that was subsequently set,
+ * there might be a race, but with a single flag there can't be.)
+ */
+ int32_t ftype; /* File type. */
+
+ int32_t priority; /* Priority when unpinning buffer. */
+
+ DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */
+
+ int32_t lsn_off; /* Page's LSN offset. */
+ u_int32_t clear_len; /* Bytes to clear on page create. */
+
+ roff_t fileid_off; /* File ID string location. */
+
+ roff_t pgcookie_len; /* Pgin/pgout cookie length. */
+ roff_t pgcookie_off; /* Pgin/pgout cookie location. */
+
+#define MP_CAN_MMAP 0x01 /* If the file can be mmap'd. */
+#define MP_DEADFILE 0x02 /* Dirty pages can simply be trashed. */
+#define MP_DIRECT 0x04 /* No OS buffering. */
+#define MP_EXTENT 0x08 /* Extent file. */
+#define MP_TEMP 0x10 /* Backing file is a temporary. */
+#define MP_UNLINK 0x20 /* Unlink file on last close. */
+ u_int32_t flags;
+};
+
+/*
+ * BH --
+ * Buffer header.
+ */
+struct __bh {
+ DB_MUTEX mutex; /* Buffer thread/process lock. */
+
+ u_int16_t ref; /* Reference count. */
+ u_int16_t ref_sync; /* Sync wait-for reference count. */
+
+#define BH_CALLPGIN 0x001 /* Convert the page before use. */
+#define BH_DIRTY 0x002 /* Page was modified. */
+#define BH_DIRTY_CREATE 0x004 /* Page created, must be written. */
+#define BH_DISCARD 0x008 /* Page is useless. */
+#define BH_LOCKED 0x010 /* Page is locked (I/O in progress). */
+#define BH_TRASH 0x020 /* Page is garbage. */
+ u_int16_t flags;
+
+ u_int32_t priority; /* LRU priority. */
+ SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */
+
+ db_pgno_t pgno; /* Underlying MPOOLFILE page number. */
+ roff_t mf_offset; /* Associated MPOOLFILE offset. */
+
+ /*
+ * !!!
+ * This array must be at least size_t aligned -- the DB access methods
+ * put PAGE and other structures into it, and then access them directly.
+ * (We guarantee size_t alignment to applications in the documentation,
+ * too.)
+ */
+ u_int8_t buf[1]; /* Variable length data. */
+};
+
+#include "dbinc_auto/mp_ext.h"
+#endif /* !_DB_MP_H_ */
diff --git a/libdb/dbinc/mutex.h b/libdb/dbinc/mutex.h
new file mode 100644
index 0000000..e42662c
--- /dev/null
+++ b/libdb/dbinc/mutex.h
@@ -0,0 +1,969 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_MUTEX_H_
+#define _DB_MUTEX_H_
+
+/*
+ * Some of the Berkeley DB ports require single-threading at various
+ * places in the code. In those cases, these #defines will be set.
+ */
+#define DB_BEGIN_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD
+
+/*********************************************************************
+ * POSIX.1 pthreads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PTHREADS
+#include <pthread.h>
+
+#define MUTEX_FIELDS \
+ pthread_mutex_t mutex; /* Mutex. */ \
+ pthread_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris lwp threads interface.
+ *
+ * !!!
+ * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
+ * which are available), for two reasons. First, the Solaris C library
+ * includes versions of both the UI and POSIX thread mutex interfaces, but
+ * they are broken in that they don't support inter-process locking, and
+ * there's no way to detect it, e.g., calls to configure the mutexes for
+ * inter-process locking succeed without error. So, we use LWP mutexes so
+ * that we don't fail in fairly undetectable ways because the application
+ * wasn't linked with the appropriate threads library. Second, there were
+ * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
+ * before loading the libthread/libpthread threads libraries (e.g., by using
+ * dlopen to load the DB library), the pwrite64 interface would be translated
+ * into a call to pwrite and DB would drop core.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+/*
+ * XXX
+ * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
+ * Solaris manual page as the correct include to use, it causes the Solaris
+ * compiler on SunOS 2.6 to fail.
+ */
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ lwp_mutex_t mutex; /* Mutex. */ \
+ lwp_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris/Unixware threads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UI_THREADS
+#include <thread.h>
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ mutex_t mutex; /* Mutex. */ \
+ cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * AIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
+#include <sys/atomic_op.h>
+typedef int tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!_check_lock(x, 0, 1))
+#define MUTEX_UNSET(x) _clear_lock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (msemaphore).
+ *
+ * !!!
+ * Check for HPPA as a special case, because it requires unusual alignment,
+ * and doesn't support semaphores in malloc(3) or shmget(2) memory.
+ *
+ * !!!
+ * Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single
+ * process makes two msem_lock() calls in a row, the second one returns an
+ * error. We depend on the fact that we can lock against ourselves in the
+ * locking subsystem, where we set up a mutex so that we can block ourselves.
+ * Tested on OSF1 v4.0.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+#define MUTEX_NO_MALLOC_LOCKS
+#define MUTEX_NO_SHMGET_LOCKS
+
+#define MUTEX_ALIGN 16
+#endif
+
+#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+#include <sys/mman.h>
+typedef msemaphore tsl_t;
+
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN sizeof(int)
+#endif
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
+#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
+#define MUTEX_UNSET(x) msem_unlock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Plan 9 library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PLAN9
+typedef Lock tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+
+#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
+#define MUTEX_SET(x) canlock(x)
+#define MUTEX_UNSET(x) unlock(x)
+#endif
+
+/*********************************************************************
+ * Reliant UNIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+#include <ulocks.h>
+typedef spinlock_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (initspin(x, 1), 0)
+#define MUTEX_SET(x) (cspinlock(x) == 0)
+#define MUTEX_UNSET(x) spinunlock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (POSIX 1003.1 sema_XXX).
+ *
+ * !!!
+ * Never selected by autoconfig in this release (semaphore calls are known
+ * to not work in Solaris 5.5).
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SEMA_INIT
+#include <synch.h>
+typedef sema_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_DESTROY(x) sema_destroy(x)
+#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
+#define MUTEX_SET(x) (sema_wait(x) == 0)
+#define MUTEX_UNSET(x) sema_post(x)
+#endif
+#endif
+
+/*********************************************************************
+ * SGI C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SGI_INIT_LOCK
+#include <abi_mutex.h>
+typedef abilock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (init_lock(x) != 0)
+#define MUTEX_SET(x) (!acquire_lock(x))
+#define MUTEX_UNSET(x) release_lock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * Solaris C library functions.
+ *
+ * !!!
+ * These are undocumented functions, but they're the only ones that work
+ * correctly as far as we know.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
+#include <sys/machlock.h>
+typedef lock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) _lock_try(x)
+#define MUTEX_UNSET(x) _lock_clear(x)
+#endif
+#endif
+
+/*********************************************************************
+ * VMS.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VMS
+#include <sys/mman.h>
+#include <builtins.h>
+typedef unsigned char tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifdef __ALPHA
+#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
+#else /* __VAX */
+#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
+#endif
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * VxWorks
+ * Use basic binary semaphores in VxWorks, as we currently do not need
+ * any special features. We do need the ability to single-thread the
+ * entire system, however, because VxWorks doesn't support the open(2)
+ * flag O_EXCL, the mechanism we normally use to single thread access
+ * when we're first looking for a DB environment.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VXWORKS
+#include "taskLib.h"
+typedef SEM_ID tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
+#define MUTEX_UNSET(tsl) (semGive((*tsl)))
+#define MUTEX_INIT(tsl) \
+ ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
+#define MUTEX_DESTROY(tsl) semDelete(*tsl)
+#endif
+
+/*
+ * Use the taskLock() mutex to eliminate a race where two tasks are
+ * trying to initialize the global lock at the same time.
+ */
+#undef DB_BEGIN_SINGLE_THREAD
+#define DB_BEGIN_SINGLE_THREAD \
+do { \
+ if (DB_GLOBAL(db_global_init)) \
+ (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \
+ else { \
+ taskLock(); \
+ if (DB_GLOBAL(db_global_init)) { \
+ taskUnlock(); \
+ (void)semTake(DB_GLOBAL(db_global_lock), \
+ WAIT_FOREVER); \
+ continue; \
+ } \
+ DB_GLOBAL(db_global_lock) = \
+ semBCreate(SEM_Q_FIFO, SEM_EMPTY); \
+ if (DB_GLOBAL(db_global_lock) != NULL) \
+ DB_GLOBAL(db_global_init) = 1; \
+ taskUnlock(); \
+ } \
+} while (DB_GLOBAL(db_global_init) == 0)
+#undef DB_END_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock))
+#endif
+
+/*********************************************************************
+ * Win16
+ *
+ * Win16 spinlocks are simple because we cannot possibly be preempted.
+ *
+ * !!!
+ * We should simplify this by always returning a no-need-to-lock lock
+ * when we initialize the mutex.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN16
+typedef unsigned int tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(tsl) (*(tsl) = 1)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Win32
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN32
+#define MUTEX_FIELDS \
+ LONG tas; \
+ LONG nwaiters; \
+ u_int32_t id; /* ID used for creating events */ \
+
+#if defined(LOAD_ACTUAL_MUTEX_CODE)
+#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * 68K/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/68K, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("tas %1; \n \
+ seq %0" \
+ : "=dm" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * ALPHA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 4
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/alpha. Should return 0 if it could not acquire the lock, 1 if
+ * the lock was acquired properly.
+ */
+#ifdef __GNUC__
+static inline int
+MUTEX_SET(tsl_t *tsl) {
+ register tsl_t *__l = tsl;
+ register tsl_t __r;
+ asm volatile(
+ "1: ldl_l %0,%2\n"
+ " blbs %0,2f\n"
+ " or $31,1,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ " br 3f\n"
+ "2: xor %0,%0\n"
+ "3:"
+ : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
+ return __r;
+}
+
+/*
+ * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
+ * might be necessary before unlocking.
+ */
+static inline int
+MUTEX_UNSET(tsl_t *tsl) {
+ asm volatile(" mb\n");
+ return *tsl = 0;
+}
+#endif
+
+#ifdef __DECC
+#include <alpha/builtins.h>
+#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * ARM/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For arm/gcc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ int __r; \
+ asm volatile("swpb %0, %1, [%2]" \
+ : "=r" (__r) \
+ : "0" (1), "r" (tsl) \
+ : "memory" \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * HPPA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 16
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
+ * The 32-bit word used by that instruction must be 16-byte aligned. We could
+ * use the "aligned" attribute in GCC but that doesn't work for stack variables.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = -1)
+#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
+#endif
+#endif
+
+/*********************************************************************
+ * IA64/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/ia64, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ long __r; \
+ asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
+ __r ^ 1; \
+})
+
+/*
+ * Store through a "volatile" pointer so we get a store with "release"
+ * semantics.
+ */
+#define MUTEX_UNSET(tsl) (*(volatile unsigned char *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * PowerPC/gcc assembly.
+ *********************************************************************/
+#if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) || \
+    defined(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
+typedef u_int32_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PowerPC does a sort of pseudo-atomic locking. You set up a
+ * 'reservation' on a chunk of memory containing a mutex by loading the
+ * mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary)
+ * value, you then try storing into it with STWCX. If no other process or
+ * thread broke your 'reservation' by modifying the memory containing the
+ * mutex, then the STWCX succeeds; otherwise it fails and you try to get
+ * a reservation again.
+ *
+ * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
+ * entire cache line, normally 32 bytes, aligned naturally. If the mutex
+ * lives near data that gets changed a lot, there's a chance that you'll
+ * see more broken reservations than you might otherwise. The only
+ * situation in which this might be a problem is if one processor is
+ * beating on a variable in the same cache block as the mutex while another
+ * processor tries to acquire the mutex. That's bad news regardless
+ * because of the way it bashes caches, but if you can't guarantee that a
+ * mutex will reside in a relatively quiescent cache line, you might
+ * consider padding the mutex to force it to live in a cache line by
+ * itself. No, you aren't guaranteed that cache lines are 32 bytes. Some
+ * embedded processors use 16-byte cache lines, while some 64-bit
+ * processors use 128-byte cache lines. But assuming a 32-byte cache line
+ * won't get you into trouble for now.
+ *
+ * If mutex locking is a bottleneck, then you can speed it up by adding a
+ * regular LWZ load before the LWARX load, so that you can test for the
+ * common case of a locked mutex without wasting cycles making a reservation.
+ *
+ * 'set' mutexes have the value 1, like on Intel; the returned value from
+ * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
+ *
+ * Mutexes on Mac OS X work the same way as the standard PowerPC version, but
+ * the assembler syntax is subtly different -- the standard PowerPC version
+ * assembles but doesn't work correctly. This version makes (unnecessary?)
+ * use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
+ * ___db_mutex_set label is used as a function name.
+ */
+#ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
+extern int __db_mutex_set __P((volatile tsl_t *));
+void
+__db_mutex_tas_dummy()
+{
+ __asm__ __volatile__(" \n\
+ .globl ___db_mutex_set \n\
+___db_mutex_set: \n\
+ lwarx r5,0,r3 \n\
+ cmpwi r5,0 \n\
+ bne fail \n\
+ addi r5,r5,1 \n\
+ stwcx. r5,0,r3 \n\
+ beq success \n\
+fail: \n\
+ li r3,0 \n\
+ blr \n\
+success: \n\
+ li r3,1 \n\
+ blr");
+}
+#define MUTEX_SET(tsl) __db_mutex_set(tsl)
+#endif
+#ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
+#define MUTEX_SET(tsl) ({ \
+ int __one = 1; \
+ int __r; \
+ tsl_t *__l = (tsl); \
+ asm volatile (" \
+0: \
+ lwarx %0,0,%1; \
+ cmpwi %0,0; \
+ bne 1f; \
+ stwcx. %2,0,%1; \
+ bne- 0b; \
+ isync; \
+1:" \
+ : "=&r" (__r) \
+ : "r" (__l), "r" (__one)); \
+ !(__r & 1); \
+})
+#endif
+#define MUTEX_UNSET(tsl) ({ \
+ asm volatile("lwsync":::"memory"); \
+ (*(tsl) = 0); \
+ })
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * S/390 32-bit assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
+typedef int tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/S390, 0 is clear, 1 is set.
+ */
+static inline int
+MUTEX_SET(tsl_t *tsl) { \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile( \
+ " la 1,%1\n" \
+ " lhi 0,1\n" \
+ " l %0,%1\n" \
+ "0: cs %0,0,0(1)\n" \
+ " jl 0b" \
+ : "=&d" (__r), "+m" (*__l) \
+ : : "0", "1", "cc"); \
+ return !__r; \
+}
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * SCO/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * UnixWare has threads in libthread, but OpenServer doesn't (yet).
+ *
+ * For cc/x86, 0 is clear, 1 is set.
+ */
+
+#if defined(__USLC__)
+asm int
+_tsl_set(void *tsl)
+{
+%mem tsl
+ movl tsl, %ecx
+ movl $1, %eax
+ lock
+ xchgb (%ecx),%al
+ xorl $1,%eax
+}
+#endif
+
+#define MUTEX_SET(tsl) _tsl_set(tsl)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * Sparc/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ *
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address) and loads its contents into its
+ * second argument (a register) and atomically sets the contents the location
+ * specified by its first argument to a byte of 1s. (The value in the second
+ * argument is never read, but only overwritten.)
+ *
+ * The stbar is needed for v8, and is implemented as membar #sync on v9,
+ * so is functional there as well. For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on. Some
+ * operating systems notice and skip this instruction in the fault handler.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ register tsl_t __r; \
+ __asm__ volatile \
+ ("ldstub [%1],%0; stbar" \
+ : "=r"( __r) : "r" (__l)); \
+ !__r; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * UTS/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
+typedef int tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!uts_lock(x, 1))
+#define MUTEX_UNSET(x) (*(x) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * MIPS/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
+typedef unsigned int tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/MIPS.
+ */
+#define MUTEX_SET(tsl) ({ \
+ tsl_t tmp, res; \
+ register tsl_t *__l = (tsl); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\t\t# test_and_set_bit\n" \
+ "1:\tll\t%0, %1\n\t" \
+ "ori\t%2, %0, 1\n\t" \
+ "sc\t%2, %1\n\t" \
+ "beqz\t%2, 1b\n\t" \
+ " andi\t%2, %0, 1\n\t" \
+ "sync\n\t" \
+ ".set\treorder" \
+ : "=&r" (tmp), "=m" (*__l), "=&r" (res) \
+ : "m" (*__l) \
+ : "memory"); \
+ (res ^ 1) & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * x86/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/x86, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
+ : "=&a" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * x86_64/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/x86-64, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("movq $1,%%rax; lock; xchgb %1,%%al; xorq $1,%%rax"\
+ : "=&a" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * alphalinux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+typedef unsigned long int tsl_t;
+
+#define MUTEX_ALIGN 8
+#endif
+
+/*********************************************************************
+ * sparc32linux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+typedef unsigned char tsl_t;
+#endif
+
+/*********************************************************************
+ * sparc64linux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
+typedef unsigned char tsl_t;
+#endif
+
+/*********************************************************************
+ * s390linux/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_S390_LINUX_ASSEMBLY
+typedef volatile int tsl_t;
+#endif
+
+/*
+ * Mutex alignment defaults to one byte.
+ *
+ * !!!
+ * Various systems require different alignments for mutexes (the worst we've
+ * seen so far is 16 bytes on some HP architectures). Malloc(3) is assumed
+ * to return reasonable alignment; all other mutex users must ensure proper
+ * alignment locally.
+ */
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN 1
+#endif
+
+/*
+ * Mutex destruction defaults to a no-op.
+ */
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifndef MUTEX_DESTROY
+#define MUTEX_DESTROY(x)
+#endif
+#endif
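+/*
+ * Editorial sketch (not part of the original source): on ports that supply
+ * a test-and-set tsl_t, the primitives above are composed roughly as below
+ * (the real logic, with spin counts and blocking, lives in the tas mutex
+ * code, e.g. the __db_tas_mutex_* functions). MUTEX_INIT returns 0 on
+ * success and MUTEX_SET returns non-zero only when the lock was acquired:
+ *
+ *	tsl_t lock;
+ *	if (MUTEX_INIT(&lock) != 0)
+ *		return (error);
+ *	while (!MUTEX_SET(&lock))
+ *		;			(spin or yield until acquired)
+ *	(critical section)
+ *	MUTEX_UNSET(&lock);
+ *	MUTEX_DESTROY(&lock);
+ */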
+
+/*
+ * !!!
+ * These defines are separated into the u_int8_t flags stored in the
+ * mutex below, and the 32 bit flags passed to __db_mutex_setup.
+ * But they must co-exist and not overlap. Flags to __db_mutex_setup are:
+ *
+ * MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
+ * The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
+ * you use this flag. If this flag is not set, the 'ptr' arg is
+ * a DB_MUTEX *.
+ * MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
+ * Otherwise the mutex will be recorded by default. If you set
+ * this you need to understand why you don't need it recorded. The
+ * *only* ones not recorded are those that are part of region structures
+ * that only get destroyed when the regions are destroyed.
+ * MUTEX_NO_RLOCK - Explicitly do not lock the given region otherwise
+ * the region will be locked by default.
+ * MUTEX_SELF_BLOCK - Set if self blocking mutex.
+ * MUTEX_THREAD - Set if mutex is a thread-only mutex.
+ */
+#define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
+#define MUTEX_INITED 0x002 /* Mutex is successfully initialized */
+#define MUTEX_MPOOL 0x004 /* Allocated from mpool. */
+#define MUTEX_SELF_BLOCK 0x008 /* Must block self. */
+/* Flags only, may be larger than 0xff. */
+#define MUTEX_ALLOC 0x00000100 /* Allocate and init a mutex */
+#define MUTEX_NO_RECORD 0x00000200 /* Do not record lock */
+#define MUTEX_NO_RLOCK 0x00000400 /* Do not acquire region lock */
+#define MUTEX_THREAD 0x00000800 /* Thread-only mutex. */
+
+/* Mutex. */
+struct __mutex_t {
+#ifdef HAVE_MUTEX_THREADS
+#ifdef MUTEX_FIELDS
+ MUTEX_FIELDS
+#else
+ tsl_t tas; /* Test and set. */
+#endif
+ u_int32_t spins; /* Spins before block. */
+ u_int32_t locked; /* !0 if locked. */
+#else
+ u_int32_t off; /* Byte offset to lock. */
+ u_int32_t pid; /* Lock holder: 0 or process pid. */
+#endif
+ u_int32_t mutex_set_wait; /* Granted after wait. */
+ u_int32_t mutex_set_nowait; /* Granted without waiting. */
+ u_int32_t mutex_set_spin; /* Granted without spinning. */
+ u_int32_t mutex_set_spins; /* Total number of spins. */
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t reg_off; /* Shared lock info offset. */
+#endif
+
+ u_int8_t flags; /* MUTEX_XXX */
+};
+
+/* Redirect calls to the correct functions. */
+#ifdef HAVE_MUTEX_THREADS
+#if defined(HAVE_MUTEX_PTHREADS) || \
+ defined(HAVE_MUTEX_SOLARIS_LWP) || \
+ defined(HAVE_MUTEX_UI_THREADS)
+#define __db_mutex_init_int(a, b, c, d) __db_pthread_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
+#elif defined(HAVE_MUTEX_WIN32)
+#define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_win32_mutex_destroy(a)
+#else
+#define __db_mutex_init_int(a, b, c, d) __db_tas_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_tas_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
+#endif
+#else
+#define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
+#define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
+#endif
+
+/* Redirect system resource calls to correct functions */
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
+#define __db_mutex_init(a, b, c, d, e, f) \
+ __db_shreg_mutex_init(a, b, c, d, e, f)
+#else
+#define __db_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b)
+#define __db_mutex_init(a, b, c, d, e, f) __db_mutex_init_int(a, b, c, d)
+#endif
+
+/*
+ * Lock/unlock a mutex. If the mutex was marked as uninteresting, the thread
+ * of control can proceed without it.
+ *
+ * If the lock is for threads-only, then it was optionally not allocated and
+ * file handles aren't necessary, as threaded applications aren't supported by
+ * fcntl(2) locking.
+ */
+#ifdef DIAGNOSTIC
+ /*
+ * XXX
+ * We want to switch threads as often as possible. Yield every time
+ * we get a mutex to ensure contention.
+ */
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0); \
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) \
+ __os_yield(NULL, 1);
+#else
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_lock(dbenv, mp);
+#endif
+#define MUTEX_UNLOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_unlock(dbenv, mp);
+#define MUTEX_THREAD_LOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_LOCK(dbenv, mp)
+#define MUTEX_THREAD_UNLOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_UNLOCK(dbenv, mp)
+
+/*
+ * We use a single file descriptor for fcntl(2) locking, and (generally) the
+ * object's offset in a shared region as the byte that we're locking. So,
+ * there's a (remote) possibility that two objects might have the same offsets
+ * such that the locks could conflict, resulting in deadlock. To avoid this
+ * possibility, we offset the region offset by a small integer value, using a
+ * different offset for each subsystem's locks. Since all region objects are
+ * suitably aligned, the offset guarantees that we don't collide with another
+ * region's objects.
+ */
+#define DB_FCNTL_OFF_GEN 0 /* Everything else. */
+#define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
+#define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
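+/*
+ * Editorial sketch (assumed usage, not part of the original source): an
+ * fcntl(2)-based mutex at region offset "off" would be acquired roughly as
+ * follows, blocking until the single byte at that offset is granted:
+ *
+ *	struct flock fl;
+ *	memset(&fl, 0, sizeof(fl));
+ *	fl.l_type = F_WRLCK;
+ *	fl.l_whence = SEEK_SET;
+ *	fl.l_start = off;		(byte offset being locked)
+ *	fl.l_len = 1;			(lock exactly one byte)
+ *	(void)fcntl(fd, F_SETLKW, &fl);
+ */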
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * When the underlying mutexes require library (most likely heap) or system
+ * resources, we have to clean up when we discard mutexes (for the library
+ * resources) and both when discarding mutexes and after application failure
+ * (for the mutexes requiring system resources). This violates the rule that
+ * we never look at a shared region after application failure, but we've no
+ * other choice. In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
+ * set.
+ *
+ * To support mutex release after application failure, allocate thread-handle
+ * mutexes in shared memory instead of in the heap. The number of slots we
+ * allocate for this purpose isn't configurable, but this tends to be an issue
+ * only on embedded systems where we don't expect large server applications.
+ */
+#define DB_MAX_HANDLES 100 /* Mutex slots for handles. */
+#endif
+#endif /* !_DB_MUTEX_H_ */
diff --git a/libdb/dbinc/os.h b/libdb/dbinc/os.h
new file mode 100644
index 0000000..7bfd10b
--- /dev/null
+++ b/libdb/dbinc/os.h
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_OS_H_
+#define _DB_OS_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* DB filehandle. */
+struct __fh_t {
+#if defined(DB_WIN32)
+ HANDLE handle; /* Windows/32 file handle. */
+#endif
+ int fd; /* POSIX file descriptor. */
+ char *name; /* File name. */
+
+ u_int32_t pagesize; /* Underlying page size. */
+
+ u_int32_t log_size; /* XXX: Log file size. */
+
+ u_int32_t pgno; /* Last seek. */
+ u_int32_t pgsize;
+ u_int32_t offset;
+
+#define DB_FH_NOSYNC 0x01 /* Handle doesn't need to be sync'd. */
+#define DB_FH_UNLINK 0x02 /* Unlink on close */
+#define DB_FH_VALID 0x04 /* Handle is valid. */
+ u_int8_t flags;
+};
+
+/*
+ * We group certain seek/write calls into a single function so that we
+ * can use pread(2)/pwrite(2) where they're available.
+ */
+#define DB_IO_READ 1
+#define DB_IO_WRITE 2
+typedef struct __io_t {
+ DB_FH *fhp; /* I/O file handle. */
+ DB_MUTEX *mutexp; /* Mutex to lock. */
+ size_t pagesize; /* Page size. */
+ db_pgno_t pgno; /* Page number. */
+ u_int8_t *buf; /* Buffer. */
+ size_t bytes; /* Bytes read/written. */
+} DB_IO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#include "dbinc_auto/os_ext.h"
+#endif /* !_DB_OS_H_ */
diff --git a/libdb/dbinc/qam.h b/libdb/dbinc/qam.h
new file mode 100644
index 0000000..a16fffc
--- /dev/null
+++ b/libdb/dbinc/qam.h
@@ -0,0 +1,156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_QAM_H_
+#define _DB_QAM_H_
+
+/*
+ * QAM data elements: a status field and the data.
+ */
+typedef struct _qamdata {
+ u_int8_t flags; /* 00: delete bit. */
+#define QAM_VALID 0x01
+#define QAM_SET 0x02
+ u_int8_t data[1]; /* Record. */
+} QAMDATA;
+
+struct __queue; typedef struct __queue QUEUE;
+struct __qcursor; typedef struct __qcursor QUEUE_CURSOR;
+
+struct __qcursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Queue private part */
+
+ /* Per-thread information: queue private. */
+ db_recno_t recno; /* Current record number. */
+
+ u_int32_t flags;
+};
+
+typedef struct __mpfarray {
+ u_int32_t n_extent; /* Number of extents in table. */
+ u_int32_t low_extent; /* First extent open. */
+ u_int32_t hi_extent; /* Last extent open. */
+ struct __qmpf {
+ int pinref;
+ DB_MPOOLFILE *mpf;
+ } *mpfarray; /* Array of open extents. */
+} MPFARRAY;
+
+/*
+ * The in-memory, per-tree queue data structure.
+ */
+struct __queue {
+ db_pgno_t q_meta; /* Database meta-data page. */
+ db_pgno_t q_root; /* Database root page. */
+
+ int re_pad; /* Fixed-length padding byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ u_int32_t rec_page; /* records per page */
+ u_int32_t page_ext; /* Pages per extent */
+ MPFARRAY array1, array2; /* File arrays. */
+
+ /* Extent file configuration: */
+ DBT pgcookie; /* Initialized pgcookie. */
+ DB_PGINFO pginfo; /* Initialized pginfo struct. */
+
+ char *path; /* Space allocated to file pathname. */
+ char *name; /* The name of the file. */
+ char *dir; /* The dir of the file. */
+ int mode; /* Mode to open extents. */
+};
+
+/* Format for queue extent names. */
+#define QUEUE_EXTENT "%s%c__dbq.%s.%d"
+
+typedef struct __qam_filelist {
+ DB_MPOOLFILE *mpf;
+ u_int32_t id;
+} QUEUE_FILELIST;
+
+/*
+ * Calculate the page number of a recno
+ *
+ * Number of records per page =
+ * Divide the available space on the page by the record len + header.
+ *
+ * Page number for record =
+ * divide the physical record number by the records per page
+ * add the root page number
+ * For now the root page will always be 1, but we might want to change
+ * in the future (e.g. multiple fixed len queues per file).
+ *
+ * Index of record on page =
+ * physical record number, less the logical pno times records/page
+ */
+#define CALC_QAM_RECNO_PER_PAGE(dbp) \
+ (((dbp)->pgsize - QPAGE_SZ(dbp)) / \
+ ALIGN(((QUEUE *)(dbp)->q_internal)->re_len + \
+ sizeof(QAMDATA) - SSZA(QAMDATA, data), sizeof(u_int32_t)))
+
+#define QAM_RECNO_PER_PAGE(dbp) (((QUEUE*)(dbp)->q_internal)->rec_page)
+
+#define QAM_RECNO_PAGE(dbp, recno) \
+ (((QUEUE *)(dbp)->q_internal)->q_root \
+ + (((recno) - 1) / QAM_RECNO_PER_PAGE(dbp)))
+
+#define QAM_RECNO_INDEX(dbp, pgno, recno) \
+ (((recno) - 1) - (QAM_RECNO_PER_PAGE(dbp) \
+ * (pgno - ((QUEUE *)(dbp)->q_internal)->q_root)))
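+/*
+ * Editorial worked example (all values assumed for illustration): with a
+ * 4096-byte page, a 32-byte queue page header and re_len = 100, each record
+ * occupies ALIGN(100 + 1, 4) = 104 bytes, so a page holds
+ * (4096 - 32) / 104 = 39 records. Record number 100 then lives on page
+ * 1 + (100 - 1) / 39 = 3, at index (100 - 1) - 39 * (3 - 1) = 21.
+ */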
+
+#define QAM_GET_RECORD(dbp, page, index) \
+ ((QAMDATA *)((u_int8_t *)(page) + \
+ QPAGE_SZ(dbp) + (ALIGN(sizeof(QAMDATA) - SSZA(QAMDATA, data) + \
+ ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))
+
+#define QAM_AFTER_CURRENT(meta, recno) \
+ ((recno) > (meta)->cur_recno && \
+ ((meta)->first_recno <= (meta)->cur_recno || (recno) < (meta)->first_recno))
+
+#define QAM_BEFORE_FIRST(meta, recno) \
+ ((recno) < (meta)->first_recno && \
+ ((meta->first_recno <= (meta)->cur_recno || (recno) > (meta)->cur_recno)))
+
+#define QAM_NOT_VALID(meta, recno) \
+ (recno == RECNO_OOB || \
+ QAM_BEFORE_FIRST(meta, recno) || QAM_AFTER_CURRENT(meta, recno))
+
+/*
+ * Log opcodes for the mvptr routine.
+ */
+#define QAM_SETFIRST 0x01
+#define QAM_SETCUR 0x02
+#define QAM_TRUNCATE 0x04
+
+/*
+ * Parameter to __qam_position.
+ */
+typedef enum {
+ QAM_READ,
+ QAM_WRITE,
+ QAM_CONSUME
+} qam_position_mode;
+
+typedef enum {
+ QAM_PROBE_GET,
+ QAM_PROBE_PUT,
+ QAM_PROBE_MPF
+} qam_probe_mode;
+
+#define __qam_fget(dbp, pgnoaddr, flags, addrp) \
+ __qam_fprobe(dbp, *pgnoaddr, addrp, QAM_PROBE_GET, flags)
+
+#define __qam_fput(dbp, pageno, addrp, flags) \
+ __qam_fprobe(dbp, pageno, addrp, QAM_PROBE_PUT, flags)
+
+#include "dbinc_auto/qam_auto.h"
+#include "dbinc_auto/qam_ext.h"
+#endif /* !_DB_QAM_H_ */
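As an illustration of the recno-to-page arithmetic described in the qam.h comment above, here is a small standalone sketch. The page size, per-page header size and record size are hypothetical stand-ins for pgsize, QPAGE_SZ() and the aligned re_len + QAMDATA overhead; the snippet is illustrative only and not part of the patch.

/*
 * Sketch only: mirrors the QAM_RECNO_PER_PAGE / QAM_RECNO_PAGE /
 * QAM_RECNO_INDEX arithmetic with made-up numbers.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int pgsize = 4096;	/* hypothetical page size */
	unsigned int qpage_sz = 64;	/* hypothetical per-page header */
	unsigned int rec_size = 104;	/* re_len plus aligned QAMDATA overhead */
	unsigned int root = 1;		/* root page, always 1 for now */

	unsigned int per_page = (pgsize - qpage_sz) / rec_size;
	unsigned int recno = 1000;
	unsigned int pgno = root + (recno - 1) / per_page;
	unsigned int indx = (recno - 1) - per_page * (pgno - root);

	/* per_page=38, pgno=27, indx=11 for these numbers. */
	printf("per_page=%u pgno=%u indx=%u\n", per_page, pgno, indx);
	return (0);
}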
diff --git a/libdb/dbinc/queue.h b/libdb/dbinc/queue.h
new file mode 100644
index 0000000..8d4a771
--- /dev/null
+++ b/libdb/dbinc/queue.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+/*
+ * XXX
+ * We #undef the queue macros because there are incompatible versions of this
+ * file and these macros on various systems. What makes the problem worse is
+ * they are included and/or defined by system include files which we may have
+ * already loaded into Berkeley DB before getting here. For example, FreeBSD's
+ * <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
+ * several of the LIST_XXX macros. Make sure we use ours.
+ */
+#undef LIST_HEAD
+#undef LIST_ENTRY
+#undef LIST_FIRST
+#undef LIST_NEXT
+#undef LIST_INIT
+#undef LIST_INSERT_AFTER
+#undef LIST_INSERT_BEFORE
+#undef LIST_INSERT_HEAD
+#undef LIST_REMOVE
+#undef TAILQ_HEAD
+#undef TAILQ_ENTRY
+#undef TAILQ_FIRST
+#undef TAILQ_NEXT
+#undef TAILQ_INIT
+#undef TAILQ_INSERT_HEAD
+#undef TAILQ_INSERT_TAIL
+#undef TAILQ_INSERT_AFTER
+#undef TAILQ_INSERT_BEFORE
+#undef TAILQ_REMOVE
+#undef CIRCLEQ_HEAD
+#undef CIRCLEQ_ENTRY
+#undef CIRCLEQ_FIRST
+#undef CIRCLEQ_LAST
+#undef CIRCLEQ_NEXT
+#undef CIRCLEQ_PREV
+#undef CIRCLEQ_INIT
+#undef CIRCLEQ_INSERT_AFTER
+#undef CIRCLEQ_INSERT_BEFORE
+#undef CIRCLEQ_INSERT_HEAD
+#undef CIRCLEQ_INSERT_TAIL
+#undef CIRCLEQ_REMOVE
+
+/*
+ * This file defines three types of data structures: lists, tail queues,
+ * and circular queues.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may only be traversed in the forward direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) { \
+ (head)->lh_first = NULL; \
+}
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+/*
+ * This macro is used to fixup the queue after moving the head.
+ */
+#define TAILQ_REINSERT_HEAD(head, elm, field) do { \
+ DB_ASSERT((head)->tqh_first == (elm)); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = (void *)(head); \
+ (head)->cqh_last = (void *)(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = (void *)(head); \
+ if ((head)->cqh_last == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = (void *)(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
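A minimal usage sketch for the LIST_* macros defined above, assuming this queue.h is in scope; the element type and values are hypothetical and the snippet is not part of the patch.

#include <stdio.h>
#include <stddef.h>

struct item {
	int value;
	LIST_ENTRY(item) links;		/* embedded linkage */
};

LIST_HEAD(item_head, item);

int
main(void)
{
	struct item_head head;
	struct item a, b, *ip;

	a.value = 1;
	b.value = 2;

	LIST_INIT(&head);
	LIST_INSERT_HEAD(&head, &a, links);
	LIST_INSERT_AFTER(&a, &b, links);	/* list is now: 1, 2 */

	for (ip = LIST_FIRST(&head); ip != NULL; ip = LIST_NEXT(ip, links))
		printf("%d\n", ip->value);

	LIST_REMOVE(&a, links);			/* list is now: 2 */
	return (0);
}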
diff --git a/libdb/dbinc/region.h b/libdb/dbinc/region.h
new file mode 100644
index 0000000..1b76e2d
--- /dev/null
+++ b/libdb/dbinc/region.h
@@ -0,0 +1,304 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_REGION_H_
+#define _DB_REGION_H_
+
+/*
+ * The DB environment consists of some number of "regions", which are described
+ * by the following four structures:
+ *
+ * REGENV -- shared information about the environment
+ * REGENV_REF -- file describing system memory version of REGENV
+ * REGION -- shared information about a single region
+ * REGINFO -- per-process information about a REGION
+ *
+ * There are three types of memory that hold regions:
+ * per-process heap (malloc)
+ * file mapped into memory (mmap, MapViewOfFile)
+ * system memory (shmget, CreateFileMapping)
+ *
+ * If the regions are private to a process, they're in malloc. If they're
+ * public, they're in file mapped memory, or, optionally, in system memory.
+ * Regions in the filesystem are named "__db.001", "__db.002" and so on. If
+ * we're not using a private environment allocated using malloc(3), the file
+ * "__db.001" will always exist, as we use it to synchronize on the regions,
+ * whether they exist in file mapped memory or system memory.
+ *
+ * The file "__db.001" contains a REGENV structure and a linked list of some
+ * number of REGION structures. Each of the REGION structures describes and
+ * locks one of the underlying shared regions used by DB.
+ *
+ * __db.001
+ * +---------+
+ * |REGENV |
+ * +---------+ +----------+
+ * |REGION |-> | __db.002 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.003 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.004 |
+ * | | +----------+
+ * +---------+
+ *
+ * The only tricky part about manipulating the regions is correctly creating
+ * or joining the REGENV file, i.e., __db.001. We have to be absolutely sure
+ * that only one process creates it, and that everyone else joins it without
+ * seeing inconsistent data. Once that region is created, we can use normal
+ * shared locking procedures to do mutual exclusion for all other regions.
+ *
+ * One of the REGION structures in the main environment region describes the
+ * environment region itself.
+ *
+ * To lock a region, locate the REGION structure that describes it and acquire
+ * the region's mutex. There is one exception to this rule -- the lock for the
+ * environment region itself is in the REGENV structure, and not in the REGION
+ * that describes the environment region. That's so that we can acquire a lock
+ * without walking linked lists that could potentially change underneath us.
+ * The REGION will not be moved or removed during the life of the region, and
+ * so long-lived references to it can be held by the process.
+ *
+ * All requests to create or join a region return a REGINFO structure, which
+ * is held by the caller and used to open and subsequently close the reference
+ * to the region. The REGINFO structure contains the per-process information
+ * that we need to access the region.
+ *
+ * The one remaining complication. If the regions (including the environment
+ * region) live in system memory, and the system memory isn't "named" somehow
+ * in the filesystem name space, we need some way of finding it. Do this by
+ * writing the REGENV_REF structure into the "__db.001" file. When we find
+ * a __db.001 file that is too small to be a real, on-disk environment, we use
+ * the information it contains to redirect to the real "__db.001" file/memory.
+ * This currently only happens when the REGENV file is in shared system memory.
+ *
+ * Although DB does not currently grow regions when they run out of memory, it
+ * would be possible to do so. To grow a region, allocate a new region of the
+ * appropriate size, then copy the old region over it and insert the additional
+ * space into the already existing shalloc arena. Callers may have to fix up
+ * local references, but that should be easy to do. This failed in historic
+ * versions of DB because the region lock lived in the mapped memory, and when
+ * it was unmapped and remapped (or copied), threads could lose track of it.
+ * Once we moved that lock into a region that is never unmapped, growing should
+ * work. That all said, current versions of DB don't implement region growth
+ * because some systems don't support mutex copying, e.g., from OSF1 V4.0:
+ *
+ * The address of an msemaphore structure may be significant. If the
+ * msemaphore structure contains any value copied from an msemaphore
+ * structure at a different address, the result is undefined.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DB_REGION_FMT "__db.%03d" /* Region file name format. */
+#define DB_REGION_NAME_NUM 5 /* First digit offset in file names. */
+#define DB_REGION_NAME_LENGTH 8 /* Length of file names. */
+
+#define DB_REGION_ENV "__db.001" /* Primary environment name. */
+
+#define INVALID_REGION_ID 0 /* Out-of-band region ID. */
+#define REGION_ID_ENV 1 /* Primary environment ID. */
+
+typedef enum {
+ INVALID_REGION_TYPE=0, /* Region type. */
+ REGION_TYPE_ENV,
+ REGION_TYPE_LOCK,
+ REGION_TYPE_LOG,
+ REGION_TYPE_MPOOL,
+ REGION_TYPE_MUTEX,
+ REGION_TYPE_TXN } reg_type;
+
+#define INVALID_REGION_SEGID -1 /* Segment IDs are either shmget(2) or
+ * Win16 segment identifiers. They are
+ * both stored in a "long", and we need
+ * an out-of-band value.
+ */
+/*
+ * Nothing can live at region offset 0, because, in all cases, that's where
+ * we store *something*. Lots of code needs an out-of-band value for region
+ * offsets, so we use 0.
+ */
+#define INVALID_ROFF 0
+
+/* Reference describing system memory version of REGENV. */
+typedef struct __db_reg_env_ref {
+ roff_t size; /* Region size. */
+ long segid; /* UNIX shmget ID, VxWorks ID. */
+} REGENV_REF;
+
+/* Per-environment region information. */
+typedef struct __db_reg_env {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ DB_MUTEX mutex; /* Environment mutex. */
+
+ /*
+ * !!!
+ * Note, the magic and panic fields are NOT protected by any mutex,
+ * and for this reason cannot be anything more complicated than a
+ * zero/non-zero value.
+ *
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic; /* Valid region magic number. */
+
+ int envpanic; /* Environment is dead. */
+
+ int majver; /* Major DB version number. */
+ int minver; /* Minor DB version number. */
+ int patch; /* Patch DB version number. */
+
+ u_int32_t init_flags; /* Flags the env was initialized with.*/
+ roff_t cipher_off; /* Offset of cipher area */
+
+ /* List of regions. */
+ SH_LIST_HEAD(__db_regionh) regionq;
+
+ u_int32_t refcnt; /* References to the environment. */
+
+ roff_t rep_off; /* Offset of the replication area. */
+
+ size_t pad; /* Guarantee that following memory is
+ * size_t aligned. This is necessary
+ * because we're going to store the
+ * allocation region information there.
+ */
+} REGENV;
+
+/* Per-region shared region information. */
+typedef struct __db_region {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ DB_MUTEX mutex; /* Region mutex. */
+
+ /*
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic;
+
+ SH_LIST_ENTRY q; /* Linked list of REGIONs. */
+
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+
+ roff_t size; /* Region size in bytes. */
+
+ roff_t primary; /* Primary data structure offset. */
+
+ long segid; /* UNIX shmget(2), Win16 segment ID. */
+} REGION;
+
+/*
+ * Per-process/per-attachment information about a single region.
+ */
+struct __db_reginfo_t { /* __db_r_attach IN parameters. */
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+ int mode; /* File creation mode. */
+
+ /* __db_r_attach OUT parameters. */
+ REGION *rp; /* Shared region. */
+
+ char *name; /* Region file name. */
+
+ void *addr; /* Region allocation address. */
+ void *primary; /* Primary data structure address. */
+
+ void *wnt_handle; /* Win/NT HANDLE. */
+
+#define REGION_CREATE 0x01 /* Caller created region. */
+#define REGION_CREATE_OK 0x02 /* Caller willing to create region. */
+#define REGION_JOIN_OK 0x04 /* Caller is looking for a match. */
+ u_int32_t flags;
+};
+
+/*
+ * Mutex maintenance information each subsystem region must keep track
+ * of to manage resources adequately.
+ */
+typedef struct __db_regmaint_stat_t {
+ u_int32_t st_hint_hit;
+ u_int32_t st_hint_miss;
+ u_int32_t st_records;
+ u_int32_t st_clears;
+ u_int32_t st_destroys;
+ u_int32_t st_max_locks;
+} REGMAINT_STAT;
+
+typedef struct __db_regmaint_t {
+ u_int32_t reglocks; /* Maximum # of mutexes we track. */
+ u_int32_t regmutex_hint; /* Hint for next slot */
+ REGMAINT_STAT stat; /* Stats */
+ roff_t regmutexes[1]; /* Region mutexes in use. */
+} REGMAINT;
+
+/*
+ * R_ADDR Return a per-process address for a shared region offset.
+ * R_OFFSET Return a shared region offset for a per-process address.
+ *
+ * !!!
+ * R_OFFSET should really be returning a ptrdiff_t, but that's not yet
+ * portable. We use u_int32_t, which restricts regions to 4Gb in size.
+ */
+#define R_ADDR(base, offset) \
+ ((void *)((u_int8_t *)((base)->addr) + offset))
+#define R_OFFSET(base, p) \
+ ((u_int32_t)((u_int8_t *)(p) - (u_int8_t *)(base)->addr))
+
+/*
+ * R_LOCK Lock/unlock a region.
+ * R_UNLOCK
+ */
+#define R_LOCK(dbenv, reginfo) \
+ MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex)
+#define R_UNLOCK(dbenv, reginfo) \
+ MUTEX_UNLOCK(dbenv, &(reginfo)->rp->mutex)
+
+/* PANIC_CHECK: Check to see if the DB environment is dead. */
+#define PANIC_CHECK(dbenv) \
+ if (!F_ISSET((dbenv), DB_ENV_NOPANIC) && \
+ (dbenv)->reginfo != NULL && ((REGENV *) \
+ ((REGINFO *)(dbenv)->reginfo)->primary)->envpanic != 0) \
+ return (__db_panic_msg(dbenv));
+
+#define PANIC_SET(dbenv, onoff) \
+ ((REGENV *)((REGINFO *)(dbenv)->reginfo)->primary)->envpanic = (onoff);
+
+/*
+ * All regions are created on 8K boundaries out of sheer paranoia, so we
+ * don't make some underlying VM unhappy. Make sure we don't overflow or
+ * underflow.
+ */
+#define OS_VMPAGESIZE (8 * 1024)
+#define OS_VMROUNDOFF(i) { \
+ if ((i) < \
+ (UINT32_T_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE) \
+ (i) += OS_VMPAGESIZE - 1; \
+ (i) -= (i) % OS_VMPAGESIZE; \
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_REGION_H_ */
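The R_ADDR/R_OFFSET conversion above amounts to simple base-plus-offset arithmetic against the region's mapping address. A hand-expanded sketch follows, with a plain buffer standing in for a mapped region and a minimal stand-in for the REGINFO addr field; it is illustrative only and not part of the patch.

#include <stdio.h>

struct fake_reginfo {
	void *addr;			/* base address of the mapped region */
};

int
main(void)
{
	static unsigned char region[8192];	/* pretend this is __db.002 */
	struct fake_reginfo ri;
	void *p;
	unsigned int off;

	ri.addr = region;

	/* R_ADDR(&ri, 128): per-process address for region offset 128. */
	p = (void *)((unsigned char *)ri.addr + 128);

	/* R_OFFSET(&ri, p): region offset for that address. */
	off = (unsigned int)((unsigned char *)p - (unsigned char *)ri.addr);

	printf("offset %u\n", off);		/* prints 128 */
	return (0);
}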
diff --git a/libdb/dbinc/rep.h b/libdb/dbinc/rep.h
new file mode 100644
index 0000000..1e31549
--- /dev/null
+++ b/libdb/dbinc/rep.h
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef _REP_H_
+#define _REP_H_
+
+#define REP_ALIVE 1 /* I am alive message. */
+#define REP_ALIVE_REQ 2 /* Request for alive messages. */
+#define REP_ALL_REQ 3 /* Request all log records greater than LSN. */
+#define REP_ELECT 4 /* Indicates that all listeners should */
+ /* begin master election */
+#define REP_FILE 6 /* Page of a database file. */
+#define REP_FILE_REQ 7 /* Request for a database file. */
+#define REP_LOG 8 /* Log record. */
+#define REP_LOG_MORE 9 /* There are more log records to request. */
+#define REP_LOG_REQ 10 /* Request for a log record. */
+#define REP_MASTER_REQ 11 /* Who is the master */
+#define REP_NEWCLIENT 12 /* Announces the presence of a new client. */
+#define REP_NEWFILE 13 /* Announce a log file change. */
+#define REP_NEWMASTER 14 /* Announces who the master is. */
+#define REP_NEWSITE 15 /* Announces that a site has heard from a new
+ * site; like NEWCLIENT, but indirect. A
+ * NEWCLIENT message comes directly from the new
+ * client while a NEWSITE comes indirectly from
+ * someone who heard about a NEWSITE.
+ */
+#define REP_PAGE 16 /* Database page. */
+#define REP_PAGE_REQ 17 /* Request for a database page. */
+#define REP_PLIST 18 /* Database page list. */
+#define REP_PLIST_REQ 19 /* Request for a page list. */
+#define REP_VERIFY 20 /* A log record for verification. */
+#define REP_VERIFY_FAIL 21 /* The client is outdated. */
+#define REP_VERIFY_REQ 22 /* Request for a log record to verify. */
+#define REP_VOTE1 23 /* Send out your information for an election. */
+#define REP_VOTE2 24 /* Send a "you are master" vote. */
+
+/* Used to consistently designate which messages ought to be received where. */
+#define MASTER_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_MASTER)) return (EINVAL)
+
+#define CLIENT_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_CLIENT)) return (EINVAL)
+
+#define ANYSITE(dbenv)
+
+/* Shared replication structure. */
+
+typedef struct __rep {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * db_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX mutex; /* Region lock. */
+ roff_t db_mutex_off; /* Client database mutex. */
+ u_int32_t tally_off; /* Offset of the tally region. */
+ int eid; /* Environment id. */
+ int master_id; /* ID of the master site. */
+ u_int32_t gen; /* Replication generation number */
+ int asites; /* Space allocated for sites. */
+ int nsites; /* Number of sites in group. */
+ int priority; /* My priority in an election. */
+ u_int32_t gbytes; /* Limit on data sent in single... */
+ u_int32_t bytes; /* __rep_process_message call. */
+#define DB_REP_REQUEST_GAP 4
+#define DB_REP_MAX_GAP 128
+ u_int32_t request_gap; /* # of records to receive before we
+ * request a missing log record. */
+ u_int32_t max_gap; /* Maximum number of records before
+ * requesting a missing log record. */
+
+ /* Vote tallying information. */
+ int sites; /* Sites heard from. */
+ int winner; /* Current winner. */
+ int w_priority; /* Winner priority. */
+ u_int32_t w_gen; /* Winner generation. */
+ DB_LSN w_lsn; /* Winner LSN. */
+ int w_tiebreaker; /* Winner tiebreaking value. */
+ int votes; /* Number of votes for this site. */
+
+ /* Statistics. */
+ DB_REP_STAT stat;
+
+#define REP_F_EPHASE1 0x01 /* In phase 1 of election. */
+#define REP_F_EPHASE2 0x02 /* In phase 2 of election. */
+#define REP_F_LOGSONLY 0x04 /* Log-site only; cannot be upgraded. */
+#define REP_F_MASTER 0x08 /* Master replica. */
+#define REP_F_RECOVER 0x10
+#define REP_F_UPGRADE 0x20 /* Upgradeable replica. */
+#define REP_ISCLIENT (REP_F_UPGRADE | REP_F_LOGSONLY)
+ u_int32_t flags;
+} REP;
+
+#define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+#define ELECTION_DONE(R) F_CLR((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+
+/*
+ * Per-process replication structure.
+ */
+struct __db_rep {
+ DB_MUTEX *mutexp;
+
+ DB_MUTEX *db_mutexp; /* Mutex for bookkeeping database. */
+ DB *rep_db; /* Bookkeeping database. */
+
+ REP *region; /* In memory structure. */
+ int (*rep_send) /* Send function. */
+ __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+};
+
+/*
+ * Control structure for replication communication infrastructure.
+ *
+ * Note that the version information should be at the beginning of the
+ * structure, so that we can rearrange the rest of it while letting the
+ * version checks continue to work. DB_REPVERSION should be revved any time
+ * the rest of the structure changes.
+ */
+typedef struct __rep_control {
+#define DB_REPVERSION 1
+ u_int32_t rep_version; /* Replication version number. */
+ u_int32_t log_version; /* Log version number. */
+
+ DB_LSN lsn; /* Log sequence number. */
+ u_int32_t rectype; /* Message type. */
+ u_int32_t gen; /* Generation number. */
+ u_int32_t flags; /* log_put flag value. */
+} REP_CONTROL;
+
+/* Election vote information. */
+typedef struct __rep_vote {
+ int priority; /* My site's priority. */
+ int nsites; /* Number of sites I've been in
+ * communication with. */
+ int tiebreaker; /* Tie-breaking quasi-random int. */
+} REP_VOTE_INFO;
+
+/*
+ * This structure takes care of representing a transaction.
+ * It holds all the records, sorted by page number so that
+ * we can obtain locks and apply updates in a deadlock free
+ * order.
+ */
+typedef struct __lsn_page {
+ DB_LSN lsn;
+ u_int32_t fid;
+ DB_LOCK_ILOCK pgdesc;
+#define LSN_PAGE_NOLOCK 0x0001 /* No lock necessary for log rec. */
+ u_int32_t flags;
+} LSN_PAGE;
+
+typedef struct __txn_recs {
+ int npages;
+ int nalloc;
+ LSN_PAGE *array;
+ u_int32_t txnid;
+ u_int32_t lockid;
+} TXN_RECS;
+
+typedef struct __lsn_collection {
+ int nlsns;
+ int nalloc;
+ DB_LSN *array;
+} LSN_COLLECTION;
+
+/*
+ * This is used by the page-prep routines to do the lock_vec call to
+ * apply the updates for a single transaction or a collection of
+ * transactions.
+ */
+typedef struct _linfo {
+ int n;
+ DB_LOCKREQ *reqs;
+ DBT *objs;
+} linfo_t;
+
+#include "dbinc_auto/rep_ext.h"
+#endif /* !_REP_H_ */
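For context, the rep_send member of struct __db_rep above has the same shape as the transport callback an application supplies; a hedged sketch of such a callback is below. send_to_site() is a hypothetical placeholder for whatever transport the application uses, and the snippet is not part of the patch.

/* Sketch of a send callback matching the rep_send signature above. */
static int
my_rep_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
    int eid, u_int32_t flags)
{
	(void)dbenv;
	(void)flags;

	/* send_to_site() is a hypothetical transport helper. */
	if (send_to_site(eid, control->data, control->size) != 0 ||
	    send_to_site(eid, rec->data, rec->size) != 0)
		return (-1);		/* non-zero reports a failed send */
	return (0);
}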
diff --git a/libdb/dbinc/shqueue.h b/libdb/dbinc/shqueue.h
new file mode 100644
index 0000000..af69025
--- /dev/null
+++ b/libdb/dbinc/shqueue.h
@@ -0,0 +1,337 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _SYS_SHQUEUE_H_
+#define _SYS_SHQUEUE_H_
+
+/*
+ * This file defines three types of data structures: lists, tail queues, and
+ * circular queues, similarly to the include file <sys/queue.h>.
+ *
+ * The difference is that this set of macros can be used for structures that
+ * reside in shared memory that may be mapped at different addresses in each
+ * process. In most cases, the macros for shared structures exactly mirror
+ * the normal macros, although the macro calls require an additional type
+ * parameter, only used by the HEAD and ENTRY macros of the standard macros.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Shared list definitions.
+ */
+#define SH_LIST_HEAD(name) \
+struct name { \
+ ssize_t slh_first; /* first element */ \
+}
+
+#define SH_LIST_ENTRY \
+struct { \
+ ssize_t sle_next; /* relative offset next element */ \
+ ssize_t sle_prev; /* relative offset of prev element */ \
+}
+
+/*
+ * Shared list functions. Since we use relative offsets for pointers,
+ * 0 is a valid offset. Therefore, we use -1 to indicate end of list.
+ * The macros ending in "P" return pointers without checking for end
+ * of list, the others check for end of list and evaluate to either a
+ * pointer or NULL.
+ */
+
+#define SH_LIST_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first))
+
+#define SH_LIST_FIRST(head, type) \
+ ((head)->slh_first == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first)))
+
+#define SH_LIST_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next))
+
+#define SH_LIST_NEXT(elm, field, type) \
+ ((elm)->field.sle_next == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next)))
+
+#define SH_LIST_PREV(elm, field) \
+ ((ssize_t *)(((u_int8_t *)(elm)) + (elm)->field.sle_prev))
+
+#define SH_PTR_TO_OFF(src, dest) \
+ ((ssize_t)(((u_int8_t *)(dest)) - ((u_int8_t *)(src))))
+
+/*
+ * Take the element's next pointer and calculate what the corresponding
+ * Prev pointer should be -- basically it is the negation plus the offset
+ * of the next field in the structure.
+ */
+#define SH_LIST_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.sle_next + SH_PTR_TO_OFF(elm, &(elm)->field.sle_next))
+
+#define SH_LIST_INIT(head) (head)->slh_first = -1
+
+#define SH_LIST_INSERT_AFTER(listelm, elm, field, type) do { \
+ if ((listelm)->field.sle_next != -1) { \
+ (elm)->field.sle_next = SH_PTR_TO_OFF(elm, \
+ SH_LIST_NEXTP(listelm, field, type)); \
+ SH_LIST_NEXTP(listelm, field, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (listelm)->field.sle_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.sle_prev = SH_LIST_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_LIST_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->slh_first != -1) { \
+ (elm)->field.sle_next = \
+ (head)->slh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_LIST_FIRSTP(head, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (head)->slh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.sle_prev = SH_PTR_TO_OFF(elm, &(head)->slh_first); \
+} while (0)
+
+#define SH_LIST_REMOVE(elm, field, type) do { \
+ if ((elm)->field.sle_next != -1) { \
+ SH_LIST_NEXTP(elm, field, type)->field.sle_prev = \
+ (elm)->field.sle_prev - (elm)->field.sle_next; \
+ *SH_LIST_PREV(elm, field) += (elm)->field.sle_next; \
+ } else \
+ *SH_LIST_PREV(elm, field) = -1; \
+} while (0)
+
+/*
+ * Shared tail queue definitions.
+ */
+#define SH_TAILQ_HEAD(name) \
+struct name { \
+ ssize_t stqh_first; /* relative offset of first element */ \
+ ssize_t stqh_last; /* relative offset of last's next */ \
+}
+
+#define SH_TAILQ_ENTRY \
+struct { \
+ ssize_t stqe_next; /* relative offset of next element */ \
+ ssize_t stqe_prev; /* relative offset of prev's next */ \
+}
+
+/*
+ * Shared tail queue functions.
+ */
+#define SH_TAILQ_FIRSTP(head, type) \
+ ((struct type *)((u_int8_t *)(head) + (head)->stqh_first))
+
+#define SH_TAILQ_FIRST(head, type) \
+ ((head)->stqh_first == -1 ? NULL : SH_TAILQ_FIRSTP(head, type))
+
+#define SH_TAILQ_NEXTP(elm, field, type) \
+ ((struct type *)((u_int8_t *)(elm) + (elm)->field.stqe_next))
+
+#define SH_TAILQ_NEXT(elm, field, type) \
+ ((elm)->field.stqe_next == -1 ? NULL : SH_TAILQ_NEXTP(elm, field, type))
+
+#define SH_TAILQ_PREVP(elm, field) \
+ ((ssize_t *)((u_int8_t *)(elm) + (elm)->field.stqe_prev))
+
+#define SH_TAILQ_LAST(head) \
+ ((ssize_t *)(((u_int8_t *)(head)) + (head)->stqh_last))
+
+#define SH_TAILQ_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.stqe_next + SH_PTR_TO_OFF(elm, &(elm)->field.stqe_next))
+
+#define SH_TAILQ_INIT(head) { \
+ (head)->stqh_first = -1; \
+ (head)->stqh_last = SH_PTR_TO_OFF(head, &(head)->stqh_first); \
+}
+
+#define SH_TAILQ_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->stqh_first != -1) { \
+ (elm)->field.stqe_next = \
+ (head)->stqh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_TAILQ_FIRSTP(head, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &(elm)->field.stqe_next); \
+ } \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.stqe_prev = \
+ SH_PTR_TO_OFF(elm, &(head)->stqh_first); \
+} while (0)
+
+#define SH_TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = -1; \
+ (elm)->field.stqe_prev = \
+ -SH_PTR_TO_OFF(head, elm) + (head)->stqh_last; \
+ if ((head)->stqh_last == \
+ SH_PTR_TO_OFF((head), &(head)->stqh_first)) \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ *SH_TAILQ_LAST(head) = -(head)->stqh_last + \
+ SH_PTR_TO_OFF((elm), &(elm)->field.stqe_next) + \
+ SH_PTR_TO_OFF(head, elm); \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &((elm)->field.stqe_next)); \
+} while (0)
+
+#define SH_TAILQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ if ((listelm)->field.stqe_next != -1) { \
+ (elm)->field.stqe_next = (listelm)->field.stqe_next - \
+ SH_PTR_TO_OFF(listelm, elm); \
+ SH_TAILQ_NEXTP(listelm, field, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &elm->field.stqe_next); \
+ } \
+ (listelm)->field.stqe_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.stqe_prev = SH_TAILQ_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_TAILQ_REMOVE(head, elm, field, type) do { \
+ if ((elm)->field.stqe_next != -1) { \
+ SH_TAILQ_NEXTP(elm, field, type)->field.stqe_prev = \
+ (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(SH_TAILQ_NEXTP(elm, \
+ field, type), elm); \
+ *SH_TAILQ_PREVP(elm, field) += elm->field.stqe_next; \
+ } else { \
+ (head)->stqh_last = (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(head, elm); \
+ *SH_TAILQ_PREVP(elm, field) = -1; \
+ } \
+} while (0)
+
+/*
+ * Shared circular queue definitions.
+ */
+#define SH_CIRCLEQ_HEAD(name) \
+struct name { \
+ ssize_t scqh_first; /* first element */ \
+ ssize_t scqh_last; /* last element */ \
+}
+
+#define SH_CIRCLEQ_ENTRY \
+struct { \
+ ssize_t scqe_next; /* next element */ \
+ ssize_t scqe_prev; /* previous element */ \
+}
+
+/*
+ * Shared circular queue functions.
+ */
+#define SH_CIRCLEQ_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_first))
+
+#define SH_CIRCLEQ_FIRST(head, type) \
+ ((head)->scqh_first == -1 ? \
+ (void *)head : SH_CIRCLEQ_FIRSTP(head, type))
+
+#define SH_CIRCLEQ_LASTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_last))
+
+#define SH_CIRCLEQ_LAST(head, type) \
+ ((head)->scqh_last == -1 ? (void *)head : SH_CIRCLEQ_LASTP(head, type))
+
+#define SH_CIRCLEQ_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_next))
+
+#define SH_CIRCLEQ_NEXT(head, elm, field, type) \
+ ((elm)->field.scqe_next == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_NEXTP(elm, field, type))
+
+#define SH_CIRCLEQ_PREVP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_prev))
+
+#define SH_CIRCLEQ_PREV(head, elm, field, type) \
+ ((elm)->field.scqe_prev == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_PREVP(elm, field, type))
+
+#define SH_CIRCLEQ_INIT(head) { \
+ (head)->scqh_first = 0; \
+ (head)->scqh_last = 0; \
+}
+
+#define SH_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_next = (listelm)->field.scqe_next + \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_NEXTP(listelm, field, type) == (void *)head) \
+ (head)->scqh_last = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_NEXTP(listelm, \
+ field, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_NEXTP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_next = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_prev = (elm)->field.scqe_next - \
+ SH_CIRCLEQ_PREVP(listelm, field, type)->field.scqe_next;\
+ if (SH_CIRCLEQ_PREVP(listelm, field, type) == (void *)(head)) \
+ (head)->scqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_PREVP(listelm, \
+ field, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_PREVP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_prev = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_HEAD(head, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_next = (head)->scqh_first + \
+ (elm)->field.scqe_prev; \
+ if ((head)->scqh_last == 0) \
+ (head)->scqh_last = -(elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_FIRSTP(head, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_FIRSTP(head, type), elm); \
+ (head)->scqh_first = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_TAIL(head, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_prev = (head)->scqh_last + \
+ (elm)->field.scqe_next; \
+ if ((head)->scqh_first == 0) \
+ (head)->scqh_first = -(elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_LASTP(head, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_LASTP(head, type), elm); \
+ (head)->scqh_last = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_REMOVE(head, elm, field, type) do { \
+ if (SH_CIRCLEQ_NEXTP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_last += (elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_NEXTP(elm, field, type)->field.scqe_prev += \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_PREVP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_first += (elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_PREVP(elm, field, type)->field.scqe_next += \
+ (elm)->field.scqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_SYS_SHQUEUE_H_ */
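The key idea in the SH_* macros above is that links are stored as byte offsets relative to the element that holds them, so the structures remain valid no matter where the shared region is mapped in each process. A minimal standalone sketch of that trick (hypothetical element layout, not part of the patch):

#include <stdio.h>
#include <sys/types.h>

struct elem {
	int value;
	ssize_t next_off;	/* offset of next element, -1 ends the list */
};

int
main(void)
{
	static unsigned char shared[256];	/* stand-in for shared memory */
	struct elem *a, *b, *n;

	a = (struct elem *)&shared[0];
	b = (struct elem *)&shared[64];
	a->value = 1;
	b->value = 2;

	/* SH_PTR_TO_OFF(a, b): store b's position relative to a. */
	a->next_off = (ssize_t)((unsigned char *)b - (unsigned char *)a);
	b->next_off = -1;

	/* Following the offset works at whatever address the region maps. */
	n = (struct elem *)((unsigned char *)a + a->next_off);
	printf("%d -> %d\n", a->value, n->value);	/* prints 1 -> 2 */
	return (0);
}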
diff --git a/libdb/dbinc/tcl_db.h b/libdb/dbinc/tcl_db.h
new file mode 100644
index 0000000..2ec368b
--- /dev/null
+++ b/libdb/dbinc/tcl_db.h
@@ -0,0 +1,261 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _DB_TCL_DB_H_
+#define _DB_TCL_DB_H_
+
+#define MSG_SIZE 100 /* Message size */
+
+enum INFOTYPE {
+ I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX };
+
+#define MAX_ID 8 /* Maximum number of sub-id's we need */
+#define DBTCL_PREP 64 /* Size of txn_recover preplist */
+
+#define DBTCL_DBM 1
+#define DBTCL_NDBM 2
+
+typedef struct _mutex_entry {
+ union {
+ struct {
+ DB_MUTEX real_m;
+ u_int32_t real_val;
+ } r;
+ /*
+ * This is here to make sure that each of the mutex structures
+ * are 16-byte aligned, which is required on HP architectures.
+ * The db_mutex_t structure might be >32 bytes itself, or the
+ * real_val might push it over the 32 byte boundary. The best
+ * we can do is use a 48 byte boundary.
+ */
+ char c[48];
+ } u;
+} _MUTEX_ENTRY;
+
+#define m u.r.real_m
+#define val u.r.real_val
+
+typedef struct _mutex_data {
+ DB_ENV *env;
+ REGINFO reginfo;
+ _MUTEX_ENTRY *marray;
+ size_t size;
+ u_int32_t n_mutex;
+} _MUTEX_DATA;
+
+/*
+ * Why use a home grown package over the Tcl_Hash functions?
+ *
+ * We could have implemented the stuff below without maintaining our
+ * own list manipulation, efficiently hashing it with the available
+ * Tcl functions (Tcl_CreateHashEntry, Tcl_GetHashValue, etc). I chose
+ * not to do so for these reasons:
+ *
+ * We still need the information below. Using the hashing only removes
+ * us from needing the next/prev pointers. We still need the structure
+ * itself because we need more than one value associated with a widget.
+ * We need to keep track of parent pointers for sub-widgets (like cursors)
+ * so we can correctly close. We need to keep track of individual widget's
+ * id counters for any sub-widgets they may have. We need to be able to
+ * associate the name/client data outside the scope of the widget.
+ *
+ * So, is it better to use the hashing rather than
+ * the linear list we have now? I decided against it for the simple reason
+ * that to access the structure would require two calls. The first is
+ * Tcl_FindHashEntry(table, key) and then, once we have the entry, we'd
+ * have to do Tcl_GetHashValue(entry) to get the pointer of the structure.
+ *
+ * I believe the number of simultaneous DB widgets in existence at one time
+ * is not going to be that large (more than several dozen) such that
+ * linearly searching the list is not going to impact performance in a
+ * noticeable way. Should performance be impacted due to the size of the
+ * info list, then perhaps it is time to revisit this decision.
+ */
+typedef struct dbtcl_info {
+ LIST_ENTRY(dbtcl_info) entries;
+ Tcl_Interp *i_interp;
+ char *i_name;
+ enum INFOTYPE i_type;
+ union infop {
+ DB_ENV *envp;
+ void *anyp;
+ DB *dbp;
+ DBC *dbcp;
+ DB_TXN *txnp;
+ DB_MPOOLFILE *mp;
+ DB_LOCK *lock;
+ _MUTEX_DATA *mutex;
+ DB_LOGC *logc;
+ } un;
+ union data {
+ int anydata;
+ db_pgno_t pgno;
+ u_int32_t lockid;
+ } und;
+ union data2 {
+ int anydata;
+ size_t pagesz;
+ } und2;
+ DBT i_lockobj;
+ FILE *i_err;
+ char *i_errpfx;
+
+ /* Callbacks--Tcl_Objs containing proc names */
+ Tcl_Obj *i_btcompare;
+ Tcl_Obj *i_dupcompare;
+ Tcl_Obj *i_hashproc;
+ Tcl_Obj *i_rep_send;
+ Tcl_Obj *i_second_call;
+
+ /* Environment ID for the i_rep_send callback. */
+ Tcl_Obj *i_rep_eid;
+
+ struct dbtcl_info *i_parent;
+ int i_otherid[MAX_ID];
+} DBTCL_INFO;
+
+#define i_anyp un.anyp
+#define i_pagep un.anyp
+#define i_envp un.envp
+#define i_dbp un.dbp
+#define i_dbcp un.dbcp
+#define i_txnp un.txnp
+#define i_mp un.mp
+#define i_lock un.lock
+#define i_mutex un.mutex
+#define i_logc un.logc
+
+#define i_data und.anydata
+#define i_pgno und.pgno
+#define i_locker und.lockid
+#define i_data2 und2.anydata
+#define i_pgsz und2.pagesz
+
+#define i_envtxnid i_otherid[0]
+#define i_envmpid i_otherid[1]
+#define i_envlockid i_otherid[2]
+#define i_envmutexid i_otherid[3]
+#define i_envlogcid i_otherid[4]
+
+#define i_mppgid i_otherid[0]
+
+#define i_dbdbcid i_otherid[0]
+
+extern int __debug_on, __debug_print, __debug_stop, __debug_test;
+
+typedef struct dbtcl_global {
+ LIST_HEAD(infohead, dbtcl_info) g_infohead;
+} DBTCL_GLOBAL;
+#define __db_infohead __dbtcl_global.g_infohead
+
+extern DBTCL_GLOBAL __dbtcl_global;
+
+#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name))
+#define NAME_TO_DB(name) (DB *)_NameToPtr((name))
+#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name))
+#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name))
+#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name))
+#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name))
+
+/*
+ * MAKE_STAT_LIST appends a {name value} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LIST(s,v) \
+do { \
+ result = _SetListElemInt(interp, res, (s), (v)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * MAKE_STAT_LSN appends a {name {LSNfile LSNoffset}} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LSN(s, lsn) \
+do { \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewLongObj((long)(lsn)->file); \
+ myobjv[1] = Tcl_NewLongObj((long)(lsn)->offset); \
+ lsnlist = Tcl_NewListObj(myobjc, myobjv); \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \
+ myobjv[1] = lsnlist; \
+ thislist = Tcl_NewListObj(myobjc, myobjv); \
+ result = Tcl_ListObjAppendElement(interp, res, thislist); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * MAKE_STAT_STRLIST appends a {name string} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_STRLIST(s,s1) \
+do { \
+ result = _SetListElem(interp, res, (s), strlen(s), \
+ (s1), strlen(s1)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * FLAG_CHECK checks that the given flag is not set yet.
+ * If it is, it sets up an error message.
+ */
+#define FLAG_CHECK(flag) \
+do { \
+ if ((flag) != 0) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * FLAG_CHECK2 checks that the given flag is not set yet or is
+ * only set to the given allowed value.
+ * If it is, it sets up an error message.
+ */
+#define FLAG_CHECK2(flag,val) \
+do { \
+ if (((flag) & ~(val)) != 0) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * IS_HELP checks whether the arg we bombed on is -?, which is a help option.
+ * If it is, we return TCL_OK (but leave the result set to whatever
+ * Tcl_GetIndexFromObj says, which lists all the valid options). Otherwise
+ * return TCL_ERROR.
+ */
+#define IS_HELP(s) \
+ (strcmp(Tcl_GetStringFromObj(s,NULL), "-?") == 0) ? TCL_OK : TCL_ERROR
+
+#include "dbinc_auto/tcl_ext.h"
+#endif /* !_DB_TCL_DB_H_ */
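The MAKE_STAT_* macros above assume a particular calling convention in the surrounding function: a Tcl_Obj *res being built up, an int result, and an "error" label to jump to. A hedged sketch of that convention follows; the statistic names and values are made up, error-path cleanup is omitted, and the function is not part of the patch.

/* Sketch only: shows the res/result/error convention the macros assume. */
static int
tcl_ExampleStat(Tcl_Interp *interp, Tcl_Obj **resp)
{
	Tcl_Obj *res;
	int result;

	res = Tcl_NewObj();
	MAKE_STAT_LIST("Hypothetical count", 42);
	MAKE_STAT_LIST("Another value", 7);

	*resp = res;
	return (TCL_OK);

error:	/* MAKE_STAT_LIST jumps here if appending to res fails. */
	return (result);
}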
diff --git a/libdb/dbinc/txn.h b/libdb/dbinc/txn.h
new file mode 100644
index 0000000..2edcdf7
--- /dev/null
+++ b/libdb/dbinc/txn.h
@@ -0,0 +1,143 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _TXN_H_
+#define _TXN_H_
+
+#include "dbinc/xa.h"
+
+/* Operation parameters to the delayed commit processing code. */
+typedef enum {
+ TXN_REMOVE, /* Remove a file. */
+ TXN_TRADE, /* Trade lockers. */
+ TXN_TRADED /* Already traded; downgrade lock. */
+} TXN_EVENT_T;
+
+struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION;
+
+/*
+ * !!!
+ * TXN_MINIMUM = (DB_LOCK_MAXID + 1) but this makes compilers complain.
+ */
+#define TXN_MINIMUM 0x80000000
+#define TXN_MAXIMUM 0xffffffff /* Maximum number of txn ids. */
+#define TXN_INVALID 0 /* Invalid transaction ID. */
+
+#define DEF_MAX_TXNS 20 /* Default max transactions. */
+
+/*
+ * Internal data maintained in shared memory for each transaction.
+ */
+typedef struct __txn_detail {
+ u_int32_t txnid; /* current transaction id
+ used to link free list also */
+ DB_LSN last_lsn; /* last lsn written for this txn */
+ DB_LSN begin_lsn; /* lsn of begin record */
+ roff_t parent; /* Offset of transaction's parent. */
+
+#define TXN_RUNNING 1
+#define TXN_ABORTED 2
+#define TXN_PREPARED 3
+#define TXN_COMMITTED 4
+ u_int32_t status; /* status of the transaction */
+#define TXN_COLLECTED 0x1
+#define TXN_RESTORED 0x2
+ u_int32_t flags; /* collected during txn_recover */
+
+ SH_TAILQ_ENTRY links; /* free/active list */
+
+#define TXN_XA_ABORTED 1
+#define TXN_XA_DEADLOCKED 2
+#define TXN_XA_ENDED 3
+#define TXN_XA_PREPARED 4
+#define TXN_XA_STARTED 5
+#define TXN_XA_SUSPENDED 6
+ u_int32_t xa_status; /* XA status */
+
+ /*
+ * XID (xid_t) structure: because these fields are logged, the
+ * sizes have to be explicit.
+ */
+ u_int8_t xid[XIDDATASIZE]; /* XA global transaction id */
+ u_int32_t bqual; /* bqual_length from XID */
+ u_int32_t gtrid; /* gtrid_length from XID */
+ int32_t format; /* XA format */
+} TXN_DETAIL;
+
+/*
+ * DB_TXNMGR --
+ * The transaction manager encapsulates the transaction system.
+ */
+struct __db_txnmgr {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ DB_MUTEX *mutexp; /* Lock list of active transactions
+ * (including the content of each
+ * TXN_DETAIL structure on the list).
+ */
+ /* List of active transactions. */
+ TAILQ_HEAD(_chain, __db_txn) txn_chain;
+ u_int32_t n_discards; /* Number of txns discarded. */
+
+/* These fields are never updated after creation, and so not protected. */
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+};
+
+/*
+ * DB_TXNREGION --
+ * The primary transaction data structure in the shared memory region.
+ */
+struct __db_txnregion {
+ u_int32_t maxtxns; /* maximum number of active TXNs */
+ u_int32_t last_txnid; /* last transaction id given out */
+ u_int32_t cur_maxid; /* current max unused id. */
+ DB_LSN last_ckp; /* lsn of the last checkpoint */
+ time_t time_ckp; /* time of last checkpoint */
+ u_int32_t logtype; /* type of logging */
+ u_int32_t locktype; /* lock type */
+ DB_TXN_STAT stat; /* Statistics for txns. */
+
+#define TXN_IN_RECOVERY 0x01 /* environment is being recovered */
+ u_int32_t flags;
+ /* active TXN list */
+ SH_TAILQ_HEAD(__active) active_txn;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define TXN_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
+
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
+
+/*
+ * Log record types. Note that these are *not* alphabetical. This is
+ * intentional so that we don't change the meaning of values between
+ * software upgrades. EXPECTED, UNEXPECTED, IGNORE, NOTFOUND and OK
+ * are used in the
+ * txnlist functions.
+ */
+#define TXN_OK 0
+#define TXN_COMMIT 1
+#define TXN_PREPARE 2
+#define TXN_ABORT 3
+#define TXN_NOTFOUND 4
+#define TXN_IGNORE 5
+#define TXN_EXPECTED 6
+#define TXN_UNEXPECTED 7
+
+#include "dbinc_auto/txn_auto.h"
+#include "dbinc_auto/txn_ext.h"
+#include "dbinc_auto/xa_ext.h"
+#endif /* !_TXN_H_ */
diff --git a/libdb/dbinc/xa.h b/libdb/dbinc/xa.h
new file mode 100644
index 0000000..9786e29
--- /dev/null
+++ b/libdb/dbinc/xa.h
@@ -0,0 +1,179 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+/*
+ * Start of xa.h header
+ *
+ * Define a symbol to prevent multiple inclusions of this header file
+ */
+#ifndef XA_H
+#define XA_H
+
+/*
+ * Transaction branch identification: XID and NULLXID:
+ */
+#define XIDDATASIZE 128 /* size in bytes */
+#define MAXGTRIDSIZE 64 /* maximum size in bytes of gtrid */
+#define MAXBQUALSIZE 64 /* maximum size in bytes of bqual */
+
+struct xid_t {
+ long formatID; /* format identifier */
+ long gtrid_length; /* value from 1 through 64 */
+ long bqual_length; /* value from 1 through 64 */
+ char data[XIDDATASIZE];
+};
+typedef struct xid_t XID;
+/*
+ * A value of -1 in formatID means that the XID is null.
+ */
+
+/*
+ * Declarations of routines by which RMs call TMs:
+ */
+extern int ax_reg __P((int, XID *, long));
+extern int ax_unreg __P((int, long));
+
+/*
+ * XA Switch Data Structure
+ */
+#define RMNAMESZ 32 /* length of resource manager name, */
+ /* including the null terminator */
+#define MAXINFOSIZE 256 /* maximum size in bytes of xa_info */
+ /* strings, including the null
+ terminator */
+struct xa_switch_t {
+ char name[RMNAMESZ]; /* name of resource manager */
+ long flags; /* resource manager specific options */
+ long version; /* must be 0 */
+ int (*xa_open_entry) /* xa_open function pointer */
+ __P((char *, int, long));
+ int (*xa_close_entry) /* xa_close function pointer */
+ __P((char *, int, long));
+ int (*xa_start_entry) /* xa_start function pointer */
+ __P((XID *, int, long));
+ int (*xa_end_entry) /* xa_end function pointer */
+ __P((XID *, int, long));
+ int (*xa_rollback_entry) /* xa_rollback function pointer */
+ __P((XID *, int, long));
+ int (*xa_prepare_entry) /* xa_prepare function pointer */
+ __P((XID *, int, long));
+ int (*xa_commit_entry) /* xa_commit function pointer */
+ __P((XID *, int, long));
+ int (*xa_recover_entry) /* xa_recover function pointer */
+ __P((XID *, long, int, long));
+ int (*xa_forget_entry) /* xa_forget function pointer */
+ __P((XID *, int, long));
+ int (*xa_complete_entry) /* xa_complete function pointer */
+ __P((int *, int *, int, long));
+};
+
+/*
+ * Flag definitions for the RM switch
+ */
+#define TMNOFLAGS 0x00000000L /* no resource manager features
+ selected */
+#define TMREGISTER 0x00000001L /* resource manager dynamically
+ registers */
+#define TMNOMIGRATE 0x00000002L /* resource manager does not support
+ association migration */
+#define TMUSEASYNC 0x00000004L /* resource manager supports
+ asynchronous operations */
+/*
+ * Flag definitions for xa_ and ax_ routines
+ */
+/* use TMNOFLAGS, defined above, when not specifying other flags */
+#define TMASYNC 0x80000000L /* perform routine asynchronously */
+#define TMONEPHASE 0x40000000L /* caller is using one-phase commit
+ optimisation */
+#define TMFAIL 0x20000000L /* dissociates caller and marks
+ transaction branch rollback-only */
+#define TMNOWAIT 0x10000000L /* return if blocking condition
+ exists */
+#define TMRESUME 0x08000000L /* caller is resuming association with
+ suspended transaction branch */
+#define TMSUCCESS 0x04000000L /* dissociate caller from transaction
+ branch */
+#define TMSUSPEND 0x02000000L /* caller is suspending, not ending,
+ association */
+#define TMSTARTRSCAN 0x01000000L /* start a recovery scan */
+#define TMENDRSCAN 0x00800000L /* end a recovery scan */
+#define TMMULTIPLE 0x00400000L /* wait for any asynchronous
+ operation */
+#define TMJOIN 0x00200000L /* caller is joining existing
+ transaction branch */
+#define TMMIGRATE 0x00100000L /* caller intends to perform
+ migration */
+
+/*
+ * ax_() return codes (transaction manager reports to resource manager)
+ */
+#define TM_JOIN 2 /* caller is joining existing
+ transaction branch */
+#define TM_RESUME 1 /* caller is resuming association with
+ suspended transaction branch */
+#define TM_OK 0 /* normal execution */
+#define TMER_TMERR -1 /* an error occurred in the transaction
+ manager */
+#define TMER_INVAL -2 /* invalid arguments were given */
+#define TMER_PROTO -3 /* routine invoked in an improper
+ context */
+
+/*
+ * xa_() return codes (resource manager reports to transaction manager)
+ */
+#define XA_RBBASE 100 /* The inclusive lower bound of the
+ rollback codes */
+#define XA_RBROLLBACK XA_RBBASE /* The rollback was caused by an
+ unspecified reason */
+#define XA_RBCOMMFAIL XA_RBBASE+1 /* The rollback was caused by a
+ communication failure */
+#define XA_RBDEADLOCK XA_RBBASE+2 /* A deadlock was detected */
+#define XA_RBINTEGRITY XA_RBBASE+3 /* A condition that violates the
+ integrity of the resources was
+ detected */
+#define XA_RBOTHER XA_RBBASE+4 /* The resource manager rolled back the
+ transaction branch for a reason not
+ on this list */
+#define XA_RBPROTO XA_RBBASE+5 /* A protocol error occurred in the
+ resource manager */
+#define XA_RBTIMEOUT XA_RBBASE+6 /* A transaction branch took too long */
+#define XA_RBTRANSIENT XA_RBBASE+7 /* May retry the transaction branch */
+#define XA_RBEND XA_RBTRANSIENT /* The inclusive upper bound of the
+ rollback codes */
+#define XA_NOMIGRATE 9 /* resumption must occur where
+ suspension occurred */
+#define XA_HEURHAZ 8 /* the transaction branch may have
+ been heuristically completed */
+#define XA_HEURCOM 7 /* the transaction branch has been
+ heuristically committed */
+#define XA_HEURRB 6 /* the transaction branch has been
+ heuristically rolled back */
+#define XA_HEURMIX 5 /* the transaction branch has been
+ heuristically committed and rolled
+ back */
+#define XA_RETRY 4 /* routine returned with no effect and
+ may be re-issued */
+#define XA_RDONLY 3 /* the transaction branch was read-only
+ and has been committed */
+#define XA_OK 0 /* normal execution */
+#define XAER_ASYNC -2 /* asynchronous operation already
+ outstanding */
+#define XAER_RMERR -3 /* a resource manager error occurred in
+ the transaction branch */
+#define XAER_NOTA -4 /* the XID is not valid */
+#define XAER_INVAL -5 /* invalid arguments were given */
+#define XAER_PROTO -6 /* routine invoked in an improper
+ context */
+#define XAER_RMFAIL -7 /* resource manager unavailable */
+#define XAER_DUPID -8 /* the XID already exists */
+#define XAER_OUTSIDE -9 /* resource manager doing work outside
+ transaction */
+#endif /* ifndef XA_H */
+/*
+ * End of xa.h header
+ */
diff --git a/libdb/dbinc_auto/btree_auto.h b/libdb/dbinc_auto/btree_auto.h
new file mode 100644
index 0000000..4feb07a
--- /dev/null
+++ b/libdb/dbinc_auto/btree_auto.h
@@ -0,0 +1,128 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __bam_AUTO_H
+#define __bam_AUTO_H
+#define DB___bam_split 62
+typedef struct ___bam_split_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t left;
+ DB_LSN llsn;
+ db_pgno_t right;
+ DB_LSN rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN nlsn;
+ db_pgno_t root_pgno;
+ DBT pg;
+ u_int32_t opflags;
+} __bam_split_args;
+
+#define DB___bam_rsplit 63
+typedef struct ___bam_rsplit_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT pgdbt;
+ db_pgno_t root_pgno;
+ db_pgno_t nrec;
+ DBT rootent;
+ DB_LSN rootlsn;
+} __bam_rsplit_args;
+
+#define DB___bam_adj 55
+typedef struct ___bam_adj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t indx_copy;
+ u_int32_t is_insert;
+} __bam_adj_args;
+
+#define DB___bam_cadjust 56
+typedef struct ___bam_cadjust_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ int32_t adjust;
+ u_int32_t opflags;
+} __bam_cadjust_args;
+
+#define DB___bam_cdel 57
+typedef struct ___bam_cdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+} __bam_cdel_args;
+
+#define DB___bam_repl 58
+typedef struct ___bam_repl_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t isdeleted;
+ DBT orig;
+ DBT repl;
+ u_int32_t prefix;
+ u_int32_t suffix;
+} __bam_repl_args;
+
+#define DB___bam_root 59
+typedef struct ___bam_root_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t meta_pgno;
+ db_pgno_t root_pgno;
+ DB_LSN meta_lsn;
+} __bam_root_args;
+
+#define DB___bam_curadj 64
+typedef struct ___bam_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ca_mode mode;
+ db_pgno_t from_pgno;
+ db_pgno_t to_pgno;
+ db_pgno_t left_pgno;
+ u_int32_t first_indx;
+ u_int32_t from_indx;
+ u_int32_t to_indx;
+} __bam_curadj_args;
+
+#define DB___bam_rcuradj 65
+typedef struct ___bam_rcuradj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ ca_recno_arg mode;
+ db_pgno_t root;
+ db_recno_t recno;
+ u_int32_t order;
+} __bam_rcuradj_args;
+
+#endif
diff --git a/libdb/dbinc_auto/btree_ext.h b/libdb/dbinc_auto/btree_ext.h
new file mode 100644
index 0000000..ec5468a
--- /dev/null
+++ b/libdb/dbinc_auto/btree_ext.h
@@ -0,0 +1,132 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _btree_ext_h_
+#define _btree_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __bam_cmp __P((DB *, const DBT *, PAGE *, u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __bam_mswap __P((PAGE *));
+void __bam_cprint __P((DBC *));
+int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+int __ram_ca_delete __P((DB *, db_pgno_t));
+int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+int __bam_ca_dup __P((DBC *, u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+int __bam_ca_undodup __P((DB *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+int __bam_ca_split __P((DBC *, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+void __bam_ca_undosplit __P((DB *, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+int __bam_c_init __P((DBC *, DBTYPE));
+int __bam_c_refresh __P((DBC *));
+int __bam_c_count __P((DBC *, db_recno_t *));
+int __bam_c_dup __P((DBC *, DBC *));
+int __bam_bulk_overflow __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *));
+int __bam_bulk_duplicates __P((DBC *, db_pgno_t, u_int8_t *, int32_t *, int32_t **, u_int8_t **, u_int32_t *, int));
+int __bam_c_rget __P((DBC *, DBT *));
+int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+int __bam_dpages __P((DBC *, EPG *));
+int __bam_db_create __P((DB *));
+int __bam_db_close __P((DB *));
+int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+int __bam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t));
+int __bam_metachk __P((DB *, const char *, BTMETA *));
+int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
+int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
+int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+int __bam_split_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_reclaim __P((DB *, DB_TXN *));
+int __bam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+int __ram_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t));
+int __ram_append __P((DBC *, DBT *, DBT *));
+int __ram_c_del __P((DBC *));
+int __ram_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_ca __P((DBC *, ca_recno_arg));
+int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+int __ram_writeback __P((DB *));
+int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+int __bam_adjust __P((DBC *, int32_t));
+int __bam_nrecs __P((DBC *, db_recno_t *));
+db_recno_t __bam_total __P((DB *, PAGE *));
+int __bam_search __P((DBC *, db_pgno_t, const DBT *, u_int32_t, int, db_recno_t *, int *));
+int __bam_stkrel __P((DBC *, u_int32_t));
+int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+int __bam_split __P((DBC *, void *, db_pgno_t *));
+int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+int __bam_stat __P((DB *, void *, u_int32_t));
+int __bam_traverse __P((DBC *, db_lockmode_t, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+int __bam_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+int __bam_31_btreemeta __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_31_lbtree __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *, db_pgno_t, u_int32_t));
+int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t, int, int, u_int32_t));
+int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *, void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, PAGE *, void *, int (*)(void *, const void *), DBT *, u_int32_t));
+int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *, DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *, u_int32_t, DB *));
+int __bam_split_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, u_int32_t));
+int __bam_split_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **));
+int __bam_rsplit_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *));
+int __bam_rsplit_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **));
+int __bam_adj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, u_int32_t));
+int __bam_adj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **));
+int __bam_cadjust_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, int32_t, u_int32_t));
+int __bam_cadjust_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_read __P((DB_ENV *, void *, __bam_cadjust_args **));
+int __bam_cdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t));
+int __bam_cdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **));
+int __bam_repl_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, u_int32_t, u_int32_t));
+int __bam_repl_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **));
+int __bam_root_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, db_pgno_t, DB_LSN *));
+int __bam_root_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **));
+int __bam_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ca_mode, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t));
+int __bam_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **));
+int __bam_rcuradj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, ca_recno_arg, db_pgno_t, db_recno_t, u_int32_t));
+int __bam_rcuradj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_read __P((DB_ENV *, void *, __bam_rcuradj_args **));
+int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __bam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __bam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_btree_ext_h_ */
diff --git a/libdb/dbinc_auto/clib_ext.h b/libdb/dbinc_auto/clib_ext.h
new file mode 100644
index 0000000..7e2817d
--- /dev/null
+++ b/libdb/dbinc_auto/clib_ext.h
@@ -0,0 +1,49 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _clib_ext_h_
+#define _clib_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef HAVE_GETCWD
+char *getcwd __P((char *, size_t));
+#endif
+#ifndef HAVE_GETOPT
+int getopt __P((int, char * const *, const char *));
+#endif
+#ifndef HAVE_MEMCMP
+int memcmp __P((const void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMCPY
+void *memcpy __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMMOVE
+void *memmove __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_RAISE
+int raise __P((int));
+#endif
+#ifndef HAVE_SNPRINTF
+int snprintf __P((char *, size_t, const char *, ...));
+#endif
+#ifndef HAVE_STRCASECMP
+int strcasecmp __P((const char *, const char *));
+#endif
+#ifndef HAVE_STRCASECMP
+int strncasecmp __P((const char *, const char *, size_t));
+#endif
+#ifndef HAVE_STRDUP
+char *strdup __P((const char *));
+#endif
+#ifndef HAVE_STRERROR
+char *strerror __P((int));
+#endif
+#ifndef HAVE_VSNPRINTF
+int vsnprintf __P((char *, size_t, const char *, va_list));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_clib_ext_h_ */
diff --git a/libdb/dbinc_auto/common_ext.h b/libdb/dbinc_auto/common_ext.h
new file mode 100644
index 0000000..7744982
--- /dev/null
+++ b/libdb/dbinc_auto/common_ext.h
@@ -0,0 +1,44 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _common_ext_h_
+#define _common_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_isbigendian __P((void));
+int __db_byteorder __P((DB_ENV *, int));
+int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+int __db_fcchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+int __db_ferr __P((const DB_ENV *, const char *, int));
+void __db_pgerr __P((DB *, db_pgno_t, int));
+int __db_pgfmt __P((DB_ENV *, db_pgno_t));
+int __db_eopnotsup __P((const DB_ENV *));
+#ifdef DIAGNOSTIC
+void __db_assert __P((const char *, const char *, int));
+#endif
+int __db_panic_msg __P((DB_ENV *));
+int __db_panic __P((DB_ENV *, int));
+void __db_err __P((const DB_ENV *, const char *, ...));
+void __db_errcall __P((const DB_ENV *, int, int, const char *, va_list));
+void __db_errfile __P((const DB_ENV *, int, int, const char *, va_list));
+void __db_logmsg __P((const DB_ENV *, DB_TXN *, const char *, u_int32_t, const char *, ...));
+int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+int __db_unknown_type __P((DB_ENV *, char *, DBTYPE));
+int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int));
+int __db_not_txn_env __P((DB_ENV *));
+int __db_getlong __P((DB *, const char *, char *, long, long, long *));
+int __db_getulong __P((DB *, const char *, char *, u_long, u_long, u_long *));
+void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *));
+u_int32_t __db_log2 __P((u_int32_t));
+int __db_util_arg __P((char *, char *, int *, char ***));
+int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *));
+int __db_util_logset __P((const char *, char *));
+void __db_util_siginit __P((void));
+int __db_util_interrupted __P((void));
+void __db_util_sigresend __P((void));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_common_ext_h_ */
diff --git a/libdb/dbinc_auto/crdel_auto.h b/libdb/dbinc_auto/crdel_auto.h
new file mode 100644
index 0000000..bdae193
--- /dev/null
+++ b/libdb/dbinc_auto/crdel_auto.h
@@ -0,0 +1,16 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __crdel_AUTO_H
+#define __crdel_AUTO_H
+#define DB___crdel_metasub 142
+typedef struct ___crdel_metasub_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT page;
+ DB_LSN lsn;
+} __crdel_metasub_args;
+
+#endif
diff --git a/libdb/dbinc_auto/crypto_ext.h b/libdb/dbinc_auto/crypto_ext.h
new file mode 100644
index 0000000..e37a895
--- /dev/null
+++ b/libdb/dbinc_auto/crypto_ext.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _crypto_ext_h_
+#define _crypto_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __aes_setup __P((DB_ENV *, DB_CIPHER *));
+int __aes_adj_size __P((size_t));
+int __aes_close __P((DB_ENV *, void *));
+int __aes_decrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+int __aes_encrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+int __aes_init __P((DB_ENV *, DB_CIPHER *));
+int __crypto_region_init __P((DB_ENV *));
+int __crypto_dbenv_close __P((DB_ENV *));
+int __crypto_algsetup __P((DB_ENV *, DB_CIPHER *, u_int32_t, int));
+int __crypto_decrypt_meta __P((DB_ENV *, DB *, u_int8_t *, int));
+int __db_generate_iv __P((DB_ENV *, u_int32_t *));
+int __db_rijndaelKeySetupEnc __P((u32 *, const u8 *, int));
+int __db_rijndaelKeySetupDec __P((u32 *, const u8 *, int));
+void __db_rijndaelEncrypt __P((u32 *, int, const u8 *, u8 *));
+void __db_rijndaelDecrypt __P((u32 *, int, const u8 *, u8 *));
+void __db_rijndaelEncryptRound __P((const u32 *, int, u8 *, int));
+void __db_rijndaelDecryptRound __P((const u32 *, int, u8 *, int));
+int __db_makeKey __P((keyInstance *, int, int, char *));
+int __db_cipherInit __P((cipherInstance *, int, char *));
+int __db_blockEncrypt __P((cipherInstance *, keyInstance *, BYTE *, size_t, BYTE *));
+int __db_padEncrypt __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *));
+int __db_blockDecrypt __P((cipherInstance *, keyInstance *, BYTE *, size_t, BYTE *));
+int __db_padDecrypt __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *));
+int __db_cipherUpdateRounds __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_crypto_ext_h_ */
diff --git a/libdb/dbinc_auto/db_auto.h b/libdb/dbinc_auto/db_auto.h
new file mode 100644
index 0000000..e56f38b
--- /dev/null
+++ b/libdb/dbinc_auto/db_auto.h
@@ -0,0 +1,118 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __db_AUTO_H
+#define __db_AUTO_H
+#define DB___db_addrem 41
+typedef struct ___db_addrem_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t nbytes;
+ DBT hdr;
+ DBT dbt;
+ DB_LSN pagelsn;
+} __db_addrem_args;
+
+#define DB___db_big 43
+typedef struct ___db_big_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ DBT dbt;
+ DB_LSN pagelsn;
+ DB_LSN prevlsn;
+ DB_LSN nextlsn;
+} __db_big_args;
+
+#define DB___db_ovref 44
+typedef struct ___db_ovref_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t adjust;
+ DB_LSN lsn;
+} __db_ovref_args;
+
+#define DB___db_relink 45
+typedef struct ___db_relink_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ db_pgno_t prev;
+ DB_LSN lsn_prev;
+ db_pgno_t next;
+ DB_LSN lsn_next;
+} __db_relink_args;
+
+#define DB___db_debug 47
+typedef struct ___db_debug_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT op;
+ int32_t fileid;
+ DBT key;
+ DBT data;
+ u_int32_t arg_flags;
+} __db_debug_args;
+
+#define DB___db_noop 48
+typedef struct ___db_noop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN prevlsn;
+} __db_noop_args;
+
+#define DB___db_pg_alloc 49
+typedef struct ___db_pg_alloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ db_pgno_t meta_pgno;
+ DB_LSN page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+} __db_pg_alloc_args;
+
+#define DB___db_pg_free 50
+typedef struct ___db_pg_free_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN meta_lsn;
+ db_pgno_t meta_pgno;
+ DBT header;
+ db_pgno_t next;
+} __db_pg_free_args;
+
+#define DB___db_cksum 51
+typedef struct ___db_cksum_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+} __db_cksum_args;
+
+#endif
diff --git a/libdb/dbinc_auto/db_ext.h b/libdb/dbinc_auto/db_ext.h
new file mode 100644
index 0000000..24a1397
--- /dev/null
+++ b/libdb/dbinc_auto/db_ext.h
@@ -0,0 +1,224 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _db_ext_h_
+#define _db_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __crdel_metasub_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __crdel_metasub_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_read __P((DB_ENV *, void *, __crdel_metasub_args **));
+int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_metasub_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_master_open __P((DB *, DB_TXN *, const char *, u_int32_t, int, DB **));
+int __db_master_update __P((DB *, DB *, DB_TXN *, const char *, DBTYPE, mu_action, const char *, u_int32_t));
+int __db_dbenv_setup __P((DB *, DB_TXN *, const char *, u_int32_t, u_int32_t));
+int __db_close __P((DB *, u_int32_t));
+int __db_close_i __P((DB *, DB_TXN *, u_int32_t));
+int __db_refresh __P((DB *, DB_TXN *, u_int32_t));
+int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *));
+int __db_backup_name __P((DB_ENV *, const char *, DB_TXN *, char **));
+DB *__dblist_get __P((DB_ENV *, u_int32_t));
+#if CONFIG_TEST
+int __db_testcopy __P((DB_ENV *, DB *, const char *));
+#endif
+int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __db_icursor __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **));
+int __db_cprint __P((DB *));
+int __db_fd __P((DB *, int *));
+int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __db_sync __P((DB *, u_int32_t));
+int __db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+int __db_addrem_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, const DBT *, const DBT *, DB_LSN *));
+int __db_addrem_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **));
+int __db_big_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *, DB_LSN *, DB_LSN *));
+int __db_big_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_read __P((DB_ENV *, void *, __db_big_args **));
+int __db_ovref_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, int32_t, DB_LSN *));
+int __db_ovref_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **));
+int __db_relink_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __db_relink_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **));
+int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __db_debug_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **));
+int __db_noop_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *));
+int __db_noop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_read __P((DB_ENV *, void *, __db_noop_args **));
+int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __db_pg_alloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_read __P((DB_ENV *, void *, __db_pg_alloc_args **));
+int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t));
+int __db_pg_free_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_read __P((DB_ENV *, void *, __db_pg_free_args **));
+int __db_cksum_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t));
+int __db_cksum_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_read __P((DB_ENV *, void *, __db_cksum_args **));
+int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_c_close __P((DBC *));
+int __db_c_destroy __P((DBC *));
+int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+int __db_c_del __P((DBC *, u_int32_t));
+int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **));
+int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_duperr __P((DB *, u_int32_t));
+int __db_c_secondary_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+int __db_c_del_primary __P((DBC *));
+DB *__db_s_first __P((DB *));
+int __db_s_next __P((DB **));
+int __db_s_done __P((DB *));
+u_int32_t __db_partsize __P((u_int32_t, DBT *));
+int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+void __db_metaswap __P((PAGE *));
+int __db_byteswap __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int));
+int __db_dispatch __P((DB_ENV *, int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)), size_t, DBT *, DB_LSN *, db_recops, void *));
+int __db_add_recovery __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+int __db_txnlist_init __P((DB_ENV *, u_int32_t, u_int32_t, DB_LSN *, void *));
+int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t, DB_LSN *));
+int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
+void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
+void __db_txnlist_end __P((DB_ENV *, void *));
+int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t));
+int __db_txnlist_update __P((DB_ENV *, void *, u_int32_t, u_int32_t, DB_LSN *));
+int __db_txnlist_gen __P((DB_ENV *, void *, int, u_int32_t, u_int32_t));
+int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+int __db_add_limbo __P((DB_ENV *, void *, int32_t, db_pgno_t, int32_t));
+int __db_do_the_limbo __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNHEAD *));
+void __db_txnlist_print __P((void *));
+int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+int __db_pitem __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+int __db_cursorchk __P((const DB *, u_int32_t));
+int __db_ccountchk __P((const DB *, u_int32_t, int));
+int __db_cdelchk __P((const DB *, u_int32_t, int));
+int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+int __db_cputchk __P((const DB *, const DBT *, DBT *, u_int32_t, int));
+int __db_pgetchk __P((const DB *, const DBT *, DBT *, DBT *, u_int32_t));
+int __db_cpgetchk __P((const DB *, DBT *, DBT *, DBT *, u_int32_t, int));
+int __db_delchk __P((const DB *, DBT *, u_int32_t));
+int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+int __db_putchk __P((const DB *, DBT *, const DBT *, u_int32_t, int));
+int __db_statchk __P((const DB *, u_int32_t));
+int __db_syncchk __P((const DB *, u_int32_t));
+int __db_secondary_corrupt __P((DB *));
+int __db_associatechk __P((DB *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __db_txn_auto __P((DB *, DB_TXN **));
+int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __db_new __P((DBC *, u_int32_t, PAGE **));
+int __db_free __P((DBC *, PAGE *));
+int __db_lprint __P((DBC *));
+int __db_lget __P((DBC *, int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *));
+int __db_lput __P((DBC *, DB_LOCK *));
+int __dbh_am_chk __P((DB *, u_int32_t));
+int __db_set_lorder __P((DB *, int));
+int __db_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __db_dbopen __P((DB *, DB_TXN *, const char *, const char *, u_int32_t, int, db_pgno_t));
+int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *));
+int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int));
+int __db_meta_setup __P((DB_ENV *, DB *, const char *, DBMETA *, u_int32_t, int));
+int __db_goff __P((DB *, DBT *, u_int32_t, db_pgno_t, void **, u_int32_t *));
+int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+int __db_doff __P((DBC *, db_pgno_t));
+int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __db_vrfy_ovfl_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t, DBT *, void **, u_int32_t));
+void __db_loadme __P((void));
+int __db_dump __P((DB *, char *, char *));
+void __db_inmemdbflags __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)));
+int __db_prnpage __P((DB *, db_pgno_t, FILE *));
+int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t));
+void __db_pr __P((u_int8_t *, u_int32_t, FILE *));
+int __db_prdbt __P((DBT *, int, const char *, void *, int (*)(void *, const void *), int, VRFY_DBINFO *));
+void __db_prflags __P((u_int32_t, const FN *, void *));
+const char * __db_dbtype_to_string __P((DBTYPE));
+int __db_prheader __P((DB *, char *, int, int, void *, int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+int __db_prfooter __P((void *, int (*)(void *, const void *)));
+int __db_addrem_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_traverse_big __P((DB *, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+int __db_truncate_callback __P((DB *, PAGE *, void *, int *));
+int __dbenv_dbremove __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t));
+int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __db_remove_i __P((DB *, DB_TXN *, const char *, const char *));
+int __dbenv_dbrename __P((DB_ENV *, DB_TXN *, const char *, const char *, const char *, u_int32_t));
+int __db_rename __P((DB *, const char *, const char *, const char *, u_int32_t));
+int __db_rename_i __P((DB *, DB_TXN *, const char *, const char *, const char *));
+int __db_ret __P((DB *, PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+int __db_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t *));
+int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+int __db_upgrade __P((DB *, const char *, u_int32_t));
+int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+int __db_verify __P((DB *, const char *, const char *, FILE *, u_int32_t));
+int __db_verify_callback __P((void *, const void *));
+int __db_verify_internal __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_datapage __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __db_vrfy_meta __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+int __db_vrfy_inpitem __P((DB *, PAGE *, db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
+int __db_vrfy_duptype __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t, DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_dbinfo_create __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
+int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *));
+int __db_vrfy_getpageinfo __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+int __db_vrfy_putpageinfo __P((DB_ENV *, VRFY_DBINFO *, VRFY_PAGEINFO *));
+int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+int __db_vrfy_childput __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+int __db_vrfy_ccclose __P((DBC *));
+int __db_salvage_init __P((VRFY_DBINFO *));
+void __db_salvage_destroy __P((VRFY_DBINFO *));
+int __db_salvage_getnext __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markneeded __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_db_ext_h_ */
diff --git a/libdb/dbinc_auto/db_server.h b/libdb/dbinc_auto/db_server.h
new file mode 100644
index 0000000..3409eed
--- /dev/null
+++ b/libdb/dbinc_auto/db_server.h
@@ -0,0 +1,1006 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _DB_SERVER_H_RPCGEN
+#define _DB_SERVER_H_RPCGEN
+
+
+struct __env_cachesize_msg {
+ u_int dbenvcl_id;
+ u_int gbytes;
+ u_int bytes;
+ u_int ncache;
+};
+typedef struct __env_cachesize_msg __env_cachesize_msg;
+
+struct __env_cachesize_reply {
+ int status;
+};
+typedef struct __env_cachesize_reply __env_cachesize_reply;
+
+struct __env_close_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+};
+typedef struct __env_close_msg __env_close_msg;
+
+struct __env_close_reply {
+ int status;
+};
+typedef struct __env_close_reply __env_close_reply;
+
+struct __env_create_msg {
+ u_int timeout;
+};
+typedef struct __env_create_msg __env_create_msg;
+
+struct __env_create_reply {
+ int status;
+ u_int envcl_id;
+};
+typedef struct __env_create_reply __env_create_reply;
+
+struct __env_dbremove_msg {
+ u_int dbenvcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int flags;
+};
+typedef struct __env_dbremove_msg __env_dbremove_msg;
+
+struct __env_dbremove_reply {
+ int status;
+};
+typedef struct __env_dbremove_reply __env_dbremove_reply;
+
+struct __env_dbrename_msg {
+ u_int dbenvcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int flags;
+};
+typedef struct __env_dbrename_msg __env_dbrename_msg;
+
+struct __env_dbrename_reply {
+ int status;
+};
+typedef struct __env_dbrename_reply __env_dbrename_reply;
+
+struct __env_encrypt_msg {
+ u_int dbenvcl_id;
+ char *passwd;
+ u_int flags;
+};
+typedef struct __env_encrypt_msg __env_encrypt_msg;
+
+struct __env_encrypt_reply {
+ int status;
+};
+typedef struct __env_encrypt_reply __env_encrypt_reply;
+
+struct __env_flags_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+ u_int onoff;
+};
+typedef struct __env_flags_msg __env_flags_msg;
+
+struct __env_flags_reply {
+ int status;
+};
+typedef struct __env_flags_reply __env_flags_reply;
+
+struct __env_open_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __env_open_msg __env_open_msg;
+
+struct __env_open_reply {
+ int status;
+ u_int envcl_id;
+};
+typedef struct __env_open_reply __env_open_reply;
+
+struct __env_remove_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+};
+typedef struct __env_remove_msg __env_remove_msg;
+
+struct __env_remove_reply {
+ int status;
+};
+typedef struct __env_remove_reply __env_remove_reply;
+
+struct __txn_abort_msg {
+ u_int txnpcl_id;
+};
+typedef struct __txn_abort_msg __txn_abort_msg;
+
+struct __txn_abort_reply {
+ int status;
+};
+typedef struct __txn_abort_reply __txn_abort_reply;
+
+struct __txn_begin_msg {
+ u_int dbenvcl_id;
+ u_int parentcl_id;
+ u_int flags;
+};
+typedef struct __txn_begin_msg __txn_begin_msg;
+
+struct __txn_begin_reply {
+ int status;
+ u_int txnidcl_id;
+};
+typedef struct __txn_begin_reply __txn_begin_reply;
+
+struct __txn_commit_msg {
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __txn_commit_msg __txn_commit_msg;
+
+struct __txn_commit_reply {
+ int status;
+};
+typedef struct __txn_commit_reply __txn_commit_reply;
+
+struct __txn_discard_msg {
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __txn_discard_msg __txn_discard_msg;
+
+struct __txn_discard_reply {
+ int status;
+};
+typedef struct __txn_discard_reply __txn_discard_reply;
+
+struct __txn_prepare_msg {
+ u_int txnpcl_id;
+ char gid[128];
+};
+typedef struct __txn_prepare_msg __txn_prepare_msg;
+
+struct __txn_prepare_reply {
+ int status;
+};
+typedef struct __txn_prepare_reply __txn_prepare_reply;
+
+struct __txn_recover_msg {
+ u_int dbenvcl_id;
+ u_int count;
+ u_int flags;
+};
+typedef struct __txn_recover_msg __txn_recover_msg;
+
+struct __txn_recover_reply {
+ int status;
+ struct {
+ u_int txn_len;
+ u_int *txn_val;
+ } txn;
+ struct {
+ u_int gid_len;
+ char *gid_val;
+ } gid;
+ u_int retcount;
+};
+typedef struct __txn_recover_reply __txn_recover_reply;
+
+struct __db_associate_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int sdbpcl_id;
+ u_int flags;
+};
+typedef struct __db_associate_msg __db_associate_msg;
+
+struct __db_associate_reply {
+ int status;
+};
+typedef struct __db_associate_reply __db_associate_reply;
+
+struct __db_bt_maxkey_msg {
+ u_int dbpcl_id;
+ u_int maxkey;
+};
+typedef struct __db_bt_maxkey_msg __db_bt_maxkey_msg;
+
+struct __db_bt_maxkey_reply {
+ int status;
+};
+typedef struct __db_bt_maxkey_reply __db_bt_maxkey_reply;
+
+struct __db_bt_minkey_msg {
+ u_int dbpcl_id;
+ u_int minkey;
+};
+typedef struct __db_bt_minkey_msg __db_bt_minkey_msg;
+
+struct __db_bt_minkey_reply {
+ int status;
+};
+typedef struct __db_bt_minkey_reply __db_bt_minkey_reply;
+
+struct __db_close_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_close_msg __db_close_msg;
+
+struct __db_close_reply {
+ int status;
+};
+typedef struct __db_close_reply __db_close_reply;
+
+struct __db_create_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+};
+typedef struct __db_create_msg __db_create_msg;
+
+struct __db_create_reply {
+ int status;
+ u_int dbcl_id;
+};
+typedef struct __db_create_reply __db_create_reply;
+
+struct __db_del_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_del_msg __db_del_msg;
+
+struct __db_del_reply {
+ int status;
+};
+typedef struct __db_del_reply __db_del_reply;
+
+struct __db_encrypt_msg {
+ u_int dbpcl_id;
+ char *passwd;
+ u_int flags;
+};
+typedef struct __db_encrypt_msg __db_encrypt_msg;
+
+struct __db_encrypt_reply {
+ int status;
+};
+typedef struct __db_encrypt_reply __db_encrypt_reply;
+
+struct __db_extentsize_msg {
+ u_int dbpcl_id;
+ u_int extentsize;
+};
+typedef struct __db_extentsize_msg __db_extentsize_msg;
+
+struct __db_extentsize_reply {
+ int status;
+};
+typedef struct __db_extentsize_reply __db_extentsize_reply;
+
+struct __db_flags_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_flags_msg __db_flags_msg;
+
+struct __db_flags_reply {
+ int status;
+};
+typedef struct __db_flags_reply __db_flags_reply;
+
+struct __db_get_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_get_msg __db_get_msg;
+
+struct __db_get_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __db_get_reply __db_get_reply;
+
+struct __db_h_ffactor_msg {
+ u_int dbpcl_id;
+ u_int ffactor;
+};
+typedef struct __db_h_ffactor_msg __db_h_ffactor_msg;
+
+struct __db_h_ffactor_reply {
+ int status;
+};
+typedef struct __db_h_ffactor_reply __db_h_ffactor_reply;
+
+struct __db_h_nelem_msg {
+ u_int dbpcl_id;
+ u_int nelem;
+};
+typedef struct __db_h_nelem_msg __db_h_nelem_msg;
+
+struct __db_h_nelem_reply {
+ int status;
+};
+typedef struct __db_h_nelem_reply __db_h_nelem_reply;
+
+struct __db_key_range_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_key_range_msg __db_key_range_msg;
+
+struct __db_key_range_reply {
+ int status;
+ double less;
+ double equal;
+ double greater;
+};
+typedef struct __db_key_range_reply __db_key_range_reply;
+
+struct __db_lorder_msg {
+ u_int dbpcl_id;
+ u_int lorder;
+};
+typedef struct __db_lorder_msg __db_lorder_msg;
+
+struct __db_lorder_reply {
+ int status;
+};
+typedef struct __db_lorder_reply __db_lorder_reply;
+
+struct __db_open_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int type;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __db_open_msg __db_open_msg;
+
+struct __db_open_reply {
+ int status;
+ u_int dbcl_id;
+ u_int type;
+ u_int dbflags;
+ u_int lorder;
+};
+typedef struct __db_open_reply __db_open_reply;
+
+struct __db_pagesize_msg {
+ u_int dbpcl_id;
+ u_int pagesize;
+};
+typedef struct __db_pagesize_msg __db_pagesize_msg;
+
+struct __db_pagesize_reply {
+ int status;
+};
+typedef struct __db_pagesize_reply __db_pagesize_reply;
+
+struct __db_pget_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int skeydlen;
+ u_int skeydoff;
+ u_int skeyulen;
+ u_int skeyflags;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ u_int pkeydlen;
+ u_int pkeydoff;
+ u_int pkeyulen;
+ u_int pkeyflags;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_pget_msg __db_pget_msg;
+
+struct __db_pget_reply {
+ int status;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __db_pget_reply __db_pget_reply;
+
+struct __db_put_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_put_msg __db_put_msg;
+
+struct __db_put_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __db_put_reply __db_put_reply;
+
+struct __db_re_delim_msg {
+ u_int dbpcl_id;
+ u_int delim;
+};
+typedef struct __db_re_delim_msg __db_re_delim_msg;
+
+struct __db_re_delim_reply {
+ int status;
+};
+typedef struct __db_re_delim_reply __db_re_delim_reply;
+
+struct __db_re_len_msg {
+ u_int dbpcl_id;
+ u_int len;
+};
+typedef struct __db_re_len_msg __db_re_len_msg;
+
+struct __db_re_len_reply {
+ int status;
+};
+typedef struct __db_re_len_reply __db_re_len_reply;
+
+struct __db_re_pad_msg {
+ u_int dbpcl_id;
+ u_int pad;
+};
+typedef struct __db_re_pad_msg __db_re_pad_msg;
+
+struct __db_re_pad_reply {
+ int status;
+};
+typedef struct __db_re_pad_reply __db_re_pad_reply;
+
+struct __db_remove_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int flags;
+};
+typedef struct __db_remove_msg __db_remove_msg;
+
+struct __db_remove_reply {
+ int status;
+};
+typedef struct __db_remove_reply __db_remove_reply;
+
+struct __db_rename_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int flags;
+};
+typedef struct __db_rename_msg __db_rename_msg;
+
+struct __db_rename_reply {
+ int status;
+};
+typedef struct __db_rename_reply __db_rename_reply;
+
+struct __db_stat_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_stat_msg __db_stat_msg;
+
+struct __db_stat_reply {
+ int status;
+ struct {
+ u_int stats_len;
+ u_int *stats_val;
+ } stats;
+};
+typedef struct __db_stat_reply __db_stat_reply;
+
+struct __db_sync_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_sync_msg __db_sync_msg;
+
+struct __db_sync_reply {
+ int status;
+};
+typedef struct __db_sync_reply __db_sync_reply;
+
+struct __db_truncate_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __db_truncate_msg __db_truncate_msg;
+
+struct __db_truncate_reply {
+ int status;
+ u_int count;
+};
+typedef struct __db_truncate_reply __db_truncate_reply;
+
+struct __db_cursor_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __db_cursor_msg __db_cursor_msg;
+
+struct __db_cursor_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_cursor_reply __db_cursor_reply;
+
+struct __db_join_msg {
+ u_int dbpcl_id;
+ struct {
+ u_int curs_len;
+ u_int *curs_val;
+ } curs;
+ u_int flags;
+};
+typedef struct __db_join_msg __db_join_msg;
+
+struct __db_join_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_join_reply __db_join_reply;
+
+struct __dbc_close_msg {
+ u_int dbccl_id;
+};
+typedef struct __dbc_close_msg __dbc_close_msg;
+
+struct __dbc_close_reply {
+ int status;
+};
+typedef struct __dbc_close_reply __dbc_close_reply;
+
+struct __dbc_count_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_count_msg __dbc_count_msg;
+
+struct __dbc_count_reply {
+ int status;
+ u_int dupcount;
+};
+typedef struct __dbc_count_reply __dbc_count_reply;
+
+struct __dbc_del_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_del_msg __dbc_del_msg;
+
+struct __dbc_del_reply {
+ int status;
+};
+typedef struct __dbc_del_reply __dbc_del_reply;
+
+struct __dbc_dup_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_dup_msg __dbc_dup_msg;
+
+struct __dbc_dup_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __dbc_dup_reply __dbc_dup_reply;
+
+struct __dbc_get_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_get_msg __dbc_get_msg;
+
+struct __dbc_get_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __dbc_get_reply __dbc_get_reply;
+
+struct __dbc_pget_msg {
+ u_int dbccl_id;
+ u_int skeydlen;
+ u_int skeydoff;
+ u_int skeyulen;
+ u_int skeyflags;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ u_int pkeydlen;
+ u_int pkeydoff;
+ u_int pkeyulen;
+ u_int pkeyflags;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_pget_msg __dbc_pget_msg;
+
+struct __dbc_pget_reply {
+ int status;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __dbc_pget_reply __dbc_pget_reply;
+
+struct __dbc_put_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_put_msg __dbc_put_msg;
+
+struct __dbc_put_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __dbc_put_reply __dbc_put_reply;
+
+#define __DB_env_cachesize ((unsigned long)(1))
+extern __env_cachesize_reply * __db_env_cachesize_4001();
+#define __DB_env_close ((unsigned long)(2))
+extern __env_close_reply * __db_env_close_4001();
+#define __DB_env_create ((unsigned long)(3))
+extern __env_create_reply * __db_env_create_4001();
+#define __DB_env_dbremove ((unsigned long)(4))
+extern __env_dbremove_reply * __db_env_dbremove_4001();
+#define __DB_env_dbrename ((unsigned long)(5))
+extern __env_dbrename_reply * __db_env_dbrename_4001();
+#define __DB_env_encrypt ((unsigned long)(6))
+extern __env_encrypt_reply * __db_env_encrypt_4001();
+#define __DB_env_flags ((unsigned long)(7))
+extern __env_flags_reply * __db_env_flags_4001();
+#define __DB_env_open ((unsigned long)(8))
+extern __env_open_reply * __db_env_open_4001();
+#define __DB_env_remove ((unsigned long)(9))
+extern __env_remove_reply * __db_env_remove_4001();
+#define __DB_txn_abort ((unsigned long)(10))
+extern __txn_abort_reply * __db_txn_abort_4001();
+#define __DB_txn_begin ((unsigned long)(11))
+extern __txn_begin_reply * __db_txn_begin_4001();
+#define __DB_txn_commit ((unsigned long)(12))
+extern __txn_commit_reply * __db_txn_commit_4001();
+#define __DB_txn_discard ((unsigned long)(13))
+extern __txn_discard_reply * __db_txn_discard_4001();
+#define __DB_txn_prepare ((unsigned long)(14))
+extern __txn_prepare_reply * __db_txn_prepare_4001();
+#define __DB_txn_recover ((unsigned long)(15))
+extern __txn_recover_reply * __db_txn_recover_4001();
+#define __DB_db_associate ((unsigned long)(16))
+extern __db_associate_reply * __db_db_associate_4001();
+#define __DB_db_bt_maxkey ((unsigned long)(17))
+extern __db_bt_maxkey_reply * __db_db_bt_maxkey_4001();
+#define __DB_db_bt_minkey ((unsigned long)(18))
+extern __db_bt_minkey_reply * __db_db_bt_minkey_4001();
+#define __DB_db_close ((unsigned long)(19))
+extern __db_close_reply * __db_db_close_4001();
+#define __DB_db_create ((unsigned long)(20))
+extern __db_create_reply * __db_db_create_4001();
+#define __DB_db_del ((unsigned long)(21))
+extern __db_del_reply * __db_db_del_4001();
+#define __DB_db_encrypt ((unsigned long)(22))
+extern __db_encrypt_reply * __db_db_encrypt_4001();
+#define __DB_db_extentsize ((unsigned long)(23))
+extern __db_extentsize_reply * __db_db_extentsize_4001();
+#define __DB_db_flags ((unsigned long)(24))
+extern __db_flags_reply * __db_db_flags_4001();
+#define __DB_db_get ((unsigned long)(25))
+extern __db_get_reply * __db_db_get_4001();
+#define __DB_db_h_ffactor ((unsigned long)(26))
+extern __db_h_ffactor_reply * __db_db_h_ffactor_4001();
+#define __DB_db_h_nelem ((unsigned long)(27))
+extern __db_h_nelem_reply * __db_db_h_nelem_4001();
+#define __DB_db_key_range ((unsigned long)(28))
+extern __db_key_range_reply * __db_db_key_range_4001();
+#define __DB_db_lorder ((unsigned long)(29))
+extern __db_lorder_reply * __db_db_lorder_4001();
+#define __DB_db_open ((unsigned long)(30))
+extern __db_open_reply * __db_db_open_4001();
+#define __DB_db_pagesize ((unsigned long)(31))
+extern __db_pagesize_reply * __db_db_pagesize_4001();
+#define __DB_db_pget ((unsigned long)(32))
+extern __db_pget_reply * __db_db_pget_4001();
+#define __DB_db_put ((unsigned long)(33))
+extern __db_put_reply * __db_db_put_4001();
+#define __DB_db_re_delim ((unsigned long)(34))
+extern __db_re_delim_reply * __db_db_re_delim_4001();
+#define __DB_db_re_len ((unsigned long)(35))
+extern __db_re_len_reply * __db_db_re_len_4001();
+#define __DB_db_re_pad ((unsigned long)(36))
+extern __db_re_pad_reply * __db_db_re_pad_4001();
+#define __DB_db_remove ((unsigned long)(37))
+extern __db_remove_reply * __db_db_remove_4001();
+#define __DB_db_rename ((unsigned long)(38))
+extern __db_rename_reply * __db_db_rename_4001();
+#define __DB_db_stat ((unsigned long)(39))
+extern __db_stat_reply * __db_db_stat_4001();
+#define __DB_db_sync ((unsigned long)(40))
+extern __db_sync_reply * __db_db_sync_4001();
+#define __DB_db_truncate ((unsigned long)(41))
+extern __db_truncate_reply * __db_db_truncate_4001();
+#define __DB_db_cursor ((unsigned long)(42))
+extern __db_cursor_reply * __db_db_cursor_4001();
+#define __DB_db_join ((unsigned long)(43))
+extern __db_join_reply * __db_db_join_4001();
+#define __DB_dbc_close ((unsigned long)(44))
+extern __dbc_close_reply * __db_dbc_close_4001();
+#define __DB_dbc_count ((unsigned long)(45))
+extern __dbc_count_reply * __db_dbc_count_4001();
+#define __DB_dbc_del ((unsigned long)(46))
+extern __dbc_del_reply * __db_dbc_del_4001();
+#define __DB_dbc_dup ((unsigned long)(47))
+extern __dbc_dup_reply * __db_dbc_dup_4001();
+#define __DB_dbc_get ((unsigned long)(48))
+extern __dbc_get_reply * __db_dbc_get_4001();
+#define __DB_dbc_pget ((unsigned long)(49))
+extern __dbc_pget_reply * __db_dbc_pget_4001();
+#define __DB_dbc_put ((unsigned long)(50))
+extern __dbc_put_reply * __db_dbc_put_4001();
+extern int db_rpc_serverprog_4001_freeresult();
+
+/* the xdr functions */
+extern bool_t xdr___env_cachesize_msg();
+extern bool_t xdr___env_cachesize_reply();
+extern bool_t xdr___env_close_msg();
+extern bool_t xdr___env_close_reply();
+extern bool_t xdr___env_create_msg();
+extern bool_t xdr___env_create_reply();
+extern bool_t xdr___env_dbremove_msg();
+extern bool_t xdr___env_dbremove_reply();
+extern bool_t xdr___env_dbrename_msg();
+extern bool_t xdr___env_dbrename_reply();
+extern bool_t xdr___env_encrypt_msg();
+extern bool_t xdr___env_encrypt_reply();
+extern bool_t xdr___env_flags_msg();
+extern bool_t xdr___env_flags_reply();
+extern bool_t xdr___env_open_msg();
+extern bool_t xdr___env_open_reply();
+extern bool_t xdr___env_remove_msg();
+extern bool_t xdr___env_remove_reply();
+extern bool_t xdr___txn_abort_msg();
+extern bool_t xdr___txn_abort_reply();
+extern bool_t xdr___txn_begin_msg();
+extern bool_t xdr___txn_begin_reply();
+extern bool_t xdr___txn_commit_msg();
+extern bool_t xdr___txn_commit_reply();
+extern bool_t xdr___txn_discard_msg();
+extern bool_t xdr___txn_discard_reply();
+extern bool_t xdr___txn_prepare_msg();
+extern bool_t xdr___txn_prepare_reply();
+extern bool_t xdr___txn_recover_msg();
+extern bool_t xdr___txn_recover_reply();
+extern bool_t xdr___db_associate_msg();
+extern bool_t xdr___db_associate_reply();
+extern bool_t xdr___db_bt_maxkey_msg();
+extern bool_t xdr___db_bt_maxkey_reply();
+extern bool_t xdr___db_bt_minkey_msg();
+extern bool_t xdr___db_bt_minkey_reply();
+extern bool_t xdr___db_close_msg();
+extern bool_t xdr___db_close_reply();
+extern bool_t xdr___db_create_msg();
+extern bool_t xdr___db_create_reply();
+extern bool_t xdr___db_del_msg();
+extern bool_t xdr___db_del_reply();
+extern bool_t xdr___db_encrypt_msg();
+extern bool_t xdr___db_encrypt_reply();
+extern bool_t xdr___db_extentsize_msg();
+extern bool_t xdr___db_extentsize_reply();
+extern bool_t xdr___db_flags_msg();
+extern bool_t xdr___db_flags_reply();
+extern bool_t xdr___db_get_msg();
+extern bool_t xdr___db_get_reply();
+extern bool_t xdr___db_h_ffactor_msg();
+extern bool_t xdr___db_h_ffactor_reply();
+extern bool_t xdr___db_h_nelem_msg();
+extern bool_t xdr___db_h_nelem_reply();
+extern bool_t xdr___db_key_range_msg();
+extern bool_t xdr___db_key_range_reply();
+extern bool_t xdr___db_lorder_msg();
+extern bool_t xdr___db_lorder_reply();
+extern bool_t xdr___db_open_msg();
+extern bool_t xdr___db_open_reply();
+extern bool_t xdr___db_pagesize_msg();
+extern bool_t xdr___db_pagesize_reply();
+extern bool_t xdr___db_pget_msg();
+extern bool_t xdr___db_pget_reply();
+extern bool_t xdr___db_put_msg();
+extern bool_t xdr___db_put_reply();
+extern bool_t xdr___db_re_delim_msg();
+extern bool_t xdr___db_re_delim_reply();
+extern bool_t xdr___db_re_len_msg();
+extern bool_t xdr___db_re_len_reply();
+extern bool_t xdr___db_re_pad_msg();
+extern bool_t xdr___db_re_pad_reply();
+extern bool_t xdr___db_remove_msg();
+extern bool_t xdr___db_remove_reply();
+extern bool_t xdr___db_rename_msg();
+extern bool_t xdr___db_rename_reply();
+extern bool_t xdr___db_stat_msg();
+extern bool_t xdr___db_stat_reply();
+extern bool_t xdr___db_sync_msg();
+extern bool_t xdr___db_sync_reply();
+extern bool_t xdr___db_truncate_msg();
+extern bool_t xdr___db_truncate_reply();
+extern bool_t xdr___db_cursor_msg();
+extern bool_t xdr___db_cursor_reply();
+extern bool_t xdr___db_join_msg();
+extern bool_t xdr___db_join_reply();
+extern bool_t xdr___dbc_close_msg();
+extern bool_t xdr___dbc_close_reply();
+extern bool_t xdr___dbc_count_msg();
+extern bool_t xdr___dbc_count_reply();
+extern bool_t xdr___dbc_del_msg();
+extern bool_t xdr___dbc_del_reply();
+extern bool_t xdr___dbc_dup_msg();
+extern bool_t xdr___dbc_dup_reply();
+extern bool_t xdr___dbc_get_msg();
+extern bool_t xdr___dbc_get_reply();
+extern bool_t xdr___dbc_pget_msg();
+extern bool_t xdr___dbc_pget_reply();
+extern bool_t xdr___dbc_put_msg();
+extern bool_t xdr___dbc_put_reply();
+
+#endif /* !_DB_SERVER_H_RPCGEN */
diff --git a/libdb/dbinc_auto/dbreg_auto.h b/libdb/dbinc_auto/dbreg_auto.h
new file mode 100644
index 0000000..4d7d4a9
--- /dev/null
+++ b/libdb/dbinc_auto/dbreg_auto.h
@@ -0,0 +1,19 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __dbreg_AUTO_H
+#define __dbreg_AUTO_H
+#define DB___dbreg_register 2
+typedef struct ___dbreg_register_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT name;
+ DBT uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+ u_int32_t id;
+} __dbreg_register_args;
+
+#endif
diff --git a/libdb/dbinc_auto/dbreg_ext.h b/libdb/dbinc_auto/dbreg_ext.h
new file mode 100644
index 0000000..eda2620
--- /dev/null
+++ b/libdb/dbinc_auto/dbreg_ext.h
@@ -0,0 +1,43 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _dbreg_ext_h_
+#define _dbreg_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __dbreg_setup __P((DB *, const char *, u_int32_t));
+int __dbreg_teardown __P((DB *));
+int __dbreg_new_id __P((DB *, DB_TXN *));
+int __dbreg_assign_id __P((DB *, int32_t));
+int __dbreg_revoke_id __P((DB *, int));
+int __dbreg_close_id __P((DB *, DB_TXN *));
+int __dbreg_register_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, int32_t, DBTYPE, db_pgno_t, u_int32_t));
+int __dbreg_register_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_register_read __P((DB_ENV *, void *, __dbreg_register_args **));
+int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_register_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+int __dbreg_open_files __P((DB_ENV *));
+int __dbreg_close_files __P((DB_ENV *));
+int __dbreg_nofiles __P((DB_ENV *));
+int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int));
+int __dbreg_id_to_db_int __P((DB_ENV *, DB_TXN *, DB **, int32_t, int, int));
+int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **));
+int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **));
+int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **));
+int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+int __dbreg_lazy_id __P((DB *));
+int __dbreg_push_id __P((DB_ENV *, int32_t));
+int __dbreg_pop_id __P((DB_ENV *, int32_t *));
+int __dbreg_pluck_id __P((DB_ENV *, int32_t));
+void __dbreg_print_dblist __P((DB_ENV *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_dbreg_ext_h_ */
diff --git a/libdb/dbinc_auto/env_ext.h b/libdb/dbinc_auto/env_ext.h
new file mode 100644
index 0000000..4bd0eee
--- /dev/null
+++ b/libdb/dbinc_auto/env_ext.h
@@ -0,0 +1,39 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _env_ext_h_
+#define _env_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __db_shalloc_init __P((void *, size_t));
+int __db_shalloc_size __P((size_t, size_t));
+int __db_shalloc __P((void *, size_t, size_t, void *));
+void __db_shalloc_free __P((void *, void *));
+size_t __db_shsizeof __P((void *));
+void __db_shalloc_dump __P((void *, FILE *));
+int __db_tablesize __P((u_int32_t));
+void __db_hashinit __P((void *, u_int32_t));
+int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+int __db_overwrite __P((DB_ENV *, const char *));
+int __db_mi_env __P((DB_ENV *, const char *));
+int __db_mi_open __P((DB_ENV *, const char *, int));
+int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbenv_close __P((DB_ENV *, u_int32_t));
+int __db_appname __P((DB_ENV *, APPNAME, const char *, u_int32_t, DB_FH *, char **));
+int __db_home __P((DB_ENV *, const char *, u_int32_t));
+int __db_apprec __P((DB_ENV *, DB_LSN *, u_int32_t));
+int __env_openfiles __P((DB_ENV *, DB_LOGC *, void *, DBT *, DB_LSN *, DB_LSN *, double, int));
+int __db_e_attach __P((DB_ENV *, u_int32_t *));
+int __db_e_detach __P((DB_ENV *, int));
+int __db_e_remove __P((DB_ENV *, u_int32_t));
+int __db_e_stat __P((DB_ENV *, REGENV *, REGION *, int *, u_int32_t));
+int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_env_ext_h_ */
diff --git a/libdb/dbinc_auto/ext_185_def.in b/libdb/dbinc_auto/ext_185_def.in
new file mode 100644
index 0000000..8da68a8
--- /dev/null
+++ b/libdb/dbinc_auto/ext_185_def.in
@@ -0,0 +1,12 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_185_DEF_IN_
+#define _DB_EXT_185_DEF_IN_
+
+#ifdef _DB185_INT_H_
+#define __db185_open __db185_open@DB_VERSION_UNIQUE_NAME@
+#else
+#define __db185_open __db185_open@DB_VERSION_UNIQUE_NAME@
+#endif
+
+#endif /* !_DB_EXT_185_DEF_IN_ */
diff --git a/libdb/dbinc_auto/ext_185_prot.in b/libdb/dbinc_auto/ext_185_prot.in
new file mode 100644
index 0000000..dfd8d3d
--- /dev/null
+++ b/libdb/dbinc_auto/ext_185_prot.in
@@ -0,0 +1,19 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_185_PROT_IN_
+#define _DB_EXT_185_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifdef _DB185_INT_H_
+DB185 *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+#else
+DB *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_185_PROT_IN_ */
diff --git a/libdb/dbinc_auto/ext_def.in b/libdb/dbinc_auto/ext_def.in
new file mode 100644
index 0000000..7bef246
--- /dev/null
+++ b/libdb/dbinc_auto/ext_def.in
@@ -0,0 +1,61 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_DEF_IN_
+#define _DB_EXT_DEF_IN_
+
+#define db_create db_create@DB_VERSION_UNIQUE_NAME@
+#define db_strerror db_strerror@DB_VERSION_UNIQUE_NAME@
+#define db_env_create db_env_create@DB_VERSION_UNIQUE_NAME@
+#define db_version db_version@DB_VERSION_UNIQUE_NAME@
+#define log_compare log_compare@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_close db_env_set_func_close@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_dirfree db_env_set_func_dirfree@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_dirlist db_env_set_func_dirlist@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_exists db_env_set_func_exists@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_free db_env_set_func_free@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_fsync db_env_set_func_fsync@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_ioinfo db_env_set_func_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_malloc db_env_set_func_malloc@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_map db_env_set_func_map@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_open db_env_set_func_open@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_read db_env_set_func_read@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_realloc db_env_set_func_realloc@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_rename db_env_set_func_rename@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_seek db_env_set_func_seek@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_sleep db_env_set_func_sleep@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_unlink db_env_set_func_unlink@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_unmap db_env_set_func_unmap@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_write db_env_set_func_write@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_yield db_env_set_func_yield@DB_VERSION_UNIQUE_NAME@
+#if DB_DBM_HSEARCH != 0
+#define __db_ndbm_clearerr __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_close __db_ndbm_close@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_delete __db_ndbm_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_dirfno __db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_error __db_ndbm_error@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_fetch __db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_firstkey __db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_nextkey __db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_open __db_ndbm_open@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_pagfno __db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_rdonly __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_store __db_ndbm_store@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_close __db_dbm_close@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_dbrdonly __db_dbm_dbrdonly@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_delete __db_dbm_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_dirf __db_dbm_dirf@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_fetch __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_init __db_dbm_init@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_nextkey __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_pagf __db_dbm_pagf@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_store __db_dbm_store@DB_VERSION_UNIQUE_NAME@
+#endif
+#if DB_DBM_HSEARCH != 0
+#define __db_hcreate __db_hcreate@DB_VERSION_UNIQUE_NAME@
+#define __db_hsearch __db_hsearch@DB_VERSION_UNIQUE_NAME@
+#define __db_hdestroy __db_hdestroy@DB_VERSION_UNIQUE_NAME@
+#endif
+#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@
+
+#endif /* !_DB_EXT_DEF_IN_ */
diff --git a/libdb/dbinc_auto/ext_prot.in b/libdb/dbinc_auto/ext_prot.in
new file mode 100644
index 0000000..42c77a1
--- /dev/null
+++ b/libdb/dbinc_auto/ext_prot.in
@@ -0,0 +1,70 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_PROT_IN_
+#define _DB_EXT_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int db_create __P((DB **, DB_ENV *, u_int32_t));
+char *db_strerror __P((int));
+int db_env_create __P((DB_ENV **, u_int32_t));
+char *db_version __P((int *, int *, int *));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int txn_abort __P((DB_TXN *));
+int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int txn_commit __P((DB_TXN *, u_int32_t));
+#if DB_DBM_HSEARCH != 0
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
+int __db_dbm_close __P((void));
+int __db_dbm_dbrdonly __P((void));
+int __db_dbm_delete __P((datum));
+int __db_dbm_dirf __P((void));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_pagf __P((void));
+int __db_dbm_store __P((datum, datum));
+#endif
+#if DB_DBM_HSEARCH != 0
+int __db_hcreate __P((size_t));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
+void __db_hdestroy __P((void));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_PROT_IN_ */
diff --git a/libdb/dbinc_auto/fileops_auto.h b/libdb/dbinc_auto/fileops_auto.h
new file mode 100644
index 0000000..ee1f586
--- /dev/null
+++ b/libdb/dbinc_auto/fileops_auto.h
@@ -0,0 +1,60 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __fop_AUTO_H
+#define __fop_AUTO_H
+#define DB___fop_create 143
+typedef struct ___fop_create_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t mode;
+} __fop_create_args;
+
+#define DB___fop_remove 144
+typedef struct ___fop_remove_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ DBT fid;
+ u_int32_t appname;
+} __fop_remove_args;
+
+#define DB___fop_write 145
+typedef struct ___fop_write_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t offset;
+ DBT page;
+ u_int32_t flag;
+} __fop_write_args;
+
+#define DB___fop_rename 146
+typedef struct ___fop_rename_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT oldname;
+ DBT newname;
+ DBT fileid;
+ u_int32_t appname;
+} __fop_rename_args;
+
+#define DB___fop_file_remove 141
+typedef struct ___fop_file_remove_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT real_fid;
+ DBT tmp_fid;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t child;
+} __fop_file_remove_args;
+
+#endif
diff --git a/libdb/dbinc_auto/fileops_ext.h b/libdb/dbinc_auto/fileops_ext.h
new file mode 100644
index 0000000..5edf7bd
--- /dev/null
+++ b/libdb/dbinc_auto/fileops_ext.h
@@ -0,0 +1,52 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _fileops_ext_h_
+#define _fileops_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t));
+int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **));
+int __fop_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, u_int32_t));
+int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **));
+int __fop_write_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t, const DBT *, u_int32_t));
+int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **));
+int __fop_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t));
+int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_read __P((DB_ENV *, void *, __fop_rename_args **));
+int __fop_file_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t, u_int32_t));
+int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_read __P((DB_ENV *, void *, __fop_file_remove_args **));
+int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_create __P((DB_ENV *, DB_TXN *, DB_FH *, const char *, APPNAME, int));
+int __fop_remove __P((DB_ENV *, DB_TXN *, u_int8_t *, const char *, APPNAME));
+int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME, DB_FH *, u_int32_t, u_int8_t *, u_int32_t, u_int32_t));
+int __fop_rename __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int8_t *, APPNAME));
+int __fop_create_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_lock_handle __P((DB_ENV *, DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t));
+int __fop_file_setup __P((DB *, DB_TXN *, const char *, int, u_int32_t, u_int32_t *));
+int __fop_subdb_setup __P((DB *, DB_TXN *, const char *, const char *, int, u_int32_t));
+int __fop_remove_setup __P((DB *, DB_TXN *, const char *, u_int32_t));
+int __fop_read_meta __P((DB_ENV *, const char *, u_int8_t *, size_t, DB_FH *, int, size_t *, u_int32_t));
+int __fop_dummy __P((DB *, DB_TXN *, const char *, const char *, u_int32_t));
+int __fop_dbrename __P((DB *, const char *, const char *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_fileops_ext_h_ */
diff --git a/libdb/dbinc_auto/hash_auto.h b/libdb/dbinc_auto/hash_auto.h
new file mode 100644
index 0000000..7ec3fb7
--- /dev/null
+++ b/libdb/dbinc_auto/hash_auto.h
@@ -0,0 +1,132 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __ham_AUTO_H
+#define __ham_AUTO_H
+#define DB___ham_insdel 21
+typedef struct ___ham_insdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ DBT key;
+ DBT data;
+} __ham_insdel_args;
+
+#define DB___ham_newpage 22
+typedef struct ___ham_newpage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t prev_pgno;
+ DB_LSN prevlsn;
+ db_pgno_t new_pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+} __ham_newpage_args;
+
+#define DB___ham_splitdata 24
+typedef struct ___ham_splitdata_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ DBT pageimage;
+ DB_LSN pagelsn;
+} __ham_splitdata_args;
+
+#define DB___ham_replace 25
+typedef struct ___ham_replace_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ int32_t off;
+ DBT olditem;
+ DBT newitem;
+ u_int32_t makedup;
+} __ham_replace_args;
+
+#define DB___ham_copypage 28
+typedef struct ___ham_copypage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+ db_pgno_t nnext_pgno;
+ DB_LSN nnextlsn;
+ DBT page;
+} __ham_copypage_args;
+
+#define DB___ham_metagroup 29
+typedef struct ___ham_metagroup_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t bucket;
+ db_pgno_t mmpgno;
+ DB_LSN mmetalsn;
+ db_pgno_t mpgno;
+ DB_LSN metalsn;
+ db_pgno_t pgno;
+ DB_LSN pagelsn;
+ u_int32_t newalloc;
+} __ham_metagroup_args;
+
+#define DB___ham_groupalloc 32
+typedef struct ___ham_groupalloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+} __ham_groupalloc_args;
+
+#define DB___ham_curadj 33
+typedef struct ___ham_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t len;
+ u_int32_t dup_off;
+ int add;
+ int is_dup;
+ u_int32_t order;
+} __ham_curadj_args;
+
+#define DB___ham_chgpg 34
+typedef struct ___ham_chgpg_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ham_mode mode;
+ db_pgno_t old_pgno;
+ db_pgno_t new_pgno;
+ u_int32_t old_indx;
+ u_int32_t new_indx;
+} __ham_chgpg_args;
+
+#endif
diff --git a/libdb/dbinc_auto/hash_ext.h b/libdb/dbinc_auto/hash_ext.h
new file mode 100644
index 0000000..1ee2398
--- /dev/null
+++ b/libdb/dbinc_auto/hash_ext.h
@@ -0,0 +1,125 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _hash_ext_h_
+#define _hash_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __ham_quick_delete __P((DBC *));
+int __ham_c_init __P((DBC *));
+int __ham_c_count __P((DBC *, db_recno_t *));
+int __ham_c_dup __P((DBC *, DBC *));
+u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+int __ham_init_dbt __P((DB_ENV *, DBT *, u_int32_t, void **, u_int32_t *));
+int __ham_c_update __P((DBC *, u_int32_t, int, int));
+int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***));
+int __ham_insdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, const DBT *, const DBT *));
+int __ham_insdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **));
+int __ham_newpage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __ham_newpage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_read __P((DB_ENV *, void *, __ham_newpage_args **));
+int __ham_splitdata_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __ham_splitdata_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_read __P((DB_ENV *, void *, __ham_splitdata_args **));
+int __ham_replace_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __ham_replace_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_read __P((DB_ENV *, void *, __ham_replace_args **));
+int __ham_copypage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, const DBT *));
+int __ham_copypage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_read __P((DB_ENV *, void *, __ham_copypage_args **));
+int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t));
+int __ham_metagroup_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_read __P((DB_ENV *, void *, __ham_metagroup_args **));
+int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __ham_groupalloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_read __P((DB_ENV *, void *, __ham_groupalloc_args **));
+int __ham_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t, int, int, u_int32_t));
+int __ham_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **));
+int __ham_chgpg_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ham_mode, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t));
+int __ham_chgpg_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **));
+int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __ham_mswap __P((void *));
+int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+int __ham_dup_convert __P((DBC *));
+int __ham_make_dup __P((DB_ENV *, const DBT *, DBT *d, void **, u_int32_t *));
+void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t));
+void __ham_cprint __P((DBC *));
+u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+int __ham_get_meta __P((DBC *));
+int __ham_release_meta __P((DBC *));
+int __ham_dirty_meta __P((DBC *));
+int __ham_db_create __P((DB *));
+int __ham_db_close __P((DB *));
+int __ham_open __P((DB *, DB_TXN *, const char * name, db_pgno_t, u_int32_t));
+int __ham_metachk __P((DB *, const char *, HMETA *));
+int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __ham_new_subdb __P((DB *, DB *, DB_TXN *));
+int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_reset __P((DBC *));
+void __ham_item_init __P((DBC *));
+int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+void __ham_putitem __P((DB *, PAGE *p, const DBT *, int));
+void __ham_reputpair __P((DB *, PAGE *, u_int32_t, const DBT *, const DBT *));
+int __ham_del_pair __P((DBC *, int));
+int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t, int32_t, int32_t, DBT *));
+int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *));
+int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+int __ham_get_cpage __P((DBC *, db_lockmode_t));
+int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+int __ham_insdel_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_reclaim __P((DB *, DB_TXN *txn));
+int __ham_truncate __P((DB *, DB_TXN *txn, u_int32_t *));
+int __ham_stat __P((DB *, void *, u_int32_t));
+int __ham_traverse __P((DBC *, db_lockmode_t, int (*)(DB *, PAGE *, void *, int *), void *, int));
+int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+int __ham_31_hashmeta __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_31_hash __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *, db_pgno_t, u_int32_t));
+int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __ham_vrfy_hashing __P((DB *, u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t, DB *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_hash_ext_h_ */
diff --git a/libdb/dbinc_auto/hmac_ext.h b/libdb/dbinc_auto/hmac_ext.h
new file mode 100644
index 0000000..d161a72
--- /dev/null
+++ b/libdb/dbinc_auto/hmac_ext.h
@@ -0,0 +1,20 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _hmac_ext_h_
+#define _hmac_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __db_chksum __P((u_int8_t *, size_t, u_int8_t *, u_int8_t *));
+void __db_derive_mac __P((u_int8_t *, size_t, u_int8_t *));
+int __db_check_chksum __P((DB_ENV *, DB_CIPHER *, u_int8_t *, void *, size_t, int));
+void __db_SHA1Transform __P((u_int32_t *, unsigned char *));
+void __db_SHA1Init __P((SHA1_CTX *));
+void __db_SHA1Update __P((SHA1_CTX *, unsigned char *, size_t));
+void __db_SHA1Final __P((unsigned char *, SHA1_CTX *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_hmac_ext_h_ */
diff --git a/libdb/dbinc_auto/int_def.in b/libdb/dbinc_auto/int_def.in
new file mode 100644
index 0000000..e75e191
--- /dev/null
+++ b/libdb/dbinc_auto/int_def.in
@@ -0,0 +1,1329 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_INT_DEF_IN_
+#define _DB_INT_DEF_IN_
+
+#define __crdel_metasub_log __crdel_metasub_log@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_getpgnos __crdel_metasub_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_print __crdel_metasub_print@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_read __crdel_metasub_read@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_print __crdel_init_print@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_getpgnos __crdel_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_recover __crdel_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_recover __crdel_metasub_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_master_open __db_master_open@DB_VERSION_UNIQUE_NAME@
+#define __db_master_update __db_master_update@DB_VERSION_UNIQUE_NAME@
+#define __db_dbenv_setup __db_dbenv_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_close __db_close@DB_VERSION_UNIQUE_NAME@
+#define __db_close_i __db_close_i@DB_VERSION_UNIQUE_NAME@
+#define __db_refresh __db_refresh@DB_VERSION_UNIQUE_NAME@
+#define __db_log_page __db_log_page@DB_VERSION_UNIQUE_NAME@
+#define __db_backup_name __db_backup_name@DB_VERSION_UNIQUE_NAME@
+#define __dblist_get __dblist_get@DB_VERSION_UNIQUE_NAME@
+#if CONFIG_TEST
+#define __db_testcopy __db_testcopy@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_cursor __db_cursor@DB_VERSION_UNIQUE_NAME@
+#define __db_icursor __db_icursor@DB_VERSION_UNIQUE_NAME@
+#define __db_cprint __db_cprint@DB_VERSION_UNIQUE_NAME@
+#define __db_fd __db_fd@DB_VERSION_UNIQUE_NAME@
+#define __db_get __db_get@DB_VERSION_UNIQUE_NAME@
+#define __db_put __db_put@DB_VERSION_UNIQUE_NAME@
+#define __db_delete __db_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_sync __db_sync@DB_VERSION_UNIQUE_NAME@
+#define __db_associate __db_associate@DB_VERSION_UNIQUE_NAME@
+#define __db_pget __db_pget@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_log __db_addrem_log@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_getpgnos __db_addrem_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_print __db_addrem_print@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_read __db_addrem_read@DB_VERSION_UNIQUE_NAME@
+#define __db_big_log __db_big_log@DB_VERSION_UNIQUE_NAME@
+#define __db_big_getpgnos __db_big_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_big_print __db_big_print@DB_VERSION_UNIQUE_NAME@
+#define __db_big_read __db_big_read@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_log __db_ovref_log@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_getpgnos __db_ovref_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_print __db_ovref_print@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_read __db_ovref_read@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_log __db_relink_log@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_getpgnos __db_relink_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_print __db_relink_print@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_read __db_relink_read@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_log __db_debug_log@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_getpgnos __db_debug_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_print __db_debug_print@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_read __db_debug_read@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_log __db_noop_log@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_getpgnos __db_noop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_print __db_noop_print@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_read __db_noop_read@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_log __db_pg_alloc_log@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_getpgnos __db_pg_alloc_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_print __db_pg_alloc_print@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_read __db_pg_alloc_read@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_log __db_pg_free_log@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_getpgnos __db_pg_free_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_print __db_pg_free_print@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_read __db_pg_free_read@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_log __db_cksum_log@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_getpgnos __db_cksum_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_print __db_cksum_print@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_read __db_cksum_read@DB_VERSION_UNIQUE_NAME@
+#define __db_init_print __db_init_print@DB_VERSION_UNIQUE_NAME@
+#define __db_init_getpgnos __db_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_init_recover __db_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_c_close __db_c_close@DB_VERSION_UNIQUE_NAME@
+#define __db_c_destroy __db_c_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_c_count __db_c_count@DB_VERSION_UNIQUE_NAME@
+#define __db_c_del __db_c_del@DB_VERSION_UNIQUE_NAME@
+#define __db_c_dup __db_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __db_c_idup __db_c_idup@DB_VERSION_UNIQUE_NAME@
+#define __db_c_newopd __db_c_newopd@DB_VERSION_UNIQUE_NAME@
+#define __db_c_get __db_c_get@DB_VERSION_UNIQUE_NAME@
+#define __db_c_put __db_c_put@DB_VERSION_UNIQUE_NAME@
+#define __db_duperr __db_duperr@DB_VERSION_UNIQUE_NAME@
+#define __db_c_secondary_get __db_c_secondary_get@DB_VERSION_UNIQUE_NAME@
+#define __db_c_pget __db_c_pget@DB_VERSION_UNIQUE_NAME@
+#define __db_c_del_primary __db_c_del_primary@DB_VERSION_UNIQUE_NAME@
+#define __db_s_first __db_s_first@DB_VERSION_UNIQUE_NAME@
+#define __db_s_next __db_s_next@DB_VERSION_UNIQUE_NAME@
+#define __db_s_done __db_s_done@DB_VERSION_UNIQUE_NAME@
+#define __db_partsize __db_partsize@DB_VERSION_UNIQUE_NAME@
+#define __db_pgin __db_pgin@DB_VERSION_UNIQUE_NAME@
+#define __db_pgout __db_pgout@DB_VERSION_UNIQUE_NAME@
+#define __db_metaswap __db_metaswap@DB_VERSION_UNIQUE_NAME@
+#define __db_byteswap __db_byteswap@DB_VERSION_UNIQUE_NAME@
+#define __db_dispatch __db_dispatch@DB_VERSION_UNIQUE_NAME@
+#define __db_add_recovery __db_add_recovery@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_init __db_txnlist_init@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_add __db_txnlist_add@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_remove __db_txnlist_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_ckp __db_txnlist_ckp@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_end __db_txnlist_end@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_find __db_txnlist_find@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_update __db_txnlist_update@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_gen __db_txnlist_gen@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_lsnadd __db_txnlist_lsnadd@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_lsninit __db_txnlist_lsninit@DB_VERSION_UNIQUE_NAME@
+#define __db_add_limbo __db_add_limbo@DB_VERSION_UNIQUE_NAME@
+#define __db_do_the_limbo __db_do_the_limbo@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_print __db_txnlist_print@DB_VERSION_UNIQUE_NAME@
+#define __db_ditem __db_ditem@DB_VERSION_UNIQUE_NAME@
+#define __db_pitem __db_pitem@DB_VERSION_UNIQUE_NAME@
+#define __db_relink __db_relink@DB_VERSION_UNIQUE_NAME@
+#define __db_cursorchk __db_cursorchk@DB_VERSION_UNIQUE_NAME@
+#define __db_ccountchk __db_ccountchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cdelchk __db_cdelchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cgetchk __db_cgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cputchk __db_cputchk@DB_VERSION_UNIQUE_NAME@
+#define __db_pgetchk __db_pgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cpgetchk __db_cpgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_delchk __db_delchk@DB_VERSION_UNIQUE_NAME@
+#define __db_getchk __db_getchk@DB_VERSION_UNIQUE_NAME@
+#define __db_joinchk __db_joinchk@DB_VERSION_UNIQUE_NAME@
+#define __db_joingetchk __db_joingetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_putchk __db_putchk@DB_VERSION_UNIQUE_NAME@
+#define __db_statchk __db_statchk@DB_VERSION_UNIQUE_NAME@
+#define __db_syncchk __db_syncchk@DB_VERSION_UNIQUE_NAME@
+#define __db_secondary_corrupt __db_secondary_corrupt@DB_VERSION_UNIQUE_NAME@
+#define __db_associatechk __db_associatechk@DB_VERSION_UNIQUE_NAME@
+#define __db_txn_auto __db_txn_auto@DB_VERSION_UNIQUE_NAME@
+#define __db_join __db_join@DB_VERSION_UNIQUE_NAME@
+#define __db_new __db_new@DB_VERSION_UNIQUE_NAME@
+#define __db_free __db_free@DB_VERSION_UNIQUE_NAME@
+#define __db_lprint __db_lprint@DB_VERSION_UNIQUE_NAME@
+#define __db_lget __db_lget@DB_VERSION_UNIQUE_NAME@
+#define __db_lput __db_lput@DB_VERSION_UNIQUE_NAME@
+#define __dbh_am_chk __dbh_am_chk@DB_VERSION_UNIQUE_NAME@
+#define __db_set_lorder __db_set_lorder@DB_VERSION_UNIQUE_NAME@
+#define __db_open __db_open@DB_VERSION_UNIQUE_NAME@
+#define __db_dbopen __db_dbopen@DB_VERSION_UNIQUE_NAME@
+#define __db_new_file __db_new_file@DB_VERSION_UNIQUE_NAME@
+#define __db_init_subdb __db_init_subdb@DB_VERSION_UNIQUE_NAME@
+#define __db_chk_meta __db_chk_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_meta_setup __db_meta_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_goff __db_goff@DB_VERSION_UNIQUE_NAME@
+#define __db_poff __db_poff@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref __db_ovref@DB_VERSION_UNIQUE_NAME@
+#define __db_doff __db_doff@DB_VERSION_UNIQUE_NAME@
+#define __db_moff __db_moff@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_overflow __db_vrfy_overflow@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ovfl_structure __db_vrfy_ovfl_structure@DB_VERSION_UNIQUE_NAME@
+#define __db_safe_goff __db_safe_goff@DB_VERSION_UNIQUE_NAME@
+#define __db_loadme __db_loadme@DB_VERSION_UNIQUE_NAME@
+#define __db_dump __db_dump@DB_VERSION_UNIQUE_NAME@
+#define __db_inmemdbflags __db_inmemdbflags@DB_VERSION_UNIQUE_NAME@
+#define __db_prnpage __db_prnpage@DB_VERSION_UNIQUE_NAME@
+#define __db_prpage __db_prpage@DB_VERSION_UNIQUE_NAME@
+#define __db_pr __db_pr@DB_VERSION_UNIQUE_NAME@
+#define __db_prdbt __db_prdbt@DB_VERSION_UNIQUE_NAME@
+#define __db_prflags __db_prflags@DB_VERSION_UNIQUE_NAME@
+#define __db_dbtype_to_string __db_dbtype_to_string@DB_VERSION_UNIQUE_NAME@
+#define __db_prheader __db_prheader@DB_VERSION_UNIQUE_NAME@
+#define __db_prfooter __db_prfooter@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_recover __db_addrem_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_big_recover __db_big_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_recover __db_ovref_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_recover __db_relink_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_recover __db_debug_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_recover __db_noop_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_recover __db_pg_alloc_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_recover __db_pg_free_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_recover __db_cksum_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_traverse_big __db_traverse_big@DB_VERSION_UNIQUE_NAME@
+#define __db_reclaim_callback __db_reclaim_callback@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate_callback __db_truncate_callback@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_dbremove __dbenv_dbremove@DB_VERSION_UNIQUE_NAME@
+#define __db_remove __db_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_remove_i __db_remove_i@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_dbrename __dbenv_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __db_rename __db_rename@DB_VERSION_UNIQUE_NAME@
+#define __db_rename_i __db_rename_i@DB_VERSION_UNIQUE_NAME@
+#define __db_ret __db_ret@DB_VERSION_UNIQUE_NAME@
+#define __db_retcopy __db_retcopy@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate __db_truncate@DB_VERSION_UNIQUE_NAME@
+#define __db_upgrade __db_upgrade@DB_VERSION_UNIQUE_NAME@
+#define __db_lastpgno __db_lastpgno@DB_VERSION_UNIQUE_NAME@
+#define __db_31_offdup __db_31_offdup@DB_VERSION_UNIQUE_NAME@
+#define __db_verify __db_verify@DB_VERSION_UNIQUE_NAME@
+#define __db_verify_callback __db_verify_callback@DB_VERSION_UNIQUE_NAME@
+#define __db_verify_internal __db_verify_internal@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_datapage __db_vrfy_datapage@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_meta __db_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_struct_feedback __db_vrfy_struct_feedback@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_inpitem __db_vrfy_inpitem@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_duptype __db_vrfy_duptype@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_duptree __db_salvage_duptree@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_dbinfo_create __db_vrfy_dbinfo_create@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_dbinfo_destroy __db_vrfy_dbinfo_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_getpageinfo __db_vrfy_getpageinfo@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_putpageinfo __db_vrfy_putpageinfo@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset __db_vrfy_pgset@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_get __db_vrfy_pgset_get@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_inc __db_vrfy_pgset_inc@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_dec __db_vrfy_pgset_dec@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_next __db_vrfy_pgset_next@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_childcursor __db_vrfy_childcursor@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_childput __db_vrfy_childput@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccset __db_vrfy_ccset@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccnext __db_vrfy_ccnext@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccclose __db_vrfy_ccclose@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_init __db_salvage_init@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_destroy __db_salvage_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_getnext __db_salvage_getnext@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_isdone __db_salvage_isdone@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_markdone __db_salvage_markdone@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_markneeded __db_salvage_markneeded@DB_VERSION_UNIQUE_NAME@
+#define __bam_cmp __bam_cmp@DB_VERSION_UNIQUE_NAME@
+#define __bam_defcmp __bam_defcmp@DB_VERSION_UNIQUE_NAME@
+#define __bam_defpfx __bam_defpfx@DB_VERSION_UNIQUE_NAME@
+#define __bam_pgin __bam_pgin@DB_VERSION_UNIQUE_NAME@
+#define __bam_pgout __bam_pgout@DB_VERSION_UNIQUE_NAME@
+#define __bam_mswap __bam_mswap@DB_VERSION_UNIQUE_NAME@
+#define __bam_cprint __bam_cprint@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_delete __bam_ca_delete@DB_VERSION_UNIQUE_NAME@
+#define __ram_ca_delete __ram_ca_delete@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_di __bam_ca_di@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_dup __bam_ca_dup@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_undodup __bam_ca_undodup@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_rsplit __bam_ca_rsplit@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_split __bam_ca_split@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_undosplit __bam_ca_undosplit@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_init __bam_c_init@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_refresh __bam_c_refresh@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_count __bam_c_count@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_dup __bam_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __bam_bulk_overflow __bam_bulk_overflow@DB_VERSION_UNIQUE_NAME@
+#define __bam_bulk_duplicates __bam_bulk_duplicates@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_rget __bam_c_rget@DB_VERSION_UNIQUE_NAME@
+#define __bam_ditem __bam_ditem@DB_VERSION_UNIQUE_NAME@
+#define __bam_adjindx __bam_adjindx@DB_VERSION_UNIQUE_NAME@
+#define __bam_dpages __bam_dpages@DB_VERSION_UNIQUE_NAME@
+#define __bam_db_create __bam_db_create@DB_VERSION_UNIQUE_NAME@
+#define __bam_db_close __bam_db_close@DB_VERSION_UNIQUE_NAME@
+#define __bam_set_flags __bam_set_flags@DB_VERSION_UNIQUE_NAME@
+#define __ram_set_flags __ram_set_flags@DB_VERSION_UNIQUE_NAME@
+#define __bam_open __bam_open@DB_VERSION_UNIQUE_NAME@
+#define __bam_metachk __bam_metachk@DB_VERSION_UNIQUE_NAME@
+#define __bam_read_root __bam_read_root@DB_VERSION_UNIQUE_NAME@
+#define __bam_new_file __bam_new_file@DB_VERSION_UNIQUE_NAME@
+#define __bam_new_subdb __bam_new_subdb@DB_VERSION_UNIQUE_NAME@
+#define __bam_iitem __bam_iitem@DB_VERSION_UNIQUE_NAME@
+#define __bam_ritem __bam_ritem@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_recover __bam_split_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_recover __bam_rsplit_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_recover __bam_adj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_recover __bam_cadjust_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_recover __bam_cdel_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_recover __bam_repl_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_recover __bam_root_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_recover __bam_curadj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_recover __bam_rcuradj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_reclaim __bam_reclaim@DB_VERSION_UNIQUE_NAME@
+#define __bam_truncate __bam_truncate@DB_VERSION_UNIQUE_NAME@
+#define __ram_open __ram_open@DB_VERSION_UNIQUE_NAME@
+#define __ram_append __ram_append@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_del __ram_c_del@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_get __ram_c_get@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_put __ram_c_put@DB_VERSION_UNIQUE_NAME@
+#define __ram_ca __ram_ca@DB_VERSION_UNIQUE_NAME@
+#define __ram_getno __ram_getno@DB_VERSION_UNIQUE_NAME@
+#define __ram_writeback __ram_writeback@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsearch __bam_rsearch@DB_VERSION_UNIQUE_NAME@
+#define __bam_adjust __bam_adjust@DB_VERSION_UNIQUE_NAME@
+#define __bam_nrecs __bam_nrecs@DB_VERSION_UNIQUE_NAME@
+#define __bam_total __bam_total@DB_VERSION_UNIQUE_NAME@
+#define __bam_search __bam_search@DB_VERSION_UNIQUE_NAME@
+#define __bam_stkrel __bam_stkrel@DB_VERSION_UNIQUE_NAME@
+#define __bam_stkgrow __bam_stkgrow@DB_VERSION_UNIQUE_NAME@
+#define __bam_split __bam_split@DB_VERSION_UNIQUE_NAME@
+#define __bam_copy __bam_copy@DB_VERSION_UNIQUE_NAME@
+#define __bam_stat __bam_stat@DB_VERSION_UNIQUE_NAME@
+#define __bam_traverse __bam_traverse@DB_VERSION_UNIQUE_NAME@
+#define __bam_stat_callback __bam_stat_callback@DB_VERSION_UNIQUE_NAME@
+#define __bam_key_range __bam_key_range@DB_VERSION_UNIQUE_NAME@
+#define __bam_30_btreemeta __bam_30_btreemeta@DB_VERSION_UNIQUE_NAME@
+#define __bam_31_btreemeta __bam_31_btreemeta@DB_VERSION_UNIQUE_NAME@
+#define __bam_31_lbtree __bam_31_lbtree@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_meta __bam_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __ram_vrfy_leaf __ram_vrfy_leaf@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy __bam_vrfy@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_itemorder __bam_vrfy_itemorder@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_structure __bam_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_subtree __bam_vrfy_subtree@DB_VERSION_UNIQUE_NAME@
+#define __bam_salvage __bam_salvage@DB_VERSION_UNIQUE_NAME@
+#define __bam_salvage_walkdupint __bam_salvage_walkdupint@DB_VERSION_UNIQUE_NAME@
+#define __bam_meta2pgset __bam_meta2pgset@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_log __bam_split_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_getpgnos __bam_split_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_print __bam_split_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_read __bam_split_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_log __bam_rsplit_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_getpgnos __bam_rsplit_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_print __bam_rsplit_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_read __bam_rsplit_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_log __bam_adj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_getpgnos __bam_adj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_print __bam_adj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_read __bam_adj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_log __bam_cadjust_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_getpgnos __bam_cadjust_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_print __bam_cadjust_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_read __bam_cadjust_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_log __bam_cdel_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_getpgnos __bam_cdel_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_print __bam_cdel_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_read __bam_cdel_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_log __bam_repl_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_getpgnos __bam_repl_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_print __bam_repl_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_read __bam_repl_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_log __bam_root_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_getpgnos __bam_root_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_print __bam_root_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_read __bam_root_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_log __bam_curadj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_getpgnos __bam_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_print __bam_curadj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_read __bam_curadj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_log __bam_rcuradj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_getpgnos __bam_rcuradj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_print __bam_rcuradj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_read __bam_rcuradj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_print __bam_init_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_getpgnos __bam_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_recover __bam_init_recover@DB_VERSION_UNIQUE_NAME@
+#ifndef HAVE_GETCWD
+#define getcwd getcwd@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_GETOPT
+#define getopt getopt@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp memcmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy memcpy@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove memmove@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_RAISE
+#define raise raise@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf snprintf@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp strcasecmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRCASECMP
+#define strncasecmp strncasecmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRDUP
+#define strdup strdup@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRERROR
+#define strerror strerror@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf vsnprintf@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_isbigendian __db_isbigendian@DB_VERSION_UNIQUE_NAME@
+#define __db_byteorder __db_byteorder@DB_VERSION_UNIQUE_NAME@
+#define __db_fchk __db_fchk@DB_VERSION_UNIQUE_NAME@
+#define __db_fcchk __db_fcchk@DB_VERSION_UNIQUE_NAME@
+#define __db_ferr __db_ferr@DB_VERSION_UNIQUE_NAME@
+#define __db_pgerr __db_pgerr@DB_VERSION_UNIQUE_NAME@
+#define __db_pgfmt __db_pgfmt@DB_VERSION_UNIQUE_NAME@
+#define __db_eopnotsup __db_eopnotsup@DB_VERSION_UNIQUE_NAME@
+#ifdef DIAGNOSTIC
+#define __db_assert __db_assert@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_panic_msg __db_panic_msg@DB_VERSION_UNIQUE_NAME@
+#define __db_panic __db_panic@DB_VERSION_UNIQUE_NAME@
+#define __db_err __db_err@DB_VERSION_UNIQUE_NAME@
+#define __db_errcall __db_errcall@DB_VERSION_UNIQUE_NAME@
+#define __db_errfile __db_errfile@DB_VERSION_UNIQUE_NAME@
+#define __db_logmsg __db_logmsg@DB_VERSION_UNIQUE_NAME@
+#define __db_unknown_flag __db_unknown_flag@DB_VERSION_UNIQUE_NAME@
+#define __db_unknown_type __db_unknown_type@DB_VERSION_UNIQUE_NAME@
+#define __db_check_txn __db_check_txn@DB_VERSION_UNIQUE_NAME@
+#define __db_not_txn_env __db_not_txn_env@DB_VERSION_UNIQUE_NAME@
+#define __db_getlong __db_getlong@DB_VERSION_UNIQUE_NAME@
+#define __db_getulong __db_getulong@DB_VERSION_UNIQUE_NAME@
+#define __db_idspace __db_idspace@DB_VERSION_UNIQUE_NAME@
+#define __db_log2 __db_log2@DB_VERSION_UNIQUE_NAME@
+#define __db_util_arg __db_util_arg@DB_VERSION_UNIQUE_NAME@
+#define __db_util_cache __db_util_cache@DB_VERSION_UNIQUE_NAME@
+#define __db_util_logset __db_util_logset@DB_VERSION_UNIQUE_NAME@
+#define __db_util_siginit __db_util_siginit@DB_VERSION_UNIQUE_NAME@
+#define __db_util_interrupted __db_util_interrupted@DB_VERSION_UNIQUE_NAME@
+#define __db_util_sigresend __db_util_sigresend@DB_VERSION_UNIQUE_NAME@
+#define __aes_setup __aes_setup@DB_VERSION_UNIQUE_NAME@
+#define __aes_adj_size __aes_adj_size@DB_VERSION_UNIQUE_NAME@
+#define __aes_close __aes_close@DB_VERSION_UNIQUE_NAME@
+#define __aes_decrypt __aes_decrypt@DB_VERSION_UNIQUE_NAME@
+#define __aes_encrypt __aes_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __aes_init __aes_init@DB_VERSION_UNIQUE_NAME@
+#define __crypto_region_init __crypto_region_init@DB_VERSION_UNIQUE_NAME@
+#define __crypto_dbenv_close __crypto_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __crypto_algsetup __crypto_algsetup@DB_VERSION_UNIQUE_NAME@
+#define __crypto_decrypt_meta __crypto_decrypt_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_generate_iv __db_generate_iv@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelKeySetupEnc __db_rijndaelKeySetupEnc@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelKeySetupDec __db_rijndaelKeySetupDec@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelEncrypt __db_rijndaelEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelDecrypt __db_rijndaelDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelEncryptRound __db_rijndaelEncryptRound@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelDecryptRound __db_rijndaelDecryptRound@DB_VERSION_UNIQUE_NAME@
+#define __db_makeKey __db_makeKey@DB_VERSION_UNIQUE_NAME@
+#define __db_cipherInit __db_cipherInit@DB_VERSION_UNIQUE_NAME@
+#define __db_blockEncrypt __db_blockEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_padEncrypt __db_padEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_blockDecrypt __db_blockDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_padDecrypt __db_padDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_cipherUpdateRounds __db_cipherUpdateRounds@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_setup __dbreg_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_teardown __dbreg_teardown@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_new_id __dbreg_new_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_assign_id __dbreg_assign_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_revoke_id __dbreg_revoke_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_close_id __dbreg_close_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_log __dbreg_register_log@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_getpgnos __dbreg_register_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_print __dbreg_register_print@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_read __dbreg_register_read@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_print __dbreg_init_print@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_getpgnos __dbreg_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_recover __dbreg_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_recover __dbreg_register_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_add_dbentry __dbreg_add_dbentry@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_rem_dbentry __dbreg_rem_dbentry@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_open_files __dbreg_open_files@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_close_files __dbreg_close_files@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_nofiles __dbreg_nofiles@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_db __dbreg_id_to_db@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_db_int __dbreg_id_to_db_int@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_fname __dbreg_id_to_fname@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_fid_to_fname __dbreg_fid_to_fname@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_get_name __dbreg_get_name@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_do_open __dbreg_do_open@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_lazy_id __dbreg_lazy_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_push_id __dbreg_push_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_pop_id __dbreg_pop_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_pluck_id __dbreg_pluck_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_print_dblist __dbreg_print_dblist@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_init __db_shalloc_init@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_size __db_shalloc_size@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc __db_shalloc@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_free __db_shalloc_free@DB_VERSION_UNIQUE_NAME@
+#define __db_shsizeof __db_shsizeof@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_dump __db_shalloc_dump@DB_VERSION_UNIQUE_NAME@
+#define __db_tablesize __db_tablesize@DB_VERSION_UNIQUE_NAME@
+#define __db_hashinit __db_hashinit@DB_VERSION_UNIQUE_NAME@
+#define __db_fileinit __db_fileinit@DB_VERSION_UNIQUE_NAME@
+#define __db_overwrite __db_overwrite@DB_VERSION_UNIQUE_NAME@
+#define __db_mi_env __db_mi_env@DB_VERSION_UNIQUE_NAME@
+#define __db_mi_open __db_mi_open@DB_VERSION_UNIQUE_NAME@
+#define __db_env_config __db_env_config@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_open __dbenv_open@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_remove __dbenv_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_close __dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __db_appname __db_appname@DB_VERSION_UNIQUE_NAME@
+#define __db_home __db_home@DB_VERSION_UNIQUE_NAME@
+#define __db_apprec __db_apprec@DB_VERSION_UNIQUE_NAME@
+#define __env_openfiles __env_openfiles@DB_VERSION_UNIQUE_NAME@
+#define __db_e_attach __db_e_attach@DB_VERSION_UNIQUE_NAME@
+#define __db_e_detach __db_e_detach@DB_VERSION_UNIQUE_NAME@
+#define __db_e_remove __db_e_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_e_stat __db_e_stat@DB_VERSION_UNIQUE_NAME@
+#define __db_r_attach __db_r_attach@DB_VERSION_UNIQUE_NAME@
+#define __db_r_detach __db_r_detach@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_log __fop_create_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_getpgnos __fop_create_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_print __fop_create_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_read __fop_create_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_log __fop_remove_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_getpgnos __fop_remove_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_print __fop_remove_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_read __fop_remove_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_log __fop_write_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_getpgnos __fop_write_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_print __fop_write_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_read __fop_write_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_log __fop_rename_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_getpgnos __fop_rename_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_print __fop_rename_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_read __fop_rename_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_log __fop_file_remove_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_getpgnos __fop_file_remove_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_print __fop_file_remove_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_read __fop_file_remove_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_print __fop_init_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_getpgnos __fop_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_recover __fop_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_create __fop_create@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove __fop_remove@DB_VERSION_UNIQUE_NAME@
+#define __fop_write __fop_write@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename __fop_rename@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_recover __fop_create_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_recover __fop_remove_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_recover __fop_write_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_recover __fop_rename_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_recover __fop_file_remove_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_lock_handle __fop_lock_handle@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_setup __fop_file_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_subdb_setup __fop_subdb_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_setup __fop_remove_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_read_meta __fop_read_meta@DB_VERSION_UNIQUE_NAME@
+#define __fop_dummy __fop_dummy@DB_VERSION_UNIQUE_NAME@
+#define __fop_dbrename __fop_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __ham_quick_delete __ham_quick_delete@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_init __ham_c_init@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_count __ham_c_count@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_dup __ham_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_call_hash __ham_call_hash@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_dbt __ham_init_dbt@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_update __ham_c_update@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_clist __ham_get_clist@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_log __ham_insdel_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_getpgnos __ham_insdel_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_print __ham_insdel_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_read __ham_insdel_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_log __ham_newpage_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_getpgnos __ham_newpage_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_print __ham_newpage_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_read __ham_newpage_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_log __ham_splitdata_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_getpgnos __ham_splitdata_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_print __ham_splitdata_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_read __ham_splitdata_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_log __ham_replace_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_getpgnos __ham_replace_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_print __ham_replace_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_read __ham_replace_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_log __ham_copypage_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_getpgnos __ham_copypage_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_print __ham_copypage_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_read __ham_copypage_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_log __ham_metagroup_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_getpgnos __ham_metagroup_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_print __ham_metagroup_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_read __ham_metagroup_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_log __ham_groupalloc_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_getpgnos __ham_groupalloc_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_print __ham_groupalloc_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_read __ham_groupalloc_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_log __ham_curadj_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_getpgnos __ham_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_print __ham_curadj_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_read __ham_curadj_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_log __ham_chgpg_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_getpgnos __ham_chgpg_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_print __ham_chgpg_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_read __ham_chgpg_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_print __ham_init_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_getpgnos __ham_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_recover __ham_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_pgin __ham_pgin@DB_VERSION_UNIQUE_NAME@
+#define __ham_pgout __ham_pgout@DB_VERSION_UNIQUE_NAME@
+#define __ham_mswap __ham_mswap@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_dup __ham_add_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_dup_convert __ham_dup_convert@DB_VERSION_UNIQUE_NAME@
+#define __ham_make_dup __ham_make_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_dsearch __ham_dsearch@DB_VERSION_UNIQUE_NAME@
+#define __ham_cprint __ham_cprint@DB_VERSION_UNIQUE_NAME@
+#define __ham_func2 __ham_func2@DB_VERSION_UNIQUE_NAME@
+#define __ham_func3 __ham_func3@DB_VERSION_UNIQUE_NAME@
+#define __ham_func4 __ham_func4@DB_VERSION_UNIQUE_NAME@
+#define __ham_func5 __ham_func5@DB_VERSION_UNIQUE_NAME@
+#define __ham_test __ham_test@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_meta __ham_get_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_release_meta __ham_release_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_dirty_meta __ham_dirty_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_db_create __ham_db_create@DB_VERSION_UNIQUE_NAME@
+#define __ham_db_close __ham_db_close@DB_VERSION_UNIQUE_NAME@
+#define __ham_open __ham_open@DB_VERSION_UNIQUE_NAME@
+#define __ham_metachk __ham_metachk@DB_VERSION_UNIQUE_NAME@
+#define __ham_new_file __ham_new_file@DB_VERSION_UNIQUE_NAME@
+#define __ham_new_subdb __ham_new_subdb@DB_VERSION_UNIQUE_NAME@
+#define __ham_item __ham_item@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_reset __ham_item_reset@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_init __ham_item_init@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_last __ham_item_last@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_first __ham_item_first@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_prev __ham_item_prev@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_next __ham_item_next@DB_VERSION_UNIQUE_NAME@
+#define __ham_putitem __ham_putitem@DB_VERSION_UNIQUE_NAME@
+#define __ham_reputpair __ham_reputpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_del_pair __ham_del_pair@DB_VERSION_UNIQUE_NAME@
+#define __ham_replpair __ham_replpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_onpage_replace __ham_onpage_replace@DB_VERSION_UNIQUE_NAME@
+#define __ham_split_page __ham_split_page@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_el __ham_add_el@DB_VERSION_UNIQUE_NAME@
+#define __ham_copy_item __ham_copy_item@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_ovflpage __ham_add_ovflpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_cpage __ham_get_cpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_next_cpage __ham_next_cpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_lock_bucket __ham_lock_bucket@DB_VERSION_UNIQUE_NAME@
+#define __ham_dpair __ham_dpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_recover __ham_insdel_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_recover __ham_newpage_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_recover __ham_replace_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_recover __ham_splitdata_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_recover __ham_copypage_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_recover __ham_metagroup_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_recover __ham_groupalloc_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_recover __ham_curadj_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_recover __ham_chgpg_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_reclaim __ham_reclaim@DB_VERSION_UNIQUE_NAME@
+#define __ham_truncate __ham_truncate@DB_VERSION_UNIQUE_NAME@
+#define __ham_stat __ham_stat@DB_VERSION_UNIQUE_NAME@
+#define __ham_traverse __ham_traverse@DB_VERSION_UNIQUE_NAME@
+#define __ham_30_hashmeta __ham_30_hashmeta@DB_VERSION_UNIQUE_NAME@
+#define __ham_30_sizefix __ham_30_sizefix@DB_VERSION_UNIQUE_NAME@
+#define __ham_31_hashmeta __ham_31_hashmeta@DB_VERSION_UNIQUE_NAME@
+#define __ham_31_hash __ham_31_hash@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_meta __ham_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy __ham_vrfy@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_structure __ham_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_hashing __ham_vrfy_hashing@DB_VERSION_UNIQUE_NAME@
+#define __ham_salvage __ham_salvage@DB_VERSION_UNIQUE_NAME@
+#define __ham_meta2pgset __ham_meta2pgset@DB_VERSION_UNIQUE_NAME@
+#define __db_chksum __db_chksum@DB_VERSION_UNIQUE_NAME@
+#define __db_derive_mac __db_derive_mac@DB_VERSION_UNIQUE_NAME@
+#define __db_check_chksum __db_check_chksum@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Transform __db_SHA1Transform@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Init __db_SHA1Init@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Update __db_SHA1Update@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Final __db_SHA1Final@DB_VERSION_UNIQUE_NAME@
+#define __lock_id __lock_id@DB_VERSION_UNIQUE_NAME@
+#define __lock_id_free __lock_id_free@DB_VERSION_UNIQUE_NAME@
+#define __lock_vec __lock_vec@DB_VERSION_UNIQUE_NAME@
+#define __lock_get __lock_get@DB_VERSION_UNIQUE_NAME@
+#define __lock_put __lock_put@DB_VERSION_UNIQUE_NAME@
+#define __lock_downgrade __lock_downgrade@DB_VERSION_UNIQUE_NAME@
+#define __lock_addfamilylocker __lock_addfamilylocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_freefamilylocker __lock_freefamilylocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_set_timeout __lock_set_timeout@DB_VERSION_UNIQUE_NAME@
+#define __lock_inherit_timeout __lock_inherit_timeout@DB_VERSION_UNIQUE_NAME@
+#define __lock_getlocker __lock_getlocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_promote __lock_promote@DB_VERSION_UNIQUE_NAME@
+#define __lock_expired __lock_expired@DB_VERSION_UNIQUE_NAME@
+#define __lock_detect __lock_detect@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_create __lock_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_close __lock_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __lock_open __lock_open@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_refresh __lock_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __lock_region_destroy __lock_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __lock_id_set __lock_id_set@DB_VERSION_UNIQUE_NAME@
+#define __lock_stat __lock_stat@DB_VERSION_UNIQUE_NAME@
+#define __lock_dump_region __lock_dump_region@DB_VERSION_UNIQUE_NAME@
+#define __lock_printlock __lock_printlock@DB_VERSION_UNIQUE_NAME@
+#define __lock_cmp __lock_cmp@DB_VERSION_UNIQUE_NAME@
+#define __lock_locker_cmp __lock_locker_cmp@DB_VERSION_UNIQUE_NAME@
+#define __lock_ohash __lock_ohash@DB_VERSION_UNIQUE_NAME@
+#define __lock_lhash __lock_lhash@DB_VERSION_UNIQUE_NAME@
+#define __lock_locker_hash __lock_locker_hash@DB_VERSION_UNIQUE_NAME@
+#define __log_open __log_open@DB_VERSION_UNIQUE_NAME@
+#define __log_find __log_find@DB_VERSION_UNIQUE_NAME@
+#define __log_valid __log_valid@DB_VERSION_UNIQUE_NAME@
+#define __log_dbenv_refresh __log_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __log_stat __log_stat@DB_VERSION_UNIQUE_NAME@
+#define __log_get_cached_ckp_lsn __log_get_cached_ckp_lsn@DB_VERSION_UNIQUE_NAME@
+#define __log_region_destroy __log_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __log_vtruncate __log_vtruncate@DB_VERSION_UNIQUE_NAME@
+#define __log_is_outdated __log_is_outdated@DB_VERSION_UNIQUE_NAME@
+#define __log_archive __log_archive@DB_VERSION_UNIQUE_NAME@
+#define __log_cursor __log_cursor@DB_VERSION_UNIQUE_NAME@
+#define __log_dbenv_create __log_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __log_put __log_put@DB_VERSION_UNIQUE_NAME@
+#define __log_txn_lsn __log_txn_lsn@DB_VERSION_UNIQUE_NAME@
+#define __log_newfile __log_newfile@DB_VERSION_UNIQUE_NAME@
+#define __log_flush __log_flush@DB_VERSION_UNIQUE_NAME@
+#define __log_file __log_file@DB_VERSION_UNIQUE_NAME@
+#define __log_name __log_name@DB_VERSION_UNIQUE_NAME@
+#define __log_rep_put __log_rep_put@DB_VERSION_UNIQUE_NAME@
+#define __memp_alloc __memp_alloc@DB_VERSION_UNIQUE_NAME@
+#ifdef DIAGNOSTIC
+#define __memp_check_order __memp_check_order@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
+#define __memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
+#define __memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
+#define __memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
+#define __memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
+#define __memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
+#define __memp_fopen_int __memp_fopen_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_fclose_int __memp_fclose_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
+#define __memp_fn __memp_fn@DB_VERSION_UNIQUE_NAME@
+#define __memp_fns __memp_fns@DB_VERSION_UNIQUE_NAME@
+#define __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
+#define __memp_fset __memp_fset@DB_VERSION_UNIQUE_NAME@
+#define __memp_dbenv_create __memp_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __memp_open __memp_open@DB_VERSION_UNIQUE_NAME@
+#define __memp_dbenv_refresh __memp_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __mpool_region_destroy __mpool_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __memp_nameop __memp_nameop@DB_VERSION_UNIQUE_NAME@
+#define __memp_register __memp_register@DB_VERSION_UNIQUE_NAME@
+#define __memp_stat __memp_stat@DB_VERSION_UNIQUE_NAME@
+#define __memp_dump_region __memp_dump_region@DB_VERSION_UNIQUE_NAME@
+#define __memp_stat_hash __memp_stat_hash@DB_VERSION_UNIQUE_NAME@
+#define __memp_sync __memp_sync@DB_VERSION_UNIQUE_NAME@
+#define __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
+#define __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
+#define __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_trickle __memp_trickle@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_init __db_fcntl_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_lock __db_fcntl_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_unlock __db_fcntl_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_destroy __db_fcntl_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_init __db_pthread_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_lock __db_pthread_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_unlock __db_pthread_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_destroy __db_pthread_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_init __db_tas_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_lock __db_tas_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_unlock __db_tas_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_destroy __db_tas_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_init __db_win32_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_lock __db_win32_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_unlock __db_win32_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_destroy __db_win32_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_mutex_setup __db_mutex_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_mutex_free __db_mutex_free@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_locks_clear __db_shreg_locks_clear@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_locks_destroy __db_shreg_locks_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_mutex_init __db_shreg_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_maintinit __db_shreg_maintinit@DB_VERSION_UNIQUE_NAME@
+#define __os_abspath __os_abspath@DB_VERSION_UNIQUE_NAME@
+#define __os_umalloc __os_umalloc@DB_VERSION_UNIQUE_NAME@
+#define __os_urealloc __os_urealloc@DB_VERSION_UNIQUE_NAME@
+#define __os_ufree __os_ufree@DB_VERSION_UNIQUE_NAME@
+#define __os_strdup __os_strdup@DB_VERSION_UNIQUE_NAME@
+#define __os_calloc __os_calloc@DB_VERSION_UNIQUE_NAME@
+#define __os_malloc __os_malloc@DB_VERSION_UNIQUE_NAME@
+#define __os_realloc __os_realloc@DB_VERSION_UNIQUE_NAME@
+#define __os_free __os_free@DB_VERSION_UNIQUE_NAME@
+#define __ua_memcpy __ua_memcpy@DB_VERSION_UNIQUE_NAME@
+#define __os_clock __os_clock@DB_VERSION_UNIQUE_NAME@
+#define __os_fs_notzero __os_fs_notzero@DB_VERSION_UNIQUE_NAME@
+#define __os_dirlist __os_dirlist@DB_VERSION_UNIQUE_NAME@
+#define __os_dirfree __os_dirfree@DB_VERSION_UNIQUE_NAME@
+#define __os_get_errno_ret_zero __os_get_errno_ret_zero@DB_VERSION_UNIQUE_NAME@
+#define __os_get_errno __os_get_errno@DB_VERSION_UNIQUE_NAME@
+#define __os_set_errno __os_set_errno@DB_VERSION_UNIQUE_NAME@
+#define __os_fileid __os_fileid@DB_VERSION_UNIQUE_NAME@
+#define __os_fsync __os_fsync@DB_VERSION_UNIQUE_NAME@
+#define __os_openhandle __os_openhandle@DB_VERSION_UNIQUE_NAME@
+#define __os_closehandle __os_closehandle@DB_VERSION_UNIQUE_NAME@
+#define __os_id __os_id@DB_VERSION_UNIQUE_NAME@
+#define __os_r_sysattach __os_r_sysattach@DB_VERSION_UNIQUE_NAME@
+#define __os_r_sysdetach __os_r_sysdetach@DB_VERSION_UNIQUE_NAME@
+#define __os_mapfile __os_mapfile@DB_VERSION_UNIQUE_NAME@
+#define __os_unmapfile __os_unmapfile@DB_VERSION_UNIQUE_NAME@
+#define __db_oflags __db_oflags@DB_VERSION_UNIQUE_NAME@
+#define __db_omode __db_omode@DB_VERSION_UNIQUE_NAME@
+#define __os_open __os_open@DB_VERSION_UNIQUE_NAME@
+#ifdef HAVE_QNX
+#define __os_shmname __os_shmname@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __os_r_attach __os_r_attach@DB_VERSION_UNIQUE_NAME@
+#define __os_r_detach __os_r_detach@DB_VERSION_UNIQUE_NAME@
+#define __os_rename __os_rename@DB_VERSION_UNIQUE_NAME@
+#define __os_isroot __os_isroot@DB_VERSION_UNIQUE_NAME@
+#define __db_rpath __db_rpath@DB_VERSION_UNIQUE_NAME@
+#define __os_io __os_io@DB_VERSION_UNIQUE_NAME@
+#define __os_read __os_read@DB_VERSION_UNIQUE_NAME@
+#define __os_write __os_write@DB_VERSION_UNIQUE_NAME@
+#define __os_seek __os_seek@DB_VERSION_UNIQUE_NAME@
+#define __os_sleep __os_sleep@DB_VERSION_UNIQUE_NAME@
+#define __os_spin __os_spin@DB_VERSION_UNIQUE_NAME@
+#define __os_yield __os_yield@DB_VERSION_UNIQUE_NAME@
+#define __os_exists __os_exists@DB_VERSION_UNIQUE_NAME@
+#define __os_ioinfo __os_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define __os_tmpdir __os_tmpdir@DB_VERSION_UNIQUE_NAME@
+#define __os_region_unlink __os_region_unlink@DB_VERSION_UNIQUE_NAME@
+#define __os_unlink __os_unlink@DB_VERSION_UNIQUE_NAME@
+#if defined(DB_WIN32)
+#define __os_win32_errno __os_win32_errno@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __os_fsync __os_fsync@DB_VERSION_UNIQUE_NAME@
+#define __os_openhandle __os_openhandle@DB_VERSION_UNIQUE_NAME@
+#define __os_closehandle __os_closehandle@DB_VERSION_UNIQUE_NAME@
+#define __os_io __os_io@DB_VERSION_UNIQUE_NAME@
+#define __os_read __os_read@DB_VERSION_UNIQUE_NAME@
+#define __os_write __os_write@DB_VERSION_UNIQUE_NAME@
+#define __os_exists __os_exists@DB_VERSION_UNIQUE_NAME@
+#define __os_ioinfo __os_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define __os_is_winnt __os_is_winnt@DB_VERSION_UNIQUE_NAME@
+#define __qam_position __qam_position@DB_VERSION_UNIQUE_NAME@
+#define __qam_pitem __qam_pitem@DB_VERSION_UNIQUE_NAME@
+#define __qam_append __qam_append@DB_VERSION_UNIQUE_NAME@
+#define __qam_c_dup __qam_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __qam_c_init __qam_c_init@DB_VERSION_UNIQUE_NAME@
+#define __qam_truncate __qam_truncate@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_log __qam_incfirst_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_getpgnos __qam_incfirst_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_print __qam_incfirst_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_read __qam_incfirst_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_log __qam_mvptr_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_getpgnos __qam_mvptr_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_print __qam_mvptr_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_read __qam_mvptr_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_log __qam_del_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_getpgnos __qam_del_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_print __qam_del_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_read __qam_del_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_log __qam_add_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_getpgnos __qam_add_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_print __qam_add_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_read __qam_add_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_log __qam_delext_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_getpgnos __qam_delext_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_print __qam_delext_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_read __qam_delext_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_print __qam_init_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_getpgnos __qam_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_recover __qam_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_mswap __qam_mswap@DB_VERSION_UNIQUE_NAME@
+#define __qam_pgin_out __qam_pgin_out@DB_VERSION_UNIQUE_NAME@
+#define __qam_fprobe __qam_fprobe@DB_VERSION_UNIQUE_NAME@
+#define __qam_fclose __qam_fclose@DB_VERSION_UNIQUE_NAME@
+#define __qam_fremove __qam_fremove@DB_VERSION_UNIQUE_NAME@
+#define __qam_sync __qam_sync@DB_VERSION_UNIQUE_NAME@
+#define __qam_gen_filelist __qam_gen_filelist@DB_VERSION_UNIQUE_NAME@
+#define __qam_extent_names __qam_extent_names@DB_VERSION_UNIQUE_NAME@
+#define __qam_exid __qam_exid@DB_VERSION_UNIQUE_NAME@
+#define __qam_db_create __qam_db_create@DB_VERSION_UNIQUE_NAME@
+#define __qam_db_close __qam_db_close@DB_VERSION_UNIQUE_NAME@
+#define __db_prqueue __db_prqueue@DB_VERSION_UNIQUE_NAME@
+#define __qam_remove __qam_remove@DB_VERSION_UNIQUE_NAME@
+#define __qam_rename __qam_rename@DB_VERSION_UNIQUE_NAME@
+#define __qam_open __qam_open@DB_VERSION_UNIQUE_NAME@
+#define __qam_metachk __qam_metachk@DB_VERSION_UNIQUE_NAME@
+#define __qam_new_file __qam_new_file@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_recover __qam_incfirst_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_recover __qam_mvptr_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_recover __qam_del_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_recover __qam_delext_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_recover __qam_add_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_stat __qam_stat@DB_VERSION_UNIQUE_NAME@
+#define __qam_31_qammeta __qam_31_qammeta@DB_VERSION_UNIQUE_NAME@
+#define __qam_32_qammeta __qam_32_qammeta@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_meta __qam_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_data __qam_vrfy_data@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_structure __qam_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __rep_dbenv_create __rep_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __rep_process_message __rep_process_message@DB_VERSION_UNIQUE_NAME@
+#define __rep_process_txn __rep_process_txn@DB_VERSION_UNIQUE_NAME@
+#define __rep_region_init __rep_region_init@DB_VERSION_UNIQUE_NAME@
+#define __rep_region_destroy __rep_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __rep_dbenv_close __rep_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __rep_preclose __rep_preclose@DB_VERSION_UNIQUE_NAME@
+#define __rep_check_alloc __rep_check_alloc@DB_VERSION_UNIQUE_NAME@
+#define __rep_send_message __rep_send_message@DB_VERSION_UNIQUE_NAME@
+#define __rep_new_master __rep_new_master@DB_VERSION_UNIQUE_NAME@
+#define __rep_lockpgno_init __rep_lockpgno_init@DB_VERSION_UNIQUE_NAME@
+#define __rep_unlockpages __rep_unlockpages@DB_VERSION_UNIQUE_NAME@
+#define __rep_lockpages __rep_lockpages@DB_VERSION_UNIQUE_NAME@
+#define __rep_is_client __rep_is_client@DB_VERSION_UNIQUE_NAME@
+#define __rep_send_vote __rep_send_vote@DB_VERSION_UNIQUE_NAME@
+#define __rep_grow_sites __rep_grow_sites@DB_VERSION_UNIQUE_NAME@
+#define __rep_print_message __rep_print_message@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_envrpcserver __dbcl_envrpcserver@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open_wrap __dbcl_env_open_wrap@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open_wrap __dbcl_db_open_wrap@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_refresh __dbcl_refresh@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_retcopy __dbcl_retcopy@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_end __dbcl_txn_end@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_setup __dbcl_txn_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_c_refresh __dbcl_c_refresh@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_c_setup __dbcl_c_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbclose_common __dbcl_dbclose_common@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_alloc __dbcl_env_alloc@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_app_dispatch __dbcl_set_app_dispatch@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_cachesize __dbcl_env_cachesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_close __dbcl_env_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_create __dbcl_env_create@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_data_dir __dbcl_set_data_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_dbremove __dbcl_env_dbremove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_dbrename __dbcl_env_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_encrypt __dbcl_env_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_set_feedback __dbcl_env_set_feedback@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_flags __dbcl_env_flags@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_bsize __dbcl_set_lg_bsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_dir __dbcl_set_lg_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_max __dbcl_set_lg_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_regionmax __dbcl_set_lg_regionmax@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_conflict __dbcl_set_lk_conflict@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_detect __dbcl_set_lk_detect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max __dbcl_set_lk_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_locks __dbcl_set_lk_max_locks@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_lockers __dbcl_set_lk_max_lockers@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_objects __dbcl_set_lk_max_objects@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_mp_mmapsize __dbcl_set_mp_mmapsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open __dbcl_env_open@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_paniccall __dbcl_env_paniccall@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_remove __dbcl_env_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_shm_key __dbcl_set_shm_key@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tas_spins __dbcl_set_tas_spins@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_timeout __dbcl_set_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tmp_dir __dbcl_set_tmp_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tx_max __dbcl_set_tx_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tx_timestamp __dbcl_set_tx_timestamp@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_verbose __dbcl_set_verbose@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_abort __dbcl_txn_abort@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_begin __dbcl_txn_begin@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_checkpoint __dbcl_txn_checkpoint@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_commit __dbcl_txn_commit@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_discard __dbcl_txn_discard@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_prepare __dbcl_txn_prepare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_recover __dbcl_txn_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_stat __dbcl_txn_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_timeout __dbcl_txn_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_elect __dbcl_rep_elect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_flush __dbcl_rep_flush@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_process_message __dbcl_rep_process_message@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_limit __dbcl_rep_set_limit@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_request __dbcl_rep_set_request@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_rep_transport __dbcl_rep_set_rep_transport@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_start __dbcl_rep_start@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_stat __dbcl_rep_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_alloc __dbcl_db_alloc@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_associate __dbcl_db_associate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_compare __dbcl_db_bt_compare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_maxkey __dbcl_db_bt_maxkey@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_minkey __dbcl_db_bt_minkey@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_prefix __dbcl_db_bt_prefix@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_set_append_recno __dbcl_db_set_append_recno@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cache_priority __dbcl_db_cache_priority@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cachesize __dbcl_db_cachesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_close __dbcl_db_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_create __dbcl_db_create@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_del __dbcl_db_del@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_dup_compare __dbcl_db_dup_compare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_encrypt __dbcl_db_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_extentsize __dbcl_db_extentsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_fd __dbcl_db_fd@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_feedback __dbcl_db_feedback@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_flags __dbcl_db_flags@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_get __dbcl_db_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_ffactor __dbcl_db_h_ffactor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_hash __dbcl_db_h_hash@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_nelem __dbcl_db_h_nelem@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_key_range __dbcl_db_key_range@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_lorder __dbcl_db_lorder@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open __dbcl_db_open@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pagesize __dbcl_db_pagesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_panic __dbcl_db_panic@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pget __dbcl_db_pget@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_put __dbcl_db_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_delim __dbcl_db_re_delim@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_len __dbcl_db_re_len@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_pad __dbcl_db_re_pad@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_source __dbcl_db_re_source@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_remove __dbcl_db_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_rename __dbcl_db_rename@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_stat __dbcl_db_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_sync __dbcl_db_sync@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_truncate __dbcl_db_truncate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_upgrade __dbcl_db_upgrade@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_verify __dbcl_db_verify@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cursor __dbcl_db_cursor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_join __dbcl_db_join@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_close __dbcl_dbc_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_count __dbcl_dbc_count@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_del __dbcl_dbc_del@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_dup __dbcl_dbc_dup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_get __dbcl_dbc_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_pget __dbcl_dbc_pget@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_put __dbcl_dbc_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_detect __dbcl_lock_detect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_get __dbcl_lock_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_id __dbcl_lock_id@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_id_free __dbcl_lock_id_free@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_put __dbcl_lock_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_stat __dbcl_lock_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_vec __dbcl_lock_vec@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_archive __dbcl_log_archive@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_cursor __dbcl_log_cursor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_file __dbcl_log_file@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_flush __dbcl_log_flush@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_put __dbcl_log_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_stat __dbcl_log_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_fcreate __dbcl_memp_fcreate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_register __dbcl_memp_register@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_stat __dbcl_memp_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_sync __dbcl_memp_sync@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_trickle __dbcl_memp_trickle@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_close_ret __dbcl_env_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_create_ret __dbcl_env_create_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open_ret __dbcl_env_open_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_remove_ret __dbcl_env_remove_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_abort_ret __dbcl_txn_abort_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_begin_ret __dbcl_txn_begin_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_commit_ret __dbcl_txn_commit_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_discard_ret __dbcl_txn_discard_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_recover_ret __dbcl_txn_recover_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_close_ret __dbcl_db_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_create_ret __dbcl_db_create_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_get_ret __dbcl_db_get_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_key_range_ret __dbcl_db_key_range_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open_ret __dbcl_db_open_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pget_ret __dbcl_db_pget_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_put_ret __dbcl_db_put_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_remove_ret __dbcl_db_remove_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_rename_ret __dbcl_db_rename_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_stat_ret __dbcl_db_stat_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_truncate_ret __dbcl_db_truncate_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cursor_ret __dbcl_db_cursor_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_join_ret __dbcl_db_join_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_close_ret __dbcl_dbc_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_count_ret __dbcl_dbc_count_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_dup_ret __dbcl_dbc_dup_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_get_ret __dbcl_dbc_get_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_pget_ret __dbcl_dbc_pget_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_put_ret __dbcl_dbc_put_ret@DB_VERSION_UNIQUE_NAME@
+#define __env_cachesize_proc __env_cachesize_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_close_proc __env_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_create_proc __env_create_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_dbremove_proc __env_dbremove_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_dbrename_proc __env_dbrename_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_encrypt_proc __env_encrypt_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_flags_proc __env_flags_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_open_proc __env_open_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_remove_proc __env_remove_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_abort_proc __txn_abort_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_begin_proc __txn_begin_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_commit_proc __txn_commit_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_discard_proc __txn_discard_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_prepare_proc __txn_prepare_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_recover_proc __txn_recover_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_bt_maxkey_proc __db_bt_maxkey_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_associate_proc __db_associate_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_bt_minkey_proc __db_bt_minkey_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_close_proc __db_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_create_proc __db_create_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_del_proc __db_del_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_encrypt_proc __db_encrypt_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_extentsize_proc __db_extentsize_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_flags_proc __db_flags_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_get_proc __db_get_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_h_ffactor_proc __db_h_ffactor_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_h_nelem_proc __db_h_nelem_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_key_range_proc __db_key_range_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_lorder_proc __db_lorder_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_open_proc __db_open_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_pagesize_proc __db_pagesize_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_pget_proc __db_pget_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_put_proc __db_put_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_delim_proc __db_re_delim_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_len_proc __db_re_len_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_pad_proc __db_re_pad_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_remove_proc __db_remove_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_rename_proc __db_rename_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_stat_proc __db_stat_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_sync_proc __db_sync_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate_proc __db_truncate_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_cursor_proc __db_cursor_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_join_proc __db_join_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_close_proc __dbc_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_count_proc __dbc_count_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_del_proc __dbc_del_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_dup_proc __dbc_dup_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_get_proc __dbc_get_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_pget_proc __dbc_pget_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_put_proc __dbc_put_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_settimeout __dbsrv_settimeout@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_timeout __dbsrv_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbclear_ctp __dbclear_ctp@DB_VERSION_UNIQUE_NAME@
+#define __dbdel_ctp __dbdel_ctp@DB_VERSION_UNIQUE_NAME@
+#define new_ct_ent new_ct_ent@DB_VERSION_UNIQUE_NAME@
+#define get_tableent get_tableent@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_sharedb __dbsrv_sharedb@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_shareenv __dbsrv_shareenv@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_active __dbsrv_active@DB_VERSION_UNIQUE_NAME@
+#define __db_close_int __db_close_int@DB_VERSION_UNIQUE_NAME@
+#define __dbc_close_int __dbc_close_int@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_close_int __dbenv_close_int@DB_VERSION_UNIQUE_NAME@
+#define get_home get_home@DB_VERSION_UNIQUE_NAME@
+#define bdb_HCommand bdb_HCommand@DB_VERSION_UNIQUE_NAME@
+#if DB_DBM_HSEARCH != 0
+#define bdb_NdbmOpen bdb_NdbmOpen@DB_VERSION_UNIQUE_NAME@
+#endif
+#if DB_DBM_HSEARCH != 0
+#define bdb_DbmCommand bdb_DbmCommand@DB_VERSION_UNIQUE_NAME@
+#endif
+#define ndbm_Cmd ndbm_Cmd@DB_VERSION_UNIQUE_NAME@
+#define _DbInfoDelete _DbInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define db_Cmd db_Cmd@DB_VERSION_UNIQUE_NAME@
+#define dbc_Cmd dbc_Cmd@DB_VERSION_UNIQUE_NAME@
+#define env_Cmd env_Cmd@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvRemove tcl_EnvRemove@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvVerbose tcl_EnvVerbose@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvAttr tcl_EnvAttr@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvTest tcl_EnvTest@DB_VERSION_UNIQUE_NAME@
+#define _NewInfo _NewInfo@DB_VERSION_UNIQUE_NAME@
+#define _NameToPtr _NameToPtr@DB_VERSION_UNIQUE_NAME@
+#define _PtrToInfo _PtrToInfo@DB_VERSION_UNIQUE_NAME@
+#define _NameToInfo _NameToInfo@DB_VERSION_UNIQUE_NAME@
+#define _SetInfoData _SetInfoData@DB_VERSION_UNIQUE_NAME@
+#define _DeleteInfo _DeleteInfo@DB_VERSION_UNIQUE_NAME@
+#define _SetListElem _SetListElem@DB_VERSION_UNIQUE_NAME@
+#define _SetListElemInt _SetListElemInt@DB_VERSION_UNIQUE_NAME@
+#define _SetListRecnoElem _SetListRecnoElem@DB_VERSION_UNIQUE_NAME@
+#define _Set3DBTList _Set3DBTList@DB_VERSION_UNIQUE_NAME@
+#define _SetMultiList _SetMultiList@DB_VERSION_UNIQUE_NAME@
+#define _GetGlobPrefix _GetGlobPrefix@DB_VERSION_UNIQUE_NAME@
+#define _ReturnSetup _ReturnSetup@DB_VERSION_UNIQUE_NAME@
+#define _ErrorSetup _ErrorSetup@DB_VERSION_UNIQUE_NAME@
+#define _ErrorFunc _ErrorFunc@DB_VERSION_UNIQUE_NAME@
+#define _GetLsn _GetLsn@DB_VERSION_UNIQUE_NAME@
+#define _GetUInt32 _GetUInt32@DB_VERSION_UNIQUE_NAME@
+#define _GetFlagsList _GetFlagsList@DB_VERSION_UNIQUE_NAME@
+#define _debug_check _debug_check@DB_VERSION_UNIQUE_NAME@
+#define _CopyObjBytes _CopyObjBytes@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockDetect tcl_LockDetect@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockGet tcl_LockGet@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockStat tcl_LockStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockTimeout tcl_LockTimeout@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockVec tcl_LockVec@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogArchive tcl_LogArchive@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogCompare tcl_LogCompare@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogFile tcl_LogFile@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogFlush tcl_LogFlush@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogGet tcl_LogGet@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogPut tcl_LogPut@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogStat tcl_LogStat@DB_VERSION_UNIQUE_NAME@
+#define logc_Cmd logc_Cmd@DB_VERSION_UNIQUE_NAME@
+#define _MpInfoDelete _MpInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpSync tcl_MpSync@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpTrickle tcl_MpTrickle@DB_VERSION_UNIQUE_NAME@
+#define tcl_Mp tcl_Mp@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpStat tcl_MpStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepElect tcl_RepElect@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepFlush tcl_RepFlush@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepLimit tcl_RepLimit@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepRequest tcl_RepRequest@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepStart tcl_RepStart@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepProcessMessage tcl_RepProcessMessage@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepStat tcl_RepStat@DB_VERSION_UNIQUE_NAME@
+#define _TxnInfoDelete _TxnInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnCheckpoint tcl_TxnCheckpoint@DB_VERSION_UNIQUE_NAME@
+#define tcl_Txn tcl_Txn@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnStat tcl_TxnStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnTimeout tcl_TxnTimeout@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnRecover tcl_TxnRecover@DB_VERSION_UNIQUE_NAME@
+#define bdb_RandCommand bdb_RandCommand@DB_VERSION_UNIQUE_NAME@
+#define tcl_Mutex tcl_Mutex@DB_VERSION_UNIQUE_NAME@
+#define __txn_begin __txn_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_begin __txn_xa_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_compensate_begin __txn_compensate_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_commit __txn_commit@DB_VERSION_UNIQUE_NAME@
+#define __txn_abort __txn_abort@DB_VERSION_UNIQUE_NAME@
+#define __txn_discard __txn_discard@DB_VERSION_UNIQUE_NAME@
+#define __txn_prepare __txn_prepare@DB_VERSION_UNIQUE_NAME@
+#define __txn_id __txn_id@DB_VERSION_UNIQUE_NAME@
+#define __txn_checkpoint __txn_checkpoint@DB_VERSION_UNIQUE_NAME@
+#define __txn_getckp __txn_getckp@DB_VERSION_UNIQUE_NAME@
+#define __txn_activekids __txn_activekids@DB_VERSION_UNIQUE_NAME@
+#define __txn_force_abort __txn_force_abort@DB_VERSION_UNIQUE_NAME@
+#define __txn_preclose __txn_preclose@DB_VERSION_UNIQUE_NAME@
+#define __txn_reset __txn_reset@DB_VERSION_UNIQUE_NAME@
+#define __txn_updateckp __txn_updateckp@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_log __txn_regop_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_getpgnos __txn_regop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_print __txn_regop_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_read __txn_regop_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_log __txn_ckp_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_getpgnos __txn_ckp_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_print __txn_ckp_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_read __txn_ckp_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_log __txn_child_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_getpgnos __txn_child_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_print __txn_child_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_read __txn_child_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_log __txn_xa_regop_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_getpgnos __txn_xa_regop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_print __txn_xa_regop_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_read __txn_xa_regop_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_log __txn_recycle_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_getpgnos __txn_recycle_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_print __txn_recycle_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_read __txn_recycle_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_print __txn_init_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_getpgnos __txn_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_recover __txn_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_dbenv_create __txn_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_recover __txn_regop_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_recover __txn_xa_regop_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_recover __txn_ckp_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_recover __txn_child_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_restore_txn __txn_restore_txn@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_recover __txn_recycle_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_continue __txn_continue@DB_VERSION_UNIQUE_NAME@
+#define __txn_map_gid __txn_map_gid@DB_VERSION_UNIQUE_NAME@
+#define __txn_recover __txn_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_get_prepared __txn_get_prepared@DB_VERSION_UNIQUE_NAME@
+#define __txn_open __txn_open@DB_VERSION_UNIQUE_NAME@
+#define __txn_dbenv_refresh __txn_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __txn_region_destroy __txn_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __txn_id_set __txn_id_set@DB_VERSION_UNIQUE_NAME@
+#define __txn_stat __txn_stat@DB_VERSION_UNIQUE_NAME@
+#define __txn_remevent __txn_remevent@DB_VERSION_UNIQUE_NAME@
+#define __txn_lockevent __txn_lockevent@DB_VERSION_UNIQUE_NAME@
+#define __txn_remlock __txn_remlock@DB_VERSION_UNIQUE_NAME@
+#define __txn_doevents __txn_doevents@DB_VERSION_UNIQUE_NAME@
+#define __db_xa_create __db_xa_create@DB_VERSION_UNIQUE_NAME@
+#define __db_rmid_to_env __db_rmid_to_env@DB_VERSION_UNIQUE_NAME@
+#define __db_xid_to_txn __db_xid_to_txn@DB_VERSION_UNIQUE_NAME@
+#define __db_map_rmid __db_map_rmid@DB_VERSION_UNIQUE_NAME@
+#define __db_unmap_rmid __db_unmap_rmid@DB_VERSION_UNIQUE_NAME@
+#define __db_map_xid __db_map_xid@DB_VERSION_UNIQUE_NAME@
+#define __db_unmap_xid __db_unmap_xid@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_cachesize_msg xdr___env_cachesize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_cachesize_reply xdr___env_cachesize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_close_msg xdr___env_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_close_reply xdr___env_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_create_msg xdr___env_create_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_create_reply xdr___env_create_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbremove_msg xdr___env_dbremove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbremove_reply xdr___env_dbremove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbrename_msg xdr___env_dbrename_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbrename_reply xdr___env_dbrename_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_encrypt_msg xdr___env_encrypt_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_encrypt_reply xdr___env_encrypt_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_flags_msg xdr___env_flags_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_flags_reply xdr___env_flags_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_open_msg xdr___env_open_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_open_reply xdr___env_open_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_remove_msg xdr___env_remove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_remove_reply xdr___env_remove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_abort_msg xdr___txn_abort_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_abort_reply xdr___txn_abort_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_begin_msg xdr___txn_begin_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_begin_reply xdr___txn_begin_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_commit_msg xdr___txn_commit_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_commit_reply xdr___txn_commit_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_discard_msg xdr___txn_discard_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_discard_reply xdr___txn_discard_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_prepare_msg xdr___txn_prepare_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_prepare_reply xdr___txn_prepare_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_recover_msg xdr___txn_recover_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_recover_reply xdr___txn_recover_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_associate_msg xdr___db_associate_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_associate_reply xdr___db_associate_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_maxkey_msg xdr___db_bt_maxkey_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_maxkey_reply xdr___db_bt_maxkey_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_minkey_msg xdr___db_bt_minkey_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_minkey_reply xdr___db_bt_minkey_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_close_msg xdr___db_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_close_reply xdr___db_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_create_msg xdr___db_create_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_create_reply xdr___db_create_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_del_msg xdr___db_del_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_del_reply xdr___db_del_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_encrypt_msg xdr___db_encrypt_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_encrypt_reply xdr___db_encrypt_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_extentsize_msg xdr___db_extentsize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_extentsize_reply xdr___db_extentsize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_flags_msg xdr___db_flags_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_flags_reply xdr___db_flags_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_get_msg xdr___db_get_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_get_reply xdr___db_get_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_ffactor_msg xdr___db_h_ffactor_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_ffactor_reply xdr___db_h_ffactor_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_nelem_msg xdr___db_h_nelem_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_nelem_reply xdr___db_h_nelem_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_key_range_msg xdr___db_key_range_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_key_range_reply xdr___db_key_range_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_lorder_msg xdr___db_lorder_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_lorder_reply xdr___db_lorder_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_open_msg xdr___db_open_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_open_reply xdr___db_open_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pagesize_msg xdr___db_pagesize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pagesize_reply xdr___db_pagesize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pget_msg xdr___db_pget_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pget_reply xdr___db_pget_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_put_msg xdr___db_put_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_put_reply xdr___db_put_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_delim_msg xdr___db_re_delim_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_delim_reply xdr___db_re_delim_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_len_msg xdr___db_re_len_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_len_reply xdr___db_re_len_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_pad_msg xdr___db_re_pad_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_pad_reply xdr___db_re_pad_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_remove_msg xdr___db_remove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_remove_reply xdr___db_remove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_rename_msg xdr___db_rename_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_rename_reply xdr___db_rename_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_stat_msg xdr___db_stat_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_stat_reply xdr___db_stat_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_sync_msg xdr___db_sync_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_sync_reply xdr___db_sync_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_truncate_msg xdr___db_truncate_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_truncate_reply xdr___db_truncate_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_cursor_msg xdr___db_cursor_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_cursor_reply xdr___db_cursor_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_join_msg xdr___db_join_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_join_reply xdr___db_join_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_close_msg xdr___dbc_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_close_reply xdr___dbc_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_count_msg xdr___dbc_count_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_count_reply xdr___dbc_count_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_del_msg xdr___dbc_del_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_del_reply xdr___dbc_del_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_dup_msg xdr___dbc_dup_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_dup_reply xdr___dbc_dup_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_get_msg xdr___dbc_get_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_get_reply xdr___dbc_get_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_pget_msg xdr___dbc_pget_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_pget_reply xdr___dbc_pget_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_put_msg xdr___dbc_put_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_put_reply xdr___dbc_put_reply@DB_VERSION_UNIQUE_NAME@
+#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@
+#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@
+
+#endif /* !_DB_INT_DEF_IN_ */
diff --git a/libdb/dbinc_auto/lock_ext.h b/libdb/dbinc_auto/lock_ext.h
new file mode 100644
index 0000000..be6b1d0
--- /dev/null
+++ b/libdb/dbinc_auto/lock_ext.h
@@ -0,0 +1,41 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _lock_ext_h_
+#define _lock_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __lock_id __P((DB_ENV *, u_int32_t *));
+int __lock_id_free __P((DB_ENV *, u_int32_t));
+int __lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+int __lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int __lock_put __P((DB_ENV *, DB_LOCK *));
+int __lock_downgrade __P((DB_ENV *, DB_LOCK *, db_lockmode_t, u_int32_t));
+int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+int __lock_set_timeout __P(( DB_ENV *, u_int32_t, db_timeout_t, u_int32_t));
+int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t));
+int __lock_getlocker __P((DB_LOCKTAB *, u_int32_t, u_int32_t, int, DB_LOCKER **));
+int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *));
+int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+void __lock_dbenv_create __P((DB_ENV *));
+void __lock_dbenv_close __P((DB_ENV *));
+int __lock_open __P((DB_ENV *));
+int __lock_dbenv_refresh __P((DB_ENV *));
+void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+int __lock_dump_region __P((DB_ENV *, char *, FILE *));
+void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+u_int32_t __lock_ohash __P((const DBT *));
+u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+u_int32_t __lock_locker_hash __P((u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_lock_ext_h_ */
diff --git a/libdb/dbinc_auto/log_ext.h b/libdb/dbinc_auto/log_ext.h
new file mode 100644
index 0000000..6fc69af
--- /dev/null
+++ b/libdb/dbinc_auto/log_ext.h
@@ -0,0 +1,32 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _log_ext_h_
+#define _log_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __log_open __P((DB_ENV *));
+int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *));
+int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+int __log_dbenv_refresh __P((DB_ENV *));
+int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+void __log_region_destroy __P((DB_ENV *, REGINFO *));
+int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *));
+int __log_is_outdated __P((DB_ENV *dbenv, u_int32_t fnum, int *outdatedp));
+int __log_archive __P((DB_ENV *, char **[], u_int32_t));
+int __log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+void __log_dbenv_create __P((DB_ENV *));
+int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+void __log_txn_lsn __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
+int __log_newfile __P((DB_LOG *, DB_LSN *));
+int __log_flush __P((DB_ENV *, const DB_LSN *));
+int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int __log_name __P((DB_LOG *, u_int32_t, char **, DB_FH *, u_int32_t));
+int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_log_ext_h_ */
diff --git a/libdb/dbinc_auto/mp_ext.h b/libdb/dbinc_auto/mp_ext.h
new file mode 100644
index 0000000..ceadb3d
--- /dev/null
+++ b/libdb/dbinc_auto/mp_ext.h
@@ -0,0 +1,44 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mp_ext_h_
+#define _mp_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __memp_alloc __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+#ifdef DIAGNOSTIC
+void __memp_check_order __P((DB_MPOOL_HASH *));
+#endif
+int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
+int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int));
+int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+int __memp_fopen_int __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, u_int32_t, int, size_t));
+int __memp_fclose_int __P((DB_MPOOLFILE *, u_int32_t));
+int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+char * __memp_fn __P((DB_MPOOLFILE *));
+char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+void __memp_dbenv_create __P((DB_ENV *));
+int __memp_open __P((DB_ENV *));
+int __memp_dbenv_refresh __P((DB_ENV *));
+void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
+int __memp_nameop __P((DB_ENV *, u_int8_t *, const char *, const char *, const char *));
+int __memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int __memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+int __memp_dump_region __P((DB_ENV *, char *, FILE *));
+void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *));
+int __memp_sync __P((DB_ENV *, DB_LSN *));
+int __memp_fsync __P((DB_MPOOLFILE *));
+int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+int __memp_sync_int __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *));
+int __memp_trickle __P((DB_ENV *, int, int *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_mp_ext_h_ */
diff --git a/libdb/dbinc_auto/mutex_ext.h b/libdb/dbinc_auto/mutex_ext.h
new file mode 100644
index 0000000..a40f04d
--- /dev/null
+++ b/libdb/dbinc_auto/mutex_ext.h
@@ -0,0 +1,35 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mutex_ext_h_
+#define _mutex_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
+int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_pthread_mutex_destroy __P((DB_MUTEX *));
+int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_tas_mutex_destroy __P((DB_MUTEX *));
+int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_win32_mutex_destroy __P((DB_MUTEX *));
+int __db_mutex_setup __P((DB_ENV *, REGINFO *, void *, u_int32_t));
+void __db_mutex_free __P((DB_ENV *, REGINFO *, DB_MUTEX *));
+void __db_shreg_locks_clear __P((DB_MUTEX *, REGINFO *, REGMAINT *));
+void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+int __db_shreg_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t, u_int32_t, REGINFO *, REGMAINT *));
+void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_mutex_ext_h_ */
diff --git a/libdb/dbinc_auto/os_ext.h b/libdb/dbinc_auto/os_ext.h
new file mode 100644
index 0000000..0a2e5ab
--- /dev/null
+++ b/libdb/dbinc_auto/os_ext.h
@@ -0,0 +1,74 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _os_ext_h_
+#define _os_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __os_abspath __P((const char *));
+int __os_umalloc __P((DB_ENV *, size_t, void *));
+int __os_urealloc __P((DB_ENV *, size_t, void *));
+int __os_ufree __P((DB_ENV *, void *));
+int __os_strdup __P((DB_ENV *, const char *, void *));
+int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+int __os_malloc __P((DB_ENV *, size_t, void *));
+int __os_realloc __P((DB_ENV *, size_t, void *));
+void __os_free __P((DB_ENV *, void *));
+void *__ua_memcpy __P((void *, const void *, size_t));
+int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+int __os_fs_notzero __P((void));
+int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+void __os_dirfree __P((DB_ENV *, char **, int));
+int __os_get_errno_ret_zero __P((void));
+int __os_get_errno __P((void));
+void __os_set_errno __P((int));
+int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+int __os_fsync __P((DB_ENV *, DB_FH *));
+int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+int __os_closehandle __P((DB_ENV *, DB_FH *));
+void __os_id __P((u_int32_t *));
+int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+int __os_mapfile __P((DB_ENV *, char *, DB_FH *, size_t, int, void **));
+int __os_unmapfile __P((DB_ENV *, void *, size_t));
+u_int32_t __db_oflags __P((int));
+int __db_omode __P((const char *));
+int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+#ifdef HAVE_QNX
+int __os_shmname __P((DB_ENV *, const char *, char **));
+#endif
+int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+int __os_rename __P((DB_ENV *, const char *, const char *, u_int32_t));
+int __os_isroot __P((void));
+char *__db_rpath __P((const char *));
+int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_seek __P((DB_ENV *, DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+int __os_sleep __P((DB_ENV *, u_long, u_long));
+int __os_spin __P((DB_ENV *));
+void __os_yield __P((DB_ENV*, u_long));
+int __os_exists __P((const char *, int *));
+int __os_ioinfo __P((DB_ENV *, const char *, DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+int __os_tmpdir __P((DB_ENV *, u_int32_t));
+int __os_region_unlink __P((DB_ENV *, const char *));
+int __os_unlink __P((DB_ENV *, const char *));
+#if defined(DB_WIN32)
+int __os_win32_errno __P((void));
+#endif
+int __os_fsync __P((DB_ENV *, DB_FH *));
+int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+int __os_closehandle __P((DB_ENV *, DB_FH *));
+int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_exists __P((const char *, int *));
+int __os_ioinfo __P((DB_ENV *, const char *, DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+int __os_is_winnt __P((void));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_os_ext_h_ */
diff --git a/libdb/dbinc_auto/qam_auto.h b/libdb/dbinc_auto/qam_auto.h
new file mode 100644
index 0000000..655c6d0
--- /dev/null
+++ b/libdb/dbinc_auto/qam_auto.h
@@ -0,0 +1,70 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __qam_AUTO_H
+#define __qam_AUTO_H
+#define DB___qam_incfirst 84
+typedef struct ___qam_incfirst_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_recno_t recno;
+ db_pgno_t meta_pgno;
+} __qam_incfirst_args;
+
+#define DB___qam_mvptr 85
+typedef struct ___qam_mvptr_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_recno_t old_first;
+ db_recno_t new_first;
+ db_recno_t old_cur;
+ db_recno_t new_cur;
+ DB_LSN metalsn;
+ db_pgno_t meta_pgno;
+} __qam_mvptr_args;
+
+#define DB___qam_del 79
+typedef struct ___qam_del_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+} __qam_del_args;
+
+#define DB___qam_add 80
+typedef struct ___qam_add_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+ u_int32_t vflag;
+ DBT olddata;
+} __qam_add_args;
+
+#define DB___qam_delext 83
+typedef struct ___qam_delext_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+} __qam_delext_args;
+
+#endif
diff --git a/libdb/dbinc_auto/qam_ext.h b/libdb/dbinc_auto/qam_ext.h
new file mode 100644
index 0000000..16dbea7
--- /dev/null
+++ b/libdb/dbinc_auto/qam_ext.h
@@ -0,0 +1,70 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _qam_ext_h_
+#define _qam_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __qam_position __P((DBC *, db_recno_t *, qam_position_mode, int *));
+int __qam_pitem __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+int __qam_append __P((DBC *, DBT *, DBT *));
+int __qam_c_dup __P((DBC *, DBC *));
+int __qam_c_init __P((DBC *));
+int __qam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+int __qam_incfirst_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_recno_t, db_pgno_t));
+int __qam_incfirst_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_read __P((DB_ENV *, void *, __qam_incfirst_args **));
+int __qam_mvptr_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_recno_t, db_recno_t, db_recno_t, db_recno_t, DB_LSN *, db_pgno_t));
+int __qam_mvptr_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **));
+int __qam_del_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t));
+int __qam_del_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **));
+int __qam_add_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *, u_int32_t, const DBT *));
+int __qam_add_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **));
+int __qam_delext_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *));
+int __qam_delext_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **));
+int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_mswap __P((PAGE *));
+int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t));
+int __qam_fclose __P((DB *, db_pgno_t));
+int __qam_fremove __P((DB *, db_pgno_t));
+int __qam_sync __P((DB *, u_int32_t));
+int __qam_gen_filelist __P(( DB *, QUEUE_FILELIST **));
+int __qam_extent_names __P((DB_ENV *, char *, char ***));
+void __qam_exid __P((DB *, u_int8_t *, u_int32_t));
+int __qam_db_create __P((DB *));
+int __qam_db_close __P((DB *));
+int __db_prqueue __P((DB *, FILE *, u_int32_t));
+int __qam_remove __P((DB *, DB_TXN *, const char *, const char *, DB_LSN *));
+int __qam_rename __P((DB *, DB_TXN *, const char *, const char *, const char *));
+int __qam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, int, u_int32_t));
+int __qam_metachk __P((DB *, const char *, QMETA *));
+int __qam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __qam_incfirst_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_stat __P((DB *, void *, u_int32_t));
+int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *, db_pgno_t, u_int32_t));
+int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *, db_pgno_t, u_int32_t));
+int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_qam_ext_h_ */
diff --git a/libdb/dbinc_auto/rep_ext.h b/libdb/dbinc_auto/rep_ext.h
new file mode 100644
index 0000000..22e2d25
--- /dev/null
+++ b/libdb/dbinc_auto/rep_ext.h
@@ -0,0 +1,30 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rep_ext_h_
+#define _rep_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __rep_dbenv_create __P((DB_ENV *));
+int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+int __rep_process_txn __P((DB_ENV *, DBT *));
+int __rep_region_init __P((DB_ENV *));
+int __rep_region_destroy __P((DB_ENV *));
+int __rep_dbenv_close __P((DB_ENV *));
+int __rep_preclose __P((DB_ENV *, int));
+int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+int __rep_send_message __P((DB_ENV *, int, u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+int __rep_lockpgno_init __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+int __rep_lockpages __P((DB_ENV *, int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t, DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+int __rep_is_client __P((DB_ENV *));
+int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int));
+int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rep_ext_h_ */
diff --git a/libdb/dbinc_auto/rpc_client_ext.h b/libdb/dbinc_auto/rpc_client_ext.h
new file mode 100644
index 0000000..9634b34
--- /dev/null
+++ b/libdb/dbinc_auto/rpc_client_ext.h
@@ -0,0 +1,167 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_client_ext_h_
+#define _rpc_client_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __dbcl_envrpcserver __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+int __dbcl_env_open_wrap __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbcl_db_open_wrap __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __dbcl_refresh __P((DB_ENV *));
+int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t *));
+void __dbcl_txn_end __P((DB_TXN *));
+void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
+void __dbcl_c_refresh __P((DBC *));
+int __dbcl_c_setup __P((long, DB *, DBC **));
+int __dbcl_dbclose_common __P((DB *));
+int __dbcl_env_alloc __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *)));
+int __dbcl_set_app_dispatch __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+int __dbcl_env_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+int __dbcl_env_close __P((DB_ENV *, u_int32_t));
+int __dbcl_env_create __P((DB_ENV *, long));
+int __dbcl_set_data_dir __P((DB_ENV *, const char *));
+int __dbcl_env_dbremove __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t));
+int __dbcl_env_dbrename __P((DB_ENV *, DB_TXN *, const char *, const char *, const char *, u_int32_t));
+int __dbcl_env_encrypt __P((DB_ENV *, const char *, u_int32_t));
+int __dbcl_env_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+int __dbcl_env_flags __P((DB_ENV *, u_int32_t, int));
+int __dbcl_set_lg_bsize __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lg_dir __P((DB_ENV *, const char *));
+int __dbcl_set_lg_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lg_regionmax __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_conflict __P((DB_ENV *, u_int8_t *, int));
+int __dbcl_set_lk_detect __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+int __dbcl_set_mp_mmapsize __P((DB_ENV *, size_t));
+int __dbcl_env_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbcl_env_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+int __dbcl_env_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbcl_set_shm_key __P((DB_ENV *, long));
+int __dbcl_set_tas_spins __P((DB_ENV *, u_int32_t));
+int __dbcl_set_timeout __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_set_tmp_dir __P((DB_ENV *, const char *));
+int __dbcl_set_tx_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_tx_timestamp __P((DB_ENV *, time_t *));
+int __dbcl_set_verbose __P((DB_ENV *, u_int32_t, int));
+int __dbcl_txn_abort __P((DB_TXN *));
+int __dbcl_txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int __dbcl_txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+int __dbcl_txn_commit __P((DB_TXN *, u_int32_t));
+int __dbcl_txn_discard __P((DB_TXN *, u_int32_t));
+int __dbcl_txn_prepare __P((DB_TXN *, u_int8_t *));
+int __dbcl_txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+int __dbcl_txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+int __dbcl_txn_timeout __P((DB_TXN *, u_int32_t, u_int32_t));
+int __dbcl_rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+int __dbcl_rep_flush __P((DB_ENV *));
+int __dbcl_rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+int __dbcl_rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_rep_set_rep_transport __P((DB_ENV *, int, int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+int __dbcl_rep_start __P((DB_ENV *, DBT *, u_int32_t));
+int __dbcl_rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+int __dbcl_db_alloc __P((DB *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *)));
+int __dbcl_db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __dbcl_db_bt_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_bt_maxkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_minkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+int __dbcl_db_cache_priority __P((DB *, DB_CACHE_PRIORITY));
+int __dbcl_db_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+int __dbcl_db_close __P((DB *, u_int32_t));
+int __dbcl_db_create __P((DB *, DB_ENV *, u_int32_t));
+int __dbcl_db_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __dbcl_db_dup_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_encrypt __P((DB *, const char *, u_int32_t));
+int __dbcl_db_extentsize __P((DB *, u_int32_t));
+int __dbcl_db_fd __P((DB *, int *));
+int __dbcl_db_feedback __P((DB *, void (*)(DB *, int, int)));
+int __dbcl_db_flags __P((DB *, u_int32_t));
+int __dbcl_db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_h_ffactor __P((DB *, u_int32_t));
+int __dbcl_db_h_hash __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+int __dbcl_db_h_nelem __P((DB *, u_int32_t));
+int __dbcl_db_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __dbcl_db_lorder __P((DB *, int));
+int __dbcl_db_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __dbcl_db_pagesize __P((DB *, u_int32_t));
+int __dbcl_db_panic __P((DB *, void (*)(DB_ENV *, int)));
+int __dbcl_db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_re_delim __P((DB *, int));
+int __dbcl_db_re_len __P((DB *, u_int32_t));
+int __dbcl_db_re_pad __P((DB *, int));
+int __dbcl_db_re_source __P((DB *, const char *));
+int __dbcl_db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __dbcl_db_rename __P((DB *, const char *, const char *, const char *, u_int32_t));
+int __dbcl_db_stat __P((DB *, void *, u_int32_t));
+int __dbcl_db_sync __P((DB *, u_int32_t));
+int __dbcl_db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+int __dbcl_db_upgrade __P((DB *, const char *, u_int32_t));
+int __dbcl_db_verify __P((DB *, const char *, const char *, FILE *, u_int32_t));
+int __dbcl_db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __dbcl_db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __dbcl_dbc_close __P((DBC *));
+int __dbcl_dbc_count __P((DBC *, db_recno_t *, u_int32_t));
+int __dbcl_dbc_del __P((DBC *, u_int32_t));
+int __dbcl_dbc_dup __P((DBC *, DBC **, u_int32_t));
+int __dbcl_dbc_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+int __dbcl_lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int __dbcl_lock_id __P((DB_ENV *, u_int32_t *));
+int __dbcl_lock_id_free __P((DB_ENV *, u_int32_t));
+int __dbcl_lock_put __P((DB_ENV *, DB_LOCK *));
+int __dbcl_lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+int __dbcl_lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+int __dbcl_log_archive __P((DB_ENV *, char ***, u_int32_t));
+int __dbcl_log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+int __dbcl_log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int __dbcl_log_flush __P((DB_ENV *, const DB_LSN *));
+int __dbcl_log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+int __dbcl_log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+int __dbcl_memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+int __dbcl_memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int __dbcl_memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+int __dbcl_memp_sync __P((DB_ENV *, DB_LSN *));
+int __dbcl_memp_trickle __P((DB_ENV *, int, int *));
+int __dbcl_env_close_ret __P((DB_ENV *, u_int32_t, __env_close_reply *));
+int __dbcl_env_create_ret __P((DB_ENV *, long, __env_create_reply *));
+int __dbcl_env_open_ret __P((DB_ENV *, const char *, u_int32_t, int, __env_open_reply *));
+int __dbcl_env_remove_ret __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+int __dbcl_txn_begin_ret __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+int __dbcl_txn_commit_ret __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t, __txn_discard_reply *));
+int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t, __txn_recover_reply *));
+int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+int __dbcl_db_create_ret __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
+int __dbcl_db_get_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+int __dbcl_db_key_range_ret __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t, __db_pget_reply *));
+int __dbcl_db_put_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+int __dbcl_db_remove_ret __P((DB *, const char *, const char *, u_int32_t, __db_remove_reply *));
+int __dbcl_db_rename_ret __P((DB *, const char *, const char *, const char *, u_int32_t, __db_rename_reply *));
+int __dbcl_db_stat_ret __P((DB *, void *, u_int32_t, __db_stat_reply *));
+int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *, u_int32_t, __db_truncate_reply *));
+int __dbcl_db_cursor_ret __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+int __dbcl_db_join_ret __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+int __dbcl_dbc_count_ret __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+int __dbcl_dbc_dup_ret __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+int __dbcl_dbc_get_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t, __dbc_pget_reply *));
+int __dbcl_dbc_put_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rpc_client_ext_h_ */
diff --git a/libdb/dbinc_auto/rpc_defs.in b/libdb/dbinc_auto/rpc_defs.in
new file mode 100644
index 0000000..cae76f5
--- /dev/null
+++ b/libdb/dbinc_auto/rpc_defs.in
@@ -0,0 +1,4 @@
+
+/* DO NOT EDIT: automatically built by dist/s_rpc. */
+#define DB_RPC_SERVERPROG ((unsigned long)(351457))
+#define DB_RPC_SERVERVERS ((unsigned long)(4001))
diff --git a/libdb/dbinc_auto/rpc_server_ext.h b/libdb/dbinc_auto/rpc_server_ext.h
new file mode 100644
index 0000000..c0c7068
--- /dev/null
+++ b/libdb/dbinc_auto/rpc_server_ext.h
@@ -0,0 +1,126 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_server_ext_h_
+#define _rpc_server_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __env_cachesize_proc __P((long, u_int32_t, u_int32_t, u_int32_t, __env_cachesize_reply *));
+void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
+void __env_create_proc __P((u_int32_t, __env_create_reply *));
+void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t, __env_dbremove_reply *));
+void __env_dbrename_proc __P((long, long, char *, char *, char *, u_int32_t, __env_dbrename_reply *));
+void __env_encrypt_proc __P((long, char *, u_int32_t, __env_encrypt_reply *));
+void __env_flags_proc __P((long, u_int32_t, u_int32_t, __env_flags_reply *));
+void __env_open_proc __P((long, char *, u_int32_t, u_int32_t, __env_open_reply *));
+void __env_remove_proc __P((long, char *, u_int32_t, __env_remove_reply *));
+void __txn_abort_proc __P((long, __txn_abort_reply *));
+void __txn_begin_proc __P((long, long, u_int32_t, __txn_begin_reply *));
+void __txn_commit_proc __P((long, u_int32_t, __txn_commit_reply *));
+void __txn_discard_proc __P((long, u_int32_t, __txn_discard_reply *));
+void __txn_prepare_proc __P((long, u_int8_t *, __txn_prepare_reply *));
+void __txn_recover_proc __P((long, u_int32_t, u_int32_t, __txn_recover_reply *, int *));
+void __db_bt_maxkey_proc __P((long, u_int32_t, __db_bt_maxkey_reply *));
+void __db_associate_proc __P((long, long, long, u_int32_t, __db_associate_reply *));
+void __db_bt_minkey_proc __P((long, u_int32_t, __db_bt_minkey_reply *));
+void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
+void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
+void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
+void __db_encrypt_proc __P((long, char *, u_int32_t, __db_encrypt_reply *));
+void __db_extentsize_proc __P((long, u_int32_t, __db_extentsize_reply *));
+void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
+void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_get_reply *, int *));
+void __db_h_ffactor_proc __P((long, u_int32_t, __db_h_ffactor_reply *));
+void __db_h_nelem_proc __P((long, u_int32_t, __db_h_nelem_reply *));
+void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
+void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
+void __db_open_proc __P((long, long, char *, char *, u_int32_t, u_int32_t, u_int32_t, __db_open_reply *));
+void __db_pagesize_proc __P((long, u_int32_t, __db_pagesize_reply *));
+void __db_pget_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_pget_reply *, int *));
+void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_put_reply *, int *));
+void __db_re_delim_proc __P((long, u_int32_t, __db_re_delim_reply *));
+void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
+void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
+void __db_remove_proc __P((long, char *, char *, u_int32_t, __db_remove_reply *));
+void __db_rename_proc __P((long, char *, char *, char *, u_int32_t, __db_rename_reply *));
+void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *, int *));
+void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
+void __db_truncate_proc __P((long, long, u_int32_t, __db_truncate_reply *));
+void __db_cursor_proc __P((long, long, u_int32_t, __db_cursor_reply *));
+void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t, __db_join_reply *));
+void __dbc_close_proc __P((long, __dbc_close_reply *));
+void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
+void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
+void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
+void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_get_reply *, int *));
+void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_pget_reply *, int *));
+void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_put_reply *, int *));
+void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+void __dbsrv_timeout __P((int));
+void __dbclear_ctp __P((ct_entry *));
+void __dbdel_ctp __P((ct_entry *));
+ct_entry *new_ct_ent __P((int *));
+ct_entry *get_tableent __P((long));
+ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *, const char *, DBTYPE, u_int32_t));
+ct_entry *__dbsrv_shareenv __P((ct_entry *, home_entry *, u_int32_t));
+void __dbsrv_active __P((ct_entry *));
+int __db_close_int __P((long, u_int32_t));
+int __dbc_close_int __P((ct_entry *));
+int __dbenv_close_int __P((long, u_int32_t, int));
+home_entry *get_home __P((char *));
+__env_cachesize_reply *__db_env_cachesize_4001 __P((__env_cachesize_msg *, struct svc_req *));
+__env_close_reply *__db_env_close_4001 __P((__env_close_msg *, struct svc_req *));
+__env_create_reply *__db_env_create_4001 __P((__env_create_msg *, struct svc_req *));
+__env_dbremove_reply *__db_env_dbremove_4001 __P((__env_dbremove_msg *, struct svc_req *));
+__env_dbrename_reply *__db_env_dbrename_4001 __P((__env_dbrename_msg *, struct svc_req *));
+__env_encrypt_reply *__db_env_encrypt_4001 __P((__env_encrypt_msg *, struct svc_req *));
+__env_flags_reply *__db_env_flags_4001 __P((__env_flags_msg *, struct svc_req *));
+__env_open_reply *__db_env_open_4001 __P((__env_open_msg *, struct svc_req *));
+__env_remove_reply *__db_env_remove_4001 __P((__env_remove_msg *, struct svc_req *));
+__txn_abort_reply *__db_txn_abort_4001 __P((__txn_abort_msg *, struct svc_req *));
+__txn_begin_reply *__db_txn_begin_4001 __P((__txn_begin_msg *, struct svc_req *));
+__txn_commit_reply *__db_txn_commit_4001 __P((__txn_commit_msg *, struct svc_req *));
+__txn_discard_reply *__db_txn_discard_4001 __P((__txn_discard_msg *, struct svc_req *));
+__txn_prepare_reply *__db_txn_prepare_4001 __P((__txn_prepare_msg *, struct svc_req *));
+__txn_recover_reply *__db_txn_recover_4001 __P((__txn_recover_msg *, struct svc_req *));
+__db_associate_reply *__db_db_associate_4001 __P((__db_associate_msg *, struct svc_req *));
+__db_bt_maxkey_reply *__db_db_bt_maxkey_4001 __P((__db_bt_maxkey_msg *, struct svc_req *));
+__db_bt_minkey_reply *__db_db_bt_minkey_4001 __P((__db_bt_minkey_msg *, struct svc_req *));
+__db_close_reply *__db_db_close_4001 __P((__db_close_msg *, struct svc_req *));
+__db_create_reply *__db_db_create_4001 __P((__db_create_msg *, struct svc_req *));
+__db_del_reply *__db_db_del_4001 __P((__db_del_msg *, struct svc_req *));
+__db_encrypt_reply *__db_db_encrypt_4001 __P((__db_encrypt_msg *, struct svc_req *));
+__db_extentsize_reply *__db_db_extentsize_4001 __P((__db_extentsize_msg *, struct svc_req *));
+__db_flags_reply *__db_db_flags_4001 __P((__db_flags_msg *, struct svc_req *));
+__db_get_reply *__db_db_get_4001 __P((__db_get_msg *, struct svc_req *));
+__db_h_ffactor_reply *__db_db_h_ffactor_4001 __P((__db_h_ffactor_msg *, struct svc_req *));
+__db_h_nelem_reply *__db_db_h_nelem_4001 __P((__db_h_nelem_msg *, struct svc_req *));
+__db_key_range_reply *__db_db_key_range_4001 __P((__db_key_range_msg *, struct svc_req *));
+__db_lorder_reply *__db_db_lorder_4001 __P((__db_lorder_msg *, struct svc_req *));
+__db_open_reply *__db_db_open_4001 __P((__db_open_msg *, struct svc_req *));
+__db_pagesize_reply *__db_db_pagesize_4001 __P((__db_pagesize_msg *, struct svc_req *));
+__db_pget_reply *__db_db_pget_4001 __P((__db_pget_msg *, struct svc_req *));
+__db_put_reply *__db_db_put_4001 __P((__db_put_msg *, struct svc_req *));
+__db_re_delim_reply *__db_db_re_delim_4001 __P((__db_re_delim_msg *, struct svc_req *));
+__db_re_len_reply *__db_db_re_len_4001 __P((__db_re_len_msg *, struct svc_req *));
+__db_re_pad_reply *__db_db_re_pad_4001 __P((__db_re_pad_msg *, struct svc_req *));
+__db_remove_reply *__db_db_remove_4001 __P((__db_remove_msg *, struct svc_req *));
+__db_rename_reply *__db_db_rename_4001 __P((__db_rename_msg *, struct svc_req *));
+__db_stat_reply *__db_db_stat_4001 __P((__db_stat_msg *, struct svc_req *));
+__db_sync_reply *__db_db_sync_4001 __P((__db_sync_msg *, struct svc_req *));
+__db_truncate_reply *__db_db_truncate_4001 __P((__db_truncate_msg *, struct svc_req *));
+__db_cursor_reply *__db_db_cursor_4001 __P((__db_cursor_msg *, struct svc_req *));
+__db_join_reply *__db_db_join_4001 __P((__db_join_msg *, struct svc_req *));
+__dbc_close_reply *__db_dbc_close_4001 __P((__dbc_close_msg *, struct svc_req *));
+__dbc_count_reply *__db_dbc_count_4001 __P((__dbc_count_msg *, struct svc_req *));
+__dbc_del_reply *__db_dbc_del_4001 __P((__dbc_del_msg *, struct svc_req *));
+__dbc_dup_reply *__db_dbc_dup_4001 __P((__dbc_dup_msg *, struct svc_req *));
+__dbc_get_reply *__db_dbc_get_4001 __P((__dbc_get_msg *, struct svc_req *));
+__dbc_pget_reply *__db_dbc_pget_4001 __P((__dbc_pget_msg *, struct svc_req *));
+__dbc_put_reply *__db_dbc_put_4001 __P((__dbc_put_msg *, struct svc_req *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rpc_server_ext_h_ */
diff --git a/libdb/dbinc_auto/tcl_ext.h b/libdb/dbinc_auto/tcl_ext.h
new file mode 100644
index 0000000..619ea4a
--- /dev/null
+++ b/libdb/dbinc_auto/tcl_ext.h
@@ -0,0 +1,82 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _tcl_ext_h_
+#define _tcl_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+#if DB_DBM_HSEARCH != 0
+int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+#endif
+#if DB_DBM_HSEARCH != 0
+int bdb_DbmCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+#endif
+int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, Tcl_Obj *));
+int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+DBTCL_INFO *_NewInfo __P((Tcl_Interp *, void *, char *, enum INFOTYPE));
+void *_NameToPtr __P((CONST char *));
+DBTCL_INFO *_PtrToInfo __P((CONST void *));
+DBTCL_INFO *_NameToInfo __P((CONST char *));
+void _SetInfoData __P((DBTCL_INFO *, void *));
+void _DeleteInfo __P((DBTCL_INFO *));
+int _SetListElem __P((Tcl_Interp *, Tcl_Obj *, void *, int, void *, int));
+int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, db_recno_t, u_char *, int));
+int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int, DBT *, int, DBT *));
+int _SetMultiList __P((Tcl_Interp *, Tcl_Obj *, DBT *, DBT*, int, int));
+int _GetGlobPrefix __P((char *, char **));
+int _ReturnSetup __P((Tcl_Interp *, int, int, char *));
+int _ErrorSetup __P((Tcl_Interp *, int, char *));
+void _ErrorFunc __P((CONST char *, char *));
+int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *));
+Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, void (*)(u_int32_t, void *, void (*)(u_int32_t, const FN *, void *))));
+void _debug_check __P((void));
+int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **, u_int32_t *, int *));
+int tcl_LockDetect __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockTimeout __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogArchive __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogCompare __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_LogFile __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogFlush __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_MpTrickle __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Mp __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_RepElect __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepFlush __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepLimit __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepRequest __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepStart __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepProcessMessage __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepStat __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_TxnCheckpoint __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Txn __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_TxnStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_TxnTimeout __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_TxnRecover __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_tcl_ext_h_ */
diff --git a/libdb/dbinc_auto/txn_auto.h b/libdb/dbinc_auto/txn_auto.h
new file mode 100644
index 0000000..ac841ba
--- /dev/null
+++ b/libdb/dbinc_auto/txn_auto.h
@@ -0,0 +1,55 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __txn_AUTO_H
+#define __txn_AUTO_H
+#define DB___txn_regop 10
+typedef struct ___txn_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t timestamp;
+} __txn_regop_args;
+
+#define DB___txn_ckp 11
+typedef struct ___txn_ckp_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DB_LSN ckp_lsn;
+ DB_LSN last_ckp;
+ int32_t timestamp;
+} __txn_ckp_args;
+
+#define DB___txn_child 12
+typedef struct ___txn_child_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t child;
+ DB_LSN c_lsn;
+} __txn_child_args;
+
+#define DB___txn_xa_regop 13
+typedef struct ___txn_xa_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+ DB_LSN begin_lsn;
+} __txn_xa_regop_args;
+
+#define DB___txn_recycle 14
+typedef struct ___txn_recycle_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t min;
+ u_int32_t max;
+} __txn_recycle_args;
+
+#endif
diff --git a/libdb/dbinc_auto/txn_ext.h b/libdb/dbinc_auto/txn_ext.h
new file mode 100644
index 0000000..a53338d
--- /dev/null
+++ b/libdb/dbinc_auto/txn_ext.h
@@ -0,0 +1,71 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _txn_ext_h_
+#define _txn_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp));
+int __txn_commit __P((DB_TXN *, u_int32_t));
+int __txn_abort __P((DB_TXN *));
+int __txn_discard __P((DB_TXN *, u_int32_t flags));
+int __txn_prepare __P((DB_TXN *, u_int8_t *));
+u_int32_t __txn_id __P((DB_TXN *));
+int __txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+int __txn_getckp __P((DB_ENV *, DB_LSN *));
+int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+int __txn_force_abort __P((DB_ENV *, u_int8_t *));
+int __txn_preclose __P((DB_ENV *));
+int __txn_reset __P((DB_ENV *));
+void __txn_updateckp __P((DB_ENV *, DB_LSN *));
+int __txn_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t));
+int __txn_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **));
+int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, DB_LSN *, int32_t));
+int __txn_ckp_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **));
+int __txn_child_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_child_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **));
+int __txn_xa_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, int32_t, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_xa_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_read __P((DB_ENV *, void *, __txn_xa_regop_args **));
+int __txn_recycle_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, u_int32_t));
+int __txn_recycle_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_recycle_read __P((DB_ENV *, void *, __txn_recycle_args **));
+int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __txn_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __txn_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+void __txn_dbenv_create __P((DB_ENV *));
+int __txn_regop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_restore_txn __P((DB_ENV *, DB_LSN *, __txn_xa_regop_args *));
+int __txn_recycle_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t));
+int __txn_map_gid __P((DB_ENV *, u_int8_t *, TXN_DETAIL **, size_t *));
+int __txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+int __txn_get_prepared __P((DB_ENV *, XID *, DB_PREPLIST *, long, long *, u_int32_t));
+int __txn_open __P((DB_ENV *));
+int __txn_dbenv_refresh __P((DB_ENV *));
+void __txn_region_destroy __P((DB_ENV *, REGINFO *));
+int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+int __txn_remevent __P((DB_ENV *, DB_TXN *, const char *, u_int8_t*));
+int __txn_lockevent __P((DB_ENV *, DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_txn_ext_h_ */
diff --git a/libdb/dbinc_auto/xa_ext.h b/libdb/dbinc_auto/xa_ext.h
new file mode 100644
index 0000000..e4fc989
--- /dev/null
+++ b/libdb/dbinc_auto/xa_ext.h
@@ -0,0 +1,20 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _xa_ext_h_
+#define _xa_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_xa_create __P((DB *));
+int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+int __db_map_rmid __P((int, DB_ENV *));
+int __db_unmap_rmid __P((int));
+int __db_map_xid __P((DB_ENV *, XID *, size_t));
+void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_xa_ext_h_ */
diff --git a/libdb/dbm/dbm.c b/libdb/dbm/dbm.c
new file mode 100644
index 0000000..98ba086
--- /dev/null
+++ b/libdb/dbm/dbm.c
@@ -0,0 +1,522 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+/*
+ *
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_ndbm_clearerr __P((DBM *));
+ * EXTERN: void __db_ndbm_close __P((DBM *));
+ * EXTERN: int __db_ndbm_delete __P((DBM *, datum));
+ * EXTERN: int __db_ndbm_dirfno __P((DBM *));
+ * EXTERN: int __db_ndbm_error __P((DBM *));
+ * EXTERN: datum __db_ndbm_fetch __P((DBM *, datum));
+ * EXTERN: datum __db_ndbm_firstkey __P((DBM *));
+ * EXTERN: datum __db_ndbm_nextkey __P((DBM *));
+ * EXTERN: DBM *__db_ndbm_open __P((const char *, int, int));
+ * EXTERN: int __db_ndbm_pagfno __P((DBM *));
+ * EXTERN: int __db_ndbm_rdonly __P((DBM *));
+ * EXTERN: int __db_ndbm_store __P((DBM *, datum, datum, int));
+ *
+ * EXTERN: int __db_dbm_close __P((void));
+ * EXTERN: int __db_dbm_dbrdonly __P((void));
+ * EXTERN: int __db_dbm_delete __P((datum));
+ * EXTERN: int __db_dbm_dirf __P((void));
+ * EXTERN: datum __db_dbm_fetch __P((datum));
+ * EXTERN: datum __db_dbm_firstkey __P((void));
+ * EXTERN: int __db_dbm_init __P((char *));
+ * EXTERN: datum __db_dbm_nextkey __P((datum));
+ * EXTERN: int __db_dbm_pagf __P((void));
+ * EXTERN: int __db_dbm_store __P((datum, datum));
+ *
+ * EXTERN: #endif
+ */
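+
+/*
+ * Illustrative sketch only, not part of the library: one way an application
+ * might use the ndbm-compatible interface declared above once DB_DBM_HSEARCH
+ * is defined and <db.h> is included, so that the dbm_* names resolve to the
+ * __db_ndbm_* routines in this file.  The file name and key/data values are
+ * hypothetical.
+ *
+ *	#define DB_DBM_HSEARCH 1
+ *	#include <db.h>
+ *	#include <fcntl.h>
+ *
+ *	int
+ *	example(void)
+ *	{
+ *		DBM *db;
+ *		datum key, data;
+ *
+ *		if ((db = dbm_open("inventory", O_CREAT | O_RDWR, 0600)) == NULL)
+ *			return (1);
+ *		key.dptr = "fruit";
+ *		key.dsize = sizeof("fruit");
+ *		data.dptr = "apple";
+ *		data.dsize = sizeof("apple");
+ *		(void)dbm_store(db, key, data, DBM_REPLACE);
+ *		data = dbm_fetch(db, key);
+ *		dbm_close(db);
+ *		return (0);
+ *	}
+ */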
+
+/*
+ * The DBM routines, which call the NDBM routines.
+ */
+static DBM *__cur_db;
+
+static void __db_no_open __P((void));
+
+int
+__db_dbm_init(file)
+ char *file;
+{
+ if (__cur_db != NULL)
+ (void)dbm_close(__cur_db);
+ if ((__cur_db =
+ dbm_open(file, O_CREAT | O_RDWR, __db_omode("rw----"))) != NULL)
+ return (0);
+ if ((__cur_db = dbm_open(file, O_RDONLY, 0)) != NULL)
+ return (0);
+ return (-1);
+}
+
+int
+__db_dbm_close()
+{
+ if (__cur_db != NULL) {
+ dbm_close(__cur_db);
+ __cur_db = NULL;
+ }
+ return (0);
+}
+
+datum
+__db_dbm_fetch(key)
+ datum key;
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = NULL;
+ item.dsize = 0;
+ return (item);
+ }
+ return (dbm_fetch(__cur_db, key));
+}
+
+datum
+__db_dbm_firstkey()
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = NULL;
+ item.dsize = 0;
+ return (item);
+ }
+ return (dbm_firstkey(__cur_db));
+}
+
+datum
+__db_dbm_nextkey(key)
+ datum key;
+{
+ datum item;
+
+ COMPQUIET(key.dsize, 0);
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = NULL;
+ item.dsize = 0;
+ return (item);
+ }
+ return (dbm_nextkey(__cur_db));
+}
+
+int
+__db_dbm_delete(key)
+ datum key;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_delete(__cur_db, key));
+}
+
+int
+__db_dbm_store(key, dat)
+ datum key, dat;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_store(__cur_db, key, dat, DBM_REPLACE));
+}
+
+static void
+__db_no_open()
+{
+ (void)fprintf(stderr, "dbm: no open database.\n");
+}
+
+/*
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * The NDBM routines, which call the DB routines.
+ */
+/*
+ * Returns:
+ * *DBM on success
+ * NULL on failure
+ */
+DBM *
+__db_ndbm_open(file, oflags, mode)
+ const char *file;
+ int oflags, mode;
+{
+ DB *dbp;
+ DBC *dbc;
+ int ret;
+ char path[MAXPATHLEN];
+
+ /*
+ * !!!
+ * Don't use sprintf(3)/snprintf(3) -- the former is dangerous, and
+ * the latter isn't standard, and we're manipulating strings handed
+ * us by the application.
+ */
+ if (strlen(file) + strlen(DBM_SUFFIX) + 1 > sizeof(path)) {
+ __os_set_errno(ENAMETOOLONG);
+ return (NULL);
+ }
+ (void)strcpy(path, file);
+ (void)strcat(path, DBM_SUFFIX);
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ /*
+ * !!!
+ * The historic ndbm library corrected for opening O_WRONLY.
+ */
+ if (oflags & O_WRONLY) {
+ oflags &= ~O_WRONLY;
+ oflags |= O_RDWR;
+ }
+
+ if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 40)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, 1)) != 0 ||
+ (ret = (*dbp->open)(dbp, NULL,
+ path, NULL, DB_HASH, __db_oflags(oflags), mode)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ (void)dbp->close(dbp, 0);
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ return ((DBM *)dbc);
+}
+
+/*
+ * Returns:
+ * Nothing.
+ */
+void
+__db_ndbm_close(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->close(dbc->dbp, 0);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_fetch(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+ _key.size = key.dsize;
+ _key.data = key.dptr;
+
+ /*
+ * Note that we can't simply use the dbc we have to do a c_get/SET,
+ * because that cursor is the one used for sequential iteration and
+ * it has to remain stable in the face of intervening gets and puts.
+ */
+ if ((ret = dbc->dbp->get(dbc->dbp, NULL, &_key, &_data, 0)) == 0) {
+ data.dptr = _data.data;
+ data.dsize = _data.size;
+ } else {
+ data.dptr = NULL;
+ data.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (data);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_firstkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_FIRST)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_nextkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_NEXT)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ */
+int
+__db_ndbm_delete(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ if ((ret = dbc->dbp->del(dbc->dbp, NULL, &_key, 0)) == 0)
+ return (0);
+
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ return (-1);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ * 1 if DBM_INSERT and entry exists
+ */
+int
+__db_ndbm_store(dbm, key, data, flags)
+ DBM *dbm;
+ datum key, data;
+ int flags;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ memset(&_data, 0, sizeof(DBT));
+ _data.data = data.dptr;
+ _data.size = data.dsize;
+
+ if ((ret = dbc->dbp->put(dbc->dbp, NULL,
+ &_key, &_data, flags == DBM_INSERT ? DB_NOOVERWRITE : 0)) == 0)
+ return (0);
+
+ if (ret == DB_KEYEXIST)
+ return (1);
+
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ return (-1);
+}
+
+int
+__db_ndbm_error(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_AM_DBM_ERROR));
+}
+
+int
+__db_ndbm_clearerr(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ F_CLR(dbc->dbp, DB_AM_DBM_ERROR);
+ return (0);
+}
+
+/*
+ * Returns:
+ * 1 if read-only
+ * 0 if not read-only
+ */
+int
+__db_ndbm_rdonly(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_AM_RDONLY) ? 1 : 0);
+}
+
+/*
+ * XXX
+ * We only have a single file descriptor that we can return, not two. Return
+ * the same one for both files. Hopefully, the user is using it for locking
+ * and picked one to use at random.
+ */
+int
+__db_ndbm_dirfno(dbm)
+ DBM *dbm;
+{
+ return (dbm_pagfno(dbm));
+}
+
+int
+__db_ndbm_pagfno(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ int fd;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->fd(dbc->dbp, &fd);
+ return (fd);
+}
diff --git a/libdb/dbreg/dbreg.c b/libdb/dbreg/dbreg.c
new file mode 100644
index 0000000..4346b61
--- /dev/null
+++ b/libdb/dbreg/dbreg.c
@@ -0,0 +1,450 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * The dbreg subsystem, as its name implies, registers database handles so
+ * that we can associate log messages with them without logging a filename
+ * or a full, unique DB ID. Instead, we assign each dbp an int32_t which is
+ * easy and cheap to log, and use this subsystem to map back and forth.
+ *
+ * Overview of how dbreg ids are managed:
+ *
+ * OPEN
+ * dbreg_setup (Creates FNAME struct.)
+ * dbreg_new_id (Assigns new ID to dbp and logs it. May be postponed
+ * until we attempt to log something else using that dbp, if the dbp
+ * was opened on a replication client.)
+ *
+ * CLOSE
+ * dbreg_close_id (Logs closure of dbp/revocation of ID.)
+ * dbreg_revoke_id (As name implies, revokes ID.)
+ * dbreg_teardown (Destroys FNAME.)
+ *
+ * RECOVERY
+ * dbreg_setup
+ * dbreg_assign_id (Assigns a particular ID we have in the log to a dbp.)
+ *
+ * sometimes: dbreg_revoke_id; dbreg_teardown
+ * other times: normal close path
+ *
+ * A note about locking:
+ *
+ * FNAME structures are referenced only by their corresponding dbp's
+ * until they have a valid id.
+ *
+ * Once they have a valid id, they must get linked into the log
+ * region list so they can get logged on checkpoints.
+ *
+ * An FNAME that may/does have a valid id must be accessed under
+ * protection of the fq_mutex, with the following exception:
+ *
+ * We don't want to have to grab the fq_mutex on every log
+ * record, and it should be safe not to do so when we're just
+ * looking at the id, because once allocated, the id should
+ * not change under a handle until the handle is closed.
+ *
+ * If a handle is closed during an attempt by another thread to
+ * log with it, well, the application doing the close deserves to
+ * go down in flames and a lot else is about to fail anyway.
+ *
+ * When in the course of logging we encounter an invalid id
+ * and go to allocate it lazily, we *do* need to check again
+ * after grabbing the mutex, because it's possible to race with
+ * another thread that has also decided that it needs to allocate
+ * an id lazily.
+ *
+ * See SR #5623 for further discussion of the new dbreg design.
+ */
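+
+/*
+ * A minimal sketch of the lazy-allocation pattern described above; lock(),
+ * unlock() and alloc_id() are placeholders, not library calls.  The first
+ * test is the cheap unlocked check, the second repeats it under fq_mutex so
+ * that only one of two racing threads actually allocates:
+ *
+ *	if (fnp->id == DB_LOGFILEID_INVALID) {
+ *		lock(fq_mutex);
+ *		if (fnp->id == DB_LOGFILEID_INVALID)
+ *			fnp->id = alloc_id();
+ *		unlock(fq_mutex);
+ *	}
+ *
+ * The re-check under fq_mutex is what the top of __dbreg_new_id below does
+ * before allocating a fresh id.
+ */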
+
+/*
+ * __dbreg_setup --
+ * Allocate and initialize an FNAME structure. The FNAME structures
+ * live in the log shared region and map one-to-one with open database handles.
+ * When the handle needs to be logged, the FNAME should have a valid fid
+ * allocated. If the handle currently isn't logged, it still has an FNAME
+ * entry. If we later discover that the handle needs to be logged, we can
+ * allocate an id for it later. (This happens when the handle is on a
+ * replication client that later becomes a master.)
+ *
+ * PUBLIC: int __dbreg_setup __P((DB *, const char *, u_int32_t));
+ */
+int
+__dbreg_setup(dbp, name, create_txnid)
+ DB *dbp;
+ const char *name;
+ u_int32_t create_txnid;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+ size_t len;
+ void *namep;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+
+ fnp = NULL;
+ namep = NULL;
+
+ /* Allocate an FNAME and, if necessary, a buffer for the name itself. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, sizeof(FNAME), 0, &fnp)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ memset(fnp, 0, sizeof(FNAME));
+ if (name != NULL) {
+ len = strlen(name) + 1;
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ len, 0, &namep)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
+ memcpy(namep, name, len);
+ } else
+ fnp->name_off = INVALID_ROFF;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Fill in all the remaining info that we'll need later to register
+ * the file, if we use it for logging.
+ */
+ fnp->id = DB_LOGFILEID_INVALID;
+ fnp->s_type = dbp->type;
+ memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
+ fnp->meta_pgno = dbp->meta_pgno;
+ fnp->create_txnid = create_txnid;
+
+ dbp->log_filename = fnp;
+
+ return (0);
+}
+
+/*
+ * __dbreg_teardown --
+ * Destroy a DB handle's FNAME struct.
+ *
+ * PUBLIC: int __dbreg_teardown __P((DB *));
+ */
+int
+__dbreg_teardown(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ fnp = dbp->log_filename;
+
+ /*
+ * We may not have an FNAME if we were never opened. This is not an
+ * error.
+ */
+ if (fnp == NULL)
+ return (0);
+
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (fnp->name_off != INVALID_ROFF)
+ __db_shalloc_free(dblp->reginfo.addr,
+ R_ADDR(&dblp->reginfo, fnp->name_off));
+ __db_shalloc_free(dblp->reginfo.addr, fnp);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ dbp->log_filename = NULL;
+
+ return (0);
+}
+
+/*
+ * __dbreg_new_id --
+ * Assign an unused dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_new_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_new_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN unused;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /*
+ * It's possible that after deciding we needed to call this function,
+ * someone else allocated an ID before we grabbed the lock. Check
+ * to make sure there was no race and we have something useful to do.
+ */
+ if (fnp->id != DB_LOGFILEID_INVALID) {
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (0);
+ }
+
+ /* Get an unused ID from the free list. */
+ if ((ret = __dbreg_pop_id(dbenv, &id)) != 0)
+ goto err;
+
+ /* If no ID was found, allocate a new one. */
+ if (id == DB_LOGFILEID_INVALID)
+ id = lp->fid_max++;
+
+ fnp->id = id;
+
+ /* Hook the FNAME into the list of open files. */
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ /*
+ * Log the registry. We should only request a new ID in situations
+ * where logging is reasonable.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_RECOVER));
+
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ memset(&r_name, 0, sizeof(r_name));
+ if (fnp->name_off != INVALID_ROFF) {
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1;
+ }
+ fid_dbt.data = dbp->fileid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn, &unused, 0, LOG_OPEN,
+ r_name.size == 0 ? NULL : &r_name, &fid_dbt, id, fnp->s_type,
+ fnp->meta_pgno, fnp->create_txnid)) != 0)
+ goto err;
+
+ DB_ASSERT(dbp->type == fnp->s_type);
+ DB_ASSERT(dbp->meta_pgno == fnp->meta_pgno);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_assign_id --
+ * Assign a particular dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_assign_id __P((DB *, int32_t));
+ */
+int
+__dbreg_assign_id(dbp, id)
+ DB *dbp;
+ int32_t id;
+{
+ DB *close_dbp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *close_fnp, *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ close_dbp = NULL;
+ close_fnp = NULL;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /* We should only call this on DB handles that have no ID. */
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ /*
+ * Make sure there isn't already a file open with this ID. There can
+ * be in recovery, if we're recovering across a point where an ID got
+ * reused.
+ */
+ if (__dbreg_id_to_fname(dblp, id, 1, &close_fnp) == 0) {
+ /*
+ * We want to save off any dbp we have open with this id.
+ * We can't safely close it now, because we hold the fq_mutex,
+ * but we should be able to rely on it being open in this
+ * process, and we're running recovery, so no other thread
+ * should muck with it if we just put off closing it until
+ * we're ready to return.
+ *
+ * Once we have the dbp, revoke its id; we're about to
+ * reuse it.
+ */
+ ret = __dbreg_id_to_db_int(dbenv, NULL, &close_dbp, id, 0, 0);
+ if (ret == ENOENT) {
+ ret = 0;
+ goto cont;
+ } else if (ret != 0)
+ goto err;
+
+ if ((ret = __dbreg_revoke_id(close_dbp, 1)) != 0)
+ goto err;
+ }
+
+ /*
+ * Remove this ID from the free list, if it's there, and make sure
+ * we don't allocate it anew.
+ */
+cont: if ((ret = __dbreg_pluck_id(dbenv, id)) != 0)
+ goto err;
+ if (id >= lp->fid_max)
+ lp->fid_max = id + 1;
+
+ /* Now go ahead and assign the id to our dbp. */
+ fnp->id = id;
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ /* There's nothing useful that our caller can do if this close fails. */
+ if (close_dbp != NULL)
+ (void)close_dbp->close(close_dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_revoke_id --
+ * Take a log id away from a dbp, in preparation for closing it,
+ * but without logging the close.
+ *
+ * PUBLIC: int __dbreg_revoke_id __P((DB *, int));
+ */
+int
+__dbreg_revoke_id(dbp, have_lock)
+ DB *dbp;
+ int have_lock;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ id = fnp->id;
+ fnp->id = DB_LOGFILEID_INVALID;
+
+ /* Remove the FNAME from the list of open files. */
+ SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
+
+ /* Remove this id from the dbentry table. */
+ __dbreg_rem_dbentry(dblp, id);
+
+ /* Push this id onto the free list. */
+ ret = __dbreg_push_id(dbenv, id);
+
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_close_id --
+ * Take a dbreg id away from a dbp that we're closing, and log
+ * the unregistry.
+ *
+ * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_close_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name, *dbtp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&r_name, 0, sizeof(r_name));
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size =
+ (u_int32_t)strlen((char *)r_name.data) + 1;
+ dbtp = &r_name;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn,
+ &r_unused, 0, LOG_CLOSE, dbtp, &fid_dbt, fnp->id,
+ fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0)
+ goto err;
+
+ ret = __dbreg_revoke_id(dbp, 1);
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
diff --git a/libdb/dbreg/dbreg.src b/libdb/dbreg/dbreg.src
new file mode 100644
index 0000000..a057593
--- /dev/null
+++ b/libdb/dbreg/dbreg.src
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __dbreg
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Used for registering name/id translations at open or close.
+ * opcode: register or unregister
+ * name: file name
+ * uid: unique file id
+ * fileid: assigned dbreg id
+ * ftype: database type
+ * meta_pgno: meta-data page number
+ * id: transaction id of the subtransaction that created the fs object
+ */
+BEGIN register 2
+ARG opcode u_int32_t lu
+DBT name DBT s
+DBT uid DBT s
+ARG fileid int32_t ld
+ARG ftype DBTYPE lx
+ARG meta_pgno db_pgno_t lu
+ARG id u_int32_t lx
+END
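+
+/*
+ * For orientation only (a sketch; the authoritative output is the generated
+ * dbreg_auto.c that follows in this tree): dist/gen_rec.awk expands the
+ * record description above into
+ *
+ *	int __dbreg_register_log(DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t,
+ *	    u_int32_t opcode, const DBT *name, const DBT *uid, int32_t fileid,
+ *	    DBTYPE ftype, db_pgno_t meta_pgno, u_int32_t id);
+ *
+ * together with the matching __dbreg_register_read, _print and _getpgnos
+ * routines and the __dbreg_register_args structure used during recovery.
+ */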
diff --git a/libdb/dbreg/dbreg_auto.c b/libdb/dbreg/dbreg_auto.c
new file mode 100644
index 0000000..91eace3
--- /dev/null
+++ b/libdb/dbreg/dbreg_auto.c
@@ -0,0 +1,358 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __dbreg_register_log __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *,
+ * PUBLIC: int32_t, DBTYPE, db_pgno_t, u_int32_t));
+ */
+int
+__dbreg_register_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, name, uid, fileid, ftype, meta_pgno,
+ id)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ const DBT *name;
+ const DBT *uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+ u_int32_t id;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___dbreg_register;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (uid == NULL ? 0 : uid->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ if (uid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &uid->size, sizeof(uid->size));
+ bp += sizeof(uid->size);
+ memcpy(bp, uid->data, uid->size);
+ bp += uid->size;
+ }
+
+ uinttmp = (u_int32_t)fileid;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)ftype;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__dbreg_register_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__dbreg_register_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __dbreg_register_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__dbreg_register: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tuid: ");
+ for (i = 0; i < argp->uid.size; i++) {
+ ch = ((u_int8_t *)argp->uid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tftype: 0x%lx\n", (u_long)argp->ftype);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\tid: 0x%lx\n", (u_long)argp->id);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_read __P((DB_ENV *, void *,
+ * PUBLIC: __dbreg_register_args **));
+ */
+int
+__dbreg_register_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __dbreg_register_args **argpp;
+{
+ __dbreg_register_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__dbreg_register_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memset(&argp->uid, 0, sizeof(argp->uid));
+ memcpy(&argp->uid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->uid.data = bp;
+ bp += argp->uid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->ftype = (DBTYPE)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->id = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__dbreg_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_print, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_getpgnos __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_getpgnos, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__dbreg_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_recover, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/dbreg/dbreg_rec.c b/libdb/dbreg/dbreg_rec.c
new file mode 100644
index 0000000..5f19a9e
--- /dev/null
+++ b/libdb/dbreg/dbreg_rec.c
@@ -0,0 +1,362 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_open_file __P((DB_ENV *,
+ DB_TXN *, __dbreg_register_args *, void *));
+
+/*
+ * PUBLIC: int __dbreg_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *dblp;
+ DB *dbp;
+ __dbreg_register_args *argp;
+ int do_close, do_open, do_rem, ret, t_ret;
+
+ dblp = dbenv->lg_handle;
+ dbp = NULL;
+
+#ifdef DEBUG_RECOVER
+ REC_PRINT(__dbreg_register_print);
+#endif
+ do_open = do_close = 0;
+ if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ switch (argp->opcode) {
+ case LOG_OPEN:
+ if ((DB_REDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CLOSE:
+ if (DB_UNDO(op))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+ case LOG_RCLOSE:
+ /*
+ * LOG_RCLOSE was generated by recover because a file
+ * was left open. The POPENFILES pass, which is run
+ * to open files to abort prepared transactions,
+ * may not include the open for this file so we
+ * open it here. Note that a normal CLOSE is
+ * not legal before the prepared transaction is
+ * committed or aborted.
+ */
+ if (DB_UNDO(op) || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CHECKPOINT:
+ if (DB_UNDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ break;
+ }
+
+ if (do_open) {
+ /*
+ * We must open the db even if the meta page is not
+		 * yet written as we may be creating a subdatabase.
+ */
+ if (op == DB_TXN_OPENFILES && argp->opcode != LOG_CHECKPOINT)
+ F_SET(dblp, DBLOG_FORCE_OPEN);
+
+ /*
+ * During an abort or an open pass to recover prepared txns,
+ * we need to make sure that we use the same locker id on the
+ * open. We pass the txnid along to ensure this.
+ */
+ ret = __dbreg_open_file(dbenv,
+ op == DB_TXN_ABORT || op == DB_TXN_POPENFILES ?
+ argp->txnid : NULL, argp, info);
+ if (ret == ENOENT || ret == EINVAL) {
+ /*
+ * If this is an OPEN while rolling forward, it's
+ * possible that the file was recreated since last
+ * time we got here. In that case, we've got deleted
+ * set and probably shouldn't, so we need to check
+ * for that case and possibly retry.
+ */
+ if (op == DB_TXN_FORWARD_ROLL &&
+ argp->txnid != 0 &&
+ dblp->dbentry[argp->fileid].deleted) {
+ dblp->dbentry[argp->fileid].deleted = 0;
+ ret =
+ __dbreg_open_file(dbenv, NULL, argp, info);
+ }
+ ret = 0;
+ }
+ F_CLR(dblp, DBLOG_FORCE_OPEN);
+ }
+
+ if (do_close) {
+ /*
+ * If we are undoing an open, or redoing a close,
+ * then we need to close the file.
+ *
+ * If the file is deleted, then we can just ignore this close.
+ * Otherwise, we should usually have a valid dbp we should
+ * close or whose reference count should be decremented.
+ * However, if we shut down without closing a file, we may, in
+ * fact, not have the file open, and that's OK.
+ */
+ do_rem = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ if (argp->fileid < dblp->dbentry_cnt) {
+ /*
+ * Typically, closes should match an open which means
+ * that if this is a close, there should be a valid
+ * entry in the dbentry table when we get here,
+ * however there is an exception. If this is an
+ * OPENFILES pass, then we may have started from
+ * a log file other than the first, and the
+ * corresponding open appears in an earlier file.
+ * We can ignore that case, but all others are errors.
+ */
+ dbe = &dblp->dbentry[argp->fileid];
+ if (dbe->dbp == NULL && !dbe->deleted) {
+ /* No valid entry here. */
+ if ((argp->opcode != LOG_CLOSE &&
+ argp->opcode != LOG_RCLOSE) ||
+ (op != DB_TXN_OPENFILES &&
+ op !=DB_TXN_POPENFILES)) {
+ __db_err(dbenv,
+ "Improper file close at %lu/%lu",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset);
+ ret = EINVAL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ goto done;
+ }
+
+ /* We have either an open entry or a deleted entry. */
+ if ((dbp = dbe->dbp) != NULL) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+
+ /*
+ * If we're a replication client, it's
+ * possible to get here with a dbp that
+ * the user opened, but which we later
+ * assigned a fileid to. Be sure that
+ * we only close dbps that we opened in
+ * the recovery code; they should have
+ * DB_AM_RECOVER set.
+ *
+ * The only exception is if we're aborting
+ * in a normal environment; then we might
+ * get here with a non-AM_RECOVER database.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER) ||
+ op == DB_TXN_ABORT)
+ do_rem = 1;
+ } else if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ __dbreg_rem_dbentry(dblp, argp->fileid);
+ }
+ } else
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if (do_rem) {
+ /*
+ * If we are undoing a create we'd better discard
+ * any buffers from the memory pool.
+ */
+ if (dbp != NULL && dbp->mpf != NULL && argp->id != 0) {
+ if ((ret = dbp->mpf->close(dbp->mpf,
+ DB_MPOOL_DISCARD)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ }
+
+ /*
+ * During recovery, all files are closed. On an abort,
+ * we only close the file if we opened it during the
+ * abort (DB_AM_RECOVER set), otherwise we simply do
+ * a __db_refresh. For the close case, if remove or
+ * rename has closed the file, don't request a sync,
+ * because the NULL mpf would be a problem.
+ */
+ if (dbp != NULL) {
+ if (op == DB_TXN_ABORT &&
+ !F_ISSET(dbp, DB_AM_RECOVER))
+ t_ret =
+ __db_refresh(dbp, NULL, DB_NOSYNC);
+ else
+ t_ret = dbp->close(dbp, DB_NOSYNC);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ }
+ }
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+out: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * __dbreg_open_file --
+ * Called during log_register recovery. Make sure that we have an
+ * entry in the dbentry table for this ndx. Returns 0 on success,
+ * non-zero on error.
+ */
+static int
+__dbreg_open_file(dbenv, txn, argp, info)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ __dbreg_register_args *argp;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *lp;
+ DB *dbp;
+ u_int32_t id;
+
+ lp = (DB_LOG *)dbenv->lg_handle;
+ /*
+ * We never re-open temporary files. Temp files are only
+ * useful during aborts in which case the dbp was entered
+ * when the file was registered. During recovery, we treat
+ * temp files as properly deleted files, allowing the open to
+ * fail and not reporting any errors when recovery fails to
+ * get a valid dbp from __dbreg_id_to_db.
+ */
+ if (argp->name.size == 0) {
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, argp->fileid);
+ return (ENOENT);
+ }
+
+ /*
+ * When we're opening, we have to check that the name we are opening
+ * is what we expect. If it's not, then we close the old file and
+ * open the new one.
+ */
+ MUTEX_THREAD_LOCK(dbenv, lp->mutexp);
+ if (argp->fileid < lp->dbentry_cnt)
+ dbe = &lp->dbentry[argp->fileid];
+ else
+ dbe = NULL;
+
+ if (dbe != NULL) {
+ if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ return (ENOENT);
+ }
+ if ((dbp = dbe->dbp) != NULL) {
+ if (dbp->meta_pgno != argp->meta_pgno ||
+ memcmp(dbp->fileid,
+ argp->uid.data, DB_FILE_ID_LEN) != 0) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ dbp->close(dbp, DB_NOSYNC);
+ goto reopen;
+ }
+
+ /*
+ * We should only get here if we already have the
+ * dbp from an openfiles pass, in which case, what's
+ * here had better be the same dbp.
+ */
+ DB_ASSERT(dbe->dbp == dbp);
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * This is a successful open. We need to record that
+ * in the txnlist so that we know how to handle the
+ * subtransaction that created the file system object.
+ */
+ if (argp->id != TXN_INVALID &&
+ __db_txnlist_update(dbenv, info,
+ argp->id, TXN_EXPECTED, NULL) == TXN_NOTFOUND)
+ (void)__db_txnlist_add(dbenv,
+ info, argp->id, TXN_EXPECTED, NULL);
+ return (0);
+ }
+ }
+
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * We are about to pass a recovery txn pointer into the main library.
+ * We need to make sure that any accessed fields are set appropriately.
+ */
+reopen: if (txn != NULL) {
+ id = txn->txnid;
+ memset(txn, 0, sizeof(DB_TXN));
+ txn->txnid = id;
+ txn->mgrp = dbenv->tx_handle;
+ }
+
+ return (__dbreg_do_open(dbenv, txn, lp, argp->uid.data, argp->name.data,
+ argp->ftype, argp->fileid, argp->meta_pgno, info, argp->id));
+}
diff --git a/libdb/dbreg/dbreg_util.c b/libdb/dbreg/dbreg_util.c
new file mode 100644
index 0000000..810c3dd
--- /dev/null
+++ b/libdb/dbreg/dbreg_util.c
@@ -0,0 +1,797 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_check_master __P((DB_ENV *, u_int8_t *, char *));
+
+/*
+ * __dbreg_add_dbentry --
+ * Adds a DB entry to the dbreg DB entry table.
+ *
+ * PUBLIC: int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+ */
+int
+__dbreg_add_dbentry(dbenv, dblp, dbp, ndx)
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB *dbp;
+ int32_t ndx;
+{
+ int32_t i;
+ int ret;
+
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Check if we need to grow the table. Note, ndx is 0-based (the
+	 * index into the DB entry table) and dbentry_cnt is 1-based, the
+ * number of available slots.
+ */
+ if (dblp->dbentry_cnt <= ndx) {
+ if ((ret = __os_realloc(dbenv,
+ (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY),
+ &dblp->dbentry)) != 0)
+ goto err;
+
+ /* Initialize the new entries. */
+ for (i = dblp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) {
+ dblp->dbentry[i].dbp = NULL;
+ dblp->dbentry[i].deleted = 0;
+ }
+ dblp->dbentry_cnt = i;
+ }
+
+ DB_ASSERT(dblp->dbentry[ndx].dbp == NULL);
+ dblp->dbentry[ndx].deleted = dbp == NULL;
+ dblp->dbentry[ndx].dbp = dbp;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_rem_dbentry
+ * Remove an entry from the DB entry table.
+ *
+ * PUBLIC: void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+ */
+void
+__dbreg_rem_dbentry(dblp, ndx)
+ DB_LOG *dblp;
+ int32_t ndx;
+{
+ MUTEX_THREAD_LOCK(dblp->dbenv, dblp->mutexp);
+ dblp->dbentry[ndx].dbp = NULL;
+ dblp->dbentry[ndx].deleted = 0;
+ MUTEX_THREAD_UNLOCK(dblp->dbenv, dblp->mutexp);
+}
+
+/*
+ * __dbreg_open_files --
+ * Put a LOG_CHECKPOINT log record for each open database.
+ *
+ * PUBLIC: int __dbreg_open_files __P((DB_ENV *));
+ */
+int
+__dbreg_open_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ DBT *dbtp, fid_dbt, t;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&t, 0, sizeof(t));
+ t.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ t.size = (u_int32_t)strlen(t.data) + 1;
+ dbtp = &t;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ /*
+ * Output LOG_CHECKPOINT records which will be
+ * processed during the OPENFILES pass of recovery.
+ * At the end of recovery we want to output the
+ * files that were open so that a future recovery
+ * run will have the correct files open during
+ * a backward pass. For this we output LOG_RCLOSE
+ * records so that the files will be closed on
+ * the forward pass.
+ */
+ if ((ret = __dbreg_register_log(dbenv,
+ NULL, &r_unused, 0,
+ F_ISSET(dblp, DBLOG_RECOVER) ? LOG_RCLOSE : LOG_CHECKPOINT,
+ dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno,
+ TXN_INVALID)) != 0)
+ break;
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_close_files --
+ * Close files that were opened by the recovery daemon. We sync the
+ * file, unless its mpf pointer has been NULLed by a db_remove or
+ * db_rename. We may not have flushed the log_register record that
+ * closes the file.
+ *
+ * PUBLIC: int __dbreg_close_files __P((DB_ENV *));
+ */
+int
+__dbreg_close_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB *dbp;
+ int ret, t_ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ /* We only want to close dbps that recovery opened. */
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ F_ISSET(dbp, DB_AM_RECOVER)) {
+ /*
+ * It's unsafe to call DB->close while holding the
+ * thread lock, because we'll call __dbreg_rem_dbentry
+ * and grab it again.
+ *
+ * Just drop it. Since dbreg ids go monotonically
+ * upward, concurrent opens should be safe, and the
+ * user should have no business closing files while
+ * we're in this loop anyway--we're in the process of
+ * making all outstanding dbps invalid.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if ((t_ret = dbp->close(dbp,
+ dbp->mpf == NULL ? DB_NOSYNC : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ }
+ dblp->dbentry[i].deleted = 0;
+ dblp->dbentry[i].dbp = NULL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_nofiles --
+ * Check that there are no open files in the process local table.
+ * Returns 0 if there are no files and EINVAL if there are any.
+ *
+ * PUBLIC: int __dbreg_nofiles __P((DB_ENV *));
+ */
+int
+__dbreg_nofiles(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ int ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ !F_ISSET(dbp, DB_AM_RECOVER)) {
+ ret = EINVAL;
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_id_to_db --
+ * Return the DB corresponding to the specified dbreg id.
+ *
+ * PUBLIC: int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int));
+ */
+int
+__dbreg_id_to_db(dbenv, txn, dbpp, ndx, inc)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc;
+{
+ return (__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, 1));
+}
+
+/*
+ * __dbreg_id_to_db_int --
+ * Return the DB corresponding to the specified dbreg id. The internal
+ * version takes a final parameter that indicates whether we should attempt
+ * to open the file if no mapping is found. During recovery, the recovery
+ * routines all want to try to open the file (and this is called from
+ * __dbreg_id_to_db), however, if we have a multi-process environment where
+ * some processes may not have the files open (e.g., XA), then we also get
+ * called from __dbreg_assign_id and it's OK if there is no mapping.
+ *
+ * PUBLIC: int __dbreg_id_to_db_int __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB **, int32_t, int, int));
+ */
+int
+__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc, tryopen;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+ int ret;
+ char *name;
+
+ ret = 0;
+ dblp = dbenv->lg_handle;
+ COMPQUIET(inc, 0);
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Under XA, a process different than the one issuing DB operations
+ * may abort a transaction. In this case, the "recovery" routines
+ * are run by a process that does not necessarily have the file open,
+	 * so we must open the file explicitly.
+ */
+ if (ndx >= dblp->dbentry_cnt ||
+ (!dblp->dbentry[ndx].deleted && dblp->dbentry[ndx].dbp == NULL)) {
+ if (!tryopen || F_ISSET(dblp, DBLOG_RECOVER)) {
+ ret = ENOENT;
+ goto err;
+ }
+
+ /*
+ * __dbreg_id_to_fname acquires the region's fq_mutex,
+ * which we can't safely acquire while we hold the thread lock.
+ * We no longer need it anyway--the dbentry table didn't
+ * have what we needed.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+
+ if (__dbreg_id_to_fname(dblp, ndx, 0, &fname) != 0)
+ /*
+ * With transactional opens, we may actually have
+ * closed this file in the transaction in which
+ * case this will fail too. Then it's up to the
+ * caller to reopen the file.
+ */
+ return (ENOENT);
+
+ /*
+ * Note that we're relying on fname not to change, even
+ * though we released the mutex that protects it (fq_mutex)
+ * inside __dbreg_id_to_fname. This should be a safe
+ * assumption, because the other process that has the file
+ * open shouldn't be closing it while we're trying to abort.
+ */
+ name = R_ADDR(&dblp->reginfo, fname->name_off);
+
+ /*
+ * At this point, we are not holding the thread lock, so exit
+ * directly instead of going through the exit code at the
+ * bottom. If the __dbreg_do_open succeeded, then we don't need
+ * to do any of the remaining error checking at the end of this
+ * routine.
+ * XXX I am sending a NULL txnlist and 0 txnid which may be
+ * completely broken ;(
+ */
+ if ((ret = __dbreg_do_open(dbenv, txn, dblp,
+ fname->ufid, name, fname->s_type,
+ ndx, fname->meta_pgno, NULL, 0)) != 0)
+ return (ret);
+
+ *dbpp = dblp->dbentry[ndx].dbp;
+ return (0);
+ }
+
+ /*
+ * Return DB_DELETED if the file has been deleted (it's not an error).
+ */
+ if (dblp->dbentry[ndx].deleted) {
+ ret = DB_DELETED;
+ goto err;
+ }
+
+ /* It's an error if we don't have a corresponding writeable DB. */
+ if ((*dbpp = dblp->dbentry[ndx].dbp) == NULL)
+ ret = ENOENT;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_id_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed dbreg id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **));
+ */
+int
+__dbreg_id_to_fname(dblp, lid, have_lock, fnamep)
+ DB_LOG *dblp;
+ int32_t lid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->id == lid) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+/*
+ * __dbreg_fid_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed file unique id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **));
+ */
+int
+__dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)
+ DB_LOG *dblp;
+ u_int8_t *fid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (memcmp(fnp->ufid, fid, DB_FILE_ID_LEN) == 0) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_get_name
+ *
+ * Interface to get the name of a registered file. This is mainly
+ * diagnostic, and the returned name could be transient unless there is
+ * something ensuring that the file cannot be closed.
+ *
+ * PUBLIC: int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **));
+ */
+int
+__dbreg_get_name(dbenv, fid, namep)
+ DB_ENV *dbenv;
+ u_int8_t *fid;
+ char **namep;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+
+ dblp = dbenv->lg_handle;
+
+ if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fname) == 0) {
+ *namep = R_ADDR(&dblp->reginfo, fname->name_off);
+ return (0);
+ }
+
+ return (-1);
+}
+
+/*
+ * __dbreg_do_open --
+ * Open files referenced in the log. This is the part of the open that
+ * is not protected by the thread mutex.
+ * PUBLIC: int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *,
+ * PUBLIC: char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+ */
+int
+__dbreg_do_open(dbenv,
+ txn, lp, uid, name, ftype, ndx, meta_pgno, info, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOG *lp;
+ u_int8_t *uid;
+ char *name;
+ DBTYPE ftype;
+ int32_t ndx;
+ db_pgno_t meta_pgno;
+ void *info;
+ u_int32_t id;
+{
+ DB *dbp;
+ int ret;
+ u_int32_t cstat;
+
+ if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * We can open files under a number of different scenarios.
+ * First, we can open a file during a normal txn_abort, if that file
+ * was opened and closed during the transaction (as is the master
+ * database of a sub-database).
+ * Second, we might be aborting a transaction in XA and not have
+ * it open in the process that is actually doing the abort.
+ * Third, we might be in recovery.
+ * In case 3, there is no locking, so there is no issue.
+ * In cases 1 and 2, we are guaranteed to already hold any locks
+ * that we need, since we're still in the same transaction, so by
+ * setting DB_AM_RECOVER, we guarantee that we don't log and that
+ * we don't try to acquire locks on behalf of a different locker id.
+ */
+ F_SET(dbp, DB_AM_RECOVER);
+ if (meta_pgno != PGNO_BASE_MD) {
+ memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
+ dbp->meta_pgno = meta_pgno;
+ }
+ dbp->type = ftype;
+ if ((ret = __db_dbopen(dbp, txn, name, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), meta_pgno)) == 0) {
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if ((meta_pgno != PGNO_BASE_MD &&
+ __dbreg_check_master(dbenv, uid, name) != 0) ||
+ memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ cstat = TXN_IGNORE;
+ else
+ cstat = TXN_EXPECTED;
+
+ /* Assign the specific dbreg id to this dbp. */
+ if ((ret = __dbreg_assign_id(dbp, ndx)) != 0)
+ goto err;
+
+ /*
+ * If we successfully opened this file, then we need to
+ * convey that information to the txnlist so that we
+ * know how to handle the subtransaction that created
+ * the file system object.
+ */
+ if (id != TXN_INVALID) {
+ if ((ret = __db_txnlist_update(dbenv,
+ info, id, cstat, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, cstat, NULL);
+ else if (ret > 0)
+ ret = 0;
+ }
+err: if (cstat == TXN_IGNORE)
+ goto not_right;
+ return (ret);
+ } else {
+ /* Record that the open failed in the txnlist. */
+ if (id != TXN_INVALID && (ret = __db_txnlist_update(dbenv,
+ info, id, TXN_UNEXPECTED, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, TXN_UNEXPECTED, NULL);
+ }
+not_right:
+ (void)dbp->close(dbp, 0);
+ /* Add this file as deleted. */
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, ndx);
+ return (ENOENT);
+}
+
+static int
+__dbreg_check_master(dbenv, uid, name)
+ DB_ENV *dbenv;
+ u_int8_t *uid;
+ char *name;
+{
+ DB *dbp;
+ int ret;
+
+ ret = 0;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ dbp->type = DB_BTREE;
+ F_SET(dbp, DB_AM_RECOVER);
+ ret = __db_dbopen(dbp,
+ NULL, name, NULL, 0, __db_omode("rw----"), PGNO_BASE_MD);
+
+ if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ ret = EINVAL;
+
+ (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __dbreg_lazy_id --
+ * When a replication client gets upgraded to being a replication master,
+ * it may have database handles open that have not been assigned an ID, but
+ * which have become legal to use for logging.
+ *
+ * This function lazily allocates a new ID for such a handle, in a
+ * new transaction created for the purpose. We need to do this in a new
+ * transaction because we definitely wish to commit the dbreg_register, but
+ * at this point we have no way of knowing whether the log record that incited
+ * us to call this will be part of a committed transaction.
+ *
+ * PUBLIC: int __dbreg_lazy_id __P((DB *));
+ */
+int
+__dbreg_lazy_id(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ DB_ASSERT(F_ISSET(dbenv, DB_ENV_REP_MASTER));
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_new_id(dbp, txn)) != 0) {
+ (void)txn->abort(txn);
+ return (ret);
+ }
+
+ return (txn->commit(txn, DB_TXN_NOSYNC));
+}
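+
+/*
+ * Illustrative usage (a sketch, not part of the original source): a
+ * caller that is about to log against such a handle would typically do
+ *
+ *	if ((ret = __dbreg_lazy_id(dbp)) != 0)
+ *		return (ret);
+ *
+ * before constructing the log record.
+ */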
+
+/*
+ * __dbreg_push_id and __dbreg_pop_id --
+ * Dbreg ids from closed files are kept on a stack in shared memory
+ * for recycling. (We want to reuse them as much as possible because each
+ * process keeps open files in an array by ID.) Push them to the stack and
+ * pop them from it, managing memory as appropriate.
+ *
+ * The stack is protected by the fq_mutex, and in both functions we assume
+ * that this is already locked.
+ *
+ * PUBLIC: int __dbreg_push_id __P((DB_ENV *, int32_t));
+ * PUBLIC: int __dbreg_pop_id __P((DB_ENV *, int32_t *));
+ */
+int
+__dbreg_push_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack, *newstack;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ if (lp->free_fid_stack != INVALID_ROFF)
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ else
+ stack = NULL;
+
+ /* Check if we have room on the stack. */
+ if (lp->free_fids_alloced <= lp->free_fids + 1) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0,
+ &newstack)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+
+ memcpy(newstack, stack,
+ lp->free_fids_alloced * sizeof(u_int32_t));
+ lp->free_fid_stack = R_OFFSET(&dblp->reginfo, newstack);
+ lp->free_fids_alloced += 20;
+
+ if (stack != NULL)
+ __db_shalloc_free(dblp->reginfo.addr, stack);
+
+ stack = newstack;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ DB_ASSERT(stack != NULL);
+ stack[lp->free_fids++] = id;
+ return (0);
+}
+
+int
+__dbreg_pop_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t *id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to pop? */
+ if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ *id = stack[--lp->free_fids];
+ } else
+ *id = DB_LOGFILEID_INVALID;
+
+ return (0);
+}
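+
+/*
+ * Illustrative sketch (not part of the original source): with fq_mutex
+ * held, a path retiring a dbreg id would push it for reuse, and an id
+ * allocation path would pop one, falling back to allocating a fresh id
+ * when the stack is empty ("fnp" is an FNAME pointer as used elsewhere
+ * in this file):
+ *
+ *	if ((ret = __dbreg_push_id(dbenv, fnp->id)) != 0)
+ *		return (ret);
+ *	...
+ *	if ((ret = __dbreg_pop_id(dbenv, &id)) != 0)
+ *		return (ret);
+ *	if (id == DB_LOGFILEID_INVALID)
+ *		...allocate a fresh id instead...
+ */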
+
+/*
+ * __dbreg_pluck_id --
+ * Remove a particular dbreg id from the stack of free ids. This is
+ * used when we open a file, as in recovery, with a specific ID that might
+ * be on the stack.
+ *
+ * Returns success whether or not the particular id was found, and like
+ * push and pop, assumes that the fq_mutex is locked.
+ *
+ * PUBLIC: int __dbreg_pluck_id __P((DB_ENV *, int32_t));
+ */
+int
+__dbreg_pluck_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+ int i;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to look at? */
+ if (lp->free_fid_stack != INVALID_ROFF) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ for (i = 0; i < lp->free_fids; i++)
+ if (id == stack[i]) {
+ /*
+ * Found it. Overwrite it with the top
+ * id (which may harmlessly be itself),
+ * and shorten the stack by one.
+ */
+ stack[i] = stack[lp->free_fids - 1];
+ lp->free_fids--;
+ return (0);
+ }
+ }
+
+ return (0);
+}
+
+#ifdef DEBUG
+/*
+ * __dbreg_print_dblist --
+ * Display the list of files.
+ *
+ * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *));
+ */
+void
+__dbreg_print_dblist(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int del, first;
+ char *name;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (first) {
+ first = 0;
+ __db_err(dbenv,
+ "ID\t\t\tName\tType\tPgno\tTxnid\tDBP-info");
+ }
+ if (fnp->name_off == INVALID_ROFF)
+ name = "";
+ else
+ name = R_ADDR(&dblp->reginfo, fnp->name_off);
+
+ dbp = fnp->id >= dblp->dbentry_cnt ? NULL :
+ dblp->dbentry[fnp->id].dbp;
+ del = fnp->id >= dblp->dbentry_cnt ? 0 :
+ dblp->dbentry[fnp->id].deleted;
+ __db_err(dbenv, "%ld\t%s\t\t\t%s\t%lu\t%lx\t%s %d %lx %lx",
+ (long)fnp->id, name,
+ __db_dbtype_to_string(fnp->s_type),
+ (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid,
+ dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp),
+ dbp == NULL ? 0 : dbp->flags);
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+}
+#endif
diff --git a/libdb/dist/Makefile.in b/libdb/dist/Makefile.in
new file mode 100644
index 0000000..260c13d
--- /dev/null
+++ b/libdb/dist/Makefile.in
@@ -0,0 +1,1399 @@
+# $Id$
+
+srcdir= @srcdir@/..
+builddir=.
+
+##################################################
+# Installation directories and permissions.
+##################################################
+prefix= @prefix@
+exec_prefix=@exec_prefix@
+bindir= @bindir@
+includedir=@includedir@
+libdir= @libdir@
+docdir= $(prefix)/docs
+
+dmode= 755
+emode= 555
+fmode= 444
+
+transform=@program_transform_name@
+
+##################################################
+# Paths for standard user-level commands.
+##################################################
+SHELL= @db_cv_path_sh@
+ar= @db_cv_path_ar@
+chmod= @db_cv_path_chmod@
+cp= @db_cv_path_cp@
+ln= @db_cv_path_ln@
+mkdir= @db_cv_path_mkdir@
+ranlib= @db_cv_path_ranlib@
+rm= @db_cv_path_rm@
+rpm= @db_cv_path_rpm@
+strip= @db_cv_path_strip@
+
+##################################################
+# General library information.
+##################################################
+DEF_LIB= @DEFAULT_LIB@
+DEF_LIB_CXX= @DEFAULT_LIB_CXX@
+INSTALLER= @INSTALLER@
+LIBTOOL= @LIBTOOL@
+
+POSTLINK= @POSTLINK@
+SOLINK= @MAKEFILE_SOLINK@
+SOFLAGS= @SOFLAGS@
+SOMAJOR= @DB_VERSION_MAJOR@
+SOVERSION= @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@
+
+##################################################
+# C API.
+##################################################
+CPPFLAGS= -I$(builddir) -I$(srcdir) -I$(srcdir)/dbinc @CPPFLAGS@
+CFLAGS= -c $(CPPFLAGS) @CFLAGS@
+CC= @MAKEFILE_CC@
+CCLINK= @MAKEFILE_CCLINK@
+
+LDFLAGS= @LDFLAGS@
+LIBS= @LIBS@
+LIBSO_LIBS= @LIBSO_LIBS@
+
+libdb= libdb.a
+libso_base= libdb
+libso= $(libso_base)-$(SOVERSION).@SOSUFFIX@
+libso_static= $(libso_base)-$(SOVERSION).a
+libso_target= $(libso_base)-$(SOVERSION).la
+libso_default= $(libso_base).@SOSUFFIX@
+libso_major= $(libso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# C++ API.
+#
+# C++ support is optional, and can be built with static or shared libraries.
+##################################################
+CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@
+CXX= @MAKEFILE_CXX@
+CXXLINK= @MAKEFILE_CXXLINK@
+XSOLINK= @MAKEFILE_XSOLINK@
+LIBXSO_LIBS= @LIBXSO_LIBS@
+
+libcxx= libdb_cxx.a
+libxso_base= libdb_cxx
+libxso= $(libxso_base)-$(SOVERSION).@SOSUFFIX@
+libxso_static= $(libxso_base)-$(SOVERSION).a
+libxso_target= $(libxso_base)-$(SOVERSION).la
+libxso_default= $(libxso_base).@SOSUFFIX@
+libxso_major= $(libxso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# Java API.
+#
+# Java support is optional and requires shared libraries.
+##################################################
+CLASSPATH= $(JAVA_CLASSTOP)
+LIBJSO_LIBS= @LIBJSO_LIBS@
+
+JAR= @JAR@
+JAVAC= env CLASSPATH="$(CLASSPATH)" @JAVAC@
+JAVACFLAGS= @JAVACFLAGS@
+JAVA_CLASSTOP= ./classes
+JAVA_RPCCLASSES=./classes.rpc
+JAVA_SRCDIR= $(srcdir)/java/src
+JAVA_DBREL= com/sleepycat/db
+JAVA_EXREL= com/sleepycat/examples
+JAVA_RPCREL= com/sleepycat/db/rpcserver
+JAVA_DBDIR= $(JAVA_SRCDIR)/$(JAVA_DBREL)
+JAVA_EXDIR= $(JAVA_SRCDIR)/$(JAVA_EXREL)
+JAVA_RPCDIR= $(srcdir)/rpc_server/java
+
+libj_jarfile= db.jar
+libj_exjarfile= dbexamples.jar
+rpc_jarfile= dbsvc.jar
+libjso_base= libdb_java
+libjso= $(libjso_base)-$(SOVERSION).@JMODSUFFIX@
+libjso_static= $(libjso_base)-$(SOVERSION).a
+libjso_target= $(libjso_base)-$(SOVERSION).la
+libjso_default= $(libjso_base).@JMODSUFFIX@
+libjso_major= $(libjso_base)-$(SOMAJOR).@JMODSUFFIX@
+libjso_g= $(libjso_base)-$(SOVERSION)_g.@JMODSUFFIX@
+
+##################################################
+# TCL API.
+#
+# Tcl support is optional and requires shared libraries.
+##################################################
+TCFLAGS= @TCFLAGS@
+LIBTSO_LIBS= @LIBTSO_LIBS@
+libtso_base= libdb_tcl
+libtso= $(libtso_base)-$(SOVERSION).@MODSUFFIX@
+libtso_static= $(libtso_base)-$(SOVERSION).a
+libtso_target= $(libtso_base)-$(SOVERSION).la
+libtso_default= $(libtso_base).@MODSUFFIX@
+libtso_major= $(libtso_base)-$(SOMAJOR).@MODSUFFIX@
+
+##################################################
+# db_dump185 UTILITY
+#
+# The db_dump185 application should be compiled using the system's db.h file
+# (which should be a DB 1.85/1.86 include file), and the system's 1.85/1.86
+# object library. To include the right db.h, don't include -I$(builddir) on
+# the compile line. You may also need to add a local include directory and
+# local libraries, for example, by adding -I options to the DB185INC
+# line and -l options to the DB185LIB line (see the illustrative
+# example after the assignments below).
+##################################################
+DB185INC= -c @CFLAGS@ -I$(srcdir) @CPPFLAGS@
+DB185LIB=
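+# For example (illustrative only; the extra paths and the -ldb1 library
+# name are site-specific assumptions, not defaults shipped here):
+#	DB185INC= -c @CFLAGS@ -I$(srcdir) -I/usr/local/include @CPPFLAGS@
+#	DB185LIB= -L/usr/local/lib -ldb1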
+
+##################################################
+# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED.
+##################################################
+
+##################################################
+# Object and utility lists.
+##################################################
+C_OBJS= @ADDITIONAL_OBJS@ @LTLIBOBJS@ @RPC_CLIENT_OBJS@ \
+ bt_compare@o@ bt_conv@o@ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ \
+ bt_method@o@ bt_open@o@ bt_put@o@ bt_rec@o@ bt_reclaim@o@ \
+ bt_recno@o@ bt_rsearch@o@ bt_search@o@ bt_split@o@ bt_stat@o@ \
+ bt_upgrade@o@ bt_verify@o@ btree_auto@o@ crdel_auto@o@ \
+ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ db_byteorder@o@ db_cam@o@ \
+ db_conv@o@ db_dispatch@o@ db_dup@o@ db_err@o@ db_getlong@o@ \
+ db_idspace@o@ db_iface@o@ db_join@o@ db_log2@o@ db_meta@o@ \
+ db_method@o@ db_open@o@ db_overflow@o@ db_pr@o@ db_rec@o@ \
+ db_reclaim@o@ db_rename@o@ db_remove@o@ db_ret@o@ db_salloc@o@ \
+ db_shash@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ db_vrfy@o@ \
+ db_vrfyutil@o@ dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ \
+ dbreg_util@o@ env_file@o@ env_method@o@ env_open@o@ env_recover@o@ \
+ env_region@o@ fileops_auto@o@ fop_basic@o@ fop_rec@o@ \
+ fop_util@o@ hash@o@ hash_auto@o@ hash_conv@o@ hash_dup@o@ \
+ hash_func@o@ hash_meta@o@ hash_method@o@ hash_open@o@ \
+ hash_page@o@ hash_rec@o@ hash_reclaim@o@ hash_stat@o@ \
+ hash_upgrade@o@ hash_verify@o@ hmac@o@ hsearch@o@ lock@o@ \
+ lock_deadlock@o@ lock_method@o@ lock_region@o@ lock_stat@o@ \
+ lock_util@o@ log@o@ log_archive@o@ log_compare@o@ log_get@o@ \
+ log_method@o@ log_put@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \
+ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ \
+ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ \
+ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ os_dir@o@ \
+ os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ os_id@o@ \
+ os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ os_region@o@ \
+ os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ os_seek@o@ \
+ os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ os_unlink@o@ \
+ qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ qam_method@o@ \
+ qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ qam_verify@o@ \
+ rep_method@o@ rep_record@o@ rep_region@o@ rep_util@o@ sha1@o@ \
+ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ txn_recover@o@ \
+ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ xa_db@o@ xa_map@o@
+
+CXX_OBJS=\
+ cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ \
+ cxx_lock@o@ cxx_logc@o@ cxx_mpool@o@ cxx_txn@o@
+
+JAVA_OBJS=\
+ java_Db@o@ java_DbEnv@o@ java_DbLock@o@ java_DbLogc@o@ \
+ java_DbLsn@o@ java_DbTxn@o@ java_DbUtil@o@ java_Dbc@o@ \
+ java_Dbt@o@ \
+ java_info@o@ java_locked@o@ java_util@o@ java_stat_auto@o@
+
+JAVA_DBSRCS=\
+ $(JAVA_DBDIR)/Db.java $(JAVA_DBDIR)/DbAppendRecno.java \
+ $(JAVA_DBDIR)/DbAppDispatch.java \
+ $(JAVA_DBDIR)/DbBtreeCompare.java $(JAVA_DBDIR)/DbBtreePrefix.java \
+ $(JAVA_DBDIR)/DbBtreeStat.java $(JAVA_DBDIR)/DbClient.java \
+ $(JAVA_DBDIR)/DbConstants.java $(JAVA_DBDIR)/DbDeadlockException.java \
+ $(JAVA_DBDIR)/DbDupCompare.java $(JAVA_DBDIR)/DbEnv.java \
+ $(JAVA_DBDIR)/DbEnvFeedback.java $(JAVA_DBDIR)/DbErrcall.java \
+ $(JAVA_DBDIR)/DbException.java $(JAVA_DBDIR)/DbFeedback.java \
+ $(JAVA_DBDIR)/DbHash.java $(JAVA_DBDIR)/DbHashStat.java \
+ $(JAVA_DBDIR)/DbKeyRange.java $(JAVA_DBDIR)/DbLock.java \
+ $(JAVA_DBDIR)/DbLockNotGrantedException.java \
+ $(JAVA_DBDIR)/DbLockRequest.java $(JAVA_DBDIR)/DbLockStat.java \
+ $(JAVA_DBDIR)/DbLogc.java $(JAVA_DBDIR)/DbLogStat.java \
+ $(JAVA_DBDIR)/DbLsn.java $(JAVA_DBDIR)/DbMemoryException.java \
+ $(JAVA_DBDIR)/DbMpoolFStat.java $(JAVA_DBDIR)/DbMpoolStat.java \
+ $(JAVA_DBDIR)/DbMultipleDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleIterator.java \
+ $(JAVA_DBDIR)/DbMultipleKeyDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleRecnoDataIterator.java \
+ $(JAVA_DBDIR)/DbOutputStreamErrcall.java \
+ $(JAVA_DBDIR)/DbPreplist.java $(JAVA_DBDIR)/DbQueueStat.java \
+ $(JAVA_DBDIR)/DbRepStat.java $(JAVA_DBDIR)/DbRepTransport.java \
+ $(JAVA_DBDIR)/DbRunRecoveryException.java \
+ $(JAVA_DBDIR)/DbSecondaryKeyCreate.java $(JAVA_DBDIR)/DbTxn.java \
+ $(JAVA_DBDIR)/DbTxnStat.java \
+ $(JAVA_DBDIR)/DbUtil.java $(JAVA_DBDIR)/Dbc.java $(JAVA_DBDIR)/Dbt.java
+
+JAVA_EXSRCS=\
+ $(JAVA_EXDIR)/AccessExample.java \
+ $(JAVA_EXDIR)/BtRecExample.java \
+ $(JAVA_EXDIR)/BulkAccessExample.java \
+ $(JAVA_EXDIR)/EnvExample.java \
+ $(JAVA_EXDIR)/LockExample.java \
+ $(JAVA_EXDIR)/TpcbExample.java
+
+TCL_OBJS=\
+ tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
+ tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_rep@o@ \
+ tcl_txn@o@ tcl_util@o@
+
+RPC_CLIENT_OBJS=\
+ client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \
+ gen_client_ret@o@
+
+RPC_SRV_OBJS=\
+ db_server_proc@o@ db_server_svc@o@ db_server_util@o@ \
+ gen_db_server@o@
+
+RPC_CXXSRV_OBJS=\
+ db_server_cxxproc@o@ db_server_cxxutil@o@ db_server_svc@o@ \
+ gen_db_server@o@
+
+RPC_JAVASRV_SRCS=\
+ $(JAVA_RPCDIR)/DbDispatcher.java \
+ $(JAVA_RPCDIR)/DbServer.java \
+ $(JAVA_RPCDIR)/FreeList.java \
+ $(JAVA_RPCDIR)/LocalIterator.java \
+ $(JAVA_RPCDIR)/RpcDb.java \
+ $(JAVA_RPCDIR)/RpcDbEnv.java \
+ $(JAVA_RPCDIR)/RpcDbTxn.java \
+ $(JAVA_RPCDIR)/RpcDbc.java \
+ $(JAVA_RPCDIR)/Timer.java \
+ $(JAVA_RPCDIR)/gen/DbServerStub.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_join_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_join_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_reply.java \
+ $(JAVA_RPCDIR)/gen/db_server.java
+
+UTIL_PROGS=\
+ @ADDITIONAL_PROGS@ \
+ db_archive@EXEEXT@ db_checkpoint@EXEEXT@ db_deadlock@EXEEXT@ \
+ db_dump@EXEEXT@ db_load@EXEEXT@ db_printlog@EXEEXT@ \
+ db_recover@EXEEXT@ db_stat@EXEEXT@ db_upgrade@EXEEXT@ db_verify@EXEEXT@
+
+##################################################
+# List of files installed into the library directory.
+##################################################
+LIB_INSTALL_FILE_LIST=\
+ $(libdb) \
+ $(libso) \
+ $(libso_default) \
+ $(libso_major) \
+ $(libso_static) \
+ $(libso_target) \
+ $(libcxx) \
+ $(libxso) \
+ $(libxso_default) \
+ $(libxso_major) \
+ $(libxso_static) \
+ $(libxso_target) \
+ $(libtso) \
+ $(libtso_default) \
+ $(libtso_major) \
+ $(libtso_static) \
+ $(libtso_target) \
+ $(libjso) \
+ $(libjso_default) \
+ $(libjso_g) \
+ $(libjso_major) \
+ $(libjso_static) \
+ $(libjso_target) \
+ $(libj_exjarfile) \
+ $(libj_jarfile)
+
+##################################################
+# We're building a standard library or an RPM file hierarchy, potentially
+# for Embedix. Note: "all" must be the first target in the Makefile.
+##################################################
+all: @BUILD_TARGET@
+
+install-strip install: all
+ @echo "installation of embedded libdb has been disabled"
+
+##################################################
+# Library and standard utilities build.
+##################################################
+library_build: @INSTALL_LIBS@ @ADDITIONAL_LANG@ $(UTIL_PROGS)
+
+$(libdb): $(C_OBJS)
+ $(ar) cr $@ $(C_OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libcxx): $(CXX_OBJS) $(C_OBJS)
+ $(ar) cr $@ $(CXX_OBJS) $(C_OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libso_target): $(C_OBJS)
+ $(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBSO_LIBS)
+
+$(libjso_target): $(JAVA_OBJS) $(C_OBJS)
+ $(SOLINK) -jnimodule $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(JAVA_OBJS) $(C_OBJS) $(LIBJSO_LIBS)
+
+$(libtso_target): $(TCL_OBJS) $(C_OBJS)
+ $(SOLINK) -module $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(TCL_OBJS) $(C_OBJS) $(LIBTSO_LIBS)
+
+$(libxso_target): $(CXX_OBJS) $(C_OBJS)
+ $(XSOLINK) $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(CXX_OBJS) $(C_OBJS) $(LIBXSO_LIBS)
+
+##################################################
+# Creating individual dependencies and actions for building class
+# files is possible, but it is very messy and error prone.
+##################################################
+java: $(libj_jarfile) $(libj_exjarfile)
+
+$(libj_jarfile): $(JAVA_DBSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_DBSRCS)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_jarfile) $(JAVA_DBREL)
+
+$(libj_exjarfile): $(libj_jarfile) $(JAVA_EXSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_EXSRCS)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_exjarfile) $(JAVA_EXREL)
+
+$(rpc_jarfile): $(libj_jarfile) $(RPC_JAVASRV_SRCS)
+ @test -d $(JAVA_RPCCLASSES) || \
+ ($(mkdir) -p $(JAVA_RPCCLASSES) && \
+ $(chmod) $(dmode) $(JAVA_RPCCLASSES))
+ env CLASSPATH=$(CLASSPATH):$(JAVA_RPCDIR)/oncrpc.jar \
+ @JAVAC@ -d $(JAVA_RPCCLASSES) $(JAVACFLAGS) $(RPC_JAVASRV_SRCS)
+ cd $(JAVA_RPCCLASSES) && $(JAR) cf ../$(rpc_jarfile) $(JAVA_RPCREL)
+
+
+##################################################
+# Utilities
+##################################################
+berkeley_db_svc: $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+berkeley_db_cxxsvc: $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) \
+ $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+berkeley_db_javasvc: $(rpc_jarfile)
+ echo > $@ "#!/bin/sh"
+ echo >> $@ CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar"
+ echo >> $@ LD_LIBRARY_PATH=.libs
+ echo >> $@ export CLASSPATH LD_LIBRARY_PATH
+ echo >> $@ exec java com.sleepycat.db.rpcserver.DbServer \$$@
+ chmod +x $@
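+# The generated wrapper script ends up looking roughly like (paths vary
+# with the configuration above):
+#	#!/bin/sh
+#	CLASSPATH=./classes:dbsvc.jar:<srcdir>/rpc_server/java/oncrpc.jar
+#	LD_LIBRARY_PATH=.libs
+#	export CLASSPATH LD_LIBRARY_PATH
+#	exec java com.sleepycat.db.rpcserver.DbServer $@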
+
+db_archive@EXEEXT@: db_archive@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_archive@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_checkpoint@EXEEXT@: db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_deadlock@EXEEXT@: db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump@EXEEXT@: db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump185@EXEEXT@: db_dump185@o@ @LTLIBOBJS@
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @LTLIBOBJS@ $(DB185LIB)
+ $(POSTLINK) $@
+
+db_load@EXEEXT@: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_printlog@EXEEXT@: db_printlog@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_printlog@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_recover@EXEEXT@: db_recover@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_recover@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_stat@EXEEXT@: db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_upgrade@EXEEXT@: db_upgrade@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_upgrade@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_verify@EXEEXT@: db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Library and standard utilities install.
+##################################################
+library_install: install_setup
+library_install: install_include install_lib install_utilities install_docs
+
+install_setup:
+ @test -d $(prefix) || \
+ ($(mkdir) -p $(prefix) && $(chmod) $(dmode) $(prefix))
+
+INCDOT= db.h db_cxx.h @ADDITIONAL_INCS@
+INCINC= cxx_common.h cxx_except.h
+install_include:
+ @echo "Installing DB include files: $(includedir) ..."
+ @test -d $(includedir) || \
+ ($(mkdir) -p $(includedir) && $(chmod) $(dmode) $(includedir))
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
+ @$(cp) -p $(INCDOT) $(includedir)
+ @cd $(srcdir)/dbinc/ && $(cp) -p $(INCINC) $(includedir)
+ @cd $(includedir) && $(chmod) $(fmode) $(INCDOT) $(INCINC)
+
+uninstall_include:
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
+
+install_lib:
+ @echo "Installing DB library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
+ @$(INSTALLER) @INSTALL_LIBS@ $(libdir)
+ @(cd $(libdir) && \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_default); \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_major); \
+ test -f $(libso_static) && $(ln) -s $(libso_static) $(libdb); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_default); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_major); \
+ test -f $(libxso_static) && $(ln) -s $(libxso_static) $(libcxx); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_default); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_default); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_g)) || true
+ @(test -f $(libj_jarfile) && \
+ $(cp) $(libj_jarfile) $(libdir) && \
+ $(chmod) $(fmode) $(libdir)/$(libj_jarfile)) || true
+
+uninstall_lib:
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
+
+install_utilities:
+ echo "Installing DB utilities: $(bindir) ..."
+ @test -d $(bindir) || \
+ ($(mkdir) -p $(bindir) && $(chmod) $(dmode) $(bindir))
+ @for i in $(UTIL_PROGS); do \
+ $(rm) -f $(bindir)/$$i $(bindir)/$$i.exe; \
+ test -f $$i.exe && i=$$i.exe || true; \
+ $(INSTALLER) $$i $(bindir)/$$i; \
+ test -f $(strip) && $(strip) $(bindir)/$$i || true; \
+ $(chmod) $(emode) $(bindir)/$$i; \
+ done
+
+uninstall_utilities:
+ @(cd $(bindir); for i in $(UTIL_PROGS); do \
+ $(rm) -f $$i $$i.exe; \
+ done)
+
+DOCLIST=\
+ api_c api_cxx api_java api_tcl images index.html ref reftoc.html \
+ sleepycat utility
+install_docs:
+ @echo "Installing documentation: $(docdir) ..."
+ @test -d $(docdir) || \
+ ($(mkdir) -p $(docdir) && $(chmod) $(dmode) $(docdir))
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
+ @cd $(srcdir)/docs && $(cp) -pr $(DOCLIST) $(docdir)/
+
+uninstall_docs:
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
+
+##################################################
+# RPM, Embedix build and install.
+##################################################
+RPM_ARCHIVE=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+rpm_build:
+ @$(rm) -rf BUILD RPMS SOURCES SPECS SRPMS RPM_INSTALL
+ @$(mkdir) -p BUILD && $(chmod) $(dmode) BUILD
+ @$(mkdir) -p RPMS/i386 && $(chmod) $(dmode) RPMS RPMS/i386
+ @$(mkdir) -p SOURCES && $(chmod) $(dmode) SOURCES
+ @$(mkdir) -p SPECS && $(chmod) $(dmode) SPECS
+ @$(mkdir) -p SRPMS && $(chmod) $(dmode) SRPMS
+ $(cp) @db_cv_path_rpm_archive@/$(RPM_ARCHIVE) SOURCES/
+ $(cp) db.spec SPECS/db.spec
+ $(rpm) --rcfile @CONFIGURATION_PATH@/rpmrc -ba SPECS/db.spec
+
+rpm_install:
+
+RPM_SRPMS=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-1.src.rpm
+embedix_install:
+ $(cp) db.ecd @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(chmod) $(fmode) @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(cp) SRPMS/$(RPM_SRPMS) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+ $(chmod) $(fmode) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+
+##################################################
+# Remaining standard Makefile targets.
+##################################################
+CLEAN_LIST=\
+ berkeley_db_svc berkeley_db_cxxsvc berkeley_db_javasvc \
+ db_dump185 db_perf dbs bench_001 \
+ ex_access ex_apprec ex_btrec ex_dbclient ex_env ex_lock ex_mpool \
+ ex_repquote ex_thread ex_tpcb excxx_access excxx_btrec excxx_env \
+ excxx_lock excxx_mpool excxx_tpcb rpmrc
+
+mostly-clean clean:
+ $(rm) -rf $(C_OBJS)
+ $(rm) -rf $(CXX_OBJS) $(JAVA_OBJS) $(TCL_OBJS)
+ $(rm) -rf $(RPC_CLIENT_OBJS) $(RPC_SRV_OBJS) $(RPC_CXXSRV_OBJS)
+ $(rm) -rf $(UTIL_PROGS) *.exe $(CLEAN_LIST)
+ $(rm) -rf $(JAVA_CLASSTOP) $(JAVA_RPCCLASSES) $(rpc_jarfile)
+ $(rm) -rf *@o@ *.o *.o.lock *.lo core *.core
+ $(rm) -rf ALL.OUT.* BUILD PARALLEL_TESTDIR.* RPMS RPM_INSTALL
+ $(rm) -rf RUN_LOG RUNQUEUE SOURCES SPECS SRPMS TESTDIR TESTDIR.A
+ $(rm) -rf logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST)
+
+REALCLEAN_LIST=\
+ Makefile confdefs.h config.cache config.log config.status db.h \
+ db.spec db185_int.h db_185.h db_config.h db_cxx.h db_int.h \
+ db_int_def.h include.tcl
+
+distclean maintainer-clean realclean: clean
+ $(rm) -rf $(REALCLEAN_LIST)
+ $(rm) -rf libtool
+
+check installcheck depend dvi info obj TAGS uninstall:
+ @echo "$@: make target not supported" && true
+
+dist:
+ @echo "$@: make target not supported" && false
+
+##################################################
+# Multi-threaded testers, benchmarks.
+##################################################
+dbs@o@: $(srcdir)/test_server/dbs.c
+ $(CC) $(CFLAGS) $?
+dbs_am@o@: $(srcdir)/test_server/dbs_am.c
+ $(CC) $(CFLAGS) $?
+dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c
+ $(CC) $(CFLAGS) $?
+dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c
+ $(CC) $(CFLAGS) $?
+dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c
+ $(CC) $(CFLAGS) $?
+dbs_log@o@: $(srcdir)/test_server/dbs_log.c
+ $(CC) $(CFLAGS) $?
+dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c
+ $(CC) $(CFLAGS) $?
+dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c
+ $(CC) $(CFLAGS) $?
+dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c
+ $(CC) $(CFLAGS) $?
+dbs_util@o@: $(srcdir)/test_server/dbs_util.c
+ $(CC) $(CFLAGS) $?
+dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c
+ $(CC) $(CFLAGS) $?
+DBS_OBJS=\
+ dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \
+ dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \
+ dbs_yield@o@
+dbs: $(DBS_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+db_perf@o@: $(srcdir)/test_perf/db_perf.c
+ $(CC) $(CFLAGS) $?
+perf_cache_check@o@: $(srcdir)/test_perf/perf_cache_check.c
+ $(CC) $(CFLAGS) $?
+perf_checkpoint@o@: $(srcdir)/test_perf/perf_checkpoint.c
+ $(CC) $(CFLAGS) $?
+perf_config@o@: $(srcdir)/test_perf/perf_config.c
+ $(CC) $(CFLAGS) $?
+perf_dbs@o@: $(srcdir)/test_perf/perf_dbs.c
+ $(CC) $(CFLAGS) $?
+perf_debug@o@: $(srcdir)/test_perf/perf_debug.c
+ $(CC) $(CFLAGS) $?
+perf_file@o@: $(srcdir)/test_perf/perf_file.c
+ $(CC) $(CFLAGS) $?
+perf_key@o@: $(srcdir)/test_perf/perf_key.c
+ $(CC) $(CFLAGS) $?
+perf_log@o@: $(srcdir)/test_perf/perf_log.c
+ $(CC) $(CFLAGS) $?
+perf_misc@o@: $(srcdir)/test_perf/perf_misc.c
+ $(CC) $(CFLAGS) $?
+perf_op@o@: $(srcdir)/test_perf/perf_op.c
+ $(CC) $(CFLAGS) $?
+perf_parse@o@: $(srcdir)/test_perf/perf_parse.c
+ $(CC) $(CFLAGS) $?
+perf_rand@o@: $(srcdir)/test_perf/perf_rand.c
+ $(CC) $(CFLAGS) $?
+perf_spawn@o@: $(srcdir)/test_perf/perf_spawn.c
+ $(CC) $(CFLAGS) $?
+perf_thread@o@: $(srcdir)/test_perf/perf_thread.c
+ $(CC) $(CFLAGS) $?
+perf_trickle@o@: $(srcdir)/test_perf/perf_trickle.c
+ $(CC) $(CFLAGS) $?
+perf_txn@o@: $(srcdir)/test_perf/perf_txn.c
+ $(CC) $(CFLAGS) $?
+perf_util@o@: $(srcdir)/test_perf/perf_util.c
+ $(CC) $(CFLAGS) $?
+perf_vx@o@: $(srcdir)/test_perf/perf_vx.c
+ $(CC) $(CFLAGS) $?
+DBPERF_OBJS=\
+ db_perf@o@ perf_cache_check@o@ perf_checkpoint@o@ perf_config@o@ \
+ perf_dbs@o@ perf_debug@o@ perf_file@o@ perf_key@o@ perf_log@o@ \
+ perf_misc@o@ perf_op@o@ perf_parse@o@ perf_rand@o@ \
+ perf_spawn@o@ perf_thread@o@ perf_trickle@o@ perf_txn@o@ \
+ perf_util@o@ perf_vx@o@
+db_perf: $(DBPERF_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+tm@o@: $(srcdir)/mutex/tm.c
+ $(CC) $(CFLAGS) $?
+tm: tm@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C.
+##################################################
+bench_001@o@: $(srcdir)/examples_c/bench_001.c
+ $(CC) $(CFLAGS) $?
+bench_001: bench_001@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) bench_001@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_access@o@: $(srcdir)/examples_c/ex_access.c
+ $(CC) $(CFLAGS) $?
+ex_access: ex_access@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_apprec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec.c
+ $(CC) $(CFLAGS) $?
+ex_apprec_auto@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_auto.c
+ $(CC) $(CFLAGS) $?
+ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c
+ $(CC) $(CFLAGS) $?
+EX_APPREC_OBJS=ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_rec@o@
+ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+
+ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c
+ $(CC) $(CFLAGS) $?
+ex_btrec: ex_btrec@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c
+ $(CC) $(CFLAGS) $?
+ex_dbclient: ex_dbclient@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_env@o@: $(srcdir)/examples_c/ex_env.c
+ $(CC) $(CFLAGS) $?
+ex_env: ex_env@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_lock@o@: $(srcdir)/examples_c/ex_lock.c
+ $(CC) $(CFLAGS) $?
+ex_lock: ex_lock@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c
+ $(CC) $(CFLAGS) $?
+ex_mpool: ex_mpool@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_rq_client@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_client.c
+ $(CC) $(CFLAGS) $?
+ex_rq_main@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_main.c
+ $(CC) $(CFLAGS) $?
+ex_rq_master@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_master.c
+ $(CC) $(CFLAGS) $?
+ex_rq_net@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_net.c
+ $(CC) $(CFLAGS) $?
+ex_rq_util@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_util.c
+ $(CC) $(CFLAGS) $?
+EX_RQ_OBJS=\
+ ex_rq_client@o@ ex_rq_main@o@ ex_rq_master@o@ ex_rq_net@o@ ex_rq_util@o@
+ex_repquote: $(EX_RQ_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+ex_thread@o@: $(srcdir)/examples_c/ex_thread.c
+ $(CC) $(CFLAGS) $?
+ex_thread: ex_thread@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) ex_thread@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c
+ $(CC) $(CFLAGS) $?
+ex_tpcb: ex_tpcb@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C++.
+##################################################
+AccessExample@o@: $(srcdir)/examples_cxx/AccessExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_access: AccessExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) AccessExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+BtRecExample@o@: $(srcdir)/examples_cxx/BtRecExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_btrec: BtRecExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) BtRecExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+EnvExample@o@: $(srcdir)/examples_cxx/EnvExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_env: EnvExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) EnvExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+LockExample@o@: $(srcdir)/examples_cxx/LockExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_lock: LockExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) LockExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+MpoolExample@o@: $(srcdir)/examples_cxx/MpoolExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_mpool: MpoolExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) MpoolExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+TpcbExample@o@: $(srcdir)/examples_cxx/TpcbExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_tpcb: TpcbExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) TpcbExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# C API build rules.
+##################################################
+aes_method@o@: $(srcdir)/crypto/aes_method.c
+ $(CC) $(CFLAGS) $?
+bt_compare@o@: $(srcdir)/btree/bt_compare.c
+ $(CC) $(CFLAGS) $?
+bt_conv@o@: $(srcdir)/btree/bt_conv.c
+ $(CC) $(CFLAGS) $?
+bt_curadj@o@: $(srcdir)/btree/bt_curadj.c
+ $(CC) $(CFLAGS) $?
+bt_cursor@o@: $(srcdir)/btree/bt_cursor.c
+ $(CC) $(CFLAGS) $?
+bt_delete@o@: $(srcdir)/btree/bt_delete.c
+ $(CC) $(CFLAGS) $?
+bt_method@o@: $(srcdir)/btree/bt_method.c
+ $(CC) $(CFLAGS) $?
+bt_open@o@: $(srcdir)/btree/bt_open.c
+ $(CC) $(CFLAGS) $?
+bt_put@o@: $(srcdir)/btree/bt_put.c
+ $(CC) $(CFLAGS) $?
+bt_rec@o@: $(srcdir)/btree/bt_rec.c
+ $(CC) $(CFLAGS) $?
+bt_reclaim@o@: $(srcdir)/btree/bt_reclaim.c
+ $(CC) $(CFLAGS) $?
+bt_recno@o@: $(srcdir)/btree/bt_recno.c
+ $(CC) $(CFLAGS) $?
+bt_rsearch@o@: $(srcdir)/btree/bt_rsearch.c
+ $(CC) $(CFLAGS) $?
+bt_search@o@: $(srcdir)/btree/bt_search.c
+ $(CC) $(CFLAGS) $?
+bt_split@o@: $(srcdir)/btree/bt_split.c
+ $(CC) $(CFLAGS) $?
+bt_stack@o@: $(srcdir)/btree/bt_stack.c
+ $(CC) $(CFLAGS) $?
+bt_stat@o@: $(srcdir)/btree/bt_stat.c
+ $(CC) $(CFLAGS) $?
+bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c
+ $(CC) $(CFLAGS) $?
+bt_verify@o@: $(srcdir)/btree/bt_verify.c
+ $(CC) $(CFLAGS) $?
+btree_auto@o@: $(srcdir)/btree/btree_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_auto@o@: $(srcdir)/db/crdel_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_rec@o@: $(srcdir)/db/crdel_rec.c
+ $(CC) $(CFLAGS) $?
+crypto@o@: $(srcdir)/crypto/crypto.c
+ $(CC) $(CFLAGS) $?
+db185@o@: $(srcdir)/db185/db185.c
+ $(CC) $(CFLAGS) $?
+db@o@: $(srcdir)/db/db.c
+ $(CC) $(CFLAGS) $?
+db_am@o@: $(srcdir)/db/db_am.c
+ $(CC) $(CFLAGS) $?
+db_auto@o@: $(srcdir)/db/db_auto.c
+ $(CC) $(CFLAGS) $?
+db_byteorder@o@: $(srcdir)/common/db_byteorder.c
+ $(CC) $(CFLAGS) $?
+db_cam@o@: $(srcdir)/db/db_cam.c
+ $(CC) $(CFLAGS) $?
+db_conv@o@: $(srcdir)/db/db_conv.c
+ $(CC) $(CFLAGS) $?
+db_dispatch@o@: $(srcdir)/db/db_dispatch.c
+ $(CC) $(CFLAGS) $?
+db_dup@o@: $(srcdir)/db/db_dup.c
+ $(CC) $(CFLAGS) $?
+db_err@o@: $(srcdir)/common/db_err.c
+ $(CC) $(CFLAGS) $?
+db_getlong@o@: $(srcdir)/common/db_getlong.c
+ $(CC) $(CFLAGS) $?
+db_idspace@o@: $(srcdir)/common/db_idspace.c
+ $(CC) $(CFLAGS) $?
+db_iface@o@: $(srcdir)/db/db_iface.c
+ $(CC) $(CFLAGS) $?
+db_join@o@: $(srcdir)/db/db_join.c
+ $(CC) $(CFLAGS) $?
+db_log2@o@: $(srcdir)/common/db_log2.c
+ $(CC) $(CFLAGS) $?
+db_meta@o@: $(srcdir)/db/db_meta.c
+ $(CC) $(CFLAGS) $?
+db_method@o@: $(srcdir)/db/db_method.c
+ $(CC) $(CFLAGS) $?
+db_open@o@: $(srcdir)/db/db_open.c
+ $(CC) $(CFLAGS) $?
+db_overflow@o@: $(srcdir)/db/db_overflow.c
+ $(CC) $(CFLAGS) $?
+db_pr@o@: $(srcdir)/db/db_pr.c
+ $(CC) $(CFLAGS) $?
+db_rec@o@: $(srcdir)/db/db_rec.c
+ $(CC) $(CFLAGS) $?
+db_reclaim@o@: $(srcdir)/db/db_reclaim.c
+ $(CC) $(CFLAGS) $?
+db_rename@o@: $(srcdir)/db/db_rename.c
+ $(CC) $(CFLAGS) $?
+db_remove@o@: $(srcdir)/db/db_remove.c
+ $(CC) $(CFLAGS) $?
+db_ret@o@: $(srcdir)/db/db_ret.c
+ $(CC) $(CFLAGS) $?
+db_salloc@o@: $(srcdir)/env/db_salloc.c
+ $(CC) $(CFLAGS) $?
+db_shash@o@: $(srcdir)/env/db_shash.c
+ $(CC) $(CFLAGS) $?
+db_truncate@o@: $(srcdir)/db/db_truncate.c
+ $(CC) $(CFLAGS) $?
+db_upg@o@: $(srcdir)/db/db_upg.c
+ $(CC) $(CFLAGS) $?
+db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c
+ $(CC) $(CFLAGS) $?
+db_vrfy@o@: $(srcdir)/db/db_vrfy.c
+ $(CC) $(CFLAGS) $?
+db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c
+ $(CC) $(CFLAGS) $?
+dbm@o@: $(srcdir)/dbm/dbm.c
+ $(CC) $(CFLAGS) $?
+dbreg@o@: $(srcdir)/dbreg/dbreg.c
+ $(CC) $(CFLAGS) $?
+dbreg_auto@o@: $(srcdir)/dbreg/dbreg_auto.c
+ $(CC) $(CFLAGS) $?
+dbreg_rec@o@: $(srcdir)/dbreg/dbreg_rec.c
+ $(CC) $(CFLAGS) $?
+dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c
+ $(CC) $(CFLAGS) $?
+env_file@o@: $(srcdir)/env/env_file.c
+ $(CC) $(CFLAGS) $?
+env_method@o@: $(srcdir)/env/env_method.c
+ $(CC) $(CFLAGS) $?
+env_open@o@: $(srcdir)/env/env_open.c
+ $(CC) $(CFLAGS) $?
+env_recover@o@: $(srcdir)/env/env_recover.c
+ $(CC) $(CFLAGS) $?
+env_region@o@: $(srcdir)/env/env_region.c
+ $(CC) $(CFLAGS) $?
+fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c
+ $(CC) $(CFLAGS) $?
+fop_basic@o@: $(srcdir)/fileops/fop_basic.c
+ $(CC) $(CFLAGS) $?
+fop_rec@o@: $(srcdir)/fileops/fop_rec.c
+ $(CC) $(CFLAGS) $?
+fop_util@o@: $(srcdir)/fileops/fop_util.c
+ $(CC) $(CFLAGS) $?
+hash@o@: $(srcdir)/hash/hash.c
+ $(CC) $(CFLAGS) $?
+hash_auto@o@: $(srcdir)/hash/hash_auto.c
+ $(CC) $(CFLAGS) $?
+hash_conv@o@: $(srcdir)/hash/hash_conv.c
+ $(CC) $(CFLAGS) $?
+hash_dup@o@: $(srcdir)/hash/hash_dup.c
+ $(CC) $(CFLAGS) $?
+hash_func@o@: $(srcdir)/hash/hash_func.c
+ $(CC) $(CFLAGS) $?
+hash_meta@o@: $(srcdir)/hash/hash_meta.c
+ $(CC) $(CFLAGS) $?
+hash_method@o@: $(srcdir)/hash/hash_method.c
+ $(CC) $(CFLAGS) $?
+hash_open@o@: $(srcdir)/hash/hash_open.c
+ $(CC) $(CFLAGS) $?
+hash_page@o@: $(srcdir)/hash/hash_page.c
+ $(CC) $(CFLAGS) $?
+hash_rec@o@: $(srcdir)/hash/hash_rec.c
+ $(CC) $(CFLAGS) $?
+hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c
+ $(CC) $(CFLAGS) $?
+hash_stat@o@: $(srcdir)/hash/hash_stat.c
+ $(CC) $(CFLAGS) $?
+hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c
+ $(CC) $(CFLAGS) $?
+hash_verify@o@: $(srcdir)/hash/hash_verify.c
+ $(CC) $(CFLAGS) $?
+hmac@o@: $(srcdir)/hmac/hmac.c
+ $(CC) $(CFLAGS) $?
+hsearch@o@: $(srcdir)/hsearch/hsearch.c
+ $(CC) $(CFLAGS) $?
+lock@o@: $(srcdir)/lock/lock.c
+ $(CC) $(CFLAGS) $?
+lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
+ $(CC) $(CFLAGS) $?
+lock_method@o@:$(srcdir)/lock/lock_method.c
+ $(CC) $(CFLAGS) $?
+lock_region@o@:$(srcdir)/lock/lock_region.c
+ $(CC) $(CFLAGS) $?
+lock_stat@o@:$(srcdir)/lock/lock_stat.c
+ $(CC) $(CFLAGS) $?
+lock_util@o@:$(srcdir)/lock/lock_util.c
+ $(CC) $(CFLAGS) $?
+log@o@: $(srcdir)/log/log.c
+ $(CC) $(CFLAGS) $?
+log_archive@o@: $(srcdir)/log/log_archive.c
+ $(CC) $(CFLAGS) $?
+log_compare@o@: $(srcdir)/log/log_compare.c
+ $(CC) $(CFLAGS) $?
+log_get@o@: $(srcdir)/log/log_get.c
+ $(CC) $(CFLAGS) $?
+log_method@o@: $(srcdir)/log/log_method.c
+ $(CC) $(CFLAGS) $?
+log_put@o@: $(srcdir)/log/log_put.c
+ $(CC) $(CFLAGS) $?
+mp_alloc@o@: $(srcdir)/mp/mp_alloc.c
+ $(CC) $(CFLAGS) $?
+mp_bh@o@: $(srcdir)/mp/mp_bh.c
+ $(CC) $(CFLAGS) $?
+mp_fget@o@: $(srcdir)/mp/mp_fget.c
+ $(CC) $(CFLAGS) $?
+mp_fopen@o@: $(srcdir)/mp/mp_fopen.c
+ $(CC) $(CFLAGS) $?
+mp_fput@o@: $(srcdir)/mp/mp_fput.c
+ $(CC) $(CFLAGS) $?
+mp_fset@o@: $(srcdir)/mp/mp_fset.c
+ $(CC) $(CFLAGS) $?
+mp_method@o@: $(srcdir)/mp/mp_method.c
+ $(CC) $(CFLAGS) $?
+mp_region@o@: $(srcdir)/mp/mp_region.c
+ $(CC) $(CFLAGS) $?
+mp_register@o@: $(srcdir)/mp/mp_register.c
+ $(CC) $(CFLAGS) $?
+mp_stat@o@: $(srcdir)/mp/mp_stat.c
+ $(CC) $(CFLAGS) $?
+mp_sync@o@: $(srcdir)/mp/mp_sync.c
+ $(CC) $(CFLAGS) $?
+mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
+ $(CC) $(CFLAGS) $?
+mt19937db@o@: $(srcdir)/crypto/mersenne/mt19937db.c
+ $(CC) $(CFLAGS) $?
+mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
+ $(CC) $(CFLAGS) $?
+mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
+ $(CC) $(CFLAGS) $?
+mut_tas@o@: $(srcdir)/mutex/mut_tas.c
+ $(CC) $(CFLAGS) $?
+mutex@o@: $(srcdir)/mutex/mutex.c
+ $(CC) $(CFLAGS) $?
+os_abs@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_abs.c
+ $(CC) $(CFLAGS) $?
+os_alloc@o@: $(srcdir)/os/os_alloc.c
+ $(CC) $(CFLAGS) $?
+os_clock@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_clock.c
+ $(CC) $(CFLAGS) $?
+os_config@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_config.c
+ $(CC) $(CFLAGS) $?
+os_dir@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_dir.c
+ $(CC) $(CFLAGS) $?
+os_errno@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_errno.c
+ $(CC) $(CFLAGS) $?
+os_fid@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_fid.c
+ $(CC) $(CFLAGS) $?
+os_fsync@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_fsync.c
+ $(CC) $(CFLAGS) $?
+os_id@o@: $(srcdir)/os/os_id.c
+ $(CC) $(CFLAGS) $?
+os_handle@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_handle.c
+ $(CC) $(CFLAGS) $?
+os_map@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_map.c
+ $(CC) $(CFLAGS) $?
+os_method@o@: $(srcdir)/os/os_method.c
+ $(CC) $(CFLAGS) $?
+os_oflags@o@: $(srcdir)/os/os_oflags.c
+ $(CC) $(CFLAGS) $?
+os_open@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_open.c
+ $(CC) $(CFLAGS) $?
+os_region@o@: $(srcdir)/os/os_region.c
+ $(CC) $(CFLAGS) $?
+os_rename@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_rename.c
+ $(CC) $(CFLAGS) $?
+os_root@o@: $(srcdir)/os/os_root.c
+ $(CC) $(CFLAGS) $?
+os_rpath@o@: $(srcdir)/os/os_rpath.c
+ $(CC) $(CFLAGS) $?
+os_rw@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_rw.c
+ $(CC) $(CFLAGS) $?
+os_seek@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_seek.c
+ $(CC) $(CFLAGS) $?
+os_sleep@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_sleep.c
+ $(CC) $(CFLAGS) $?
+os_spin@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_spin.c
+ $(CC) $(CFLAGS) $?
+os_stat@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_stat.c
+ $(CC) $(CFLAGS) $?
+os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c
+ $(CC) $(CFLAGS) $?
+os_type@o@: $(srcdir)/os@MAKEFILE_MAYBE_WIN32@/os_type.c
+ $(CC) $(CFLAGS) $?
+os_unlink@o@: $(srcdir)/os/os_unlink.c
+ $(CC) $(CFLAGS) $?
+qam@o@: $(srcdir)/qam/qam.c
+ $(CC) $(CFLAGS) $?
+qam_auto@o@: $(srcdir)/qam/qam_auto.c
+ $(CC) $(CFLAGS) $?
+qam_conv@o@: $(srcdir)/qam/qam_conv.c
+ $(CC) $(CFLAGS) $?
+qam_files@o@: $(srcdir)/qam/qam_files.c
+ $(CC) $(CFLAGS) $?
+qam_method@o@: $(srcdir)/qam/qam_method.c
+ $(CC) $(CFLAGS) $?
+qam_open@o@: $(srcdir)/qam/qam_open.c
+ $(CC) $(CFLAGS) $?
+qam_rec@o@: $(srcdir)/qam/qam_rec.c
+ $(CC) $(CFLAGS) $?
+qam_stat@o@: $(srcdir)/qam/qam_stat.c
+ $(CC) $(CFLAGS) $?
+qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c
+ $(CC) $(CFLAGS) $?
+qam_verify@o@: $(srcdir)/qam/qam_verify.c
+ $(CC) $(CFLAGS) $?
+rep_method@o@: $(srcdir)/rep/rep_method.c
+ $(CC) $(CFLAGS) $?
+rep_record@o@: $(srcdir)/rep/rep_record.c
+ $(CC) $(CFLAGS) $?
+rep_region@o@: $(srcdir)/rep/rep_region.c
+ $(CC) $(CFLAGS) $?
+rep_util@o@: $(srcdir)/rep/rep_util.c
+ $(CC) $(CFLAGS) $?
+rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c
+ $(CC) $(CFLAGS) $?
+rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c
+ $(CC) $(CFLAGS) $?
+sha1@o@: $(srcdir)/hmac/sha1.c
+ $(CC) $(CFLAGS) $?
+txn@o@: $(srcdir)/txn/txn.c
+ $(CC) $(CFLAGS) $?
+txn_auto@o@: $(srcdir)/txn/txn_auto.c
+ $(CC) $(CFLAGS) $?
+txn_method@o@: $(srcdir)/txn/txn_method.c
+ $(CC) $(CFLAGS) $?
+txn_rec@o@: $(srcdir)/txn/txn_rec.c
+ $(CC) $(CFLAGS) $?
+txn_recover@o@: $(srcdir)/txn/txn_recover.c
+ $(CC) $(CFLAGS) $?
+txn_region@o@: $(srcdir)/txn/txn_region.c
+ $(CC) $(CFLAGS) $?
+txn_stat@o@: $(srcdir)/txn/txn_stat.c
+ $(CC) $(CFLAGS) $?
+txn_util@o@: $(srcdir)/txn/txn_util.c
+ $(CC) $(CFLAGS) $?
+util_cache@o@: $(srcdir)/common/util_cache.c
+ $(CC) $(CFLAGS) $?
+util_log@o@: $(srcdir)/common/util_log.c
+ $(CC) $(CFLAGS) $?
+util_sig@o@: $(srcdir)/common/util_sig.c
+ $(CC) $(CFLAGS) $?
+uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
+ $(AS) $(ASFLAGS) -o $@ $?
+xa@o@: $(srcdir)/xa/xa.c
+ $(CC) $(CFLAGS) $?
+xa_db@o@: $(srcdir)/xa/xa_db.c
+ $(CC) $(CFLAGS) $?
+xa_map@o@: $(srcdir)/xa/xa_map.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# C++ API build rules.
+##################################################
+cxx_db@o@: $(srcdir)/cxx/cxx_db.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbc@o@: $(srcdir)/cxx/cxx_dbc.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbt@o@: $(srcdir)/cxx/cxx_dbt.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_env@o@: $(srcdir)/cxx/cxx_env.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_except@o@: $(srcdir)/cxx/cxx_except.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_lock@o@: $(srcdir)/cxx/cxx_lock.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_logc@o@: $(srcdir)/cxx/cxx_logc.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp
+ $(CXX) $(CXXFLAGS) $?
+
+##################################################
+# Java API build rules.
+##################################################
+java_Db@o@::$(srcdir)/libdb_java/java_Db.c
+ $(CC) $(CFLAGS) $?
+java_DbEnv@o@: $(srcdir)/libdb_java/java_DbEnv.c
+ $(CC) $(CFLAGS) $?
+java_DbLock@o@: $(srcdir)/libdb_java/java_DbLock.c
+ $(CC) $(CFLAGS) $?
+java_DbLogc@o@: $(srcdir)/libdb_java/java_DbLogc.c
+ $(CC) $(CFLAGS) $?
+java_DbLsn@o@: $(srcdir)/libdb_java/java_DbLsn.c
+ $(CC) $(CFLAGS) $?
+java_DbTxn@o@: $(srcdir)/libdb_java/java_DbTxn.c
+ $(CC) $(CFLAGS) $?
+java_DbUtil@o@: $(srcdir)/libdb_java/java_DbUtil.c
+ $(CC) $(CFLAGS) $?
+java_Dbc@o@: $(srcdir)/libdb_java/java_Dbc.c
+ $(CC) $(CFLAGS) $?
+java_Dbt@o@: $(srcdir)/libdb_java/java_Dbt.c
+ $(CC) $(CFLAGS) $?
+java_info@o@: $(srcdir)/libdb_java/java_info.c
+ $(CC) $(CFLAGS) $?
+java_locked@o@: $(srcdir)/libdb_java/java_locked.c
+ $(CC) $(CFLAGS) $?
+java_util@o@: $(srcdir)/libdb_java/java_util.c
+ $(CC) $(CFLAGS) $?
+java_stat_auto@o@: $(srcdir)/libdb_java/java_stat_auto.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# Tcl API build rules.
+##################################################
+tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db@o@: $(srcdir)/tcl/tcl_db.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db_pkg@o@: $(srcdir)/tcl/tcl_db_pkg.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_dbcursor@o@: $(srcdir)/tcl/tcl_dbcursor.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_env@o@: $(srcdir)/tcl/tcl_env.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_internal@o@: $(srcdir)/tcl/tcl_internal.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_lock@o@: $(srcdir)/tcl/tcl_lock.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_log@o@: $(srcdir)/tcl/tcl_log.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_util@o@: $(srcdir)/tcl/tcl_util.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+
+##################################################
+# RPC build rules.
+##################################################
+# RPC client files
+client@o@: $(srcdir)/rpc_client/client.c
+ $(CC) $(CFLAGS) $?
+db_server_clnt@o@: $(srcdir)/rpc_client/db_server_clnt.c
+ $(CC) $(CFLAGS) $?
+gen_client@o@: $(srcdir)/rpc_client/gen_client.c
+ $(CC) $(CFLAGS) $?
+gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c
+ $(CC) $(CFLAGS) $?
+
+# RPC server files
+db_server_proc@o@: $(srcdir)/rpc_server/c/db_server_proc.c
+ $(CC) $(CFLAGS) $?
+db_server_svc@o@: $(srcdir)/rpc_server/c/db_server_svc.c
+ $(CC) $(CFLAGS) $?
+db_server_util@o@: $(srcdir)/rpc_server/c/db_server_util.c
+ $(CC) $(CFLAGS) $?
+db_server_xdr@o@: $(srcdir)/rpc_server/c/db_server_xdr.c
+ $(CC) $(CFLAGS) $?
+gen_db_server@o@: $(srcdir)/rpc_server/c/gen_db_server.c
+ $(CC) $(CFLAGS) $?
+db_server_cxxproc@o@: $(srcdir)/rpc_server/cxx/db_server_cxxproc.cpp
+ $(CXX) $(CXXFLAGS) $?
+db_server_cxxutil@o@: $(srcdir)/rpc_server/cxx/db_server_cxxutil.cpp
+ $(CXX) $(CXXFLAGS) $?
+
+##################################################
+# Utility build rules.
+##################################################
+db_archive@o@: $(srcdir)/db_archive/db_archive.c
+ $(CC) $(CFLAGS) $?
+db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
+ $(CC) $(CFLAGS) $?
+db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
+ $(CC) $(CFLAGS) $?
+db_dump@o@: $(srcdir)/db_dump/db_dump.c
+ $(CC) $(CFLAGS) $?
+db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
+ $(CC) $(DB185INC) $?
+db_load@o@: $(srcdir)/db_load/db_load.c
+ $(CC) $(CFLAGS) $?
+db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
+ $(CC) $(CFLAGS) $?
+db_recover@o@: $(srcdir)/db_recover/db_recover.c
+ $(CC) $(CFLAGS) $?
+db_stat@o@: $(srcdir)/db_stat/db_stat.c
+ $(CC) $(CFLAGS) $?
+db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
+ $(CC) $(CFLAGS) $?
+db_verify@o@: $(srcdir)/db_verify/db_verify.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# C library replacement files.
+##################################################
+getcwd@o@: $(srcdir)/clib/getcwd.c
+ $(CC) $(CFLAGS) $?
+getopt@o@: $(srcdir)/clib/getopt.c
+ $(CC) $(CFLAGS) $?
+memcmp@o@: $(srcdir)/clib/memcmp.c
+ $(CC) $(CFLAGS) $?
+memcpy@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMCOPY $(CFLAGS) $? -o $@
+memmove@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMMOVE $(CFLAGS) $?
+raise@o@: $(srcdir)/clib/raise.c
+ $(CC) $(CFLAGS) $?
+strcasecmp@o@: $(srcdir)/clib/strcasecmp.c
+ $(CC) $(CFLAGS) $?
+strdup@o@: $(srcdir)/clib/strdup.c
+ $(CC) $(CFLAGS) $?
+snprintf@o@: $(srcdir)/clib/snprintf.c
+ $(CC) $(CFLAGS) $?
+strerror@o@: $(srcdir)/clib/strerror.c
+ $(CC) $(CFLAGS) $?
+vsnprintf@o@: $(srcdir)/clib/vsnprintf.c
+ $(CC) $(CFLAGS) $?
diff --git a/libdb/dist/NO-AUTO-GEN b/libdb/dist/NO-AUTO-GEN
new file mode 100644
index 0000000..e69de29
diff --git a/libdb/dist/RELEASE b/libdb/dist/RELEASE
new file mode 100644
index 0000000..b091172
--- /dev/null
+++ b/libdb/dist/RELEASE
@@ -0,0 +1,11 @@
+# $Id$
+
+DB_VERSION_MAJOR=4
+DB_VERSION_MINOR=1
+DB_VERSION_PATCH=25
+DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH"
+
+DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR`
+
+DB_RELEASE_DATE=`date "+%B %e, %Y"`
+DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION: ($DB_RELEASE_DATE)"
diff --git a/libdb/dist/aclocal/config.ac b/libdb/dist/aclocal/config.ac
new file mode 100644
index 0000000..cd28842
--- /dev/null
+++ b/libdb/dist/aclocal/config.ac
@@ -0,0 +1,51 @@
+# Features we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_VXWORKS, [Define to 1 if building VxWorks.])
+
+AH_TEMPLATE(HAVE_FILESYSTEM_NOTZERO,
+ [Define to 1 if allocated filesystem blocks are not zeroed.])
+
+AH_TEMPLATE(HAVE_UNLINK_WITH_OPEN_FAILURE,
+ [Define to 1 if unlink of file with open file descriptors will fail.])
+
+AH_BOTTOM([/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif])
diff --git a/libdb/dist/aclocal/cxx.ac b/libdb/dist/aclocal/cxx.ac
new file mode 100644
index 0000000..49103cc
--- /dev/null
+++ b/libdb/dist/aclocal/cxx.ac
@@ -0,0 +1,17 @@
+# C++ checks to determine what style of headers to use and
+# whether to use "using" clauses.
+
+AC_DEFUN(AC_CXX_HAVE_STDHEADERS, [
+AC_SUBST(cxx_have_stdheaders)
+AC_CACHE_CHECK([whether C++ supports the ISO C++ standard includes],
+db_cv_cxx_have_stdheaders,
+[AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
+ AC_TRY_COMPILE([#include <iostream>
+],[std::ostream *o; return 0;],
+ db_cv_cxx_have_stdheaders=yes, db_cv_cxx_have_stdheaders=no)
+ AC_LANG_RESTORE
+])
+if test "$db_cv_cxx_have_stdheaders" = yes; then
+ cxx_have_stdheaders="#define HAVE_CXX_STDHEADERS 1"
+fi])
diff --git a/libdb/dist/aclocal/gcc.ac b/libdb/dist/aclocal/gcc.ac
new file mode 100644
index 0000000..0949d98
--- /dev/null
+++ b/libdb/dist/aclocal/gcc.ac
@@ -0,0 +1,36 @@
+# Version 2.96 of gcc (shipped with RedHat Linux 7.[01] and Mandrake) had
+# serious problems.
+AC_DEFUN(AC_GCC_CONFIG1, [
+AC_CACHE_CHECK([whether we are using gcc version 2.96],
+db_cv_gcc_2_96, [
+db_cv_gcc_2_96=no
+if test "$GCC" = "yes"; then
+ GCC_VERSION=`${MAKEFILE_CC} --version`
+ case ${GCC_VERSION} in
+ 2.96*)
+ db_cv_gcc_2_96=yes;;
+ esac
+fi])
+if test "$db_cv_gcc_2_96" = "yes"; then
+ CFLAGS=`echo "$CFLAGS" | sed 's/-O2/-O/'`
+ CXXFLAGS=`echo "$CXXFLAGS" | sed 's/-O2/-O/'`
+ AC_MSG_WARN([INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE.])
+ AC_MSG_WARN([GCC OPTIMIZATION LEVEL SET TO -O.])
+fi])
+
+# Versions of g++ before 2.8.0 required -fhandle-exceptions; the option was
+# renamed to -fexceptions and is the default in versions 2.8.0 and later.
+AC_DEFUN(AC_GCC_CONFIG2, [
+AC_CACHE_CHECK([whether g++ requires -fhandle-exceptions],
+db_cv_gxx_except, [
+db_cv_gxx_except=no;
+if test "$GXX" = "yes"; then
+ GXX_VERSION=`${MAKEFILE_CXX} --version`
+ case ${GXX_VERSION} in
+ 1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].*)
+ db_cv_gxx_except=yes;;
+ esac
+fi])
+if test "$db_cv_gxx_except" = "yes"; then
+ CXXFLAGS="$CXXFLAGS -fhandle-exceptions"
+fi])
diff --git a/libdb/dist/aclocal/libtool.ac b/libdb/dist/aclocal/libtool.ac
new file mode 100644
index 0000000..7522fa0
--- /dev/null
+++ b/libdb/dist/aclocal/libtool.ac
@@ -0,0 +1,3633 @@
+# libtool.m4 - Configure libtool for the host system. -*-Shell-script-*-
+## Copyright 1996, 1997, 1998, 1999, 2000, 2001
+## Free Software Foundation, Inc.
+## Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+##
+## As a special exception to the GNU General Public License, if you
+## distribute this file as part of a program that contains a
+## configuration script generated by Autoconf, you may include it under
+## the same distribution terms that you use for the rest of that program.
+
+# serial 46 AC_PROG_LIBTOOL
+
+AC_DEFUN([AC_PROG_LIBTOOL],
+[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+# Prevent multiple expansion
+define([AC_PROG_LIBTOOL], [])
+])
+
+AC_DEFUN([AC_LIBTOOL_SETUP],
+[AC_PREREQ(2.13)dnl
+AC_REQUIRE([AC_ENABLE_SHARED])dnl
+AC_REQUIRE([AC_ENABLE_STATIC])dnl
+AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_LD])dnl
+AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl
+AC_REQUIRE([AC_PROG_NM])dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl
+AC_REQUIRE([AC_OBJEXT])dnl
+AC_REQUIRE([AC_EXEEXT])dnl
+dnl
+
+_LT_AC_PROG_ECHO_BACKSLASH
+# Only perform the check for file, if the check method requires it
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ AC_PATH_MAGIC
+ fi
+ ;;
+esac
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+AC_CHECK_TOOL(STRIP, strip, :)
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no)
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+enable_win32_dll=yes, enable_win32_dll=no)
+
+AC_ARG_ENABLE(libtool-lock,
+ [ --disable-libtool-lock avoid locking (might break parallel builds)])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '[#]line __oline__ "configure"' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+ [AC_LANG_SAVE
+ AC_LANG_C
+ AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+ AC_LANG_RESTORE])
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+[*-*-cygwin* | *-*-mingw* | *-*-pw32*)
+ AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+ AC_CHECK_TOOL(AS, as, false)
+ AC_CHECK_TOOL(OBJDUMP, objdump, false)
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one
+ AC_CACHE_CHECK([if libtool should supply DllMain function], lt_cv_need_dllmain,
+ [AC_TRY_LINK([],
+ [extern int __attribute__((__stdcall__)) DllMain(void*, int, void*);
+ DllMain (0, 0, 0);],
+ [lt_cv_need_dllmain=no],[lt_cv_need_dllmain=yes])])
+
+ case $host/$CC in
+ *-*-cygwin*/gcc*-mno-cygwin*|*-*-mingw*)
+ # old mingw systems require "-dll" to link a DLL, while more recent ones
+ # require "-mdll"
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -mdll"
+ AC_CACHE_CHECK([how to link DLLs], lt_cv_cc_dll_switch,
+ [AC_TRY_LINK([], [], [lt_cv_cc_dll_switch=-mdll],[lt_cv_cc_dll_switch=-dll])])
+ CFLAGS="$SAVE_CFLAGS" ;;
+ *-*-cygwin* | *-*-pw32*)
+ # cygwin systems need to pass --dll to the linker, and not link
+ # crt.o which will require a WinMain@16 definition.
+ lt_cv_cc_dll_switch="-Wl,--dll -nostartfiles" ;;
+ esac
+ ;;
+ ])
+esac
+
+_LT_AC_LTCONFIG_HACK
+
+])
+
+# AC_LIBTOOL_HEADER_ASSERT
+# ------------------------
+AC_DEFUN([AC_LIBTOOL_HEADER_ASSERT],
+[AC_CACHE_CHECK([whether $CC supports assert without backlinking],
+ [lt_cv_func_assert_works],
+ [case $host in
+ *-*-solaris*)
+ if test "$GCC" = yes && test "$with_gnu_ld" != yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*) lt_cv_func_assert_works=no ;;
+ *) lt_cv_func_assert_works=yes ;;
+ esac
+ fi
+ ;;
+ esac])
+
+if test "x$lt_cv_func_assert_works" = xyes; then
+ AC_CHECK_HEADERS(assert.h)
+fi
+])# AC_LIBTOOL_HEADER_ASSERT
+
+# _LT_AC_CHECK_DLFCN
+# --------------------
+AC_DEFUN([_LT_AC_CHECK_DLFCN],
+[AC_CHECK_HEADERS(dlfcn.h)
+])# _LT_AC_CHECK_DLFCN
+
+# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+# ---------------------------------
+AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE],
+[AC_REQUIRE([AC_CANONICAL_HOST])
+AC_REQUIRE([AC_PROG_NM])
+AC_REQUIRE([AC_OBJEXT])
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [dnl
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Transform the above into a raw symbol and a C symbol.
+symxfrm='\1 \2\3 \3'
+
+# Transform an extracted symbol line into a proper C declaration
+lt_cv_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[[BCDT]]'
+ ;;
+cygwin* | mingw* | pw32*)
+ symcode='[[ABCDGISTW]]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ lt_cv_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ ;;
+irix*)
+ symcode='[[BCDEGRST]]'
+ ;;
+solaris* | sysv5*)
+ symcode='[[BDT]]'
+ ;;
+sysv4)
+ symcode='[[DFNSTU]]'
+ ;;
+esac
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $host_os in
+mingw*)
+ opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
+ symcode='[[ABCDGISTW]]'
+fi
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Write the raw and C identifiers.
+lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*\($ac_symprfx\)$sympat$opt_cr$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+ rm -f conftest*
+ cat > conftest.$ac_ext <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if egrep ' nm_test_var$' "$nlist" >/dev/null; then
+ if egrep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_global_symbol_to_cdecl"' < "$nlist" >> conftest.$ac_ext'
+
+ cat <<EOF >> conftest.$ac_ext
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[[]] =
+{
+EOF
+ sed "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr) \&\2},/" < "$nlist" >> conftest.$ac_ext
+ cat <<\EOF >> conftest.$ac_ext
+ {0, (lt_ptr) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$no_builtin_flag"
+ if AC_TRY_EVAL(ac_link) && test -s conftest; then
+ pipe_works=yes
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AC_FD_CC
+ fi
+ else
+ echo "$progname: failed program was:" >&AC_FD_CC
+ cat conftest.$ac_ext >&5
+ fi
+ rm -f conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+])
+global_symbol_pipe="$lt_cv_sys_global_symbol_pipe"
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ global_symbol_to_cdecl=
+ global_symbol_to_c_name_address=
+else
+ global_symbol_to_cdecl="$lt_cv_global_symbol_to_cdecl"
+ global_symbol_to_c_name_address="$lt_cv_global_symbol_to_c_name_address"
+fi
+if test -z "$global_symbol_pipe$global_symbol_to_cdecl$global_symbol_to_c_name_address";
+then
+ AC_MSG_RESULT(failed)
+else
+ AC_MSG_RESULT(ok)
+fi
+]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+
+# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+# ---------------------------------
+AC_DEFUN([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR],
+[# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) lt_cv_sys_path_separator=';' ;;
+ *) lt_cv_sys_path_separator=':' ;;
+ esac
+ PATH_SEPARATOR=$lt_cv_sys_path_separator
+fi
+])# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# _LT_AC_PROG_ECHO_BACKSLASH
+# --------------------------
+# Add some code to the start of the generated configure script that
+# will find an echo command which doesn't interpret backslashes.
+AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH],
+[ifdef([AC_DIVERSION_NOTICE], [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)],
+ [AC_DIVERT_PUSH(NOTICE)])
+_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','`
+ ;;
+esac
+
+echo=${ECHO-echo}
+if test "X[$]1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X[$]1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
+fi
+
+if test "X[$]1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+if test -z "$ECHO"; then
+if test "X${echo_test_string+set}" != Xset; then
+# find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string="`eval $cmd`") 2>/dev/null &&
+ echo_test_string="`eval $cmd`" &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
+ then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
+ else
+ # Try using printf.
+ echo='printf %s\n'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do
+ if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "[$]0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ echo=echo
+ fi
+ fi
+ fi
+ fi
+fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+ECHO=$echo
+if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then
+ ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo"
+fi
+
+AC_SUBST(ECHO)
+AC_DIVERT_POP
+])# _LT_AC_PROG_ECHO_BACKSLASH
+
+# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ------------------------------------------------------------------
+AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF],
+[if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+
+ exit (status);
+}]
+EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) $1 ;;
+ x$lt_dlneed_uscore) $2 ;;
+      x$lt_dlunknown|x*) $3 ;;
+ esac
+ else :
+ # compilation failed
+ $3
+ fi
+fi
+rm -fr conftest*
+])# _LT_AC_TRY_DLOPEN_SELF
+
+# AC_LIBTOOL_DLOPEN_SELF
+# -------------------
+AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF],
+[if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ *)
+ AC_CHECK_FUNC([shl_load],
+ [lt_cv_dlopen="shl_load"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"],
+ [AC_CHECK_FUNC([dlopen],
+ [lt_cv_dlopen="dlopen"],
+ [AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+ [AC_CHECK_LIB([dld], [dld_link],
+ [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"])
+ ])
+ ])
+ ])
+ ])
+ ])
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ AC_CACHE_CHECK([whether a program can dlopen itself],
+ lt_cv_dlopen_self, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+ ])
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ LDFLAGS="$LDFLAGS $link_static_flag"
+ AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+ lt_cv_dlopen_self_static, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
+ ])
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+])# AC_LIBTOOL_DLOPEN_SELF
+
+AC_DEFUN([_LT_AC_LTCONFIG_HACK],
+[AC_REQUIRE([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])dnl
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e s/^X//'
+sed_quote_subst='s/\([[\\"\\`$\\\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([[\\"\\`\\\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Constants:
+rm="rm -f"
+
+# Global variables:
+default_ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except M$VC,
+# which needs '.lib').
+libext=a
+ltmain="$ac_aux_dir/ltmain.sh"
+ofile="$default_ofile"
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+need_locks="$enable_libtool_lock"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+test -z "$AS" && AS=as
+test -z "$CC" && CC=cc
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+test -z "$LD" && LD=ld
+test -z "$LN_S" && LN_S="ln -s"
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+test -z "$NM" && NM=nm
+test -z "$OBJDUMP" && OBJDUMP=objdump
+test -z "$RANLIB" && RANLIB=:
+test -z "$STRIP" && STRIP=:
+test -z "$ac_objext" && ac_objext=o
+
+if test x"$host" != x"$build"; then
+ ac_tool_prefix=${host_alias}-
+else
+ ac_tool_prefix=
+fi
+
+# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
+case $host_os in
+linux-gnu*) ;;
+linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
+esac
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds"
+ ;;
+ *)
+ old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
+
+# Allow CC to be a program name with arguments.
+set dummy $CC
+compiler="[$]2"
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([for objdir])
+rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+AC_MSG_RESULT($objdir)
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+AC_ARG_WITH(pic,
+[ --with-pic try to use only PIC/non-PIC objects [default=use both]],
+pic_mode="$withval", pic_mode=default)
+test -z "$pic_mode" && pic_mode=default
+
+# We assume here that the value for lt_cv_prog_cc_pic will not be cached
+# in isolation, and that seeing it set (from the cache) indicates that
+# the associated values are set (in the cache) correctly too.
+AC_MSG_CHECKING([for $compiler option to produce PIC])
+AC_CACHE_VAL(lt_cv_prog_cc_pic,
+[ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_shlib=
+ lt_cv_prog_cc_wl=
+ lt_cv_prog_cc_static=
+ lt_cv_prog_cc_no_builtin=
+ lt_cv_prog_cc_can_build_shared=$can_build_shared
+
+ if test "$GCC" = yes; then
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-static'
+
+ case $host_os in
+ aix*)
+ # Below there is a dirty hack to force normal static linking with -ldl
+      # The problem is that libdl is dynamically linked with both libc and
+      # libC (the AIX C++ library), which gcc obviously does not include in
+      # its library list.  This causes undefined symbols with -static flags.
+      # This hack allows C programs to be linked with "-static -ldl", but
+      # we are not sure it works for C++ programs.
+ lt_cv_prog_cc_static="$lt_cv_prog_cc_static ${lt_cv_prog_cc_wl}-lC"
+ ;;
+ amigaos*)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_cv_prog_cc_pic='-fno-common'
+ ;;
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_cv_prog_cc_pic=-Kconform_pic
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for PIC flags for the system compiler.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ lt_cv_prog_cc_wl='-Wl,'
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_cv_prog_cc_static='-Bstatic'
+ else
+ lt_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ # Is there a better lt_cv_prog_cc_static that works with the bundled CC?
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static="${lt_cv_prog_cc_wl}-a ${lt_cv_prog_cc_wl}archive"
+ lt_cv_prog_cc_pic='+Z'
+ ;;
+
+ irix5* | irix6*)
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ # PIC (with -KPIC) is the default.
+ ;;
+
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+
+ newsos6)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ # All OSF/1 code is PIC.
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ ;;
+
+ sco3.2v5*)
+ lt_cv_prog_cc_pic='-Kpic'
+ lt_cv_prog_cc_static='-dn'
+ lt_cv_prog_cc_shlib='-belf'
+ ;;
+
+ solaris*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Wl,'
+ ;;
+
+ sunos4*)
+ lt_cv_prog_cc_pic='-PIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Qoption ld '
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ if test "x$host_vendor" = xsni; then
+ lt_cv_prog_cc_wl='-LD'
+ else
+ lt_cv_prog_cc_wl='-Wl,'
+ fi
+ ;;
+
+ uts4*)
+ lt_cv_prog_cc_pic='-pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_cv_prog_cc_pic='-Kconform_pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ fi
+ ;;
+
+ *)
+ lt_cv_prog_cc_can_build_shared=no
+ ;;
+ esac
+ fi
+])
+if test -z "$lt_cv_prog_cc_pic"; then
+ AC_MSG_RESULT([none])
+else
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic])
+
+ # Check to make sure the pic_flag actually works.
+ AC_MSG_CHECKING([if $compiler PIC flag $lt_cv_prog_cc_pic works])
+ AC_CACHE_VAL(lt_cv_prog_cc_pic_works, [dnl
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $lt_cv_prog_cc_pic -DPIC"
+ AC_TRY_COMPILE([], [], [dnl
+ case $host_os in
+ hpux9* | hpux10* | hpux11*)
+ # On HP-UX, both CC and GCC only warn that PIC is supported... then
+ # they create non-PIC objects. So, if there were any warnings, we
+ # assume that PIC is not supported.
+ if test -s conftest.err; then
+ lt_cv_prog_cc_pic_works=no
+ else
+ lt_cv_prog_cc_pic_works=yes
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic_works=yes
+ ;;
+ esac
+ ], [dnl
+ lt_cv_prog_cc_pic_works=no
+ ])
+ CFLAGS="$save_CFLAGS"
+ ])
+
+ if test "X$lt_cv_prog_cc_pic_works" = Xno; then
+ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_can_build_shared=no
+ else
+ lt_cv_prog_cc_pic=" $lt_cv_prog_cc_pic"
+ fi
+
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic_works])
+fi
+##
+## END FIXME
+
+# Check for any special shared library compilation flags.
+if test -n "$lt_cv_prog_cc_shlib"; then
+ AC_MSG_WARN([\`$CC' requires \`$lt_cv_prog_cc_shlib' to build shared libraries])
+ if echo "$old_CC $old_CFLAGS " | egrep -e "[[ ]]$lt_cv_prog_cc_shlib[[ ]]" >/dev/null; then :
+ else
+ AC_MSG_WARN([add \`$lt_cv_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure])
+ lt_cv_prog_cc_can_build_shared=no
+ fi
+fi
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([if $compiler static flag $lt_cv_prog_cc_static works])
+AC_CACHE_VAL([lt_cv_prog_cc_static_works], [dnl
+ lt_cv_prog_cc_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_cv_prog_cc_static"
+ AC_TRY_LINK([], [], [lt_cv_prog_cc_static_works=yes])
+ LDFLAGS="$save_LDFLAGS"
+])
+
+# Belt *and* braces to stop my trousers falling down:
+test "X$lt_cv_prog_cc_static_works" = Xno && lt_cv_prog_cc_static=
+AC_MSG_RESULT([$lt_cv_prog_cc_static_works])
+
+pic_flag="$lt_cv_prog_cc_pic"
+special_shlib_compile_flags="$lt_cv_prog_cc_shlib"
+wl="$lt_cv_prog_cc_wl"
+link_static_flag="$lt_cv_prog_cc_static"
+no_builtin_flag="$lt_cv_prog_cc_no_builtin"
+can_build_shared="$lt_cv_prog_cc_can_build_shared"
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+# Check to see if options -o and -c are simultaneously supported by compiler
+AC_MSG_CHECKING([if $compiler supports -c -o file.$ac_objext])
+AC_CACHE_VAL([lt_cv_compiler_c_o], [
+$rm -r conftest 2>/dev/null
+mkdir conftest
+cd conftest
+echo "int some_variable = 0;" > conftest.$ac_ext
+mkdir out
+# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
+# that will create temporary files in the current directory regardless of
+# the output directory. Thus, making CWD read-only will cause this test
+# to fail, enabling locking or at least warning the user not to do parallel
+# builds.
+chmod -w .
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -o out/conftest2.$ac_objext"
+compiler_c_o=no
+if { (eval echo configure:__oline__: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s out/conftest.err; then
+ lt_cv_compiler_c_o=no
+ else
+ lt_cv_compiler_c_o=yes
+ fi
+else
+ # Append any errors to the config.log.
+ cat out/conftest.err 1>&AC_FD_CC
+ lt_cv_compiler_c_o=no
+fi
+CFLAGS="$save_CFLAGS"
+chmod u+w .
+$rm conftest* out/*
+rmdir out
+cd ..
+rmdir conftest
+$rm -r conftest 2>/dev/null
+])
+compiler_c_o=$lt_cv_compiler_c_o
+AC_MSG_RESULT([$compiler_c_o])
+
+if test x"$compiler_c_o" = x"yes"; then
+ # Check to see if we can write to a .lo
+ AC_MSG_CHECKING([if $compiler supports -c -o file.lo])
+ AC_CACHE_VAL([lt_cv_compiler_o_lo], [
+ lt_cv_compiler_o_lo=no
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -c -o conftest.lo"
+ save_objext="$ac_objext"
+ ac_objext=lo
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ lt_cv_compiler_o_lo=no
+ else
+ lt_cv_compiler_o_lo=yes
+ fi
+ ])
+ ac_objext="$save_objext"
+ CFLAGS="$save_CFLAGS"
+ ])
+ compiler_o_lo=$lt_cv_compiler_o_lo
+ AC_MSG_RESULT([$compiler_o_lo])
+else
+ compiler_o_lo=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check to see if we can do hard links to lock some files if needed
+hard_links="nottested"
+if test "$compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ AC_MSG_CHECKING([if we can lock with hard links])
+ hard_links=yes
+ $rm conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ AC_MSG_RESULT([$hard_links])
+ if test "$hard_links" = no; then
+ AC_MSG_WARN([\`$CC' does not support \`-c -o', so \`make -j' may be unsafe])
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+if test "$GCC" = yes; then
+ # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
+ AC_MSG_CHECKING([if $compiler supports -fno-rtti -fno-exceptions])
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.$ac_ext"
+ compiler_rtti_exceptions=no
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ compiler_rtti_exceptions=no
+ else
+ compiler_rtti_exceptions=yes
+ fi
+ ])
+ CFLAGS="$save_CFLAGS"
+ AC_MSG_RESULT([$compiler_rtti_exceptions])
+
+ if test "$compiler_rtti_exceptions" = "yes"; then
+ no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+ else
+ no_builtin_flag=' -fno-builtin'
+ fi
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# See if the linker supports building shared libraries.
+AC_MSG_CHECKING([whether the linker ($LD) supports shared libraries])
+
+allow_undefined_flag=
+no_undefined_flag=
+need_lib_prefix=unknown
+need_version=unknown
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+archive_cmds=
+archive_expsym_cmds=
+old_archive_from_new_cmds=
+old_archive_from_expsyms_cmds=
+export_dynamic_flag_spec=
+whole_archive_flag_spec=
+thread_safe_flag_spec=
+hardcode_into_libs=no
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+hardcode_shlibpath_var=unsupported
+runpath_var=
+link_all_deplibs=unknown
+always_export_symbols=no
+export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
+# include_expsyms should be a list of space-separated symbols to be *always*
+# included in the symbol list
+include_expsyms=
+# exclude_expsyms can be an egrep regular expression of symbols to exclude
+# it will be wrapped by ` (' and `)$', so one must not match beginning or
+# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+# as well as any symbol that contains `d'.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
+# Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+# platforms (ab)use it in PIC code, but their linkers get confused if
+# the symbol is explicitly referenced. Since portable code cannot
+# rely on this symbol name, it's probably fine to never include it in
+# preloaded symbol tables.
+extract_expsyms_cmds=
+
+case $host_os in
+cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ # On AIX, the GNU linker is very broken
+    # Note: Check GNU linker on AIX 5-IA64 when/if it becomes available.
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+EOF
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, is to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+    # behavior of shared libraries on other platforms, we cannot use
+    # them.
+ ld_shlibs=no
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+
+ extract_expsyms_cmds='test -f $output_objdir/impgen.c || \
+ sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //;s/^# *$//; p; }" -e d < $''0 > $output_objdir/impgen.c~
+ test -f $output_objdir/impgen.exe || (cd $output_objdir && \
+ if test "x$HOST_CC" != "x" ; then $HOST_CC -o impgen impgen.c ; \
+ else $CC -o impgen impgen.c ; fi)~
+ $output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def'
+
+ old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib'
+
+ # cygwin and mingw dlls have different entry points and sets of symbols
+ # to exclude.
+ # FIXME: what about values for MSVC?
+ dll_entry=__cygwin_dll_entry@12
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~
+ case $host_os in
+ mingw*)
+ # mingw values
+ dll_entry=_DllMainCRTStartup@12
+ dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~
+ ;;
+ esac
+
+ # mingw and cygwin differ, and it's simplest to just exclude the union
+ # of the two symbol sets.
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one (in ltdll.c)
+ if test "x$lt_cv_need_dllmain" = "xyes"; then
+ ltdll_obj='$output_objdir/$soname-ltdll.'"$ac_objext "
+ ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $''0 > $output_objdir/$soname-ltdll.c~
+ test -f $output_objdir/$soname-ltdll.$ac_objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~'
+ else
+ ltdll_obj=
+ ltdll_cmds=
+ fi
+
+ # Extract the symbol export list from an `--export-all' def file,
+ # then regenerate the def file from the symbol export list, so that
+ # the compiled dll only exports the symbol export list.
+    # Be careful not to strip the DATA tag left by newer dlltools.
+ export_symbols_cmds="$ltdll_cmds"'
+ $DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~
+ sed -e "1,/EXPORTS/d" -e "s/ @ [[0-9]]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols'
+
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is.
+ # If DATA tags from a recent dlltool are present, honour them!
+ archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname-def;
+ else
+ echo EXPORTS > $output_objdir/$soname-def;
+ _lt_hint=1;
+ cat $export_symbols | while read symbol; do
+ set dummy \$symbol;
+ case \[$]# in
+ 2) echo " \[$]2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;;
+ *) echo " \[$]2 @ \$_lt_hint \[$]3 ; " >> $output_objdir/$soname-def;;
+ esac;
+ _lt_hint=`expr 1 + \$_lt_hint`;
+ done;
+ fi~
+ '"$ltdll_cmds"'
+ $CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~
+ $CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~
+ $CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags'
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris* | sysv5*)
+ if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+EOF
+ elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = yes; then
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+ case $host_os in
+ cygwin* | mingw* | pw32*)
+ # dlltool doesn't understand --whole-archive et. al.
+ whole_archive_flag_spec=
+ ;;
+ *)
+ # ancient GNU ld didn't support --whole-archive et. al.
+ if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ ;;
+ esac
+ fi
+else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$link_static_flag"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ hardcode_direct=yes
+ archive_cmds=''
+ hardcode_libdir_separator=':'
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # Added $aix_export variable to control use of exports file.
+ # For non-gcc, we don't use exports files, and rather trust
+ # the binder's -qmkshrobj option to export all the mangled
+ # symbols we need for C++ and java.
+
+ aix_export="\${wl}$exp_sym_flag:\$export_symbols"
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ esac
+
+ shared_flag='-shared'
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ shared_flag='${wl}-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+
+ # Test for -qmkshrobj and use it if it's available.
+ # It's superior for determining exportable symbols,
+ # especially for C++ or JNI libraries, which have
+ # mangled names.
+ #
+ AC_LANG_CONFTEST(void f(){})
+ if AC_TRY_EVAL(CC -c conftest.c) && AC_TRY_EVAL(CC -o conftest conftest.$ac_objext -qmkshrobj -lC_r); then
+ lt_cv_aix_mkshrobj=yes
+ else
+ lt_cv_aix_mkshrobj=no
+ fi
+
+ if test "$lt_cv_aix_mkshrobj" = yes; then
+ aix_export="-qmkshrobj"
+ fi
+ fi
+
+ # It seems that -bexpall can do strange things, so it is better to
+ # generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
+ archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag $aix_export $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname ${wl}-h$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"
+ else
+ hardcode_libdir_flag_spec='${wl}-bnolibpath ${wl}-blibpath:$libdir:/usr/lib:/lib'
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='${wl}-berok'
+ # This is a bit strange, but is similar to how AIX traditionally builds
+        # its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"' ~$AR -crlo $objdir/$libname$release.a $objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+    # see comment about different semantics in the GNU ld section
+ ld_shlibs=no
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs'
+ fix_srcfile_path='`cygpath -w "$srcfile"`'
+ ;;
+
+ darwin* | rhapsody*)
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ allow_undefined_flag='-undefined suppress'
+ ;;
+ *) # Darwin 1.3 on
+ allow_undefined_flag='-flat_namespace -undefined suppress'
+ ;;
+ esac
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+
+ #### Local change for Sleepycat's Berkeley DB [#5664] [#6511]
+ case "$host_os" in
+ darwin[[12345]].*)
+ # removed double quotes in the following line:
+ archive_cmds='$nonopt $(test x$module = xyes && echo -bundle || echo -dynamiclib) $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -install_name $rpath/$soname $verstring'
+ ;;
+ *) # Darwin6.0 on (Mac OS/X Jaguar)
+ archive_cmds='$nonopt $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -dynamiclib -install_name $rpath/$soname $verstring'
+ ;;
+ esac
+ #### End of changes for Sleepycat's Berkeley DB [#5664] [#6511]
+
+ # We need to add '_' to the symbols in $export_symbols first
+ #archive_expsym_cmds="$archive_cmds"' && strip -s $export_symbols'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ whole_archive_flag_spec='-all_load $convenience'
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ case $host_os in
+ hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
+ *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;;
+ esac
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_minus_L=yes # Not in the search PATH, but as the default
+ # location of the library.
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ irix5* | irix6*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ link_all_deplibs=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ openbsd*)
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case "$host_os" in
+ openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~
+ $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp'
+
+ # Both the C and C++ compilers support -rpath directly.
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+
+ sco3.2v5*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ ;;
+
+ solaris*)
+ # gcc --version < 3.0 without binutils cannot create self contained
+ # shared libraries reliably, requiring libgcc.a to resolve some of
+ # the object symbols generated in some cases. Libraries that use
+ # assert need libgcc.a to resolve __eprintf, for example. Linking
+ # a copy of libgcc.a into every shared library to guarantee resolving
+ # such symbols causes other problems: According to Tim Van Holder
+ # <tim.van.holder@pandora.be>, C++ libraries end up with a separate
+ # (to the application) exception stack for one thing.
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*)
+ cat <<EOF 1>&2
+
+*** Warning: Releases of GCC earlier than version 3.0 cannot reliably
+*** create self contained shared libraries on Solaris systems, without
+*** introducing a dependency on libgcc.a. Therefore, libtool is disabling
+*** -no-undefined support, which will at least allow you to build shared
+*** libraries. However, you may find that when you link such libraries
+*** into an application without using GCC, you have to manually add
+*** \`gcc --print-libgcc-file-name\` to the link command. We urge you to
+*** upgrade to a newer version of GCC. Another option is to rebuild your
+*** current GCC to use the GNU linker from GNU binutils 2.9.1 or newer.
+
+EOF
+ no_undefined_flag=
+ ;;
+ esac
+ fi
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, better avoid it by now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *) # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ if test "x$host_vendor" = xsno; then
+ archive_cmds='$LD -G -Bsymbolic -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ else
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=no # Motorola manual says yes, but my tests say they lie
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv5*)
+ no_undefined_flag=' -z text'
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, better avoid it by now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec=
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4.2uw2*)
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=no
+ hardcode_runpath_var=yes
+ runpath_var=LD_RUN_PATH
+ ;;
+
+ sysv5uw7* | unixware7*)
+ no_undefined_flag='${wl}-z ${wl}text'
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+AC_MSG_RESULT([$ld_shlibs])
+test "$ld_shlibs" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check hardcoding attributes.
+AC_MSG_CHECKING([how to hardcode library paths into programs])
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" || \
+ test -n "$runpath_var"; then
+
+ # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$hardcode_shlibpath_var" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexistent directories.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+AC_MSG_RESULT([$hardcode_action])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+##
+## END FIXME
+
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+## FIXME: this should be a separate macro
+##
+# PORTME Fill in your ld.so characteristics
+AC_MSG_CHECKING([dynamic linker characteristics])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
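+# These defaults are overridden per host in the case statement below.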
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}.so$major'
+ ;;
+
+aix4* | aix5*)
+ version_type=linux
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # If we don't set need_version, we'll get x.so.0.0.0,
+ # even if -avoid-version is set.
+ need_version=no
+
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}.so$major ${libname}${release}.so$versuffix $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[[01]] | aix4.[[01]].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+ # AIX (on Power*) has no versioning support, so currently we cannot
+ # hardcode the correct soname into the executable. We could probably
+ # add versioning support to collect2, so additional links may become
+ # useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}.so$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
+ ;;
+
+beos*)
+ library_names_spec='${libname}.so'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi4*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ export_dynamic_flag_spec=-rdynamic
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32*)
+ version_type=windows
+ need_version=no
+ need_lib_prefix=no
+ case $GCC,$host_os in
+ yes,cygwin*)
+ library_names_spec='$libname.dll.a'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ postinstall_cmds='dlpath=`bash 2>&1 -c '\''. $dir/${file}i;echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog .libs/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`bash 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $rm \$dlpath'
+ ;;
+ yes,mingw*)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | sed -e "s/^libraries://" -e "s/;/ /g"`
+ ;;
+ yes,pw32*)
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll'
+ ;;
+ *)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ # added support for -jnimodule, encapsulated below in ${darwin_suffix}
+ darwin_suffix='$(test .$jnimodule = .yes && echo jnilib || (test .$module = .yes && echo so || echo dylib))'
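+ # darwin_suffix selects .jnilib for JNI modules, .so for loadable
+ # modules, and .dylib for ordinary shared libraries.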
+ library_names_spec='${libname}${release}${versuffix}.'"${darwin_suffix}"' ${libname}${release}${major}.'"${darwin_suffix}"' ${libname}.'"${darwin_suffix}"
+ soname_spec='${libname}${release}${major}.'"${darwin_suffix}"
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd*)
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ *)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ dynamic_linker="$host_os dld.sl"
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
+ soname_spec='${libname}${release}.sl$major'
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+irix5* | irix6*)
+ version_type=irix
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so $libname.so'
+ case $host_os in
+ irix5*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. That test was broken with cross compilers, most
+ # powerpc-linux boxes support dynamic linking these days, and people
+ # can always use --disable-shared, so the test was removed and we
+ # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+nto-qnx)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+openbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case "$host_os" in
+ openbsd2.[[89]] | openbsd2.[[89]].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+os2*)
+ libname_spec='$name'
+ need_lib_prefix=no
+ library_names_spec='$libname.dll $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_version=no
+ soname_spec='${libname}${release}.so'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+sco3.2v5*)
+ version_type=osf
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec; then
+ version_type=linux
+ library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
+ soname_spec='$libname.so.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Report the final consequences.
+AC_MSG_CHECKING([if libtool supports shared libraries])
+AC_MSG_RESULT([$can_build_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build shared libraries])
+test "$can_build_shared" = "no" && enable_shared=no
+
+# On AIX, shared libraries and static libraries use the same namespace, and
+# are all built from PIC.
+case "$host_os" in
+aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+aix4*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+esac
+AC_MSG_RESULT([$enable_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build static libraries])
+# Make sure either enable_shared or enable_static is yes.
+test "$enable_shared" = yes || enable_static=yes
+AC_MSG_RESULT([$enable_static])
+##
+## END FIXME
+
+if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
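+# Otherwise enable_fast_install keeps the value chosen by --enable-fast-install.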
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+AC_LIBTOOL_DLOPEN_SELF
+
+## FIXME: this should be a separate macro
+##
+if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ AC_MSG_CHECKING([whether -lc should be explicitly linked in])
+ AC_CACHE_VAL([lt_cv_archive_cmds_need_lc],
+ [$rm conftest*
+ echo 'static int dummy;' > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile); then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_cv_prog_cc_wl
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if AC_TRY_EVAL(archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1)
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi])
+ AC_MSG_RESULT([$lt_cv_archive_cmds_need_lc])
+ ;;
+ esac
+fi
+need_lc=${lt_cv_archive_cmds_need_lc-yes}
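+# If the test above did not run (non-GCC builds, static-only builds, or
+# multi-command archive_cmds), default to linking -lc explicitly.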
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# The second clause should only fire when bootstrapping the
+# libtool distribution, otherwise you forgot to ship ltmain.sh
+# with your package, and you will get complaints that there are
+# no rules to generate ltmain.sh.
+if test -f "$ltmain"; then
+ :
+else
+ # If there is no Makefile yet, we rely on a make rule to execute
+ # `config.status --recheck' to rerun these tests and create the
+ # libtool script then.
+ test -f Makefile && make "$ltmain"
+fi
+
+if test -f "$ltmain"; then
+ trap "$rm \"${ofile}T\"; exit 1" 1 2 15
+ $rm -f "${ofile}T"
+
+ echo creating $ofile
+
+ # Now quote all the things that may contain metacharacters while being
+ # careful not to overquote the AC_SUBSTed values. We take copies of the
+ # variables and quote the copies for generation of the libtool script.
+ for var in echo old_CC old_CFLAGS \
+ AR AR_FLAGS CC LD LN_S NM SHELL \
+ reload_flag reload_cmds wl \
+ pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
+ thread_safe_flag_spec whole_archive_flag_spec libname_spec \
+ library_names_spec soname_spec \
+ RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
+ old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds \
+ postuninstall_cmds extract_expsyms_cmds old_archive_from_expsyms_cmds \
+ old_striplib striplib file_magic_cmd export_symbols_cmds \
+ deplibs_check_method allow_undefined_flag no_undefined_flag \
+ finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
+ global_symbol_to_c_name_address \
+ hardcode_libdir_flag_spec hardcode_libdir_separator \
+ sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
+ compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
+
+ case $var in
+ reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
+ old_postinstall_cmds | old_postuninstall_cmds | \
+ export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
+ extract_expsyms_cmds | old_archive_from_expsyms_cmds | \
+ postinstall_cmds | postuninstall_cmds | \
+ finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
+ # Double-quote double-evaled strings.
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
+ ;;
+ *)
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
+ ;;
+ esac
+ done
+
+ cat <<__EOF__ > "${ofile}T"
+#! $SHELL
+
+# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996-2000 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="sed -e s/^X//"
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$need_lc
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+
+# An echo program that does not interpret backslashes.
+echo=$lt_echo
+
+# The archiver.
+AR=$lt_AR
+AR_FLAGS=$lt_AR_FLAGS
+
+# The default C compiler.
+CC=$lt_CC
+
+# Is the compiler the GNU C compiler?
+with_gcc=$GCC
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# Whether we need hard or soft links.
+LN_S=$lt_LN_S
+
+# A BSD-compatible nm program.
+NM=$lt_NM
+
+# A symbol stripping program
+STRIP=$STRIP
+
+# Used to examine libraries when file_magic_cmd begins "file"
+MAGIC_CMD=$MAGIC_CMD
+
+# Used on cygwin: DLL creation program.
+DLLTOOL="$DLLTOOL"
+
+# Used on cygwin: object dumper.
+OBJDUMP="$OBJDUMP"
+
+# Used on cygwin: assembler.
+AS="$AS"
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# How to pass a linker flag through the compiler.
+wl=$lt_wl
+
+# Object file suffix (normally "o").
+objext="$ac_objext"
+
+# Old archive suffix (normally "a").
+libext="$libext"
+
+# Executable file suffix (normally "").
+exeext="$exeext"
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_pic_flag
+pic_mode=$pic_mode
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_compiler_c_o
+
+# Can we write directly to a .lo ?
+compiler_o_lo=$lt_compiler_o_lo
+
+# Must we lock files when doing compilation ?
+need_locks=$lt_need_locks
+
+# Do we need the lib prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_link_static_flag
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_no_builtin_flag
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Compiler flag to generate thread-safe objects.
+thread_safe_flag_spec=$lt_thread_safe_flag_spec
+
+# Library versioning type.
+version_type=$version_type
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Commands used to build and install an old-style archive.
+RANLIB=$lt_RANLIB
+old_archive_cmds=$lt_old_archive_cmds
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build and install a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+postinstall_cmds=$lt_postinstall_cmds
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method == file_magic.
+file_magic_cmd=$lt_file_magic_cmd
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that forces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# Same as above, but a single script fragment to be evaled but not shown.
+finish_eval=$lt_finish_eval
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_global_symbol_pipe
+
+# Transform the output of nm into a proper C declaration.
+global_symbol_to_cdecl=$lt_global_symbol_to_cdecl
+
+# Transform the output of nm into a C name/address pair.
+global_symbol_to_c_name_address=$lt_global_symbol_to_c_name_address
+
+# This is the shared library runtime path variable.
+runpath_var=$runpath_var
+
+# This is the shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
+# the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at relink time.
+variables_saved_for_relink="$variables_saved_for_relink"
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Compile-time system search path for libraries
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path="$fix_srcfile_path"
+
+# Set to yes if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# ### END LIBTOOL CONFIG
+
+__EOF__
+
+ case $host_os in
+ aix3*)
+ cat <<\EOF >> "${ofile}T"
+
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+EOF
+ ;;
+ esac
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ cat <<'EOF' >> "${ofile}T"
+ # This is a source program that is used to create DLLs on Windows.
+ # Don't remove or modify the starting and closing comments.
+# /* ltdll.c starts here */
+# #define WIN32_LEAN_AND_MEAN
+# #include <windows.h>
+# #undef WIN32_LEAN_AND_MEAN
+# #include <stdio.h>
+#
+# #ifndef __CYGWIN__
+# # ifdef __CYGWIN32__
+# # define __CYGWIN__ __CYGWIN32__
+# # endif
+# #endif
+#
+# #ifdef __cplusplus
+# extern "C" {
+# #endif
+# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
+# #ifdef __cplusplus
+# }
+# #endif
+#
+# #ifdef __CYGWIN__
+# #include <cygwin/cygwin_dll.h>
+# DECLARE_CYGWIN_DLL( DllMain );
+# #endif
+# HINSTANCE __hDllInstance_base;
+#
+# BOOL APIENTRY
+# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
+# {
+# __hDllInstance_base = hInst;
+# return TRUE;
+# }
+# /* ltdll.c ends here */
+ # This is a source program that is used to create import libraries
+ # on Windows for DLLs which lack them. Don't remove or modify the
+ # starting and closing comments.
+# /* impgen.c starts here */
+# /* Copyright (C) 1999-2000 Free Software Foundation, Inc.
+#
+# This file is part of GNU libtool.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# */
+#
+# #include <stdio.h> /* for printf() */
+# #include <unistd.h> /* for open(), lseek(), read() */
+# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
+# #include <string.h> /* for strdup() */
+#
+# /* O_BINARY isn't required (or even defined sometimes) under Unix */
+# #ifndef O_BINARY
+# #define O_BINARY 0
+# #endif
+#
+# static unsigned int
+# pe_get16 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[2];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 2);
+# return b[0] + (b[1]<<8);
+# }
+#
+# static unsigned int
+# pe_get32 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[4];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 4);
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# static unsigned int
+# pe_as32 (ptr)
+# void *ptr;
+# {
+# unsigned char *b = ptr;
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# int
+# main (argc, argv)
+# int argc;
+# char *argv[];
+# {
+# int dll;
+# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
+# unsigned long export_rva, export_size, nsections, secptr, expptr;
+# unsigned long name_rvas, nexp;
+# unsigned char *expdata, *erva;
+# char *filename, *dll_name;
+#
+# filename = argv[1];
+#
+# dll = open(filename, O_RDONLY|O_BINARY);
+# if (dll < 1)
+# return 1;
+#
+# dll_name = filename;
+#
+# for (i=0; filename[i]; i++)
+# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
+# dll_name = filename + i +1;
+#
+# pe_header_offset = pe_get32 (dll, 0x3c);
+# opthdr_ofs = pe_header_offset + 4 + 20;
+# num_entries = pe_get32 (dll, opthdr_ofs + 92);
+#
+# if (num_entries < 1) /* no exports */
+# return 1;
+#
+# export_rva = pe_get32 (dll, opthdr_ofs + 96);
+# export_size = pe_get32 (dll, opthdr_ofs + 100);
+# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
+# secptr = (pe_header_offset + 4 + 20 +
+# pe_get16 (dll, pe_header_offset + 4 + 16));
+#
+# expptr = 0;
+# for (i = 0; i < nsections; i++)
+# {
+# char sname[8];
+# unsigned long secptr1 = secptr + 40 * i;
+# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+# lseek(dll, secptr1, SEEK_SET);
+# read(dll, sname, 8);
+# if (vaddr <= export_rva && vaddr+vsize > export_rva)
+# {
+# expptr = fptr + (export_rva - vaddr);
+# if (export_rva + export_size > vaddr + vsize)
+# export_size = vsize - (export_rva - vaddr);
+# break;
+# }
+# }
+#
+# expdata = (unsigned char*)malloc(export_size);
+# lseek (dll, expptr, SEEK_SET);
+# read (dll, expdata, export_size);
+# erva = expdata - export_rva;
+#
+# nexp = pe_as32 (expdata+24);
+# name_rvas = pe_as32 (expdata+32);
+#
+# printf ("EXPORTS\n");
+# for (i = 0; i<nexp; i++)
+# {
+# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
+# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
+# }
+#
+# return 0;
+# }
+# /* impgen.c ends here */
+
+EOF
+ ;;
+ esac
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+ # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "${ofile}T" || (rm -f "${ofile}T"; exit 1)
+
+ mv -f "${ofile}T" "$ofile" || \
+ (rm -f "$ofile" && cp "${ofile}T" "$ofile" && rm -f "${ofile}T")
+ chmod +x "$ofile"
+fi
+##
+## END FIXME
+
+])# _LT_AC_LTCONFIG_HACK
+
+# AC_LIBTOOL_DLOPEN - enable checks for dlopen support
+AC_DEFUN([AC_LIBTOOL_DLOPEN], [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])])
+
+# AC_LIBTOOL_WIN32_DLL - declare package support for building win32 dll's
+AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_BEFORE([$0], [AC_LIBTOOL_SETUP])])
+
+# AC_ENABLE_SHARED - implement the --enable-shared flag
+# Usage: AC_ENABLE_SHARED[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_SHARED],
+[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(shared,
+changequote(<<, >>)dnl
+<< --enable-shared[=PKGS] build shared libraries [default=>>AC_ENABLE_SHARED_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_shared=yes ;;
+no) enable_shared=no ;;
+*)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_shared=AC_ENABLE_SHARED_DEFAULT)dnl
+])
+
+# AC_DISABLE_SHARED - set the default shared flag to --disable-shared
+AC_DEFUN([AC_DISABLE_SHARED],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_SHARED(no)])
+
+# AC_ENABLE_STATIC - implement the --enable-static flag
+# Usage: AC_ENABLE_STATIC[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_STATIC],
+[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(static,
+changequote(<<, >>)dnl
+<< --enable-static[=PKGS] build static libraries [default=>>AC_ENABLE_STATIC_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_static=yes ;;
+no) enable_static=no ;;
+*)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_static=AC_ENABLE_STATIC_DEFAULT)dnl
+])
+
+# AC_DISABLE_STATIC - set the default static flag to --disable-static
+AC_DEFUN([AC_DISABLE_STATIC],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_STATIC(no)])
+
+
+# AC_ENABLE_FAST_INSTALL - implement the --enable-fast-install flag
+# Usage: AC_ENABLE_FAST_INSTALL[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_FAST_INSTALL],
+[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(fast-install,
+changequote(<<, >>)dnl
+<< --enable-fast-install[=PKGS] optimize for fast installation [default=>>AC_ENABLE_FAST_INSTALL_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_fast_install=yes ;;
+no) enable_fast_install=no ;;
+*)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_fast_install=AC_ENABLE_FAST_INSTALL_DEFAULT)dnl
+])
+
+# AC_DISABLE_FAST_INSTALL - set the default to --disable-fast-install
+AC_DEFUN([AC_DISABLE_FAST_INSTALL],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_FAST_INSTALL(no)])
+
+# AC_LIBTOOL_PICMODE - implement the --with-pic flag
+# Usage: AC_LIBTOOL_PICMODE[(MODE)]
+# Where MODE is either `yes' or `no'. If omitted, it defaults to
+# `both'.
+AC_DEFUN([AC_LIBTOOL_PICMODE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+pic_mode=ifelse($#,1,$1,default)])
+
+
+# AC_PATH_TOOL_PREFIX - find a file program which can recognise a shared library
+AC_DEFUN([AC_PATH_TOOL_PREFIX],
+[AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+ /*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a dos path.
+ ;;
+ *)
+ ac_save_MAGIC_CMD="$MAGIC_CMD"
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word. This closes a longstanding sh security hole.
+ ac_dummy="ifelse([$2], , $PATH, [$2])"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$1; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ MAGIC_CMD="$ac_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+])
+
+
+# AC_PATH_MAGIC - find a file program which can recognise a shared library
+AC_DEFUN([AC_PATH_MAGIC],
+[AC_REQUIRE([AC_CHECK_TOOL_PREFIX])dnl
+AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin:$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ AC_PATH_TOOL_PREFIX(file, /usr/bin:$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])
+
+
+# AC_PROG_LD - find the path to the GNU or non-GNU linker
+AC_DEFUN([AC_PROG_LD],
+[AC_ARG_WITH(gnu-ld,
+[ --with-gnu-ld assume the C compiler uses GNU ld [default=no]],
+test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no)
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by GCC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | [[A-Za-z]]:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$lt_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+AC_PROG_LD_GNU
+])
+
+# AC_PROG_LD_GNU - check whether the selected linker is GNU ld
+AC_DEFUN([AC_PROG_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ lt_cv_prog_gnu_ld=yes
+else
+ lt_cv_prog_gnu_ld=no
+fi])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])
+
+# AC_PROG_LD_RELOAD_FLAG - find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+AC_DEFUN([AC_PROG_LD_RELOAD_FLAG],
+[AC_CACHE_CHECK([for $LD option to reload object files], lt_cv_ld_reload_flag,
+[lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+test -n "$reload_flag" && reload_flag=" $reload_flag"
+])
+
+# AC_DEPLIBS_CHECK_METHOD - how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+AC_DEFUN([AC_DEPLIBS_CHECK_METHOD],
+[AC_CACHE_CHECK([how to recognise dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which respond to the $file_magic_cmd with a given egrep regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix4* | aix5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi4*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin* | mingw* | pw32*)
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method='file_magic Mach-O dynamically linked shared library'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ lt_cv_file_magic_test_file=`echo /System/Library/Frameworks/System.framework/Versions/*/System | head -1`
+ ;;
+ *) # Darwin 1.3 on
+ lt_cv_file_magic_test_file='/usr/lib/libSystem.dylib'
+ ;;
+ esac
+ ;;
+
+freebsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD)/i[[3-9]]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20*|hpux11*)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+
+irix5* | irix6*)
+ case $host_os in
+ irix5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[[1234]] dynamic lib MIPS - version 1"
+ ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*`
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ case $host_cpu in
+ alpha* | hppa* | i*86 | powerpc* | sparc* | ia64* | *)
+ lt_cv_deplibs_check_method=pass_all ;;
+ *)
+ # glibc up to 2.1.1 does not perform some relocations on ARM
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so`
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so\.[[0-9]]+\.[[0-9]]+$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+nto-qnx)
+ lt_cv_deplibs_check_method=unknown
+ ;;
+
+openbsd*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB shared object'
+ else
+ lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method='file_magic COFF format alpha shared library'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sco3.2v5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+
+sysv5uw[[78]]* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ esac
+ ;;
+esac
+])
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+])
+
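+# For reference, a file_magic check method boils down to running the
+# configured file command on a candidate library and grepping its output;
+# a rough sketch using the bsdi4 values from the table above (the library
+# path is illustrative):
+#
+#   file_magic_cmd='/usr/bin/file -L'
+#   file_magic_regex='ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+#   if eval "$file_magic_cmd /shlib/libc.so" 2>/dev/null |
+#       egrep "$file_magic_regex" >/dev/null; then
+#     echo "candidate is a shared library we can depend on"
+#   fi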
+
+# AC_PROG_NM - find the path to a BSD-compatible name lister
+AC_DEFUN([AC_PROG_NM],
+[AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+AC_MSG_CHECKING([for BSD-compatible nm])
+AC_CACHE_VAL(lt_cv_path_NM,
+[if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm=$ac_dir/${ac_tool_prefix}nm
+ if test -f $tmp_nm || test -f $tmp_nm$ac_exeext ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ if ($tmp_nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep '(/dev/null|Invalid file or object type)' >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ elif ($tmp_nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ else
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm
+fi])
+NM="$lt_cv_path_NM"
+AC_MSG_RESULT([$NM])
+])
+
+# AC_CHECK_LIBM - check for math library
+AC_DEFUN([AC_CHECK_LIBM],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cygwin* | *-*-pw32*)
+ # These systems don't have libm
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, main, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, main, LIBM="-lm")
+ ;;
+esac
+])
+
+# AC_LIBLTDL_CONVENIENCE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl convenience library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-convenience to the
+# configure arguments. Note that LIBLTDL and INCLTDL are not
+# AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If DIR is not
+# provided, it is assumed to be `libltdl'. LIBLTDL will be prefixed
+# with '${top_builddir}/' and INCLTDL will be prefixed with
+# '${top_srcdir}/' (note the single quotes!). If your package is not
+# flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
+AC_DEFUN([AC_LIBLTDL_CONVENIENCE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ case $enable_ltdl_convenience in
+ no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;;
+ "") enable_ltdl_convenience=yes
+ ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;;
+ esac
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+])
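+
+# Since LIBLTDL and INCLTDL are deliberately left un-AC_SUBSTed here, a
+# caller's configure.in has to finish the job itself; a sketch of typical
+# usage (not something this tree does):
+#
+#   AC_LIBLTDL_CONVENIENCE([libltdl])
+#   AC_SUBST(LIBLTDL)
+#   AC_SUBST(INCLTDL)
+#   AC_CONFIG_SUBDIRS(libltdl)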
+
+# AC_LIBLTDL_INSTALLABLE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl installable library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-install to the configure
+# arguments. Note that LIBLTDL and INCLTDL are not AC_SUBSTed, nor is
+# AC_CONFIG_SUBDIRS called. If DIR is not provided and an installed
+# libltdl is not found, it is assumed to be `libltdl'. LIBLTDL will
+# be prefixed with '${top_builddir}/' and INCLTDL will be prefixed
+# with '${top_srcdir}/' (note the single quotes!). If your package is
+# not flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
+# In the future, this macro may have to be called after AC_PROG_LIBTOOL.
+AC_DEFUN([AC_LIBLTDL_INSTALLABLE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ AC_CHECK_LIB(ltdl, main,
+ [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no],
+ [if test x"$enable_ltdl_install" = xno; then
+ AC_MSG_WARN([libltdl not installed, but installation disabled])
+ else
+ enable_ltdl_install=yes
+ fi
+ ])
+ if test x"$enable_ltdl_install" = x"yes"; then
+ ac_configure_args="$ac_configure_args --enable-ltdl-install"
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+ else
+ ac_configure_args="$ac_configure_args --enable-ltdl-install=no"
+ LIBLTDL="-lltdl"
+ INCLTDL=
+ fi
+])
+
+# old names
+AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL])
+AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+AC_DEFUN([AM_PROG_LD], [AC_PROG_LD])
+AC_DEFUN([AM_PROG_NM], [AC_PROG_NM])
+
+# This is just to silence aclocal about the macro not being used
+ifelse([AC_DISABLE_FAST_INSTALL])
diff --git a/libdb/dist/aclocal/mutex.ac b/libdb/dist/aclocal/mutex.ac
new file mode 100644
index 0000000..1682e08
--- /dev/null
+++ b/libdb/dist/aclocal/mutex.ac
@@ -0,0 +1,719 @@
+# $Id$
+
+# POSIX pthreads tests: inter-process safe and intra-process only.
+#
+# We need to run a test here, because the PTHREAD_PROCESS_SHARED flag compiles
+# fine on problematic systems, but won't actually work. This is a problem for
+# cross-compilation environments. I think inter-process mutexes are as likely
+# to fail in cross-compilation environments as real ones (especially since the
+# likely cross-compilation environment is Linux, where inter-process mutexes
+# don't currently work -- the latest estimate I've heard is Q1 2002, as part
+# of IBM's NGPT package). So:
+#
+# If checking for inter-process pthreads mutexes:
+# If it's local, run a test.
+# If it's a cross-compilation, fail.
+#
+# If the user specified pthreads mutexes and we're checking for intra-process
+# mutexes only:
+# If it's local, run a test.
+# If it's a cross-compilation, run a link-test.
+#
+# So, the thing you can't do here is configure for inter-process POSIX pthread
+# mutexes when cross-compiling. Since we're using the GNU/Cygnus toolchain for
+# cross-compilation, the target system is likely Linux or *BSD, so we're doing
+# the right thing.
+AC_DEFUN(AM_PTHREADS_SHARED, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],, [db_cv_mutex="no"])])
+AC_DEFUN(AM_PTHREADS_PRIVATE, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],,
+AC_TRY_LINK([
+#include <pthread.h>],[
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+], [db_cv_mutex="$1"]))])
+
+# Figure out mutexes for this compiler/architecture.
+AC_DEFUN(AM_DEFINE_MUTEXES, [
+
+# Mutexes we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_MUTEX_VMS, [Define to 1 to use VMS mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_VXWORKS, [Define to 1 to use VxWorks mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_WIN32, [Define to 1 to use Windows mutexes.])
+
+AC_CACHE_CHECK([for mutexes], db_cv_mutex, [
+db_cv_mutex=no
+
+orig_libs=$LIBS
+
+# User-specified POSIX or UI mutexes.
+#
+# There are two different reasons to specify mutexes: First, the application
+# is already using one type of mutex and doesn't want to mix-and-match (for
+# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the
+# application's POSIX pthreads mutexes don't support inter-process locking,
+# but the application wants to use them anyway (for example, current Linux
+# and *BSD systems).
+#
+# If we're on Solaris, we insist that -lthread or -lpthread be used. The
+# problem is that the Solaris C library has UI/POSIX interface stubs, but
+# they're broken: configuring them for inter-process mutexes doesn't return an
+# error, but it doesn't work either. Otherwise, we try first without the library
+# and then with it: there's some information that SCO/UnixWare/OpenUNIX needs
+# this. [#4950]
+#
+# Test for LWP threads before testing for UI/POSIX threads, we prefer them
+# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not
+# pwrite64, if they load the C library before the appropriate threads library,
+# e.g., tclsh using dlopen to load the DB library. By using LWP threads we
+# avoid answering lots of user questions, not to mention the bugs.
+if test "$db_cv_posixmutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="posix_library_only";;
+ *)
+ db_cv_mutex="posix_only";;
+ esac
+fi
+
+if test "$db_cv_uimutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="ui_library_only";;
+ *)
+ db_cv_mutex="ui_only";;
+ esac
+fi
+
+# LWP threads: _lwp_XXX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ static lwp_mutex_t mi = SHAREDMUTEX;
+ static lwp_cond_t ci = SHAREDCV;
+ lwp_mutex_t mutex = mi;
+ lwp_cond_t cond = ci;
+ exit (
+ _lwp_mutex_lock(&mutex) ||
+ _lwp_mutex_unlock(&mutex));
+], [db_cv_mutex="Solaris/lwp"])
+fi
+
+# UI threads: thr_XXX
+#
+# Try with and without the -lthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads"])
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+LIBS="$LIBS -lthread"
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads/library"])
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+ AC_MSG_ERROR([unable to find UI mutex interfaces])
+fi
+
+# POSIX.1 pthreads: pthread_XXX
+#
+# Try with and without the -lpthread library. If the user specified we use
+# POSIX pthreads mutexes, and we fail to find the full interface, try and
+# configure for just intra-process support.
+if test "$db_cv_pthreadsmutexes" = yes; then
+ if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_SHARED("POSIX/pthreads")
+ fi
+ if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_SHARED("POSIX/pthreads/library")
+ LIBS="$orig_libs"
+ fi
+ if test "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/private")
+ fi
+ if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private")
+ LIBS="$orig_libs"
+ fi
+
+ if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces])
+ fi
+fi
+
+# msemaphore: HPPA only
+# Try HPPA before general msem test, it needs special alignment.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/mman.h>],[
+#if defined(__hppa__)
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HP/msem_init"])
+fi
+
+# msemaphore: AIX, OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <sys/mman.h>],[
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+], [db_cv_mutex="UNIX/msem_init"])
+fi
+
+# ReliantUNIX
+if test "$db_cv_mutex" = no; then
+LIBS="$LIBS -lmproc"
+AC_TRY_LINK([
+#include <ulocks.h>],[
+ typedef spinlock_t tsl_t;
+ spinlock_t x;
+ initspin(&x, 1);
+ cspinlock(&x);
+ spinunlock(&x);
+], [db_cv_mutex="ReliantUNIX/initspin"])
+LIBS="$orig_libs"
+fi
+
+# SCO: UnixWare has threads in libthread, but OpenServer doesn't.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__USLC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="SCO/x86/cc-assembly"])
+fi
+
+# abilock_t: SGI
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <abi_mutex.h>],[
+ typedef abilock_t tsl_t;
+ abilock_t x;
+ init_lock(&x);
+ acquire_lock(&x);
+ release_lock(&x);
+], [db_cv_mutex="SGI/init_lock"])
+fi
+
+# sema_t: Solaris
+# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
+# turn this test on, unless we find some other platform that uses the old
+# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ typedef sema_t tsl_t;
+ sema_t x;
+ sema_init(&x, 1, USYNC_PROCESS, NULL);
+ sema_wait(&x);
+ sema_post(&x);
+], [db_cv_mutex="UNIX/sema_init"])
+fi
+
+# _lock_try/_lock_clear: Solaris
+# On Solaris systems without Pthread or UI mutex interfaces, DB uses the
+# undocumented _lock_try _lock_clear function calls instead of either the
+# sema_trywait(3T) or sema_wait(3T) function calls. This is because of
+# problems in those interfaces in some releases of the Solaris C library.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/machlock.h>],[
+ typedef lock_t tsl_t;
+ lock_t x;
+ _lock_try(&x);
+ _lock_clear(&x);
+], [db_cv_mutex="Solaris/_lock_try"])
+fi
+
+# _check_lock/_clear_lock: AIX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/atomic_op.h>],[
+ int x;
+ _check_lock(&x,0,1);
+ _clear_lock(&x,0);
+], [db_cv_mutex="AIX/_check_lock"])
+fi
+
+# Alpha/gcc: OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__alpha) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ALPHA/gcc-assembly"])
+fi
+
+# ARM/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__arm__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ARM/gcc-assembly"])
+fi
+
+# MIPS/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="MIPS/gcc-assembly"])
+fi
+
+# PaRisc/gcc: HP/UX
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HPPA/gcc-assembly"])
+fi
+
+# PPC/gcc:
+# Test for Apple first, it requires slightly different assembly.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) && defined(__APPLE__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_APPLE/gcc-assembly"])
+fi
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_GENERIC/gcc-assembly"])
+fi
+
+# Sparc/gcc: SunOS, Solaris
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__sparc__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="Sparc/gcc-assembly"])
+fi
+
+# 68K/gcc: SunOS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="68K/gcc-assembly"])
+fi
+
+# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(i386) || defined(__i386__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="x86/gcc-assembly"])
+fi
+
+# S390/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__s390__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="S390/gcc-assembly"])
+fi
+
+# AMD64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(x86_64) || defined(__x86_64__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="x86_64/gcc-assembly"])
+fi
+
+# x86-64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__x86_64__)
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="x86_64/gcc-assembly"])
+fi
+
+# ia64/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__ia64) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ia64/gcc-assembly"])
+fi
+
+dnl alphalinux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__alpha__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="alphalinux/gcc-assembly"])
+fi
+
+dnl sparc32linux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__sparc__) && !defined(__arch64__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="sparc32linux/gcc-assembly"])
+fi
+
+dnl sparc64linux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__sparc__) && defined(__arch64__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}], [db_cv_mutex="sparc64linux/gcc-assembly"])
+fi
+
+dnl s390linux/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_RUN([main(){
+#if defined(__linux__) && defined(__s390__)
+ exit(0);
+#endif
+ exit(1);
+}], [db_cv_mutex="s390linux/gcc-assembly"])
+fi
+
+
+# uts/cc: UTS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(_UTS)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="UTS/cc-assembly"])
+fi
+
+# default to UNIX fcntl system call mutexes.
+if test "$db_cv_mutex" = no; then
+ db_cv_mutex="UNIX/fcntl"
+fi
+])
+
+case "$db_cv_mutex" in
+68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_68K_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and 68K assembly language mutexes.]);;
+AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_AIX_CHECK_LOCK,
+ [Define to 1 to use the AIX _check_lock mutexes.]);;
+ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Alpha assembly language mutexes.]);;
+ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ARM_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ARM_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and ARM assembly language mutexes.]);;
+HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on HP-UX.]);;
+HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.]);;
+ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_IA64_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and IA64 assembly language mutexes.]);;
+POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+POSIX/pthreads/library) LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/library/private)
+ LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+PPC_GENERIC/gcc-assembly)
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and generic PowerPC assembly language.]);;
+PPC_APPLE/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Apple PowerPC assembly language.]);;
+ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN)
+ AH_TEMPLATE(HAVE_MUTEX_RELIANTUNIX_INITSPIN,
+ [Define to 1 to use Reliant UNIX initspin mutexes.]);;
+S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_S390_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_S390_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and S/390 assembly language mutexes.]);;
+SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY,
+ [Define to 1 to use the SCO compiler and x86 assembly language mutexes.]);;
+SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_SGI_INIT_LOCK,
+ [Define to 1 to use the SGI XXX_lock mutexes.]);;
+Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LOCK_TRY,
+ [Define to 1 to use the Solaris _lock_XXX mutexes.]);;
+Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LWP,
+ [Define to 1 to use the Solaris lwp threads mutexes.]);;
+Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Sparc assembly language mutexes.]);;
+UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UI/threads/library) LIBS="$LIBS -lthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on systems other than HP-UX.]);;
+UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SEMA_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_SEMA_INIT,
+ [Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes.]);;
+UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4.cc${o}"
+ AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_UTS_CC_ASSEMBLY,
+ [Define to 1 to use the UTS compiler and assembly language mutexes.]);;
+MIPS/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Mips assembly language mutexes.]);;
+x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_X86_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and x86 assembly language mutexes.]);;
+x86_64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and amd64 assembly language mutexes.]);;
+alphalinux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY,
+ [Define to use the GCC compiler and alpha assembly language mutexes.]);;
+sparc32linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY,
+ [Define to use the GCC compiler and sparc32 assembly language mutexes.]);;
+sparc64linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY,
+ [Define to use the GCC compiler and sparc64 assembly language mutexes.]);;
+s390linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_S390_LINUX_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_S390_LINUX_ASSEMBLY,
+ [Define to use the GCC compiler and s390 assembly language mutexes.]);;
+UNIX/fcntl) AC_MSG_WARN(
+ [NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE.])
+ ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_FCNTL)
+ AH_TEMPLATE(HAVE_MUTEX_FCNTL,
+ [Define to 1 to use the UNIX fcntl system call mutexes.]);;
+*) AC_MSG_ERROR([Unknown mutex interface: $db_cv_mutex]);;
+esac
+
+if test "$db_cv_mutex" != "UNIX/fcntl"; then
+ AC_DEFINE(HAVE_MUTEX_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_THREADS,
+ [Define to 1 if fast mutexes are available.])
+fi
+
+# There are 3 classes of mutexes:
+#
+# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes.
+# 2: Mutexes that must be destroyed, but which don't hold permanent system
+# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS.
+# 3: Mutexes that must be destroyed, even after the process is gone, for
+# example, pthread mutexes on QNX and binary semaphores on VxWorks.
+#
+# DB cannot currently distinguish between #2 and #3 because DB does not know
+# if the application is running environment recovery as part of startup and
+# does not need to do cleanup, or if the environment is being removed and/or
+# recovered in a loop in the application, and so does need to clean up. If
+# we get it wrong, we're going to call the mutex destroy routine on a random
+# piece of memory, which usually works, but just might drop core. For now,
+# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we
+# have a better solution or reason to solve this in a general way -- so far,
+# the places we've needed to handle this are few.
+AH_TEMPLATE(HAVE_MUTEX_SYSTEM_RESOURCES,
+ [Define to 1 if mutexes hold system resources.])
+
+case "$host_os$db_cv_mutex" in
+*qnx*POSIX/pthread*|openedition*POSIX/pthread*)
+ AC_DEFINE(HAVE_MUTEX_SYSTEM_RESOURCES);;
+esac])
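+
+# A worked example of the table above (a sketch, assuming an x86 Linux host
+# where the inline-assembly probe succeeded): db_cv_mutex ends up as
+# "x86/gcc-assembly", so configure adds the test-and-set object and defines:
+#
+#   ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"   # ${o} is the object suffix
+#   #define HAVE_MUTEX_X86_GCC_ASSEMBLY 1
+#   #define HAVE_MUTEX_THREADS 1
+#
+# The detection can also be forced with the --with-mutex option defined in
+# options.ac below; an unrecognized name stops configure with the
+# "Unknown mutex interface" error.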
diff --git a/libdb/dist/aclocal/options.ac b/libdb/dist/aclocal/options.ac
new file mode 100644
index 0000000..0077c1f
--- /dev/null
+++ b/libdb/dist/aclocal/options.ac
@@ -0,0 +1,207 @@
+# $Id$
+
+# Process user-specified options.
+AC_DEFUN(AM_OPTIONS_SET, [
+
+# --enable-bigfile was the configuration option that Berkeley DB used before
+# autoconf 2.50 was released (which had --enable-largefile integrated in).
+AC_ARG_ENABLE(bigfile,
+ [AC_HELP_STRING([--disable-bigfile],
+ [Obsolete; use --disable-largefile instead.])],
+ [AC_MSG_ERROR(
+ [--enable-bigfile no longer supported, use --enable-largefile])])
+
+AC_MSG_CHECKING(if --enable-compat185 option specified)
+AC_ARG_ENABLE(compat185,
+ [AC_HELP_STRING([--enable-compat185],
+ [Build DB 1.85 compatibility API.])],
+ [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"])
+AC_MSG_RESULT($db_cv_compat185)
+
+AC_MSG_CHECKING(if --enable-cxx option specified)
+AC_ARG_ENABLE(cxx,
+ [AC_HELP_STRING([--enable-cxx],
+ [Build C++ API.])],
+ [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"])
+AC_MSG_RESULT($db_cv_cxx)
+
+AC_MSG_CHECKING(if --enable-debug option specified)
+AC_ARG_ENABLE(debug,
+ [AC_HELP_STRING([--enable-debug],
+ [Build a debugging version.])],
+ [db_cv_debug="$enable_debug"], [db_cv_debug="no"])
+AC_MSG_RESULT($db_cv_debug)
+
+AC_MSG_CHECKING(if --enable-debug_rop option specified)
+AC_ARG_ENABLE(debug_rop,
+ [AC_HELP_STRING([--enable-debug_rop],
+ [Build a version that logs read operations.])],
+ [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"])
+AC_MSG_RESULT($db_cv_debug_rop)
+
+AC_MSG_CHECKING(if --enable-debug_wop option specified)
+AC_ARG_ENABLE(debug_wop,
+ [AC_HELP_STRING([--enable-debug_wop],
+ [Build a version that logs write operations.])],
+ [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"])
+AC_MSG_RESULT($db_cv_debug_wop)
+
+AC_MSG_CHECKING(if --enable-diagnostic option specified)
+AC_ARG_ENABLE(diagnostic,
+ [AC_HELP_STRING([--enable-diagnostic],
+ [Build a version with run-time diagnostics.])],
+ [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"])
+AC_MSG_RESULT($db_cv_diagnostic)
+
+AC_MSG_CHECKING(if --enable-dump185 option specified)
+AC_ARG_ENABLE(dump185,
+ [AC_HELP_STRING([--enable-dump185],
+ [Build db_dump185(1) to dump 1.85 databases.])],
+ [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"])
+AC_MSG_RESULT($db_cv_dump185)
+
+AC_MSG_CHECKING(if --enable-java option specified)
+AC_ARG_ENABLE(java,
+ [AC_HELP_STRING([--enable-java],
+ [Build Java API.])],
+ [db_cv_java="$enable_java"], [db_cv_java="no"])
+AC_MSG_RESULT($db_cv_java)
+
+AC_MSG_CHECKING(if --enable-pthreadsmutexes option specified)
+AC_ARG_ENABLE(pthreadsmutexes,
+ AC_HELP_STRING([--enable-pthreadsmutexes],
+ [Use POSIX pthreads mutexes.]),, enableval="no")
+db_cv_pthreadsmutexes="$enableval"
+case "$enableval" in
+ no) AC_MSG_RESULT(no);;
+yes) AC_MSG_RESULT(yes);;
+esac
+
+AC_MSG_CHECKING(if --enable-posixmutexes option specified)
+AC_ARG_ENABLE(posixmutexes,
+ [AC_HELP_STRING([--enable-posixmutexes],
+ [Force use of POSIX standard mutexes.])],
+ [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
+AC_MSG_RESULT($db_cv_posixmutexes)
+
+AC_MSG_CHECKING(if --enable-rpc option specified)
+AC_ARG_ENABLE(rpc,
+ [AC_HELP_STRING([--enable-rpc],
+ [Build RPC client/server.])],
+ [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"])
+AC_MSG_RESULT($db_cv_rpc)
+
+AC_MSG_CHECKING(if --enable-tcl option specified)
+AC_ARG_ENABLE(tcl,
+ [AC_HELP_STRING([--enable-tcl],
+ [Build Tcl API.])],
+ [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"])
+AC_MSG_RESULT($db_cv_tcl)
+
+AC_MSG_CHECKING(if --enable-test option specified)
+AC_ARG_ENABLE(test,
+ [AC_HELP_STRING([--enable-test],
+ [Configure to run the test suite.])],
+ [db_cv_test="$enable_test"], [db_cv_test="no"])
+AC_MSG_RESULT($db_cv_test)
+
+AC_MSG_CHECKING(if --enable-uimutexes option specified)
+AC_ARG_ENABLE(uimutexes,
+ [AC_HELP_STRING([--enable-uimutexes],
+ [Force use of Unix International mutexes.])],
+ [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"])
+AC_MSG_RESULT($db_cv_uimutexes)
+
+AC_MSG_CHECKING(if --enable-umrw option specified)
+AC_ARG_ENABLE(umrw,
+ [AC_HELP_STRING([--enable-umrw],
+ [Mask harmless uninitialized memory read/writes.])],
+ [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"])
+AC_MSG_RESULT($db_cv_umrw)
+
+AC_MSG_CHECKING([if --with-embedix=DIR option specified])
+AC_ARG_WITH(embedix,
+ [AC_HELP_STRING([--with-embedix=DIR],
+ [Embedix install directory location.])],
+ [with_embedix="$withval"], [with_embedix="no"])
+if test "$with_embedix" = "no"; then
+ db_cv_embedix="no"
+ AC_MSG_RESULT($with_embedix)
+else
+ db_cv_embedix="yes"
+ if test "$with_embedix" = "yes"; then
+ db_cv_path_embedix_install="/opt/Embedix"
+ else
+ db_cv_path_embedix_install="$with_embedix"
+ fi
+ AC_MSG_RESULT($db_cv_path_embedix_install)
+fi
+
+AC_MSG_CHECKING(if --with-mutex=MUTEX option specified)
+AC_ARG_WITH(mutex,
+ [AC_HELP_STRING([--with-mutex=MUTEX],
+ [Selection of non-standard mutexes.])],
+ [with_mutex="$withval"], [with_mutex="no"])
+if test "$with_mutex" = "yes"; then
+ AC_MSG_ERROR([--with-mutex requires a mutex name argument])
+fi
+if test "$with_mutex" != "no"; then
+ db_cv_mutex="$with_mutex"
+fi
+AC_MSG_RESULT($with_mutex)
+
+AC_MSG_CHECKING(if --with-rpm=DIR option specified)
+AC_ARG_WITH(rpm,
+ [AC_HELP_STRING([--with-rpm=DIR],
+ [Directory location of RPM archive.])],
+ [with_rpm="$withval"], [with_rpm="no"])
+if test "$with_rpm" = "no"; then
+ db_cv_rpm="no"
+else
+ if test "$with_rpm" = "yes"; then
+ AC_MSG_ERROR([--with-rpm requires a directory argument])
+ fi
+ db_cv_rpm="yes"
+ db_cv_path_rpm_archive="$with_rpm"
+fi
+AC_MSG_RESULT($with_rpm)
+
+AC_MSG_CHECKING([if --with-tcl=DIR option specified])
+AC_ARG_WITH(tcl,
+ [AC_HELP_STRING([--with-tcl=DIR],
+ [Directory location of tclConfig.sh.])],
+ [with_tclconfig="$withval"], [with_tclconfig="no"])
+AC_MSG_RESULT($with_tclconfig)
+if test "$with_tclconfig" != "no"; then
+ db_cv_tcl="yes"
+fi
+
+AC_MSG_CHECKING([if --with-uniquename=NAME option specified])
+AC_ARG_WITH(uniquename,
+ [AC_HELP_STRING([--with-uniquename=NAME],
+ [Build a uniquely named library.])],
+ [with_uniquename="$withval"], [with_uniquename="_eds"])
+if test "$with_uniquename" = "no"; then
+ db_cv_uniquename="no"
+ AC_MSG_RESULT($with_uniquename)
+else
+ db_cv_uniquename="yes"
+ if test "$with_uniquename" != "yes"; then
+ DB_VERSION_UNIQUE_NAME="$with_uniquename"
+ fi
+ AC_MSG_RESULT($DB_VERSION_UNIQUE_NAME)
+fi
+
+# Embedix requires RPM.
+if test "$db_cv_embedix" = "yes"; then
+ if test "$db_cv_rpm" = "no"; then
+ AC_MSG_ERROR([--with-embedix requires --with-rpm])
+ fi
+fi
+
+# Test requires Tcl
+if test "$db_cv_test" = "yes"; then
+ if test "$db_cv_tcl" = "no"; then
+ AC_MSG_ERROR([--enable-test requires --enable-tcl])
+ fi
+fi])
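+
+# These are all ordinary configure switches; an invocation exercising a few
+# of them at once might look like the following sketch (run from a build
+# directory; the Tcl path is illustrative, and --enable-test only works
+# together with a Tcl configuration, as enforced above):
+#
+#   ../dist/configure --enable-compat185 --enable-debug \
+#       --with-tcl=/usr/lib --enable-test --with-uniquename=_eds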
diff --git a/libdb/dist/aclocal/programs.ac b/libdb/dist/aclocal/programs.ac
new file mode 100644
index 0000000..0c030f3
--- /dev/null
+++ b/libdb/dist/aclocal/programs.ac
@@ -0,0 +1,80 @@
+# $Id$
+
+# Check for programs used in building/installation.
+AC_DEFUN(AM_PROGRAMS_SET, [
+
+AC_CHECK_TOOL(db_cv_path_ar, ar, missing_ar)
+if test "$db_cv_path_ar" = missing_ar; then
+ AC_MSG_ERROR([No ar utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_chmod, chmod, missing_chmod)
+if test "$db_cv_path_chmod" = missing_chmod; then
+ AC_MSG_ERROR([No chmod utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_cp, cp, missing_cp)
+if test "$db_cv_path_cp" = missing_cp; then
+ AC_MSG_ERROR([No cp utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(path_ldconfig, ldconfig, missing_ldconfig)
+ AC_PATH_PROG(db_cv_path_ldconfig, $path_ldconfig, missing_ldconfig)
+ if test "$db_cv_path_ldconfig" != missing_ldconfig; then
+ RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig"
+ RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig"
+ fi
+fi
+
+AC_CHECK_TOOL(db_cv_path_ln, ln, missing_ln)
+if test "$db_cv_path_ln" = missing_ln; then
+ AC_MSG_ERROR([No ln utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_mkdir, mkdir, missing_mkdir)
+if test "$db_cv_path_mkdir" = missing_mkdir; then
+ AC_MSG_ERROR([No mkdir utility found.])
+fi
+
+# We need a complete path for ranlib, because it doesn't exist on some
+# architectures, where the ar utility packages the library itself.
+AC_CHECK_TOOL(path_ranlib, ranlib, missing_ranlib)
+AC_PATH_PROG(db_cv_path_ranlib, $path_ranlib, missing_ranlib)
+
+AC_CHECK_TOOL(db_cv_path_rm, rm, missing_rm)
+if test "$db_cv_path_rm" = missing_rm; then
+ AC_MSG_ERROR([No rm utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_rpm, rpm, missing_rpm)
+ if test "$db_cv_path_rpm" = missing_rpm; then
+ AC_MSG_ERROR([No rpm utility found.])
+ fi
+fi
+
+# We need a complete path for sh, because some implementations of make
+# get upset if SHELL is set to just the command name.
+AC_CHECK_TOOL(path_sh, sh, missing_sh)
+AC_PATH_PROG(db_cv_path_sh, $path_sh, missing_sh)
+if test "$db_cv_path_sh" = missing_sh; then
+ AC_MSG_ERROR([No sh utility found.])
+fi
+
+# Don't strip the binaries if --enable-debug was specified.
+if test "$db_cv_debug" = yes; then
+ db_cv_path_strip=debug_build_no_strip
+else
+ AC_CHECK_TOOL(path_strip, strip, missing_strip)
+ AC_PATH_PROG(db_cv_path_strip, $path_strip, missing_strip)
+fi
+
+if test "$db_cv_test" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_kill, kill, missing_kill)
+ if test "$db_cv_path_kill" = missing_kill; then
+ AC_MSG_ERROR([No kill utility found.])
+ fi
+fi
+
+])
diff --git a/libdb/dist/aclocal/sosuffix.ac b/libdb/dist/aclocal/sosuffix.ac
new file mode 100644
index 0000000..d52363c
--- /dev/null
+++ b/libdb/dist/aclocal/sosuffix.ac
@@ -0,0 +1,69 @@
+# $Id$
+# Determine shared object suffixes.
+#
+# Our method is to use the libtool variable $library_names_spec,
+# set by using AC_PROG_LIBTOOL. This variable is a snippet of shell
+# defined in terms of $versuffix, $release, $libname, $module and $jnimodule.
+# We want to eval it and grab the suffix used for shared objects.
+# By setting $module and $jnimodule to yes/no, we obtain the suffixes
+# used to create dlloadable, or java loadable modules.
+# On many (*nix) systems, these all evaluate to .so, but there
+# are some notable exceptions.
+
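+# As an illustration of the method (typical values, not guaranteed): on a
+# GNU/Linux host the evaluated library_names_spec comes out as something
+# like "libfoo.so.0.0.0 libfoo.so.0 libfoo.so", and on Mac OS X as
+# "libfoo.0.0.0.dylib libfoo.0.dylib libfoo.dylib", so the extraction below
+# yields "so" and "dylib" respectively:
+#
+#   library_names="libfoo.so.0.0.0 libfoo.so.0 libfoo.so"
+#   echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'   # -> so
+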
+# This macro is used internally to discover the suffix for the current
+# settings of $module and $jnimodule. The result is stored in $_SOSUFFIX.
+AC_DEFUN(_SOSUFFIX_INTERNAL, [
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([[a-zA-Z0-9_]]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ AC_MSG_WARN([libtool may not know about this architecture.])
+ AC_MSG_WARN([assuming .$_SOSUFFIX suffix for dynamic libraries.])
+ fi
+ fi
+])
+
+# SOSUFFIX_CONFIG will set the variable SOSUFFIX to be the
+# shared library extension used for general linking, not dlopen.
+AC_DEFUN(SOSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([SOSUFFIX from libtool])
+ module=no
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ SOSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($SOSUFFIX)
+ AC_SUBST(SOSUFFIX)
+])
+
+# MODSUFFIX_CONFIG will set the variable MODSUFFIX to be the
+# shared library extension used for dlopen'ed modules.
+# To discover this, we set $module, simulating libtool's -module option.
+AC_DEFUN(MODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([MODSUFFIX from libtool])
+ module=yes
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ MODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($MODSUFFIX)
+ AC_SUBST(MODSUFFIX)
+])
+
+# JMODSUFFIX_CONFIG will set the variable JMODSUFFIX to be the
+# shared library extension used for JNI modules opened by Java.
+# To discover this, we set $jnimodule, simulating libtool's -jnimodule option.
+# -jnimodule is currently a Sleepycat local extension to libtool.
+AC_DEFUN(JMODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([JMODSUFFIX from libtool])
+ module=yes
+ jnimodule=yes
+ _SOSUFFIX_INTERNAL
+ JMODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($JMODSUFFIX)
+ AC_SUBST(JMODSUFFIX)
+])
+
diff --git a/libdb/dist/aclocal/tcl.ac b/libdb/dist/aclocal/tcl.ac
new file mode 100644
index 0000000..3319df3
--- /dev/null
+++ b/libdb/dist/aclocal/tcl.ac
@@ -0,0 +1,136 @@
+# $Id$
+
+# The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
+# 8.3.0 distribution, with some minor changes. For this reason, license
+# terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as
+# follows (copied from the license.terms file in the Tcl 8.3 distribution):
+#
+# This software is copyrighted by the Regents of the University of
+# California, Sun Microsystems, Inc., Scriptics Corporation,
+# and other parties. The following terms apply to all files associated
+# with the software unless explicitly disclaimed in individual files.
+#
+# The authors hereby grant permission to use, copy, modify, distribute,
+# and license this software and its documentation for any purpose, provided
+# that existing copyright notices are retained in all copies and that this
+# notice is included verbatim in any distributions. No written agreement,
+# license, or royalty fee is required for any of the authorized uses.
+# Modifications to this software may be copyrighted by their authors
+# and need not follow the licensing terms described here, provided that
+# the new terms are clearly indicated on the first page of each file where
+# they apply.
+#
+# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+# MODIFICATIONS.
+#
+# GOVERNMENT USE: If you are acquiring this software on behalf of the
+# U.S. government, the Government shall have only "Restricted Rights"
+# in the software and related documentation as defined in the Federal
+# Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+# are acquiring the software on behalf of the Department of Defense, the
+# software shall be classified as "Commercial Computer Software" and the
+# Government shall have only "Restricted Rights" as defined in Clause
+# 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
+# authors grant the U.S. Government and others acting in its behalf
+# permission to use and distribute the software in accordance with the
+# terms specified in this license.
+
+AC_DEFUN(SC_PATH_TCLCONFIG, [
+ AC_CACHE_VAL(ac_cv_c_tclconfig,[
+
+ # First check to see if --with-tclconfig was specified.
+ if test "${with_tclconfig}" != no; then
+ if test -f "${with_tclconfig}/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+ else
+ AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
+ fi
+ fi
+
+ # check in a few common install locations
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ for i in `ls -d /usr/local/lib 2>/dev/null` ; do
+ if test -f "$i/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd $i; pwd)`
+ break
+ fi
+ done
+ fi
+
+ ])
+
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ TCL_BIN_DIR="# no Tcl configs found"
+ AC_MSG_ERROR(can't find Tcl configuration definitions)
+ else
+ TCL_BIN_DIR=${ac_cv_c_tclconfig}
+ fi
+])
+
+AC_DEFUN(SC_LOAD_TCLCONFIG, [
+ AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
+
+ if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+ AC_MSG_RESULT([loading])
+ . $TCL_BIN_DIR/tclConfig.sh
+ else
+ AC_MSG_RESULT([file not found])
+ fi
+
+ #
+ # The eval is required to do the TCL_DBGX substitution in the
+ # TCL_LIB_FILE variable
+ #
+ eval TCL_LIB_FILE="${TCL_LIB_FILE}"
+ eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+ eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
+
+ #
+ # If the DB Tcl library isn't loaded with the Tcl spec and library
+ # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at
+ # load time. [#4843] Furthermore, with Tcl 8.3, the link flags
+ # given by the Tcl spec are insufficient for our use. [#5779]
+ #
+ case "$host_os" in
+ aix4.[[2-9]].*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG"
+ LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";;
+ aix*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";;
+ esac
+ AC_SUBST(TCL_BIN_DIR)
+ AC_SUBST(TCL_SRC_DIR)
+ AC_SUBST(TCL_LIB_FILE)
+
+ AC_SUBST(TCL_TCLSH)
+ TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
+])
+
+# Optional Tcl API.
+AC_DEFUN(AM_TCL_LOAD, [
+if test "$db_cv_tcl" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Tcl requires shared libraries])
+ fi
+
+ AC_SUBST(TCFLAGS)
+
+ SC_PATH_TCLCONFIG
+ SC_LOAD_TCLCONFIG
+
+ if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
+ TCFLAGS="-I$TCL_PREFIX/include"
+ fi
+
+ INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)"
+fi])
diff --git a/libdb/dist/aclocal/types.ac b/libdb/dist/aclocal/types.ac
new file mode 100644
index 0000000..086bd08
--- /dev/null
+++ b/libdb/dist/aclocal/types.ac
@@ -0,0 +1,146 @@
+# $Id$
+
+# db.h includes <sys/types.h> and <stdio.h>, not the other default includes
+# autoconf usually includes. For that reason, we specify a set of includes
+# for all type checking tests. [#5060]
+AC_DEFUN(DB_INCLUDES, [[
+#include <sys/types.h>
+#include <stdio.h>]])
+
+# Check the sizes we know about, and see if any of them match what's needed.
+#
+# Prefer ints to anything else, because read, write and others historically
+# returned an int.
+AC_DEFUN(AM_SEARCH_USIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_unsigned_int")
+ $1="typedef unsigned int $2;";;
+ "$ac_cv_sizeof_unsigned_char")
+ $1="typedef unsigned char $2;";;
+ "$ac_cv_sizeof_unsigned_short")
+ $1="typedef unsigned short $2;";;
+ "$ac_cv_sizeof_unsigned_long")
+ $1="typedef unsigned long $2;";;
+ *)
+ AC_MSG_ERROR([No unsigned $3-byte integral type]);;
+ esac])
+AC_DEFUN(AM_SEARCH_SSIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_int")
+ $1="typedef int $2;";;
+ "$ac_cv_sizeof_char")
+ $1="typedef char $2;";;
+ "$ac_cv_sizeof_short")
+ $1="typedef short $2;";;
+ "$ac_cv_sizeof_long")
+ $1="typedef long $2;";;
+ *)
+ AC_MSG_ERROR([No signed $3-byte integral type]);;
+ esac])
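+
+# To make the expansion concrete (a sketch, assuming a host where
+# ac_cv_sizeof_unsigned_int is 4, the common case): the call
+# AM_SEARCH_USIZES(u_int32_decl, u_int32_t, 4) made further down reduces to
+# a single shell assignment, later AC_SUBSTed into the generated headers:
+#
+#   u_int32_decl="typedef unsigned int u_int32_t;"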
+
+# Check for the standard system types.
+AC_DEFUN(AM_TYPES, [
+
+# We need to know the sizes of various objects on this system.
+# We don't use the SIZEOF_XXX values created by autoconf.
+AC_CHECK_SIZEOF(char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(size_t,, DB_INCLUDES)
+AC_CHECK_SIZEOF(char *,, DB_INCLUDES)
+
+# We require off_t and size_t, and we don't try to substitute our own
+# if we can't find them.
+AC_CHECK_TYPE(off_t,,, DB_INCLUDES)
+if test "$ac_cv_type_off_t" = no; then
+ AC_MSG_ERROR([No off_t type.])
+fi
+
+AC_CHECK_TYPE(size_t,,, DB_INCLUDES)
+if test "$ac_cv_type_size_t" = no; then
+ AC_MSG_ERROR([No size_t type.])
+fi
+
+# We look for u_char, u_short, u_int, u_long -- if we can't find them,
+# we create our own.
+AC_SUBST(u_char_decl)
+AC_CHECK_TYPE(u_char,,, DB_INCLUDES)
+if test "$ac_cv_type_u_char" = no; then
+ u_char_decl="typedef unsigned char u_char;"
+fi
+
+AC_SUBST(u_short_decl)
+AC_CHECK_TYPE(u_short,,, DB_INCLUDES)
+if test "$ac_cv_type_u_short" = no; then
+ u_short_decl="typedef unsigned short u_short;"
+fi
+
+AC_SUBST(u_int_decl)
+AC_CHECK_TYPE(u_int,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int" = no; then
+ u_int_decl="typedef unsigned int u_int;"
+fi
+
+AC_SUBST(u_long_decl)
+AC_CHECK_TYPE(u_long,,, DB_INCLUDES)
+if test "$ac_cv_type_u_long" = no; then
+ u_long_decl="typedef unsigned long u_long;"
+fi
+
+AC_SUBST(u_int8_decl)
+AC_CHECK_TYPE(u_int8_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int8_t" = no; then
+ AM_SEARCH_USIZES(u_int8_decl, u_int8_t, 1)
+fi
+
+AC_SUBST(u_int16_decl)
+AC_CHECK_TYPE(u_int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int16_t" = no; then
+ AM_SEARCH_USIZES(u_int16_decl, u_int16_t, 2)
+fi
+
+AC_SUBST(int16_decl)
+AC_CHECK_TYPE(int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int16_t" = no; then
+ AM_SEARCH_SSIZES(int16_decl, int16_t, 2)
+fi
+
+AC_SUBST(u_int32_decl)
+AC_CHECK_TYPE(u_int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int32_t" = no; then
+ AM_SEARCH_USIZES(u_int32_decl, u_int32_t, 4)
+fi
+
+AC_SUBST(int32_decl)
+AC_CHECK_TYPE(int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int32_t" = no; then
+ AM_SEARCH_SSIZES(int32_decl, int32_t, 4)
+fi
+
+# Check for ssize_t -- if none exists, find a signed integral type that's
+# the same size as a size_t.
+AC_SUBST(ssize_t_decl)
+AC_CHECK_TYPE(ssize_t,,, DB_INCLUDES)
+if test "$ac_cv_type_ssize_t" = no; then
+ AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)
+fi
+
+# Find the largest integral type.
+AC_SUBST(db_align_t_decl)
+AC_CHECK_TYPE(unsigned long long,,, DB_INCLUDES)
+if test "$ac_cv_type_unsigned_long_long" = no; then
+ db_align_t_decl="typedef unsigned long db_align_t;"
+else
+ db_align_t_decl="typedef unsigned long long db_align_t;"
+fi
+
+# Find an integral type which is the same size as a pointer.
+AC_SUBST(db_alignp_t_decl)
+AM_SEARCH_USIZES(db_alignp_t_decl, db_alignp_t, $ac_cv_sizeof_char_p)
+
+])
diff --git a/libdb/dist/aclocal_java/ac_check_class.ac b/libdb/dist/aclocal_java/ac_check_class.ac
new file mode 100644
index 0000000..37c96de
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_check_class.ac
@@ -0,0 +1,107 @@
+dnl @synopsis AC_CHECK_CLASS
+dnl
+dnl AC_CHECK_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_CHECK_CLASS],[
+AC_REQUIRE([AC_PROG_JAVA])
+ac_var_name=`echo $1 | sed 's/\./_/g'`
+dnl Normally I'd use an AC_CACHE_CHECK here, but since the variable name is
+dnl dynamic I need an extra level of extraction.
+AC_MSG_CHECKING([for $1 class])
+AC_CACHE_VAL(ac_cv_class_$ac_var_name, [
+if test x$ac_cv_prog_uudecode_base64 = xyes; then
+dnl /**
+dnl * Test.java: used to test dynamically if a class exists.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl Class lib;
+dnl if (argv.length < 1)
+dnl {
+dnl System.err.println ("Missing argument");
+dnl System.exit (77);
+dnl }
+dnl try
+dnl {
+dnl lib = Class.forName (argv[0]);
+dnl }
+dnl catch (ClassNotFoundException e)
+dnl {
+dnl System.exit (1);
+dnl }
+dnl lib = null;
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ
+AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt
+ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV
+ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp
+VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM
+amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi
+AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B
+AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA
+AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN
+uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK
+AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA
+JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA
+JwAAAAIAKA==
+====
+EOF
+ if uudecode$EXEEXT Test.uue; then
+ :
+ else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+ fi
+ rm -f Test.uue
+ if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then
+ eval "ac_cv_class_$ac_var_name=yes"
+ else
+ eval "ac_cv_class_$ac_var_name=no"
+ fi
+ rm -f Test.class
+else
+ AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"],
+ [eval "ac_cv_class_$ac_var_name=no"])
+fi
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`"
+HAVE_LAST_CLASS=$ac_var_val
+if test x$ac_var_val = xyes; then
+ ifelse([$2], , :, [$2])
+else
+ ifelse([$3], , :, [$3])
+fi
+])
+dnl for some reason the above statement didn't fall through here?
+dnl do scripts have variable scoping?
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+AC_MSG_RESULT($ac_var_val)
+])
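As a usage sketch (not part of this tree's configure.in), AC_CHECK_CLASS is normally called after the Java tool checks; the class name and messages below are illustrative:

    AC_CHECK_CLASSPATH
    AC_PROG_JAVAC
    AC_PROG_JAVA
    AC_CHECK_CLASS(org.xml.sax.Parser,
        [AC_MSG_NOTICE([SAX parser class found])],
        [AC_MSG_WARN([org.xml.sax.Parser not found; check CLASSPATH])])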
diff --git a/libdb/dist/aclocal_java/ac_check_classpath.ac b/libdb/dist/aclocal_java/ac_check_classpath.ac
new file mode 100644
index 0000000..4dff4bc
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_check_classpath.ac
@@ -0,0 +1,23 @@
+dnl @synopsis AC_CHECK_CLASSPATH
+dnl
+dnl AC_CHECK_CLASSPATH just displays the CLASSPATH, for the edification
+dnl of the user.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_CHECK_CLASSPATH],[
+if test "x$CLASSPATH" = x; then
+ echo "You have no CLASSPATH, I hope it is good"
+else
+ echo "You have CLASSPATH $CLASSPATH, hope it is correct"
+fi
+])
diff --git a/libdb/dist/aclocal_java/ac_check_junit.ac b/libdb/dist/aclocal_java/ac_check_junit.ac
new file mode 100644
index 0000000..efc33af
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_check_junit.ac
@@ -0,0 +1,54 @@
+dnl @synopsis AC_CHECK_JUNIT
+dnl
+dnl AC_CHECK_JUNIT tests the availability of the Junit testing
+dnl framework, and sets some variables for conditional compilation
+dnl of the test suite by automake.
+dnl
+dnl If available, JUNIT is set to a command launching the text
+dnl based user interface of Junit, @JAVA_JUNIT@ is set to $JAVA_JUNIT
+dnl and @TESTS_JUNIT@ is set to $TESTS_JUNIT, otherwise they are set
+dnl to empty values.
+dnl
+dnl You can use these variables in your Makefile.am file like this:
+dnl
+dnl # Some of the following classes are built only if junit is available
+dnl JAVA_JUNIT = Class1Test.java Class2Test.java AllJunitTests.java
+dnl
+dnl noinst_JAVA = Example1.java Example2.java @JAVA_JUNIT@
+dnl
+dnl EXTRA_JAVA = $(JAVA_JUNIT)
+dnl
+dnl TESTS_JUNIT = AllJunitTests
+dnl
+dnl TESTS = StandaloneTest1 StandaloneTest2 @TESTS_JUNIT@
+dnl
+dnl EXTRA_TESTS = $(TESTS_JUNIT)
+dnl
+dnl AllJunitTests :
+dnl echo "#! /bin/sh" > $@
+dnl echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@
+dnl chmod +x $@
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_CHECK_JUNIT],[
+AC_CACHE_VAL(ac_cv_prog_JUNIT,[
+AC_CHECK_CLASS(junit.textui.TestRunner)
+if test x"`eval 'echo $ac_cv_class_junit_textui_TestRunner'`" != xno ; then
+ ac_cv_prog_JUNIT='$(CLASSPATH_ENV) $(JAVA) $(JAVAFLAGS) junit.textui.TestRunner'
+fi])
+AC_MSG_CHECKING([for junit])
+if test x"`eval 'echo $ac_cv_prog_JUNIT'`" != x ; then
+ JUNIT="$ac_cv_prog_JUNIT"
+ JAVA_JUNIT='$(JAVA_JUNIT)'
+ TESTS_JUNIT='$(TESTS_JUNIT)'
+else
+ JUNIT=
+ JAVA_JUNIT=
+ TESTS_JUNIT=
+fi
+AC_MSG_RESULT($JAVA_JUNIT)
+AC_SUBST(JUNIT)
+AC_SUBST(JAVA_JUNIT)
+AC_SUBST(TESTS_JUNIT)])
diff --git a/libdb/dist/aclocal_java/ac_check_rqrd_class.ac b/libdb/dist/aclocal_java/ac_check_rqrd_class.ac
new file mode 100644
index 0000000..79f378f
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_check_rqrd_class.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_CHECK_RQRD_CLASS
+dnl
+dnl AC_CHECK_RQRD_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file and fails if it doesn't exist.
+dnl Its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+
+AC_DEFUN([AC_CHECK_RQRD_CLASS],[
+CLASS=`echo $1|sed 's/\./_/g'`
+AC_CHECK_CLASS($1)
+if test "$HAVE_LAST_CLASS" = "no"; then
+ AC_MSG_ERROR([Required class $1 missing, exiting.])
+fi
+])
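In contrast to AC_CHECK_CLASS, a missing class makes AC_CHECK_RQRD_CLASS abort configure. A brief sketch, reusing the class names from the sample configure.in in the AC_PROG_JAVA documentation:

    dnl Hard requirements: stop configure if either class is missing.
    AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
    AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)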
diff --git a/libdb/dist/aclocal_java/ac_java_options.ac b/libdb/dist/aclocal_java/ac_java_options.ac
new file mode 100644
index 0000000..7d993c6
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_java_options.ac
@@ -0,0 +1,32 @@
+dnl @synopsis AC_JAVA_OPTIONS
+dnl
+dnl AC_JAVA_OPTIONS adds configure command-line options used by the Java M4
+dnl macros. This macro is optional.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_JAVA_OPTIONS],[
+AC_ARG_WITH(java-prefix,
+ [ --with-java-prefix=PFX prefix where Java runtime is installed (optional)])
+AC_ARG_WITH(javac-flags,
+ [ --with-javac-flags=FLAGS flags to pass to the Java compiler (optional)])
+AC_ARG_WITH(java-flags,
+ [ --with-java-flags=FLAGS flags to pass to the Java VM (optional)])
+JAVAPREFIX=$with_java_prefix
+JAVACFLAGS=$with_javac_flags
+JAVAFLAGS=$with_java_flags
+AC_SUBST(JAVAPREFIX)dnl
+AC_SUBST(JAVACFLAGS)dnl
+AC_SUBST(JAVAFLAGS)dnl
+AC_SUBST(JAVA)dnl
+AC_SUBST(JAVAC)dnl
+])
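A hedged example of how these options appear on the configure command line; the prefix and flag values are placeholders, not defaults used anywhere in this tree:

    ./configure --with-java-prefix=/usr/local/jdk \
                --with-javac-flags='-g -deprecation' \
                --with-java-flags='-Xmx64m'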
diff --git a/libdb/dist/aclocal_java/ac_jni_include_dirs.ac b/libdb/dist/aclocal_java/ac_jni_include_dirs.ac
new file mode 100644
index 0000000..bf849ae
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_jni_include_dirs.ac
@@ -0,0 +1,112 @@
+dnl @synopsis AC_JNI_INCLUDE_DIR
+dnl
+dnl AC_JNI_INCLUDE_DIR finds include directories needed
+dnl for compiling programs using the JNI interface.
+dnl
+dnl JNI include directories are usually in the Java distribution.
+dnl This is deduced from the value of JAVAC. When this macro
+dnl completes, a list of directories is left in the variable
+dnl JNI_INCLUDE_DIRS.
+dnl
+dnl Example usage follows:
+dnl
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+dnl do
+dnl CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+dnl done
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl Note: This macro can work with the autoconf M4 macros for Java programs.
+dnl This particular macro is not part of the original set of macros.
+dnl
+dnl @author Don Anderson <dda@sleepycat.com>
+dnl @version $Id$
+dnl
+AC_DEFUN(AC_JNI_INCLUDE_DIR,[
+
+JNI_INCLUDE_DIRS=""
+
+test "x$JAVAC" = x && AC_MSG_ERROR(['$JAVAC' undefined])
+AC_PATH_PROG(_ACJNI_JAVAC, $JAVAC, $JAVAC)
+test ! -x "$_ACJNI_JAVAC" && AC_MSG_ERROR([$JAVAC could not be found in path])
+AC_MSG_CHECKING(absolute path of $JAVAC)
+case "$_ACJNI_JAVAC" in
+/*) AC_MSG_RESULT($_ACJNI_JAVAC);;
+*) AC_MSG_ERROR([$_ACJNI_JAVAC is not an absolute path name]);;
+esac
+
+_ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC")
+_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
+case "$host_os" in
+ darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ _JINC="$_JTOPDIR/Headers";;
+ *) _JINC="$_JTOPDIR/include";;
+esac
+
+# If we find jni.h in /usr/include, then it's not a java-only tree, so
+# don't add /usr/include or subdirectories to the list of includes.
+# An extra -I/usr/include can foul things up with newer gcc's.
+if test -f "$_JINC/jni.h"; then
+ if test "$_JINC" != "/usr/include"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC"
+ fi
+else
+ _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ if test -f "$_JTOPDIR/include/jni.h"; then
+ if test "$_JTOPDIR" != "/usr"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include"
+ fi
+ else
+ AC_MSG_ERROR([cannot find java include files])
+ fi
+fi
+
+# get the likely subdirectories for system specific java includes
+if test "$_JTOPDIR" != "/usr"; then
+ case "$host_os" in
+ aix*) _JNI_INC_SUBDIRS="aix";;
+ bsdi*) _JNI_INC_SUBDIRS="bsdos";;
+ linux*) _JNI_INC_SUBDIRS="linux genunix";;
+ osf*) _JNI_INC_SUBDIRS="alpha";;
+ solaris*) _JNI_INC_SUBDIRS="solaris";;
+ *) _JNI_INC_SUBDIRS="genunix";;
+ esac
+fi
+
+# add any subdirectories that are present
+for _JINCSUBDIR in $_JNI_INC_SUBDIRS
+do
+ if test -d "$_JTOPDIR/include/$_JINCSUBDIR"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$_JINCSUBDIR"
+ fi
+done
+])
+
+# _ACJNI_FOLLOW_SYMLINKS <path>
+# Follows symbolic links on <path>,
+# finally setting variable _ACJNI_FOLLOWED
+# --------------------
+AC_DEFUN(_ACJNI_FOLLOW_SYMLINKS,[
+# find the include directory relative to the javac executable
+_cur="$1"
+while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
+ AC_MSG_CHECKING(symlink for $_cur)
+ _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
+ case "$_slink" in
+ /*) _cur="$_slink";;
+ # 'X' avoids triggering unwanted echo options.
+ *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";;
+ esac
+ AC_MSG_RESULT($_cur)
+done
+_ACJNI_FOLLOWED="$_cur"
+])# _ACJNI
+
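Putting the loop from the macro's documentation into context, a minimal configure.in sequence for a JNI build might look like this (nothing beyond AC_PROG_JAVAC and the documented loop is assumed):

    AC_PROG_JAVAC
    AC_JNI_INCLUDE_DIR
    for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
    do
        CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
    done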
diff --git a/libdb/dist/aclocal_java/ac_prog_jar.ac b/libdb/dist/aclocal_java/ac_prog_jar.ac
new file mode 100644
index 0000000..ac71b60
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_jar.ac
@@ -0,0 +1,36 @@
+dnl @synopsis AC_PROG_JAR
+dnl
+dnl AC_PROG_JAR tests for an existing jar program. It uses the environment
+dnl variable JAR then tests in sequence various common jar programs.
+dnl
+dnl If you want to force a specific jar program:
+dnl
+dnl - at the configure.in level, set JAR=yourjar before calling
+dnl AC_PROG_JAR
+dnl
+dnl - at the configure level, setenv JAR
+dnl
+dnl You can use the JAR variable in your Makefile.in, with @JAR@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAR],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT)
+else
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX)
+fi
+test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
diff --git a/libdb/dist/aclocal_java/ac_prog_java.ac b/libdb/dist/aclocal_java/ac_prog_java.ac
new file mode 100644
index 0000000..8c2b24d
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_java.ac
@@ -0,0 +1,77 @@
+dnl @synopsis AC_PROG_JAVA
+dnl
+dnl Here is a summary of the main macros:
+dnl
+dnl AC_PROG_JAVAC: finds a Java compiler.
+dnl
+dnl AC_PROG_JAVA: finds a Java virtual machine.
+dnl
+dnl AC_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
+dnl
+dnl AC_CHECK_RQRD_CLASS: finds if we have the given class and stops otherwise.
+dnl
+dnl AC_TRY_COMPILE_JAVA: attempts to compile user-given source.
+dnl
+dnl AC_TRY_RUN_JAVA: attempts to compile and run user-given source.
+dnl
+dnl AC_JAVA_OPTIONS: adds Java configure options.
+dnl
+dnl AC_PROG_JAVA tests for an existing Java virtual machine. It uses the
+dnl environment variable JAVA, then tests in sequence various common Java
+dnl virtual machines. For political reasons, it starts with the free ones.
+dnl You *must* call [AC_PROG_JAVAC] before it.
+dnl
+dnl If you want to force a specific VM:
+dnl
+dnl - at the configure.in level, set JAVA=yourvm before calling AC_PROG_JAVA
+dnl (but after AC_INIT)
+dnl
+dnl - at the configure level, setenv JAVA
+dnl
+dnl You can use the JAVA variable in your Makefile.in, with @JAVA@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding virtual machines (rationale: most Java programs
+dnl cannot run with some VMs, such as kaffe).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl A Web page, with a link to the latest CVS snapshot is at
+dnl <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
+dnl
+dnl This is a sample configure.in
+dnl Process this file with autoconf to produce a configure script.
+dnl
+dnl AC_INIT(UnTag.java)
+dnl
+dnl dnl Checks for programs.
+dnl AC_CHECK_CLASSPATH
+dnl AC_PROG_JAVAC
+dnl AC_PROG_JAVA
+dnl
+dnl dnl Checks for classes
+dnl AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
+dnl AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
+dnl
+dnl AC_OUTPUT(Makefile)
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVA],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test x$JAVAPREFIX = x; then
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT)
+else
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT, $JAVAPREFIX)
+fi
+test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
+AC_PROG_JAVA_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/libdb/dist/aclocal_java/ac_prog_java_works.ac b/libdb/dist/aclocal_java/ac_prog_java_works.ac
new file mode 100644
index 0000000..8c4759f
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_java_works.ac
@@ -0,0 +1,97 @@
+dnl @synopsis AC_PROG_JAVA_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVA_WORKS], [
+AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes)
+if test x$uudecode = xyes; then
+AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [
+dnl /**
+dnl * Test.java: used to test if the Java compiler works.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s
+YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG
+aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB
+AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB
+AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ=
+====
+EOF
+if uudecode$EXEEXT Test.uue; then
+ ac_cv_prog_uudecode_base64=yes
+else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+fi
+rm -f Test.uue])
+fi
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ rm -f Test.class
+ AC_MSG_WARN([I have to compile Test.class from scratch])
+ if test x$ac_cv_prog_javac_works = xno; then
+ AC_MSG_ERROR([Cannot compile java source. $JAVAC does not work properly])
+ fi
+ if test x$ac_cv_prog_javac_works = x; then
+ AC_PROG_JAVAC
+ fi
+fi
+AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+TEST=Test
+changequote(, )dnl
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+public static void main (String args[]) {
+ System.exit (0);
+} }
+EOF
+changequote([, ])dnl
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then
+ :
+ else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?))
+ fi
+fi
+if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then
+ ac_cv_prog_java_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?))
+fi
+rm -fr $JAVA_TEST $CLASS_TEST Test.uue
+])
+AC_PROVIDE([$0])dnl
+]
+)
diff --git a/libdb/dist/aclocal_java/ac_prog_javac.ac b/libdb/dist/aclocal_java/ac_prog_javac.ac
new file mode 100644
index 0000000..d39030c
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_javac.ac
@@ -0,0 +1,43 @@
+dnl @synopsis AC_PROG_JAVAC
+dnl
+dnl AC_PROG_JAVAC tests for an existing Java compiler. It uses the environment
+dnl variable JAVAC, then tests in sequence various common Java compilers. For
+dnl political reasons, it starts with the free ones.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_PROG_JAVAC
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl You can use the JAVAC variable in your Makefile.in, with @JAVAC@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding compilers (rationale: most Java programs cannot compile
+dnl with some compilers, such as guavac).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVAC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT)
+else
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX)
+fi
+test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH])
+AC_PROG_JAVAC_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/libdb/dist/aclocal_java/ac_prog_javac_works.ac b/libdb/dist/aclocal_java/ac_prog_javac_works.ac
new file mode 100644
index 0000000..e2b1252
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_javac_works.ac
@@ -0,0 +1,35 @@
+dnl @synopsis AC_PROG_JAVAC_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVAC_WORKS],[
+AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then
+ ac_cv_prog_javac_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)])
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+])
+AC_PROVIDE([$0])dnl
+])
diff --git a/libdb/dist/aclocal_java/ac_prog_javadoc.ac b/libdb/dist/aclocal_java/ac_prog_javadoc.ac
new file mode 100644
index 0000000..f879c90
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_javadoc.ac
@@ -0,0 +1,37 @@
+dnl @synopsis AC_PROG_JAVADOC
+dnl
+dnl AC_PROG_JAVADOC tests for an existing javadoc generator. It uses the environment
+dnl variable JAVADOC, then tests in sequence various common javadoc generators.
+dnl
+dnl If you want to force a specific generator:
+dnl
+dnl - at the configure.in level, set JAVADOC=yourgenerator before calling
+dnl AC_PROG_JAVADOC
+dnl
+dnl - at the configure level, setenv JAVADOC
+dnl
+dnl You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVADOC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT)
+else
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc, $JAVAPREFIX)
+fi
+test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
+
diff --git a/libdb/dist/aclocal_java/ac_prog_javah.ac b/libdb/dist/aclocal_java/ac_prog_javah.ac
new file mode 100644
index 0000000..b10b561
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_prog_javah.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_PROG_JAVAH
+dnl
+dnl AC_PROG_JAVAH tests the availability of the javah header generator
+dnl and looks for the jni.h header file. If available, JAVAH is set to
+dnl the full path of javah and CPPFLAGS is updated accordingly.
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_PROG_JAVAH],[
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_PATH_PROG(JAVAH,javah)
+if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then
+ AC_TRY_CPP([#include <jni.h>],,[
+ ac_save_CPPFLAGS="$CPPFLAGS"
+changequote(, )dnl
+ ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'`
+ ac_machdep=`echo $build_os | sed 's,[-0-9].*,,'`
+changequote([, ])dnl
+ CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep"
+ AC_TRY_CPP([#include <jni.h>],
+ ac_save_CPPFLAGS="$CPPFLAGS",
+ AC_MSG_WARN([unable to include <jni.h>]))
+ CPPFLAGS="$ac_save_CPPFLAGS"])
+fi])
diff --git a/libdb/dist/aclocal_java/ac_try_compile_java.ac b/libdb/dist/aclocal_java/ac_try_compile_java.ac
new file mode 100644
index 0000000..3f8f105
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_try_compile_java.ac
@@ -0,0 +1,39 @@
+dnl @synopsis AC_TRY_COMPILE_JAVA
+dnl
+dnl AC_TRY_COMPILE_JAVA attempts to compile user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_TRY_COMPILE_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
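A sketch of invoking AC_TRY_COMPILE_JAVA from configure.in; the imported class, method body, and messages are illustrative only:

    AC_TRY_COMPILE_JAVA([java.util.zip.ZipFile],
        [public void check() { }],
        [AC_MSG_NOTICE([test class compiled])],
        [AC_MSG_WARN([test class failed to compile])])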
diff --git a/libdb/dist/aclocal_java/ac_try_run_javac.ac b/libdb/dist/aclocal_java/ac_try_run_javac.ac
new file mode 100644
index 0000000..b529bf4
--- /dev/null
+++ b/libdb/dist/aclocal_java/ac_try_run_javac.ac
@@ -0,0 +1,40 @@
+dnl @synopsis AC_TRY_RUN_JAVA
+dnl
+dnl AC_TRY_RUN_JAVA attempts to compile and run user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id$
+dnl
+AC_DEFUN([AC_TRY_RUN_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+AC_REQUIRE([AC_PROG_JAVA])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
diff --git a/libdb/dist/buildrel b/libdb/dist/buildrel
new file mode 100644
index 0000000..4bc1e9f
--- /dev/null
+++ b/libdb/dist/buildrel
@@ -0,0 +1,109 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the distribution archives.
+#
+# A set of commands intended to be cut and pasted into a csh window.
+
+# Development tree, release home.
+setenv D `pwd`
+
+# Update the release number.
+cd $D/dist
+vi RELEASE
+setenv VERSION \
+`sh -c '. RELEASE; echo $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH'`
+echo "Version: $VERSION"
+
+# Make sure the source tree is up-to-date, generate new support files, and
+# commit anything that's changed.
+cd $D && cvs -q update
+cd $D/dist && sh s_all
+cd $D && cvs -q commit
+
+# Copy a development tree into a release tree.
+setenv R /var/tmp/db-$VERSION
+rm -rf $R && mkdir -p $R
+cd $D && tar cf - \
+`cvs -q status | sed -n -e "/Repository/s;.*/CVSROOT/db/;;" -e "s/,v//p"` | \
+(cd $R && tar xpf -)
+
+# Fix symbolic links and permissions.
+cd $R/dist && sh s_perm
+cd $R/dist && sh s_symlink
+
+# Build a version.
+cd $R && rm -rf build_run && mkdir build_run
+cd $R/build_run && ~bostic/bin/dbconf && make >& mklog
+
+# Smoke test.
+cd $R/build_run && ./ex_access
+
+# Build the documentation.
+cd $R/docs_src && sh build clean
+cd $R/docs_src && sh build |& sed '/.html$/d'
+
+# Check the install
+cd $R/build_run && make prefix=`pwd`/BDB install
+
+# Clean up the tree.
+cd $R && rm -rf build_run docs_src
+cd $R && rm -rf test/TODO test/upgrade test_perf test_purify
+cd $R && rm -rf test_server test_thread test_vxworks test_xa
+
+# ACQUIRE ROOT PRIVILEGES
+cd $R && find . -type d | xargs chmod 775
+cd $R && find . -type f | xargs chmod 444
+cd $R && chmod 664 build_win32/*.dsp
+cd $R/dist && sh s_perm
+chown -R 100.100 $R
+# DISCARD ROOT PRIVILEGES
+
+# Compare this release with the last one.
+set LR=3.1.X
+cd $R/.. && gzcat /a/releases/db-${LR}.tar.gz | tar xf -
+cd $R/../db-${LR} && find . | sort > /tmp/__OLD
+cd $R && find . | sort > /tmp/__NEW
+diff -c /tmp/__OLD /tmp/__NEW
+
+# Create the crypto tar archive release.
+setenv T "$R/../db-$VERSION.tar.gz"
+cd $R/.. && tar cf - db-$VERSION | gzip --best > $T
+chmod 444 $T
+
+# Create the non-crypto tree.
+setenv RNC "$R/../db-$VERSION.NC"
+rm -rf $RNC $R/../__TMP && mkdir $R/../__TMP
+cd $R/../__TMP && gzcat $T | tar xpf - && mv -i db-$VERSION $RNC
+cd $R && rm -rf $R/../__TMP
+cd $RNC/dist && sh s_crypto
+
+# ACQUIRE ROOT PRIVILEGES
+cd $RNC && find . -type d | xargs chmod 775
+cd $RNC && find . -type f | xargs chmod 444
+cd $RNC && chmod 664 build_win32/*.dsp
+cd $RNC/dist && sh s_perm
+chown -R 100.100 $RNC
+# DISCARD ROOT PRIVILEGES
+
+# Create the non-crypto tar archive release.
+setenv T "$R/../db-$VERSION.NC.tar.gz"
+cd $RNC/.. && tar cf - db-$VERSION.NC | gzip --best > $T
+chmod 444 $T
+
+# Remove symbolic links to tags files. They're large and we don't want
+# to store real symbolic links in the zip archive for portability reasons.
+# ACQUIRE ROOT PRIVILEGES
+cd $R && rm -f `find . -type l -name 'tags'`
+cd $RNC && rm -f `find . -type l -name 'tags'`
+# DISCARD ROOT PRIVILEGES
+
+# Create the crypto zip archive release.
+setenv T "$R/../db-$VERSION.zip"
+cd $R/.. && zip -r - db-$VERSION > $T
+chmod 444 $T
+
+# Create the non-crypto zip archive release.
+setenv T "$R/../db-$VERSION.NC.zip"
+cd $RNC/.. && zip -r - db-$VERSION.NC > $T
+chmod 444 $T
diff --git a/libdb/dist/config.guess b/libdb/dist/config.guess
new file mode 100755
index 0000000..d6244fb
--- /dev/null
+++ b/libdb/dist/config.guess
@@ -0,0 +1,1354 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-23'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Per Bothner <per@bothner.com>.
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# This shell variable is my proudest work .. or something. --bje
+
+set_cc_for_build='tmpdir=${TMPDIR-/tmp}/config-guess-$$ ;
+(old=`umask` && umask 077 && mkdir $tmpdir && umask $old && unset old)
+ || (echo "$me: cannot create $tmpdir" >&2 && exit 1) ;
+dummy=$tmpdir/dummy ;
+files="$dummy.c $dummy.o $dummy.rel $dummy" ;
+trap '"'"'rm -f $files; rmdir $tmpdir; exit 1'"'"' 1 2 15 ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c $dummy.c -c -o $dummy.o) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ rm -f $files ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ;
+unset files'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit 0 ;;
+ amiga:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ arc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ hp300:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mac68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ macppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme88k:OpenBSD:*:*)
+ echo m88k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvmeppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ pmax:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sgi:OpenBSD:*:*)
+ echo mipseb-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sun3:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ wgrisc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ *:OpenBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ alpha:OSF1:*:*)
+ if test $UNAME_RELEASE = "V4.0"; then
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ fi
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ eval $set_cc_for_build
+ cat <<EOF >$dummy.s
+ .data
+\$Lformat:
+ .byte 37,100,45,37,120,10,0 # "%d-%x\n"
+
+ .text
+ .globl main
+ .align 4
+ .ent main
+main:
+ .frame \$30,16,\$26,0
+ ldgp \$29,0(\$27)
+ .prologue 1
+ .long 0x47e03d80 # implver \$0
+ lda \$2,-1
+ .long 0x47e20c21 # amask \$2,\$1
+ lda \$16,\$Lformat
+ mov \$0,\$17
+ not \$1,\$18
+ jsr \$26,printf
+ ldgp \$29,0(\$26)
+ mov 0,\$16
+ jsr \$26,exit
+ .end main
+EOF
+ $CC_FOR_BUILD $dummy.s -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ case `$dummy` in
+ 0-0)
+ UNAME_MACHINE="alpha"
+ ;;
+ 1-0)
+ UNAME_MACHINE="alphaev5"
+ ;;
+ 1-1)
+ UNAME_MACHINE="alphaev56"
+ ;;
+ 1-101)
+ UNAME_MACHINE="alphapca56"
+ ;;
+ 2-303)
+ UNAME_MACHINE="alphaev6"
+ ;;
+ 2-307)
+ UNAME_MACHINE="alphaev67"
+ ;;
+ 2-1307)
+ UNAME_MACHINE="alphaev68"
+ ;;
+ 3-1307)
+ UNAME_MACHINE="alphaev7"
+ ;;
+ esac
+ fi
+ rm -f $dummy.s $dummy && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit 0 ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit 0 ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit 0;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit 0 ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit 0 ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit 0 ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit 0;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit 0;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit 0 ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit 0 ;;
+ DRS?6000:UNIX_SV:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7 && exit 0 ;;
+ esac ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit 0 ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit 0 ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" through "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible with TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit 0 ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit 0 ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit 0 ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit 0 ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit 0 ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy \
+ && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
+ && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit 0 ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit 0 ;;
+ Night_Hawk:*:*:PowerMAX_OS)
+ echo powerpc-harris-powermax
+ exit 0 ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit 0 ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit 0 ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit 0 ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit 0 ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit 0 ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit 0 ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo rs6000-ibm-aix3.2.5
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit 0 ;;
+ *:AIX:*:[45])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit 0 ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit 0 ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit 0 ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit 0 ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit 0 ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit 0 ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit 0 ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null) && HP_ARCH=`$dummy`
+ if test -z "$HP_ARCH"; then HP_ARCH=hppa; fi
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ fi ;;
+ esac
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo unknown-hitachi-hiuxwe2
+ exit 0 ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit 0 ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit 0 ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit 0 ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit 0 ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit 0 ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit 0 ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit 0 ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3D:*:*:*)
+ echo alpha-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit 0 ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:FreeBSD:*:*)
+ # Determine whether the default compiler uses glibc.
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #if __GLIBC__ >= 2
+ LIBC=gnu
+ #else
+ LIBC=
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`${LIBC:+-$LIBC}
+ exit 0 ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit 0 ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit 0 ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit 0 ;;
+ x86:Interix*:3*)
+ echo i386-pc-interix3
+ exit 0 ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # change UNAME_MACHINE based on the output of uname instead of i386?
+ echo i386-pc-interix
+ exit 0 ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit 0 ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit 0 ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ *:GNU:*:*)
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit 0 ;;
+ arm*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ mips:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mipsel
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${CPU}" != x && echo "${CPU}-pc-linux-gnu" && exit 0
+ ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit 0 ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit 0 ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit 0 ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit 0 ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit 0 ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit 0 ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit 0 ;;
+ i*86:Linux:*:*)
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ # Set LC_ALL=C to ensure ld outputs messages in English.
+ ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
+ | sed -ne '/supported targets:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported targets: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_targets" in
+ elf32-i386)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ a.out-i386-linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit 0 ;;
+ coff-i386)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit 0 ;;
+ "")
+ # Either a pre-BFD a.out linker (linux-gnuoldld) or
+ # one that does not give us useful --help.
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
+ exit 0 ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #ifdef __ELF__
+ # ifdef __GLIBC__
+ # if __GLIBC__ >= 2
+ LIBC=gnu
+ # else
+ LIBC=gnulibc1
+ # endif
+ # else
+ LIBC=gnulibc1
+ # endif
+ #else
+ #ifdef __INTEL_COMPILER
+ LIBC=gnu
+ #else
+ LIBC=gnuaout
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0
+ test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
+ ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # Earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit 0 ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit 0 ;;
+ i*86:*:5:[78]*)
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit 0 ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit 0 ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m always prints 'pc' for DJGPP, but it prints nothing about
+ # the processor, so we play it safe by assuming i386.
+ echo i386-pc-msdosdjgpp
+ exit 0 ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit 0 ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit 0 ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit 0 ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit 0 ;;
+ M68*:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit 0 ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit 0 ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit 0 ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit 0 ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit 0 ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit 0 ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit 0 ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit 0 ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit 0 ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit 0 ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit 0 ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit 0 ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Darwin:*:*)
+ echo `uname -p`-apple-darwin${UNAME_RELEASE}
+ exit 0 ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit 0 ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit 0 ;;
+ NSR-[GKLNPTVW]:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit 0 ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit 0 ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit 0 ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit 0 ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit 0 ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit 0 ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit 0 ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit 0 ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit 0 ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit 0 ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit 0 ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit 0 ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit 0 ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit 0 ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+rm -f $dummy.c $dummy && rmdir $tmpdir
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ c34*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ c38*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ c4*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up-to-date version of the config scripts from
+
+ ftp://ftp.gnu.org/pub/gnu/config/
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/libdb/dist/config.hin b/libdb/dist/config.hin
new file mode 100644
index 0000000..9b65cfa
--- /dev/null
+++ b/libdb/dist/config.hin
@@ -0,0 +1,459 @@
+/* config.hin. Generated from configure.ac by autoheader. */
+
+/* Define to 1 if you want to build a version for running the test suite. */
+#undef CONFIG_TEST
+
+/* Define to 1 if you want a debugging version. */
+#undef DEBUG
+
+/* Define to 1 if you want a version that logs read operations. */
+#undef DEBUG_ROP
+
+/* Define to 1 if you want a version that logs write operations. */
+#undef DEBUG_WOP
+
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+#undef DIAGNOSTIC
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#undef HAVE_CLOCK_GETTIME
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+#undef HAVE_CRYPTO
+
+/* Define to 1 if you have the `directio' function. */
+#undef HAVE_DIRECTIO
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#undef HAVE_DIRENT_H
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#undef HAVE_EXIT_SUCCESS
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+#undef HAVE_FCNTL_F_SETFD
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#undef HAVE_FILESYSTEM_NOTZERO
+
+/* Define to 1 if you have the `getcwd' function. */
+#undef HAVE_GETCWD
+
+/* Define to 1 if you have the `getopt' function. */
+#undef HAVE_GETOPT
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#undef HAVE_GETTIMEOFDAY
+
+/* Define to 1 if you have the `getuid' function. */
+#undef HAVE_GETUID
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#undef HAVE_LIBNSL
+
+/* Define to 1 if you have the `memcmp' function. */
+#undef HAVE_MEMCMP
+
+/* Define to 1 if you have the `memcpy' function. */
+#undef HAVE_MEMCPY
+
+/* Define to 1 if you have the `memmove' function. */
+#undef HAVE_MEMMOVE
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the `mlock' function. */
+#undef HAVE_MLOCK
+
+/* Define to 1 if you have the `mmap' function. */
+#undef HAVE_MMAP
+
+/* Define to 1 if you have the `munlock' function. */
+#undef HAVE_MUNLOCK
+
+/* Define to 1 if you have the `munmap' function. */
+#undef HAVE_MUNMAP
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+#undef HAVE_MUTEX_68K_GCC_ASSEMBLY
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+#undef HAVE_MUTEX_AIX_CHECK_LOCK
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+#undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+
+/* Define to use the GCC compiler and alpha assembly language mutexes. */
+#undef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+#undef HAVE_MUTEX_ARM_GCC_ASSEMBLY
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+#undef HAVE_MUTEX_FCNTL
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+#undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+#undef HAVE_MUTEX_HPPA_MSEM_INIT
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+#undef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+
+/* Define to 1 to use the GCC compiler and Mips assembly language mutexes. */
+#undef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+#undef HAVE_MUTEX_MSEM_INIT
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+#undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+#undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+#undef HAVE_MUTEX_PTHREADS
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+#undef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+#undef HAVE_MUTEX_S390_GCC_ASSEMBLY
+
+/* Define to use the GCC compiler and s390 assembly language mutexes. */
+#undef HAVE_MUTEX_S390_LINUX_ASSEMBLY
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+#undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+#undef HAVE_MUTEX_SEMA_INIT
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+#undef HAVE_MUTEX_SGI_INIT_LOCK
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+#undef HAVE_MUTEX_SOLARIS_LOCK_TRY
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+#undef HAVE_MUTEX_SOLARIS_LWP
+
+/* Define to use the GCC compiler and sparc32 assembly language mutexes. */
+#undef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+
+/* Define to use the GCC compiler and sparc64 assembly language mutexes. */
+#undef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+#undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+
+/* Define to 1 if mutexes hold system resources. */
+#undef HAVE_MUTEX_SYSTEM_RESOURCES
+
+/* Define to 1 if fast mutexes are available. */
+#undef HAVE_MUTEX_THREADS
+
+/* Define to 1 to configure mutexes intra-process only. */
+#undef HAVE_MUTEX_THREAD_ONLY
+
+/* Define to 1 to use the UNIX International mutexes. */
+#undef HAVE_MUTEX_UI_THREADS
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+#undef HAVE_MUTEX_UTS_CC_ASSEMBLY
+
+/* Define to 1 to use VMS mutexes. */
+#undef HAVE_MUTEX_VMS
+
+/* Define to 1 to use VxWorks mutexes. */
+#undef HAVE_MUTEX_VXWORKS
+
+/* Define to 1 to use Windows mutexes. */
+#undef HAVE_MUTEX_WIN32
+
+/* Define to 1 to use the GCC compiler and amd64 assembly language mutexes. */
+#undef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+#undef HAVE_MUTEX_X86_GCC_ASSEMBLY
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+#undef HAVE_NDIR_H
+
+/* Define to 1 if you have the O_DIRECT flag. */
+#undef HAVE_O_DIRECT
+
+/* Define to 1 if you have the `pread' function. */
+#undef HAVE_PREAD
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+#undef HAVE_PSTAT_GETDYNAMIC
+
+/* Define to 1 if you have the `pwrite' function. */
+#undef HAVE_PWRITE
+
+/* Define to 1 if building on QNX. */
+#undef HAVE_QNX
+
+/* Define to 1 if you have the `qsort' function. */
+#undef HAVE_QSORT
+
+/* Define to 1 if you have the `raise' function. */
+#undef HAVE_RAISE
+
+/* Define to 1 if building RPC client/server. */
+#undef HAVE_RPC
+
+/* Define to 1 if you have the `sched_yield' function. */
+#undef HAVE_SCHED_YIELD
+
+/* Define to 1 if you have the `select' function. */
+#undef HAVE_SELECT
+
+/* Define to 1 if you have the `shmget' function. */
+#undef HAVE_SHMGET
+
+/* Define to 1 if you have the `snprintf' function. */
+#undef HAVE_SNPRINTF
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the `strcasecmp' function. */
+#undef HAVE_STRCASECMP
+
+/* Define to 1 if you have the `strdup' function. */
+#undef HAVE_STRDUP
+
+/* Define to 1 if you have the `strerror' function. */
+#undef HAVE_STRERROR
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the `strtoul' function. */
+#undef HAVE_STRTOUL
+
+/* Define to 1 if `st_blksize' is a member of `struct stat'. */
+#undef HAVE_STRUCT_STAT_ST_BLKSIZE
+
+/* Define to 1 if you have the `sysconf' function. */
+#undef HAVE_SYSCONF
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+#undef HAVE_SYS_DIR_H
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+#undef HAVE_SYS_NDIR_H
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#undef HAVE_SYS_SELECT_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#undef HAVE_UNLINK_WITH_OPEN_FAILURE
+
+/* Define to 1 if you have the `vsnprintf' function. */
+#undef HAVE_VSNPRINTF
+
+/* Define to 1 if building VxWorks. */
+#undef HAVE_VXWORKS
+
+/* Define to 1 if you have the `yield' function. */
+#undef HAVE_YIELD
+
+/* Define to 1 if you have the `_fstati64' function. */
+#undef HAVE__FSTATI64
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* The size of a `char', as computed by sizeof. */
+#undef SIZEOF_CHAR
+
+/* The size of a `char *', as computed by sizeof. */
+#undef SIZEOF_CHAR_P
+
+/* The size of an `int', as computed by sizeof. */
+#undef SIZEOF_INT
+
+/* The size of a `long', as computed by sizeof. */
+#undef SIZEOF_LONG
+
+/* The size of a `short', as computed by sizeof. */
+#undef SIZEOF_SHORT
+
+/* The size of a `size_t', as computed by sizeof. */
+#undef SIZEOF_SIZE_T
+
+/* The size of an `unsigned char', as computed by sizeof. */
+#undef SIZEOF_UNSIGNED_CHAR
+
+/* The size of an `unsigned int', as computed by sizeof. */
+#undef SIZEOF_UNSIGNED_INT
+
+/* The size of an `unsigned long', as computed by sizeof. */
+#undef SIZEOF_UNSIGNED_LONG
+
+/* The size of an `unsigned short', as computed by sizeof. */
+#undef SIZEOF_UNSIGNED_SHORT
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+#undef STAT_MACROS_BROKEN
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#undef TIME_WITH_SYS_TIME
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+#undef UMRW
+
+/* Define to 1 if building for Win32. */
+#undef DB_WIN32
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#undef _FILE_OFFSET_BITS
+
+/* Define for large files, on AIX-style hosts. */
+#undef _LARGE_FILES
+
+/* Define to empty if `const' does not conform to ANSI C. */
+#undef const
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+#ifdef DB_WIN32
+
+/* This part copied from build_win32/db_config.h */
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * All of the necessary includes have been included; ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+#endif /* DB_WIN32 */
diff --git a/libdb/dist/config.sub b/libdb/dist/config.sub
new file mode 100755
index 0000000..2033c30
--- /dev/null
+++ b/libdb/dist/config.sub
@@ -0,0 +1,1460 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-03'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit 0;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | freebsd*-gnu* | storm-chaos* | os2-emx* | windows32-* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k \
+ | m32r | m68000 | m68k | m88k | mcore \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64orion | mips64orionel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | ns16k | ns32k \
+ | openrisc | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | pyramid \
+ | sh | sh[1234] | sh3e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv9 | sparcv9b \
+ | strongarm \
+ | tahoe | thumb | tic80 | tron \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xscale | xstormy16 | xtensa \
+ | z8k)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* \
+ | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c54x-* \
+ | clipper-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* \
+ | m32r-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | mcore-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipstx39 | mipstx39el \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | pyramid-* \
+ | romp-* | rs6000-* \
+ | sh-* | sh[1234]-* | sh3e-* | sh[34]eb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
+ | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
+ | tahoe-* | thumb-* | tic30-* | tic54x-* | tic80-* | tron-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
+ | xtensa-* \
+ | ymp-* \
+ | z8k-*)
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ mmix*)
+ basic_machine=mmix-knuth
+ os=-mmixware
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ or32 | or32-*)
+ basic_machine=or32-unknown
+ os=-coff
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2)
+ basic_machine=i686-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3d)
+ basic_machine=alpha-cray
+ os=-unicos
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ windows32)
+ basic_machine=i386-pc
+ os=-windows32-msvcrt
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. In some
+# cases it is the only manufacturer; in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh3 | sh4 | sh3eb | sh4eb | sh[1234]le | sh3ele)
+ basic_machine=sh-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparc | sparcv9 | sparcv9b)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ c4x*)
+ basic_machine=c4x-none
+ os=-coff
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -netbsd* | -openbsd* | -freebsd* | -riscix* \
+ | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* | -powermax*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto*)
+ os=-nto-qnx
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or, put another way, the most popular OS provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the OS and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/libdb/dist/configure b/libdb/dist/configure
new file mode 100755
index 0000000..0a52109
--- /dev/null
+++ b/libdb/dist/configure
@@ -0,0 +1,21297 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.57 for Berkeley DB 4.1.25.
+#
+# Report bugs to <support@sleepycat.com>.
+#
+# Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002
+# Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+# Support unset when possible.
+if (FOO=FOO; unset FOO) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+
+# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) lt_cv_sys_path_separator=';' ;;
+ *) lt_cv_sys_path_separator=':' ;;
+ esac
+ PATH_SEPARATOR=$lt_cv_sys_path_separator
+fi
+
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$ECHO" | sed 's,\\\\\$\\$0,'$0','`
+ ;;
+esac
+
+echo=${ECHO-echo}
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+
+EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+if test -z "$ECHO"; then
+if test "X${echo_test_string+set}" != Xset; then
+# find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string="`eval $cmd`") 2>/dev/null &&
+ echo_test_string="`eval $cmd`" &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
+ then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"}
+ else
+ # Try using printf.
+ echo='printf %s\n'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL $0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$CONFIG_SHELL $0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do
+ if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "$0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ echo=echo
+ fi
+ fi
+ fi
+ fi
+fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+ECHO=$echo
+if test "X$ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then
+ ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo"
+fi
+
+
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+exec 6>&1
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_config_libobj_dir=.
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Maximum number of lines to put in a shell here document.
+# This variable seems obsolete. It should probably be removed, and
+# only ac_max_sed_lines should be used.
+: ${ac_max_here_lines=38}
+
+# Identity of this package.
+PACKAGE_NAME='Berkeley DB'
+PACKAGE_TARNAME='db-4.1.25'
+PACKAGE_VERSION='4.1.25'
+PACKAGE_STRING='Berkeley DB 4.1.25'
+PACKAGE_BUGREPORT='support@sleepycat.com'
+
+ac_unique_file="../db/db.c"
+ac_default_prefix=/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#if HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#if STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# if HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#if HAVE_STRING_H
+# if !STDC_HEADERS && HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#if HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#else
+# if HAVE_STDINT_H
+# include <stdint.h>
+# endif
+#endif
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os ADDITIONAL_INCS ADDITIONAL_LANG ADDITIONAL_OBJS ADDITIONAL_PROGS BUILD_TARGET CFLAGS CONFIGURATION_ARGS CONFIGURATION_PATH CPPFLAGS CXX CXXFLAGS DEFAULT_LIB DEFAULT_LIB_CXX EMBEDIX_ECD_CXX EMBEDIX_ECD_RPC EMBEDIX_ROOT INSTALLER INSTALL_LIBS INSTALL_TARGET JAR JAVACFLAGS LDFLAGS LIBJSO_LIBS LIBSO_LIBS LIBTOOL LIBTSO_LIBS LIBXSO_LIBS LOAD_LIBS MAKEFILE_CC MAKEFILE_CCLINK MAKEFILE_CXX MAKEFILE_CXXLINK MAKEFILE_MAYBE_WIN32 MAKEFILE_SOLINK MAKEFILE_XSOLINK POSTLINK RPC_CLIENT_OBJS RPM_POST_INSTALL RPM_POST_UNINSTALL SOFLAGS db_cv_path_embedix_install db_cv_path_rpm_archive db_int_def o DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH DB_VERSION_UNIQUE_NAME DB_VERSION_STRING db_cv_path_ar ac_ct_db_cv_path_ar db_cv_path_chmod ac_ct_db_cv_path_chmod db_cv_path_cp ac_ct_db_cv_path_cp path_ldconfig ac_ct_path_ldconfig db_cv_path_ldconfig db_cv_path_ln ac_ct_db_cv_path_ln db_cv_path_mkdir ac_ct_db_cv_path_mkdir path_ranlib ac_ct_path_ranlib db_cv_path_ranlib db_cv_path_rm ac_ct_db_cv_path_rm db_cv_path_rpm ac_ct_db_cv_path_rpm path_sh ac_ct_path_sh db_cv_path_sh path_strip ac_ct_path_strip db_cv_path_strip db_cv_path_kill ac_ct_db_cv_path_kill INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CC ac_ct_CC EXEEXT OBJEXT CCC ac_ct_CCC ac_ct_CXX cxx_have_stdheaders LN_S ECHO RANLIB ac_ct_RANLIB STRIP ac_ct_STRIP CPP EGREP SOSUFFIX MODSUFFIX JMODSUFFIX JAVAC _ACJNI_JAVAC TCFLAGS TCL_BIN_DIR TCL_SRC_DIR TCL_LIB_FILE TCL_TCLSH u_char_decl u_short_decl u_int_decl u_long_decl u_int8_decl u_int16_decl int16_decl u_int32_decl int32_decl ssize_t_decl db_align_t_decl db_alignp_t_decl LIBOBJS LTLIBOBJS'
+ac_subst_files=''
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datadir='${prefix}/share'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+libdir='${exec_prefix}/lib'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+infodir='${prefix}/info'
+mandir='${prefix}/man'
+
+ac_prev=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'`
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_option in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ eval "enable_$ac_feature=no" ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "enable_$ac_feature='$ac_optarg'" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "with_$ac_package='$ac_optarg'" ;;
+
+ -without-* | --without-*)
+ ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/-/_/g'`
+ eval "with_$ac_package=no" ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`
+ eval "$ac_envvar='$ac_optarg'"
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+# Be sure to have absolute paths.
+for ac_var in exec_prefix prefix
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* | NONE | '' ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# Be sure to have absolute paths.
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \
+ localstatedir libdir includedir oldincludedir infodir mandir
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then its parent.
+ ac_confdir=`(dirname "$0") 2>/dev/null ||
+$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$0" : 'X\(//\)[^/]' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$0" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r $srcdir/$ac_unique_file; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r $srcdir/$ac_unique_file; then
+ if test "$ac_srcdir_defaulted" = yes; then
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." >&2
+ { (exit 1); exit 1; }; }
+ else
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+ fi
+fi
+(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null ||
+ { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2
+ { (exit 1); exit 1; }; }
+srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'`
+ac_env_build_alias_set=${build_alias+set}
+ac_env_build_alias_value=$build_alias
+ac_cv_env_build_alias_set=${build_alias+set}
+ac_cv_env_build_alias_value=$build_alias
+ac_env_host_alias_set=${host_alias+set}
+ac_env_host_alias_value=$host_alias
+ac_cv_env_host_alias_set=${host_alias+set}
+ac_cv_env_host_alias_value=$host_alias
+ac_env_target_alias_set=${target_alias+set}
+ac_env_target_alias_value=$target_alias
+ac_cv_env_target_alias_set=${target_alias+set}
+ac_cv_env_target_alias_value=$target_alias
+ac_env_CC_set=${CC+set}
+ac_env_CC_value=$CC
+ac_cv_env_CC_set=${CC+set}
+ac_cv_env_CC_value=$CC
+ac_env_CFLAGS_set=${CFLAGS+set}
+ac_env_CFLAGS_value=$CFLAGS
+ac_cv_env_CFLAGS_set=${CFLAGS+set}
+ac_cv_env_CFLAGS_value=$CFLAGS
+ac_env_LDFLAGS_set=${LDFLAGS+set}
+ac_env_LDFLAGS_value=$LDFLAGS
+ac_cv_env_LDFLAGS_set=${LDFLAGS+set}
+ac_cv_env_LDFLAGS_value=$LDFLAGS
+ac_env_CPPFLAGS_set=${CPPFLAGS+set}
+ac_env_CPPFLAGS_value=$CPPFLAGS
+ac_cv_env_CPPFLAGS_set=${CPPFLAGS+set}
+ac_cv_env_CPPFLAGS_value=$CPPFLAGS
+ac_env_CXX_set=${CXX+set}
+ac_env_CXX_value=$CXX
+ac_cv_env_CXX_set=${CXX+set}
+ac_cv_env_CXX_value=$CXX
+ac_env_CXXFLAGS_set=${CXXFLAGS+set}
+ac_env_CXXFLAGS_value=$CXXFLAGS
+ac_cv_env_CXXFLAGS_set=${CXXFLAGS+set}
+ac_cv_env_CXXFLAGS_value=$CXXFLAGS
+ac_env_CPP_set=${CPP+set}
+ac_env_CPP_value=$CPP
+ac_cv_env_CPP_set=${CPP+set}
+ac_cv_env_CPP_value=$CPP
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures Berkeley DB 4.1.25 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+_ACEOF
+
+ cat <<_ACEOF
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --datadir=DIR read-only architecture-independent data [PREFIX/share]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --infodir=DIR info documentation [PREFIX/info]
+ --mandir=DIR man documentation [PREFIX/man]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of Berkeley DB 4.1.25:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --disable-bigfile Obsolete; use --disable-largefile instead.
+ --enable-compat185 Build DB 1.85 compatibility API.
+ --enable-cxx Build C++ API.
+ --enable-debug Build a debugging version.
+ --enable-debug_rop Build a version that logs read operations.
+ --enable-debug_wop Build a version that logs write operations.
+ --enable-diagnostic Build a version with run-time diagnostics.
+ --enable-dump185 Build db_dump185(1) to dump 1.85 databases.
+ --enable-java Build Java API.
+ --enable-pthreadsmutexes
+ Use POSIX pthreads mutexes.
+ --enable-posixmutexes Force use of POSIX standard mutexes.
+ --enable-rpc Build RPC client/server.
+ --enable-tcl Build Tcl API.
+ --enable-test Configure to run the test suite.
+ --enable-uimutexes Force use of Unix International mutexes.
+  --enable-umrw           Mask harmless uninitialized memory reads/writes.
+ --enable-shared=PKGS build shared libraries default=yes
+ --enable-static=PKGS build static libraries default=yes
+ --enable-fast-install=PKGS optimize for fast installation default=yes
+ --disable-libtool-lock avoid locking (might break parallel builds)
+ --disable-largefile omit support for large files
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-embedix=DIR Embedix install directory location.
+ --with-mutex=MUTEX Selection of non-standard mutexes.
+ --with-rpm=DIR Directory location of RPM archive.
+ --with-tcl=DIR Directory location of tclConfig.sh.
+ --with-uniquename=NAME Build a uniquely named library.
+ --with-gnu-ld assume the C compiler uses GNU ld default=no
+ --with-pic try to use only PIC/non-PIC objects default=use both
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ CPPFLAGS C/C++ preprocessor flags, e.g. -I<include dir> if you have
+ headers in a nonstandard directory <include dir>
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <support@sleepycat.com>.
+_ACEOF
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ ac_popdir=`pwd`
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d $ac_dir || continue
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be
+# absolute.
+ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd`
+ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd`
+ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd`
+ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd`
+
+ cd $ac_dir
+ # Check for guested configure; otherwise get Cygnus style configure.
+ if test -f $ac_srcdir/configure.gnu; then
+ echo
+ $SHELL $ac_srcdir/configure.gnu --help=recursive
+ elif test -f $ac_srcdir/configure; then
+ echo
+ $SHELL $ac_srcdir/configure --help=recursive
+ elif test -f $ac_srcdir/configure.ac ||
+ test -f $ac_srcdir/configure.in; then
+ echo
+ $ac_configure --help
+ else
+ echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi
+ cd $ac_popdir
+ done
+fi
+
+test -n "$ac_init_help" && exit 0
+if $ac_init_version; then
+ cat <<\_ACEOF
+Berkeley DB configure 4.1.25
+generated by GNU Autoconf 2.57
+
+Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002
+Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit 0
+fi
+exec 5>config.log
+cat >&5 <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by Berkeley DB $as_me 4.1.25, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+hostinfo = `(hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ echo "PATH: $as_dir"
+done
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_sep=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'"
+ # Get rid of the leading space.
+ ac_sep=" "
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Be sure not to use single quotes in there, as some shells,
+# such as our DU 5.0 friend, will then `close' the trap.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+{
+ (set) 2>&1 |
+ case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ sed -n \
+ "s/'"'"'/'"'"'\\\\'"'"''"'"'/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p"
+ ;;
+ *)
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+}
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------- ##
+## Output files. ##
+## ------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ sed "/^$/d" confdefs.h | sort
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ echo "$as_me: caught signal $ac_signal"
+ echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core core.* *.core &&
+ rm -rf conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+ ' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -rf conftest* confdefs.h
+# AIX cpp loses on an empty file, so make sure it contains at least a newline.
+echo >confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer explicitly selected file to automatically selected ones.
+if test -z "$CONFIG_SITE"; then
+ if test "x$prefix" != xNONE; then
+ CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site"
+ else
+ CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+ fi
+fi
+for ac_site_file in $CONFIG_SITE; do
+ if test -r "$ac_site_file"; then
+ { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . $cache_file;;
+ *) . ./$cache_file;;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in `(set) 2>&1 |
+ sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val="\$ac_cv_env_${ac_var}_value"
+ eval ac_new_val="\$ac_env_${ac_var}_value"
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
+echo "$as_me: former value: $ac_old_val" >&2;}
+ { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
+echo "$as_me: current value: $ac_new_val" >&2;}
+ ac_cache_corrupted=:
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ac_config_headers="$ac_config_headers db_config.h:config.hin"
+
+
+# Configure setup.
+ac_aux_dir=
+for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
+ if test -f $ac_dir/install-sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f $ac_dir/install.sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f $ac_dir/shtool; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&5
+echo "$as_me: error: cannot find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+ac_config_guess="$SHELL $ac_aux_dir/config.guess"
+ac_config_sub="$SHELL $ac_aux_dir/config.sub"
+ac_configure="$SHELL $ac_aux_dir/configure" # This should be Cygnus configure.
+
+# Make sure we can run config.sub.
+$ac_config_sub sun4 >/dev/null 2>&1 ||
+ { { echo "$as_me:$LINENO: error: cannot run $ac_config_sub" >&5
+echo "$as_me: error: cannot run $ac_config_sub" >&2;}
+ { (exit 1); exit 1; }; }
+
+echo "$as_me:$LINENO: checking build system type" >&5
+echo $ECHO_N "checking build system type... $ECHO_C" >&6
+if test "${ac_cv_build+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_build_alias=$build_alias
+test -z "$ac_cv_build_alias" &&
+ ac_cv_build_alias=`$ac_config_guess`
+test -z "$ac_cv_build_alias" &&
+ { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5
+echo "$as_me: error: cannot guess build type; you must specify one" >&2;}
+ { (exit 1); exit 1; }; }
+ac_cv_build=`$ac_config_sub $ac_cv_build_alias` ||
+ { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_build_alias failed" >&5
+echo "$as_me: error: $ac_config_sub $ac_cv_build_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_build" >&5
+echo "${ECHO_T}$ac_cv_build" >&6
+build=$ac_cv_build
+build_cpu=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+build_vendor=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+build_os=`echo $ac_cv_build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+
+
+echo "$as_me:$LINENO: checking host system type" >&5
+echo $ECHO_N "checking host system type... $ECHO_C" >&6
+if test "${ac_cv_host+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_host_alias=$host_alias
+test -z "$ac_cv_host_alias" &&
+ ac_cv_host_alias=$ac_cv_build_alias
+ac_cv_host=`$ac_config_sub $ac_cv_host_alias` ||
+ { { echo "$as_me:$LINENO: error: $ac_config_sub $ac_cv_host_alias failed" >&5
+echo "$as_me: error: $ac_config_sub $ac_cv_host_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_host" >&5
+echo "${ECHO_T}$ac_cv_host" >&6
+host=$ac_cv_host
+host_cpu=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo $ac_cv_host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+
+
+test "$program_prefix" != NONE &&
+ program_transform_name="s,^,$program_prefix,;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s,\$,$program_suffix,;$program_transform_name"
+# Double any \ or $. echo might interpret backslashes.
+# By default was `s,x,x', remove it if useless.
+cat <<\_ACEOF >conftest.sed
+s/[\\$]/&&/g;s/;s,x,x,$//
+_ACEOF
+program_transform_name=`echo $program_transform_name | sed -f conftest.sed`
+rm conftest.sed
+
+
+# We cannot build in the top-level directory.
+echo "$as_me:$LINENO: checking if building in the top-level directory" >&5
+echo $ECHO_N "checking if building in the top-level directory... $ECHO_C" >&6
+ test -d db_archive && { { echo "$as_me:$LINENO: error:
+Berkeley DB cannot be built in the top-level distribution directory." >&5
+echo "$as_me: error:
+Berkeley DB cannot be built in the top-level distribution directory." >&2;}
+ { (exit 1); exit 1; }; }
+echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+
+# Substitution variables.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# RPM needs the current absolute path.
+# RPM needs the list of original arguments, but we don't include the RPM
+# option itself.
+CONFIGURATION_PATH=${PWD-`pwd`}
+CONFIGURATION_ARGS=`echo "$*" |
+ sed -e 's/--with-embedix[^ ]*//' -e 's/--with-rpm[^ ]*//'`
+
+# Set the version.
+
+
+DB_VERSION_MAJOR=4
+
+DB_VERSION_MINOR=1
+
+DB_VERSION_PATCH=25
+
+DB_VERSION_UNIQUE_NAME=_4001
+
+DB_VERSION_STRING="\"Sleepycat Software: Berkeley DB 4.1.25: (October 11, 2004)\""
+
+
+# Set the default installation location.
+
+
+# Process all options before using them.
+
+
+# --enable-bigfile was the configuration option that Berkeley DB used before
+# autoconf 2.50 was released (which had --enable-largefile integrated in).
+# Check whether --enable-bigfile or --disable-bigfile was given.
+if test "${enable_bigfile+set}" = set; then
+ enableval="$enable_bigfile"
+ { { echo "$as_me:$LINENO: error: --enable-bigfile no longer supported, use --enable-largefile" >&5
+echo "$as_me: error: --enable-bigfile no longer supported, use --enable-largefile" >&2;}
+ { (exit 1); exit 1; }; }
+fi;
+
+echo "$as_me:$LINENO: checking if --enable-compat185 option specified" >&5
+echo $ECHO_N "checking if --enable-compat185 option specified... $ECHO_C" >&6
+# Check whether --enable-compat185 or --disable-compat185 was given.
+if test "${enable_compat185+set}" = set; then
+ enableval="$enable_compat185"
+ db_cv_compat185="$enable_compat185"
+else
+ db_cv_compat185="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_compat185" >&5
+echo "${ECHO_T}$db_cv_compat185" >&6
+
+echo "$as_me:$LINENO: checking if --enable-cxx option specified" >&5
+echo $ECHO_N "checking if --enable-cxx option specified... $ECHO_C" >&6
+# Check whether --enable-cxx or --disable-cxx was given.
+if test "${enable_cxx+set}" = set; then
+ enableval="$enable_cxx"
+ db_cv_cxx="$enable_cxx"
+else
+ db_cv_cxx="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_cxx" >&5
+echo "${ECHO_T}$db_cv_cxx" >&6
+
+echo "$as_me:$LINENO: checking if --enable-debug option specified" >&5
+echo $ECHO_N "checking if --enable-debug option specified... $ECHO_C" >&6
+# Check whether --enable-debug or --disable-debug was given.
+if test "${enable_debug+set}" = set; then
+ enableval="$enable_debug"
+ db_cv_debug="$enable_debug"
+else
+ db_cv_debug="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_debug" >&5
+echo "${ECHO_T}$db_cv_debug" >&6
+
+echo "$as_me:$LINENO: checking if --enable-debug_rop option specified" >&5
+echo $ECHO_N "checking if --enable-debug_rop option specified... $ECHO_C" >&6
+# Check whether --enable-debug_rop or --disable-debug_rop was given.
+if test "${enable_debug_rop+set}" = set; then
+ enableval="$enable_debug_rop"
+ db_cv_debug_rop="$enable_debug_rop"
+else
+ db_cv_debug_rop="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_debug_rop" >&5
+echo "${ECHO_T}$db_cv_debug_rop" >&6
+
+echo "$as_me:$LINENO: checking if --enable-debug_wop option specified" >&5
+echo $ECHO_N "checking if --enable-debug_wop option specified... $ECHO_C" >&6
+# Check whether --enable-debug_wop or --disable-debug_wop was given.
+if test "${enable_debug_wop+set}" = set; then
+ enableval="$enable_debug_wop"
+ db_cv_debug_wop="$enable_debug_wop"
+else
+ db_cv_debug_wop="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_debug_wop" >&5
+echo "${ECHO_T}$db_cv_debug_wop" >&6
+
+echo "$as_me:$LINENO: checking if --enable-diagnostic option specified" >&5
+echo $ECHO_N "checking if --enable-diagnostic option specified... $ECHO_C" >&6
+# Check whether --enable-diagnostic or --disable-diagnostic was given.
+if test "${enable_diagnostic+set}" = set; then
+ enableval="$enable_diagnostic"
+ db_cv_diagnostic="$enable_diagnostic"
+else
+ db_cv_diagnostic="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_diagnostic" >&5
+echo "${ECHO_T}$db_cv_diagnostic" >&6
+
+echo "$as_me:$LINENO: checking if --enable-dump185 option specified" >&5
+echo $ECHO_N "checking if --enable-dump185 option specified... $ECHO_C" >&6
+# Check whether --enable-dump185 or --disable-dump185 was given.
+if test "${enable_dump185+set}" = set; then
+ enableval="$enable_dump185"
+ db_cv_dump185="$enable_dump185"
+else
+ db_cv_dump185="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_dump185" >&5
+echo "${ECHO_T}$db_cv_dump185" >&6
+
+echo "$as_me:$LINENO: checking if --enable-java option specified" >&5
+echo $ECHO_N "checking if --enable-java option specified... $ECHO_C" >&6
+# Check whether --enable-java or --disable-java was given.
+if test "${enable_java+set}" = set; then
+ enableval="$enable_java"
+ db_cv_java="$enable_java"
+else
+ db_cv_java="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_java" >&5
+echo "${ECHO_T}$db_cv_java" >&6
+
+echo "$as_me:$LINENO: checking if --enable-pthreadsmutexes option specified" >&5
+echo $ECHO_N "checking if --enable-pthreadsmutexes option specified... $ECHO_C" >&6
+# Check whether --enable-pthreadsmutexes or --disable-pthreadsmutexes was given.
+if test "${enable_pthreadsmutexes+set}" = set; then
+ enableval="$enable_pthreadsmutexes"
+
+else
+ enableval="no"
+fi;
+db_cv_pthreadsmutexes="$enableval"
+case "$enableval" in
+ no) echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6;;
+yes) echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6;;
+esac
+
+echo "$as_me:$LINENO: checking if --enable-posixmutexes option specified" >&5
+echo $ECHO_N "checking if --enable-posixmutexes option specified... $ECHO_C" >&6
+# Check whether --enable-posixmutexes or --disable-posixmutexes was given.
+if test "${enable_posixmutexes+set}" = set; then
+ enableval="$enable_posixmutexes"
+ db_cv_posixmutexes="$enable_posixmutexes"
+else
+ db_cv_posixmutexes="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_posixmutexes" >&5
+echo "${ECHO_T}$db_cv_posixmutexes" >&6
+
+echo "$as_me:$LINENO: checking if --enable-rpc option specified" >&5
+echo $ECHO_N "checking if --enable-rpc option specified... $ECHO_C" >&6
+# Check whether --enable-rpc or --disable-rpc was given.
+if test "${enable_rpc+set}" = set; then
+ enableval="$enable_rpc"
+ db_cv_rpc="$enable_rpc"
+else
+ db_cv_rpc="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_rpc" >&5
+echo "${ECHO_T}$db_cv_rpc" >&6
+
+echo "$as_me:$LINENO: checking if --enable-tcl option specified" >&5
+echo $ECHO_N "checking if --enable-tcl option specified... $ECHO_C" >&6
+# Check whether --enable-tcl or --disable-tcl was given.
+if test "${enable_tcl+set}" = set; then
+ enableval="$enable_tcl"
+ db_cv_tcl="$enable_tcl"
+else
+ db_cv_tcl="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_tcl" >&5
+echo "${ECHO_T}$db_cv_tcl" >&6
+
+echo "$as_me:$LINENO: checking if --enable-test option specified" >&5
+echo $ECHO_N "checking if --enable-test option specified... $ECHO_C" >&6
+# Check whether --enable-test or --disable-test was given.
+if test "${enable_test+set}" = set; then
+ enableval="$enable_test"
+ db_cv_test="$enable_test"
+else
+ db_cv_test="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_test" >&5
+echo "${ECHO_T}$db_cv_test" >&6
+
+echo "$as_me:$LINENO: checking if --enable-uimutexes option specified" >&5
+echo $ECHO_N "checking if --enable-uimutexes option specified... $ECHO_C" >&6
+# Check whether --enable-uimutexes or --disable-uimutexes was given.
+if test "${enable_uimutexes+set}" = set; then
+ enableval="$enable_uimutexes"
+ db_cv_uimutexes="$enable_uimutexes"
+else
+ db_cv_uimutexes="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_uimutexes" >&5
+echo "${ECHO_T}$db_cv_uimutexes" >&6
+
+echo "$as_me:$LINENO: checking if --enable-umrw option specified" >&5
+echo $ECHO_N "checking if --enable-umrw option specified... $ECHO_C" >&6
+# Check whether --enable-umrw or --disable-umrw was given.
+if test "${enable_umrw+set}" = set; then
+ enableval="$enable_umrw"
+ db_cv_umrw="$enable_umrw"
+else
+ db_cv_umrw="no"
+fi;
+echo "$as_me:$LINENO: result: $db_cv_umrw" >&5
+echo "${ECHO_T}$db_cv_umrw" >&6
+
+echo "$as_me:$LINENO: checking if --with-embedix=DIR option specified" >&5
+echo $ECHO_N "checking if --with-embedix=DIR option specified... $ECHO_C" >&6
+
+# Check whether --with-embedix or --without-embedix was given.
+if test "${with_embedix+set}" = set; then
+ withval="$with_embedix"
+ with_embedix="$withval"
+else
+ with_embedix="no"
+fi;
+if test "$with_embedix" = "no"; then
+ db_cv_embedix="no"
+ echo "$as_me:$LINENO: result: $with_embedix" >&5
+echo "${ECHO_T}$with_embedix" >&6
+else
+ db_cv_embedix="yes"
+ if test "$with_embedix" = "yes"; then
+ db_cv_path_embedix_install="/opt/Embedix"
+ else
+ db_cv_path_embedix_install="$with_embedix"
+ fi
+ echo "$as_me:$LINENO: result: $db_cv_path_embedix_install" >&5
+echo "${ECHO_T}$db_cv_path_embedix_install" >&6
+fi
+
+echo "$as_me:$LINENO: checking if --with-mutex=MUTEX option specified" >&5
+echo $ECHO_N "checking if --with-mutex=MUTEX option specified... $ECHO_C" >&6
+
+# Check whether --with-mutex or --without-mutex was given.
+if test "${with_mutex+set}" = set; then
+ withval="$with_mutex"
+ with_mutex="$withval"
+else
+ with_mutex="no"
+fi;
+if test "$with_mutex" = "yes"; then
+ { { echo "$as_me:$LINENO: error: --with-mutex requires a mutex name argument" >&5
+echo "$as_me: error: --with-mutex requires a mutex name argument" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test "$with_mutex" != "no"; then
+ db_cv_mutex="$with_mutex"
+fi
+echo "$as_me:$LINENO: result: $with_mutex" >&5
+echo "${ECHO_T}$with_mutex" >&6
+
+echo "$as_me:$LINENO: checking if --with-rpm=DIR option specified" >&5
+echo $ECHO_N "checking if --with-rpm=DIR option specified... $ECHO_C" >&6
+
+# Check whether --with-rpm or --without-rpm was given.
+if test "${with_rpm+set}" = set; then
+ withval="$with_rpm"
+ with_rpm="$withval"
+else
+ with_rpm="no"
+fi;
+if test "$with_rpm" = "no"; then
+ db_cv_rpm="no"
+else
+ if test "$with_rpm" = "yes"; then
+ { { echo "$as_me:$LINENO: error: --with-rpm requires a directory argument" >&5
+echo "$as_me: error: --with-rpm requires a directory argument" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ db_cv_rpm="yes"
+ db_cv_path_rpm_archive="$with_rpm"
+fi
+echo "$as_me:$LINENO: result: $with_rpm" >&5
+echo "${ECHO_T}$with_rpm" >&6
+
+echo "$as_me:$LINENO: checking if --with-tcl=DIR option specified" >&5
+echo $ECHO_N "checking if --with-tcl=DIR option specified... $ECHO_C" >&6
+
+# Check whether --with-tcl or --without-tcl was given.
+if test "${with_tcl+set}" = set; then
+ withval="$with_tcl"
+ with_tclconfig="$withval"
+else
+ with_tclconfig="no"
+fi;
+echo "$as_me:$LINENO: result: $with_tclconfig" >&5
+echo "${ECHO_T}$with_tclconfig" >&6
+if test "$with_tclconfig" != "no"; then
+ db_cv_tcl="yes"
+fi
+
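+# The unique-name suffix defaults to "_eds" here: DB_VERSION_UNIQUE_NAME is
+# appended to the public Berkeley DB symbol names, which should keep this
+# private copy from colliding with a system-installed libdb.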
+echo "$as_me:$LINENO: checking if --with-uniquename=NAME option specified" >&5
+echo $ECHO_N "checking if --with-uniquename=NAME option specified... $ECHO_C" >&6
+
+# Check whether --with-uniquename or --without-uniquename was given.
+if test "${with_uniquename+set}" = set; then
+ withval="$with_uniquename"
+ with_uniquename="$withval"
+else
+ with_uniquename="_eds"
+fi;
+if test "$with_uniquename" = "no"; then
+ db_cv_uniquename="no"
+ echo "$as_me:$LINENO: result: $with_uniquename" >&5
+echo "${ECHO_T}$with_uniquename" >&6
+else
+ db_cv_uniquename="yes"
+ if test "$with_uniquename" != "yes"; then
+ DB_VERSION_UNIQUE_NAME="$with_uniquename"
+ fi
+ echo "$as_me:$LINENO: result: $DB_VERSION_UNIQUE_NAME" >&5
+echo "${ECHO_T}$DB_VERSION_UNIQUE_NAME" >&6
+fi
+
+# Embedix requires RPM.
+if test "$db_cv_embedix" = "yes"; then
+ if test "$db_cv_rpm" = "no"; then
+ { { echo "$as_me:$LINENO: error: --with-embedix requires --with-rpm" >&5
+echo "$as_me: error: --with-embedix requires --with-rpm" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# Test requires Tcl
+if test "$db_cv_test" = "yes"; then
+ if test "$db_cv_tcl" = "no"; then
+ { { echo "$as_me:$LINENO: error: --enable-test requires --enable-tcl" >&5
+echo "$as_me: error: --enable-test requires --enable-tcl" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# Set some #defines based on configuration options.
+if test "$db_cv_diagnostic" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define DIAGNOSTIC 1
+_ACEOF
+
+
+
+fi
+if test "$db_cv_debug_rop" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define DEBUG_ROP 1
+_ACEOF
+
+
+
+fi
+if test "$db_cv_debug_wop" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define DEBUG_WOP 1
+_ACEOF
+
+
+
+fi
+if test "$db_cv_umrw" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define UMRW 1
+_ACEOF
+
+
+
+
+fi
+if test "$db_cv_test" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define CONFIG_TEST 1
+_ACEOF
+
+
+
+fi
+
+# Check for programs used in building and installation.
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ar; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_ar+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_ar"; then
+ ac_cv_prog_db_cv_path_ar="$db_cv_path_ar" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_ar="${ac_tool_prefix}ar"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_ar=$ac_cv_prog_db_cv_path_ar
+if test -n "$db_cv_path_ar"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_ar" >&5
+echo "${ECHO_T}$db_cv_path_ar" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_ar"; then
+ ac_ct_db_cv_path_ar=$db_cv_path_ar
+ # Extract the first word of "ar", so it can be a program name with args.
+set dummy ar; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_ar+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_ar"; then
+ ac_cv_prog_ac_ct_db_cv_path_ar="$ac_ct_db_cv_path_ar" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_ar="ar"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_ar" && ac_cv_prog_ac_ct_db_cv_path_ar="missing_ar"
+fi
+fi
+ac_ct_db_cv_path_ar=$ac_cv_prog_ac_ct_db_cv_path_ar
+if test -n "$ac_ct_db_cv_path_ar"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_ar" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_ar" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_ar=$ac_ct_db_cv_path_ar
+else
+ db_cv_path_ar="$ac_cv_prog_db_cv_path_ar"
+fi
+
+if test "$db_cv_path_ar" = missing_ar; then
+ { { echo "$as_me:$LINENO: error: No ar utility found." >&5
+echo "$as_me: error: No ar utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}chmod", so it can be a program name with args.
+set dummy ${ac_tool_prefix}chmod; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_chmod+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_chmod"; then
+ ac_cv_prog_db_cv_path_chmod="$db_cv_path_chmod" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_chmod="${ac_tool_prefix}chmod"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_chmod=$ac_cv_prog_db_cv_path_chmod
+if test -n "$db_cv_path_chmod"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_chmod" >&5
+echo "${ECHO_T}$db_cv_path_chmod" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_chmod"; then
+ ac_ct_db_cv_path_chmod=$db_cv_path_chmod
+ # Extract the first word of "chmod", so it can be a program name with args.
+set dummy chmod; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_chmod+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_chmod"; then
+ ac_cv_prog_ac_ct_db_cv_path_chmod="$ac_ct_db_cv_path_chmod" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_chmod="chmod"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_chmod" && ac_cv_prog_ac_ct_db_cv_path_chmod="missing_chmod"
+fi
+fi
+ac_ct_db_cv_path_chmod=$ac_cv_prog_ac_ct_db_cv_path_chmod
+if test -n "$ac_ct_db_cv_path_chmod"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_chmod" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_chmod" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_chmod=$ac_ct_db_cv_path_chmod
+else
+ db_cv_path_chmod="$ac_cv_prog_db_cv_path_chmod"
+fi
+
+if test "$db_cv_path_chmod" = missing_chmod; then
+ { { echo "$as_me:$LINENO: error: No chmod utility found." >&5
+echo "$as_me: error: No chmod utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cp", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cp; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_cp+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_cp"; then
+ ac_cv_prog_db_cv_path_cp="$db_cv_path_cp" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_cp="${ac_tool_prefix}cp"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_cp=$ac_cv_prog_db_cv_path_cp
+if test -n "$db_cv_path_cp"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_cp" >&5
+echo "${ECHO_T}$db_cv_path_cp" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_cp"; then
+ ac_ct_db_cv_path_cp=$db_cv_path_cp
+ # Extract the first word of "cp", so it can be a program name with args.
+set dummy cp; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_cp+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_cp"; then
+ ac_cv_prog_ac_ct_db_cv_path_cp="$ac_ct_db_cv_path_cp" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_cp="cp"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_cp" && ac_cv_prog_ac_ct_db_cv_path_cp="missing_cp"
+fi
+fi
+ac_ct_db_cv_path_cp=$ac_cv_prog_ac_ct_db_cv_path_cp
+if test -n "$ac_ct_db_cv_path_cp"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_cp" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_cp" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_cp=$ac_ct_db_cv_path_cp
+else
+ db_cv_path_cp="$ac_cv_prog_db_cv_path_cp"
+fi
+
+if test "$db_cv_path_cp" = missing_cp; then
+ { { echo "$as_me:$LINENO: error: No cp utility found." >&5
+echo "$as_me: error: No cp utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ldconfig", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ldconfig; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_path_ldconfig+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$path_ldconfig"; then
+ ac_cv_prog_path_ldconfig="$path_ldconfig" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_path_ldconfig="${ac_tool_prefix}ldconfig"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+path_ldconfig=$ac_cv_prog_path_ldconfig
+if test -n "$path_ldconfig"; then
+ echo "$as_me:$LINENO: result: $path_ldconfig" >&5
+echo "${ECHO_T}$path_ldconfig" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_path_ldconfig"; then
+ ac_ct_path_ldconfig=$path_ldconfig
+ # Extract the first word of "ldconfig", so it can be a program name with args.
+set dummy ldconfig; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_path_ldconfig+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_path_ldconfig"; then
+ ac_cv_prog_ac_ct_path_ldconfig="$ac_ct_path_ldconfig" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_path_ldconfig="ldconfig"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_path_ldconfig" && ac_cv_prog_ac_ct_path_ldconfig="missing_ldconfig"
+fi
+fi
+ac_ct_path_ldconfig=$ac_cv_prog_ac_ct_path_ldconfig
+if test -n "$ac_ct_path_ldconfig"; then
+ echo "$as_me:$LINENO: result: $ac_ct_path_ldconfig" >&5
+echo "${ECHO_T}$ac_ct_path_ldconfig" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ path_ldconfig=$ac_ct_path_ldconfig
+else
+ path_ldconfig="$ac_cv_prog_path_ldconfig"
+fi
+
+ # Extract the first word of "$path_ldconfig", so it can be a program name with args.
+set dummy $path_ldconfig; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_db_cv_path_ldconfig+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $db_cv_path_ldconfig in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_db_cv_path_ldconfig="$db_cv_path_ldconfig" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_db_cv_path_ldconfig="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_path_db_cv_path_ldconfig" && ac_cv_path_db_cv_path_ldconfig="missing_ldconfig"
+ ;;
+esac
+fi
+db_cv_path_ldconfig=$ac_cv_path_db_cv_path_ldconfig
+
+if test -n "$db_cv_path_ldconfig"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_ldconfig" >&5
+echo "${ECHO_T}$db_cv_path_ldconfig" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ if test "$db_cv_path_ldconfig" != missing_ldconfig; then
+ RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig"
+ RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig"
+ fi
+fi
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ln", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ln; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_ln+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_ln"; then
+ ac_cv_prog_db_cv_path_ln="$db_cv_path_ln" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_ln="${ac_tool_prefix}ln"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_ln=$ac_cv_prog_db_cv_path_ln
+if test -n "$db_cv_path_ln"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_ln" >&5
+echo "${ECHO_T}$db_cv_path_ln" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_ln"; then
+ ac_ct_db_cv_path_ln=$db_cv_path_ln
+ # Extract the first word of "ln", so it can be a program name with args.
+set dummy ln; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_ln+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_ln"; then
+ ac_cv_prog_ac_ct_db_cv_path_ln="$ac_ct_db_cv_path_ln" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_ln="ln"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_ln" && ac_cv_prog_ac_ct_db_cv_path_ln="missing_ln"
+fi
+fi
+ac_ct_db_cv_path_ln=$ac_cv_prog_ac_ct_db_cv_path_ln
+if test -n "$ac_ct_db_cv_path_ln"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_ln" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_ln" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_ln=$ac_ct_db_cv_path_ln
+else
+ db_cv_path_ln="$ac_cv_prog_db_cv_path_ln"
+fi
+
+if test "$db_cv_path_ln" = missing_ln; then
+ { { echo "$as_me:$LINENO: error: No ln utility found." >&5
+echo "$as_me: error: No ln utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}mkdir", so it can be a program name with args.
+set dummy ${ac_tool_prefix}mkdir; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_mkdir+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_mkdir"; then
+ ac_cv_prog_db_cv_path_mkdir="$db_cv_path_mkdir" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_mkdir="${ac_tool_prefix}mkdir"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_mkdir=$ac_cv_prog_db_cv_path_mkdir
+if test -n "$db_cv_path_mkdir"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_mkdir" >&5
+echo "${ECHO_T}$db_cv_path_mkdir" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_mkdir"; then
+ ac_ct_db_cv_path_mkdir=$db_cv_path_mkdir
+ # Extract the first word of "mkdir", so it can be a program name with args.
+set dummy mkdir; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_mkdir+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_mkdir"; then
+ ac_cv_prog_ac_ct_db_cv_path_mkdir="$ac_ct_db_cv_path_mkdir" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_mkdir="mkdir"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_mkdir" && ac_cv_prog_ac_ct_db_cv_path_mkdir="missing_mkdir"
+fi
+fi
+ac_ct_db_cv_path_mkdir=$ac_cv_prog_ac_ct_db_cv_path_mkdir
+if test -n "$ac_ct_db_cv_path_mkdir"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_mkdir" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_mkdir" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_mkdir=$ac_ct_db_cv_path_mkdir
+else
+ db_cv_path_mkdir="$ac_cv_prog_db_cv_path_mkdir"
+fi
+
+if test "$db_cv_path_mkdir" = missing_mkdir; then
+ { { echo "$as_me:$LINENO: error: No mkdir utility found." >&5
+echo "$as_me: error: No mkdir utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# We need a complete path for ranlib, because it doesn't exist on some
+# architectures because the ar utility packages the library itself.
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_path_ranlib+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$path_ranlib"; then
+ ac_cv_prog_path_ranlib="$path_ranlib" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_path_ranlib="${ac_tool_prefix}ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+path_ranlib=$ac_cv_prog_path_ranlib
+if test -n "$path_ranlib"; then
+ echo "$as_me:$LINENO: result: $path_ranlib" >&5
+echo "${ECHO_T}$path_ranlib" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_path_ranlib"; then
+ ac_ct_path_ranlib=$path_ranlib
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_path_ranlib+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_path_ranlib"; then
+ ac_cv_prog_ac_ct_path_ranlib="$ac_ct_path_ranlib" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_path_ranlib="ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_path_ranlib" && ac_cv_prog_ac_ct_path_ranlib="missing_ranlib"
+fi
+fi
+ac_ct_path_ranlib=$ac_cv_prog_ac_ct_path_ranlib
+if test -n "$ac_ct_path_ranlib"; then
+ echo "$as_me:$LINENO: result: $ac_ct_path_ranlib" >&5
+echo "${ECHO_T}$ac_ct_path_ranlib" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ path_ranlib=$ac_ct_path_ranlib
+else
+ path_ranlib="$ac_cv_prog_path_ranlib"
+fi
+
+# Extract the first word of "$path_ranlib", so it can be a program name with args.
+set dummy $path_ranlib; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_db_cv_path_ranlib+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $db_cv_path_ranlib in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_db_cv_path_ranlib="$db_cv_path_ranlib" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_db_cv_path_ranlib="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_path_db_cv_path_ranlib" && ac_cv_path_db_cv_path_ranlib="missing_ranlib"
+ ;;
+esac
+fi
+db_cv_path_ranlib=$ac_cv_path_db_cv_path_ranlib
+
+if test -n "$db_cv_path_ranlib"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_ranlib" >&5
+echo "${ECHO_T}$db_cv_path_ranlib" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}rm", so it can be a program name with args.
+set dummy ${ac_tool_prefix}rm; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_rm+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_rm"; then
+ ac_cv_prog_db_cv_path_rm="$db_cv_path_rm" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_rm="${ac_tool_prefix}rm"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_rm=$ac_cv_prog_db_cv_path_rm
+if test -n "$db_cv_path_rm"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_rm" >&5
+echo "${ECHO_T}$db_cv_path_rm" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_rm"; then
+ ac_ct_db_cv_path_rm=$db_cv_path_rm
+ # Extract the first word of "rm", so it can be a program name with args.
+set dummy rm; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_rm+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_rm"; then
+ ac_cv_prog_ac_ct_db_cv_path_rm="$ac_ct_db_cv_path_rm" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_rm="rm"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_rm" && ac_cv_prog_ac_ct_db_cv_path_rm="missing_rm"
+fi
+fi
+ac_ct_db_cv_path_rm=$ac_cv_prog_ac_ct_db_cv_path_rm
+if test -n "$ac_ct_db_cv_path_rm"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_rm" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_rm" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_rm=$ac_ct_db_cv_path_rm
+else
+ db_cv_path_rm="$ac_cv_prog_db_cv_path_rm"
+fi
+
+if test "$db_cv_path_rm" = missing_rm; then
+ { { echo "$as_me:$LINENO: error: No rm utility found." >&5
+echo "$as_me: error: No rm utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}rpm", so it can be a program name with args.
+set dummy ${ac_tool_prefix}rpm; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_rpm+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_rpm"; then
+ ac_cv_prog_db_cv_path_rpm="$db_cv_path_rpm" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_rpm="${ac_tool_prefix}rpm"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_rpm=$ac_cv_prog_db_cv_path_rpm
+if test -n "$db_cv_path_rpm"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_rpm" >&5
+echo "${ECHO_T}$db_cv_path_rpm" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_rpm"; then
+ ac_ct_db_cv_path_rpm=$db_cv_path_rpm
+ # Extract the first word of "rpm", so it can be a program name with args.
+set dummy rpm; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_rpm+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_rpm"; then
+ ac_cv_prog_ac_ct_db_cv_path_rpm="$ac_ct_db_cv_path_rpm" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_rpm="rpm"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_rpm" && ac_cv_prog_ac_ct_db_cv_path_rpm="missing_rpm"
+fi
+fi
+ac_ct_db_cv_path_rpm=$ac_cv_prog_ac_ct_db_cv_path_rpm
+if test -n "$ac_ct_db_cv_path_rpm"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_rpm" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_rpm" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_rpm=$ac_ct_db_cv_path_rpm
+else
+ db_cv_path_rpm="$ac_cv_prog_db_cv_path_rpm"
+fi
+
+ if test "$db_cv_path_rpm" = missing_rpm; then
+ { { echo "$as_me:$LINENO: error: No rpm utility found." >&5
+echo "$as_me: error: No rpm utility found." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# We need a complete path for sh, because some implementations of make
+# get upset if SHELL is set to just the command name.
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}sh", so it can be a program name with args.
+set dummy ${ac_tool_prefix}sh; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_path_sh+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$path_sh"; then
+ ac_cv_prog_path_sh="$path_sh" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_path_sh="${ac_tool_prefix}sh"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+path_sh=$ac_cv_prog_path_sh
+if test -n "$path_sh"; then
+ echo "$as_me:$LINENO: result: $path_sh" >&5
+echo "${ECHO_T}$path_sh" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_path_sh"; then
+ ac_ct_path_sh=$path_sh
+ # Extract the first word of "sh", so it can be a program name with args.
+set dummy sh; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_path_sh+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_path_sh"; then
+ ac_cv_prog_ac_ct_path_sh="$ac_ct_path_sh" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_path_sh="sh"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_path_sh" && ac_cv_prog_ac_ct_path_sh="missing_sh"
+fi
+fi
+ac_ct_path_sh=$ac_cv_prog_ac_ct_path_sh
+if test -n "$ac_ct_path_sh"; then
+ echo "$as_me:$LINENO: result: $ac_ct_path_sh" >&5
+echo "${ECHO_T}$ac_ct_path_sh" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ path_sh=$ac_ct_path_sh
+else
+ path_sh="$ac_cv_prog_path_sh"
+fi
+
+# Extract the first word of "$path_sh", so it can be a program name with args.
+set dummy $path_sh; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_db_cv_path_sh+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $db_cv_path_sh in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_db_cv_path_sh="$db_cv_path_sh" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_db_cv_path_sh="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_path_db_cv_path_sh" && ac_cv_path_db_cv_path_sh="missing_sh"
+ ;;
+esac
+fi
+db_cv_path_sh=$ac_cv_path_db_cv_path_sh
+
+if test -n "$db_cv_path_sh"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_sh" >&5
+echo "${ECHO_T}$db_cv_path_sh" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+if test "$db_cv_path_sh" = missing_sh; then
+ { { echo "$as_me:$LINENO: error: No sh utility found." >&5
+echo "$as_me: error: No sh utility found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# Don't strip the binaries if --enable-debug was specified.
+if test "$db_cv_debug" = yes; then
+ db_cv_path_strip=debug_build_no_strip
+else
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_path_strip+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$path_strip"; then
+ ac_cv_prog_path_strip="$path_strip" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_path_strip="${ac_tool_prefix}strip"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+path_strip=$ac_cv_prog_path_strip
+if test -n "$path_strip"; then
+ echo "$as_me:$LINENO: result: $path_strip" >&5
+echo "${ECHO_T}$path_strip" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_path_strip"; then
+ ac_ct_path_strip=$path_strip
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_path_strip+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_path_strip"; then
+ ac_cv_prog_ac_ct_path_strip="$ac_ct_path_strip" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_path_strip="strip"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_path_strip" && ac_cv_prog_ac_ct_path_strip="missing_strip"
+fi
+fi
+ac_ct_path_strip=$ac_cv_prog_ac_ct_path_strip
+if test -n "$ac_ct_path_strip"; then
+ echo "$as_me:$LINENO: result: $ac_ct_path_strip" >&5
+echo "${ECHO_T}$ac_ct_path_strip" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ path_strip=$ac_ct_path_strip
+else
+ path_strip="$ac_cv_prog_path_strip"
+fi
+
+ # Extract the first word of "$path_strip", so it can be a program name with args.
+set dummy $path_strip; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_db_cv_path_strip+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $db_cv_path_strip in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_db_cv_path_strip="$db_cv_path_strip" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_db_cv_path_strip="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_path_db_cv_path_strip" && ac_cv_path_db_cv_path_strip="missing_strip"
+ ;;
+esac
+fi
+db_cv_path_strip=$ac_cv_path_db_cv_path_strip
+
+if test -n "$db_cv_path_strip"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_strip" >&5
+echo "${ECHO_T}$db_cv_path_strip" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+
+if test "$db_cv_test" = "yes"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}kill", so it can be a program name with args.
+set dummy ${ac_tool_prefix}kill; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_db_cv_path_kill+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$db_cv_path_kill"; then
+ ac_cv_prog_db_cv_path_kill="$db_cv_path_kill" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_db_cv_path_kill="${ac_tool_prefix}kill"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+db_cv_path_kill=$ac_cv_prog_db_cv_path_kill
+if test -n "$db_cv_path_kill"; then
+ echo "$as_me:$LINENO: result: $db_cv_path_kill" >&5
+echo "${ECHO_T}$db_cv_path_kill" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_db_cv_path_kill"; then
+ ac_ct_db_cv_path_kill=$db_cv_path_kill
+ # Extract the first word of "kill", so it can be a program name with args.
+set dummy kill; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_db_cv_path_kill+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_db_cv_path_kill"; then
+ ac_cv_prog_ac_ct_db_cv_path_kill="$ac_ct_db_cv_path_kill" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_db_cv_path_kill="kill"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_db_cv_path_kill" && ac_cv_prog_ac_ct_db_cv_path_kill="missing_kill"
+fi
+fi
+ac_ct_db_cv_path_kill=$ac_cv_prog_ac_ct_db_cv_path_kill
+if test -n "$ac_ct_db_cv_path_kill"; then
+ echo "$as_me:$LINENO: result: $ac_ct_db_cv_path_kill" >&5
+echo "${ECHO_T}$ac_ct_db_cv_path_kill" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ db_cv_path_kill=$ac_ct_db_cv_path_kill
+else
+ db_cv_path_kill="$ac_cv_prog_db_cv_path_kill"
+fi
+
+ if test "$db_cv_path_kill" = missing_kill; then
+ { { echo "$as_me:$LINENO: error: No kill utility found." >&5
+echo "$as_me: error: No kill utility found." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+ ./ | .// | /cC/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+done
+
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL=$ac_install_sh
+ fi
+fi
+echo "$as_me:$LINENO: result: $INSTALL" >&5
+echo "${ECHO_T}$INSTALL" >&6
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+
+# RPM/Embedix support: change the standard make and install targets
+if test "$db_cv_rpm" = "yes"; then
+ BUILD_TARGET="rpm_build"
+ echo "topdir: $CONFIGURATION_PATH" > rpmrc
+ if test "$db_cv_embedix" = "yes"; then
+ EMBEDIX_ROOT="/usr"
+ INSTALL_TARGET="embedix_install"
+ else
+ INSTALL_TARGET="rpm_install"
+ fi
+else
+ BUILD_TARGET="library_build"
+ INSTALL_TARGET="library_install"
+fi
+
+# This is where we handle stuff that autoconf can't handle: compiler,
+# preprocessor and load flags, libraries that the standard tests don't
+# look for. The default optimization is -O. We would like to set the
+# default optimization for systems using gcc to -O2, but we can't. By
+# the time we know we're using gcc, it's too late to set optimization
+# flags.
+#
+# There are additional libraries we need for some compiler/architecture
+# combinations.
+#
+# Some architectures require DB to be compiled with special flags and/or
+# libraries for threaded applications
+#
+# The makefile CC may be different than the CC used in config testing,
+# because the makefile CC may be set to use $(LIBTOOL).
+#
+# XXX
+# Don't override anything if it's already set from the environment.
+optimize_def="-O"
+MAKEFILE_MAYBE_WIN32=""
+SOFLAGS=""
+case "$host_os" in
+aix4.3.*|aix5*)
+ optimize_def="-O2"
+ CC=${CC-"xlc_r"}
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -Wl,-brtl";;
+bsdi3*) optimize_def="-O2"
+ CC=${CC-"shlicc2"}
+ LIBS="$LIBS -lipc";;
+bsdi*) optimize_def="-O2";;
+freebsd*)
+ optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -pthread";;
+hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+irix*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";;
+linux*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";;
+mingw*) optimize_def="-O2"
+ SOFLAGS="-no-undefined"
+ ADDITIONAL_OBJS="os_type.lo $ADDITIONAL_OBJS"
+ MAKEFILE_MAYBE_WIN32="_win32"
+ cat >>confdefs.h <<\_ACEOF
+#define DB_WIN32 1
+_ACEOF
+
+
+;;
+mpeix*) CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE"
+ LIBS="$LIBS -lsocket -lsvipc";;
+osf*) CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ LDFLAGS="$LDFLAGS -pthread";;
+*qnx) cat >>confdefs.h <<\_ACEOF
+#define HAVE_QNX 1
+_ACEOF
+
+
+;;
+solaris*)
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+esac
+
+# Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
+# compiler configuration macros, because if we don't, they set CFLAGS
+# to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_def}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
+
+# If the user wants a debugging environment, add -g to the CFLAGS value.
+#
+# XXX
+# Some compilers can't mix optimizing and debug flags. The only way to
+# handle this is to specify CFLAGS in the environment before configuring.
+if test "$db_cv_debug" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define DEBUG 1
+_ACEOF
+
+
+
+
+ CFLAGS="$CFLAGS -g"
+ CXXFLAGS="$CXXFLAGS -g"
+fi
+
+# The default compiler is cc (NOT gcc), the default CFLAGS is as specified
+# above, NOT what is set by AC_PROG_CC, as it won't set optimization flags
+# for any compiler other than gcc.
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ for ac_prog in cc gcc
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cc gcc
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$ac_ct_CC" && break
+done
+
+ CC=$ac_ct_CC
+fi
+
+
+test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO:" \
+ "checking for C compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
+ (eval $ac_compiler --version </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
+ (eval $ac_compiler -v </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
+ (eval $ac_compiler -V </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+echo "$as_me:$LINENO: checking for C compiler default output" >&5
+echo $ECHO_N "checking for C compiler default output... $ECHO_C" >&6
+ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+if { (eval echo "$as_me:$LINENO: \"$ac_link_default\"") >&5
+ (eval $ac_link_default) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Find the output, starting from the most likely. This scheme is
+# not robust to junk in `.', hence go to wildcards (a.*) only as a last
+# resort.
+
+# Be careful to initialize this variable, since it used to be cached.
+# Otherwise an old cache value of `no' led to `EXEEXT = no' in a Makefile.
+ac_cv_exeext=
+# b.out is created by i960 compilers.
+for ac_file in a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj )
+ ;;
+ conftest.$ac_ext )
+ # This is the source file.
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ # FIXME: I believe we export ac_cv_exeext for Libtool,
+ # but it would be cool to find out if it's true. Does anybody
+ # maintain Libtool? --akim.
+ export ac_cv_exeext
+ break;;
+ * )
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: C compiler cannot create executables
+See \`config.log' for more details." >&5
+echo "$as_me: error: C compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+echo "$as_me:$LINENO: result: $ac_file" >&5
+echo "${ECHO_T}$ac_file" >&6
+
+# Check the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+echo "$as_me:$LINENO: checking whether the C compiler works" >&5
+echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { echo "$as_me:$LINENO: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+fi
+echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+
+rm -f a.out a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6
+echo "$as_me:$LINENO: result: $cross_compiling" >&5
+echo "${ECHO_T}$cross_compiling" >&6
+
+echo "$as_me:$LINENO: checking for suffix of executables" >&5
+echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ export ac_cv_exeext
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+echo "${ECHO_T}$ac_cv_exeext" >&6
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+echo "$as_me:$LINENO: checking for suffix of object files" >&5
+echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6
+if test "${ac_cv_objext+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in `(ls conftest.o conftest.obj; ls conftest.*) 2>/dev/null`; do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+echo "${ECHO_T}$ac_cv_objext" >&6
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_compiler_gnu=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6
+GCC=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+CFLAGS="-g"
+echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_prog_cc_g=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+echo "$as_me:$LINENO: checking for $CC option to accept ANSI C" >&5
+echo $ECHO_N "checking for $CC option to accept ANSI C... $ECHO_C" >&6
+if test "${ac_cv_prog_cc_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_stdc=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+# Don't try gcc -ansi; that turns off useful extensions and
+# breaks some systems' header files.
+# AIX -qlanglvl=ansi
+# Ultrix and OSF/1 -std1
+# HP-UX 10.20 and later -Ae
+# HP-UX older versions -Aa -D_HPUX_SOURCE
+# SVR4 -Xc -D__EXTENSIONS__
+for ac_arg in "" -qlanglvl=ansi -std1 -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_stdc=$ac_arg
+break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext
+done
+rm -f conftest.$ac_ext conftest.$ac_objext
+CC=$ac_save_CC
+
+fi
+
+case "x$ac_cv_prog_cc_stdc" in
+ x|xno)
+ echo "$as_me:$LINENO: result: none needed" >&5
+echo "${ECHO_T}none needed" >&6 ;;
+ *)
+ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_stdc" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_stdc" >&6
+ CC="$CC $ac_cv_prog_cc_stdc" ;;
+esac
+
+# Some people use a C++ compiler to compile C. Since we use `exit',
+# in C++ we need to declare it. In case someone uses the same compiler
+# for both compiling C and C++ we need to have the C++ compiler decide
+# the declaration of exit, since it's the most demanding environment.
+cat >conftest.$ac_ext <<_ACEOF
+#ifndef __cplusplus
+ choke me
+#endif
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ for ac_declaration in \
+ ''\
+ '#include <stdlib.h>' \
+ 'extern "C" void std::exit (int) throw (); using std::exit;' \
+ 'extern "C" void std::exit (int); using std::exit;' \
+ 'extern "C" void exit (int) throw ();' \
+ 'extern "C" void exit (int);' \
+ 'void exit (int);'
+do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+continue
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+rm -f conftest*
+if test -n "$ac_declaration"; then
+ echo '#ifdef __cplusplus' >>confdefs.h
+ echo $ac_declaration >>confdefs.h
+ echo '#endif' >>confdefs.h
+fi
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Because of shared library building, the ${CC} used for config tests
+# may be different than the ${CC} we want to put in the Makefile.
+# The latter is known as ${MAKEFILE_CC} in this script.
+MAKEFILE_CC="${CC}"
+MAKEFILE_CCLINK="${CC}"
+MAKEFILE_CXX="nocxx"
+MAKEFILE_CXXLINK="nocxx"
+
+# See if we need the C++ compiler at all. If so, we'd like to find one that
+# interoperates with the C compiler we chose. Since we preferred cc over gcc,
+# we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
+# user can set CC and CXX in their environment before running configure.
+#
+# AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
+# first choices.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$GCC" != "yes"; then
+ case "$host_os" in
+ aix*) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}xlC_r", so it can be a program name with args.
+set dummy ${ac_tool_prefix}xlC_r; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CCC"; then
+ ac_cv_prog_CCC="$CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CCC="${ac_tool_prefix}xlC_r"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CCC=$ac_cv_prog_CCC
+if test -n "$CCC"; then
+ echo "$as_me:$LINENO: result: $CCC" >&5
+echo "${ECHO_T}$CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CCC"; then
+ ac_ct_CCC=$CCC
+ # Extract the first word of "xlC_r", so it can be a program name with args.
+set dummy xlC_r; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CCC"; then
+ ac_cv_prog_ac_ct_CCC="$ac_ct_CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CCC="xlC_r"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CCC=$ac_cv_prog_ac_ct_CCC
+if test -n "$ac_ct_CCC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CCC" >&5
+echo "${ECHO_T}$ac_ct_CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CCC=$ac_ct_CCC
+else
+ CCC="$ac_cv_prog_CCC"
+fi
+
+ LIBXSO_LIBS="-lC_r $LIBXSO_LIBS"
+ LIBS="-lC_r $LIBS";;
+ hpux*) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}aCC", so it can be a program name with args.
+set dummy ${ac_tool_prefix}aCC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CCC"; then
+ ac_cv_prog_CCC="$CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CCC="${ac_tool_prefix}aCC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CCC=$ac_cv_prog_CCC
+if test -n "$CCC"; then
+ echo "$as_me:$LINENO: result: $CCC" >&5
+echo "${ECHO_T}$CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CCC"; then
+ ac_ct_CCC=$CCC
+ # Extract the first word of "aCC", so it can be a program name with args.
+set dummy aCC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CCC"; then
+ ac_cv_prog_ac_ct_CCC="$ac_ct_CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CCC="aCC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CCC=$ac_cv_prog_ac_ct_CCC
+if test -n "$ac_ct_CCC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CCC" >&5
+echo "${ECHO_T}$ac_ct_CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CCC=$ac_ct_CCC
+else
+ CCC="$ac_cv_prog_CCC"
+fi
+;;
+ irix*) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}CC", so it can be a program name with args.
+set dummy ${ac_tool_prefix}CC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CCC"; then
+ ac_cv_prog_CCC="$CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CCC="${ac_tool_prefix}CC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CCC=$ac_cv_prog_CCC
+if test -n "$CCC"; then
+ echo "$as_me:$LINENO: result: $CCC" >&5
+echo "${ECHO_T}$CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CCC"; then
+ ac_ct_CCC=$CCC
+ # Extract the first word of "CC", so it can be a program name with args.
+set dummy CC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CCC"; then
+ ac_cv_prog_ac_ct_CCC="$ac_ct_CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CCC="CC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CCC=$ac_cv_prog_ac_ct_CCC
+if test -n "$ac_ct_CCC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CCC" >&5
+echo "${ECHO_T}$ac_ct_CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CCC=$ac_ct_CCC
+else
+ CCC="$ac_cv_prog_CCC"
+fi
+;;
+ osf*) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cxx", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cxx; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CCC"; then
+ ac_cv_prog_CCC="$CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CCC="${ac_tool_prefix}cxx"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CCC=$ac_cv_prog_CCC
+if test -n "$CCC"; then
+ echo "$as_me:$LINENO: result: $CCC" >&5
+echo "${ECHO_T}$CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CCC"; then
+ ac_ct_CCC=$CCC
+ # Extract the first word of "cxx", so it can be a program name with args.
+set dummy cxx; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CCC"; then
+ ac_cv_prog_ac_ct_CCC="$ac_ct_CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CCC="cxx"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CCC=$ac_cv_prog_ac_ct_CCC
+if test -n "$ac_ct_CCC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CCC" >&5
+echo "${ECHO_T}$ac_ct_CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CCC=$ac_ct_CCC
+else
+ CCC="$ac_cv_prog_CCC"
+fi
+;;
+ solaris*) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}CC", so it can be a program name with args.
+set dummy ${ac_tool_prefix}CC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CCC"; then
+ ac_cv_prog_CCC="$CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CCC="${ac_tool_prefix}CC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CCC=$ac_cv_prog_CCC
+if test -n "$CCC"; then
+ echo "$as_me:$LINENO: result: $CCC" >&5
+echo "${ECHO_T}$CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_CCC"; then
+ ac_ct_CCC=$CCC
+ # Extract the first word of "CC", so it can be a program name with args.
+set dummy CC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CCC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CCC"; then
+ ac_cv_prog_ac_ct_CCC="$ac_ct_CCC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CCC="CC"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CCC=$ac_cv_prog_ac_ct_CCC
+if test -n "$ac_ct_CCC"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CCC" >&5
+echo "${ECHO_T}$ac_ct_CCC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ CCC=$ac_ct_CCC
+else
+ CCC="$ac_cv_prog_CCC"
+fi
+;;
+ esac
+ fi
+ ac_ext=cc
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_CXX+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ echo "$as_me:$LINENO: result: $CXX" >&5
+echo "${ECHO_T}$CXX" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in $CCC g++ c++ gpp aCC CC cxx cc++ cl FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5
+echo "${ECHO_T}$ac_ct_CXX" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$ac_ct_CXX" && break
+done
+test -n "$ac_ct_CXX" || ac_ct_CXX="g++"
+
+ CXX=$ac_ct_CXX
+fi
+
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO:" \
+ "checking for C++ compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler --version </dev/null >&5\"") >&5
+ (eval $ac_compiler --version </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -v </dev/null >&5\"") >&5
+ (eval $ac_compiler -v </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (eval echo "$as_me:$LINENO: \"$ac_compiler -V </dev/null >&5\"") >&5
+ (eval $ac_compiler -V </dev/null >&5) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_compiler_gnu=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6
+GXX=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+CXXFLAGS="-g"
+echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5
+echo $ECHO_N "checking whether $CXX accepts -g... $ECHO_C" >&6
+if test "${ac_cv_prog_cxx_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cxx_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_prog_cxx_g=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+for ac_declaration in \
+ ''\
+ '#include <stdlib.h>' \
+ 'extern "C" void std::exit (int) throw (); using std::exit;' \
+ 'extern "C" void std::exit (int); using std::exit;' \
+ 'extern "C" void exit (int) throw ();' \
+ 'extern "C" void exit (int);' \
+ 'void exit (int);'
+do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+continue
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_declaration
+int
+main ()
+{
+exit (42);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+rm -f conftest*
+if test -n "$ac_declaration"; then
+ echo '#ifdef __cplusplus' >>confdefs.h
+ echo $ac_declaration >>confdefs.h
+ echo '#endif' >>confdefs.h
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+echo "$as_me:$LINENO: checking whether C++ supports the ISO C++ standard includes" >&5
+echo $ECHO_N "checking whether C++ supports the ISO C++ standard includes... $ECHO_C" >&6
+if test "${db_cv_cxx_have_stdheaders+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+
+ ac_ext=cc
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <iostream>
+
+int
+main ()
+{
+std::ostream *o; return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_cxx_have_stdheaders=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+db_cv_cxx_have_stdheaders=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+fi
+echo "$as_me:$LINENO: result: $db_cv_cxx_have_stdheaders" >&5
+echo "${ECHO_T}$db_cv_cxx_have_stdheaders" >&6
+if test "$db_cv_cxx_have_stdheaders" = yes; then
+ cxx_have_stdheaders="#define HAVE_CXX_STDHEADERS 1"
+fi
+ MAKEFILE_CXX="${CXX}"
+ MAKEFILE_CXXLINK="${CXX}"
+fi
+
+# Do some gcc specific configuration.
+
+echo "$as_me:$LINENO: checking whether we are using gcc version 2.96" >&5
+echo $ECHO_N "checking whether we are using gcc version 2.96... $ECHO_C" >&6
+if test "${db_cv_gcc_2_96+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+db_cv_gcc_2_96=no
+if test "$GCC" = "yes"; then
+ GCC_VERSION=`${MAKEFILE_CC} --version`
+ case ${GCC_VERSION} in
+ 2.96*)
+ db_cv_gcc_2_96=yes;;
+ esac
+fi
+fi
+echo "$as_me:$LINENO: result: $db_cv_gcc_2_96" >&5
+echo "${ECHO_T}$db_cv_gcc_2_96" >&6
+if test "$db_cv_gcc_2_96" = "yes"; then
+ CFLAGS=`echo "$CFLAGS" | sed 's/-O2/-O/'`
+ CXXFLAGS=`echo "$CXXFLAGS" | sed 's/-O2/-O/'`
+ { echo "$as_me:$LINENO: WARNING: INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE." >&5
+echo "$as_me: WARNING: INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE." >&2;}
+ { echo "$as_me:$LINENO: WARNING: GCC OPTIMIZATION LEVEL SET TO -O." >&5
+echo "$as_me: WARNING: GCC OPTIMIZATION LEVEL SET TO -O." >&2;}
+fi
+
+echo "$as_me:$LINENO: checking whether g++ requires -fhandle-exceptions" >&5
+echo $ECHO_N "checking whether g++ requires -fhandle-exceptions... $ECHO_C" >&6
+if test "${db_cv_gxx_except+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+db_cv_gxx_except=no;
+if test "$GXX" = "yes"; then
+ GXX_VERSION=`${MAKEFILE_CXX} --version`
+ case ${GXX_VERSION} in
+ 1.*|2.[01234567].*|*-1.*|*-2.[01234567].*)
+ db_cv_gxx_except=yes;;
+ esac
+fi
+fi
+echo "$as_me:$LINENO: result: $db_cv_gxx_except" >&5
+echo "${ECHO_T}$db_cv_gxx_except" >&6
+if test "$db_cv_gxx_except" = "yes"; then
+ CXXFLAGS="$CXXFLAGS -fhandle-exceptions"
+fi
+
+# We need the -Kthread/-pthread flag when compiling on SCO/Caldera's UnixWare
+# and OpenUNIX releases. We can't make the test until we know which compiler
+# we're using.
+case "$host_os" in
+sysv5UnixWare*|sysv5OpenUNIX8*)
+ if test "$GCC" == "yes"; then
+ CPPFLAGS="$CPPFLAGS -pthread"
+ LDFLAGS="$LDFLAGS -pthread"
+ else
+ CPPFLAGS="$CPPFLAGS -Kthread"
+ LDFLAGS="$LDFLAGS -Kthread"
+ fi;;
+esac
+
+# Export our compiler preferences for the libtool configuration.
+export CC CCC
+CCC=CXX
+
+# Libtool configuration.
+# Check whether --enable-shared or --disable-shared was given.
+if test "${enable_shared+set}" = set; then
+ enableval="$enable_shared"
+ p=${PACKAGE-default}
+case $enableval in
+yes) enable_shared=yes ;;
+no) enable_shared=no ;;
+*)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+else
+ enable_shared=yes
+fi;
+# Check whether --enable-static or --disable-static was given.
+if test "${enable_static+set}" = set; then
+ enableval="$enable_static"
+ p=${PACKAGE-default}
+case $enableval in
+yes) enable_static=yes ;;
+no) enable_static=no ;;
+*)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+else
+ enable_static=yes
+fi;
+# Check whether --enable-fast-install or --disable-fast-install was given.
+if test "${enable_fast_install+set}" = set; then
+ enableval="$enable_fast_install"
+ p=${PACKAGE-default}
+case $enableval in
+yes) enable_fast_install=yes ;;
+no) enable_fast_install=no ;;
+*)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+else
+ enable_fast_install=yes
+fi;
+# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) lt_cv_sys_path_separator=';' ;;
+ *) lt_cv_sys_path_separator=':' ;;
+ esac
+ PATH_SEPARATOR=$lt_cv_sys_path_separator
+fi
+
+
+# Check whether --with-gnu-ld or --without-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then
+ withval="$with_gnu_ld"
+ test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi;
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ echo "$as_me:$LINENO: checking for ld used by GCC" >&5
+echo $ECHO_N "checking for ld used by GCC... $ECHO_C" >&6
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | [A-Za-z]:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ echo "$as_me:$LINENO: checking for GNU ld" >&5
+echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6
+else
+ echo "$as_me:$LINENO: checking for non-GNU ld" >&5
+echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6
+fi
+if test "${lt_cv_path_LD+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$lt_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ echo "$as_me:$LINENO: result: $LD" >&5
+echo "${ECHO_T}$LD" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5
+echo "$as_me: error: no acceptable ld found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5
+echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6
+if test "${lt_cv_prog_gnu_ld+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ lt_cv_prog_gnu_ld=yes
+else
+ lt_cv_prog_gnu_ld=no
+fi
+fi
+echo "$as_me:$LINENO: result: $lt_cv_prog_gnu_ld" >&5
+echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5
+echo $ECHO_N "checking for $LD option to reload object files... $ECHO_C" >&6
+if test "${lt_cv_ld_reload_flag+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ lt_cv_ld_reload_flag='-r'
+fi
+echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5
+echo "${ECHO_T}$lt_cv_ld_reload_flag" >&6
+reload_flag=$lt_cv_ld_reload_flag
+test -n "$reload_flag" && reload_flag=" $reload_flag"
+
+echo "$as_me:$LINENO: checking for BSD-compatible nm" >&5
+echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6
+if test "${lt_cv_path_NM+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm=$ac_dir/${ac_tool_prefix}nm
+ if test -f $tmp_nm || test -f $tmp_nm$ac_exeext ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ if ($tmp_nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep '(/dev/null|Invalid file or object type)' >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ elif ($tmp_nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ else
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm
+fi
+fi
+
+NM="$lt_cv_path_NM"
+echo "$as_me:$LINENO: result: $NM" >&5
+echo "${ECHO_T}$NM" >&6
+
+echo "$as_me:$LINENO: checking whether ln -s works" >&5
+echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+else
+ echo "$as_me:$LINENO: result: no, using $LN_S" >&5
+echo "${ECHO_T}no, using $LN_S" >&6
+fi
+
+echo "$as_me:$LINENO: checking how to recognise dependant libraries" >&5
+echo $ECHO_N "checking how to recognise dependant libraries... $ECHO_C" >&6
+if test "${lt_cv_deplibs_check_method+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given egrep regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix4* | aix5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi4*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin* | mingw* | pw32*)
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method='file_magic Mach-O dynamically linked shared library'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ case "$host_os" in
+ rhapsody* | darwin1.[012])
+ lt_cv_file_magic_test_file=`echo /System/Library/Frameworks/System.framework/Versions/*/System | head -1`
+ ;;
+ *) # Darwin 1.3 on
+ lt_cv_file_magic_test_file='/usr/lib/libSystem.dylib'
+ ;;
+ esac
+ ;;
+
+freebsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD)/i[3-9]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20*|hpux11*)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+
+irix5* | irix6*)
+ case $host_os in
+ irix5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[1234] dynamic lib MIPS - version 1"
+ ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*`
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ case $host_cpu in
+ alpha* | hppa* | i*86 | powerpc* | sparc* | ia64* | *)
+ lt_cv_deplibs_check_method=pass_all ;;
+ *)
+ # glibc up to 2.1.1 does not perform some relocations on ARM
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so`
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[^/\.]+\.so\.[0-9]+\.[0-9]+$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[^/\.]+\.so$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+nto-qnx)
+ lt_cv_deplibs_check_method=unknown
+ ;;
+
+openbsd*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB shared object'
+ else
+ lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method='file_magic COFF format alpha shared library'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sco3.2v5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+
+sysv5uw[78]* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ esac
+ ;;
+esac
+
+fi
+echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5
+echo "${ECHO_T}$lt_cv_deplibs_check_method" >&6
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+
+
+
+
+
+
+
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+echo "$as_me:$LINENO: checking command to parse $NM output" >&5
+echo $ECHO_N "checking command to parse $NM output... $ECHO_C" >&6
+if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Transform the above into a raw symbol and a C symbol.
+symxfrm='\1 \2\3 \3'
+
+# Transform an extracted symbol line into a proper C declaration
+lt_cv_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[BCDT]'
+ ;;
+cygwin* | mingw* | pw32*)
+ symcode='[ABCDGISTW]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ lt_cv_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ ;;
+irix*)
+ symcode='[BCDEGRST]'
+ ;;
+solaris* | sysv5*)
+ symcode='[BDT]'
+ ;;
+sysv4)
+ symcode='[DFNSTU]'
+ ;;
+esac
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $host_os in
+mingw*)
+ opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
+ symcode='[ABCDGISTW]'
+fi
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Write the raw and C identifiers.
+lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*\($ac_symprfx\)$sympat$opt_cr$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+ rm -f conftest*
+ cat > conftest.$ac_ext <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5
+ (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if egrep ' nm_test_var$' "$nlist" >/dev/null; then
+ if egrep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_global_symbol_to_cdecl"' < "$nlist" >> conftest.$ac_ext'
+
+ cat <<EOF >> conftest.$ac_ext
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[] =
+{
+EOF
+ sed "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr) \&\2},/" < "$nlist" >> conftest.$ac_ext
+ cat <<\EOF >> conftest.$ac_ext
+ {0, (lt_ptr) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$no_builtin_flag"
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest; then
+ pipe_works=yes
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&5
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&5
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
+ fi
+ else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ fi
+ rm -f conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+
+fi
+
+global_symbol_pipe="$lt_cv_sys_global_symbol_pipe"
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ global_symbol_to_cdecl=
+ global_symbol_to_c_name_address=
+else
+ global_symbol_to_cdecl="$lt_cv_global_symbol_to_cdecl"
+ global_symbol_to_c_name_address="$lt_cv_global_symbol_to_c_name_address"
+fi
+if test -z "$global_symbol_pipe$global_symbol_to_cdec$global_symbol_to_c_name_address";
+then
+ echo "$as_me:$LINENO: result: failed" >&5
+echo "${ECHO_T}failed" >&6
+else
+ echo "$as_me:$LINENO: result: ok" >&5
+echo "${ECHO_T}ok" >&6
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if test "${ac_cv_prog_CPP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether non-existent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+echo "$as_me:$LINENO: result: $CPP" >&5
+echo "${ECHO_T}$CPP" >&6
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether non-existent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+echo "$as_me:$LINENO: checking for egrep" >&5
+echo $ECHO_N "checking for egrep... $ECHO_C" >&6
+if test "${ac_cv_prog_egrep+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if echo a | (grep -E '(a|b)') >/dev/null 2>&1
+ then ac_cv_prog_egrep='grep -E'
+ else ac_cv_prog_egrep='egrep'
+ fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_egrep" >&5
+echo "${ECHO_T}$ac_cv_prog_egrep" >&6
+ EGREP=$ac_cv_prog_egrep
+
+
+echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6
+if test "${ac_cv_header_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_header_stdc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_header_stdc=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ exit(2);
+ exit (0);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+echo "${ECHO_T}$ac_cv_header_stdc" >&6
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_Header=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_Header=no"
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+for ac_header in dlfcn.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+else
+ # Is the header compilable?
+echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_header_compiler=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6
+
+# Is the header present?
+echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc in
+ yes:no )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+ no:yes )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=$ac_header_preproc"
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+
+# Only perform the check for file, if the check method requires it
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5
+echo $ECHO_N "checking for ${ac_tool_prefix}file... $ECHO_C" >&6
+if test "${lt_cv_path_MAGIC_CMD+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $MAGIC_CMD in
+ /*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a dos path.
+ ;;
+ *)
+ ac_save_MAGIC_CMD="$MAGIC_CMD"
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="/usr/bin:$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/${ac_tool_prefix}file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ MAGIC_CMD="$ac_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5
+echo "${ECHO_T}$MAGIC_CMD" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ echo "$as_me:$LINENO: checking for file" >&5
+echo $ECHO_N "checking for file... $ECHO_C" >&6
+if test "${lt_cv_path_MAGIC_CMD+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $MAGIC_CMD in
+ /*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a dos path.
+ ;;
+ *)
+ ac_save_MAGIC_CMD="$MAGIC_CMD"
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="/usr/bin:$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ MAGIC_CMD="$ac_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5
+echo "${ECHO_T}$MAGIC_CMD" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ else
+ MAGIC_CMD=:
+ fi
+fi
+
+ fi
+ ;;
+esac
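+# Illustration only: with a file_magic deplibs_check_method, the loop above
+# verifies the detected "file" command by running something like
+#   file /usr/lib/libc.so.6 | egrep "$file_magic_regex"
+# on a known shared library ($file_magic_test_file); the library path shown
+# here is just an example.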
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_RANLIB+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ echo "$as_me:$LINENO: result: $RANLIB" >&5
+echo "${ECHO_T}$RANLIB" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_RANLIB" && ac_cv_prog_ac_ct_RANLIB=":"
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5
+echo "${ECHO_T}$ac_ct_RANLIB" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ RANLIB=$ac_ct_RANLIB
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_STRIP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ echo "$as_me:$LINENO: result: $STRIP" >&5
+echo "${ECHO_T}$STRIP" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_prog_ac_ct_STRIP" && ac_cv_prog_ac_ct_STRIP=":"
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5
+echo "${ECHO_T}$ac_ct_STRIP" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ STRIP=$ac_ct_STRIP
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+
+enable_dlopen=no
+enable_win32_dll=no
+
+# Check whether --enable-libtool-lock or --disable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then
+ enableval="$enable_libtool_lock"
+
+fi;
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '#line 6492 "configure"' > conftest.$ac_ext
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5
+echo $ECHO_N "checking whether the C compiler needs -belf... $ECHO_C" >&6
+if test "${lt_cv_cc_needs_belf+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ lt_cv_cc_needs_belf=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+lt_cv_cc_needs_belf=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+fi
+echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5
+echo "${ECHO_T}$lt_cv_cc_needs_belf" >&6
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+
+
+esac
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e s/^X//'
+sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Constants:
+rm="rm -f"
+
+# Global variables:
+default_ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except M$VC,
+# which needs '.lib').
+libext=a
+ltmain="$ac_aux_dir/ltmain.sh"
+ofile="$default_ofile"
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+need_locks="$enable_libtool_lock"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+test -z "$AS" && AS=as
+test -z "$CC" && CC=cc
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+test -z "$LD" && LD=ld
+test -z "$LN_S" && LN_S="ln -s"
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+test -z "$NM" && NM=nm
+test -z "$OBJDUMP" && OBJDUMP=objdump
+test -z "$RANLIB" && RANLIB=:
+test -z "$STRIP" && STRIP=:
+test -z "$ac_objext" && ac_objext=o
+
+if test x"$host" != x"$build"; then
+ ac_tool_prefix=${host_alias}-
+else
+ ac_tool_prefix=
+fi
+
+# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
+case $host_os in
+linux-gnu*) ;;
+linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
+esac
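+# Illustration only: a host triplet such as "i586-pc-linux" is rewritten by
+# the sed expression above to "i586-pc-linux-gnu", so older triplets still
+# match the linux-gnu* cases used throughout this script.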
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds"
+ ;;
+ *)
+ old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
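+# Illustration only: with the defaults set above (AR=ar, AR_FLAGS=cru) a
+# static archive ends up being built roughly as
+#   ar cru libfoo.a foo.o bar.o && ranlib libfoo.a
+# with "chmod 644 libfoo.a" run at install time; the file names here are
+# examples, the real commands substitute $oldlib and $oldobjs.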
+
+# Allow CC to be a program name with arguments.
+set dummy $CC
+compiler="$2"
+
+## FIXME: this should be a separate macro
+##
+echo "$as_me:$LINENO: checking for objdir" >&5
+echo $ECHO_N "checking for objdir... $ECHO_C" >&6
+rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+echo "$as_me:$LINENO: result: $objdir" >&5
+echo "${ECHO_T}$objdir" >&6
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+
+# Check whether --with-pic or --without-pic was given.
+if test "${with_pic+set}" = set; then
+ withval="$with_pic"
+ pic_mode="$withval"
+else
+ pic_mode=default
+fi;
+test -z "$pic_mode" && pic_mode=default
+
+# We assume here that the value for lt_cv_prog_cc_pic will not be cached
+# in isolation, and that seeing it set (from the cache) indicates that
+# the associated values are set (in the cache) correctly too.
+echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5
+echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6
+if test "${lt_cv_prog_cc_pic+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_shlib=
+ lt_cv_prog_cc_wl=
+ lt_cv_prog_cc_static=
+ lt_cv_prog_cc_no_builtin=
+ lt_cv_prog_cc_can_build_shared=$can_build_shared
+
+ if test "$GCC" = yes; then
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-static'
+
+ case $host_os in
+ aix*)
+ # Below there is a dirty hack to force normal static linking with -ldl
+      # The problem is that libdl is dynamically linked against both libc and
+      # libC (the AIX C++ library), which gcc obviously does not include in
+      # its library list.  This causes undefined symbols with -static flags.
+      # This hack allows C programs to be linked with "-static -ldl", but
+      # we are not sure about C++ programs.
+ lt_cv_prog_cc_static="$lt_cv_prog_cc_static ${lt_cv_prog_cc_wl}-lC"
+ ;;
+ amigaos*)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_cv_prog_cc_pic='-fno-common'
+ ;;
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_cv_prog_cc_pic=-Kconform_pic
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for PIC flags for the system compiler.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ lt_cv_prog_cc_wl='-Wl,'
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_cv_prog_cc_static='-Bstatic'
+ else
+ lt_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ # Is there a better lt_cv_prog_cc_static that works with the bundled CC?
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static="${lt_cv_prog_cc_wl}-a ${lt_cv_prog_cc_wl}archive"
+ lt_cv_prog_cc_pic='+Z'
+ ;;
+
+ irix5* | irix6*)
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ # PIC (with -KPIC) is the default.
+ ;;
+
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+
+ newsos6)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ # All OSF/1 code is PIC.
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ ;;
+
+ sco3.2v5*)
+ lt_cv_prog_cc_pic='-Kpic'
+ lt_cv_prog_cc_static='-dn'
+ lt_cv_prog_cc_shlib='-belf'
+ ;;
+
+ solaris*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Wl,'
+ ;;
+
+ sunos4*)
+ lt_cv_prog_cc_pic='-PIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Qoption ld '
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ if test "x$host_vendor" = xsni; then
+ lt_cv_prog_cc_wl='-LD'
+ else
+ lt_cv_prog_cc_wl='-Wl,'
+ fi
+ ;;
+
+ uts4*)
+ lt_cv_prog_cc_pic='-pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_cv_prog_cc_pic='-Kconform_pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ fi
+ ;;
+
+ *)
+ lt_cv_prog_cc_can_build_shared=no
+ ;;
+ esac
+ fi
+
+fi
+
+if test -z "$lt_cv_prog_cc_pic"; then
+ echo "$as_me:$LINENO: result: none" >&5
+echo "${ECHO_T}none" >&6
+else
+ echo "$as_me:$LINENO: result: $lt_cv_prog_cc_pic" >&5
+echo "${ECHO_T}$lt_cv_prog_cc_pic" >&6
+
+ # Check to make sure the pic_flag actually works.
+ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_cv_prog_cc_pic works" >&5
+echo $ECHO_N "checking if $compiler PIC flag $lt_cv_prog_cc_pic works... $ECHO_C" >&6
+ if test "${lt_cv_prog_cc_pic_works+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $lt_cv_prog_cc_pic -DPIC"
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ case $host_os in
+ hpux9* | hpux10* | hpux11*)
+ # On HP-UX, both CC and GCC only warn that PIC is supported... then
+ # they create non-PIC objects. So, if there were any warnings, we
+ # assume that PIC is not supported.
+ if test -s conftest.err; then
+ lt_cv_prog_cc_pic_works=no
+ else
+ lt_cv_prog_cc_pic_works=yes
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic_works=yes
+ ;;
+ esac
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ lt_cv_prog_cc_pic_works=no
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ CFLAGS="$save_CFLAGS"
+
+fi
+
+
+ if test "X$lt_cv_prog_cc_pic_works" = Xno; then
+ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_can_build_shared=no
+ else
+ lt_cv_prog_cc_pic=" $lt_cv_prog_cc_pic"
+ fi
+
+ echo "$as_me:$LINENO: result: $lt_cv_prog_cc_pic_works" >&5
+echo "${ECHO_T}$lt_cv_prog_cc_pic_works" >&6
+fi
+##
+## END FIXME
+
+# Check for any special shared library compilation flags.
+if test -n "$lt_cv_prog_cc_shlib"; then
+ { echo "$as_me:$LINENO: WARNING: \`$CC' requires \`$lt_cv_prog_cc_shlib' to build shared libraries" >&5
+echo "$as_me: WARNING: \`$CC' requires \`$lt_cv_prog_cc_shlib' to build shared libraries" >&2;}
+ if echo "$old_CC $old_CFLAGS " | egrep -e "[ ]$lt_cv_prog_cc_shlib[ ]" >/dev/null; then :
+ else
+ { echo "$as_me:$LINENO: WARNING: add \`$lt_cv_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure" >&5
+echo "$as_me: WARNING: add \`$lt_cv_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure" >&2;}
+ lt_cv_prog_cc_can_build_shared=no
+ fi
+fi
+
+## FIXME: this should be a separate macro
+##
+echo "$as_me:$LINENO: checking if $compiler static flag $lt_cv_prog_cc_static works" >&5
+echo $ECHO_N "checking if $compiler static flag $lt_cv_prog_cc_static works... $ECHO_C" >&6
+if test "${lt_cv_prog_cc_static_works+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ lt_cv_prog_cc_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_cv_prog_cc_static"
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ lt_cv_prog_cc_static_works=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+
+
+# Belt *and* braces to stop my trousers falling down:
+test "X$lt_cv_prog_cc_static_works" = Xno && lt_cv_prog_cc_static=
+echo "$as_me:$LINENO: result: $lt_cv_prog_cc_static_works" >&5
+echo "${ECHO_T}$lt_cv_prog_cc_static_works" >&6
+
+pic_flag="$lt_cv_prog_cc_pic"
+special_shlib_compile_flags="$lt_cv_prog_cc_shlib"
+wl="$lt_cv_prog_cc_wl"
+link_static_flag="$lt_cv_prog_cc_static"
+no_builtin_flag="$lt_cv_prog_cc_no_builtin"
+can_build_shared="$lt_cv_prog_cc_can_build_shared"
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+# Check to see if options -o and -c are simultaneously supported by compiler
+echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5
+echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6
+if test "${lt_cv_compiler_c_o+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+$rm -r conftest 2>/dev/null
+mkdir conftest
+cd conftest
+echo "int some_variable = 0;" > conftest.$ac_ext
+mkdir out
+# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
+# that will create temporary files in the current directory regardless of
+# the output directory. Thus, making CWD read-only will cause this test
+# to fail, enabling locking or at least warning the user not to do parallel
+# builds.
+chmod -w .
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -o out/conftest2.$ac_objext"
+compiler_c_o=no
+if { (eval echo configure:7039: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s out/conftest.err; then
+ lt_cv_compiler_c_o=no
+ else
+ lt_cv_compiler_c_o=yes
+ fi
+else
+ # Append any errors to the config.log.
+ cat out/conftest.err 1>&5
+ lt_cv_compiler_c_o=no
+fi
+CFLAGS="$save_CFLAGS"
+chmod u+w .
+$rm conftest* out/*
+rmdir out
+cd ..
+rmdir conftest
+$rm -r conftest 2>/dev/null
+
+fi
+
+compiler_c_o=$lt_cv_compiler_c_o
+echo "$as_me:$LINENO: result: $compiler_c_o" >&5
+echo "${ECHO_T}$compiler_c_o" >&6
+
+if test x"$compiler_c_o" = x"yes"; then
+ # Check to see if we can write to a .lo
+ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.lo" >&5
+echo $ECHO_N "checking if $compiler supports -c -o file.lo... $ECHO_C" >&6
+ if test "${lt_cv_compiler_o_lo+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+ lt_cv_compiler_o_lo=no
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -c -o conftest.lo"
+ save_objext="$ac_objext"
+ ac_objext=lo
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+int some_variable = 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ lt_cv_compiler_o_lo=no
+ else
+ lt_cv_compiler_o_lo=yes
+ fi
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ ac_objext="$save_objext"
+ CFLAGS="$save_CFLAGS"
+
+fi
+
+ compiler_o_lo=$lt_cv_compiler_o_lo
+ echo "$as_me:$LINENO: result: $compiler_o_lo" >&5
+echo "${ECHO_T}$compiler_o_lo" >&6
+else
+ compiler_o_lo=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check to see if we can do hard links to lock some files if needed
+hard_links="nottested"
+if test "$compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ echo "$as_me:$LINENO: checking if we can lock with hard links" >&5
+echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6
+ hard_links=yes
+ $rm conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ echo "$as_me:$LINENO: result: $hard_links" >&5
+echo "${ECHO_T}$hard_links" >&6
+ if test "$hard_links" = no; then
+ { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+if test "$GCC" = yes; then
+ # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
+ echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.$ac_ext"
+ compiler_rtti_exceptions=no
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+int some_variable = 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ compiler_rtti_exceptions=no
+ else
+ compiler_rtti_exceptions=yes
+ fi
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ CFLAGS="$save_CFLAGS"
+ echo "$as_me:$LINENO: result: $compiler_rtti_exceptions" >&5
+echo "${ECHO_T}$compiler_rtti_exceptions" >&6
+
+ if test "$compiler_rtti_exceptions" = "yes"; then
+ no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+ else
+ no_builtin_flag=' -fno-builtin'
+ fi
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# See if the linker supports building shared libraries.
+echo "$as_me:$LINENO: checking whether the linker ($LD) supports shared libraries" >&5
+echo $ECHO_N "checking whether the linker ($LD) supports shared libraries... $ECHO_C" >&6
+
+allow_undefined_flag=
+no_undefined_flag=
+need_lib_prefix=unknown
+need_version=unknown
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+archive_cmds=
+archive_expsym_cmds=
+old_archive_from_new_cmds=
+old_archive_from_expsyms_cmds=
+export_dynamic_flag_spec=
+whole_archive_flag_spec=
+thread_safe_flag_spec=
+hardcode_into_libs=no
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+hardcode_shlibpath_var=unsupported
+runpath_var=
+link_all_deplibs=unknown
+always_export_symbols=no
+export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
+# include_expsyms should be a list of space-separated symbols to be *always*
+# included in the symbol list
+include_expsyms=
+# exclude_expsyms can be an egrep regular expression of symbols to exclude
+# it will be wrapped by ` (' and `)$', so one must not match beginning or
+# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+# as well as any symbol that contains `d'.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
+# Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+# platforms (ab)use it in PIC code, but their linkers get confused if
+# the symbol is explicitly referenced. Since portable code cannot
+# rely on this symbol name, it's probably fine to never include it in
+# preloaded symbol tables.
+extract_expsyms_cmds=
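+# Illustration only: for a library built from foo.o and bar.o the default
+# export_symbols_cmds above expands to roughly
+#   nm foo.o bar.o | $global_symbol_pipe | sed 's/.* //' | sort | uniq > libfoo.exp
+# (file names are examples; the real command substitutes $libobjs,
+# $convenience and $export_symbols).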
+
+case $host_os in
+cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ # On AIX, the GNU linker is very broken
+    # Note: Check GNU linker on AIX 5-IA64 when/if it becomes available.
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+EOF
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, is to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+    # behavior of shared libraries on other platforms, we cannot use
+    # them.
+ ld_shlibs=no
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+
+ extract_expsyms_cmds='test -f $output_objdir/impgen.c || \
+ sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //;s/^# *$//; p; }" -e d < $''0 > $output_objdir/impgen.c~
+ test -f $output_objdir/impgen.exe || (cd $output_objdir && \
+ if test "x$HOST_CC" != "x" ; then $HOST_CC -o impgen impgen.c ; \
+ else $CC -o impgen impgen.c ; fi)~
+ $output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def'
+
+ old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib'
+
+ # cygwin and mingw dlls have different entry points and sets of symbols
+ # to exclude.
+ # FIXME: what about values for MSVC?
+ dll_entry=__cygwin_dll_entry@12
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~
+ case $host_os in
+ mingw*)
+ # mingw values
+ dll_entry=_DllMainCRTStartup@12
+ dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~
+ ;;
+ esac
+
+ # mingw and cygwin differ, and it's simplest to just exclude the union
+ # of the two symbol sets.
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one (in ltdll.c)
+ if test "x$lt_cv_need_dllmain" = "xyes"; then
+ ltdll_obj='$output_objdir/$soname-ltdll.'"$ac_objext "
+ ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $''0 > $output_objdir/$soname-ltdll.c~
+ test -f $output_objdir/$soname-ltdll.$ac_objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~'
+ else
+ ltdll_obj=
+ ltdll_cmds=
+ fi
+
+ # Extract the symbol export list from an `--export-all' def file,
+ # then regenerate the def file from the symbol export list, so that
+ # the compiled dll only exports the symbol export list.
+    # Be careful not to strip the DATA tag left by newer dlltools.
+ export_symbols_cmds="$ltdll_cmds"'
+ $DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~
+ sed -e "1,/EXPORTS/d" -e "s/ @ [0-9]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols'
+
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is.
+ # If DATA tags from a recent dlltool are present, honour them!
+ archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname-def;
+ else
+ echo EXPORTS > $output_objdir/$soname-def;
+ _lt_hint=1;
+ cat $export_symbols | while read symbol; do
+ set dummy \$symbol;
+ case \$# in
+ 2) echo " \$2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;;
+ *) echo " \$2 @ \$_lt_hint \$3 ; " >> $output_objdir/$soname-def;;
+ esac;
+ _lt_hint=`expr 1 + \$_lt_hint`;
+ done;
+ fi~
+ '"$ltdll_cmds"'
+ $CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~
+ $CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~
+ $CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags'
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris* | sysv5*)
+ if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+EOF
+ elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = yes; then
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+ case $host_os in
+ cygwin* | mingw* | pw32*)
+ # dlltool doesn't understand --whole-archive et. al.
+ whole_archive_flag_spec=
+ ;;
+ *)
+ # ancient GNU ld didn't support --whole-archive et. al.
+ if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ ;;
+ esac
+ fi
+else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$link_static_flag"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ hardcode_direct=yes
+ archive_cmds=''
+ hardcode_libdir_separator=':'
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # Added $aix_export variable to control use of exports file.
+ # For non-gcc, we don't use exports files, and rather trust
+ # the binder's -qmkshrobj option to export all the mangled
+ # symbols we need for C++ and java.
+
+ aix_export="\${wl}$exp_sym_flag:\$export_symbols"
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+          # path is not listed in the libpath.  Setting hardcode_minus_L
+          # to yes forces relinking.
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ esac
+
+ shared_flag='-shared'
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ shared_flag='${wl}-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+
+ # Test for -qmkshrobj and use it if it's available.
+ # It's superior for determining exportable symbols,
+ # especially for C++ or JNI libraries, which have
+ # mangled names.
+ #
+ cat >conftest.$ac_ext <<_ACEOF
+void f(){}
+_ACEOF
+ if { (eval echo "$as_me:$LINENO: \"$CC -c conftest.c\"") >&5
+ (eval $CC -c conftest.c) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { (eval echo "$as_me:$LINENO: \"$CC -o conftest conftest.$ac_objext -qmkshrobj -lC_r\"") >&5
+ (eval $CC -o conftest conftest.$ac_objext -qmkshrobj -lC_r) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ lt_cv_aix_mkshrobj=yes
+ else
+ lt_cv_aix_mkshrobj=no
+ fi
+
+ if test "$lt_cv_aix_mkshrobj" = yes; then
+ aix_export="-qmkshrobj"
+ fi
+ fi
+
+ # It seems that -bexpall can do strange things, so it is better to
+ # generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
+ archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag $aix_export $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname ${wl}-h$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"
+ else
+ hardcode_libdir_flag_spec='${wl}-bnolibpath ${wl}-blibpath:$libdir:/usr/lib:/lib'
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='${wl}-berok'
+ # This is a bit strange, but is similar to how AIX traditionally builds
+        # its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"' ~$AR -crlo $objdir/$libname$release.a $objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics on the GNU ld section
+ ld_shlibs=no
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs'
+ fix_srcfile_path='`cygpath -w "$srcfile"`'
+ ;;
+
+ darwin* | rhapsody*)
+ case "$host_os" in
+ rhapsody* | darwin1.[012])
+ allow_undefined_flag='-undefined suppress'
+ ;;
+ *) # Darwin 1.3 on
+ allow_undefined_flag='-flat_namespace -undefined suppress'
+ ;;
+ esac
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+
+ #### Local change for Sleepycat's Berkeley DB [#5664] [#6511]
+ case "$host_os" in
+ darwin[12345].*)
+ # removed double quotes in the following line:
+ archive_cmds='$nonopt $(test x$module = xyes && echo -bundle || echo -dynamiclib) $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -install_name $rpath/$soname $verstring'
+ ;;
+ *) # Darwin6.0 on (Mac OS/X Jaguar)
+ archive_cmds='$nonopt $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -dynamiclib -install_name $rpath/$soname $verstring'
+ ;;
+ esac
+ #### End of changes for Sleepycat's Berkeley DB [#5664] [#6511]
+
+ # We need to add '_' to the symbols in $export_symbols first
+ #archive_expsym_cmds="$archive_cmds"' && strip -s $export_symbols'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ whole_archive_flag_spec='-all_load $convenience'
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ case $host_os in
+ hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
+ *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;;
+ esac
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_minus_L=yes # Not in the search PATH, but as the default
+ # location of the library.
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ irix5* | irix6*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ link_all_deplibs=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ openbsd*)
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
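+    # An empty grep below means the compiler expanded __ELF__, i.e. this is
+    # an ELF system (openbsd2.8-powerpc is special-cased into the same branch),
+    # so $CC -shared with -rpath is used; otherwise the OpenBSD release decides.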
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case "$host_os" in
+ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~
+ $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp'
+
+      # Both the C and C++ compilers support -rpath directly.
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+
+ sco3.2v5*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ ;;
+
+ solaris*)
+ # gcc --version < 3.0 without binutils cannot create self contained
+ # shared libraries reliably, requiring libgcc.a to resolve some of
+ # the object symbols generated in some cases. Libraries that use
+ # assert need libgcc.a to resolve __eprintf, for example. Linking
+ # a copy of libgcc.a into every shared library to guarantee resolving
+ # such symbols causes other problems: According to Tim Van Holder
+ # <tim.van.holder@pandora.be>, C++ libraries end up with a separate
+ # (to the application) exception stack for one thing.
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ case `$CC --version 2>/dev/null` in
+ [12].*)
+ cat <<EOF 1>&2
+
+*** Warning: Releases of GCC earlier than version 3.0 cannot reliably
+*** create self contained shared libraries on Solaris systems, without
+*** introducing a dependency on libgcc.a. Therefore, libtool is disabling
+*** -no-undefined support, which will at least allow you to build shared
+*** libraries. However, you may find that when you link such libraries
+*** into an application without using GCC, you have to manually add
+*** \`gcc --print-libgcc-file-name\` to the link command. We urge you to
+*** upgrade to a newer version of GCC. Another option is to rebuild your
+*** current GCC to use the GNU linker from GNU binutils 2.9.1 or newer.
+
+EOF
+ no_undefined_flag=
+ ;;
+ esac
+ fi
+ # $CC -shared without GNU ld will not create a library from C++
+    # object files and a static libstdc++, so better avoid it for now.
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *) # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ if test "x$host_vendor" = xsno; then
+ archive_cmds='$LD -G -Bsymbolic -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ else
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct=no # Motorola manual says yes, but my tests say they lie
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv5*)
+ no_undefined_flag=' -z text'
+ # $CC -shared without GNU ld will not create a library from C++
+    # object files and a static libstdc++, so better avoid it for now.
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec=
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4.2uw2*)
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=no
+ hardcode_runpath_var=yes
+ runpath_var=LD_RUN_PATH
+ ;;
+
+ sysv5uw7* | unixware7*)
+ no_undefined_flag='${wl}-z ${wl}text'
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+echo "$as_me:$LINENO: result: $ld_shlibs" >&5
+echo "${ECHO_T}$ld_shlibs" >&6
+test "$ld_shlibs" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check hardcoding attributes.
+echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5
+echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" || \
+ test -n "$runpath_var"; then
+
+  # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$hardcode_shlibpath_var" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+    # We can link without hardcoding, and we can hardcode non-existent dirs.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+echo "$as_me:$LINENO: result: $hardcode_action" >&5
+echo "${ECHO_T}$hardcode_action" >&6
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+striplib=
+old_striplib=
+echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5
+echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6
+if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+##
+## END FIXME
+
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+## FIXME: this should be a separate macro
+##
+# PORTME Fill in your ld.so characteristics
+echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5
+echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}.so$major'
+ ;;
+
+aix4* | aix5*)
+ version_type=linux
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # If we don't set need_version, we'll get x.so.0.0.0,
+ # even if -avoid-version is set.
+ need_version=no
+
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}.so$major ${libname}${release}.so$versuffix $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[01] | aix4.[01].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+      # AIX (on Power*) has no versioning support, so currently we cannot
+      # hardcode the correct soname into the executable.  Probably we could
+      # add versioning support to collect2, so additional links could be
+      # useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}.so$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
+ ;;
+
+beos*)
+ library_names_spec='${libname}.so'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi4*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ export_dynamic_flag_spec=-rdynamic
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32*)
+ version_type=windows
+ need_version=no
+ need_lib_prefix=no
+ case $GCC,$host_os in
+ yes,cygwin*)
+ library_names_spec='$libname.dll.a'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll'
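+      # Install: source the installed .lai file to learn the real DLL name
+      # ($dlname) and copy the DLL into the directory it names relative to
+      # $destdir; the matching postuninstall command removes that copy again.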
+ postinstall_cmds='dlpath=`bash 2>&1 -c '\''. $dir/${file}i;echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog .libs/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`bash 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $rm \$dlpath'
+ ;;
+ yes,mingw*)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | sed -e "s/^libraries://" -e "s/;/ /g"`
+ ;;
+ yes,pw32*)
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | sed -e 's/./-/g'`${versuffix}.dll'
+ ;;
+ *)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ # added support for -jnimodule, encapsulated below in ${darwin_suffix}
+ darwin_suffix='$(test .$jnimodule = .yes && echo jnilib || (test .$module = .yes && echo so || echo dylib))'
+ library_names_spec='${libname}${release}${versuffix}.'"${darwin_suffix}"' ${libname}${release}${major}.'"${darwin_suffix}"' ${libname}.'"${darwin_suffix}"
+ soname_spec='${libname}${release}${major}.'"${darwin_suffix}"
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd*)
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ *)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ dynamic_linker="$host_os dld.sl"
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
+ soname_spec='${libname}${release}.sl$major'
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+irix5* | irix6*)
+ version_type=irix
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so $libname.so'
+ case $host_os in
+ irix5*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. Since this was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days and
+ # people can always --disable-shared, the test was removed, and we
+ # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+nto-qnx)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+openbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case "$host_os" in
+ openbsd2.[89] | openbsd2.[89].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+os2*)
+ libname_spec='$name'
+ need_lib_prefix=no
+ library_names_spec='$libname.dll $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_version=no
+ soname_spec='${libname}${release}.so'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+sco3.2v5*)
+ version_type=osf
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+ version_type=linux
+ library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
+ soname_spec='$libname.so.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+echo "$as_me:$LINENO: result: $dynamic_linker" >&5
+echo "${ECHO_T}$dynamic_linker" >&6
+test "$dynamic_linker" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Report the final consequences.
+echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5
+echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6
+echo "$as_me:$LINENO: result: $can_build_shared" >&5
+echo "${ECHO_T}$can_build_shared" >&6
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+echo "$as_me:$LINENO: checking whether to build shared libraries" >&5
+echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6
+test "$can_build_shared" = "no" && enable_shared=no
+
+# On AIX, shared libraries and static libraries use the same namespace, and
+# are all built from PIC.
+case "$host_os" in
+aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+aix4*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+esac
+echo "$as_me:$LINENO: result: $enable_shared" >&5
+echo "${ECHO_T}$enable_shared" >&6
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+echo "$as_me:$LINENO: checking whether to build static libraries" >&5
+echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6
+# Make sure either enable_shared or enable_static is yes.
+test "$enable_shared" = yes || enable_static=yes
+echo "$as_me:$LINENO: result: $enable_static" >&5
+echo "${ECHO_T}$enable_static" >&6
+##
+## END FIXME
+
+if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ *)
+ echo "$as_me:$LINENO: checking for shl_load" >&5
+echo $ECHO_N "checking for shl_load... $ECHO_C" >&6
+if test "${ac_cv_func_shl_load+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char shl_load (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char shl_load ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_shl_load) || defined (__stub___shl_load)
+choke me
+#else
+char (*f) () = shl_load;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != shl_load;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_shl_load=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_shl_load=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5
+echo "${ECHO_T}$ac_cv_func_shl_load" >&6
+if test $ac_cv_func_shl_load = yes; then
+ lt_cv_dlopen="shl_load"
+else
+ echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5
+echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6
+if test "${ac_cv_lib_dld_shl_load+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char shl_load ();
+int
+main ()
+{
+shl_load ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_dld_shl_load=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_dld_shl_load=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5
+echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6
+if test $ac_cv_lib_dld_shl_load = yes; then
+ lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"
+else
+ echo "$as_me:$LINENO: checking for dlopen" >&5
+echo $ECHO_N "checking for dlopen... $ECHO_C" >&6
+if test "${ac_cv_func_dlopen+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char dlopen (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char dlopen ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_dlopen) || defined (__stub___dlopen)
+choke me
+#else
+char (*f) () = dlopen;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != dlopen;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_dlopen=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_dlopen=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5
+echo "${ECHO_T}$ac_cv_func_dlopen" >&6
+if test $ac_cv_func_dlopen = yes; then
+ lt_cv_dlopen="dlopen"
+else
+ echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5
+echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6
+if test "${ac_cv_lib_dl_dlopen+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char dlopen ();
+int
+main ()
+{
+dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_dl_dlopen=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_dl_dlopen=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5
+echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6
+if test $ac_cv_lib_dl_dlopen = yes; then
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+ echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5
+echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6
+if test "${ac_cv_lib_svld_dlopen+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsvld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char dlopen ();
+int
+main ()
+{
+dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_svld_dlopen=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_svld_dlopen=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5
+echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6
+if test $ac_cv_lib_svld_dlopen = yes; then
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
+else
+ echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5
+echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6
+if test "${ac_cv_lib_dld_dld_link+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char dld_link ();
+int
+main ()
+{
+dld_link ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_dld_dld_link=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_dld_dld_link=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5
+echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6
+if test $ac_cv_lib_dld_dld_link = yes; then
+ lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5
+echo $ECHO_N "checking whether a program can dlopen itself... $ECHO_C" >&6
+if test "${lt_cv_dlopen_self+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
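+  # The test program below dlopens the running executable and checks whether
+  # the symbol "fnord" resolves with or without a leading underscore,
+  # reporting the outcome through its exit status.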
+ cat > conftest.$ac_ext <<EOF
+#line 8946 "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms.  */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+
+ exit (status);
+}
+EOF
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_unknown|x*) lt_cv_dlopen_self=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5
+echo "${ECHO_T}$lt_cv_dlopen_self" >&6
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ LDFLAGS="$LDFLAGS $link_static_flag"
+ echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5
+echo $ECHO_N "checking whether a statically linked program can dlopen itself... $ECHO_C" >&6
+if test "${lt_cv_dlopen_self_static+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self_static=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<EOF
+#line 9044 "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms.  */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+
+ exit (status);
+}
+EOF
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_unknown|x*) lt_cv_dlopen_self_static=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self_static=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5
+echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+
+
+## FIXME: this should be a separate macro
+##
+if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5
+echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6
+ if test "${lt_cv_archive_cmds_need_lc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ $rm conftest*
+ echo 'static int dummy;' > conftest.$ac_ext
+
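+  # Compile the dummy object, then run $archive_cmds verbosely and grep the
+  # link line for " -lc "; if the compiler already passes -lc to the linker,
+  # there is no need to add it explicitly.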
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_cv_prog_cc_wl
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5
+ (eval $archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+fi
+
+ echo "$as_me:$LINENO: result: $lt_cv_archive_cmds_need_lc" >&5
+echo "${ECHO_T}$lt_cv_archive_cmds_need_lc" >&6
+ ;;
+ esac
+fi
+need_lc=${lt_cv_archive_cmds_need_lc-yes}
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# The second clause should only fire when bootstrapping the
+# libtool distribution, otherwise you forgot to ship ltmain.sh
+# with your package, and you will get complaints that there are
+# no rules to generate ltmain.sh.
+if test -f "$ltmain"; then
+ :
+else
+ # If there is no Makefile yet, we rely on a make rule to execute
+ # `config.status --recheck' to rerun these tests and create the
+ # libtool script then.
+ test -f Makefile && make "$ltmain"
+fi
+
+if test -f "$ltmain"; then
+ trap "$rm \"${ofile}T\"; exit 1" 1 2 15
+ $rm -f "${ofile}T"
+
+ echo creating $ofile
+
+ # Now quote all the things that may contain metacharacters while being
+ # careful not to overquote the AC_SUBSTed values. We take copies of the
+ # variables and quote the copies for generation of the libtool script.
+ for var in echo old_CC old_CFLAGS \
+ AR AR_FLAGS CC LD LN_S NM SHELL \
+ reload_flag reload_cmds wl \
+ pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
+ thread_safe_flag_spec whole_archive_flag_spec libname_spec \
+ library_names_spec soname_spec \
+ RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
+ old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds \
+ postuninstall_cmds extract_expsyms_cmds old_archive_from_expsyms_cmds \
+ old_striplib striplib file_magic_cmd export_symbols_cmds \
+ deplibs_check_method allow_undefined_flag no_undefined_flag \
+ finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
+ global_symbol_to_c_name_address \
+ hardcode_libdir_flag_spec hardcode_libdir_separator \
+ sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
+ compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
+
+ case $var in
+ reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
+ old_postinstall_cmds | old_postuninstall_cmds | \
+ export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
+ extract_expsyms_cmds | old_archive_from_expsyms_cmds | \
+ postinstall_cmds | postuninstall_cmds | \
+ finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
+ # Double-quote double-evaled strings.
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
+ ;;
+ *)
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
+ ;;
+ esac
+ done
+
+ cat <<__EOF__ > "${ofile}T"
+#! $SHELL
+
+# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996-2000 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="sed -e s/^X//"
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$need_lc
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+
+# An echo program that does not interpret backslashes.
+echo=$lt_echo
+
+# The archiver.
+AR=$lt_AR
+AR_FLAGS=$lt_AR_FLAGS
+
+# The default C compiler.
+CC=$lt_CC
+
+# Is the compiler the GNU C compiler?
+with_gcc=$GCC
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# Whether we need hard or soft links.
+LN_S=$lt_LN_S
+
+# A BSD-compatible nm program.
+NM=$lt_NM
+
+# A symbol stripping program
+STRIP=$STRIP
+
+# Used to examine libraries when file_magic_cmd begins "file"
+MAGIC_CMD=$MAGIC_CMD
+
+# Used on cygwin: DLL creation program.
+DLLTOOL="$DLLTOOL"
+
+# Used on cygwin: object dumper.
+OBJDUMP="$OBJDUMP"
+
+# Used on cygwin: assembler.
+AS="$AS"
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# How to pass a linker flag through the compiler.
+wl=$lt_wl
+
+# Object file suffix (normally "o").
+objext="$ac_objext"
+
+# Old archive suffix (normally "a").
+libext="$libext"
+
+# Executable file suffix (normally "").
+exeext="$exeext"
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_pic_flag
+pic_mode=$pic_mode
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_compiler_c_o
+
+# Can we write directly to a .lo ?
+compiler_o_lo=$lt_compiler_o_lo
+
+# Must we lock files when doing compilation ?
+need_locks=$lt_need_locks
+
+# Do we need the lib prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_link_static_flag
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_no_builtin_flag
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Compiler flag to generate thread-safe objects.
+thread_safe_flag_spec=$lt_thread_safe_flag_spec
+
+# Library versioning type.
+version_type=$version_type
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Commands used to build and install an old-style archive.
+RANLIB=$lt_RANLIB
+old_archive_cmds=$lt_old_archive_cmds
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build and install a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+postinstall_cmds=$lt_postinstall_cmds
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method == file_magic.
+file_magic_cmd=$lt_file_magic_cmd
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that forces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# Same as above, but a single script fragment to be evaled but not shown.
+finish_eval=$lt_finish_eval
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_global_symbol_pipe
+
+# Transform the output of nm into a proper C declaration
+global_symbol_to_cdecl=$lt_global_symbol_to_cdecl
+
+# Transform the output of nm into a C name address pair
+global_symbol_to_c_name_address=$lt_global_symbol_to_c_name_address
+
+# This is the shared library runtime path variable.
+runpath_var=$runpath_var
+
+# This is the shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
+# the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at relink time.
+variables_saved_for_relink="$variables_saved_for_relink"
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Compile-time system search path for libraries
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path="$fix_srcfile_path"
+
+# Set to yes if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# ### END LIBTOOL CONFIG
+
+__EOF__
+
+ case $host_os in
+ aix3*)
+ cat <<\EOF >> "${ofile}T"
+
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+EOF
+ ;;
+ esac
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ cat <<'EOF' >> "${ofile}T"
+ # This is a source program that is used to create dlls on Windows
+ # Don't remove or modify the starting and closing comments.
+# /* ltdll.c starts here */
+# #define WIN32_LEAN_AND_MEAN
+# #include <windows.h>
+# #undef WIN32_LEAN_AND_MEAN
+# #include <stdio.h>
+#
+# #ifndef __CYGWIN__
+# # ifdef __CYGWIN32__
+# # define __CYGWIN__ __CYGWIN32__
+# # endif
+# #endif
+#
+# #ifdef __cplusplus
+# extern "C" {
+# #endif
+# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
+# #ifdef __cplusplus
+# }
+# #endif
+#
+# #ifdef __CYGWIN__
+# #include <cygwin/cygwin_dll.h>
+# DECLARE_CYGWIN_DLL( DllMain );
+# #endif
+# HINSTANCE __hDllInstance_base;
+#
+# BOOL APIENTRY
+# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
+# {
+# __hDllInstance_base = hInst;
+# return TRUE;
+# }
+# /* ltdll.c ends here */
+ # This is a source program that is used to create import libraries
+ # on Windows for dlls which lack them. Don't remove or modify the
+ # starting and closing comments.
+# /* impgen.c starts here */
+# /* Copyright (C) 1999-2000 Free Software Foundation, Inc.
+#
+# This file is part of GNU libtool.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# */
+#
+# #include <stdio.h> /* for printf() */
+# #include <unistd.h> /* for open(), lseek(), read() */
+# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
+# #include <string.h> /* for strdup() */
+#
+# /* O_BINARY isn't required (or even defined sometimes) under Unix */
+# #ifndef O_BINARY
+# #define O_BINARY 0
+# #endif
+#
+# static unsigned int
+# pe_get16 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[2];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 2);
+# return b[0] + (b[1]<<8);
+# }
+#
+# static unsigned int
+# pe_get32 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[4];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 4);
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# static unsigned int
+# pe_as32 (ptr)
+# void *ptr;
+# {
+# unsigned char *b = ptr;
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# int
+# main (argc, argv)
+# int argc;
+# char *argv[];
+# {
+# int dll;
+# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
+# unsigned long export_rva, export_size, nsections, secptr, expptr;
+# unsigned long name_rvas, nexp;
+# unsigned char *expdata, *erva;
+# char *filename, *dll_name;
+#
+# filename = argv[1];
+#
+# dll = open(filename, O_RDONLY|O_BINARY);
+# if (dll < 1)
+# return 1;
+#
+# dll_name = filename;
+#
+# for (i=0; filename[i]; i++)
+# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
+# dll_name = filename + i +1;
+#
+# pe_header_offset = pe_get32 (dll, 0x3c);
+# opthdr_ofs = pe_header_offset + 4 + 20;
+# num_entries = pe_get32 (dll, opthdr_ofs + 92);
+#
+# if (num_entries < 1) /* no exports */
+# return 1;
+#
+# export_rva = pe_get32 (dll, opthdr_ofs + 96);
+# export_size = pe_get32 (dll, opthdr_ofs + 100);
+# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
+# secptr = (pe_header_offset + 4 + 20 +
+# pe_get16 (dll, pe_header_offset + 4 + 16));
+#
+# expptr = 0;
+# for (i = 0; i < nsections; i++)
+# {
+# char sname[8];
+# unsigned long secptr1 = secptr + 40 * i;
+# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+# lseek(dll, secptr1, SEEK_SET);
+# read(dll, sname, 8);
+# if (vaddr <= export_rva && vaddr+vsize > export_rva)
+# {
+# expptr = fptr + (export_rva - vaddr);
+# if (export_rva + export_size > vaddr + vsize)
+# export_size = vsize - (export_rva - vaddr);
+# break;
+# }
+# }
+#
+# expdata = (unsigned char*)malloc(export_size);
+# lseek (dll, expptr, SEEK_SET);
+# read (dll, expdata, export_size);
+# erva = expdata - export_rva;
+#
+# nexp = pe_as32 (expdata+24);
+# name_rvas = pe_as32 (expdata+32);
+#
+# printf ("EXPORTS\n");
+# for (i = 0; i<nexp; i++)
+# {
+# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
+# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
+# }
+#
+# return 0;
+# }
+# /* impgen.c ends here */
+
+EOF
+ ;;
+ esac
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+ # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "${ofile}T" || (rm -f "${ofile}T"; exit 1)
+
+ mv -f "${ofile}T" "$ofile" || \
+ (rm -f "$ofile" && cp "${ofile}T" "$ofile" && rm -f "${ofile}T")
+ chmod +x "$ofile"
+fi
+##
+## END FIXME
+
+
+
+
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+# Prevent multiple expansion
+
+
+
+LIBTOOL="\$(SHELL) ./libtool"
+
+# Set SOSUFFIX and friends
+
+ echo "$as_me:$LINENO: checking SOSUFFIX from libtool" >&5
+echo $ECHO_N "checking SOSUFFIX from libtool... $ECHO_C" >&6
+ module=no
+ jnimodule=no
+
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5
+echo "$as_me: WARNING: libtool may not know about this architecture." >&2;}
+ { echo "$as_me:$LINENO: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&5
+echo "$as_me: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&2;}
+ fi
+ fi
+
+ SOSUFFIX=$_SOSUFFIX
+ echo "$as_me:$LINENO: result: $SOSUFFIX" >&5
+echo "${ECHO_T}$SOSUFFIX" >&6
+
+
+
+ echo "$as_me:$LINENO: checking MODSUFFIX from libtool" >&5
+echo $ECHO_N "checking MODSUFFIX from libtool... $ECHO_C" >&6
+ module=yes
+ jnimodule=no
+
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5
+echo "$as_me: WARNING: libtool may not know about this architecture." >&2;}
+ { echo "$as_me:$LINENO: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&5
+echo "$as_me: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&2;}
+ fi
+ fi
+
+ MODSUFFIX=$_SOSUFFIX
+ echo "$as_me:$LINENO: result: $MODSUFFIX" >&5
+echo "${ECHO_T}$MODSUFFIX" >&6
+
+
+
+ echo "$as_me:$LINENO: checking JMODSUFFIX from libtool" >&5
+echo $ECHO_N "checking JMODSUFFIX from libtool... $ECHO_C" >&6
+ module=yes
+ jnimodule=yes
+
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ { echo "$as_me:$LINENO: WARNING: libtool may not know about this architecture." >&5
+echo "$as_me: WARNING: libtool may not know about this architecture." >&2;}
+ { echo "$as_me:$LINENO: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&5
+echo "$as_me: WARNING: assuming .$_SUFFIX suffix for dynamic libraries." >&2;}
+ fi
+ fi
+
+ JMODSUFFIX=$_SOSUFFIX
+ echo "$as_me:$LINENO: result: $JMODSUFFIX" >&5
+echo "${ECHO_T}$JMODSUFFIX" >&6
+
+
+
+INSTALLER="\$(LIBTOOL) --mode=install cp -p"
+
+MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}"
+MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version"
+MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}"
+MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${MAKEFILE_CXX}"
+MAKEFILE_XSOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK} -avoid-version"
+MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}"
+
+DEFAULT_LIB="\$(libso_target)"
+POSTLINK="\$(LIBTOOL) --mode=execute true"
+o=".lo"
+INSTALL_LIBS="$DEFAULT_LIB"
+
+# Optional C++ API.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ DEFAULT_LIB_CXX="\$(libcxx)"
+ fi
+ if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB_CXX="\$(libxso_target)"
+ fi
+ INSTALL_LIBS="$INSTALL_LIBS $DEFAULT_LIB_CXX"
+
+ # Fill in C++ library for Embedix.
+ EMBEDIX_ECD_CXX='<OPTION db-extra>\
+ TYPE=bool\
+ DEFAULT_VALUE=1\
+ PROMPT=Include BerkeleyDB C++ library?\
+ <KEEPLIST>\
+ /usr/include/db_cxx.h\
+ /usr/lib/libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </KEEPLIST>\
+ <PROVIDES>\
+ libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </PROVIDES>\
+ <REQUIRES>\
+ ld-linux.so.2\
+ libc.so.6\
+ </REQUIRES>\
+ STATIC_SIZE=0\
+ STORAGE_SIZE=523612\
+ STARTUP_TIME=0\
+ </OPTION>'
+fi
+
+# Optional Java API.
+if test "$db_cv_java" = "yes"; then
+ # Java requires shared libraries.
+ if test "$enable_shared" = "no"; then
+ { { echo "$as_me:$LINENO: error: Java requires shared libraries" >&5
+echo "$as_me: error: Java requires shared libraries" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVAC" = x && for ac_prog in javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_JAVAC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$JAVAC"; then
+ ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_JAVAC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+JAVAC=$ac_cv_prog_JAVAC
+if test -n "$JAVAC"; then
+ echo "$as_me:$LINENO: result: $JAVAC" >&5
+echo "${ECHO_T}$JAVAC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$JAVAC" && break
+done
+
+else
+ test "x$JAVAC" = x && for ac_prog in javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_JAVAC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$JAVAC"; then
+ ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_JAVAC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+JAVAC=$ac_cv_prog_JAVAC
+if test -n "$JAVAC"; then
+ echo "$as_me:$LINENO: result: $JAVAC" >&5
+echo "${ECHO_T}$JAVAC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$JAVAC" && break
+done
+test -n "$JAVAC" || JAVAC="$JAVAPREFIX"
+
+fi
+test "x$JAVAC" = x && { { echo "$as_me:$LINENO: error: no acceptable Java compiler found in \$PATH" >&5
+echo "$as_me: error: no acceptable Java compiler found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+
+echo "$as_me:$LINENO: checking if $JAVAC works" >&5
+echo $ECHO_N "checking if $JAVAC works... $ECHO_C" >&6
+if test "${ac_cv_prog_javac_works+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* #line 10016 "configure" */
+public class Test {
+}
+EOF
+if { ac_try='$JAVAC $JAVACFLAGS $JAVA_TEST'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } >/dev/null 2>&1; then
+ ac_cv_prog_javac_works=yes
+else
+ { { echo "$as_me:$LINENO: error: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&5
+echo "$as_me: error: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&2;}
+ { (exit 1); exit 1; }; }
+ echo "configure: failed program was:" >&5
+ cat $JAVA_TEST >&5
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_prog_javac_works" >&5
+echo "${ECHO_T}$ac_cv_prog_javac_works" >&6
+
+
+
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAR" = x && for ac_prog in jar$EXEEXT
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_JAR+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$JAR"; then
+ ac_cv_prog_JAR="$JAR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_JAR="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+JAR=$ac_cv_prog_JAR
+if test -n "$JAR"; then
+ echo "$as_me:$LINENO: result: $JAR" >&5
+echo "${ECHO_T}$JAR" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$JAR" && break
+done
+
+else
+ test "x$JAR" = x && for ac_prog in jar
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_prog_JAR+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$JAR"; then
+ ac_cv_prog_JAR="$JAR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_JAR="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+fi
+fi
+JAR=$ac_cv_prog_JAR
+if test -n "$JAR"; then
+ echo "$as_me:$LINENO: result: $JAR" >&5
+echo "${ECHO_T}$JAR" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$JAR" && break
+done
+test -n "$JAR" || JAR="$JAVAPREFIX"
+
+fi
+test "x$JAR" = x && { { echo "$as_me:$LINENO: error: no acceptable jar program found in \$PATH" >&5
+echo "$as_me: error: no acceptable jar program found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+
+
+
+JNI_INCLUDE_DIRS=""
+
+test "x$JAVAC" = x && { { echo "$as_me:$LINENO: error: '$JAVAC' undefined" >&5
+echo "$as_me: error: '$JAVAC' undefined" >&2;}
+ { (exit 1); exit 1; }; }
+# Extract the first word of "$JAVAC", so it can be a program name with args.
+set dummy $JAVAC; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path__ACJNI_JAVAC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $_ACJNI_JAVAC in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path__ACJNI_JAVAC="$_ACJNI_JAVAC" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path__ACJNI_JAVAC="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ test -z "$ac_cv_path__ACJNI_JAVAC" && ac_cv_path__ACJNI_JAVAC="$JAVAC"
+ ;;
+esac
+fi
+_ACJNI_JAVAC=$ac_cv_path__ACJNI_JAVAC
+
+if test -n "$_ACJNI_JAVAC"; then
+ echo "$as_me:$LINENO: result: $_ACJNI_JAVAC" >&5
+echo "${ECHO_T}$_ACJNI_JAVAC" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+test ! -x "$_ACJNI_JAVAC" && { { echo "$as_me:$LINENO: error: $JAVAC could not be found in path" >&5
+echo "$as_me: error: $JAVAC could not be found in path" >&2;}
+ { (exit 1); exit 1; }; }
+echo "$as_me:$LINENO: checking absolute path of $JAVAC" >&5
+echo $ECHO_N "checking absolute path of $JAVAC... $ECHO_C" >&6
+case "$_ACJNI_JAVAC" in
+/*) echo "$as_me:$LINENO: result: $_ACJNI_JAVAC" >&5
+echo "${ECHO_T}$_ACJNI_JAVAC" >&6;;
+*) { { echo "$as_me:$LINENO: error: $_ACJNI_JAVAC is not an absolute path name" >&5
+echo "$as_me: error: $_ACJNI_JAVAC is not an absolute path name" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+
+
+# find the include directory relative to the javac executable
+_cur=""$_ACJNI_JAVAC""
+while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
+ echo "$as_me:$LINENO: checking symlink for $_cur" >&5
+echo $ECHO_N "checking symlink for $_cur... $ECHO_C" >&6
+ _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
+ case "$_slink" in
+ /*) _cur="$_slink";;
+ # 'X' avoids triggering unwanted echo options.
+ *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[^/]*$::'`"$_slink";;
+ esac
+ echo "$as_me:$LINENO: result: $_cur" >&5
+echo "${ECHO_T}$_cur" >&6
+done
+_ACJNI_FOLLOWED="$_cur"
+
+_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[^/]*$::'`
+case "$host_os" in
+ darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[^/]*$::'`
+ _JINC="$_JTOPDIR/Headers";;
+ *) _JINC="$_JTOPDIR/include";;
+esac
+
+# If we find jni.h in /usr/include, then it's not a java-only tree, so
+# don't add /usr/include or subdirectories to the list of includes.
+# An extra -I/usr/include can foul things up with newer gcc's.
+if test -f "$_JINC/jni.h"; then
+ if test "$_JINC" != "/usr/include"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC"
+ fi
+else
+ _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[^/]*$::'`
+ if test -f "$_JTOPDIR/include/jni.h"; then
+ if test "$_JTOPDIR" != "/usr"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include"
+ fi
+ else
+ { { echo "$as_me:$LINENO: error: cannot find java include files" >&5
+echo "$as_me: error: cannot find java include files" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# get the likely subdirectories for system specific java includes
+if test "$_JTOPDIR" != "/usr"; then
+ case "$host_os" in
+ aix*) _JNI_INC_SUBDIRS="aix";;
+ bsdi*) _JNI_INC_SUBDIRS="bsdos";;
+ linux*) _JNI_INC_SUBDIRS="linux genunix";;
+ osf*) _JNI_INC_SUBDIRS="alpha";;
+ solaris*) _JNI_INC_SUBDIRS="solaris";;
+ *) _JNI_INC_SUBDIRS="genunix";;
+ esac
+fi
+
+# add any subdirectories that are present
+for _JINCSUBDIR in $_JNI_INC_SUBDIRS
+do
+ if test -d "$_JTOPDIR/include/$_JINCSUBDIR"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$_JINCSUBDIR"
+ fi
+done
+
+
+ for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+ do
+ CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+ done
+
+ ADDITIONAL_LANG="$ADDITIONAL_LANG java"
+ INSTALL_LIBS="$INSTALL_LIBS \$(libjso_target)"
+else
+ JAVAC=nojavac
+fi
+
+# Optional RPC client/server.
+if test "$db_cv_rpc" = "yes"; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_RPC 1
+_ACEOF
+
+
+
+
+ RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)"
+ ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
+
+ EMBEDIX_ECD_RPC="/usr/bin/berkeley_db_svc"
+
+ case "$host_os" in
+ hpux*)
+ echo "$as_me:$LINENO: checking for svc_run" >&5
+echo $ECHO_N "checking for svc_run... $ECHO_C" >&6
+if test "${ac_cv_func_svc_run+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char svc_run (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char svc_run ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_svc_run) || defined (__stub___svc_run)
+choke me
+#else
+char (*f) () = svc_run;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != svc_run;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_svc_run=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_svc_run=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5
+echo "${ECHO_T}$ac_cv_func_svc_run" >&6
+if test $ac_cv_func_svc_run = yes; then
+ :
+else
+ echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5
+echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6
+if test "${ac_cv_lib_nsl_svc_run+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lnsl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char svc_run ();
+int
+main ()
+{
+svc_run ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_nsl_svc_run=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_nsl_svc_run=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5
+echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6
+if test $ac_cv_lib_nsl_svc_run = yes; then
+ LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"
+fi
+
+fi
+;;
+ solaris*)
+ echo "$as_me:$LINENO: checking for svc_run" >&5
+echo $ECHO_N "checking for svc_run... $ECHO_C" >&6
+if test "${ac_cv_func_svc_run+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char svc_run (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char svc_run ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_svc_run) || defined (__stub___svc_run)
+choke me
+#else
+char (*f) () = svc_run;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != svc_run;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_svc_run=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_svc_run=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_svc_run" >&5
+echo "${ECHO_T}$ac_cv_func_svc_run" >&6
+if test $ac_cv_func_svc_run = yes; then
+ :
+else
+
+echo "$as_me:$LINENO: checking for svc_run in -lnsl" >&5
+echo $ECHO_N "checking for svc_run in -lnsl... $ECHO_C" >&6
+if test "${ac_cv_lib_nsl_svc_run+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lnsl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char svc_run ();
+int
+main ()
+{
+svc_run ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_nsl_svc_run=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_nsl_svc_run=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_svc_run" >&5
+echo "${ECHO_T}$ac_cv_lib_nsl_svc_run" >&6
+if test $ac_cv_lib_nsl_svc_run = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBNSL 1
+_ACEOF
+
+ LIBS="-lnsl $LIBS"
+
+fi
+
+fi
+;;
+ esac
+fi
+
+
+if test "$db_cv_tcl" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ { { echo "$as_me:$LINENO: error: Tcl requires shared libraries" >&5
+echo "$as_me: error: Tcl requires shared libraries" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+
+
+
+ if test "${ac_cv_c_tclconfig+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+
+ # First check to see if --with-tclconfig was specified.
+ if test "${with_tclconfig}" != no; then
+ if test -f "${with_tclconfig}/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+ else
+ { { echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5
+echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+
+ # check in a few common install locations
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ for i in `ls -d /usr/local/lib 2>/dev/null` ; do
+ if test -f "$i/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd $i; pwd)`
+ break
+ fi
+ done
+ fi
+
+
+fi
+
+
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ TCL_BIN_DIR="# no Tcl configs found"
+ { { echo "$as_me:$LINENO: error: can't find Tcl configuration definitions" >&5
+echo "$as_me: error: can't find Tcl configuration definitions" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ TCL_BIN_DIR=${ac_cv_c_tclconfig}
+ fi
+
+
+ echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5
+echo $ECHO_N "checking for existence of $TCL_BIN_DIR/tclConfig.sh... $ECHO_C" >&6
+
+ if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+ echo "$as_me:$LINENO: result: loading" >&5
+echo "${ECHO_T}loading" >&6
+ . $TCL_BIN_DIR/tclConfig.sh
+ else
+ echo "$as_me:$LINENO: result: file not found" >&5
+echo "${ECHO_T}file not found" >&6
+ fi
+
+ #
+ # The eval is required to do the TCL_DBGX substitution in the
+ # TCL_LIB_FILE variable
+ #
+ eval TCL_LIB_FILE="${TCL_LIB_FILE}"
+ eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+ eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
+
+ #
+ # If the DB Tcl library isn't loaded with the Tcl spec and library
+ # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at
+ # load time. [#4843] Furthermore, with Tcl 8.3, the link flags
+ # given by the Tcl spec are insufficient for our use. [#5779]
+ #
+ case "$host_os" in
+ aix4.[2-9].*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG"
+ LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";;
+ aix*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";;
+ esac
+
+
+
+
+
+ TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
+
+
+ if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
+ TCFLAGS="-I$TCL_PREFIX/include"
+ fi
+
+ case "$host" in
+ *-mingw*)
+ # To include tcl.h when compiling tcl_*.o
+ TCFLAGS="$TCFLAGS -DDB_TCL_SUPPORT"
+ ;;
+ esac
+
+ INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)"
+fi
+
+# Optional crypto support.
+if test -d "$srcdir/../crypto"; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_CRYPTO 1
+_ACEOF
+
+
+
+ ADDITIONAL_OBJS="aes_method${o} crypto${o} mt19937db${o} rijndael-alg-fst${o} rijndael-api-fst${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional DB 1.85 compatibility API.
+if test "$db_cv_compat185" = "yes"; then
+ ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
+ ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional utilities.
+if test "$db_cv_dump185" = "yes"; then
+ ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
+fi
+
+# Checks for compiler characteristics.
+echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5
+echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6
+if test "${ac_cv_c_const+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+/* FIXME: Include the comments suggested by Paul. */
+#ifndef __cplusplus
+ /* Ultrix mips cc rejects this. */
+ typedef int charset[2];
+ const charset x;
+ /* SunOS 4.1.1 cc rejects this. */
+ char const *const *ccp;
+ char **p;
+ /* NEC SVR4.0.2 mips cc rejects this. */
+ struct point {int x, y;};
+ static struct point const zero = {0,0};
+ /* AIX XL C 1.02.0.0 rejects this.
+ It does not let you subtract one const X* pointer from another in
+ an arm of an if-expression whose if-part is not a constant
+ expression */
+ const char *g = "string";
+ ccp = &g + (g ? g-g : 0);
+ /* HPUX 7.0 cc rejects these. */
+ ++ccp;
+ p = (char**) ccp;
+ ccp = (char const *const *) p;
+ { /* SCO 3.2v4 cc rejects this. */
+ char *t;
+ char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+ *t++ = 0;
+ }
+ { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */
+ int x[] = {25, 17};
+ const int *foo = &x[0];
+ ++foo;
+ }
+ { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+ typedef const int *iptr;
+ iptr p = 0;
+ ++p;
+ }
+ { /* AIX XL C 1.02.0.0 rejects this saying
+ "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+ struct s { int j; const int *ap[3]; };
+ struct s *b; b->j = 5;
+ }
+ { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+ const int foo = 10;
+ }
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_const=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_c_const=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5
+echo "${ECHO_T}$ac_cv_c_const" >&6
+if test $ac_cv_c_const = no; then
+
+cat >>confdefs.h <<\_ACEOF
+#define const
+_ACEOF
+
+fi
+
+
+# Checks for include files, structures, C types.
+echo "$as_me:$LINENO: checking whether stat file-mode macros are broken" >&5
+echo $ECHO_N "checking whether stat file-mode macros are broken... $ECHO_C" >&6
+if test "${ac_cv_header_stat_broken+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if defined(S_ISBLK) && defined(S_IFDIR)
+# if S_ISBLK (S_IFDIR)
+You lose.
+# endif
+#endif
+
+#if defined(S_ISBLK) && defined(S_IFCHR)
+# if S_ISBLK (S_IFCHR)
+You lose.
+# endif
+#endif
+
+#if defined(S_ISLNK) && defined(S_IFREG)
+# if S_ISLNK (S_IFREG)
+You lose.
+# endif
+#endif
+
+#if defined(S_ISSOCK) && defined(S_IFREG)
+# if S_ISSOCK (S_IFREG)
+You lose.
+# endif
+#endif
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "You lose" >/dev/null 2>&1; then
+ ac_cv_header_stat_broken=yes
+else
+ ac_cv_header_stat_broken=no
+fi
+rm -f conftest*
+
+fi
+echo "$as_me:$LINENO: result: $ac_cv_header_stat_broken" >&5
+echo "${ECHO_T}$ac_cv_header_stat_broken" >&6
+if test $ac_cv_header_stat_broken = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STAT_MACROS_BROKEN 1
+_ACEOF
+
+fi
+
+echo "$as_me:$LINENO: checking whether time.h and sys/time.h may both be included" >&5
+echo $ECHO_N "checking whether time.h and sys/time.h may both be included... $ECHO_C" >&6
+if test "${ac_cv_header_time+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <time.h>
+
+int
+main ()
+{
+if ((struct tm *) 0)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_header_time=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_header_time=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_header_time" >&5
+echo "${ECHO_T}$ac_cv_header_time" >&6
+if test $ac_cv_header_time = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define TIME_WITH_SYS_TIME 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ac_header_dirent=no
+for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do
+ as_ac_Header=`echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_hdr that defines DIR" >&5
+echo $ECHO_N "checking for $ac_hdr that defines DIR... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+#include <$ac_hdr>
+
+int
+main ()
+{
+if ((DIR *) 0)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_Header=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_Header=no"
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_hdr" | $as_tr_cpp` 1
+_ACEOF
+
+ac_header_dirent=$ac_hdr; break
+fi
+
+done
+# Two versions of opendir et al. are in -ldir and -lx on SCO Xenix.
+if test $ac_header_dirent = dirent.h; then
+ echo "$as_me:$LINENO: checking for library containing opendir" >&5
+echo $ECHO_N "checking for library containing opendir... $ECHO_C" >&6
+if test "${ac_cv_search_opendir+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+ac_cv_search_opendir=no
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char opendir ();
+int
+main ()
+{
+opendir ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_opendir="none required"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+if test "$ac_cv_search_opendir" = no; then
+ for ac_lib in dir; do
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char opendir ();
+int
+main ()
+{
+opendir ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_opendir="-l$ac_lib"
+break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+ done
+fi
+LIBS=$ac_func_search_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5
+echo "${ECHO_T}$ac_cv_search_opendir" >&6
+if test "$ac_cv_search_opendir" != no; then
+ test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS"
+
+fi
+
+else
+ echo "$as_me:$LINENO: checking for library containing opendir" >&5
+echo $ECHO_N "checking for library containing opendir... $ECHO_C" >&6
+if test "${ac_cv_search_opendir+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+ac_cv_search_opendir=no
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char opendir ();
+int
+main ()
+{
+opendir ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_opendir="none required"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+if test "$ac_cv_search_opendir" = no; then
+ for ac_lib in x; do
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char opendir ();
+int
+main ()
+{
+opendir ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_opendir="-l$ac_lib"
+break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+ done
+fi
+LIBS=$ac_func_search_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_search_opendir" >&5
+echo "${ECHO_T}$ac_cv_search_opendir" >&6
+if test "$ac_cv_search_opendir" != no; then
+ test "$ac_cv_search_opendir" = "none required" || LIBS="$ac_cv_search_opendir $LIBS"
+
+fi
+
+fi
+
+
+
+for ac_header in sys/select.h sys/time.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+else
+ # Is the header compilable?
+echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_header_compiler=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6
+
+# Is the header present?
+echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (eval echo "$as_me:$LINENO: \"$ac_cpp conftest.$ac_ext\"") >&5
+ (eval $ac_cpp conftest.$ac_ext) 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.$ac_ext
+echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc in
+ yes:no )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+ no:yes )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ (
+ cat <<\_ASBOX
+## ------------------------------------ ##
+## Report this to bug-autoconf@gnu.org. ##
+## ------------------------------------ ##
+_ASBOX
+ ) |
+ sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6
+if eval "test \"\${$as_ac_Header+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=$ac_header_preproc"
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_Header'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_Header'}'`" >&6
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+echo "$as_me:$LINENO: checking for struct stat.st_blksize" >&5
+echo $ECHO_N "checking for struct stat.st_blksize... $ECHO_C" >&6
+if test "${ac_cv_member_struct_stat_st_blksize+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_blksize)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_member_struct_stat_st_blksize=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_blksize)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_member_struct_stat_st_blksize=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_member_struct_stat_st_blksize=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_blksize" >&5
+echo "${ECHO_T}$ac_cv_member_struct_stat_st_blksize" >&6
+if test $ac_cv_member_struct_stat_st_blksize = yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+_ACEOF
+
+
+fi
+
+
+
+# We need to know the sizes of various objects on this system.
+# We don't use the SIZEOF_XXX values created by autoconf.
+echo "$as_me:$LINENO: checking for char" >&5
+echo $ECHO_N "checking for char... $ECHO_C" >&6
+if test "${ac_cv_type_char+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((char *) 0)
+ return 0;
+if (sizeof (char))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_char=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_char=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_char" >&5
+echo "${ECHO_T}$ac_cv_type_char" >&6
+
+echo "$as_me:$LINENO: checking size of char" >&5
+echo $ECHO_N "checking size of char... $ECHO_C" >&6
+if test "${ac_cv_sizeof_char+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_char" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_char=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (char), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (char)); }
+unsigned long ulongval () { return (long) (sizeof (char)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (char))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (char))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (char))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_char=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (char), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_char=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_char" >&5
+echo "${ECHO_T}$ac_cv_sizeof_char" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_CHAR $ac_cv_sizeof_char
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for unsigned char" >&5
+echo $ECHO_N "checking for unsigned char... $ECHO_C" >&6
+if test "${ac_cv_type_unsigned_char+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((unsigned char *) 0)
+ return 0;
+if (sizeof (unsigned char))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_unsigned_char=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_unsigned_char=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_char" >&5
+echo "${ECHO_T}$ac_cv_type_unsigned_char" >&6
+
+echo "$as_me:$LINENO: checking size of unsigned char" >&5
+echo $ECHO_N "checking size of unsigned char... $ECHO_C" >&6
+if test "${ac_cv_sizeof_unsigned_char+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_unsigned_char" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned char))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_unsigned_char=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned char), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (unsigned char)); }
+unsigned long ulongval () { return (long) (sizeof (unsigned char)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (unsigned char))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (unsigned char))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (unsigned char))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_unsigned_char=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned char), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned char), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_unsigned_char=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_char" >&5
+echo "${ECHO_T}$ac_cv_sizeof_unsigned_char" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_UNSIGNED_CHAR $ac_cv_sizeof_unsigned_char
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for short" >&5
+echo $ECHO_N "checking for short... $ECHO_C" >&6
+if test "${ac_cv_type_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((short *) 0)
+ return 0;
+if (sizeof (short))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_short=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_short=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_short" >&5
+echo "${ECHO_T}$ac_cv_type_short" >&6
+
+echo "$as_me:$LINENO: checking size of short" >&5
+echo $ECHO_N "checking size of short... $ECHO_C" >&6
+if test "${ac_cv_sizeof_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_short" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (short))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (short))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (short))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (short))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_short=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (short), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (short)); }
+unsigned long ulongval () { return (long) (sizeof (short)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (short))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (short))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (short))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_short=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (short), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (short), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_short=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_short" >&5
+echo "${ECHO_T}$ac_cv_sizeof_short" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SHORT $ac_cv_sizeof_short
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for unsigned short" >&5
+echo $ECHO_N "checking for unsigned short... $ECHO_C" >&6
+if test "${ac_cv_type_unsigned_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((unsigned short *) 0)
+ return 0;
+if (sizeof (unsigned short))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_unsigned_short=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_unsigned_short=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_short" >&5
+echo "${ECHO_T}$ac_cv_type_unsigned_short" >&6
+
+echo "$as_me:$LINENO: checking size of unsigned short" >&5
+echo $ECHO_N "checking size of unsigned short... $ECHO_C" >&6
+if test "${ac_cv_sizeof_unsigned_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_unsigned_short" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned short))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_unsigned_short=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned short), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (unsigned short)); }
+unsigned long ulongval () { return (long) (sizeof (unsigned short)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (unsigned short))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (unsigned short))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (unsigned short))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_unsigned_short=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned short), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned short), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_unsigned_short=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_short" >&5
+echo "${ECHO_T}$ac_cv_sizeof_unsigned_short" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_UNSIGNED_SHORT $ac_cv_sizeof_unsigned_short
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for int" >&5
+echo $ECHO_N "checking for int... $ECHO_C" >&6
+if test "${ac_cv_type_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((int *) 0)
+ return 0;
+if (sizeof (int))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_int=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_int=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_int" >&5
+echo "${ECHO_T}$ac_cv_type_int" >&6
+
+echo "$as_me:$LINENO: checking size of int" >&5
+echo $ECHO_N "checking size of int... $ECHO_C" >&6
+if test "${ac_cv_sizeof_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_int" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (int))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (int))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (int))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (int))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_int=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (int), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (int)); }
+unsigned long ulongval () { return (long) (sizeof (int)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (int))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (int))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (int))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_int=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (int), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (int), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_int=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_int" >&5
+echo "${ECHO_T}$ac_cv_sizeof_int" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_INT $ac_cv_sizeof_int
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for unsigned int" >&5
+echo $ECHO_N "checking for unsigned int... $ECHO_C" >&6
+if test "${ac_cv_type_unsigned_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((unsigned int *) 0)
+ return 0;
+if (sizeof (unsigned int))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_unsigned_int=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_unsigned_int=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_int" >&5
+echo "${ECHO_T}$ac_cv_type_unsigned_int" >&6
+
+echo "$as_me:$LINENO: checking size of unsigned int" >&5
+echo $ECHO_N "checking size of unsigned int... $ECHO_C" >&6
+if test "${ac_cv_sizeof_unsigned_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_unsigned_int" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned int))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_unsigned_int=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned int), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (unsigned int)); }
+unsigned long ulongval () { return (long) (sizeof (unsigned int)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (unsigned int))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (unsigned int))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (unsigned int))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_unsigned_int=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned int), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned int), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_unsigned_int=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_int" >&5
+echo "${ECHO_T}$ac_cv_sizeof_unsigned_int" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_UNSIGNED_INT $ac_cv_sizeof_unsigned_int
+_ACEOF
+
+
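+# The size checks above and below all follow the same pattern.  When cross
+# compiling, no test program can be run, so the size is derived purely at
+# compile time from the fact that a negative array size is a compile error.
+# A minimal sketch of the probe (TYPE and BOUND are placeholders for the
+# real type name and the candidate value):
+#
+#     static int test_array [1 - 2 * !(((long) (sizeof (TYPE))) <= BOUND)];
+#
+# The array size is positive exactly when sizeof (TYPE) <= BOUND, so a
+# successful compile confirms the bound.  The loops above first bracket the
+# value by growing BOUND exponentially, then the "Binary search between lo
+# and hi bounds" loop narrows it to the exact size.  When not cross
+# compiling, a small conftest program is linked and run instead, printing
+# the size into conftest.val.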
+echo "$as_me:$LINENO: checking for long" >&5
+echo $ECHO_N "checking for long... $ECHO_C" >&6
+if test "${ac_cv_type_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((long *) 0)
+ return 0;
+if (sizeof (long))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_long=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_long=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5
+echo "${ECHO_T}$ac_cv_type_long" >&6
+
+echo "$as_me:$LINENO: checking size of long" >&5
+echo $ECHO_N "checking size of long... $ECHO_C" >&6
+if test "${ac_cv_sizeof_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_long" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (long))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (long))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (long))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (long))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_long=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (long), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (long)); }
+unsigned long ulongval () { return (long) (sizeof (long)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (long))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (long))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (long))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_long=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (long), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_long=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5
+echo "${ECHO_T}$ac_cv_sizeof_long" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG $ac_cv_sizeof_long
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for unsigned long" >&5
+echo $ECHO_N "checking for unsigned long... $ECHO_C" >&6
+if test "${ac_cv_type_unsigned_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((unsigned long *) 0)
+ return 0;
+if (sizeof (unsigned long))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_unsigned_long=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_unsigned_long=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long" >&5
+echo "${ECHO_T}$ac_cv_type_unsigned_long" >&6
+
+echo "$as_me:$LINENO: checking size of unsigned long" >&5
+echo $ECHO_N "checking size of unsigned long... $ECHO_C" >&6
+if test "${ac_cv_sizeof_unsigned_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_unsigned_long" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (unsigned long))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_unsigned_long=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned long), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (unsigned long)); }
+unsigned long ulongval () { return (long) (sizeof (unsigned long)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (unsigned long))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (unsigned long))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (unsigned long))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_unsigned_long=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (unsigned long), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (unsigned long), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_unsigned_long=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_unsigned_long" >&5
+echo "${ECHO_T}$ac_cv_sizeof_unsigned_long" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_UNSIGNED_LONG $ac_cv_sizeof_unsigned_long
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for size_t" >&5
+echo $ECHO_N "checking for size_t... $ECHO_C" >&6
+if test "${ac_cv_type_size_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((size_t *) 0)
+ return 0;
+if (sizeof (size_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_size_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_size_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5
+echo "${ECHO_T}$ac_cv_type_size_t" >&6
+
+echo "$as_me:$LINENO: checking size of size_t" >&5
+echo $ECHO_N "checking size of size_t... $ECHO_C" >&6
+if test "${ac_cv_sizeof_size_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_size_t" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (size_t))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (size_t))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (size_t))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_size_t=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (size_t), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (size_t)); }
+unsigned long ulongval () { return (long) (sizeof (size_t)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (size_t))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (size_t))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (size_t))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_size_t=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (size_t), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (size_t), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_size_t=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_size_t" >&5
+echo "${ECHO_T}$ac_cv_sizeof_size_t" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t
+_ACEOF
+
+
+echo "$as_me:$LINENO: checking for char *" >&5
+echo $ECHO_N "checking for char *... $ECHO_C" >&6
+if test "${ac_cv_type_char_p+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((char * *) 0)
+ return 0;
+if (sizeof (char *))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_char_p=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_char_p=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_char_p" >&5
+echo "${ECHO_T}$ac_cv_type_char_p" >&6
+
+echo "$as_me:$LINENO: checking size of char *" >&5
+echo $ECHO_N "checking size of char *... $ECHO_C" >&6
+if test "${ac_cv_sizeof_char_p+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_char_p" = yes; then
+ # The cast to unsigned long works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char *))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char *))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo= ac_hi=
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long) (sizeof (char *))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_char_p=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (char *), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; } ;;
+esac
+else
+ if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+long longval () { return (long) (sizeof (char *)); }
+unsigned long ulongval () { return (long) (sizeof (char *)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ exit (1);
+ if (((long) (sizeof (char *))) < 0)
+ {
+ long i = longval ();
+ if (i != ((long) (sizeof (char *))))
+ exit (1);
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long i = ulongval ();
+ if (i != ((long) (sizeof (char *))))
+ exit (1);
+ fprintf (f, "%lu\n", i);
+ }
+ exit (ferror (f) || fclose (f) != 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_char_p=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (char *), 77
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (char *), 77
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_char_p=0
+fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sizeof_char_p" >&5
+echo "${ECHO_T}$ac_cv_sizeof_char_p" >&6
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_CHAR_P $ac_cv_sizeof_char_p
+_ACEOF
+
+
+
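+# The cached ac_cv_sizeof_* values collected above (together with the
+# matching checks for char, short, int and their unsigned variants, which
+# presumably run elsewhere in this script) are reused further down to pick
+# fallback typedefs for the u_* and fixed-width integer types.  The
+# SIZEOF_* macros appended to confdefs.h end up in the generated
+# configuration header, where the Berkeley DB sources can presumably test
+# them as well.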
+# We require off_t and size_t, and we don't try to substitute our own
+# if we can't find them.
+echo "$as_me:$LINENO: checking for off_t" >&5
+echo $ECHO_N "checking for off_t... $ECHO_C" >&6
+if test "${ac_cv_type_off_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((off_t *) 0)
+ return 0;
+if (sizeof (off_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_off_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_off_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_off_t" >&5
+echo "${ECHO_T}$ac_cv_type_off_t" >&6
+
+if test "$ac_cv_type_off_t" = no; then
+ { { echo "$as_me:$LINENO: error: No off_t type." >&5
+echo "$as_me: error: No off_t type." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+echo "$as_me:$LINENO: checking for size_t" >&5
+echo $ECHO_N "checking for size_t... $ECHO_C" >&6
+if test "${ac_cv_type_size_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((size_t *) 0)
+ return 0;
+if (sizeof (size_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_size_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_size_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5
+echo "${ECHO_T}$ac_cv_type_size_t" >&6
+
+if test "$ac_cv_type_size_t" = no; then
+ { { echo "$as_me:$LINENO: error: No size_t type." >&5
+echo "$as_me: error: No size_t type." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
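+# Note that the "checking for size_t" just above is satisfied from the
+# cache: ac_cv_type_size_t was already set by the earlier size_t probe, so
+# only the cached result is reported.  Both off_t and size_t are hard
+# requirements here; configure aborts rather than substituting a fallback.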
+# We look for u_char, u_short, u_int, u_long -- if we can't find them,
+# we create our own.
+
+echo "$as_me:$LINENO: checking for u_char" >&5
+echo $ECHO_N "checking for u_char... $ECHO_C" >&6
+if test "${ac_cv_type_u_char+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_char *) 0)
+ return 0;
+if (sizeof (u_char))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_char=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_char=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_char" >&5
+echo "${ECHO_T}$ac_cv_type_u_char" >&6
+
+if test "$ac_cv_type_u_char" = no; then
+ u_char_decl="typedef unsigned char u_char;"
+fi
+
+
+echo "$as_me:$LINENO: checking for u_short" >&5
+echo $ECHO_N "checking for u_short... $ECHO_C" >&6
+if test "${ac_cv_type_u_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_short *) 0)
+ return 0;
+if (sizeof (u_short))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_short=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_short=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_short" >&5
+echo "${ECHO_T}$ac_cv_type_u_short" >&6
+
+if test "$ac_cv_type_u_short" = no; then
+ u_short_decl="typedef unsigned short u_short;"
+fi
+
+
+echo "$as_me:$LINENO: checking for u_int" >&5
+echo $ECHO_N "checking for u_int... $ECHO_C" >&6
+if test "${ac_cv_type_u_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_int *) 0)
+ return 0;
+if (sizeof (u_int))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_int=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_int=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_int" >&5
+echo "${ECHO_T}$ac_cv_type_u_int" >&6
+
+if test "$ac_cv_type_u_int" = no; then
+ u_int_decl="typedef unsigned int u_int;"
+fi
+
+
+echo "$as_me:$LINENO: checking for u_long" >&5
+echo $ECHO_N "checking for u_long... $ECHO_C" >&6
+if test "${ac_cv_type_u_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_long *) 0)
+ return 0;
+if (sizeof (u_long))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_long=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_long=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_long" >&5
+echo "${ECHO_T}$ac_cv_type_u_long" >&6
+
+if test "$ac_cv_type_u_long" = no; then
+ u_long_decl="typedef unsigned long u_long;"
+fi
+
+
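+# Each u_* type that is missing gets a one-line typedef stored in a shell
+# variable (u_char_decl, u_short_decl, u_int_decl, u_long_decl).  These are
+# presumably substituted into a generated header later on -- e.g. a missing
+# u_char would come out as:
+#
+#     typedef unsigned char u_char;
+#
+# On most modern systems <sys/types.h> already provides all four, so the
+# declarations stay empty.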
+echo "$as_me:$LINENO: checking for u_int8_t" >&5
+echo $ECHO_N "checking for u_int8_t... $ECHO_C" >&6
+if test "${ac_cv_type_u_int8_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_int8_t *) 0)
+ return 0;
+if (sizeof (u_int8_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_int8_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_int8_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_int8_t" >&5
+echo "${ECHO_T}$ac_cv_type_u_int8_t" >&6
+
+if test "$ac_cv_type_u_int8_t" = no; then
+
+ case "1" in
+ "$ac_cv_sizeof_unsigned_int")
+ u_int8_decl="typedef unsigned int u_int8_t;";;
+ "$ac_cv_sizeof_unsigned_char")
+ u_int8_decl="typedef unsigned char u_int8_t;";;
+ "$ac_cv_sizeof_unsigned_short")
+ u_int8_decl="typedef unsigned short u_int8_t;";;
+ "$ac_cv_sizeof_unsigned_long")
+ u_int8_decl="typedef unsigned long u_int8_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No unsigned 1-byte integral type" >&5
+echo "$as_me: error: No unsigned 1-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
+
+
+echo "$as_me:$LINENO: checking for u_int16_t" >&5
+echo $ECHO_N "checking for u_int16_t... $ECHO_C" >&6
+if test "${ac_cv_type_u_int16_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_int16_t *) 0)
+ return 0;
+if (sizeof (u_int16_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_int16_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_int16_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_int16_t" >&5
+echo "${ECHO_T}$ac_cv_type_u_int16_t" >&6
+
+if test "$ac_cv_type_u_int16_t" = no; then
+
+ case "2" in
+ "$ac_cv_sizeof_unsigned_int")
+ u_int16_decl="typedef unsigned int u_int16_t;";;
+ "$ac_cv_sizeof_unsigned_char")
+ u_int16_decl="typedef unsigned char u_int16_t;";;
+ "$ac_cv_sizeof_unsigned_short")
+ u_int16_decl="typedef unsigned short u_int16_t;";;
+ "$ac_cv_sizeof_unsigned_long")
+ u_int16_decl="typedef unsigned long u_int16_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No unsigned 2-byte integral type" >&5
+echo "$as_me: error: No unsigned 2-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
+
+
+echo "$as_me:$LINENO: checking for int16_t" >&5
+echo $ECHO_N "checking for int16_t... $ECHO_C" >&6
+if test "${ac_cv_type_int16_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((int16_t *) 0)
+ return 0;
+if (sizeof (int16_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_int16_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_int16_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_int16_t" >&5
+echo "${ECHO_T}$ac_cv_type_int16_t" >&6
+
+if test "$ac_cv_type_int16_t" = no; then
+
+ case "2" in
+ "$ac_cv_sizeof_int")
+ int16_decl="typedef int int16_t;";;
+ "$ac_cv_sizeof_char")
+ int16_decl="typedef char int16_t;";;
+ "$ac_cv_sizeof_short")
+ int16_decl="typedef short int16_t;";;
+ "$ac_cv_sizeof_long")
+ int16_decl="typedef long int16_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No signed 2-byte integral type" >&5
+echo "$as_me: error: No signed 2-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
+
+
+echo "$as_me:$LINENO: checking for u_int32_t" >&5
+echo $ECHO_N "checking for u_int32_t... $ECHO_C" >&6
+if test "${ac_cv_type_u_int32_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((u_int32_t *) 0)
+ return 0;
+if (sizeof (u_int32_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_u_int32_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_u_int32_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_u_int32_t" >&5
+echo "${ECHO_T}$ac_cv_type_u_int32_t" >&6
+
+if test "$ac_cv_type_u_int32_t" = no; then
+
+ case "4" in
+ "$ac_cv_sizeof_unsigned_int")
+ u_int32_decl="typedef unsigned int u_int32_t;";;
+ "$ac_cv_sizeof_unsigned_char")
+ u_int32_decl="typedef unsigned char u_int32_t;";;
+ "$ac_cv_sizeof_unsigned_short")
+ u_int32_decl="typedef unsigned short u_int32_t;";;
+ "$ac_cv_sizeof_unsigned_long")
+ u_int32_decl="typedef unsigned long u_int32_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No unsigned 4-byte integral type" >&5
+echo "$as_me: error: No unsigned 4-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
+
+
+echo "$as_me:$LINENO: checking for int32_t" >&5
+echo $ECHO_N "checking for int32_t... $ECHO_C" >&6
+if test "${ac_cv_type_int32_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((int32_t *) 0)
+ return 0;
+if (sizeof (int32_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_int32_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_int32_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_int32_t" >&5
+echo "${ECHO_T}$ac_cv_type_int32_t" >&6
+
+if test "$ac_cv_type_int32_t" = no; then
+
+ case "4" in
+ "$ac_cv_sizeof_int")
+ int32_decl="typedef int int32_t;";;
+ "$ac_cv_sizeof_char")
+ int32_decl="typedef char int32_t;";;
+ "$ac_cv_sizeof_short")
+ int32_decl="typedef short int32_t;";;
+ "$ac_cv_sizeof_long")
+ int32_decl="typedef long int32_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No signed 4-byte integral type" >&5
+echo "$as_me: error: No signed 4-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
+
+# Check for ssize_t -- if none exists, find a signed integral type that's
+# the same size as a size_t.
+
+echo "$as_me:$LINENO: checking for ssize_t" >&5
+echo $ECHO_N "checking for ssize_t... $ECHO_C" >&6
+if test "${ac_cv_type_ssize_t+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((ssize_t *) 0)
+ return 0;
+if (sizeof (ssize_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_ssize_t=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_ssize_t=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_ssize_t" >&5
+echo "${ECHO_T}$ac_cv_type_ssize_t" >&6
+
+if test "$ac_cv_type_ssize_t" = no; then
+
+ case "$ac_cv_sizeof_size_t" in
+ "$ac_cv_sizeof_int")
+ ssize_t_decl="typedef int ssize_t;";;
+ "$ac_cv_sizeof_char")
+ ssize_t_decl="typedef char ssize_t;";;
+ "$ac_cv_sizeof_short")
+ ssize_t_decl="typedef short ssize_t;";;
+ "$ac_cv_sizeof_long")
+ ssize_t_decl="typedef long ssize_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&5
+echo "$as_me: error: No signed $ac_cv_sizeof_size_t-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+fi
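+
+# Worked example (illustrative comment only): on a typical 32-bit host where
+# ssize_t is missing and $ac_cv_sizeof_size_t and $ac_cv_sizeof_int are both
+# 4, the case statement above selects
+#
+#     ssize_t_decl="typedef int ssize_t;"
+#
+# The resulting declaration is presumably substituted into the generated
+# Berkeley DB header later in this script.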
+
+# Find the largest integral type.
+
+echo "$as_me:$LINENO: checking for unsigned long long" >&5
+echo $ECHO_N "checking for unsigned long long... $ECHO_C" >&6
+if test "${ac_cv_type_unsigned_long_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <stdio.h>
+
+int
+main ()
+{
+if ((unsigned long long *) 0)
+ return 0;
+if (sizeof (unsigned long long))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_unsigned_long_long=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_type_unsigned_long_long=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_type_unsigned_long_long" >&5
+echo "${ECHO_T}$ac_cv_type_unsigned_long_long" >&6
+
+if test "$ac_cv_type_unsigned_long_long" = no; then
+ db_align_t_decl="typedef unsigned long db_align_t;"
+else
+ db_align_t_decl="typedef unsigned long long db_align_t;"
+fi
+
+# Find an integral type which is the same size as a pointer.
+
+
+ case "$ac_cv_sizeof_char_p" in
+ "$ac_cv_sizeof_unsigned_int")
+ db_alignp_t_decl="typedef unsigned int db_alignp_t;";;
+ "$ac_cv_sizeof_unsigned_char")
+ db_alignp_t_decl="typedef unsigned char db_alignp_t;";;
+ "$ac_cv_sizeof_unsigned_short")
+ db_alignp_t_decl="typedef unsigned short db_alignp_t;";;
+ "$ac_cv_sizeof_unsigned_long")
+ db_alignp_t_decl="typedef unsigned long db_alignp_t;";;
+ *)
+ { { echo "$as_me:$LINENO: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&5
+echo "$as_me: error: No unsigned $ac_cv_sizeof_char_p-byte integral type" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
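+
+# Worked example (illustrative comment only): on an LP64 host where char *
+# and unsigned long are both 8 bytes, the case statement above selects
+#
+#     db_alignp_t_decl="typedef unsigned long db_alignp_t;"
+#
+# i.e. an unsigned integral type wide enough to hold a pointer value.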
+
+
+
+echo "$as_me:$LINENO: checking for ANSI C exit success/failure values" >&5
+echo $ECHO_N "checking for ANSI C exit success/failure values... $ECHO_C" >&6
+if test "${db_cv_exit_defines+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+return (EXIT_SUCCESS);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_exit_defines=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+db_cv_exit_defines=no
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $db_cv_exit_defines" >&5
+echo "${ECHO_T}$db_cv_exit_defines" >&6
+if test "$db_cv_exit_defines" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_EXIT_SUCCESS 1
+_ACEOF
+
+
+
+fi
+
+# Test for various functions/libraries that the test and example programs use:
+# sched_yield function
+# pthreads, socket and math libraries
+echo "$as_me:$LINENO: checking for sched_yield" >&5
+echo $ECHO_N "checking for sched_yield... $ECHO_C" >&6
+if test "${ac_cv_func_sched_yield+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char sched_yield (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char sched_yield ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_sched_yield) || defined (__stub___sched_yield)
+choke me
+#else
+char (*f) () = sched_yield;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != sched_yield;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_func_sched_yield=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_func_sched_yield=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $ac_cv_func_sched_yield" >&5
+echo "${ECHO_T}$ac_cv_func_sched_yield" >&6
+if test $ac_cv_func_sched_yield = yes; then
+ :
+else
+ echo "$as_me:$LINENO: checking for library containing sched_yield" >&5
+echo $ECHO_N "checking for library containing sched_yield... $ECHO_C" >&6
+if test "${ac_cv_search_sched_yield+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+ac_cv_search_sched_yield=no
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char sched_yield ();
+int
+main ()
+{
+sched_yield ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_sched_yield="none required"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+if test "$ac_cv_search_sched_yield" = no; then
+ for ac_lib in rt; do
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char sched_yield ();
+int
+main ()
+{
+sched_yield ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_search_sched_yield="-l$ac_lib"
+break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+ done
+fi
+LIBS=$ac_func_search_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_search_sched_yield" >&5
+echo "${ECHO_T}$ac_cv_search_sched_yield" >&6
+if test "$ac_cv_search_sched_yield" != no; then
+ test "$ac_cv_search_sched_yield" = "none required" || LIBS="$ac_cv_search_sched_yield $LIBS"
+ LOAD_LIBS="$LOAD_LIBS -lrt"
+fi
+
+fi
+
+
+# XXX
+# We can't check for pthreads in the same way we did the test for sched_yield
+# because the Solaris C library includes pthread interfaces which are not
+# thread-safe. For that reason we always add -lpthread if we find a pthread
+# library. Also we can't depend on any specific call existing (pthread_create,
+# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has
+# this problem.
+echo "$as_me:$LINENO: checking for main in -lpthread" >&5
+echo $ECHO_N "checking for main in -lpthread... $ECHO_C" >&6
+if test "${ac_cv_lib_pthread_main+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lpthread $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_pthread_main=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_pthread_main=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_main" >&5
+echo "${ECHO_T}$ac_cv_lib_pthread_main" >&6
+if test $ac_cv_lib_pthread_main = yes; then
+ LOAD_LIBS="$LOAD_LIBS -lpthread"
+fi
+ac_cv_lib_pthread=ac_cv_lib_pthread_main
+
+
+# XXX
+# We could be more exact about whether these libraries are needed, but we don't
+# bother -- if they exist, we load them.
+echo "$as_me:$LINENO: checking for main in -lm" >&5
+echo $ECHO_N "checking for main in -lm... $ECHO_C" >&6
+if test "${ac_cv_lib_m_main+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lm $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_m_main=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_m_main=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_m_main" >&5
+echo "${ECHO_T}$ac_cv_lib_m_main" >&6
+if test $ac_cv_lib_m_main = yes; then
+ LOAD_LIBS="$LOAD_LIBS -lm"
+fi
+ac_cv_lib_m=ac_cv_lib_m_main
+
+echo "$as_me:$LINENO: checking for main in -lsocket" >&5
+echo $ECHO_N "checking for main in -lsocket... $ECHO_C" >&6
+if test "${ac_cv_lib_socket_main+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsocket $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_socket_main=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_socket_main=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_socket_main" >&5
+echo "${ECHO_T}$ac_cv_lib_socket_main" >&6
+if test $ac_cv_lib_socket_main = yes; then
+ LOAD_LIBS="$LOAD_LIBS -lsocket"
+fi
+ac_cv_lib_socket=ac_cv_lib_socket_main
+
+echo "$as_me:$LINENO: checking for main in -lnsl" >&5
+echo $ECHO_N "checking for main in -lnsl... $ECHO_C" >&6
+if test "${ac_cv_lib_nsl_main+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lnsl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+main ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_lib_nsl_main=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ac_cv_lib_nsl_main=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_main" >&5
+echo "${ECHO_T}$ac_cv_lib_nsl_main" >&6
+if test $ac_cv_lib_nsl_main = yes; then
+ LOAD_LIBS="$LOAD_LIBS -lnsl"
+fi
+ac_cv_lib_nsl=ac_cv_lib_nsl_main
+
+
+# Check for mutexes.
+# We do this here because it changes $LIBS.
+
+
+# Mutexes we don't test for, but want the #defines to exist for
+# other ports.
+
+
+
+
+
+
+
+echo "$as_me:$LINENO: checking for mutexes" >&5
+echo $ECHO_N "checking for mutexes... $ECHO_C" >&6
+if test "${db_cv_mutex+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+db_cv_mutex=no
+
+orig_libs=$LIBS
+
+# User-specified POSIX or UI mutexes.
+#
+# There are two different reasons to specify mutexes: First, the application
+# is already using one type of mutex and doesn't want to mix-and-match (for
+# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the
+# application's POSIX pthreads mutexes don't support inter-process locking,
+# but the application wants to use them anyway (for example, current Linux
+# and *BSD systems).
+#
+# If we're on Solaris, we insist that -lthread or -lpthread be used. The
+# problem is the Solaris C library has UI/POSIX interface stubs, but they're
+# broken: configuring them for inter-process mutexes doesn't return an error,
+# but it doesn't work either. Otherwise, we try first without the library
+# and then with it: there are reports that SCO/UnixWare/OpenUNIX needs
+# this. [#4950]
+#
+# Test for LWP threads before testing for UI/POSIX threads; we prefer them
+# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not
+# pwrite64, if they load the C library before the appropriate threads library,
+# e.g., tclsh using dlopen to load the DB library. By using LWP threads we
+# avoid answering lots of user questions, not to mention the bugs.
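+#
+# Note: db_cv_posixmutexes and db_cv_uimutexes are user preferences; in the
+# stock Berkeley DB configure they are set from the --enable-posixmutexes and
+# --enable-uimutexes options (option names assumed from upstream DB), e.g.
+#
+#     ./configure --enable-posixmutexes
+#
+# forces the POSIX pthread mutex path tested below.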
+if test "$db_cv_posixmutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="posix_library_only";;
+ *)
+ db_cv_mutex="posix_only";;
+ esac
+fi
+
+if test "$db_cv_uimutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="ui_library_only";;
+ *)
+ db_cv_mutex="ui_only";;
+ esac
+fi
+
+# LWP threads: _lwp_XXX
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <synch.h>
+int
+main ()
+{
+
+ static lwp_mutex_t mi = SHAREDMUTEX;
+ static lwp_cond_t ci = SHAREDCV;
+ lwp_mutex_t mutex = mi;
+ lwp_cond_t cond = ci;
+ exit (
+ _lwp_mutex_lock(&mutex) ||
+ _lwp_mutex_unlock(&mutex));
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="Solaris/lwp"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# UI threads: thr_XXX
+#
+# Try with and without the -lthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <thread.h>
+#include <synch.h>
+int
+main ()
+{
+
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="UI/threads"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+LIBS="$LIBS -lthread"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <thread.h>
+#include <synch.h>
+int
+main ()
+{
+
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="UI/threads/library"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+ { { echo "$as_me:$LINENO: error: unable to find UI mutex interfaces" >&5
+echo "$as_me: error: unable to find UI mutex interfaces" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# POSIX.1 pthreads: pthread_XXX
+#
+# Try with and without the -lpthread library. If the user asked for POSIX
+# pthreads mutexes and we fail to find the full interface, try to configure
+# for just intra-process support.
+if test "$db_cv_pthreadsmutexes" = yes; then
+ if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+
+if test "$cross_compiling" = yes; then
+ db_cv_mutex="no"
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads""
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+ fi
+ if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+
+if test "$cross_compiling" = yes; then
+ db_cv_mutex="no"
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads/library""
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+ LIBS="$orig_libs"
+ fi
+ if test "$db_cv_mutex" = "posix_only"; then
+
+if test "$cross_compiling" = yes; then
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+int
+main ()
+{
+
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads/private""
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads/private""
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+ fi
+ if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+
+if test "$cross_compiling" = yes; then
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+int
+main ()
+{
+
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads/library/private""
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex=""POSIX/pthreads/library/private""
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+ LIBS="$orig_libs"
+ fi
+
+ if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ { { echo "$as_me:$LINENO: error: unable to find POSIX 1003.1 mutex interfaces" >&5
+echo "$as_me: error: unable to find POSIX 1003.1 mutex interfaces" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# msemaphore: HPPA only
+# Try HPPA before the general msem test; it needs special alignment.
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/mman.h>
+int
+main ()
+{
+
+#if defined(__hppa__)
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="HP/msem_init"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# msemaphore: AIX, OSF/1
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+int
+main ()
+{
+
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="UNIX/msem_init"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# ReliantUNIX
+if test "$db_cv_mutex" = no; then
+LIBS="$LIBS -lmproc"
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <ulocks.h>
+int
+main ()
+{
+
+ typedef spinlock_t tsl_t;
+ spinlock_t x;
+ initspin(&x, 1);
+ cspinlock(&x);
+ spinunlock(&x);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="ReliantUNIX/initspin"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+LIBS="$orig_libs"
+fi
+
+# SCO: UnixWare has threads in libthread, but OpenServer doesn't.
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__USLC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="SCO/x86/cc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# abilock_t: SGI
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <abi_mutex.h>
+int
+main ()
+{
+
+ typedef abilock_t tsl_t;
+ abilock_t x;
+ init_lock(&x);
+ acquire_lock(&x);
+ release_lock(&x);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="SGI/init_lock"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# sema_t: Solaris
+# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
+# turn this test on, unless we find some other platform that uses the old
+# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
+if test "$db_cv_mutex" = DOESNT_WORK; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <synch.h>
+int
+main ()
+{
+
+ typedef sema_t tsl_t;
+ sema_t x;
+ sema_init(&x, 1, USYNC_PROCESS, NULL);
+ sema_wait(&x);
+ sema_post(&x);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="UNIX/sema_init"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# _lock_try/_lock_clear: Solaris
+# On Solaris systems without Pthread or UI mutex interfaces, DB uses the
+# undocumented _lock_try/_lock_clear function calls instead of either the
+# sema_trywait(3T) or sema_wait(3T) function calls. This is because of
+# problems in those interfaces in some releases of the Solaris C library.
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/machlock.h>
+int
+main ()
+{
+
+ typedef lock_t tsl_t;
+ lock_t x;
+ _lock_try(&x);
+ _lock_clear(&x);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="Solaris/_lock_try"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# _check_lock/_clear_lock: AIX
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/atomic_op.h>
+int
+main ()
+{
+
+ int x;
+ _check_lock(&x,0,1);
+ _clear_lock(&x,0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="AIX/_check_lock"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+
+# Alpha/gcc: OSF/1
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__alpha) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="ALPHA/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# ARM/gcc: Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__arm__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="ARM/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# MIPS/gcc: Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="MIPS/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# PaRisc/gcc: HP/UX
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="HPPA/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# PPC/gcc:
+# Test for Apple first; it requires slightly different assembly.
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) && defined(__APPLE__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="PPC_APPLE/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="PPC_GENERIC/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# Sparc/gcc: SunOS, Solaris
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__sparc__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="Sparc/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# 68K/gcc: SunOS
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="68K/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(i386) || defined(__i386__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="x86/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# S390/gcc: Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__s390__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="S390/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# AMD64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if (defined(x86_64) || defined(__x86_64__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="x86_64/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# x86-64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+main(){
+#if defined(__x86_64__)
+#if defined(__GNUC__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="x86_64/gcc-assembly"
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+
+# ia64/gcc: Linux
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__ia64) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="ia64/gcc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+if test "$db_cv_mutex" = no; then
+if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+main(){
+#if defined(__alpha__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="alphalinux/gcc-assembly"
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+
+if test "$db_cv_mutex" = no; then
+if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+main(){
+#if defined(__sparc__) && !defined(__arch64__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="sparc32linux/gcc-assembly"
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+
+if test "$db_cv_mutex" = no; then
+if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+main(){
+#if defined(__sparc__) && defined(__arch64__)
+#if defined(__linux__)
+ exit(0);
+#endif
+#endif
+ exit(1);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="sparc64linux/gcc-assembly"
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+
+if test "$db_cv_mutex" = no; then
+if test "$cross_compiling" = yes; then
+ { { echo "$as_me:$LINENO: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run test program while cross compiling
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+main(){
+#if defined(__linux__) && defined(__s390__)
+ exit(0);
+#endif
+ exit(1);
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="s390linux/gcc-assembly"
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+fi
+
+
+# uts/cc: UTS
+if test "$db_cv_mutex" = no; then
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(_UTS)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_mutex="UTS/cc-assembly"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+fi
+
+# default to UNIX fcntl system call mutexes.
+if test "$db_cv_mutex" = no; then
+ db_cv_mutex="UNIX/fcntl"
+fi
+
+fi
+echo "$as_me:$LINENO: result: $db_cv_mutex" >&5
+echo "${ECHO_T}$db_cv_mutex" >&6
+
+case "$db_cv_mutex" in
+68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_68K_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_AIX_CHECK_LOCK 1
+_ACEOF
+
+
+;;
+ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_ALPHA_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_ARM_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_HPPA_MSEM_INIT 1
+_ACEOF
+
+
+;;
+HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_HPPA_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_IA64_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PTHREADS 1
+_ACEOF
+
+
+;;
+POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PTHREADS 1
+_ACEOF
+
+
+
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_THREAD_ONLY 1
+_ACEOF
+
+
+;;
+POSIX/pthreads/library) LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PTHREADS 1
+_ACEOF
+
+
+;;
+POSIX/pthreads/library/private)
+ LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PTHREADS 1
+_ACEOF
+
+
+
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_THREAD_ONLY 1
+_ACEOF
+
+
+;;
+PPC_GENERIC/gcc-assembly)
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+PPC_APPLE/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_RELIANTUNIX_INITSPIN 1
+_ACEOF
+
+
+;;
+S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_S390_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SCO_X86_CC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SGI_INIT_LOCK 1
+_ACEOF
+
+
+;;
+Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SOLARIS_LOCK_TRY 1
+_ACEOF
+
+
+;;
+Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SOLARIS_LWP 1
+_ACEOF
+
+
+;;
+Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SPARC_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_UI_THREADS 1
+_ACEOF
+
+
+;;
+UI/threads/library) LIBS="$LIBS -lthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_UI_THREADS 1
+_ACEOF
+
+
+;;
+UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_MSEM_INIT 1
+_ACEOF
+
+
+;;
+UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SEMA_INIT 1
+_ACEOF
+
+
+;;
+UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4.cc${o}"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_UTS_CC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+MIPS/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_MIPS_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_X86_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+x86_64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_X86_64_GCC_ASSEMBLY 1
+_ACEOF
+
+
+;;
+alphalinux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY 1
+_ACEOF
+
+
+;;
+sparc32linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY 1
+_ACEOF
+
+
+;;
+sparc64linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY 1
+_ACEOF
+
+
+;;
+s390linux/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_S390_LINUX_ASSEMBLY 1
+_ACEOF
+
+
+;;
+UNIX/fcntl) { echo "$as_me:$LINENO: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&5
+echo "$as_me: WARNING: NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE." >&2;}
+ ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_FCNTL 1
+_ACEOF
+
+
+;;
+*) { { echo "$as_me:$LINENO: error: Unknown mutex interface: $db_cv_mutex" >&5
+echo "$as_me: error: Unknown mutex interface: $db_cv_mutex" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+
+if test "$db_cv_mutex" != "UNIX/fcntl"; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_THREADS 1
+_ACEOF
+
+
+
+fi
+
+# There are 3 classes of mutexes:
+#
+# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes.
+# 2: Mutexes that must be destroyed, but which don't hold permanent system
+# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS.
+# 3: Mutexes that must be destroyed, even after the process is gone, for
+# example, pthread mutexes on QNX and binary semaphores on VxWorks.
+#
+# DB cannot currently distinguish between #2 and #3 because DB does not know
+# if the application is running environment recovery as part of startup and
+# does not need to do cleanup, or if the environment is being removed and/or
+# recovered in a loop in the application, and so does need to clean up. If
+# we get it wrong, we're going to call the mutex destroy routine on a random
+# piece of memory, which usually works, but just might drop core. For now,
+# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we
+# have a better solution or reason to solve this in a general way -- so far,
+# the places we've needed to handle this are few.
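+
To make the grouping above concrete, here is a hypothetical C sketch (the struct and function names are invented, and pthreads is assumed as the backing primitive) of how a teardown path can key off HAVE_MUTEX_SYSTEM_RESOURCES: class 1 mutexes need no call at all, while classes 2 and 3 must be destroyed explicitly.

```c
#include <pthread.h>

struct region_mutex {
	pthread_mutex_t m;	/* backing primitive (assumed pthreads) */
};

/* Discard a mutex that lives in a shared region. */
static void
region_mutex_discard(struct region_mutex *mp)
{
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	/* Classes 2 and 3: the mutex may pin OS-level resources, so it
	 * must be destroyed even though the region memory goes away. */
	(void)pthread_mutex_destroy(&mp->m);
#else
	/* Class 1 (e.g. test-and-set): freeing the region is enough. */
	(void)mp;
#endif
}
```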
+
+
+
+case "$host_os$db_cv_mutex" in
+*qnx*POSIX/pthread*|openedition*POSIX/pthread*)
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
+_ACEOF
+;;
+esac
+
+# Checks for system functions for which we have replacements.
+#
+# XXX
+# The only portable getcwd call is getcwd(char *, size_t), where the
+# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
+# deleted getwd().
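+
A minimal sketch of the portable calling convention described above (the PATH_MAX-sized buffer is an arbitrary choice): pass a caller-supplied, non-NULL buffer together with its size, rather than relying on the NULL-buffer extension or on getwd().

```c
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[PATH_MAX];	/* assumes PATH_MAX is defined */

	/* Portable form: non-NULL buffer plus an explicit size. */
	if (getcwd(buf, sizeof(buf)) == NULL) {
		perror("getcwd");
		return (1);
	}
	printf("%s\n", buf);
	return (0);
}
```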
+
+
+
+
+
+
+for ac_func in getcwd getopt memcmp memcpy memmove raise
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ LIBOBJS="$LIBOBJS $ac_func.$ac_objext"
+fi
+done
+
+
+
+
+
+
+
+for ac_func in snprintf strcasecmp strdup strerror vsnprintf
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ LIBOBJS="$LIBOBJS $ac_func.$ac_objext"
+fi
+done
+
+
+
+# Check for system functions we optionally use.
+
+
+
+
+
+for ac_func in _fstati64 clock_gettime directio gettimeofday getuid
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+
+
+
+
+
+for ac_func in pstat_getdynamic sched_yield select strtoul sysconf yield
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+# Checks for system functions for which we don't have replacements.
+# We require qsort(3).
+
+for ac_func in qsort
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ { { echo "$as_me:$LINENO: error: No qsort library function." >&5
+echo "$as_me: error: No qsort library function." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+done
+
+
+# Pread/pwrite.
+# HP-UX has pread/pwrite, but it doesn't work with largefile support.
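+
What the check below is probing for, sketched in a few lines (the file name and offset are arbitrary): pread(2) reads at an explicit offset without moving the descriptor's file position, which callers typically rely on; on HP-UX the interface is skipped because it conflicts with the largefile support configured above.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	int fd = open("/etc/hosts", O_RDONLY);	/* arbitrary test file */

	if (fd == -1)
		return (1);

	/* Read 16 bytes starting at offset 0 without touching the
	 * descriptor's current file position. */
	ssize_t n = pread(fd, buf, sizeof(buf), (off_t)0);

	printf("read %zd bytes\n", n);
	close(fd);
	return (0);
}
```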
+case "$host_os" in
+hpux*)
+ { echo "$as_me:$LINENO: WARNING: pread/pwrite interfaces ignored on $host_os." >&5
+echo "$as_me: WARNING: pread/pwrite interfaces ignored on $host_os." >&2;};;
+*)
+
+for ac_func in pread pwrite
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+esac
+
+# Check for fcntl(2) to deny child process access to file descriptors.
+echo "$as_me:$LINENO: checking for fcntl/F_SETFD" >&5
+echo $ECHO_N "checking for fcntl/F_SETFD... $ECHO_C" >&6
+if test "${db_cv_fcntl_f_setfd+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <fcntl.h>
+int
+main ()
+{
+
+ fcntl(1, F_SETFD, 1);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_fcntl_f_setfd=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+db_cv_fcntl_f_setfd=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: $db_cv_fcntl_f_setfd" >&5
+echo "${ECHO_T}$db_cv_fcntl_f_setfd" >&6
+if test "$db_cv_fcntl_f_setfd" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_FCNTL_F_SETFD 1
+_ACEOF
+
+
+
+fi
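+
The behavior this check enables can be illustrated with a small hedged sketch (the helper name is made up; FD_CLOEXEC, which is what F_SETFD normally sets, is assumed to be available): marking a descriptor close-on-exec keeps it from leaking into child processes created with exec().

```c
#include <fcntl.h>
#include <unistd.h>

/* Mark fd close-on-exec so it is not inherited across exec(). */
static int
set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);

	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}
```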
+
+# A/UX has a broken getopt(3).
+case "$host_os" in
+aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
+esac
+
+# Linux has the O_DIRECT flag, but you can't actually use it.
+echo "$as_me:$LINENO: checking for open/O_DIRECT" >&5
+echo $ECHO_N "checking for open/O_DIRECT... $ECHO_C" >&6
+if test "${db_cv_open_o_direct+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+echo "test for working open/O_DIRECT" > __o_direct_file
+if test "$cross_compiling" = yes; then
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <fcntl.h>
+int
+main ()
+{
+
+ open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_open_o_direct=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+db_cv_open_o_direct=no
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#include <fcntl.h>
+main() {
+int c, fd = open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+exit ((fd == -1) || (read(fd, &c, 1) != 1));
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ db_cv_open_o_direct=yes
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+db_cv_open_o_direct=no
+fi
+rm -f core core.* *.core gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f __o_direct_file
+fi
+echo "$as_me:$LINENO: result: $db_cv_open_o_direct" >&5
+echo "${ECHO_T}$db_cv_open_o_direct" >&6
+if test "$db_cv_open_o_direct" = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_O_DIRECT 1
+_ACEOF
+
+
+
+fi
+
+# Check for largefile support.
+# Check whether --enable-largefile or --disable-largefile was given.
+if test "${enable_largefile+set}" = set; then
+ enableval="$enable_largefile"
+
+fi;
+if test "$enable_largefile" != no; then
+
+ echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5
+echo $ECHO_N "checking for special C compiler options needed for large files... $ECHO_C" >&6
+if test "${ac_cv_sys_largefile_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_sys_largefile_CC=no
+ if test "$GCC" != yes; then
+ ac_save_CC=$CC
+ while :; do
+ # IRIX 6.2 and later do not support large files by default,
+ # so use the C compiler's -n32 option if that helps.
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext
+ CC="$CC -n32"
+ rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_largefile_CC=' -n32'; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext
+ break
+ done
+ CC=$ac_save_CC
+ rm -f conftest.$ac_ext
+ fi
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5
+echo "${ECHO_T}$ac_cv_sys_largefile_CC" >&6
+ if test "$ac_cv_sys_largefile_CC" != no; then
+ CC=$CC$ac_cv_sys_largefile_CC
+ fi
+
+ echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5
+echo $ECHO_N "checking for _FILE_OFFSET_BITS value needed for large files... $ECHO_C" >&6
+if test "${ac_cv_sys_file_offset_bits+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ while :; do
+ ac_cv_sys_file_offset_bits=no
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _FILE_OFFSET_BITS 64
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_file_offset_bits=64; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ break
+done
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5
+echo "${ECHO_T}$ac_cv_sys_file_offset_bits" >&6
+if test "$ac_cv_sys_file_offset_bits" != no; then
+
+cat >>confdefs.h <<_ACEOF
+#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits
+_ACEOF
+
+fi
+rm -f conftest*
+ echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5
+echo $ECHO_N "checking for _LARGE_FILES value needed for large files... $ECHO_C" >&6
+if test "${ac_cv_sys_large_files+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ while :; do
+ ac_cv_sys_large_files=no
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _LARGE_FILES 1
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_large_files=1; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+fi
+rm -f conftest.$ac_objext conftest.$ac_ext
+ break
+done
+fi
+echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5
+echo "${ECHO_T}$ac_cv_sys_large_files" >&6
+if test "$ac_cv_sys_large_files" != no; then
+
+cat >>confdefs.h <<_ACEOF
+#define _LARGE_FILES $ac_cv_sys_large_files
+_ACEOF
+
+fi
+rm -f conftest*
+fi
+
+
+# Figure out how to create shared regions.
+#
+# First, we look for mmap.
+#
+# BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
+#
+# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
+# is defined in the C library) but does not support munmap(2). Don't
+# try to use mmap if we can't find munmap.
+#
+# Ultrix has mmap(2), but it doesn't work.
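+
For reference, a minimal sketch (the file name, permissions, and region size are placeholders) of the mmap/munmap-based shared region the checks below look for: a file-backed MAP_SHARED mapping that cooperating processes can all attach to.

```c
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	const size_t len = 4096;	/* placeholder region size */
	int fd = open("__region", O_RDWR | O_CREAT, 0600);

	if (fd == -1 || ftruncate(fd, (off_t)len) == -1)
		return (1);

	/* MAP_SHARED makes updates visible to other processes mapping
	 * the same file, which is the basis of a shared region. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);

	munmap(p, len);
	close(fd);
	unlink("__region");
	return (0);
}
```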
+mmap_ok=no
+case "$host_os" in
+bsdi3*|bsdi4.0)
+ { echo "$as_me:$LINENO: WARNING: mlock(2) interface ignored on BSD/OS 3.X and 4.0." >&5
+echo "$as_me: WARNING: mlock(2) interface ignored on BSD/OS 3.X and 4.0." >&2;}
+ mmap_ok=yes
+
+
+for ac_func in mmap munmap
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ mmap_ok=no
+fi
+done
+;;
+ultrix*)
+ { echo "$as_me:$LINENO: WARNING: mmap(2) interface ignored on Ultrix." >&5
+echo "$as_me: WARNING: mmap(2) interface ignored on Ultrix." >&2;};;
+*)
+ mmap_ok=yes
+
+
+for ac_func in mlock munlock
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+
+for ac_func in mmap munmap
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ mmap_ok=no
+fi
+done
+;;
+esac
+
+# Second, we look for shmget.
+#
+# SunOS has the shmget(2) interfaces, but there appears to be a missing
+# #include <debug/debug.h> file, so we ignore them.
+shmget_ok=no
+case "$host_os" in
+sunos*)
+ { echo "$as_me:$LINENO: WARNING: shmget(2) interface ignored on SunOS." >&5
+echo "$as_me: WARNING: shmget(2) interface ignored on SunOS." >&2;};;
+*)
+ shmget_ok=yes
+
+for ac_func in shmget
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6
+if eval "test \"\${$as_ac_var+set}\" = set"; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+#line $LINENO "configure"
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+/* Override any gcc2 internal prototype to avoid an error. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined (__stub_$ac_func) || defined (__stub___$ac_func)
+choke me
+#else
+char (*f) () = $ac_func;
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+return f != $ac_func;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+eval "$as_ac_var=no"
+fi
+rm -f conftest.$ac_objext conftest$ac_exeext conftest.$ac_ext
+fi
+echo "$as_me:$LINENO: result: `eval echo '${'$as_ac_var'}'`" >&5
+echo "${ECHO_T}`eval echo '${'$as_ac_var'}'`" >&6
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ shmget_ok=no
+fi
+done
+;;
+esac
+
+# We require either mmap/munmap(2) or shmget(2).
+if test "$mmap_ok" = no -a "$shmget_ok" = no; then
+ { echo "$as_me:$LINENO: WARNING: Neither mmap/munmap(2) nor shmget(2) library functions." >&5
+echo "$as_me: WARNING: Neither mmap/munmap(2) nor shmget(2) library functions." >&2;}
+fi
+
+# If we're not doing version name substitution, DB_VERSION_UNIQUE_NAME
+# needs to be erased.
+if test "$db_cv_uniquename" = "no"; then
+ DB_VERSION_UNIQUE_NAME=""
+fi
+
+# This is necessary so that .o files in LIBOBJS are also built via
+# the ANSI2KNR-filtering rules.
+LIBOBJS=`echo "$LIBOBJS" |
+ sed 's,\.[^.]* ,$U&,g;s,\.[^.]*$,$U&,'`
+LTLIBOBJS=`echo "$LIBOBJS" |
+ sed 's,\.[^.]* ,.lo ,g;s,\.[^.]*$,.lo,'`
+
+
+# Initial output file list.
+CREATE_LIST="Makefile
+ db_cxx.h:$srcdir/../dbinc/db_cxx.in
+ db_int.h:$srcdir/../dbinc/db_int.in
+ include.tcl:$srcdir/../test/include.tcl"
+
+# Create the db.h file from a source file, a list of global function
+# prototypes, and, if configured for unique names, a list of #defines
+# to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in"
+else
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in"
+fi
+
+# If configured for unique names, create the db_int_uext.h file (which
+# does the DB_VERSION_UNIQUE_NAME substitution), which is included by
+# the db_int.h file.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_int_def.h:$srcdir/../dbinc_auto/int_def.in"
+ db_int_def='#include "db_int_def.h"'
+fi
+
+# Create the db_185.h and db185_int.h files from source files, a list of
+# global function prototypes, and, if configured for unique names, a list
+# of #defines to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_compat185" = "yes"; then
+ if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ else
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ fi
+fi
+
+if test "$db_cv_embedix" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.ecd:../dist/db.ecd.in"
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in"
+fi
+
+ ac_config_files="$ac_config_files $CREATE_LIST"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+{
+ (set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+} |
+ sed '
+ t clear
+ : clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ : end' >>confcache
+if diff $cache_file confcache >/dev/null 2>&1; then :; else
+ if test -w $cache_file; then
+ test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file"
+ cat confcache >$cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/;
+s/:*\${srcdir}:*/:/;
+s/:*@srcdir@:*/:/;
+s/^\([^=]*=[ ]*\):*/\1/;
+s/:*$//;
+s/^[^=]*=[ ]*$//;
+}'
+fi
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_i=`echo "$ac_i" |
+ sed 's/\$U\././;s/\.o$//;s/\.obj$//'`
+ # 2. Add them.
+ ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+# Support unset when possible.
+if (FOO=FOO; unset FOO) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -n "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5
+echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5
+echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;}
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original, and so on; Autoconf is especially sensitive to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
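A self-contained sketch of the two-sed pipeline described above; demo.sh, its two echo lines, and the simplified character classes are made up for the example:

    # demo.sh is a made-up two-line script that uses $LINENO.
    printf '%s\n' 'echo "this is line $LINENO"' \
                  'echo "this is line $LINENO"' >demo.sh
    # First sed ('=') emits each line number on a line of its own; the second
    # joins the pair (N), appends '-' so $LINENO never sits at end of line,
    # replaces every $LINENO with the saved number, then strips the helpers.
    sed '=' <demo.sh |
      sed '
        N
        s,$,-,
        : loop
        s,^\([0-9]*\)\(.*\)[$]LINENO\([^a-zA-Z0-9_]\),\1\2\1\3,
        t loop
        s,-$,,
        s,^[0-9]*\n,,
      '
    # Resulting text:
    #   echo "this is line 1"
    #   echo "this is line 2"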
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="sed y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="sed y%*+%pp%;s%[^_$as_cr_alnum]%_%g"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+exec 6>&1
+
+# Open the log real soon, to keep \$[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling. Logging --version etc. is OK.
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+} >&5
+cat >&5 <<_CSEOF
+
+This file was extended by Berkeley DB $as_me 4.1.25, which was
+generated by GNU Autoconf 2.57. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+_CSEOF
+echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5
+echo >&5
+_ACEOF
+
+# Files that config.status was made for.
+if test -n "$ac_config_files"; then
+ echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_headers"; then
+ echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_links"; then
+ echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_commands"; then
+ echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to <bug-autoconf@gnu.org>."
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+Berkeley DB config.status 4.1.25
+configured by $0, generated by GNU Autoconf 2.57,
+ with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+srcdir=$srcdir
+INSTALL="$INSTALL"
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no files are specified by the user, then we need to provide default
+# values. But we need to know whether files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "x$1" : 'x\([^=]*\)='`
+ ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ -*)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ *) # This is not an option, so the user has probably given explicit
+ # arguments.
+ ac_option=$1
+ ac_need_defaults=false;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --vers* | -V )
+ echo "$ac_cs_version"; exit 0 ;;
+ --he | --h)
+ # Conflict between --help and --header
+ { { echo "$as_me:$LINENO: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1" ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+
+
+
+
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_config_target in $ac_config_targets
+do
+ case "$ac_config_target" in
+ # Handling of arguments.
+ "$CREATE_LIST" ) CONFIG_FILES="$CONFIG_FILES $CREATE_LIST" ;;
+ "db_config.h" ) CONFIG_HEADERS="$CONFIG_HEADERS db_config.h:config.hin" ;;
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason to put it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Create a temporary directory, and hook for its removal unless debugging.
+$debug ||
+{
+ trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./confstat$$-$RANDOM
+ (umask 077 && mkdir $tmp)
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+
+#
+# CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "\$CONFIG_FILES"; then
+ # Protect against being on the right side of a sed subst in config.status.
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g;
+ s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF
+s,@SHELL@,$SHELL,;t t
+s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t
+s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t
+s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t
+s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t
+s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t
+s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t
+s,@exec_prefix@,$exec_prefix,;t t
+s,@prefix@,$prefix,;t t
+s,@program_transform_name@,$program_transform_name,;t t
+s,@bindir@,$bindir,;t t
+s,@sbindir@,$sbindir,;t t
+s,@libexecdir@,$libexecdir,;t t
+s,@datadir@,$datadir,;t t
+s,@sysconfdir@,$sysconfdir,;t t
+s,@sharedstatedir@,$sharedstatedir,;t t
+s,@localstatedir@,$localstatedir,;t t
+s,@libdir@,$libdir,;t t
+s,@includedir@,$includedir,;t t
+s,@oldincludedir@,$oldincludedir,;t t
+s,@infodir@,$infodir,;t t
+s,@mandir@,$mandir,;t t
+s,@build_alias@,$build_alias,;t t
+s,@host_alias@,$host_alias,;t t
+s,@target_alias@,$target_alias,;t t
+s,@DEFS@,$DEFS,;t t
+s,@ECHO_C@,$ECHO_C,;t t
+s,@ECHO_N@,$ECHO_N,;t t
+s,@ECHO_T@,$ECHO_T,;t t
+s,@LIBS@,$LIBS,;t t
+s,@build@,$build,;t t
+s,@build_cpu@,$build_cpu,;t t
+s,@build_vendor@,$build_vendor,;t t
+s,@build_os@,$build_os,;t t
+s,@host@,$host,;t t
+s,@host_cpu@,$host_cpu,;t t
+s,@host_vendor@,$host_vendor,;t t
+s,@host_os@,$host_os,;t t
+s,@ADDITIONAL_INCS@,$ADDITIONAL_INCS,;t t
+s,@ADDITIONAL_LANG@,$ADDITIONAL_LANG,;t t
+s,@ADDITIONAL_OBJS@,$ADDITIONAL_OBJS,;t t
+s,@ADDITIONAL_PROGS@,$ADDITIONAL_PROGS,;t t
+s,@BUILD_TARGET@,$BUILD_TARGET,;t t
+s,@CFLAGS@,$CFLAGS,;t t
+s,@CONFIGURATION_ARGS@,$CONFIGURATION_ARGS,;t t
+s,@CONFIGURATION_PATH@,$CONFIGURATION_PATH,;t t
+s,@CPPFLAGS@,$CPPFLAGS,;t t
+s,@CXX@,$CXX,;t t
+s,@CXXFLAGS@,$CXXFLAGS,;t t
+s,@DEFAULT_LIB@,$DEFAULT_LIB,;t t
+s,@DEFAULT_LIB_CXX@,$DEFAULT_LIB_CXX,;t t
+s,@EMBEDIX_ECD_CXX@,$EMBEDIX_ECD_CXX,;t t
+s,@EMBEDIX_ECD_RPC@,$EMBEDIX_ECD_RPC,;t t
+s,@EMBEDIX_ROOT@,$EMBEDIX_ROOT,;t t
+s,@INSTALLER@,$INSTALLER,;t t
+s,@INSTALL_LIBS@,$INSTALL_LIBS,;t t
+s,@INSTALL_TARGET@,$INSTALL_TARGET,;t t
+s,@JAR@,$JAR,;t t
+s,@JAVACFLAGS@,$JAVACFLAGS,;t t
+s,@LDFLAGS@,$LDFLAGS,;t t
+s,@LIBJSO_LIBS@,$LIBJSO_LIBS,;t t
+s,@LIBSO_LIBS@,$LIBSO_LIBS,;t t
+s,@LIBTOOL@,$LIBTOOL,;t t
+s,@LIBTSO_LIBS@,$LIBTSO_LIBS,;t t
+s,@LIBXSO_LIBS@,$LIBXSO_LIBS,;t t
+s,@LOAD_LIBS@,$LOAD_LIBS,;t t
+s,@MAKEFILE_CC@,$MAKEFILE_CC,;t t
+s,@MAKEFILE_CCLINK@,$MAKEFILE_CCLINK,;t t
+s,@MAKEFILE_CXX@,$MAKEFILE_CXX,;t t
+s,@MAKEFILE_CXXLINK@,$MAKEFILE_CXXLINK,;t t
+s,@MAKEFILE_MAYBE_WIN32@,$MAKEFILE_MAYBE_WIN32,;t t
+s,@MAKEFILE_SOLINK@,$MAKEFILE_SOLINK,;t t
+s,@MAKEFILE_XSOLINK@,$MAKEFILE_XSOLINK,;t t
+s,@POSTLINK@,$POSTLINK,;t t
+s,@RPC_CLIENT_OBJS@,$RPC_CLIENT_OBJS,;t t
+s,@RPM_POST_INSTALL@,$RPM_POST_INSTALL,;t t
+s,@RPM_POST_UNINSTALL@,$RPM_POST_UNINSTALL,;t t
+s,@SOFLAGS@,$SOFLAGS,;t t
+s,@db_cv_path_embedix_install@,$db_cv_path_embedix_install,;t t
+s,@db_cv_path_rpm_archive@,$db_cv_path_rpm_archive,;t t
+s,@db_int_def@,$db_int_def,;t t
+s,@o@,$o,;t t
+s,@DB_VERSION_MAJOR@,$DB_VERSION_MAJOR,;t t
+s,@DB_VERSION_MINOR@,$DB_VERSION_MINOR,;t t
+s,@DB_VERSION_PATCH@,$DB_VERSION_PATCH,;t t
+s,@DB_VERSION_UNIQUE_NAME@,$DB_VERSION_UNIQUE_NAME,;t t
+s,@DB_VERSION_STRING@,$DB_VERSION_STRING,;t t
+s,@db_cv_path_ar@,$db_cv_path_ar,;t t
+s,@ac_ct_db_cv_path_ar@,$ac_ct_db_cv_path_ar,;t t
+s,@db_cv_path_chmod@,$db_cv_path_chmod,;t t
+s,@ac_ct_db_cv_path_chmod@,$ac_ct_db_cv_path_chmod,;t t
+s,@db_cv_path_cp@,$db_cv_path_cp,;t t
+s,@ac_ct_db_cv_path_cp@,$ac_ct_db_cv_path_cp,;t t
+s,@path_ldconfig@,$path_ldconfig,;t t
+s,@ac_ct_path_ldconfig@,$ac_ct_path_ldconfig,;t t
+s,@db_cv_path_ldconfig@,$db_cv_path_ldconfig,;t t
+s,@db_cv_path_ln@,$db_cv_path_ln,;t t
+s,@ac_ct_db_cv_path_ln@,$ac_ct_db_cv_path_ln,;t t
+s,@db_cv_path_mkdir@,$db_cv_path_mkdir,;t t
+s,@ac_ct_db_cv_path_mkdir@,$ac_ct_db_cv_path_mkdir,;t t
+s,@path_ranlib@,$path_ranlib,;t t
+s,@ac_ct_path_ranlib@,$ac_ct_path_ranlib,;t t
+s,@db_cv_path_ranlib@,$db_cv_path_ranlib,;t t
+s,@db_cv_path_rm@,$db_cv_path_rm,;t t
+s,@ac_ct_db_cv_path_rm@,$ac_ct_db_cv_path_rm,;t t
+s,@db_cv_path_rpm@,$db_cv_path_rpm,;t t
+s,@ac_ct_db_cv_path_rpm@,$ac_ct_db_cv_path_rpm,;t t
+s,@path_sh@,$path_sh,;t t
+s,@ac_ct_path_sh@,$ac_ct_path_sh,;t t
+s,@db_cv_path_sh@,$db_cv_path_sh,;t t
+s,@path_strip@,$path_strip,;t t
+s,@ac_ct_path_strip@,$ac_ct_path_strip,;t t
+s,@db_cv_path_strip@,$db_cv_path_strip,;t t
+s,@db_cv_path_kill@,$db_cv_path_kill,;t t
+s,@ac_ct_db_cv_path_kill@,$ac_ct_db_cv_path_kill,;t t
+s,@INSTALL_PROGRAM@,$INSTALL_PROGRAM,;t t
+s,@INSTALL_SCRIPT@,$INSTALL_SCRIPT,;t t
+s,@INSTALL_DATA@,$INSTALL_DATA,;t t
+s,@CC@,$CC,;t t
+s,@ac_ct_CC@,$ac_ct_CC,;t t
+s,@EXEEXT@,$EXEEXT,;t t
+s,@OBJEXT@,$OBJEXT,;t t
+s,@CCC@,$CCC,;t t
+s,@ac_ct_CCC@,$ac_ct_CCC,;t t
+s,@ac_ct_CXX@,$ac_ct_CXX,;t t
+s,@cxx_have_stdheaders@,$cxx_have_stdheaders,;t t
+s,@LN_S@,$LN_S,;t t
+s,@ECHO@,$ECHO,;t t
+s,@RANLIB@,$RANLIB,;t t
+s,@ac_ct_RANLIB@,$ac_ct_RANLIB,;t t
+s,@STRIP@,$STRIP,;t t
+s,@ac_ct_STRIP@,$ac_ct_STRIP,;t t
+s,@CPP@,$CPP,;t t
+s,@EGREP@,$EGREP,;t t
+s,@SOSUFFIX@,$SOSUFFIX,;t t
+s,@MODSUFFIX@,$MODSUFFIX,;t t
+s,@JMODSUFFIX@,$JMODSUFFIX,;t t
+s,@JAVAC@,$JAVAC,;t t
+s,@_ACJNI_JAVAC@,$_ACJNI_JAVAC,;t t
+s,@TCFLAGS@,$TCFLAGS,;t t
+s,@TCL_BIN_DIR@,$TCL_BIN_DIR,;t t
+s,@TCL_SRC_DIR@,$TCL_SRC_DIR,;t t
+s,@TCL_LIB_FILE@,$TCL_LIB_FILE,;t t
+s,@TCL_TCLSH@,$TCL_TCLSH,;t t
+s,@u_char_decl@,$u_char_decl,;t t
+s,@u_short_decl@,$u_short_decl,;t t
+s,@u_int_decl@,$u_int_decl,;t t
+s,@u_long_decl@,$u_long_decl,;t t
+s,@u_int8_decl@,$u_int8_decl,;t t
+s,@u_int16_decl@,$u_int16_decl,;t t
+s,@int16_decl@,$int16_decl,;t t
+s,@u_int32_decl@,$u_int32_decl,;t t
+s,@int32_decl@,$int32_decl,;t t
+s,@ssize_t_decl@,$ssize_t_decl,;t t
+s,@db_align_t_decl@,$db_align_t_decl,;t t
+s,@db_alignp_t_decl@,$db_alignp_t_decl,;t t
+s,@LIBOBJS@,$LIBOBJS,;t t
+s,@LTLIBOBJS@,$LTLIBOBJS,;t t
+CEOF
+
+_ACEOF
+
+ cat >>$CONFIG_STATUS <<\_ACEOF
+ # Split the substitutions into bite-sized pieces for seds with
+ # small command number limits, like on Digital OSF/1 and HP-UX.
+ ac_max_sed_lines=48
+ ac_sed_frag=1 # Number of current file.
+ ac_beg=1 # First line for current file.
+ ac_end=$ac_max_sed_lines # Line after last line for current file.
+ ac_more_lines=:
+ ac_sed_cmds=
+ while $ac_more_lines; do
+ if test $ac_beg -gt 1; then
+ sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ else
+ sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ fi
+ if test ! -s $tmp/subs.frag; then
+ ac_more_lines=false
+ else
+ # The purpose of the label and of the branching condition is to
+ # speed up the sed processing (if there are no `@' at all, there
+ # is no need to browse any of the substitutions).
+ # These are the two extra sed commands mentioned above.
+ (echo ':t
+ /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed"
+ else
+ ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed"
+ fi
+ ac_sed_frag=`expr $ac_sed_frag + 1`
+ ac_beg=$ac_end
+ ac_end=`expr $ac_end + $ac_max_sed_lines`
+ fi
+ done
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds=cat
+ fi
+fi # test -n "$CONFIG_FILES"
+
+_ACEOF
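The same splitting idea in miniature, with an invented four-entry substitution list and a limit of three commands per fragment so that two fragments are produced:

    printf '%s\n' 's,@ONE@,1,' 's,@TWO@,2,' 's,@THREE@,3,' 's,@FOUR@,4,' >subs.sed
    max=3; beg=1; end=3; frag=1; cmds=; more=:
    while $more; do
      if test $beg -gt 1; then
        sed "1,${beg}d; ${end}q" subs.sed >subs-$frag.sed
      else
        sed "${end}q" subs.sed >subs-$frag.sed
      fi
      if test ! -s subs-$frag.sed; then
        more=false
      else
        if test -z "$cmds"; then
          cmds="sed -f subs-$frag.sed"
        else
          cmds="$cmds | sed -f subs-$frag.sed"
        fi
        frag=`expr $frag + 1`
        beg=$end
        end=`expr $end + $max`
      fi
    done
    echo "@ONE@ @FOUR@" | eval "$cmds"     # prints: 1 4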
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case $ac_file in
+ - | *:- | *:-:* ) # input from stdin
+ cat >$tmp/stdin
+ ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ * ) ac_file_in=$ac_file.in ;;
+ esac
+
+ # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories.
+ ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ { if $as_mkdir_p; then
+ mkdir -p "$ac_dir"
+ else
+ as_dir="$ac_dir"
+ as_dirs=
+ while test ! -d "$as_dir"; do
+ as_dirs="$as_dir $as_dirs"
+ as_dir=`(dirname "$as_dir") 2>/dev/null ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ done
+ test ! -n "$as_dirs" || mkdir $as_dirs
+ fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
+echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
+ { (exit 1); exit 1; }; }; }
+
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+# Don't blindly perform a `cd "$ac_dir"/$ac_foo && pwd` since $ac_foo can be
+# absolute.
+ac_abs_builddir=`cd "$ac_dir" && cd $ac_builddir && pwd`
+ac_abs_top_builddir=`cd "$ac_dir" && cd ${ac_top_builddir}. && pwd`
+ac_abs_srcdir=`cd "$ac_dir" && cd $ac_srcdir && pwd`
+ac_abs_top_srcdir=`cd "$ac_dir" && cd $ac_top_srcdir && pwd`
+
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_builddir$INSTALL ;;
+ esac
+
+ if test x"$ac_file" != x-; then
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ rm -f "$ac_file"
+ fi
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), as people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ if test x"$ac_file" = x-; then
+ configure_input=
+ else
+ configure_input="$ac_file. "
+ fi
+ configure_input=$configure_input"Generated from `echo $ac_file_in |
+ sed 's,.*/,,'` by configure."
+
+ # First look for the input files in the build tree, otherwise in the
+ # src tree.
+ ac_file_inputs=`IFS=:
+ for f in $ac_file_in; do
+ case $f in
+ -) echo $tmp/stdin ;;
+ [\\/$]*)
+ # Absolute (can't be DOS-style, as IFS=:)
+ test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ echo $f;;
+ *) # Relative
+ if test -f "$f"; then
+ # Build tree
+ echo $f
+ elif test -f "$srcdir/$f"; then
+ # Source tree
+ echo $srcdir/$f
+ else
+ # /dev/null tree
+ { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ fi;;
+ esac
+ done` || { (exit 1); exit 1; }
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ sed "$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s,@configure_input@,$configure_input,;t t
+s,@srcdir@,$ac_srcdir,;t t
+s,@abs_srcdir@,$ac_abs_srcdir,;t t
+s,@top_srcdir@,$ac_top_srcdir,;t t
+s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t
+s,@builddir@,$ac_builddir,;t t
+s,@abs_builddir@,$ac_abs_builddir,;t t
+s,@top_builddir@,$ac_top_builddir,;t t
+s,@abs_top_builddir@,$ac_abs_top_builddir,;t t
+s,@INSTALL@,$ac_INSTALL,;t t
+" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out
+ rm -f $tmp/stdin
+ if test x"$ac_file" != x-; then
+ mv $tmp/out $ac_file
+ else
+ cat $tmp/out
+ rm -f $tmp/out
+ fi
+
+done
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+#
+# CONFIG_HEADER section.
+#
+
+# These sed commands are passed to sed as "A NAME B NAME C VALUE D", where
+# NAME is the cpp macro being defined and VALUE is the value it is being given.
+#
+# ac_d sets the value in "#define NAME VALUE" lines.
+ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)'
+ac_dB='[ ].*$,\1#\2'
+ac_dC=' '
+ac_dD=',;t'
+# ac_u turns "#undef NAME" without trailing blanks into "#define NAME VALUE".
+ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)'
+ac_uB='$,\1#\2define\3'
+ac_uC=' '
+ac_uD=',;t'
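For illustration, this is how the fragments above are glued together as "A NAME B NAME C VALUE D"; HAVE_EXAMPLE and the value 1 are invented:

    ac_dA='s,^\([ ]*\)#\([ ]*define[ ][ ]*\)'
    ac_dB='[ ].*$,\1#\2'
    ac_dC=' '
    ac_dD=',;t'
    ac_uA='s,^\([ ]*\)#\([ ]*\)undef\([ ][ ]*\)'
    ac_uB='$,\1#\2define\3'
    ac_uC=' '
    ac_uD=',;t'
    printf '%s\n' '#define HAVE_EXAMPLE 0' '#undef HAVE_EXAMPLE' |
      sed "${ac_dA}HAVE_EXAMPLE${ac_dB}HAVE_EXAMPLE${ac_dC}1${ac_dD}
    ${ac_uA}HAVE_EXAMPLE${ac_uB}HAVE_EXAMPLE${ac_uC}1${ac_uD}"
    # Both lines come out as:  #define HAVE_EXAMPLE 1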
+
+for ac_file in : $CONFIG_HEADERS; do test "x$ac_file" = x: && continue
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case $ac_file in
+ - | *:- | *:-:* ) # input from stdin
+ cat >$tmp/stdin
+ ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ * ) ac_file_in=$ac_file.in ;;
+ esac
+
+ test x"$ac_file" != x- && { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+
+ # First look for the input files in the build tree, otherwise in the
+ # src tree.
+ ac_file_inputs=`IFS=:
+ for f in $ac_file_in; do
+ case $f in
+ -) echo $tmp/stdin ;;
+ [\\/$]*)
+ # Absolute (can't be DOS-style, as IFS=:)
+ test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ echo $f;;
+ *) # Relative
+ if test -f "$f"; then
+ # Build tree
+ echo $f
+ elif test -f "$srcdir/$f"; then
+ # Source tree
+ echo $srcdir/$f
+ else
+ # /dev/null tree
+ { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ fi;;
+ esac
+ done` || { (exit 1); exit 1; }
+ # Remove the trailing spaces.
+ sed 's/[ ]*$//' $ac_file_inputs >$tmp/in
+
+_ACEOF
+
+# Transform confdefs.h into two sed scripts, `conftest.defines' and
+# `conftest.undefs', that substitutes the proper values into
+# config.h.in to produce config.h. The first handles `#define'
+# templates, and the second `#undef' templates.
+# And first: Protect against being on the right side of a sed subst in
+# config.status. Protect against being in an unquoted here document
+# in config.status.
+rm -f conftest.defines conftest.undefs
+# Using a here document instead of a string reduces the quoting nightmare.
+# Putting comments in sed scripts is not portable.
+#
+# `end' is used to keep the second main sed command (meant for
+# 0-ary CPP macros) from applying to n-ary macro definitions.
+# See the Autoconf documentation for `clear'.
+cat >confdef2sed.sed <<\_ACEOF
+s/[\\&,]/\\&/g
+s,[\\$`],\\&,g
+t clear
+: clear
+s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*\)\(([^)]*)\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1\2${ac_dC}\3${ac_dD},gp
+t end
+s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)$,${ac_dA}\1${ac_dB}\1${ac_dC}\2${ac_dD},gp
+: end
+_ACEOF
+# If some macros were called several times, the same #defines may appear
+# several times, which is useless. Nevertheless, we may not want to
+# sort them, since we want the *last* AC-DEFINE to be honored.
+uniq confdefs.h | sed -n -f confdef2sed.sed >conftest.defines
+sed 's/ac_d/ac_u/g' conftest.defines >conftest.undefs
+rm -f confdef2sed.sed
+
+# This sed command replaces #undef with comments. This is necessary, for
+# example, in the case of _POSIX_SOURCE, which is predefined and required
+# on some systems where configure will not decide to define it.
+cat >>conftest.undefs <<\_ACEOF
+s,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */,
+_ACEOF
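A one-line demonstration of the fallback rule just appended: any #undef template that no earlier command rewrote becomes a comment (the macro name is only an example):

    printf '%s\n' '#undef _POSIX_SOURCE' |
      sed 's,^[ ]*#[ ]*undef[ ][ ]*[a-zA-Z_][a-zA-Z_0-9]*,/* & */,'
    # Prints: /* #undef _POSIX_SOURCE */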
+
+# Break up conftest.defines because some shells have a limit on the size
+# of here documents, and old seds have small limits too (100 cmds).
+echo ' # Handle all the #define templates only if necessary.' >>$CONFIG_STATUS
+echo ' if grep "^[ ]*#[ ]*define" $tmp/in >/dev/null; then' >>$CONFIG_STATUS
+echo ' # If there are no defines, we may have an empty if/fi' >>$CONFIG_STATUS
+echo ' :' >>$CONFIG_STATUS
+rm -f conftest.tail
+while grep . conftest.defines >/dev/null
+do
+ # Write a limited-size here document to $tmp/defines.sed.
+ echo ' cat >$tmp/defines.sed <<CEOF' >>$CONFIG_STATUS
+ # Speed up: don't consider the non `#define' lines.
+ echo '/^[ ]*#[ ]*define/!b' >>$CONFIG_STATUS
+ # Work around the forget-to-reset-the-flag bug.
+ echo 't clr' >>$CONFIG_STATUS
+ echo ': clr' >>$CONFIG_STATUS
+ sed ${ac_max_here_lines}q conftest.defines >>$CONFIG_STATUS
+ echo 'CEOF
+ sed -f $tmp/defines.sed $tmp/in >$tmp/out
+ rm -f $tmp/in
+ mv $tmp/out $tmp/in
+' >>$CONFIG_STATUS
+ sed 1,${ac_max_here_lines}d conftest.defines >conftest.tail
+ rm -f conftest.defines
+ mv conftest.tail conftest.defines
+done
+rm -f conftest.defines
+echo ' fi # grep' >>$CONFIG_STATUS
+echo >>$CONFIG_STATUS
+
+# Break up conftest.undefs because some shells have a limit on the size
+# of here documents, and old seds have small limits too (100 cmds).
+echo ' # Handle all the #undef templates' >>$CONFIG_STATUS
+rm -f conftest.tail
+while grep . conftest.undefs >/dev/null
+do
+ # Write a limited-size here document to $tmp/undefs.sed.
+ echo ' cat >$tmp/undefs.sed <<CEOF' >>$CONFIG_STATUS
+ # Speed up: don't consider the non `#undef'
+ echo '/^[ ]*#[ ]*undef/!b' >>$CONFIG_STATUS
+ # Work around the forget-to-reset-the-flag bug.
+ echo 't clr' >>$CONFIG_STATUS
+ echo ': clr' >>$CONFIG_STATUS
+ sed ${ac_max_here_lines}q conftest.undefs >>$CONFIG_STATUS
+ echo 'CEOF
+ sed -f $tmp/undefs.sed $tmp/in >$tmp/out
+ rm -f $tmp/in
+ mv $tmp/out $tmp/in
+' >>$CONFIG_STATUS
+ sed 1,${ac_max_here_lines}d conftest.undefs >conftest.tail
+ rm -f conftest.undefs
+ mv conftest.tail conftest.undefs
+done
+rm -f conftest.undefs
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), as people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ if test x"$ac_file" = x-; then
+ echo "/* Generated by configure. */" >$tmp/config.h
+ else
+ echo "/* $ac_file. Generated by configure. */" >$tmp/config.h
+ fi
+ cat $tmp/in >>$tmp/config.h
+ rm -f $tmp/in
+ if test x"$ac_file" != x-; then
+ if diff $ac_file $tmp/config.h >/dev/null 2>&1; then
+ { echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ { if $as_mkdir_p; then
+ mkdir -p "$ac_dir"
+ else
+ as_dir="$ac_dir"
+ as_dirs=
+ while test ! -d "$as_dir"; do
+ as_dirs="$as_dir $as_dirs"
+ as_dir=`(dirname "$as_dir") 2>/dev/null ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ done
+ test ! -n "$as_dirs" || mkdir $as_dirs
+ fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
+echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
+ { (exit 1); exit 1; }; }; }
+
+ rm -f $ac_file
+ mv $tmp/config.h $ac_file
+ fi
+ else
+ cat $tmp/config.h
+ rm -f $tmp/config.h
+ fi
+done
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+
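For orientation, typical ways the generated config.status is invoked afterwards, matching the option and target handling shown above (illustrative only):

    ./config.status                  # regenerate all configured files/headers
    ./config.status db_config.h     # regenerate just the configuration header
    ./config.status --recheck        # re-run configure with its original args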
diff --git a/libdb/dist/configure.ac b/libdb/dist/configure.ac
new file mode 100644
index 0000000..fce2b3d
--- /dev/null
+++ b/libdb/dist/configure.ac
@@ -0,0 +1,608 @@
+# $Id$
+# Process this file with autoconf to produce a configure script.
+
+PACKAGE=db
+AC_INIT(Berkeley DB,
+ __EDIT_DB_VERSION__, support@sleepycat.com, db-__EDIT_DB_VERSION__)
+AC_CONFIG_SRCDIR([../db/db.c])
+AC_CONFIG_HEADER(db_config.h:config.hin)
+
+# Configure setup.
+AC_CANONICAL_HOST()
+AC_ARG_PROGRAM()
+
+# We cannot build in the top-level directory.
+AC_MSG_CHECKING(if building in the top-level directory)
+[ test -d db_archive ] && AC_MSG_ERROR([
+Berkeley DB cannot be built in the top-level distribution directory.])
+AC_MSG_RESULT(no)
+
+# Substitution variables.
+AC_SUBST(ADDITIONAL_INCS)
+AC_SUBST(ADDITIONAL_LANG)
+AC_SUBST(ADDITIONAL_OBJS)
+AC_SUBST(ADDITIONAL_PROGS)
+AC_SUBST(BUILD_TARGET)
+AC_SUBST(CFLAGS)
+AC_SUBST(CONFIGURATION_ARGS)
+AC_SUBST(CONFIGURATION_PATH)
+AC_SUBST(CPPFLAGS)
+AC_SUBST(CXX)
+AC_SUBST(CXXFLAGS)
+AC_SUBST(DEFAULT_LIB)
+AC_SUBST(DEFAULT_LIB_CXX)
+AC_SUBST(EMBEDIX_ECD_CXX)
+AC_SUBST(EMBEDIX_ECD_RPC)
+AC_SUBST(EMBEDIX_ROOT)
+AC_SUBST(EXEEXT)
+AC_SUBST(INSTALLER)
+AC_SUBST(INSTALL_LIBS)
+AC_SUBST(INSTALL_TARGET)
+AC_SUBST(JAR)
+AC_SUBST(JAVACFLAGS)
+AC_SUBST(LDFLAGS)
+AC_SUBST(LIBJSO_LIBS)
+AC_SUBST(LIBS)
+AC_SUBST(LIBSO_LIBS)
+AC_SUBST(LIBTOOL)
+AC_SUBST(LIBTSO_LIBS)
+AC_SUBST(LIBXSO_LIBS)
+AC_SUBST(LOAD_LIBS)
+AC_SUBST(MAKEFILE_CC)
+AC_SUBST(MAKEFILE_CCLINK)
+AC_SUBST(MAKEFILE_CXX)
+AC_SUBST(MAKEFILE_CXXLINK)
+AC_SUBST(MAKEFILE_MAYBE_WIN32)
+AC_SUBST(MAKEFILE_SOLINK)
+AC_SUBST(MAKEFILE_XSOLINK)
+AC_SUBST(POSTLINK)
+AC_SUBST(RPC_CLIENT_OBJS)
+AC_SUBST(RPM_POST_INSTALL)
+AC_SUBST(RPM_POST_UNINSTALL)
+AC_SUBST(SOFLAGS)
+AC_SUBST(db_cv_path_embedix_install)
+AC_SUBST(db_cv_path_rpm_archive)
+AC_SUBST(db_int_def)
+AC_SUBST(o)
+
+# RPM needs the current absolute path.
+# RPM needs the list of original arguments, but we don't include the RPM
+# option itself.
+CONFIGURATION_PATH=${PWD-`pwd`}
+CONFIGURATION_ARGS=`echo "$*" |
+ sed -e 's/--with-embedix[[^ ]]*//' -e 's/--with-rpm[[^ ]]*//'`
+
+# Set the version.
+AM_VERSION_SET
+
+# Set the default installation location.
+AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@)
+
+# Process all options before using them.
+AM_OPTIONS_SET
+
+# Set some #defines based on configuration options.
+if test "$db_cv_diagnostic" = yes; then
+ AC_DEFINE(DIAGNOSTIC)
+ AH_TEMPLATE(DIAGNOSTIC,
+ [Define to 1 if you want a version with run-time diagnostic checking.])
+fi
+if test "$db_cv_debug_rop" = yes; then
+ AC_DEFINE(DEBUG_ROP)
+ AH_TEMPLATE(DEBUG_ROP,
+ [Define to 1 if you want a version that logs read operations.])
+fi
+if test "$db_cv_debug_wop" = yes; then
+ AC_DEFINE(DEBUG_WOP)
+ AH_TEMPLATE(DEBUG_WOP,
+ [Define to 1 if you want a version that logs write operations.])
+fi
+if test "$db_cv_umrw" = yes; then
+ AC_DEFINE(UMRW)
+ AH_TEMPLATE(UMRW,
+ [Define to 1 to mask harmless uninitialized memory read/writes.])
+
+fi
+if test "$db_cv_test" = yes; then
+ AC_DEFINE(CONFIG_TEST)
+ AH_TEMPLATE(CONFIG_TEST,
+ [Define to 1 if you want to build a version for running the test suite.])
+fi
+
+# Check for programs used in building and installation.
+AM_PROGRAMS_SET
+AC_PROG_INSTALL
+
+# RPM/Embedix support: change the standard make and install targets
+if test "$db_cv_rpm" = "yes"; then
+ BUILD_TARGET="rpm_build"
+ echo "topdir: $CONFIGURATION_PATH" > rpmrc
+ if test "$db_cv_embedix" = "yes"; then
+ EMBEDIX_ROOT="/usr"
+ INSTALL_TARGET="embedix_install"
+ else
+ INSTALL_TARGET="rpm_install"
+ fi
+else
+ BUILD_TARGET="library_build"
+ INSTALL_TARGET="library_install"
+fi
+
+# This is where we handle stuff that autoconf can't handle: compiler,
+# preprocessor and load flags, libraries that the standard tests don't
+# look for. The default optimization is -O. We would like to set the
+# default optimization for systems using gcc to -O2, but we can't. By
+# the time we know we're using gcc, it's too late to set optimization
+# flags.
+#
+# There are additional libraries we need for some compiler/architecture
+# combinations.
+#
+# Some architectures require DB to be compiled with special flags and/or
+# libraries for threaded applications
+#
+# The makefile CC may be different than the CC used in config testing,
+# because the makefile CC may be set to use $(LIBTOOL).
+#
+# XXX
+# Don't override anything if it's already set from the environment.
+optimize_def="-O"
+MAKEFILE_MAYBE_WIN32=""
+EXEEXT=""
+SOFLAGS=""
+case "$host_os" in
+aix4.3.*|aix5*)
+ optimize_def="-O2"
+ CC=${CC-"xlc_r"}
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -Wl,-brtl";;
+bsdi3*) optimize_def="-O2"
+ CC=${CC-"shlicc2"}
+ LIBS="$LIBS -lipc";;
+bsdi*) optimize_def="-O2";;
+freebsd*)
+ optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -pthread";;
+hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+irix*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";;
+linux*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";;
+mingw*) optimize_def="-O2"
+ EXEEXT=".exe"
+ SOFLAGS="-no-undefined"
+ ADDITIONAL_OBJS="os_type.lo $ADDITIONAL_OBJS"
+ MAKEFILE_MAYBE_WIN32="_win32"
+ AC_DEFINE(DB_WIN32)
+ AH_TEMPLATE(DB_WIN32,
+ [Define to 1 if building for Win32.]);;
+mpeix*) CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE"
+ LIBS="$LIBS -lsocket -lsvipc";;
+osf*) CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ LDFLAGS="$LDFLAGS -pthread";;
+*qnx) AC_DEFINE(HAVE_QNX)
+ AH_TEMPLATE(HAVE_QNX, [Define to 1 if building on QNX.]);;
+solaris*)
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+esac
+
+# Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
+# compiler configuration macros, because if we don't, they set CFLAGS
+# to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_def}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
+
+# If the user wants a debugging environment, add -g to the CFLAGS value.
+#
+# XXX
+# Some compilers can't mix optimizing and debug flags. The only way to
+# handle this is to specify CFLAGS in the environment before configuring.
+if test "$db_cv_debug" = yes; then
+ AC_DEFINE(DEBUG)
+ AH_TEMPLATE(DEBUG, [Define to 1 if you want a debugging version.])
+
+ CFLAGS="$CFLAGS -g"
+ CXXFLAGS="$CXXFLAGS -g"
+fi
+
+# The default compiler is cc (NOT gcc), the default CFLAGS is as specified
+# above, NOT what is set by AC_PROG_CC, as it won't set optimization flags
+# for any compiler other than gcc.
+AC_PROG_CC(cc gcc)
+
+# Because of shared library building, the ${CC} used for config tests
+# may be different than the ${CC} we want to put in the Makefile.
+# The latter is known as ${MAKEFILE_CC} in this script.
+MAKEFILE_CC="${CC}"
+MAKEFILE_CCLINK="${CC}"
+MAKEFILE_CXX="nocxx"
+MAKEFILE_CXXLINK="nocxx"
+
+# See if we need the C++ compiler at all. If so, we'd like to find one that
+# interoperates with the C compiler we chose. Since we preferred cc over gcc,
+# we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
+# user can set CC and CXX in their environment before running configure.
+#
+# AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
+# first choices.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$GCC" != "yes"; then
+ case "$host_os" in
+ aix*) AC_CHECK_TOOL(CCC, xlC_r)
+ LIBXSO_LIBS="-lC_r $LIBXSO_LIBS"
+ LIBS="-lC_r $LIBS";;
+ hpux*) AC_CHECK_TOOL(CCC, aCC);;
+ irix*) AC_CHECK_TOOL(CCC, CC);;
+ osf*) AC_CHECK_TOOL(CCC, cxx);;
+ solaris*) AC_CHECK_TOOL(CCC, CC);;
+ esac
+ fi
+ AC_PROG_CXX
+ AC_CXX_HAVE_STDHEADERS
+ MAKEFILE_CXX="${CXX}"
+ MAKEFILE_CXXLINK="${CXX}"
+fi
+
+# Do some gcc specific configuration.
+AC_GCC_CONFIG1
+AC_GCC_CONFIG2
+
+# We need the -Kthread/-pthread flag when compiling on SCO/Caldera's UnixWare
+# and OpenUNIX releases. We can't make the test until we know which compiler
+# we're using.
+case "$host_os" in
+sysv5UnixWare*|sysv5OpenUNIX8*)
+ if test "$GCC" == "yes"; then
+ CPPFLAGS="$CPPFLAGS -pthread"
+ LDFLAGS="$LDFLAGS -pthread"
+ else
+ CPPFLAGS="$CPPFLAGS -Kthread"
+ LDFLAGS="$LDFLAGS -Kthread"
+ fi;;
+esac
+
+# Export our compiler preferences for the libtool configuration.
+export CC CCC
+CCC=CXX
+
+# Libtool configuration.
+AC_PROG_LIBTOOL
+
+LIBTOOL="\$(SHELL) ./libtool"
+
+# Set SOSUFFIX and friends
+SOSUFFIX_CONFIG
+MODSUFFIX_CONFIG
+JMODSUFFIX_CONFIG
+
+INSTALLER="\$(LIBTOOL) --mode=install cp -p"
+
+MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}"
+MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version"
+MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}"
+MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${MAKEFILE_CXX}"
+MAKEFILE_XSOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK} -avoid-version"
+MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}"
+
+DEFAULT_LIB="\$(libso_target)"
+POSTLINK="\$(LIBTOOL) --mode=execute true"
+o=".lo"
+INSTALL_LIBS="$DEFAULT_LIB"
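A small sketch, assuming CC=cc and no C++ support, of what the wrapping above produces; note that MAKEFILE_SOLINK is derived from MAKEFILE_CCLINK before the latter is itself wrapped, so libtool is not prepended twice:

    MAKEFILE_CC="cc"; MAKEFILE_CCLINK="cc"
    MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}"
    MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version"
    MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}"
    echo "$MAKEFILE_CC"       # $(LIBTOOL) --mode=compile cc
    echo "$MAKEFILE_SOLINK"   # $(LIBTOOL) --mode=link cc -avoid-version
    echo "$MAKEFILE_CCLINK"   # $(LIBTOOL) --mode=link cc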
+
+# Optional C++ API.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ DEFAULT_LIB_CXX="\$(libcxx)"
+ fi
+ if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB_CXX="\$(libxso_target)"
+ fi
+ INSTALL_LIBS="$INSTALL_LIBS $DEFAULT_LIB_CXX"
+
+ # Fill in C++ library for Embedix.
+ EMBEDIX_ECD_CXX='<OPTION db-extra>\
+ TYPE=bool\
+ DEFAULT_VALUE=1\
+ PROMPT=Include BerkeleyDB C++ library?\
+ <KEEPLIST>\
+ /usr/include/db_cxx.h\
+ /usr/lib/libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </KEEPLIST>\
+ <PROVIDES>\
+ libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </PROVIDES>\
+ <REQUIRES>\
+ ld-linux.so.2\
+ libc.so.6\
+ </REQUIRES>\
+ STATIC_SIZE=0\
+ STORAGE_SIZE=523612\
+ STARTUP_TIME=0\
+ </OPTION>'
+fi
+
+# Optional Java API.
+if test "$db_cv_java" = "yes"; then
+ # Java requires shared libraries.
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Java requires shared libraries])
+ fi
+
+ AC_PROG_JAVAC
+ AC_PROG_JAR
+ AC_JNI_INCLUDE_DIR
+
+ for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+ do
+ CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+ done
+
+ ADDITIONAL_LANG="$ADDITIONAL_LANG java"
+ INSTALL_LIBS="$INSTALL_LIBS \$(libjso_target)"
+else
+ JAVAC=nojavac
+fi
+
+# Optional RPC client/server.
+if test "$db_cv_rpc" = "yes"; then
+ AC_DEFINE(HAVE_RPC)
+ AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.])
+
+ RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)"
+ ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
+
+ EMBEDIX_ECD_RPC="/usr/bin/berkeley_db_svc"
+
+ case "$host_os" in
+ hpux*)
+ AC_CHECK_FUNC(svc_run,,
+ AC_CHECK_LIB(nsl, svc_run,
+ LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"));;
+ solaris*)
+ AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));;
+ esac
+fi
+
+AM_TCL_LOAD
+
+# Optional crypto support.
+if test -d "$srcdir/../crypto"; then
+ AC_DEFINE(HAVE_CRYPTO)
+ AH_TEMPLATE(HAVE_CRYPTO,
+ [Define to 1 if Berkeley DB release includes strong cryptography.])
+ ADDITIONAL_OBJS="aes_method${o} crypto${o} mt19937db${o} rijndael-alg-fst${o} rijndael-api-fst${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional DB 1.85 compatibility API.
+if test "$db_cv_compat185" = "yes"; then
+ ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
+ ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional utilities.
+if test "$db_cv_dump185" = "yes"; then
+ ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
+fi
+
+# Checks for compiler characteristics.
+AC_C_CONST
+
+# Checks for include files, structures, C types.
+AC_HEADER_STAT
+AC_HEADER_TIME
+AC_HEADER_DIRENT
+AC_CHECK_HEADERS(sys/select.h sys/time.h)
+AC_CHECK_MEMBERS([struct stat.st_blksize])
+AM_TYPES
+
+AC_CACHE_CHECK([for ANSI C exit success/failure values], db_cv_exit_defines, [
+AC_TRY_COMPILE([#include <stdlib.h>], return (EXIT_SUCCESS);,
+ [db_cv_exit_defines=yes], [db_cv_exit_defines=no])])
+if test "$db_cv_exit_defines" = yes; then
+ AC_DEFINE(HAVE_EXIT_SUCCESS)
+ AH_TEMPLATE(HAVE_EXIT_SUCCESS,
+ [Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines.])
+fi
+
+# Test for various functions/libraries that the test and example programs use:
+# sched_yield function
+# pthreads, socket and math libraries
+AC_CHECK_FUNC(sched_yield,,
+ AC_SEARCH_LIBS(sched_yield, rt, LOAD_LIBS="$LOAD_LIBS -lrt"))
+
+# XXX
+# We can't check for pthreads in the same way we did the test for sched_yield
+# because the Solaris C library includes pthread interfaces which are not
+# thread-safe. For that reason we always add -lpthread if we find a pthread
+# library. Also we can't depend on any specific call existing (pthread_create,
+# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has
+# this problem.
+AC_HAVE_LIBRARY(pthread, LOAD_LIBS="$LOAD_LIBS -lpthread")
+
+# XXX
+# We could be more exact about whether these libraries are needed, but we don't
+# bother -- if they exist, we load them.
+AC_HAVE_LIBRARY(m, LOAD_LIBS="$LOAD_LIBS -lm")
+AC_HAVE_LIBRARY(socket, LOAD_LIBS="$LOAD_LIBS -lsocket")
+AC_HAVE_LIBRARY(nsl, LOAD_LIBS="$LOAD_LIBS -lnsl")
+
+# Check for mutexes.
+# We do this here because it changes $LIBS.
+AM_DEFINE_MUTEXES
+
+# Checks for system functions for which we have replacements.
+#
+# XXX
+# The only portable getcwd call is getcwd(char *, size_t), where the
+# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
+# deleted getwd().
+AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise)
+AC_REPLACE_FUNCS(snprintf strcasecmp strdup strerror vsnprintf)
+
+# Check for system functions we optionally use.
+AC_CHECK_FUNCS(_fstati64 clock_gettime directio gettimeofday getuid)
+AC_CHECK_FUNCS(pstat_getdynamic sched_yield select strtoul sysconf yield)
+
+# Checks for system functions for which we don't have replacements.
+# We require qsort(3).
+AC_CHECK_FUNCS(qsort, , AC_MSG_ERROR([No qsort library function.]))
+
+# Pread/pwrite.
+# HP-UX has pread/pwrite, but it doesn't work with largefile support.
+case "$host_os" in
+hpux*)
+ AC_MSG_WARN([pread/pwrite interfaces ignored on $host_os.]);;
+*) AC_CHECK_FUNCS(pread pwrite)
+esac
+
+# Check for fcntl(2) to deny child process access to file descriptors.
+AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ fcntl(1, F_SETFD, 1);
+], [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])])
+if test "$db_cv_fcntl_f_setfd" = yes; then
+ AC_DEFINE(HAVE_FCNTL_F_SETFD)
+ AH_TEMPLATE(HAVE_FCNTL_F_SETFD,
+ [Define to 1 if fcntl/F_SETFD denies child access to file descriptors.])
+fi
+
+# A/UX has a broken getopt(3).
+case "$host_os" in
+aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
+esac
+
+# Linux has the O_DIRECT flag, but you can't actually use it.
+AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [
+echo "test for working open/O_DIRECT" > __o_direct_file
+AC_TRY_RUN([
+#include <sys/types.h>
+#include <fcntl.h>
+main() {
+int c, fd = open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+exit ((fd == -1) || (read(fd, &c, 1) != 1));
+}], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no],
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no]))
+rm -f __o_direct_file])
+if test "$db_cv_open_o_direct" = yes; then
+ AC_DEFINE(HAVE_O_DIRECT)
+ AH_TEMPLATE(HAVE_O_DIRECT, [Define to 1 if you have the O_DIRECT flag.])
+fi
+
+# Check for largefile support.
+AC_SYS_LARGEFILE
+
+# Figure out how to create shared regions.
+#
+# First, we look for mmap.
+#
+# BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
+#
+# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
+# is defined in the C library) but does not support munmap(2). Don't
+# try to use mmap if we can't find munmap.
+#
+# Ultrix has mmap(2), but it doesn't work.
+mmap_ok=no
+case "$host_os" in
+bsdi3*|bsdi4.0)
+ AC_MSG_WARN([mlock(2) interface ignored on BSD/OS 3.X and 4.0.])
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+ultrix*)
+ AC_MSG_WARN([mmap(2) interface ignored on Ultrix.]);;
+*)
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mlock munlock)
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+esac
+
+# Second, we look for shmget.
+#
+# SunOS has the shmget(2) interfaces, but there appears to be a missing
+# #include <debug/debug.h> file, so we ignore them.
+shmget_ok=no
+case "$host_os" in
+sunos*)
+ AC_MSG_WARN([shmget(2) interface ignored on SunOS.]);;
+*)
+ shmget_ok=yes
+ AC_CHECK_FUNCS(shmget, , shmget_ok=no);;
+esac
+
+# We require either mmap/munmap(2) or shmget(2).
+if test "$mmap_ok" = no -a "$shmget_ok" = no; then
+ AC_MSG_WARN([Neither mmap/munmap(2) nor shmget(2) library functions.])
+fi
+
+# If we're not doing version name substitution, DB_VERSION_UNIQUE_NAME
+# needs to be erased.
+if test "$db_cv_uniquename" = "no"; then
+ DB_VERSION_UNIQUE_NAME=""
+fi
+
+# This is necessary so that .o files in LIBOBJS are also built via
+# the ANSI2KNR-filtering rules.
+LIB@&t@OBJS=`echo "$LIB@&t@OBJS" |
+ sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'`
+LTLIBOBJS=`echo "$LIB@&t@OBJS" |
+ sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'`
+AC_SUBST(LTLIBOBJS)
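A stand-alone run of the two expressions above with the m4 quoting (@&t@ and the doubled brackets) stripped and an invented LIBOBJS value; the $U marker stays literal for make to expand later:

    LIBOBJS="getcwd.o snprintf.o"
    LIBOBJS=`echo "$LIBOBJS" | sed 's,\.[^.]* ,$U&,g;s,\.[^.]*$,$U&,'`
    LTLIBOBJS=`echo "$LIBOBJS" | sed 's,\.[^.]* ,.lo ,g;s,\.[^.]*$,.lo,'`
    echo "$LIBOBJS"      # getcwd$U.o snprintf$U.o
    echo "$LTLIBOBJS"    # getcwd$U.lo snprintf$U.lo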
+
+# Initial output file list.
+CREATE_LIST="Makefile
+ db_cxx.h:$srcdir/../dbinc/db_cxx.in
+ db_int.h:$srcdir/../dbinc/db_int.in
+ include.tcl:$srcdir/../test/include.tcl"
+
+# Create the db.h file from a source file, a list of global function
+# prototypes, and, if configured for unique names, a list of #defines
+# to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in"
+else
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in"
+fi
+
+# If configured for unique names, create the db_int_uext.h file (which
+# does the DB_VERSION_UNIQUE_NAME substitution), which is included by
+# the db_int.h file.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_int_def.h:$srcdir/../dbinc_auto/int_def.in"
+ db_int_def='#include "db_int_def.h"'
+fi
+
+# Create the db_185.h and db185_int.h files from source files, a list of
+# global function prototypes, and, if configured for unique names, a list
+# of #defines to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_compat185" = "yes"; then
+ if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ else
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ fi
+fi
+
+if test "$db_cv_embedix" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.ecd:../dist/db.ecd.in"
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in"
+fi
+
+AC_CONFIG_FILES($CREATE_LIST)
+AC_OUTPUT
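Each colon-separated CREATE_LIST entry becomes one CONFIG_FILES target; the sketch below (hypothetical file names, one invented substitution) mirrors how the config.status loop shown earlier splits such an entry and concatenates its inputs:

    printf '%s\n' '#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@' >db.in
    printf '%s\n' 'int db_create();'                            >ext_prot.in
    entry="db.h:db.in:ext_prot.in"
    out=`echo "$entry" | sed 's,:.*,,'`        # db.h
    ins=`echo "$entry" | sed 's,[^:]*:,,'`     # db.in:ext_prot.in
    ( IFS=:; cat $ins ) | sed 's,@DB_VERSION_MAJOR@,4,g' >"$out"
    cat db.h
    # #define DB_VERSION_MAJOR 4
    # int db_create();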
diff --git a/libdb/dist/db.ecd.in b/libdb/dist/db.ecd.in
new file mode 100644
index 0000000..f10ff53
--- /dev/null
+++ b/libdb/dist/db.ecd.in
@@ -0,0 +1,64 @@
+# Embedix Component Description (ECD) file for BerkeleyDB.
+#
+# $Id$
+
+<GROUP System>
+<GROUP Library>
+<COMPONENT BerkeleyDB>
+ SRPM=db
+ <SPECPATCH></SPECPATCH>
+ <HELP>
+ Berkeley DB is Sleepycat Software's programmatic database toolkit.
+ </HELP>
+
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB library?
+ <KEEPLIST>
+ /usr/lib/libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ /usr/include/db.h
+ /usr/lib/libdb.so
+ </KEEPLIST>
+ <PROVIDES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ </PROVIDES>
+ <REQUIRES>
+ ld-linux.so.2
+ libc.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+
+ @EMBEDIX_ECD_CXX@
+
+ <OPTION db-extra>
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB Utilities?
+ <KEEPLIST>
+ /usr/bin/db_archive
+ /usr/bin/db_checkpoint
+ /usr/bin/db_deadlock
+ /usr/bin/db_dump
+ /usr/bin/db_load
+ /usr/bin/db_printlog
+ /usr/bin/db_recover
+ /usr/bin/db_stat
+ /usr/bin/db_upgrade
+ /usr/bin/db_verify
+ @EMBEDIX_ECD_RPC@
+ </KEEPLIST>
+ <REQUIRES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ ld-linux.so.2
+ libc.so.6
+ libdl.so.2
+ libm.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+ </OPTION>
+
+</COMPONENT>
+</GROUP>
+</GROUP>
diff --git a/libdb/dist/db.spec.in b/libdb/dist/db.spec.in
new file mode 100644
index 0000000..ef253bc
--- /dev/null
+++ b/libdb/dist/db.spec.in
@@ -0,0 +1,52 @@
+# Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Summary: Sleepycat Berkeley DB database library
+Name: db
+Version: @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+Release: 1
+Copyright: Freely redistributable, see LICENSE for details.
+Source: http://www.sleepycat.com/update/@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+URL: http://www.sleepycat.com
+Group: System Environment/Libraries
+BuildRoot: @CONFIGURATION_PATH@/RPM_INSTALL
+
+%description
+Berkeley DB is a programmatic toolkit that provides fast, reliable,
+mission-critical, and scalable built-in database support for software
+ranging from embedded applications running on hand-held appliances to
+enterprise-scale servers.
+
+The Berkeley DB access methods include B+tree, Extended Linear Hashing,
+Fixed and Variable-length records, and Persistent Queues. Berkeley DB
+provides full transactional support, database recovery, online backups,
+and separate access to locking, logging and shared memory caching
+subsystems.
+
+Berkeley DB supports C, C++, Java, Tcl, Perl, and Python APIs. The
+software is available for Linux, a wide variety of UNIX platforms,
+Windows 95/98, Windows/NT, Windows 2000, VxWorks and QNX.
+
+%prep
+%setup
+
+%build
+cd build_unix
+CFLAGS="$RPM_OPT_FLAGS" ../dist/configure @CONFIGURATION_ARGS@
+make library_build
+
+%install
+cd build_unix
+make prefix=@CONFIGURATION_PATH@/RPM_INSTALL@EMBEDIX_ROOT@ install
+
+@RPM_POST_INSTALL@
+
+@RPM_POST_UNINSTALL@
+
+%files
+%defattr(-,root,root)
+%dir @EMBEDIX_ROOT@/bin
+%dir @EMBEDIX_ROOT@/docs
+%dir @EMBEDIX_ROOT@/include
+%dir @EMBEDIX_ROOT@/lib
+
+%changelog
diff --git a/libdb/dist/gen_inc.awk b/libdb/dist/gen_inc.awk
new file mode 100644
index 0000000..4d24562
--- /dev/null
+++ b/libdb/dist/gen_inc.awk
@@ -0,0 +1,73 @@
+# This awk script parses C input files looking for lines marked "PUBLIC:"
+# and "EXTERN:". (PUBLIC lines are DB internal function prototypes and
+# #defines, EXTERN are DB external function prototypes and #defines.)
+#
+# PUBLIC lines are put into two versions of per-directory include files:
+# one file that contains the prototypes, and one file that contains a
+# #define for the name to be processed during configuration when creating
+# unique names for every global symbol in the DB library.
+#
+# The EXTERN lines are put into two files: one of which contains prototypes
+# which are always appended to the db.h file, and one of which contains a
+# #define list for use when creating unique symbol names.
+#
+# Four arguments:
+# e_dfile list of EXTERN #defines
+# e_pfile include file that contains EXTERN prototypes
+# i_dfile list of internal (PUBLIC) #defines
+# i_pfile include file that contains internal (PUBLIC) prototypes
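+#
+# As a sketch of the transformation (the function name below is made up for
+# illustration, not taken from the DB sources), a source line such as
+#
+#	 * PUBLIC: int __bam_foo __P((DB *, u_int32_t));
+#
+# is copied into i_pfile as
+#
+#	int __bam_foo __P((DB *, u_int32_t));
+#
+# and produces this entry in i_dfile:
+#
+#	#define __bam_foo __bam_foo@DB_VERSION_UNIQUE_NAME@
+#
+# EXTERN: lines are handled analogously, writing to e_pfile and e_dfile
+# (with the txn_ exception described below).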
+/PUBLIC:/ {
+ sub("^.*PUBLIC:[ ][ ]*", "")
+ if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ print $0 >> i_pfile
+ print $0 >> i_dfile
+ next
+ }
+ pline = sprintf("%s %s", pline, $0)
+ if (pline ~ "));") {
+ sub("^[ ]*", "", pline)
+ print pline >> i_pfile
+ if (pline !~ db_version_unique_name) {
+ def = gensub("[ ][ ]*__P.*", "", 1, pline)
+ sub("^.*[ ][*]*", "", def)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ def, def) >> i_dfile
+ }
+ pline = ""
+ }
+}
+
+# When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+# were the interfaces applications would likely use and not be willing to
+# change, due to the sheer volume of the calls. Provide wrappers -- we
+# could do txn_abort and txn_commit using macros, but not txn_begin, because
+# the field is named txn_begin and we didn't want to modify it.
+#
+# The issue with txn_begin hits us in another way. If configured with the
+# --with-uniquename option, we use #defines to re-define DB's interfaces
+# to unique names. We can't do that for these functions because txn_begin
+# is also a field name in the DB_ENV structure, and the #defines we use go
+# at the end of the db.h file -- we get control too late to #define a field
+# name. So, modify the script that generates the unique names #defines to
+# not generate them for these three functions, and don't include the three
+# functions in libraries built with that configuration option.
+/EXTERN:/ {
+ sub("^.*EXTERN:[ ][ ]*", "")
+ if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ print $0 >> e_pfile
+ print $0 >> e_dfile
+ next
+ }
+ eline = sprintf("%s %s", eline, $0)
+ if (eline ~ "));") {
+ sub("^[ ]*", "", eline)
+ print eline >> e_pfile
+ if (eline !~ db_version_unique_name && eline !~ "^int txn_") {
+ def = gensub("[ ][ ]*__P.*", "", 1, eline)
+ sub("^.*[ ][*]*", "", def)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ def, def) >> e_dfile
+ }
+ eline = ""
+ }
+}
diff --git a/libdb/dist/gen_rec.awk b/libdb/dist/gen_rec.awk
new file mode 100644
index 0000000..a004191
--- /dev/null
+++ b/libdb/dist/gen_rec.awk
@@ -0,0 +1,844 @@
+#!/bin/sh -
+#
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# This awk script generates all the log, print, and read routines for the DB
+# logging. It also generates a template for the recovery functions (these
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path).
+#
+# For a given file prefix.src, we generate a file prefix_auto.c, and a file
+# prefix_auto.h that contains:
+#
+# external declarations for the file's functions
+# defines for the physical record types
+# (logical types are defined in each subsystem manually)
+# structures to contain the data unmarshalled from the log.
+#
+# This awk script requires that three variables be set when it is called:
+#
+# source_file -- the C source file being created
+# header_file -- the C #include file being created
+# template_file -- the template file being created
+#
+# And stdin must be the input file that defines the recovery setup.
+#
+# Within each file prefix.src, we use a number of public keywords (documented
+# in the reference guide) as well as the following ones which are private to
+# DB:
+# DBPRIVATE Indicates that a file will be built as part of DB,
+# rather than compiled independently, and so can use
+# DB-private interfaces (such as DB_NOCOPY).
+# DB A DB handle. Logs the dbreg fileid for that handle,
+# and makes the *_log interface take a DB * instead of a
+# DB_ENV *.
+# PGDBT Just like DBT, only we know it stores a page or page
+# header, so we can byte-swap it (once we write the
+# byte-swapping code, which doesn't exist yet).
+# WRLOCK
+# WRLOCKNZ An ARG that stores a db_pgno_t, which the getpgnos
+# function should acquire a lock on. WRLOCK implies
+# that we should always get the lock; WRLOCKNZ implies
+# that we should do so if and only if the pgno is non-zero
+# (unfortunately, 0 is both PGNO_INVALID and the main
+# metadata page number).
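+#
+# As an illustration only (the names below are made up, not from a real
+# .src file), a record description of the form
+#
+#	PREFIX	__ex
+#	BEGIN	pg_free	42
+#	DB	fileid		int32_t		ld
+#	WRLOCK	pgno		db_pgno_t	lu
+#	DBT	header		DBT		s
+#	END
+#
+# generates the DB___ex_pg_free record type (42), the __ex_pg_free_args
+# structure, and the __ex_pg_free_log, __ex_pg_free_print and
+# __ex_pg_free_read routines (plus __ex_pg_free_getpgnos when DBPRIVATE
+# is in effect).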
+
+BEGIN {
+ if (source_file == "" ||
+ header_file == "" || template_file == "") {
+ print "Usage: gen_rec.awk requires three variables to be set:"
+ print "\tsource_file\t-- the C source file being created"
+ print "\theader_file\t-- the C #include file being created"
+ print "\ttemplate_file\t-- the template file being created"
+ exit
+ }
+ FS="[\t ][\t ]*"
+ CFILE=source_file
+ HFILE=header_file
+ TFILE=template_file
+ dbprivate = 0
+}
+/^[ ]*DBPRIVATE/ {
+ dbprivate = 1
+}
+/^[ ]*PREFIX/ {
+ prefix = $2
+ num_funcs = 0;
+
+ # Start .c file.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n") \
+ > CFILE
+
+ # Start .h file, make the entire file conditional.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \
+ > HFILE
+ printf("#ifndef\t%s_AUTO_H\n#define\t%s_AUTO_H\n", prefix, prefix) \
+ >> HFILE;
+
+ # Write recovery template file headers
+ # This assumes we're doing DB recovery.
+ printf("#include \"db_config.h\"\n\n") > TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"dbinc/db_page.h\"\n") >> TFILE
+ printf("#include \"dbinc/%s.h\"\n", prefix) >> TFILE
+ printf("#include \"dbinc/log.h\"\n\n") >> TFILE
+}
+/^[ ]*INCLUDE/ {
+ if ($3 == "")
+ printf("%s\n", $2) >> CFILE
+ else
+ printf("%s %s\n", $2, $3) >> CFILE
+}
+/^[ ]*(BEGIN|IGNORED)/ {
+ if (in_begin) {
+ print "Invalid format: missing END statement"
+ exit
+ }
+ in_begin = 1;
+ is_dbt = 0;
+ has_dbp = 0;
+ is_uint = 0;
+ need_log_function = ($1 == "BEGIN");
+ nvars = 0;
+
+ # number of locks that the getpgnos functions will return
+ nlocks = 0;
+
+ thisfunc = $2;
+ funcname = sprintf("%s_%s", prefix, $2);
+
+ rectype = $3;
+
+ funcs[num_funcs] = funcname;
+ ++num_funcs;
+}
+/^[ ]*(DB|ARG|DBT|PGDBT|POINTER|WRLOCK|WRLOCKNZ)/ {
+ vars[nvars] = $2;
+ types[nvars] = $3;
+ atypes[nvars] = $1;
+ modes[nvars] = $1;
+ formats[nvars] = $NF;
+ for (i = 4; i < NF; i++)
+ types[nvars] = sprintf("%s %s", types[nvars], $i);
+
+ if ($1 == "DB") {
+ has_dbp = 1;
+ }
+
+ if ($1 == "DB" || $1 == "ARG" || $1 == "WRLOCK" || $1 == "WRLOCKNZ") {
+ sizes[nvars] = sprintf("sizeof(u_int32_t)");
+ is_uint = 1;
+ } else if ($1 == "POINTER")
+ sizes[nvars] = sprintf("sizeof(*%s)", $2);
+ else { # DBT, PGDBT
+ sizes[nvars] = \
+ sprintf("sizeof(u_int32_t) + (%s == NULL ? 0 : %s->size)", \
+ $2, $2);
+ is_dbt = 1;
+ }
+ nvars++;
+}
+/^[ ]*(WRLOCK|WRLOCKNZ)/ {
+ nlocks++;
+
+ if ($1 == "WRLOCK") {
+ lock_if_zero[nlocks] = 1;
+ } else {
+ lock_if_zero[nlocks] = 0;
+ }
+
+ lock_pgnos[nlocks] = $2;
+}
+/^[ ]*END/ {
+ if (!in_begin) {
+ print "Invalid format: missing BEGIN statement"
+ exit;
+ }
+
+ # Declare the record type.
+ printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE
+
+ # Structure declaration.
+ printf("typedef struct _%s_args {\n", funcname) >> HFILE
+
+ # Here are the required fields for every structure
+ printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE
+ printf("\tDB_LSN prev_lsn;\n") >>HFILE
+
+ # Here are the specified fields.
+ for (i = 0; i < nvars; i++) {
+ t = types[i];
+ if (modes[i] == "POINTER") {
+ ndx = index(t, "*");
+ t = substr(types[i], 0, ndx - 2);
+ }
+ printf("\t%s\t%s;\n", t, vars[i]) >> HFILE
+ }
+ printf("} %s_args;\n\n", funcname) >> HFILE
+
+ # Output the log, print, read, and getpgnos functions.
+ if (need_log_function) {
+ log_function();
+
+ # The getpgnos function calls DB-private (__rep_*) functions,
+ # so we only generate it for our own logging functions,
+ # not application-specific ones.
+ if (dbprivate) {
+ getpgnos_function();
+ }
+ }
+ print_function();
+ read_function();
+
+ # Recovery template
+ cmd = sprintf(\
+ "sed -e s/PREF/%s/ -e s/FUNC/%s/ < template/rec_ctemp >> %s",
+ prefix, thisfunc, TFILE)
+ system(cmd);
+
+ # Done writing stuff, reset and continue.
+ in_begin = 0;
+}
+
+END {
+ # End the conditional for the HFILE
+ printf("#endif\n") >> HFILE;
+
+ # Print initialization routine; function prototype
+ p[1] = sprintf("int %s_init_print %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call __db_add_recovery(print_fn, id)
+ printf("int\n%s_init_print(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n") >> CFILE;
+ # If application-specific, the user will need a prototype for
+ # __db_add_recovery, since they won't have DB's.
+ if (!dbprivate) {
+ printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> CFILE;
+ printf(\
+"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> CFILE;
+ printf("\t size_t *,\n") >> CFILE;
+ printf(\
+"\t int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));\n") \
+ >> CFILE;
+ }
+
+ printf("\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_print, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # We only want to generate *_init_{getpgnos,recover} functions
+ # if this is a DB-private, rather than application-specific,
+ # set of recovery functions. Application-specific recovery functions
+ # should be dispatched using the DB_ENV->set_app_dispatch callback
+ # rather than a DB dispatch table ("dtab").
+ if (!dbprivate)
+ exit
+
+ # Page number initialization routine; function prototype
+ p[1] = sprintf("int %s_init_getpgnos %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(pgno_fn, id)
+ printf("int\n%s_init_getpgnos(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_getpgnos, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # Recover initialization routine
+ p[1] = sprintf("int %s_init_recover %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(func, id)
+ printf("int\n%s_init_recover(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_recover, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n") >> CFILE;
+}
+
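+# log_function --
+#	Write the *_log routine, which marshals the record's arguments into
+#	a DBT and hands it to DB_ENV->log_put.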
+function log_function() {
+ # Write the log function; function prototype
+ pi = 1;
+ p[pi++] = sprintf("int %s_log", funcname);
+ p[pi++] = " ";
+ if (has_dbp == 1) {
+ p[pi++] = "__P((DB *, DB_TXN *, DB_LSN *, u_int32_t";
+ } else {
+ p[pi++] = "__P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t";
+ }
+ for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DB")
+ continue;
+ p[pi++] = ", ";
+ p[pi++] = sprintf("%s%s%s",
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? "const " : "",
+ types[i],
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? " *" : "");
+ }
+ p[pi++] = "";
+ p[pi++] = "));";
+ p[pi++] = "";
+ proto_format(p);
+
+ # Function declaration
+ if (has_dbp == 1) {
+ printf("int\n%s_log(dbp, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ } else {
+ printf("int\n%s_log(dbenv, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ }
+ for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DB") {
+ # We pass in fileids on the dbp, so if this is one,
+ # skip it.
+ continue;
+ }
+ printf(",") >> CFILE;
+ if ((i % 6) == 0)
+ printf("\n ") >> CFILE;
+ else
+ printf(" ") >> CFILE;
+ printf("%s", vars[i]) >> CFILE;
+ }
+ printf(")\n") >> CFILE;
+
+ # Now print the parameters
+ if (has_dbp == 1) {
+ printf("\tDB *dbp;\n") >> CFILE;
+ } else {
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ }
+ printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE;
+ printf("\tu_int32_t flags;\n") >> CFILE;
+ for (i = 0; i < nvars; i++) {
+ # We just skip for modes == DB.
+ if (modes[i] == "DBT" || modes[i] == "PGDBT")
+ printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE;
+ else if (modes[i] != "DB")
+ printf("\t%s %s;\n", types[i], vars[i]) >> CFILE;
+ }
+
+ # Function body and local decls
+ printf("{\n") >> CFILE;
+ printf("\tDBT logrec;\n") >> CFILE;
+ if (has_dbp == 1)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp, null_lsn;\n") >> CFILE;
+ if (is_dbt == 1)
+ printf("\tu_int32_t zero;\n") >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
+ printf("\tu_int32_t npad, rectype, txn_num;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tu_int8_t *bp;\n\n") >> CFILE;
+
+ # Initialization
+ if (has_dbp == 1)
+ printf("\tdbenv = dbp->dbenv;\n") >> CFILE;
+ printf("\trectype = DB_%s;\n", funcname) >> CFILE;
+ printf("\tnpad = 0;\n\n") >> CFILE;
+
+ printf("\tif (txnid == NULL) {\n") >> CFILE;
+ printf("\t\ttxn_num = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.file = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.offset = 0;\n") >> CFILE;
+ printf("\t\tlsnp = &null_lsn;\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ if (funcname != "__db_debug" && dbprivate) {
+ printf(\
+ "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE;
+ printf("\t\t (ret = __txn_activekids(") >> CFILE;
+ printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE;
+ printf("\t\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE;
+ printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
+ printf("\t}\n\n") >> CFILE;
+
+ # Malloc
+ printf("\tlogrec.size = sizeof(rectype) + ") >> CFILE;
+ printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE;
+ for (i = 0; i < nvars; i++)
+ printf("\n\t + %s", sizes[i]) >> CFILE;
+ printf(";\n") >> CFILE
+ if (dbprivate) {
+ printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE;
+ printf("\t\tnpad =\n") >> CFILE
+ printf(\
+"\t\t ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);\n")\
+ >> CFILE;
+ printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE
+ }
+ write_malloc("logrec.data", "logrec.size", CFILE)
+ printf("\tif (npad > 0)\n") >> CFILE;
+ printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") >> CFILE;
+ printf("- npad, 0, npad);\n\n") >> CFILE;
+
+ # Copy args into buffer
+ printf("\tbp = logrec.data;\n\n") >> CFILE;
+ printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE;
+ printf("\tbp += sizeof(rectype);\n\n") >> CFILE;
+ printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE;
+ printf("\tbp += sizeof(txn_num);\n\n") >> CFILE;
+ printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
+
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ") {
+ printf("\tuinttmp = (u_int32_t)%s;\n", \
+ vars[i]) >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
+ } else if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE;
+ printf("\t\tzero = 0;\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \
+ >> CFILE;
+ printf("\t\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &%s->size, ", vars[i]) >> CFILE;
+ printf("sizeof(%s->size));\n", vars[i]) >> CFILE;
+ printf("\t\tbp += sizeof(%s->size);\n", vars[i]) \
+ >> CFILE;
+ printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\t\tbp += %s->size;\n\t}\n\n", \
+ vars[i]) >> CFILE;
+ } else if (modes[i] == "DB") {
+ # We need to log a DB handle. To do this, we
+ # actually just log its fileid; from that, we'll
+ # be able to acquire an open handle at recovery time.
+ printf("\tDB_ASSERT(dbp->log_filename != NULL);\n") \
+ >> CFILE;
+ printf("\tif (dbp->log_filename->id == ") >> CFILE;
+ printf("DB_LOGFILEID_INVALID &&\n\t ") >> CFILE
+ printf("(ret = __dbreg_lazy_id(dbp)) != 0)\n") \
+ >> CFILE;
+ printf("\t\treturn (ret);\n\n") >> CFILE;
+
+ printf("\tuinttmp = ") >> CFILE;
+ printf("(u_int32_t)dbp->log_filename->id;\n") >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
+ } else { # POINTER
+ printf("\tif (%s != NULL)\n", vars[i]) >> CFILE;
+ printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \
+ sizes[i]) >> CFILE;
+ printf("\telse\n") >> CFILE;
+ printf("\t\tmemset(bp, 0, %s);\n", sizes[i]) >> CFILE;
+ printf("\tbp += %s;\n\n", sizes[i]) >> CFILE;
+ }
+ }
+
+ # Error checking. User code won't have DB_ASSERT available, but
+ # this is a pretty unlikely assertion anyway, so we just leave it out
+ # rather than requiring assert.h.
+ if (dbprivate) {
+ printf("\tDB_ASSERT((u_int32_t)") >> CFILE;
+ printf("(bp - (u_int8_t *)logrec.data) <= logrec.size);\n") \
+ >> CFILE;
+ }
+
+ # Issue log call
+ # We didn't call the crypto alignment function when we created this
+ # log record (because we don't have the right header files to find
+ # the function), so we have to copy the log record to make sure the
+ # alignment is correct.
+ printf(\
+ "\tret = dbenv->log_put(dbenv,\n\t ret_lsnp, (DBT *)&logrec, ") \
+ >> CFILE;
+ if (dbprivate) {
+ printf("flags | DB_NOCOPY);\n") >> CFILE;
+ } else {
+ printf("flags);\n") >> CFILE;
+ }
+
+	# Update the transaction's last_lsn
+ printf("\tif (txnid != NULL && ret == 0)\n") >> CFILE;
+ printf("\t\ttxnid->last_lsn = *ret_lsnp;\n") >> CFILE;
+
+	# If we run out of disk space, log writes may fail.  If we are
+	# debugging, print out which records did not make it to disk.
+ printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE
+ printf("\tif (ret != 0)\n") >> CFILE;
+ printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE;
+ printf("\t\t (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") >> CFILE
+ printf("#endif\n") >> CFILE
+
+ # Free and return
+ write_free("logrec.data", CFILE)
+ printf("\treturn (ret);\n}\n\n") >> CFILE;
+}
+
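+# print_function --
+#	Write the *_print routine, which reads a log record back into its
+#	_args structure and prints each field for debugging.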
+function print_function() {
+ # Write the print function; function prototype
+ p[1] = sprintf("int %s_print", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_print(dbenv, ", funcname) >> CFILE;
+ printf("dbtp, lsnp, notused2, notused3)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *dbtp;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> CFILE;
+
+ # Locals
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ for (i = 0; i < nvars; i ++)
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tu_int32_t i;\n") >> CFILE
+ printf("\tint ch;\n") >> CFILE
+ break;
+ }
+
+ printf("\tint ret;\n\n") >> CFILE;
+
+ # Get rid of complaints about unused parameters.
+ printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> CFILE;
+
+ # Call read routine to initialize structure
+ printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Print values in every record
+ printf("\t(void)printf(\n\t \"[%%lu][%%lu]%s: ", funcname) >> CFILE;
+ printf("rec: %%lu txnid %%lx ") >> CFILE;
+ printf("prevlsn [%%lu][%%lu]\\n\",\n") >> CFILE;
+ printf("\t (u_long)lsnp->file,\n") >> CFILE;
+ printf("\t (u_long)lsnp->offset,\n") >> CFILE;
+ printf("\t (u_long)argp->type,\n") >> CFILE;
+ printf("\t (u_long)argp->txnid->txnid,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.file,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.offset);\n") >> CFILE;
+
+ # Now print fields of argp
+ for (i = 0; i < nvars; i ++) {
+ printf("\t(void)printf(\"\\t%s: ", vars[i]) >> CFILE;
+
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\");\n") >> CFILE;
+ printf("\tfor (i = 0; i < ") >> CFILE;
+ printf("argp->%s.size; i++) {\n", vars[i]) >> CFILE;
+ printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \
+ vars[i]) >> CFILE;
+ printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> CFILE;
+ printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> CFILE;
+ printf("\t}\n\t(void)printf(\"\\n\");\n") >> CFILE;
+ } else if (types[i] == "DB_LSN *") {
+ printf("[%%%s][%%%s]\\n\",\n", \
+ formats[i], formats[i]) >> CFILE;
+ printf("\t (u_long)argp->%s.file,", \
+ vars[i]) >> CFILE;
+ printf(" (u_long)argp->%s.offset);\n", \
+ vars[i]) >> CFILE;
+ } else {
+ if (formats[i] == "lx")
+ printf("0x") >> CFILE;
+ printf("%%%s\\n\", ", formats[i]) >> CFILE;
+ if (formats[i] == "lx" || formats[i] == "lu")
+ printf("(u_long)") >> CFILE;
+ if (formats[i] == "ld")
+ printf("(long)") >> CFILE;
+ printf("argp->%s);\n", vars[i]) >> CFILE;
+ }
+ }
+ printf("\t(void)printf(\"\\n\");\n") >> CFILE;
+ write_free("argp", CFILE);
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+}
+
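+# read_function --
+#	Write the *_read routine, which unmarshals a log record buffer into
+#	a freshly allocated _args structure.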
+function read_function() {
+ # Write the read function; function prototype
+ p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname);
+ p[2] = " ";
+ p[3] = sprintf("%s_args **));", funcname);
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE;
+
+ # Now print the parameters
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tvoid *recbuf;\n") >> CFILE;
+ printf("\t%s_args **argpp;\n", funcname) >> CFILE;
+
+ # Function body and local decls
+ printf("{\n\t%s_args *argp;\n", funcname) >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
+ printf("\tu_int8_t *bp;\n") >> CFILE;
+
+
+ if (dbprivate) {
+ # We only use dbenv and ret in the private malloc case.
+ printf("\tint ret;\n\n") >> CFILE;
+ } else {
+ printf("\t/* Keep the compiler quiet. */\n") >> CFILE;
+ printf("\n\tdbenv = NULL;\n") >> CFILE;
+ }
+
+ malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", funcname)
+ write_malloc("argp", malloc_size, CFILE)
+
+ # Set up the pointers to the txnid.
+ printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE;
+
+ # First get the record type, prev_lsn, and txnid fields.
+
+ printf("\tbp = recbuf;\n") >> CFILE;
+ printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->type);\n\n") >> CFILE;
+ printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE;
+ printf("sizeof(argp->txnid->txnid));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE;
+ printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
+
+ # Now get rest of data.
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE;
+ printf("bp, sizeof(u_int32_t));\n") >> CFILE;
+ printf("\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\targp->%s.data = bp;\n", vars[i]) >> CFILE;
+ printf("\tbp += argp->%s.size;\n", vars[i]) >> CFILE;
+ } else if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ" || modes[i] == "DB") {
+ printf("\tmemcpy(&uinttmp, bp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\targp->%s = (%s)uinttmp;\n", vars[i], \
+ types[i]) >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n") >> CFILE;
+ } else { # POINTER
+ printf("\tmemcpy(&argp->%s, bp, ", vars[i]) >> CFILE;
+ printf(" sizeof(argp->%s));\n", vars[i]) >> CFILE;
+ printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
+ }
+ printf("\n") >> CFILE;
+ }
+
+ # Free and return
+ printf("\t*argpp = argp;\n") >> CFILE;
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+}
+
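+# getpgnos_function --
+#	Write the *_getpgnos routine, which lists the pages this record
+#	touches (as DB_PAGE_LOCK descriptors) in the caller's TXN_RECS
+#	summary.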
+function getpgnos_function() {
+ # Write the getpgnos function; function prototype
+ p[1] = sprintf("int %s_getpgnos", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_getpgnos(dbenv, ", funcname) >> CFILE;
+ printf("rec, lsnp, notused1, summary)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *rec;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused1;\n") >> CFILE;
+ printf("\tvoid *summary;\n{\n") >> CFILE;
+
+ # If there are no locks, return this fact.
+ if (nlocks == 0) {
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tCOMPQUIET(rec, NULL);\n") >> CFILE;
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n") >> CFILE;
+
+ printf("\n\tt = (TXN_RECS *)summary;\n") >> CFILE;
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, ") >> CFILE;
+ printf("t, 1)) != 0)\n") >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ printf("\n\tt->array[t->npages].flags = LSN_PAGE_NOLOCK;\n") \
+ >> CFILE;
+ printf("\tt->array[t->npages].lsn = *lsnp;\n") >> CFILE;
+ printf("\tt->array[t->npages].fid = DB_LOGFILEID_INVALID;\n") \
+ >> CFILE;
+ printf("\tmemset(&t->array[t->npages].pgdesc, 0,\n") >> CFILE;
+ printf("\t sizeof(t->array[t->npages].pgdesc));\n") >> CFILE;
+ printf("\n\tt->npages++;\n") >> CFILE;
+
+ printf("\n") >> CFILE;
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+ return;
+ }
+
+ # Locals
+ printf("\tDB *dbp;\n") >> CFILE;
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ printf("\tu_int32_t ret;\n\n") >> CFILE;
+
+ # Shut up compiler.
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n\n") >> CFILE;
+
+ printf("\targp = NULL;\n") >> CFILE;
+ printf("\tt = (TXN_RECS *)summary;\n\n") >> CFILE;
+
+ printf("\tif ((ret = %s_read(dbenv, rec->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Get file ID.
+ printf("\n\tif ((ret = __dbreg_id_to_db(dbenv,\n\t ") >> CFILE;
+ printf("argp->txnid, &dbp, argp->fileid, 0)) != 0)\n") >> CFILE;
+ printf("\t\tgoto err;\n") >> CFILE;
+
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, t, %d)) != 0)\n", \
+ nlocks) >> CFILE;
+ printf("\t\tgoto err;\n\n") >> CFILE;
+
+ for (i = 1; i <= nlocks; i++) {
+ if (lock_if_zero[i]) {
+ indent = "\t";
+ } else {
+ indent = "\t\t";
+ printf("\tif (argp->%s != PGNO_INVALID) {\n", \
+ lock_pgnos[i]) >> CFILE;
+ }
+ printf("%st->array[t->npages].flags = 0;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].fid = argp->fileid;\n", indent) \
+ >> CFILE;
+ printf("%st->array[t->npages].lsn = *lsnp;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.pgno = argp->%s;\n", \
+ indent, lock_pgnos[i]) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.type = DB_PAGE_LOCK;\n", \
+ indent) >> CFILE;
+ printf("%smemcpy(t->array[t->npages].pgdesc.fileid, ", indent) \
+ >> CFILE;
+ printf("dbp->fileid,\n%s DB_FILE_ID_LEN);\n", \
+ indent, indent) >> CFILE;
+ printf("%st->npages++;\n", indent) >> CFILE;
+ if (!lock_if_zero[i]) {
+ printf("\t}\n") >> CFILE;
+ }
+ }
+
+ printf("\nerr:\tif (argp != NULL)\n") >> CFILE;
+ write_free("argp", CFILE);
+
+ printf("\treturn (ret);\n") >> CFILE;
+
+ printf("}\n\n") >> CFILE;
+}
+
+# proto_format --
+# Pretty-print a function prototype.
+function proto_format(p)
+{
+ printf("/*\n") >> CFILE;
+
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ t = " * PUBLIC: "
+ if (length(s) + length(t) < 80)
+ printf("%s%s", t, s) >> CFILE;
+ else {
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> CFILE
+
+ n = split(p[2], comma, ",");
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 70) {
+ printf("\n * PUBLIC: ") >> CFILE;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> CFILE;
+ len += length(comma[i]) + 2;
+ }
+ }
+ printf("\n */\n") >> CFILE;
+ delete p;
+}
+
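+# write_malloc --
+#	Emit allocation code: __os_malloc() when generating DB-private
+#	sources, plain malloc() for application-specific ones.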
+function write_malloc(ptr, size, file)
+{
+ if (dbprivate) {
+ printf("\tif ((ret = ") >> file;
+ printf(\
+ "__os_malloc(dbenv,\n\t " size ", &" ptr ")) != 0)\n") \
+ >> file
+ printf("\t\treturn (ret);\n\n") >> file;
+ } else {
+ printf("\tif ((" ptr " = malloc(" size ")) == NULL)\n") >> file
+ printf("\t\treturn (ENOMEM);\n\n") >> file
+ }
+}
+
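+# write_free --
+#	Emit the matching release code: __os_free() or free().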
+function write_free(ptr, file)
+{
+ if (dbprivate) {
+ printf("\t__os_free(dbenv, " ptr ");\n") >> file
+ } else {
+ printf("\tfree(" ptr ");\n") >> file
+ }
+}
diff --git a/libdb/dist/gen_rpc.awk b/libdb/dist/gen_rpc.awk
new file mode 100644
index 0000000..ccc330e
--- /dev/null
+++ b/libdb/dist/gen_rpc.awk
@@ -0,0 +1,1214 @@
+#
+# $Id$
+# Awk script for generating client/server RPC code.
+#
+# This awk script generates most of the RPC routines for DB client/server
+# use.  It also generates a template for server and client procedures (these
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path).
+#
+# This awk script requires that these variables be set when it is called:
+#
+# major -- Major version number
+# minor -- Minor version number
+# xidsize -- size of GIDs
+# client_file -- the C source file being created for client code
+# ctmpl_file -- the C template file being created for client code
+# sed_file -- the sed file created to alter server proc code
+# server_file -- the C source file being created for server code
+# stmpl_file -- the C template file being created for server code
+# xdr_file -- the XDR message file created
+#
+# And stdin must be the input file that defines the RPC setup.
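+#
+# The input is a set of tab-separated blocks; as a purely illustrative
+# sketch (the operation and argument names below are invented), a block
+# such as
+#
+#	BEGIN	foo_op	RETCODE
+#	ARG	ID	DB_ENV *	dbenv
+#	ARG	INT	u_int32_t	flags
+#	RET	INT	u_int32_t	count
+#	END
+#
+# produces the __foo_op_msg and __foo_op_reply XDR structures, the
+# __dbcl_foo_op client routine (and a __dbcl_foo_op_ret template, since
+# RETCODE is set), and a __db_foo_op_<major><minor> server stub plus a
+# __foo_op_proc server-procedure template.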
+BEGIN {
+ if (major == "" || minor == "" || xidsize == "" ||
+ client_file == "" || ctmpl_file == "" ||
+ sed_file == "" || server_file == "" ||
+ stmpl_file == "" || xdr_file == "") {
+ print "Usage: gen_rpc.awk requires these variables be set:"
+ print "\tmajor\t-- Major version number"
+ print "\tminor\t-- Minor version number"
+ print "\txidsize\t-- GID size"
+ print "\tclient_file\t-- the client C source file being created"
+ print "\tctmpl_file\t-- the client template file being created"
+ print "\tsed_file\t-- the sed command file being created"
+ print "\tserver_file\t-- the server C source file being created"
+ print "\tstmpl_file\t-- the server template file being created"
+ print "\txdr_file\t-- the XDR message file being created"
+ error = 1; exit
+ }
+
+ FS="\t\t*"
+ CFILE=client_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > CFILE
+
+ TFILE = ctmpl_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > TFILE
+
+ SFILE = server_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > SFILE
+
+ # Server procedure template and a sed file to massage an existing
+ # template source file to change args.
+ # SEDFILE should be same name as PFILE but .c
+ #
+ PFILE = stmpl_file
+ SEDFILE = sed_file
+ printf("") > SEDFILE
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > PFILE
+
+ XFILE = xdr_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > XFILE
+ nendlist = 1;
+}
+END {
+ printf("#endif /* HAVE_RPC */\n") >> CFILE
+ printf("#endif /* HAVE_RPC */\n") >> TFILE
+ printf("program DB_RPC_SERVERPROG {\n") >> XFILE
+ printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE
+
+ for (i = 1; i < nendlist; ++i)
+ printf("\t\t%s;\n", endlist[i]) >> XFILE
+
+ printf("\t} = %d%03d;\n", major, minor) >> XFILE
+ printf("} = 351457;\n") >> XFILE
+}
+
+/^[ ]*BEGIN/ {
+ name = $2;
+ nofunc_code = 0;
+ funcvars = 0;
+ ret_code = 0;
+ if ($3 == "NOFUNC")
+ nofunc_code = 1;
+ if ($3 == "RETCODE")
+ ret_code = 1;
+
+ nvars = 0;
+ rvars = 0;
+ newvars = 0;
+ db_handle = 0;
+ env_handle = 0;
+ dbc_handle = 0;
+ txn_handle = 0;
+ mp_handle = 0;
+ dbt_handle = 0;
+ xdr_free = 0;
+}
+/^[ ]*ARG/ {
+ rpc_type[nvars] = $2;
+ c_type[nvars] = $3;
+ pr_type[nvars] = $3;
+ args[nvars] = $4;
+ func_arg[nvars] = 0;
+ if (rpc_type[nvars] == "LIST") {
+ list_type[nvars] = $5;
+ } else
+ list_type[nvars] = 0;
+
+ if (c_type[nvars] == "DBT *")
+ dbt_handle = 1;
+
+ if (c_type[nvars] == "DB_ENV *") {
+ ctp_type[nvars] = "CT_ENV";
+ env_handle = 1;
+ env_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB *") {
+ ctp_type[nvars] = "CT_DB";
+ if (db_handle != 1) {
+ db_handle = 1;
+ db_idx = nvars;
+ }
+ }
+
+ if (c_type[nvars] == "DBC *") {
+ ctp_type[nvars] = "CT_CURSOR";
+ dbc_handle = 1;
+ dbc_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_TXN *") {
+ ctp_type[nvars] = "CT_TXN";
+ txn_handle = 1;
+ txn_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_MPOOLFILE *") {
+ mp_handle = 1;
+ mp_idx = nvars;
+ }
+
+ ++nvars;
+}
+/^[ ]*FUNCPROT/ {
+ pr_type[nvars] = $2;
+}
+/^[ ]*FUNCARG/ {
+ rpc_type[nvars] = "IGNORE";
+ c_type[nvars] = $2;
+ args[nvars] = sprintf("func%d", funcvars);
+ func_arg[nvars] = 1;
+ ++funcvars;
+ ++nvars;
+}
+/^[ ]*RET/ {
+ ret_type[rvars] = $2;
+ retc_type[rvars] = $3;
+ retargs[rvars] = $4;
+ if (ret_type[rvars] == "LIST" || ret_type[rvars] == "DBT") {
+ xdr_free = 1;
+ }
+ if (ret_type[rvars] == "LIST") {
+ retlist_type[rvars] = $5;
+ } else
+ retlist_type[rvars] = 0;
+
+ ++rvars;
+}
+/^[ ]*END/ {
+ #
+ # =====================================================
+ # File headers, if necessary.
+ #
+ if (first == 0) {
+ printf("#include \"db_config.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#ifdef HAVE_RPC\n") >> CFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
+ printf("#include <sys/types.h>\n\n") >> CFILE
+ printf("#include <rpc/rpc.h>\n") >> CFILE
+ printf("#include <rpc/xdr.h>\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include <string.h>\n") >> CFILE
+ printf("#endif\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"db_int.h\"\n") >> CFILE
+ printf("#include \"dbinc/txn.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> CFILE
+ printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+
+ printf("#include \"db_config.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#ifdef HAVE_RPC\n") >> TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n") >> TFILE
+ printf("#include <rpc/rpc.h>\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> TFILE
+ printf("#include \"dbinc/txn.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+
+ printf("#include \"db_config.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
+ printf("#include <sys/types.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <rpc/rpc.h>\n") >> SFILE
+ printf("#include <rpc/xdr.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <string.h>\n") >> SFILE
+ printf("#endif\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include \"db_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> SFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+
+ printf("#include \"db_config.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
+ printf("#include <sys/types.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <rpc/rpc.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <string.h>\n") >> PFILE
+ printf("#endif\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include \"db_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> PFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+
+ first = 1;
+ }
+ #
+ # =====================================================
+ # Generate Client Nofunc code first if necessary
+ # NOTE: This code must be first, because we don't want any
+ # other code other than this function, so before we write
+ # out to the XDR and server files, we just generate this
+ # and move on if this is all we are doing.
+ #
+ if (nofunc_code == 1) {
+ #
+ # First time through, put out the general no server and
+ # illegal functions.
+ #
+ if (first_nofunc == 0) {
+ printf("static int __dbcl_noserver ") >> CFILE
+ printf("__P((DB_ENV *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_noserver(dbenv)\n") >> CFILE
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"No server environment\");\n") >> CFILE
+ printf("\treturn (DB_NOSERVER);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ printf("static int __dbcl_rpc_illegal ") >> CFILE
+ printf("__P((DB_ENV *, char *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_rpc_illegal(dbenv, name)\n") >> CFILE
+ printf("\tDB_ENV *dbenv;\n\tchar *name;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"%%s method meaningless in an RPC") >> CFILE
+ printf(" environment\", name);\n") >> CFILE
+ printf("\treturn (__db_eopnotsup(dbenv));\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ first_nofunc = 1
+ }
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ #
+ # Call error function and return EINVAL
+ #
+ printf("{\n") >> CFILE
+
+ #
+ # If we don't have a local env, set one.
+ #
+ if (env_handle == 0) {
+ printf("\tDB_ENV *dbenv;\n\n") >> CFILE
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", \
+ args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ else if (mp_handle)
+ printf("\tdbenv = %s->dbmp->dbenv;\n", \
+ args[mp_idx]) >> CFILE
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ }
+ #
+ # Quiet the compiler for all variables.
+ #
+ # NOTE: Index 'i' starts at 1, not 0. Our first arg is
+ # the handle we need to get to the env, and we do not want
+ # to COMPQUIET that one.
+ for (i = 1; i < nvars; ++i) {
+ if (rpc_type[i] == "CONST" || rpc_type[i] == "DBT" ||
+ rpc_type[i] == "LIST" || rpc_type[i] == "STRING" ||
+ rpc_type[i] == "GID") {
+ printf("\tCOMPQUIET(%s, NULL);\n", args[i]) \
+ >> CFILE
+ }
+ if (rpc_type[i] == "INT" || rpc_type[i] == "IGNORE" ||
+ rpc_type[i] == "ID") {
+ printf("\tCOMPQUIET(%s, 0);\n", args[i]) \
+ >> CFILE
+ }
+ }
+
+ if (!env_handle) {
+ printf("\treturn (__dbcl_rpc_illegal(dbenv, ") >> CFILE
+ printf("\"%s\"));\n", name) >> CFILE
+ } else
+ printf("\treturn (__dbcl_rpc_illegal(%s, \"%s\"));\n", \
+ args[env_idx], name) >> CFILE
+ printf("}\n\n") >> CFILE
+
+ next;
+ }
+
+ #
+ # =====================================================
+ # XDR messages.
+ #
+ printf("\n") >> XFILE
+ printf("struct __%s_msg {\n", name) >> XFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ if (list_type[i] == "GID") {
+ printf("\topaque %s<>;\n", args[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", args[i]) >> XFILE
+ }
+ }
+ if (rpc_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\topaque %s[%d];\n", args[i], xidsize) >> XFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tunsigned int %sdlen;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sdoff;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sulen;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sflags;\n", args[i]) >> XFILE
+ printf("\topaque %sdata<>;\n", args[i]) >> XFILE
+ }
+ }
+ printf("};\n") >> XFILE
+
+ printf("\n") >> XFILE
+ #
+ # Generate the reply message
+ #
+ printf("struct __%s_reply {\n", name) >> XFILE
+ printf("\tint status;\n") >> XFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBL") {
+ printf("\tdouble %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\topaque %sdata<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "LIST") {
+ if (retlist_type[i] == "GID") {
+ printf("\topaque %s<>;\n", retargs[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", retargs[i]) >> XFILE
+ }
+ }
+ }
+ printf("};\n") >> XFILE
+
+ endlist[nendlist] = \
+ sprintf("__%s_reply __DB_%s(__%s_msg) = %d", \
+ name, name, name, nendlist);
+ nendlist++;
+ #
+ # =====================================================
+ # Server functions.
+ #
+ # First spit out PUBLIC prototypes for server functions.
+ #
+ p[1] = sprintf("__%s_reply *__db_%s_%d%03d __P((__%s_msg *, struct svc_req *));",
+ name, name, major, minor, name);
+ p[2] = "";
+ proto_format(p, 0, SFILE);
+
+ printf("__%s_reply *\n", name) >> SFILE
+ printf("__db_%s_%d%03d(msg, req)\n", name, major, minor) >> SFILE
+ printf("\t__%s_msg *msg;\n", name) >> SFILE;
+ printf("\tstruct svc_req *req;\n", name) >> SFILE;
+ printf("{\n") >> SFILE
+ printf("\tstatic __%s_reply reply; /* must be static */\n", \
+ name) >> SFILE
+ if (xdr_free) {
+ printf("\tstatic int __%s_free = 0; /* must be static */\n\n", \
+ name) >> SFILE
+ }
+ printf("\tCOMPQUIET(req, NULL);\n", name) >> SFILE
+ if (xdr_free) {
+ printf("\tif (__%s_free)\n", name) >> SFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)&reply);\n", \
+ name) >> SFILE
+ printf("\t__%s_free = 0;\n", name) >> SFILE
+ printf("\n\t/* Reinitialize allocated fields */\n") >> SFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "LIST") {
+ printf("\treply.%s.%s_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\treply.%sdata.%sdata_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
+ }
+ }
+ }
+
+ need_out = 0;
+ #
+ # Compose server proc to call. Decompose message components as args.
+ #
+ printf("\n\t__%s_proc(", name) >> SFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "IGNORE") {
+ continue;
+ }
+ if (rpc_type[i] == "ID") {
+ printf("%smsg->%scl_id", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s(*msg->%s == '\\0') ? NULL : msg->%s", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("%smsg->%s", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%smsg->%s", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%smsg->%s.%s_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%smsg->%s.%s_len", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%smsg->%sdlen", sep, args[i]) >> SFILE
+ sep = ",\n\t ";
+ printf("%smsg->%sdoff", sep, args[i]) >> SFILE
+ printf("%smsg->%sulen", sep, args[i]) >> SFILE
+ printf("%smsg->%sflags", sep, args[i]) >> SFILE
+ printf("%smsg->%sdata.%sdata_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%smsg->%sdata.%sdata_len", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ sep = ",\n\t ";
+ }
+ printf("%s&reply", sep) >> SFILE
+ if (xdr_free)
+ printf("%s&__%s_free);\n", sep, name) >> SFILE
+ else
+ printf(");\n\n") >> SFILE
+ if (need_out) {
+ printf("\nout:\n") >> SFILE
+ }
+ printf("\treturn (&reply);\n") >> SFILE
+ printf("}\n\n") >> SFILE
+
+ #
+ # =====================================================
+ # Generate Procedure Template Server code
+ #
+ # Produce SED file commands if needed at the same time
+ #
+ # Spit out comment, prototype, function name and arg list.
+ #
+ printf("/^\\/\\* BEGIN __%s_proc/,/^\\/\\* END __%s_proc/c\\\n", \
+ name, name) >> SEDFILE
+
+ printf("/* BEGIN __%s_proc */\n", name) >> PFILE
+ printf("/* BEGIN __%s_proc */\\\n", name) >> SEDFILE
+
+ pi = 1;
+ p[pi++] = sprintf("void __%s_proc __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ p[pi++] = "long";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "STRING") {
+ p[pi++] = "char *";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "INT") {
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "DBT") {
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "void *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ }
+ p[pi++] = sprintf("__%s_reply *", name);
+ if (xdr_free) {
+ p[pi++] = ", ";
+ p[pi++] = "int *));";
+ } else {
+ p[pi++] = "";
+ p[pi++] = "));";
+ }
+ p[pi++] = "";
+ proto_format(p, 1, SEDFILE);
+
+ printf("void\n") >> PFILE
+ printf("void\\\n") >> SEDFILE
+ printf("__%s_proc(", name) >> PFILE
+ printf("__%s_proc(", name) >> SEDFILE
+ sep = "";
+ argcount = 0;
+ for (i = 0; i < nvars; ++i) {
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ }
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ printf("%s%scl_id", sep, args[i]) >> PFILE
+ printf("%s%scl_id", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%slen", sep, args[i]) >> PFILE
+ printf("%s%slen", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%s%sdlen", sep, args[i]) >> PFILE
+ printf("%s%sdlen", sep, args[i]) >> SEDFILE
+ sep = ", ";
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdoff", sep, args[i]) >> PFILE
+ printf("%s%sdoff", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sulen", sep, args[i]) >> PFILE
+ printf("%s%sulen", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sflags", sep, args[i]) >> PFILE
+ printf("%s%sflags", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdata", sep, args[i]) >> PFILE
+ printf("%s%sdata", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%ssize", sep, args[i]) >> PFILE
+ printf("%s%ssize", sep, args[i]) >> SEDFILE
+ }
+ sep = ", ";
+ }
+ printf("%sreplyp",sep) >> PFILE
+ printf("%sreplyp",sep) >> SEDFILE
+ if (xdr_free) {
+ printf("%sfreep)\n",sep) >> PFILE
+ printf("%sfreep)\\\n",sep) >> SEDFILE
+ } else {
+ printf(")\n") >> PFILE
+ printf(")\\\n") >> SEDFILE
+ }
+ #
+ # Spit out arg types/names;
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tlong %scl_id;\n", args[i]) >> PFILE
+ printf("\\\tlong %scl_id;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tchar *%s;\n", args[i]) >> PFILE
+ printf("\\\tchar *%s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\tu_int8_t *%s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t *%s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tu_int32_t %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ printf("\tu_int8_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t * %s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", \
+ args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tu_int32_t %slen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %slen;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tu_int32_t %sdlen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdlen;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdoff;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sulen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sulen;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sflags;\\\n", args[i]) >> SEDFILE
+ printf("\tvoid *%sdata;\n", args[i]) >> PFILE
+ printf("\\\tvoid *%sdata;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
+ }
+ }
+ printf("\t__%s_reply *replyp;\n",name) >> PFILE
+ printf("\\\t__%s_reply *replyp;\\\n",name) >> SEDFILE
+ if (xdr_free) {
+ printf("\tint * freep;\n") >> PFILE
+ printf("\\\tint * freep;\\\n") >> SEDFILE
+ }
+
+ printf("/* END __%s_proc */\n", name) >> PFILE
+ printf("/* END __%s_proc */\n", name) >> SEDFILE
+
+ #
+ # Function body
+ #
+ printf("{\n") >> PFILE
+ printf("\tint ret;\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\t%s %s;\n", c_type[i], args[i]) >> PFILE
+ printf("\tct_entry *%s_ctp;\n", args[i]) >> PFILE
+ }
+ }
+ printf("\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tACTIVATE_CTP(%s_ctp, %scl_id, %s);\n", \
+ args[i], args[i], ctp_type[i]) >> PFILE
+ printf("\t%s = (%s)%s_ctp->ct_anyp;\n", \
+ args[i], c_type[i], args[i]) >> PFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> PFILE
+ printf("\treplyp->status = ret;\n") >> PFILE
+ printf("\treturn;\n") >> PFILE
+ printf("}\n\n") >> PFILE
+
+ #
+ # =====================================================
+ # Generate Client code
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ printf("{\n") >> CFILE
+ printf("\tCLIENT *cl;\n") >> CFILE
+ printf("\t__%s_msg msg;\n", name) >> CFILE
+ printf("\t__%s_reply *replyp = NULL;\n", name) >> CFILE;
+ printf("\tint ret;\n") >> CFILE
+ if (!env_handle)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+ #
+ # If we are managing a list, we need a few more vars.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t%s %sp;\n", c_type[i], args[i]) >> CFILE
+ printf("\tint %si;\n", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("\tu_int8_t ** %sq;\n", args[i]) >> CFILE
+ else
+ printf("\tu_int32_t * %sq;\n", args[i]) >> CFILE
+ }
+ }
+
+ printf("\n") >> CFILE
+ printf("\tret = 0;\n") >> CFILE
+ if (!env_handle) {
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") \
+ >> CFILE
+ printf("\t\treturn (__dbcl_noserver(NULL));\n") >> CFILE
+ } else {
+ printf("\tif (%s == NULL || !RPC_ON(%s))\n", \
+ args[env_idx], args[env_idx]) >> CFILE
+ printf("\t\treturn (__dbcl_noserver(%s));\n", \
+ args[env_idx]) >> CFILE
+ }
+ printf("\n") >> CFILE
+
+ if (!env_handle)
+ printf("\tcl = (CLIENT *)dbenv->cl_handle;\n") >> CFILE
+ else
+ printf("\tcl = (CLIENT *)%s->cl_handle;\n", \
+ args[env_idx]) >> CFILE
+
+ printf("\n") >> CFILE
+
+ #
+ # If there is a function arg, check that it is NULL
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (func_arg[i] != 1)
+ continue;
+ printf("\tif (%s != NULL) {\n", args[i]) >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ }
+ printf("\"User functions not supported in RPC\");\n") >> CFILE
+ printf("\t\treturn (EINVAL);\n\t}\n") >> CFILE
+ }
+
+ #
+ # Compose message components
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\tmsg.%scl_id = 0;\n\telse\n", \
+ args[i]) >> CFILE
+ if (c_type[i] == "DB_TXN *") {
+ printf("\t\tmsg.%scl_id = %s->txnid;\n", \
+ args[i], args[i]) >> CFILE
+ } else {
+ printf("\t\tmsg.%scl_id = %s->cl_id;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\tmemcpy(msg.%s, %s, %d);\n", \
+ args[i], args[i], xidsize) >> CFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tmsg.%s = %s;\n", args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\tmsg.%s = \"\";\n", args[i]) >> CFILE
+ printf("\telse\n") >> CFILE
+ printf("\t\tmsg.%s = (char *)%s;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tmsg.%sdlen = %s->dlen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdoff = %s->doff;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sulen = %s->ulen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sflags = %s->flags;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdata.%sdata_val = %s->data;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdata.%sdata_len = %s->size;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tfor (%si = 0, %sp = %s; *%sp != 0; ", \
+ args[i], args[i], args[i], args[i]) >> CFILE
+ printf(" %si++, %sp++)\n\t\t;\n", args[i], args[i]) \
+ >> CFILE
+
+ #
+ # If we are an array of ints, *_len is how many
+ # elements. If we are a GID, *_len is total bytes.
+ #
+ printf("\tmsg.%s.%s_len = %si",args[i], args[i], \
+ args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" * %d;\n", xidsize) >> CFILE
+ else
+ printf(";\n") >> CFILE
+ printf("\tif ((ret = __os_calloc(") >> CFILE
+ if (!env_handle)
+ printf("dbenv,\n") >> CFILE
+ else
+ printf("%s,\n", args[env_idx]) >> CFILE
+ printf("\t msg.%s.%s_len,", \
+ args[i], args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" 1,") >> CFILE
+ else
+ printf(" sizeof(u_int32_t),") >> CFILE
+ printf(" &msg.%s.%s_val)) != 0)\n",\
+			    args[i], args[i]) >> CFILE
+ printf("\t\treturn (ret);\n") >> CFILE
+ printf("\tfor (%sq = msg.%s.%s_val, %sp = %s; ", \
+ args[i], args[i], args[i], \
+ args[i], args[i]) >> CFILE
+ printf("%si--; %sq++, %sp++)\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\t\t*%sq = ", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("*%sp;\n", args[i]) >> CFILE
+ if (list_type[i] == "ID")
+ printf("(*%sp)->cl_id;\n", args[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("*%sp;\n", args[i]) >> CFILE
+ }
+ }
+
+ printf("\n") >> CFILE
+ printf("\treplyp = __db_%s_%d%03d(&msg, cl);\n", name, major, minor) \
+ >> CFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__os_free(") >> CFILE
+ if (!env_handle)
+ printf("dbenv, ") >> CFILE
+ else
+ printf("%s, ", args[env_idx]) >> CFILE
+ printf("msg.%s.%s_val);\n", args[i], args[i]) >> CFILE
+ }
+ }
+ printf("\tif (replyp == NULL) {\n") >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ }
+ printf("\t\tret = DB_NOSERVER;\n") >> CFILE
+ printf("\t\tgoto out;\n") >> CFILE
+ printf("\t}\n") >> CFILE
+
+ if (ret_code == 0) {
+ printf("\tret = replyp->status;\n") >> CFILE
+ } else {
+ printf("\tret = __dbcl_%s_ret(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf("%sreplyp);\n", sep) >> CFILE
+ }
+ printf("out:\n") >> CFILE
+ #
+ # Free reply if there was one.
+ #
+ printf("\tif (replyp != NULL)\n") >> CFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply,",name) >> CFILE
+ printf(" (void *)replyp);\n") >> CFILE
+ printf("\treturn (ret);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ #
+ # Generate Client Template code
+ #
+ if (ret_code) {
+ #
+ # If we are doing a list, write prototypes
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s_ret __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi++] = sprintf("__%s_reply *));", name);
+ p[pi++] = "";
+ proto_format(p, 0, TFILE);
+
+ printf("int\n") >> TFILE
+ printf("__dbcl_%s_ret(", name) >> TFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> TFILE
+ sep = ", ";
+ }
+ printf("%sreplyp)\n",sep) >> TFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> TFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> TFILE
+ printf("\t__%s_reply *replyp;\n", name) >> TFILE;
+ printf("{\n") >> TFILE
+ printf("\tint ret;\n") >> TFILE
+ #
+ # Local vars in template
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s %s;\n", \
+ retc_type[i], retargs[i]) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ if (retlist_type[i] == "GID")
+ printf("\tu_int8_t *__db_%s;\n", \
+ retargs[i]) >> TFILE
+ if (retlist_type[i] == "ID" ||
+ retlist_type[i] == "INT")
+ printf("\tu_int32_t *__db_%s;\n", \
+ retargs[i]) >> TFILE
+ } else {
+ printf("\t/* %s %s; */\n", \
+ ret_type[i], retargs[i]) >> TFILE
+ }
+ }
+ #
+ # Client return code
+ #
+ printf("\n") >> TFILE
+ printf("\tif (replyp->status != 0)\n") >> TFILE
+ printf("\t\treturn (replyp->status);\n") >> TFILE
+ for (i = 0; i < rvars; ++i) {
+ varname = "";
+ if (ret_type[i] == "ID") {
+ varname = sprintf("%scl_id", retargs[i]);
+ }
+ if (ret_type[i] == "STRING") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "DBT") {
+ varname = sprintf("%sdata", retargs[i]);
+ }
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s = replyp->%s;\n", \
+ retargs[i], varname) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ printf("\n\t/*\n") >> TFILE
+ printf("\t * XXX Handle list\n") >> TFILE
+ printf("\t */\n\n") >> TFILE
+ } else {
+ printf("\t/* Handle replyp->%s; */\n", \
+ varname) >> TFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> TFILE
+ printf("\treturn (replyp->status);\n") >> TFILE
+ printf("}\n\n") >> TFILE
+ }
+}
+
+#
+# split_lines --
+# Add line separators to pretty-print the output.
+function split_lines() {
+ if (argcount > 3) {
+ # Reset the counter, remove any trailing whitespace from
+ # the separator.
+ argcount = 0;
+ sub("[ ]$", "", sep)
+
+ printf("%s\n\t\t", sep) >> PFILE
+ printf("%s\\\n\\\t\\\t", sep) >> SEDFILE
+ }
+}
+
+# proto_format --
+# Pretty-print a function prototype.
+function proto_format(p, sedfile, OUTPUT)
+{
+ if (sedfile)
+ printf("/*\\\n") >> OUTPUT;
+ else
+ printf("/*\n") >> OUTPUT;
+
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ if (sedfile)
+ t = "\\ * PUBLIC: "
+ else
+ t = " * PUBLIC: "
+ if (length(s) + length(t) < 80)
+ if (sedfile)
+ printf("%s%s", t, s) >> OUTPUT;
+ else
+ printf("%s%s", t, s) >> OUTPUT;
+ else {
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> OUTPUT
+
+ n = split(p[2], comma, ",");
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 75) {
+ if (sedfile)
+ printf(\
+ "\\\n\\ * PUBLIC: ") >> OUTPUT;
+ else
+ printf("\n * PUBLIC: ") >> OUTPUT;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> OUTPUT;
+ len += length(comma[i]);
+ }
+ }
+ if (sedfile)
+ printf("\\\n\\ */\\\n") >> OUTPUT;
+ else
+ printf("\n */\n") >> OUTPUT;
+ delete p;
+}
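(For reference: proto_format above emits the "PUBLIC:" comment blocks that the
Berkeley DB dist tooling conventionally extracts into prototype headers, and in
sed mode it emits the same text with the extra backslash escaping a sed script
needs. As a rough illustration only (the function name and argument types below
are hypothetical, not taken from this diff), the non-sed output for a short
prototype looks like:

    /*
     * PUBLIC: int __dbcl_foo_ret __P((DB_ENV *, u_int32_t, __foo_reply *));
     */

Prototypes longer than roughly 80 columns are continued on additional
" * PUBLIC:" lines.)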
diff --git a/libdb/dist/install-sh b/libdb/dist/install-sh
new file mode 100755
index 0000000..b41a245
--- /dev/null
+++ b/libdb/dist/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=$mkdirprog
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
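+# (e.g. /usr/local/bin/prog yields /usr/local/bin, while a bare name such
+# as prog yields "." because stripping the last component leaves nothing)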
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+ '
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
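(A minimal usage sketch of the install script above; the file names and the
destination directory are hypothetical and chosen only for illustration, but
the -d, -c and -m options are exactly the ones parsed in the option loop above:

    # Create the destination directory, then copy a header into it with
    # mode 0644.
    ./install-sh -d /usr/local/include
    ./install-sh -c -m 644 db.h /usr/local/include/db.h

Note that the default instcmd is mv, so passing -c is what makes the script
copy rather than move the source file.)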
diff --git a/libdb/dist/ltmain.sh b/libdb/dist/ltmain.sh
new file mode 100644
index 0000000..4d4a27a
--- /dev/null
+++ b/libdb/dist/ltmain.sh
@@ -0,0 +1,4999 @@
+# ltmain.sh - Provide generalized library-building support services.
+# NOTE: Changing this file will not affect anything until you rerun configure.
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001
+# Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Check that we have a working $echo.
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell, and then maybe $echo will work.
+ exec $SHELL "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The name of this program.
+progname=`$echo "$0" | sed 's%^.*/%%'`
+modename="$progname"
+
+# Constants.
+PROGRAM=ltmain.sh
+PACKAGE=libtool
+VERSION=1.4.2
+TIMESTAMP=" (1.922.2.53 2001/09/11 03:18:52)"
+
+default_mode=
+help="Try \`$progname --help' for more information."
+magic="%%%MAGIC variable%%%"
+mkdir="mkdir"
+mv="mv -f"
+rm="rm -f"
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
+SP2NL='tr \040 \012'
+NL2SP='tr \015\012 \040\040'
+
+# NLS nuisances.
+# Only set LANG and LC_ALL to C if already set.
+# These must not be set unconditionally because not all systems understand
+# e.g. LANG=C (notably SCO).
+# We save the old values to restore during execute mode.
+if test "${LC_ALL+set}" = set; then
+ save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
+fi
+if test "${LANG+set}" = set; then
+ save_LANG="$LANG"; LANG=C; export LANG
+fi
+
+# Make sure IFS has a sensible default
+: ${IFS=" "}
+
+if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ echo "$modename: not configured to build any kind of library" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+fi
+
+# Global variables.
+mode=$default_mode
+nonopt=
+prev=
+prevopt=
+run=
+show="$echo"
+show_help=
+execute_dlfiles=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
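+# lo2o and o2lo are sed expressions that map a libtool object name (.lo)
+# to the real object suffix ($objext, normally .o) and back again.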
+
+# Parse our command line options once, thoroughly.
+while test $# -gt 0
+do
+ arg="$1"
+ shift
+
+ case $arg in
+ -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ execute_dlfiles)
+ execute_dlfiles="$execute_dlfiles $arg"
+ ;;
+ *)
+ eval "$prev=\$arg"
+ ;;
+ esac
+
+ prev=
+ prevopt=
+ continue
+ fi
+
+ # Have we seen a non-optional argument yet?
+ case $arg in
+ --help)
+ show_help=yes
+ ;;
+
+ --version)
+ echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
+ exit 0
+ ;;
+
+ --config)
+ sed -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $0
+ exit 0
+ ;;
+
+ --debug)
+ echo "$progname: enabling shell trace mode"
+ set -x
+ ;;
+
+ --dry-run | -n)
+ run=:
+ ;;
+
+ --features)
+ echo "host: $host"
+ if test "$build_libtool_libs" = yes; then
+ echo "enable shared libraries"
+ else
+ echo "disable shared libraries"
+ fi
+ if test "$build_old_libs" = yes; then
+ echo "enable static libraries"
+ else
+ echo "disable static libraries"
+ fi
+ exit 0
+ ;;
+
+ --finish) mode="finish" ;;
+
+ --mode) prevopt="--mode" prev=mode ;;
+ --mode=*) mode="$optarg" ;;
+
+ --quiet | --silent)
+ show=:
+ ;;
+
+ -dlopen)
+ prevopt="-dlopen"
+ prev=execute_dlfiles
+ ;;
+
+ -*)
+ $echo "$modename: unrecognized option \`$arg'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+
+ *)
+ nonopt="$arg"
+ break
+ ;;
+ esac
+done
+
+if test -n "$prevopt"; then
+ $echo "$modename: option \`$prevopt' requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+fi
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end. This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+if test -z "$show_help"; then
+
+ # Infer the operation mode.
+ if test -z "$mode"; then
+ case $nonopt in
+ *cc | *++ | gcc* | *-gcc*)
+ mode=link
+ for arg
+ do
+ case $arg in
+ -c)
+ mode=compile
+ break
+ ;;
+ esac
+ done
+ ;;
+ *db | *dbx | *strace | *truss)
+ mode=execute
+ ;;
+ *install*|cp|mv)
+ mode=install
+ ;;
+ *rm)
+ mode=uninstall
+ ;;
+ *)
+ # If we have no mode, but dlfiles were specified, then do execute mode.
+ test -n "$execute_dlfiles" && mode=execute
+
+ # Just use the default operation mode.
+ if test -z "$mode"; then
+ if test -n "$nonopt"; then
+ $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
+ else
+ $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
+ fi
+ fi
+ ;;
+ esac
+ fi
+
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$execute_dlfiles" && test "$mode" != execute; then
+ $echo "$modename: unrecognized option \`-dlopen'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$modename --help --mode=$mode' for more information."
+
+ # These modes are in order of execution frequency so that they run quickly.
+ case $mode in
+ # libtool compile mode
+ compile)
+ modename="$modename: compile"
+ # Get the compilation command and the source file.
+ base_compile=
+ prev=
+ lastarg=
+ srcfile="$nonopt"
+ suppress_output=
+
+ user_target=no
+ for arg
+ do
+ case $prev in
+ "") ;;
+ xcompiler)
+ # Aesthetically quote the previous argument.
+ prev=
+ lastarg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+
+ case $arg in
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
+ esac
+
+ # Accept any command-line options.
+ case $arg in
+ -o)
+ if test "$user_target" != "no"; then
+ $echo "$modename: you cannot specify \`-o' more than once" 1>&2
+ exit 1
+ fi
+ user_target=next
+ ;;
+
+ -static)
+ build_old_libs=yes
+ continue
+ ;;
+
+ -prefer-pic)
+ pic_mode=yes
+ continue
+ ;;
+
+ -prefer-non-pic)
+ pic_mode=no
+ continue
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
+ lastarg=
+ save_ifs="$IFS"; IFS=','
+ for arg in $args; do
+ IFS="$save_ifs"
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ lastarg="$lastarg $arg"
+ done
+ IFS="$save_ifs"
+ lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
+
+ # Add the arguments to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
+ esac
+
+ case $user_target in
+ next)
+ # The next one is the -o target name
+ user_target=yes
+ continue
+ ;;
+ yes)
+ # We got the output file
+ user_target=set
+ libobj="$arg"
+ continue
+ ;;
+ esac
+
+ # Accept the current argument as the source file.
+ lastarg="$srcfile"
+ srcfile="$arg"
+
+ # Aesthetically quote the previous argument.
+
+ # Backslashify any backslashes, double quotes, and dollar signs.
+ # These are the only characters that are still specially
+      # interpreted inside of double-quoted strings.
+ lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $lastarg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ lastarg="\"$lastarg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ done
+
+ case $user_target in
+ set)
+ ;;
+ no)
+ # Get the name of the library object.
+ libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
+ ;;
+ *)
+ $echo "$modename: you must specify a target with \`-o'" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Recognize several different file suffixes.
+ # If the user specifies -o file.o, it is replaced with file.lo
+ xform='[cCFSfmso]'
+ case $libobj in
+ *.ada) xform=ada ;;
+ *.adb) xform=adb ;;
+ *.ads) xform=ads ;;
+ *.asm) xform=asm ;;
+ *.c++) xform=c++ ;;
+ *.cc) xform=cc ;;
+ *.cpp) xform=cpp ;;
+ *.cxx) xform=cxx ;;
+ *.f90) xform=f90 ;;
+ *.for) xform=for ;;
+ esac
+
+ libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
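+    # e.g. with xform=cc, a target of foo.cc becomes foo.lo here.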
+
+ case $libobj in
+ *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
+ *)
+ $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test -z "$base_compile"; then
+ $echo "$modename: you must specify a compilation command" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Delete any leftover library objects.
+ if test "$build_old_libs" = yes; then
+ removelist="$obj $libobj"
+ else
+ removelist="$libobj"
+ fi
+
+ $run $rm $removelist
+ trap "$run $rm $removelist; exit 1" 1 2 15
+
+ # On Cygwin there's no "real" PIC flag so we must build both object types
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ pic_mode=default
+ ;;
+ esac
+ if test $pic_mode = no && test "$deplibs_check_method" != pass_all; then
+ # non-PIC code in shared libraries is not supported
+ pic_mode=default
+ fi
+
+ # Calculate the filename of the output object if compiler does
+ # not support -o with -c
+ if test "$compiler_c_o" = no; then
+ output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
+ lockfile="$output_obj.lock"
+ removelist="$removelist $output_obj $lockfile"
+ trap "$run $rm $removelist; exit 1" 1 2 15
+ else
+ need_locks=no
+ lockfile=
+ fi
+
+ # Lock this critical section if it is needed
+ # We use this script file to make the link, it avoids creating a new file
+ if test "$need_locks" = yes; then
+ until $run ln "$0" "$lockfile" 2>/dev/null; do
+ $show "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ elif test "$need_locks" = warn; then
+ if test -f "$lockfile"; then
+ echo "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+ echo $srcfile > "$lockfile"
+ fi
+
+ if test -n "$fix_srcfile_path"; then
+ eval srcfile=\"$fix_srcfile_path\"
+ fi
+
+ # Only build a PIC object if we are building libtool libraries.
+ if test "$build_libtool_libs" = yes; then
+ # Without this assignment, base_compile gets emptied.
+ fbsd_hideous_sh_bug=$base_compile
+
+ if test "$pic_mode" != no; then
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ else
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ fi
+ if test "$build_old_libs" = yes; then
+ lo_libobj="$libobj"
+ dir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$libobj"; then
+ dir="$objdir"
+ else
+ dir="$dir/$objdir"
+ fi
+ libobj="$dir/"`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+
+ if test -d "$dir"; then
+ $show "$rm $libobj"
+ $run $rm $libobj
+ else
+ $show "$mkdir $dir"
+ $run $mkdir $dir
+ status=$?
+ if test $status -ne 0 && test ! -d $dir; then
+ exit $status
+ fi
+ fi
+ fi
+ if test "$compiler_o_lo" = yes; then
+ output_obj="$libobj"
+ command="$command -o $output_obj"
+ elif test "$compiler_c_o" = yes; then
+ output_obj="$obj"
+ command="$command -o $output_obj"
+ fi
+
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ test -n "$output_obj" && $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed, then go on to compile the next one
+ if test x"$output_obj" != x"$libobj"; then
+ $show "$mv $output_obj $libobj"
+ if $run $mv $output_obj $libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # If we have no pic_flag, then copy the object into place and finish.
+ if (test -z "$pic_flag" || test "$pic_mode" != default) &&
+ test "$build_old_libs" = yes; then
+ # Rename the .lo from within objdir to obj
+ if test -f $obj; then
+ $show $rm $obj
+ $run $rm $obj
+ fi
+
+ $show "$mv $libobj $obj"
+ if $run $mv $libobj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e "s%.*/%%"`
+ libobj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ # Now arrange that obj and lo_libobj become the same file
+ $show "(cd $xdir && $LN_S $baseobj $libobj)"
+ if $run eval '(cd $xdir && $LN_S $baseobj $libobj)'; then
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $run $rm "$lockfile"
+ fi
+ exit 0
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Allow error messages only from the first compilation.
+ suppress_output=' >/dev/null 2>&1'
+ fi
+
+ # Only build a position-dependent object if we build old libraries.
+ if test "$build_old_libs" = yes; then
+ if test "$pic_mode" != yes; then
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ else
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ fi
+ if test "$compiler_c_o" = yes; then
+ command="$command -o $obj"
+ output_obj="$obj"
+ fi
+
+ # Suppress compiler output if we already did a PIC compilation.
+ command="$command$suppress_output"
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) in this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed
+ if test x"$output_obj" != x"$obj"; then
+ $show "$mv $output_obj $obj"
+ if $run $mv $output_obj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we do not
+ # accidentally link it into a program.
+ if test "$build_libtool_libs" != yes; then
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > \$libobj" || exit $?
+ else
+ # Move the .lo from within objdir
+ $show "$mv $libobj $lo_libobj"
+ if $run $mv $libobj $lo_libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+ fi
+
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $run $rm "$lockfile"
+ fi
+
+ exit 0
+ ;;
+
+ # libtool link mode
+ link | relink)
+ modename="$modename: link"
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ # It is impossible to link a dll without this setting, and
+ # we shouldn't force the makefile maintainer to figure out
+ # which system we are compiling for in order to pass an extra
+      # flag for every libtool invocation.
+ # allow_undefined=no
+
+ # FIXME: Unfortunately, there are problems with the above when trying
+ # to make a dll which has undefined symbols, in which case not
+ # even a static library is built. For now, we need to specify
+ # -no-undefined on the libtool link line when we can be certain
+ # that all symbols are satisfied, otherwise we get a static library.
+ allow_undefined=yes
+ ;;
+ *)
+ allow_undefined=yes
+ ;;
+ esac
+ libtool_args="$nonopt"
+ compile_command="$nonopt"
+ finalize_command="$nonopt"
+
+ compile_rpath=
+ finalize_rpath=
+ compile_shlibpath=
+ finalize_shlibpath=
+ convenience=
+ old_convenience=
+ deplibs=
+ old_deplibs=
+ compiler_flags=
+ linker_flags=
+ dllsearchpath=
+ lib_search_path=`pwd`
+
+ avoid_version=no
+ dlfiles=
+ dlprefiles=
+ dlself=no
+ export_dynamic=no
+ export_symbols=
+ export_symbols_regex=
+ generated=
+ libobjs=
+ ltlibs=
+ module=no
+ no_install=no
+ objs=
+ prefer_static_libs=no
+ preload=no
+ prev=
+ prevarg=
+ release=
+ rpath=
+ xrpath=
+ perm_rpath=
+ temp_rpath=
+ thread_safe=no
+ vinfo=
+
+ # We need to know -static, to get the right output filenames.
+ for arg
+ do
+ case $arg in
+ -all-static | -static)
+ if test "X$arg" = "X-all-static"; then
+ if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+ $echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
+ fi
+ if test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ else
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ fi
+ build_libtool_libs=no
+ build_old_libs=yes
+ prefer_static_libs=yes
+ break
+ ;;
+ esac
+ done
+
+ # See if our shared archives depend on static archives.
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+ # Go through the arguments, transforming them on the way.
+ while test $# -gt 0; do
+ arg="$1"
+ shift
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
+ ;;
+ *) qarg=$arg ;;
+ esac
+ libtool_args="$libtool_args $qarg"
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ output)
+ compile_command="$compile_command @OUTPUT@"
+ finalize_command="$finalize_command @OUTPUT@"
+ ;;
+ esac
+
+ case $prev in
+ dlfiles|dlprefiles)
+ if test "$preload" = no; then
+ # Add the symbol object into the linking commands.
+ compile_command="$compile_command @SYMFILE@"
+ finalize_command="$finalize_command @SYMFILE@"
+ preload=yes
+ fi
+ case $arg in
+ *.la | *.lo) ;; # We handle these cases below.
+ force)
+ if test "$dlself" = no; then
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ self)
+ if test "$prev" = dlprefiles; then
+ dlself=yes
+ elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+ dlself=yes
+ else
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ else
+ dlprefiles="$dlprefiles $arg"
+ fi
+ prev=
+ continue
+ ;;
+ esac
+ ;;
+ expsyms)
+ export_symbols="$arg"
+ if test ! -f "$arg"; then
+ $echo "$modename: symbol file \`$arg' does not exist"
+ exit 1
+ fi
+ prev=
+ continue
+ ;;
+ expsyms_regex)
+ export_symbols_regex="$arg"
+ prev=
+ continue
+ ;;
+ release)
+ release="-$arg"
+ prev=
+ continue
+ ;;
+ rpath | xrpath)
+ # We need an absolute path.
+ case $arg in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ if test "$prev" = rpath; then
+ case "$rpath " in
+ *" $arg "*) ;;
+ *) rpath="$rpath $arg" ;;
+ esac
+ else
+ case "$xrpath " in
+ *" $arg "*) ;;
+ *) xrpath="$xrpath $arg" ;;
+ esac
+ fi
+ prev=
+ continue
+ ;;
+ xcompiler)
+ compiler_flags="$compiler_flags $qarg"
+ prev=
+ compile_command="$compile_command $qarg"
+ finalize_command="$finalize_command $qarg"
+ continue
+ ;;
+ xlinker)
+ linker_flags="$linker_flags $qarg"
+ compiler_flags="$compiler_flags $wl$qarg"
+ prev=
+ compile_command="$compile_command $wl$qarg"
+ finalize_command="$finalize_command $wl$qarg"
+ continue
+ ;;
+ *)
+ eval "$prev=\"\$arg\""
+ prev=
+ continue
+ ;;
+ esac
+ fi # test -n $prev
+
+ prevarg="$arg"
+
+ case $arg in
+ -all-static)
+ if test -n "$link_static_flag"; then
+ compile_command="$compile_command $link_static_flag"
+ finalize_command="$finalize_command $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -allow-undefined)
+ # FIXME: remove this flag sometime in the future.
+ $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
+ continue
+ ;;
+
+ -avoid-version)
+ avoid_version=yes
+ continue
+ ;;
+
+ -dlopen)
+ prev=dlfiles
+ continue
+ ;;
+
+ -dlpreopen)
+ prev=dlprefiles
+ continue
+ ;;
+
+ -export-dynamic)
+ export_dynamic=yes
+ continue
+ ;;
+
+ -export-symbols | -export-symbols-regex)
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+	$echo "$modename: more than one -export-symbols argument is not allowed"
+ exit 1
+ fi
+ if test "X$arg" = "X-export-symbols"; then
+ prev=expsyms
+ else
+ prev=expsyms_regex
+ fi
+ continue
+ ;;
+
+ # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+ # so, if we see these flags be careful not to treat them like -L
+ -L[A-Z][A-Z]*:*)
+ case $with_gcc/$host in
+ no/*-*-irix*)
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ ;;
+ esac
+ continue
+ ;;
+
+ -L*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
+ exit 1
+ fi
+ dir="$absdir"
+ ;;
+ esac
+ case "$deplibs " in
+ *" -L$dir "*) ;;
+ *)
+ deplibs="$deplibs -L$dir"
+ lib_search_path="$lib_search_path $dir"
+ ;;
+ esac
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$dir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$dir";;
+ esac
+ ;;
+ esac
+ continue
+ ;;
+
+ -l*)
+ if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+ case $host in
+ *-*-cygwin* | *-*-pw32* | *-*-beos*)
+ # These systems don't actually have a C or math library (as such)
+ continue
+ ;;
+ *-*-mingw* | *-*-os2*)
+ # These systems don't actually have a C library (as such)
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ test "X$arg" = "X-lc" && continue
+ ;;
+ esac
+ elif test "X$arg" = "X-lc_r"; then
+ case $host in
+ *-*-openbsd*)
+ # Do not include libc_r directly, use -pthread flag.
+ continue
+ ;;
+ esac
+ fi
+ deplibs="$deplibs $arg"
+ continue
+ ;;
+
+ -module)
+ module=yes
+ continue
+ ;;
+
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ -jnimodule)
+ module=yes
+ jnimodule=yes
+ continue
+ ;;
+
+ -no-fast-install)
+ fast_install=no
+ continue
+ ;;
+
+ -no-install)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ # The PATH hackery in wrapper scripts is required on Windows
+ # in order for the loader to find any dlls it needs.
+ $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
+ $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
+ fast_install=no
+ ;;
+ *) no_install=yes ;;
+ esac
+ continue
+ ;;
+
+ -no-undefined)
+ allow_undefined=no
+ continue
+ ;;
+
+ -o) prev=output ;;
+
+ -release)
+ prev=release
+ continue
+ ;;
+
+ -rpath)
+ prev=rpath
+ continue
+ ;;
+
+ -R)
+ prev=xrpath
+ continue
+ ;;
+
+ -R*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ continue
+ ;;
+
+ -static)
+ # The effects of -static are defined in a previous loop.
+ # We used to do the same as -all-static on platforms that
+ # didn't have a PIC flag, but the assumption that the effects
+ # would be equivalent was wrong. It would break on at least
+ # Digital Unix and AIX.
+ continue
+ ;;
+
+ -thread-safe)
+ thread_safe=yes
+ continue
+ ;;
+
+ -version-info)
+ prev=vinfo
+ continue
+ ;;
+
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Wl,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $wl$flag"
+ linker_flags="$linker_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Xlinker)
+ prev=xlinker
+ continue
+ ;;
+
+ # Some other compiler flag.
+ -* | +*)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+
+ *.lo | *.$objext)
+ # A library or standard object.
+ if test "$prev" = dlfiles; then
+ # This file was specified with -dlopen.
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ dlfiles="$dlfiles $arg"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ dlprefiles="$dlprefiles "`$echo "X$arg" | $Xsed -e "$lo2o"`
+ prev=
+ else
+ case $arg in
+ *.lo) libobjs="$libobjs $arg" ;;
+ *) objs="$objs $arg" ;;
+ esac
+ fi
+ ;;
+
+ *.$libext)
+ # An archive.
+ deplibs="$deplibs $arg"
+ old_deplibs="$old_deplibs $arg"
+ continue
+ ;;
+
+ *.la)
+ # A libtool-controlled library.
+
+ if test "$prev" = dlfiles; then
+ # This library was specified with -dlopen.
+ dlfiles="$dlfiles $arg"
+ prev=
+ elif test "$prev" = dlprefiles; then
+ # The library was specified with -dlpreopen.
+ dlprefiles="$dlprefiles $arg"
+ prev=
+ else
+ deplibs="$deplibs $arg"
+ fi
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+ esac # arg
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+ done # argument parsing loop
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prevarg' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+
+ # calculate the name of the file, without its directory
+ outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
+ libobjs_save="$libobjs"
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ else
+ shlib_search_path=
+ fi
+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$output_objdir" = "X$output"; then
+ output_objdir="$objdir"
+ else
+ output_objdir="$output_objdir/$objdir"
+ fi
+ # Create the object directory.
+ if test ! -d $output_objdir; then
+ $show "$mkdir $output_objdir"
+ $run $mkdir $output_objdir
+ status=$?
+ if test $status -ne 0 && test ! -d $output_objdir; then
+ exit $status
+ fi
+ fi
+
+ # Determine the type of output
+ case $output in
+ "")
+ $echo "$modename: you must specify an output file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ *.$libext) linkmode=oldlib ;;
+ *.lo | *.$objext) linkmode=obj ;;
+ *.la) linkmode=lib ;;
+ *) linkmode=prog ;; # Anything else should be a program.
+ esac
+
+ specialdeplibs=
+ libs=
+ # Find all interdependent deplibs by searching for libraries
+ # that are linked more than once (e.g. -la -lb -la)
+ for deplib in $deplibs; do
+ case "$libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ libs="$libs $deplib"
+ done
+ deplibs=
+ newdependency_libs=
+ newlib_search_path=
+ need_relink=no # whether we're linking any uninstalled libtool libraries
+ notinst_deplibs= # not-installed libtool libraries
+ notinst_path= # paths that contain not-installed libtool libraries
+ case $linkmode in
+ lib)
+ passes="conv link"
+ for file in $dlfiles $dlprefiles; do
+ case $file in
+ *.la) ;;
+ *)
+ $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ prog)
+ compile_deplibs=
+ finalize_deplibs=
+ alldeplibs=no
+ newdlfiles=
+ newdlprefiles=
+ passes="conv scan dlopen dlpreopen link"
+ ;;
+ *) passes="conv"
+ ;;
+ esac
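+    # The passes run in order: "conv" resolves convenience libraries,
+    # "scan" (programs only) collects search paths, "dlopen" and
+    # "dlpreopen" handle files given with -dlopen/-dlpreopen, and
+    # "link" computes the final dependencies.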
+ for pass in $passes; do
+ if test $linkmode = prog; then
+ # Determine which files to process
+ case $pass in
+ dlopen)
+ libs="$dlfiles"
+ save_deplibs="$deplibs" # Collect dlpreopened libraries
+ deplibs=
+ ;;
+ dlpreopen) libs="$dlprefiles" ;;
+ link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+ esac
+ fi
+ for deplib in $libs; do
+ lib=
+ found=no
+ case $deplib in
+ -l*)
+	    if test $linkmode = oldlib || test $linkmode = obj; then
+ $echo "$modename: warning: \`-l' is ignored for archives/objects: $deplib" 1>&2
+ continue
+ fi
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
+ for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ # Search the libtool library
+ lib="$searchdir/lib${name}.la"
+ if test -f "$lib"; then
+ found=yes
+ break
+ fi
+ done
+ if test "$found" != yes; then
+ # deplib doesn't seem to be a libtool library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test $linkmode = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ fi
+ ;; # -l
+ -L*)
+ case $linkmode in
+ lib)
+ deplibs="$deplib $deplibs"
+ test $pass = conv && continue
+ newdependency_libs="$deplib $newdependency_libs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ ;;
+ prog)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ if test $pass = scan; then
+ deplibs="$deplib $deplibs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ ;;
+ *)
+ $echo "$modename: warning: \`-L' is ignored for archives/objects: $deplib" 1>&2
+ ;;
+ esac # linkmode
+ continue
+ ;; # -L
+ -R*)
+ if test $pass = link; then
+ dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
+ # Make sure the xrpath contains only unique directories.
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ fi
+ deplibs="$deplib $deplibs"
+ continue
+ ;;
+ *.la) lib="$deplib" ;;
+ *.$libext)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ case $linkmode in
+ lib)
+ if test "$deplibs_check_method" != pass_all; then
+ echo
+ echo "*** Warning: This library needs some functionality provided by $deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the"
+ echo "*** static library $deplib is not portable!"
+ deplibs="$deplib $deplibs"
+ fi
+ continue
+ ;;
+ prog)
+ if test $pass != link; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ continue
+ ;;
+ esac # linkmode
+ ;; # *.$libext
+ *.lo | *.$objext)
+ if test $pass = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlopen support or we're linking statically,
+ # we need to preload.
+ newdlprefiles="$newdlprefiles $deplib"
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ newdlfiles="$newdlfiles $deplib"
+ fi
+ continue
+ ;;
+ %DEPLIBS%)
+ alldeplibs=yes
+ continue
+ ;;
+ esac # case $deplib
+ if test $found = yes || test -f "$lib"; then :
+ else
+ $echo "$modename: cannot find the library \`$lib'" 1>&2
+ exit 1
+ fi
+
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $lib | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+
+ ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$ladir" = "X$lib" && ladir="."
+
+ dlname=
+ dlopen=
+ dlpreopen=
+ libdir=
+ library_names=
+ old_library=
+ # If the library was installed with an old release of libtool,
+ # it will not redefine variable installed.
+ installed=yes
+
+ # Read the .la file
+ case $lib in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
+ esac
+
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan" ||
+	     { test $linkmode = oldlib || test $linkmode = obj; }; then
+ # Add dl[pre]opened files of deplib
+ test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
+ test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
+ fi
+
+ if test $pass = conv; then
+ # Only check for convenience libraries
+ deplibs="$lib $deplibs"
+ if test -z "$libdir"; then
+ if test -z "$old_library"; then
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
+ exit 1
+ fi
+ # It is a libtool convenience library, so add in its objects.
+ convenience="$convenience $ladir/$objdir/$old_library"
+ old_convenience="$old_convenience $ladir/$objdir/$old_library"
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ deplibs="$deplib $deplibs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
+ elif test $linkmode != prog && test $linkmode != lib; then
+ $echo "$modename: \`$lib' is not a convenience library" 1>&2
+ exit 1
+ fi
+ continue
+ fi # $pass = conv
+
+ # Get the name of the library we link against.
+ linklib=
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+ if test -z "$linklib"; then
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
+ exit 1
+ fi
+
+ # This library was specified with -dlopen.
+ if test $pass = dlopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
+ if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking
+ # statically, we need to preload.
+ dlprefiles="$dlprefiles $lib"
+ else
+ newdlfiles="$newdlfiles $lib"
+ fi
+ continue
+ fi # $pass = dlopen
+
+ # We need an absolute path.
+ case $ladir in
+ [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+ *)
+ abs_ladir=`cd "$ladir" && pwd`
+ if test -z "$abs_ladir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
+ $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
+ abs_ladir="$ladir"
+ fi
+ ;;
+ esac
+ laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+
+ # Find the relevant object directory and library name.
+ if test "X$installed" = Xyes; then
+ if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ $echo "$modename: warning: library \`$lib' was moved." 1>&2
+ dir="$ladir"
+ absdir="$abs_ladir"
+ libdir="$abs_ladir"
+ else
+ dir="$libdir"
+ absdir="$libdir"
+ fi
+ else
+ dir="$ladir/$objdir"
+ absdir="$abs_ladir/$objdir"
+ # Remove this search path later
+ notinst_path="$notinst_path $abs_ladir"
+ fi # $installed = yes
+ name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+
+ # This library was specified with -dlpreopen.
+ if test $pass = dlpreopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ newdlprefiles="$newdlprefiles $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ newdlprefiles="$newdlprefiles $dir/$dlname"
+ else
+ newdlprefiles="$newdlprefiles $dir/$linklib"
+ fi
+ fi # $pass = dlpreopen
+
+ if test -z "$libdir"; then
+ # Link the convenience library
+ if test $linkmode = lib; then
+ deplibs="$dir/$old_library $deplibs"
+ elif test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$dir/$old_library $compile_deplibs"
+ finalize_deplibs="$dir/$old_library $finalize_deplibs"
+ else
+ deplibs="$lib $deplibs"
+ fi
+ continue
+ fi
+
+ if test $linkmode = prog && test $pass != link; then
+ newlib_search_path="$newlib_search_path $ladir"
+ deplibs="$lib $deplibs"
+
+ linkalldeplibs=no
+ if test "$link_all_deplibs" != no || test -z "$library_names" ||
+ test "$build_libtool_libs" = no; then
+ linkalldeplibs=yes
+ fi
+
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
+ esac
+ # Need to link against all dependency_libs?
+ if test $linkalldeplibs = yes; then
+ deplibs="$deplib $deplibs"
+ else
+ # Need to hardcode shared library paths
+	      # and/or link against static libraries
+ newdependency_libs="$deplib $newdependency_libs"
+ fi
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done # for deplib
+ continue
+ fi # $linkmode = prog...
+
+ link_static=no # Whether the deplib will be linked statically
+ if test -n "$library_names" &&
+ { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
+ # Link against this shared library
+
+ if test "$linkmode,$pass" = "prog,link" ||
+ { test $linkmode = lib && test $hardcode_into_libs = yes; }; then
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
+ esac
+ if test $linkmode = prog; then
+ # We need to hardcode the library path
+ if test -n "$shlibpath_var"; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath " in
+ *" $dir "*) ;;
+ *" $absdir "*) ;;
+ *) temp_rpath="$temp_rpath $dir" ;;
+ esac
+ fi
+ fi
+ fi # $linkmode,$pass = prog,link...
+
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+
+ if test "$installed" = no; then
+ notinst_deplibs="$notinst_deplibs $lib"
+ need_relink=yes
+ fi
+
+ if test -n "$old_archive_from_expsyms_cmds"; then
+ # figure out the soname
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+ libname=`eval \\$echo \"$libname_spec\"`
+ # use dlname if we got it. it's perfectly good, no?
+ if test -n "$dlname"; then
+ soname="$dlname"
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
+ *cygwin*)
+ major=`expr $current - $age`
+ versuffix="-$major"
+ ;;
+ esac
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ # Make a new name for the extract_expsyms_cmds to use
+ soroot="$soname"
+ soname=`echo $soroot | sed -e 's/^.*\///'`
+ newlib="libimp-`echo $soname | sed 's/^lib//;s/\.dll$//'`.a"
+
+ # If the library has no export list, then create one now
+ if test -f "$output_objdir/$soname-def"; then :
+ else
+ $show "extracting exported symbol list from \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$extract_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Create $newlib
+ if test -f "$output_objdir/$newlib"; then :; else
+ $show "generating import library for \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$old_archive_from_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+ # make sure the library variables are pointing to the new library
+ dir=$output_objdir
+ linklib=$newlib
+ fi # test -n $old_archive_from_expsyms_cmds
+
+ if test $linkmode = prog || test "$mode" != relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ lib_linked=yes
+ case $hardcode_action in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = no; then
+ case $host in
+ *-*-sunos*) add_shlibpath="$dir" ;;
+ esac
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ relink)
+ if test "$hardcode_direct" = yes; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ *) lib_linked=no ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ $echo "$modename: configuration error: unsupported hardcode properties"
+ exit 1
+ fi
+
+ if test -n "$add_shlibpath"; then
+ case :$compile_shlibpath: in
+ *":$add_shlibpath:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
+ esac
+ fi
+ if test $linkmode = prog; then
+ test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+ test -n "$add" && compile_deplibs="$add $compile_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ if test "$hardcode_direct" != yes && \
+ test "$hardcode_minus_L" != yes && \
+ test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ fi
+ fi
+ fi
+
+ if test $linkmode = prog || test "$mode" = relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ # Finalize command for both is simple: just hardcode it.
+ if test "$hardcode_direct" = yes; then
+ add="$libdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$libdir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ add="-l$name"
+ else
+ # We cannot seem to hardcode it, guess we'll fake it.
+ add_dir="-L$libdir"
+ add="-l$name"
+ fi
+
+ if test $linkmode = prog; then
+ test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+ test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ fi
+ fi
+ elif test $linkmode = prog; then
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+
+ # Try to link the static library
+ # Here we assume that one of hardcode_direct or hardcode_minus_L
+ # is not unsupported. This is valid on all known static and
+ # shared platforms.
+ if test "$hardcode_direct" != unsupported; then
+ test -n "$old_library" && linklib="$old_library"
+ compile_deplibs="$dir/$linklib $compile_deplibs"
+ finalize_deplibs="$dir/$linklib $finalize_deplibs"
+ else
+ compile_deplibs="-l$name -L$dir $compile_deplibs"
+ finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+ fi
+ elif test "$build_libtool_libs" = yes; then
+ # Not a shared library
+ if test "$deplibs_check_method" != pass_all; then
+	  # We're trying to link a shared library against a static one
+ # but the system doesn't support it.
+
+ # Just print a warning and add the library to dependency_libs so
+ # that the program can be linked against the static library.
+ echo
+ echo "*** Warning: This library needs some functionality provided by $lib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ if test "$module" = yes; then
+	    echo "*** Therefore, libtool will create a static module, which should work "
+ echo "*** as long as the dlopening application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ else
+ convenience="$convenience $dir/$old_library"
+ old_convenience="$old_convenience $dir/$old_library"
+ deplibs="$dir/$old_library $deplibs"
+ link_static=yes
+ fi
+ fi # link shared/static library?
+
+ if test $linkmode = lib; then
+ if test -n "$dependency_libs" &&
+ { test $hardcode_into_libs != yes || test $build_old_libs = yes ||
+ test $link_static = yes; }; then
+ # Extract -R from dependency_libs
+ temp_deplibs=
+ for libdir in $dependency_libs; do
+ case $libdir in
+ -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
+ case " $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) xrpath="$xrpath $temp_xrpath";;
+ esac;;
+ *) temp_deplibs="$temp_deplibs $libdir";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
+ fi
+
+ newlib_search_path="$newlib_search_path $absdir"
+ # Link against this library
+ test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+ # ... and its dependency_libs
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ newdependency_libs="$deplib $newdependency_libs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
+
+ if test $link_all_deplibs != no; then
+ # Add the search paths of all dependency libraries
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) path="$deplib" ;;
+ *.la)
+ dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$deplib" && dir="."
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
+ absdir="$dir"
+ fi
+ ;;
+ esac
+ if grep "^installed=no" $deplib > /dev/null; then
+ path="-L$absdir/$objdir"
+ else
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ if test "$absdir" != "$libdir"; then
+ $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
+ fi
+ path="-L$absdir"
+ fi
+ ;;
+ *) continue ;;
+ esac
+ case " $deplibs " in
+ *" $path "*) ;;
+ *) deplibs="$deplibs $path" ;;
+ esac
+ done
+ fi # link_all_deplibs != no
+ fi # linkmode = lib
+ done # for deplib in $libs
+ if test $pass = dlpreopen; then
+ # Link the dlpreopened libraries before other libraries
+ for deplib in $save_deplibs; do
+ deplibs="$deplib $deplibs"
+ done
+ fi
+ if test $pass != dlopen; then
+ test $pass != scan && dependency_libs="$newdependency_libs"
+ if test $pass != conv; then
+ # Make sure lib_search_path contains only unique directories.
+ lib_search_path=
+ for dir in $newlib_search_path; do
+ case "$lib_search_path " in
+ *" $dir "*) ;;
+ *) lib_search_path="$lib_search_path $dir" ;;
+ esac
+ done
+ newlib_search_path=
+ fi
+
+ if test "$linkmode,$pass" != "prog,link"; then
+ vars="deplibs"
+ else
+ vars="compile_deplibs finalize_deplibs"
+ fi
+ for var in $vars dependency_libs; do
+ # Add libraries to $var in reverse order
+ eval tmp_libs=\"\$$var\"
+ new_libs=
+ for deplib in $tmp_libs; do
+ case $deplib in
+ -L*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $specialdeplibs " in
+ *" $deplib "*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$deplib $new_libs" ;;
+ esac
+ ;;
+ esac
+ ;;
+ esac
+ done
+ tmp_libs=
+ for deplib in $new_libs; do
+ case $deplib in
+ -L*)
+ case " $tmp_libs " in
+ *" $deplib "*) ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ done
+ eval $var=\"$tmp_libs\"
+ done # for var
+ fi
+ if test "$pass" = "conv" &&
+ { test "$linkmode" = "lib" || test "$linkmode" = "prog"; }; then
+ libs="$deplibs" # reset libs
+ deplibs=
+ fi
+ done # for pass
+ if test $linkmode = prog; then
+ dlfiles="$newdlfiles"
+ dlprefiles="$newdlprefiles"
+ fi
+
+ case $linkmode in
+ oldlib)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
+ fi
+
+ # Now set the variables for building old libraries.
+ build_libtool_libs=no
+ oldlibs="$output"
+ objs="$objs$old_deplibs"
+ ;;
+
+ lib)
+ # Make sure we only generate libraries of the form `libNAME.la'.
+ case $outputname in
+ lib*)
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+ eval libname=\"$libname_spec\"
+ ;;
+ *)
+ if test "$module" = no; then
+ $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ if test "$need_lib_prefix" != no; then
+ # Add the "lib" prefix for modules if required
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ eval libname=\"$libname_spec\"
+ else
+ libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ fi
+ ;;
+ esac
+
+ if test -n "$objs"; then
+ if test "$deplibs_check_method" != pass_all; then
+	  $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 1>&2
+ exit 1
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the non-libtool"
+ echo "*** objects $objs is not portable!"
+ libobjs="$libobjs $objs"
+ fi
+ fi
+
+ if test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
+ fi
+
+ set dummy $rpath
+ if test $# -gt 2; then
+ $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
+ fi
+ install_libdir="$2"
+
+ oldlibs=
+ if test -z "$rpath"; then
+ if test "$build_libtool_libs" = yes; then
+ # Building a libtool convenience library.
+ libext=al
+ oldlibs="$output_objdir/$libname.$libext $oldlibs"
+ build_libtool_libs=convenience
+ build_old_libs=yes
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
+ fi
+ else
+
+ # Parse the version information argument.
+ save_ifs="$IFS"; IFS=':'
+ set dummy $vinfo 0 0 0
+ IFS="$save_ifs"
+
+ if test -n "$8"; then
+ $echo "$modename: too many parameters to \`-version-info'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ current="$2"
+ revision="$3"
+ age="$4"
+
+ # Check that each of the things are valid numbers.
+ case $current in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case $revision in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case $age in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test $age -gt $current; then
+ $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ fi
+
+ # Calculate the version variables.
+ major=
+ versuffix=
+ verstring=
+ case $version_type in
+ none) ;;
+
+ darwin)
+ # Like Linux, but with the current version available in
+ # verstring for coding it into the library header
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
+ # Darwin ld doesn't like 0 for these options...
+ minor_current=`expr $current + 1`
+ verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current";
+ ;;
+
+ irix)
+ major=`expr $current - $age + 1`
+ verstring="sgi$major.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$revision
+ while test $loop != 0; do
+ iface=`expr $revision - $loop`
+ loop=`expr $loop - 1`
+ verstring="sgi$major.$iface:$verstring"
+ done
+
+ # Before this point, $major must not contain `.'.
+ major=.$major
+ versuffix="$major.$revision"
+ ;;
+
+ linux)
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
+ ;;
+
+ osf)
+ major=`expr $current - $age`
+ versuffix=".$current.$age.$revision"
+ verstring="$current.$age.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$age
+ while test $loop != 0; do
+ iface=`expr $current - $loop`
+ loop=`expr $loop - 1`
+ verstring="$verstring:${iface}.0"
+ done
+
+ # Make executables depend on our current version.
+ verstring="$verstring:${current}.0"
+ ;;
+
+ sunos)
+ major=".$current"
+ versuffix=".$current.$revision"
+ ;;
+
+ windows)
+ # Use '-' rather than '.', since we only want one
+ # extension on DOS 8.3 filesystems.
+ major=`expr $current - $age`
+ versuffix="-$major"
+ ;;
+
+ *)
+ $echo "$modename: unknown library version type \`$version_type'" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Clear the version info if we defaulted, and they specified a release.
+ if test -z "$vinfo" && test -n "$release"; then
+ major=
+ verstring="0.0"
+ case $version_type in
+ darwin)
+ # we can't check for "0.0" in archive_cmds due to quoting
+ # problems, so we reset it completely
+ verstring=""
+ ;;
+ *)
+ verstring="0.0"
+ ;;
+ esac
+ if test "$need_version" = no; then
+ versuffix=
+ else
+ versuffix=".0.0"
+ fi
+ fi
+
+ # Remove version info from name if versioning should be avoided
+ if test "$avoid_version" = yes && test "$need_version" = no; then
+ major=
+ versuffix=
+ verstring=""
+ fi
+
+ # Check to see if the archive will have undefined symbols.
+ if test "$allow_undefined" = yes; then
+ if test "$allow_undefined_flag" = unsupported; then
+ $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
+ build_libtool_libs=no
+ build_old_libs=yes
+ fi
+ else
+ # Don't allow undefined symbols.
+ allow_undefined_flag="$no_undefined_flag"
+ fi
+ fi
+
+ if test "$mode" != relink; then
+ # Remove our outputs.
+ $show "${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*"
+ $run ${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*
+ fi
+
+ # Now set the variables for building old libraries.
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+ oldlibs="$oldlibs $output_objdir/$libname.$libext"
+
+ # Transform .lo files to .o files.
+ oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
+ fi
+
+ # Eliminate all temporary directories.
+ for path in $notinst_path; do
+	lib_search_path=`echo "$lib_search_path " | sed -e "s% $path % %g"`
+	deplibs=`echo "$deplibs " | sed -e "s% -L$path % %g"`
+	dependency_libs=`echo "$dependency_libs " | sed -e "s% -L$path % %g"`
+ done
+
+ if test -n "$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ temp_xrpath=
+ for libdir in $xrpath; do
+ temp_xrpath="$temp_xrpath -R$libdir"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ if test $hardcode_into_libs != yes || test $build_old_libs = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) dlfiles="$dlfiles $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) dlprefiles="$dlprefiles $lib" ;;
+ esac
+ done
+
+ if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
+	  # these systems don't actually have a C library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ deplibs="$deplibs -framework System"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ #### Local change for Sleepycat's Berkeley DB [#2380]:
+ # FreeBSD, like OpenBSD, uses libc/libc_r and should not
+ # link against libc/c_r explicitly; the -pthread linker flag
+ # implicitly controls use of -lc and -lc_r.
+ *-*-freebsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+	  if test "$build_libtool_need_lc" = "yes"; then
+ deplibs="$deplibs -lc"
+ fi
+ ;;
+ esac
+ fi
+
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+ # I'm not sure if I'm treating the release correctly. I think
+ # release should show up in the -l (ie -lgmp5) so we don't want to
+ # add it in twice. Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
+ case $deplibs_check_method in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behaviour.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $rm conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+EOF
+ $rm conftest
+ $CC -o conftest conftest.c $deplibs
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ else
+	  # Error occurred in the first compile.  Let's try to salvage the situation:
+	  # Compile a separate program for each library.
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ $rm conftest
+ $CC -o conftest conftest.c $i
+ # Did it work?
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning! Library $i is needed by this library but I was not able to"
+ echo "*** make it link in! You will probably need to install it or some"
+ echo "*** library that it depends on before this library will be fully"
+ echo "*** functional. Installing it before continuing would be even better."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ fi
+ ;;
+ file_magic*)
+ set dummy $deplibs_check_method
+ file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null \
+ | grep " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib"
+ while test -h "$potlib" 2>/dev/null; do
+ potliblink=`ls -ld $potlib | sed 's/.* -> //'`
+ case $potliblink in
+ [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+ *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
+ esac
+ done
+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$file_magic_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
+ match_pattern*)
+ set dummy $deplibs_check_method
+ match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ if eval echo \"$potent_lib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$match_pattern_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
+ none | unknown | *)
+ newdeplibs=""
+ if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
+ -e 's/ -[LR][^ ]*//g' -e 's/[ ]//g' |
+ grep . >/dev/null; then
+ echo
+ if test "X$deplibs_check_method" = "Xnone"; then
+	    echo "*** Warning: inter-library dependencies are not supported on this platform."
+ else
+ echo "*** Warning: inter-library dependencies are not known to be supported."
+ fi
+ echo "*** All declared inter-library dependencies are being dropped."
+ droppeddeps=yes
+ fi
+ ;;
+ esac
+ versuffix=$versuffix_save
+ major=$major_save
+ release=$release_save
+ libname=$libname_save
+ name=$name_save
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+	# On Rhapsody, replace the C library with the System framework
+ newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
+ if test "$droppeddeps" = yes; then
+ if test "$module" = yes; then
+ echo
+ echo "*** Warning: libtool could not satisfy all declared inter-library"
+ echo "*** dependencies of module $libname. Therefore, libtool will create"
+	  echo "*** a static module, which should work as long as the dlopening"
+ echo "*** application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ else
+ echo "*** The inter-library dependencies that have been dropped here will be"
+ echo "*** automatically added whenever a program is linked with this library"
+ echo "*** or is declared to -dlopen it."
+
+ if test $allow_undefined = no; then
+ echo
+ echo "*** Since this library must not contain undefined symbols,"
+ echo "*** because either the platform does not support them or"
+ echo "*** it was explicitly requested with -no-undefined,"
+ echo "*** libtool will only create a static version of it."
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ fi
+ fi
+ # Done checking deplibs!
+ deplibs=$newdeplibs
+ fi
+
+ # All the library-specific variables (install_libdir is set above).
+ library_names=
+ old_library=
+ dlname=
+
+ # Test again, we may have decided not to build it any more
+ if test "$build_libtool_libs" = yes; then
+ if test $hardcode_into_libs = yes; then
+ # Hardcode the library paths
+ hardcode_libdirs=
+ dep_rpath=
+ rpath="$finalize_rpath"
+ test "$mode" != relink && rpath="$compile_rpath$rpath"
+ for libdir in $rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ dep_rpath="$dep_rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval dep_rpath=\"$hardcode_libdir_flag_spec\"
+ fi
+ if test -n "$runpath_var" && test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+ fi
+ test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+ fi
+
+ shlibpath="$finalize_shlibpath"
+ test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ if test -n "$shlibpath"; then
+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+ fi
+
+ # Get the real and link names of the library.
+ eval library_names=\"$library_names_spec\"
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+
+ if test -n "$soname_spec"; then
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+ test -z "$dlname" && dlname=$soname
+
+ lib="$output_objdir/$realname"
+ for link
+ do
+ linknames="$linknames $link"
+ done
+
+ # Ensure that we have .o objects for linkers which dislike .lo
+	# (e.g. AIX) in case we are running --disable-static
+ for obj in $libobjs; do
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ if test ! -f $xdir/$oldobj; then
+ $show "(cd $xdir && ${LN_S} $baseobj $oldobj)"
+ $run eval '(cd $xdir && ${LN_S} $baseobj $oldobj)' || exit $?
+ fi
+ done
+
+ # Use standard objects if they are pic
+ test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+ $show "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $run $rm $export_symbols
+ eval cmds=\"$export_symbols_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ if test -n "$export_symbols_regex"; then
+ $show "egrep -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
+ $run eval 'egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ $show "$mv \"${export_symbols}T\" \"$export_symbols\""
+ $run eval '$mv "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+ fi
+
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
+ fi
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ libobjs="$libobjs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+ eval flag=\"$thread_safe_flag_spec\"
+ linker_flags="$linker_flags $flag"
+ fi
+
+ # Make a backup of the uninstalled library when relinking
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
+ fi
+
+ # Do each of the archive commands.
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ eval cmds=\"$archive_expsym_cmds\"
+ else
+ eval cmds=\"$archive_cmds\"
+ fi
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
+ exit 0
+ fi
+
+ # Create links to the real library.
+ for linkname in $linknames; do
+ if test "$realname" != "$linkname"; then
+ $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
+ fi
+ done
+
+ # If -module or -export-dynamic was specified, set the dlname.
+ if test "$module" = yes || test "$export_dynamic" = yes; then
+ # On all known operating systems, these are identical.
+ dlname="$soname"
+ fi
+ fi
+ ;;
+
+ obj)
+ if test -n "$deplibs"; then
+ $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
+ fi
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for objects" 1>&2
+ fi
+
+ case $output in
+ *.lo)
+ if test -n "$objs$old_deplibs"; then
+ $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
+ exit 1
+ fi
+ libobj="$output"
+ obj=`$echo "X$output" | $Xsed -e "$lo2o"`
+ ;;
+ *)
+ libobj=
+ obj="$output"
+ ;;
+ esac
+
+ # Delete the old objects.
+ $run $rm $obj $libobj
+
+ # Objects from convenience libraries. This assumes
+ # single-version convenience libraries. Whenever we create
+      # different ones for PIC/non-PIC, we'll have to duplicate
+ # the extraction.
+ reload_conv_objs=
+ gentop=
+ # reload_cmds runs $LD directly, so let us get rid of
+ # -Wl from whole_archive_flag_spec
+ wl=
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${obj}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ reload_conv_objs="$reload_objs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ # Create the old-style object.
+ reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+
+ output="$obj"
+ eval cmds=\"$reload_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ fi
+
+ if test "$build_libtool_libs" != yes; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > $libobj" || exit $?
+ exit 0
+ fi
+
+ if test -n "$pic_flag" || test "$pic_mode" != default; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output="$libobj"
+ eval cmds=\"$reload_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ else
+ # Just create a symlink.
+ $show $rm $libobj
+ $run $rm $libobj
+ xdir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$libobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ $show "(cd $xdir && $LN_S $oldobj $baseobj)"
+ $run eval '(cd $xdir && $LN_S $oldobj $baseobj)' || exit $?
+ fi
+
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ ;;
+
+ prog)
+ case $host in
+ *cygwin*) output=`echo $output | sed -e 's,.exe$,,;s,$,.exe,'` ;;
+ esac
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for programs" 1>&2
+ fi
+
+ if test "$preload" = yes; then
+ if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
+ test "$dlopen_self_static" = unknown; then
+ $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
+ fi
+ fi
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+	# On Rhapsody, replace the C library with the System framework
+ compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
+ compile_command="$compile_command $compile_deplibs"
+ finalize_command="$finalize_command $finalize_deplibs"
+
+ if test -n "$rpath$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ for libdir in $rpath $xrpath; do
+ # This is the magic to use -rpath.
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ fi
+
+ # Now hardcode the library paths
+ rpath=
+ hardcode_libdirs=
+ for libdir in $compile_rpath $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$libdir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$libdir";;
+ esac
+ ;;
+ esac
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ compile_rpath="$rpath"
+
+ rpath=
+ hardcode_libdirs=
+ for libdir in $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$finalize_perm_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ finalize_rpath="$rpath"
+
+ if test -n "$libobjs" && test "$build_old_libs" = yes; then
+ # Transform all the library objects into standard objects.
+ compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ fi
+
+ dlsyms=
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ dlsyms="${outputname}S.c"
+ else
+ $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
+ fi
+ fi
+
+ if test -n "$dlsyms"; then
+ case $dlsyms in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${outputname}.nm"
+
+ $show "$rm $nlist ${nlist}S ${nlist}T"
+ $run $rm "$nlist" "${nlist}S" "${nlist}T"
+
+ # Parse the name list into a source file.
+ $show "creating $output_objdir/$dlsyms"
+
+ test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
+/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
+/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+/* Prevent the only kind of declaration conflicts we can make. */
+#define lt_preloaded_symbols some_other_symbol
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ $show "generating symbol list for \`$output'"
+
+ test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ for arg in $progfiles; do
+ $show "extracting global C symbols from \`$arg'"
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$output.exp"
+ $run $rm $export_symbols
+ $run eval "sed -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ else
+ $run eval "sed -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
+ $run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
+ $run eval 'mv "$nlist"T "$nlist"'
+ fi
+ fi
+
+ for arg in $dlprefiles; do
+ $show "extracting global C symbols from \`$arg'"
+ name=`echo "$arg" | sed -e 's%^.*/%%'`
+ $run eval 'echo ": $name " >> "$nlist"'
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -z "$run"; then
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $mv "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if grep -v "^: " < "$nlist" | sort +2 | uniq > "$nlist"S; then
+ :
+ else
+ grep -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
+ else
+ echo '/* NONE */' >> "$output_objdir/$dlsyms"
+ fi
+
+ $echo >> "$output_objdir/$dlsyms" "\
+
+#undef lt_preloaded_symbols
+
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[] =
+{\
+"
+
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
+
+ $echo >> "$output_objdir/$dlsyms" "\
+ {0, (lt_ptr) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ fi
+
+ pic_flag_for_symtable=
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. But we must not use
+ # pic_flag when linking with -static. The problem exists in
+ # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC -DFREEBSD_WORKAROUND";;
+ esac;;
+ *-*-hpux*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC";;
+ esac
+ esac
+
+ # Now compile the dynamic symbol file.
+ $show "(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
+ $run eval '(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
+
+ # Clean up the generated files.
+ $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
+ $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
+
+ # Transform the symbol file into the correct name.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ ;;
+ *)
+ $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
+ exit 1
+ ;;
+ esac
+ else
+ # We keep going just in case the user didn't refer to
+ # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
+ # really was required.
+
+ # Nullify the symbol file.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
+ fi
+
+ if test $need_relink = no || test "$build_libtool_libs" != yes; then
+ # Replace the output file specification.
+ compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ link_command="$compile_command$compile_rpath"
+
+ # We have no uninstalled library dependencies, so finalize right now.
+ $show "$link_command"
+ $run eval "$link_command"
+ status=$?
+
+ # Delete the generated files.
+ if test -n "$dlsyms"; then
+ $show "$rm $output_objdir/${outputname}S.${objext}"
+ $run $rm "$output_objdir/${outputname}S.${objext}"
+ fi
+
+ exit $status
+ fi
+
+ if test -n "$shlibpath_var"; then
+ # We should set the shlibpath_var
+ rpath=
+ for dir in $temp_rpath; do
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*)
+ # Absolute path.
+ rpath="$rpath$dir:"
+ ;;
+ *)
+ # Relative path: add a thisdir entry.
+ rpath="$rpath\$thisdir/$dir:"
+ ;;
+ esac
+ done
+ temp_rpath="$rpath"
+ fi
+
+ if test -n "$compile_shlibpath$finalize_shlibpath"; then
+ compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+ fi
+ if test -n "$finalize_shlibpath"; then
+ finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+ fi
+
+ compile_var=
+ finalize_var=
+ if test -n "$runpath_var"; then
+ if test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ if test -n "$finalize_perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $finalize_perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ fi
+
+ if test "$no_install" = yes; then
+ # We don't need to create a wrapper script.
+ link_command="$compile_var$compile_command$compile_rpath"
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ # Delete the old output file.
+ $run $rm $output
+ # Link the executable and exit
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+ exit 0
+ fi
+
+ if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+ $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
+ $echo "$modename: \`$output' will be relinked during installation" 1>&2
+ else
+ if test "$fast_install" != no; then
+ link_command="$finalize_var$compile_command$finalize_rpath"
+ if test "$fast_install" = yes; then
+ relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
+ else
+ # fast_install is set to needless
+ relink_command=
+ fi
+ else
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+ fi
+ fi
+
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+ # Delete the old output files.
+ $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+
+ # Now create the wrapper script.
+ $show "creating $output"
+
+ # Quote the relink command for shipping.
+ if test -n "$relink_command"; then
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ relink_command="cd `pwd`; $relink_command"
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Quote $echo for shipping.
+ if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
+ case $0 in
+ [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
+ *) qecho="$SHELL `pwd`/$0 --fallback-echo";;
+ esac
+ qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
+ else
+ qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if our run command is non-null.
+ if test -z "$run"; then
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) output=`echo $output|sed 's,.exe$,,'` ;;
+ esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*) exeext=.exe ;;
+ *) exeext= ;;
+ esac
+ $rm $output
+ trap "$rm $output; exit 1" 1 2 15
+
+ $echo > $output "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='$sed_quote_subst'
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+ # install mode needs the following variable:
+ notinst_deplibs='$notinst_deplibs'
+else
+ # When we are sourced in execute mode, \$file and \$echo are already set.
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ echo=\"$qecho\"
+ file=\"\$0\"
+ # Make sure echo works.
+ if test \"X\$1\" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+ elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
+ # Yippee, \$echo works!
+ :
+ else
+ # Restart under the correct shell, and then maybe \$echo will work.
+ exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
+ fi
+ fi\
+"
+ $echo >> $output "\
+
+ # Find the directory that this script lives in.
+ thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
+ test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+ # Follow symbolic links until we get to the real thisdir.
+ file=\`ls -ld \"\$file\" | sed -n 's/.*-> //p'\`
+ while test -n \"\$file\"; do
+ destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
+
+ # If there was a directory component, then change thisdir.
+ if test \"x\$destdir\" != \"x\$file\"; then
+ case \"\$destdir\" in
+ [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+ *) thisdir=\"\$thisdir/\$destdir\" ;;
+ esac
+ fi
+
+ file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | sed -n 's/.*-> //p'\`
+ done
+
+ # Try to get the absolute directory name.
+ absdir=\`cd \"\$thisdir\" && pwd\`
+ test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+ if test "$fast_install" = yes; then
+ echo >> $output "\
+ program=lt-'$outputname'$exeext
+ progdir=\"\$thisdir/$objdir\"
+
+ if test ! -f \"\$progdir/\$program\" || \\
+ { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
+ test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+ file=\"\$\$-\$program\"
+
+ if test ! -d \"\$progdir\"; then
+ $mkdir \"\$progdir\"
+ else
+ $rm \"\$progdir/\$file\"
+ fi"
+
+ echo >> $output "\
+
+ # relink executable if necessary
+ if test -n \"\$relink_command\"; then
+ if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+ else
+ $echo \"\$relink_command_output\" >&2
+ $rm \"\$progdir/\$file\"
+ exit 1
+ fi
+ fi
+
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+ { $rm \"\$progdir/\$program\";
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+ $rm \"\$progdir/\$file\"
+ fi"
+ else
+ echo >> $output "\
+ program='$outputname'
+ progdir=\"\$thisdir/$objdir\"
+"
+ fi
+
+ echo >> $output "\
+
+ if test -f \"\$progdir/\$program\"; then"
+
+ # Export our shlibpath_var if we have one.
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ $echo >> $output "\
+ # Add our own library path to $shlibpath_var
+ $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+ # Some systems cannot cope with colon-terminated $shlibpath_var
+ # The second colon is a workaround for a bug in BeOS R4 sed
+ $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
+
+ export $shlibpath_var
+"
+ fi
+
+ # fixup the dll searchpath if we need to.
+ if test -n "$dllsearchpath"; then
+ $echo >> $output "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
+ $echo >> $output "\
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ # Run the actual program with our arguments.
+"
+ case $host in
+ # win32 systems need to use the prog path for dll
+ # lookup to work
+ *-*-cygwin* | *-*-pw32*)
+ $echo >> $output "\
+ exec \$progdir/\$program \${1+\"\$@\"}
+"
+ ;;
+
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2*)
+ $echo >> $output "\
+ exec \$progdir\\\\\$program \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $echo >> $output "\
+ # Export the path to the program.
+ PATH=\"\$progdir:\$PATH\"
+ export PATH
+
+ exec \$program \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $echo >> $output "\
+ \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
+ exit 1
+ fi
+ else
+ # The program doesn't exist.
+ \$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
+ \$echo \"This script is just a wrapper for \$program.\" 1>&2
+ echo \"See the $PACKAGE documentation for more information.\" 1>&2
+ exit 1
+ fi
+fi\
+"
+ chmod +x $output
+ fi
+ exit 0
+ ;;
+ esac
+
+ # See if we need to build an old-fashioned archive.
+ for oldlib in $oldlibs; do
+
+ if test "$build_libtool_libs" = convenience; then
+ oldobjs="$libobjs_save"
+ addlibs="$convenience"
+ build_libtool_libs=no
+ else
+ if test "$build_libtool_libs" = module; then
+ oldobjs="$libobjs_save"
+ build_libtool_libs=no
+ else
+ oldobjs="$objs$old_deplibs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
+ fi
+ addlibs="$old_convenience"
+ fi
+
+ if test -n "$addlibs"; then
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ # Add in members from convenience archives.
+ for xlib in $addlibs; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+
+ # Do each command in the archive commands.
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+ eval cmds=\"$old_archive_from_new_cmds\"
+ else
+ # Ensure that we have .o objects in place in case we decided
+ # not to build a shared library, and have fallen back to building
+ # static libs even though --disable-static was passed!
+ for oldobj in $oldobjs; do
+ if test ! -f $oldobj; then
+ xdir=`$echo "X$oldobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$oldobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$oldobj" | $Xsed -e 's%^.*/%%'`
+ obj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ $show "(cd $xdir && ${LN_S} $obj $baseobj)"
+ $run eval '(cd $xdir && ${LN_S} $obj $baseobj)' || exit $?
+ fi
+ done
+
+ eval cmds=\"$old_archive_cmds\"
+ fi
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$generated"; then
+ $show "${rm}r$generated"
+ $run ${rm}r$generated
+ fi
+
+ # Now create the libtool archive.
+ case $output in
+ *.la)
+ old_library=
+ test "$build_old_libs" = yes && old_library="$libname.$libext"
+ $show "creating $output"
+
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ # Quote the link command for shipping.
+ relink_command="cd `pwd`; $SHELL $0 --mode=relink $libtool_args"
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+
+ # Only create the output if not a dry run.
+ if test -z "$run"; then
+ for installed in no yes; do
+ if test "$installed" = yes; then
+ if test -z "$install_libdir"; then
+ break
+ fi
+ output="$output_objdir/$outputname"i
+ # Replace all uninstalled libtool libraries with the installed ones
+ newdependency_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ *.la)
+ name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdependency_libs="$newdependency_libs $libdir/$name"
+ ;;
+ *) newdependency_libs="$newdependency_libs $deplib" ;;
+ esac
+ done
+ dependency_libs="$newdependency_libs"
+ newdlfiles=
+ for lib in $dlfiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlfiles="$newdlfiles $libdir/$name"
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlprefiles="$newdlprefiles $libdir/$name"
+ done
+ dlprefiles="$newdlprefiles"
+ fi
+ $rm $output
+ # place dlname in correct position for cygwin
+ tdlname=$dlname
+ case $host,$output,$installed,$module,$dlname in
+ *cygwin*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
+ esac
+ $echo > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+ if test "$installed" = no && test $need_relink = yes; then
+ $echo >> $output "\
+relink_command=\"$relink_command\""
+ fi
+ done
+ fi
+
+ # Do a symbolic link so that the libtool archive can be found in
+ # LD_LIBRARY_PATH before the program is installed.
+ $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
+ $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
+ ;;
+ esac
+ exit 0
+ ;;
+
+ # libtool install mode
+ install)
+ modename="$modename: install"
+
+ # There may be an optional sh(1) argument at the beginning of
+ # install_prog (especially on Windows NT).
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+ # Allow the use of GNU shtool's install command.
+ $echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
+ # Aesthetically quote it.
+ arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$arg "
+ arg="$1"
+ shift
+ else
+ install_prog=
+ arg="$nonopt"
+ fi
+
+ # The real first argument should be the name of the installation program.
+ # Aesthetically quote it.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog$arg"
+
+ # We need to accept at least all the BSD install flags.
+ dest=
+ files=
+ opts=
+ prev=
+ install_type=
+ isdir=no
+ stripme=
+ for arg
+ do
+ if test -n "$dest"; then
+ files="$files $dest"
+ dest="$arg"
+ continue
+ fi
+
+ case $arg in
+ -d) isdir=yes ;;
+ -f) prev="-f" ;;
+ -g) prev="-g" ;;
+ -m) prev="-m" ;;
+ -o) prev="-o" ;;
+ -s)
+ stripme=" -s"
+ continue
+ ;;
+ -*) ;;
+
+ *)
+ # If the previous option needed an argument, then skip it.
+ if test -n "$prev"; then
+ prev=
+ else
+ dest="$arg"
+ continue
+ fi
+ ;;
+ esac
+
+ # Aesthetically quote the argument.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog $arg"
+ done
+
+ if test -z "$install_prog"; then
+ $echo "$modename: you must specify an install program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prev' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -z "$files"; then
+ if test -z "$dest"; then
+ $echo "$modename: no file or destination specified" 1>&2
+ else
+ $echo "$modename: you must specify a destination" 1>&2
+ fi
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Strip any trailing slash from the destination.
+ dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
+
+ # Check to see that the destination is a directory.
+ test -d "$dest" && isdir=yes
+ if test "$isdir" = yes; then
+ destdir="$dest"
+ destname=
+ else
+ destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$destdir" = "X$dest" && destdir=.
+ destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
+
+ # Not a directory, so check to see that there is only one file specified.
+ set dummy $files
+ if test $# -gt 2; then
+ $echo "$modename: \`$dest' is not a directory" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ fi
+ case $destdir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ for file in $files; do
+ case $file in
+ *.lo) ;;
+ *)
+ $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ staticlibs=
+ future_libdirs=
+ current_libdirs=
+ for file in $files; do
+
+ # Do each installation.
+ case $file in
+ *.$libext)
+ # Do the static libraries later.
+ staticlibs="$staticlibs $file"
+ ;;
+
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$file' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ library_names=
+ old_library=
+ relink_command=
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Add the libdir to current_libdirs if it is the destination.
+ if test "X$destdir" = "X$libdir"; then
+ case "$current_libdirs " in
+ *" $libdir "*) ;;
+ *) current_libdirs="$current_libdirs $libdir" ;;
+ esac
+ else
+ # Note the libdir as a future libdir.
+ case "$future_libdirs " in
+ *" $libdir "*) ;;
+ *) future_libdirs="$future_libdirs $libdir" ;;
+ esac
+ fi
+
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
+ test "X$dir" = "X$file/" && dir=
+ dir="$dir$objdir"
+
+ if test -n "$relink_command"; then
+ $echo "$modename: warning: relinking \`$file'" 1>&2
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ continue
+ fi
+ fi
+
+ # See the names of the shared library.
+ set dummy $library_names
+ if test -n "$2"; then
+ realname="$2"
+ shift
+ shift
+
+ srcname="$realname"
+ test -n "$relink_command" && srcname="$realname"T
+
+ # Install the shared library and build the symlinks.
+ $show "$install_prog $dir/$srcname $destdir/$realname"
+ $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$striplib $destdir/$realname"
+ $run eval "$striplib $destdir/$realname" || exit $?
+ fi
+
+ if test $# -gt 0; then
+ # Delete the old symlinks, and create new ones.
+ for linkname
+ do
+ if test "$linkname" != "$realname"; then
+ $show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ fi
+ done
+ fi
+
+ # Do each command in the postinstall commands.
+ lib="$destdir/$realname"
+ eval cmds=\"$postinstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Install the pseudo-library for information purposes.
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ instname="$dir/$name"i
+ $show "$install_prog $instname $destdir/$name"
+ $run eval "$install_prog $instname $destdir/$name" || exit $?
+
+ # Maybe install the static library, too.
+ test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
+ ;;
+
+ *.lo)
+ # Install (i.e. copy) a libtool object.
+
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Deduce the name of the destination old-style object file.
+ case $destfile in
+ *.lo)
+ staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
+ ;;
+ *.$objext)
+ staticdest="$destfile"
+ destfile=
+ ;;
+ *)
+ $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Install the libtool object if requested.
+ if test -n "$destfile"; then
+ $show "$install_prog $file $destfile"
+ $run eval "$install_prog $file $destfile" || exit $?
+ fi
+
+ # Install the old object if enabled.
+ if test "$build_old_libs" = yes; then
+ # Deduce the name of the old-style object file.
+ staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
+
+ $show "$install_prog $staticobj $staticdest"
+ $run eval "$install_prog \$staticobj \$staticdest" || exit $?
+ fi
+ exit 0
+ ;;
+
+ *)
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ notinst_deplibs=
+ relink_command=
+
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Check the variables that should have been set.
+ if test -z "$notinst_deplibs"; then
+ $echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
+ exit 1
+ fi
+
+ finalize=yes
+ for lib in $notinst_deplibs; do
+ # Check to see that each library is installed.
+ libdir=
+ if test -f "$lib"; then
+ # If there is no directory component, then add one.
+ case $lib in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
+ esac
+ fi
+ libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
+ if test -n "$libdir" && test ! -f "$libfile"; then
+ $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
+ finalize=no
+ fi
+ done
+
+ relink_command=
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ outputname=
+ if test "$fast_install" = no && test -n "$relink_command"; then
+ if test "$finalize" = yes && test -z "$run"; then
+ tmpdir="/tmp"
+ test -n "$TMPDIR" && tmpdir="$TMPDIR"
+ tmpdir="$tmpdir/libtool-$$"
+ if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
+ else
+ $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
+ continue
+ fi
+ file=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ outputname="$tmpdir/$file"
+ # Replace the output file specification.
+ relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
+
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ ${rm}r "$tmpdir"
+ continue
+ fi
+ file="$outputname"
+ else
+ $echo "$modename: warning: cannot relink \`$file'" 1>&2
+ fi
+ else
+ # Install the binary that we compiled earlier.
+ file=`$echo "X$file" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
+ fi
+ fi
+
+ # remove .exe since cygwin /usr/bin/install will append another
+ # one anyways
+ case $install_prog,$host in
+ /usr/bin/install*,*cygwin*)
+ case $file:$destfile in
+ *.exe:*.exe)
+ # this is ok
+ ;;
+ *.exe:*)
+ destfile=$destfile.exe
+ ;;
+ *:*.exe)
+ destfile=`echo $destfile | sed -e 's,.exe$,,'`
+ ;;
+ esac
+ ;;
+ esac
+ $show "$install_prog$stripme $file $destfile"
+ $run eval "$install_prog\$stripme \$file \$destfile" || exit $?
+ test -n "$outputname" && ${rm}r "$tmpdir"
+ ;;
+ esac
+ done
+
+ for file in $staticlibs; do
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+
+ # Set up the ranlib parameters.
+ oldlib="$destdir/$name"
+
+ $show "$install_prog $file $oldlib"
+ $run eval "$install_prog \$file \$oldlib" || exit $?
+
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$old_striplib $oldlib"
+ $run eval "$old_striplib $oldlib" || exit $?
+ fi
+
+ # Do each command in the postinstall commands.
+ eval cmds=\"$old_postinstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$future_libdirs"; then
+ $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
+ fi
+
+ if test -n "$current_libdirs"; then
+ # Maybe just do a dry run.
+ test -n "$run" && current_libdirs=" -n$current_libdirs"
+ exec_cmd='$SHELL $0 --finish$current_libdirs'
+ else
+ exit 0
+ fi
+ ;;
+
+ # libtool finish mode
+ finish)
+ modename="$modename: finish"
+ libdirs="$nonopt"
+ admincmds=
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ for dir
+ do
+ libdirs="$libdirs $dir"
+ done
+
+ for libdir in $libdirs; do
+ if test -n "$finish_cmds"; then
+ # Do each command in the finish commands.
+ eval cmds=\"$finish_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || admincmds="$admincmds
+ $cmd"
+ done
+ IFS="$save_ifs"
+ fi
+ if test -n "$finish_eval"; then
+ # Do the single finish_eval.
+ eval cmds=\"$finish_eval\"
+ $run eval "$cmds" || admincmds="$admincmds
+ $cmds"
+ fi
+ done
+ fi
+
+ # Exit here if they wanted silent mode.
+ test "$show" = ":" && exit 0
+
+ echo "----------------------------------------------------------------------"
+ echo "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ echo " $libdir"
+ done
+ echo
+ echo "If you ever happen to want to link against installed libraries"
+ echo "in a given directory, LIBDIR, you must either use libtool, and"
+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ echo "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ echo " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ echo " - add LIBDIR to the \`$runpath_var' environment variable"
+ echo " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
+
+ echo " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ echo " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ echo
+ echo "See any operating system documentation about shared libraries for"
+ echo "more information, such as the ld(1) and ld.so(8) manual pages."
+ echo "----------------------------------------------------------------------"
+ exit 0
+ ;;
+
+ # libtool execute mode
+ execute)
+ modename="$modename: execute"
+
+ # The first argument is the command name.
+ cmd="$nonopt"
+ if test -z "$cmd"; then
+ $echo "$modename: you must specify a COMMAND" 1>&2
+ $echo "$help"
+ exit 1
+ fi
+
+ # Handle -dlopen flags immediately.
+ for file in $execute_dlfiles; do
+ if test ! -f "$file"; then
+ $echo "$modename: \`$file' is not a file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ dir=
+ case $file in
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Read the libtool library.
+ dlname=
+ library_names=
+
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Skip this library if it cannot be dlopened.
+ if test -z "$dlname"; then
+ # Warn if it was a shared library.
+ test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
+ continue
+ fi
+
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+
+ if test -f "$dir/$objdir/$dlname"; then
+ dir="$dir/$objdir"
+ else
+ $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
+ exit 1
+ fi
+ ;;
+
+ *.lo)
+ # Just add the directory containing the .lo file.
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+ ;;
+
+ *)
+ $echo "$modename: warning: \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
+ continue
+ ;;
+ esac
+
+ # Get the absolute pathname.
+ absdir=`cd "$dir" && pwd`
+ test -n "$absdir" && dir="$absdir"
+
+ # Now add the directory to shlibpath_var.
+ if eval "test -z \"\$$shlibpath_var\""; then
+ eval "$shlibpath_var=\"\$dir\""
+ else
+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+ fi
+ done
+
+ # This variable tells wrapper scripts just to set shlibpath_var
+ # rather than running their programs.
+ libtool_execute_magic="$magic"
+
+ # Check if any of the arguments is a wrapper script.
+ args=
+ for file
+ do
+ case $file in
+ -*) ;;
+ *)
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ fi
+ ;;
+ esac
+ # Quote arguments (to preserve shell metacharacters).
+ file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
+ args="$args \"$file\""
+ done
+
+ if test -z "$run"; then
+ if test -n "$shlibpath_var"; then
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
+ fi
+
+ # Restore saved environment variables
+ if test "${save_LC_ALL+set}" = set; then
+ LC_ALL="$save_LC_ALL"; export LC_ALL
+ fi
+ if test "${save_LANG+set}" = set; then
+ LANG="$save_LANG"; export LANG
+ fi
+
+ # Now prepare to actually exec the command.
+ exec_cmd='"$cmd"$args'
+ else
+ # Display what would be done.
+ if test -n "$shlibpath_var"; then
+ eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
+ $echo "export $shlibpath_var"
+ fi
+ $echo "$cmd$args"
+ exit 0
+ fi
+ ;;
+
+ # libtool clean and uninstall mode
+ clean | uninstall)
+ modename="$modename: $mode"
+ rm="$nonopt"
+ files=
+ rmforce=
+ exit_status=0
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ for arg
+ do
+ case $arg in
+ -f) rm="$rm $arg"; rmforce=yes ;;
+ -*) rm="$rm $arg" ;;
+ *) files="$files $arg" ;;
+ esac
+ done
+
+ if test -z "$rm"; then
+ $echo "$modename: you must specify an RM program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ rmdirs=
+
+ for file in $files; do
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$file"; then
+ dir=.
+ objdir="$objdir"
+ else
+ objdir="$dir/$objdir"
+ fi
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ test $mode = uninstall && objdir="$dir"
+
+ # Remember objdir for removal later, being careful to avoid duplicates
+ if test $mode = clean; then
+ case " $rmdirs " in
+ *" $objdir "*) ;;
+ *) rmdirs="$rmdirs $objdir" ;;
+ esac
+ fi
+
+ # Don't error if the file doesn't exist and rm -f was used.
+ if (test -L "$file") >/dev/null 2>&1 \
+ || (test -h "$file") >/dev/null 2>&1 \
+ || test -f "$file"; then
+ :
+ elif test -d "$file"; then
+ exit_status=1
+ continue
+ elif test "$rmforce" = yes; then
+ continue
+ fi
+
+ rmfiles="$file"
+
+ case $name in
+ *.la)
+ # Possibly a libtool archive, so verify it.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ . $dir/$name
+
+ # Delete the libtool libraries and symlinks.
+ for n in $library_names; do
+ rmfiles="$rmfiles $objdir/$n"
+ done
+ test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
+ test $mode = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
+
+ if test $mode = uninstall; then
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ eval cmds=\"$postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
+ IFS="$save_ifs"
+ fi
+
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ eval cmds=\"$old_postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
+ IFS="$save_ifs"
+ fi
+ # FIXME: should reinstall the best remaining shared library.
+ fi
+ fi
+ ;;
+
+ *.lo)
+ if test "$build_old_libs" = yes; then
+ oldobj=`$echo "X$name" | $Xsed -e "$lo2o"`
+ rmfiles="$rmfiles $dir/$oldobj"
+ fi
+ ;;
+
+ *)
+ # Do a test to see if this is a libtool program.
+ if test $mode = clean &&
+ (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ relink_command=
+ . $dir/$file
+
+ rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
+ if test "$fast_install" = yes && test -n "$relink_command"; then
+ rmfiles="$rmfiles $objdir/lt-$name"
+ fi
+ fi
+ ;;
+ esac
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles || exit_status=1
+ done
+
+ # Try to remove the ${objdir}s in the directories where we deleted files
+ for dir in $rmdirs; do
+ if test -d "$dir"; then
+ $show "rmdir $dir"
+ $run rmdir $dir >/dev/null 2>&1
+ fi
+ done
+
+ exit $exit_status
+ ;;
+
+ "")
+ $echo "$modename: you must specify a MODE" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test -z "$exec_cmd"; then
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ fi
+fi # test -z "$show_help"
+
+if test -n "$exec_cmd"; then
+ eval exec $exec_cmd
+ exit 1
+fi
+
+# We need to display help for each of the modes.
+case $mode in
+"") $echo \
+"Usage: $modename [OPTION]... [MODE-ARG]...
+
+Provide generalized library-building support services.
+
+ --config show all configuration variables
+ --debug enable verbose shell tracing
+-n, --dry-run display commands without modifying any files
+ --features display basic configuration information and exit
+ --finish same as \`--mode=finish'
+ --help display this help message and exit
+ --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
+ --quiet same as \`--silent'
+ --silent don't print informational messages
+ --version print version information
+
+MODE must be one of the following:
+
+ clean remove files from the build directory
+ compile compile a source file into a libtool object
+ execute automatically set library path, then run a program
+ finish complete the installation of libtool libraries
+ install install libraries or executables
+ link create a library or an executable
+ uninstall remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
+a more detailed description of MODE."
+ exit 0
+ ;;
+
+clean)
+ $echo \
+"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
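+# For example (hypothetical object file):
+#   libtool --mode=clean rm -f foo.lo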
+
+compile)
+ $echo \
+"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
+ -static always build a \`.o' file suitable for static linking
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
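+# For example (hypothetical source file):
+#   libtool --mode=compile gcc -g -O -c foo.c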
+
+execute)
+ $echo \
+"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
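+# For instance (hypothetical library and program names):
+#   libtool --mode=execute -dlopen libhello.la gdb main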
+
+finish)
+ $echo \
+"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
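+# For example (hypothetical library directory):
+#   libtool --mode=finish /usr/local/lib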
+
+install)
+ $echo \
+"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+ ;;
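+# For example (hypothetical library and destination):
+#   libtool --mode=install /usr/bin/install -c libhello.la /usr/local/lib/libhello.la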
+
+link)
+ $echo \
+"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+ -all-static do not do any dynamic linking at all
+ -avoid-version do not add a version suffix if possible
+ -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
+ -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
+ -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+ -export-symbols SYMFILE
+ try to export only the symbols listed in SYMFILE
+ -export-symbols-regex REGEX
+ try to export only the symbols matching REGEX
+ -LLIBDIR search LIBDIR for required installed libraries
+ -lNAME OUTPUT-FILE requires the installed library libNAME
+ -jnimodule build a library that can be dlopened via Java JNI
+ -module build a library that can be dlopened
+ -no-fast-install disable the fast-install mode
+ -no-install link a not-installable executable
+ -no-undefined declare that a library does not refer to external symbols
+ -o OUTPUT-FILE create OUTPUT-FILE from the specified objects
+ -release RELEASE specify package release information
+ -rpath LIBDIR the created library will eventually be installed in LIBDIR
+ -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
+ -static do not do any dynamic linking of libtool libraries
+ -version-info CURRENT[:REVISION[:AGE]]
+ specify library version info [each variable defaults to 0]
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename. Files ending in \`.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+ ;;
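+# A typical invocation (hypothetical objects, library name, and version info):
+#   libtool --mode=link gcc -g -O -o libhello.la foo.lo hello.lo \
+#     -rpath /usr/local/lib -version-info 1:0:0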
+
+uninstall)
+ $echo \
+"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+ ;;
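+# For example (hypothetical installed library):
+#   libtool --mode=uninstall rm -f /usr/local/lib/libhello.la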
+
+*)
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+esac
+
+echo
+$echo "Try \`$modename --help' for more information about other modes."
+
+exit 0
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
diff --git a/libdb/dist/pubdef.in b/libdb/dist/pubdef.in
new file mode 100644
index 0000000..f423630
--- /dev/null
+++ b/libdb/dist/pubdef.in
@@ -0,0 +1,350 @@
+# Each line names a DB_* symbol, followed by three columns describing where
+# the name appears:
+# D == documentation
+# I == include file
+# C == Java case value (declared and initialized)
+# J == Java constant (declared only)
+# A '*' means the name does not appear in that context.
+DB_AFTER D I J
+DB_AGGRESSIVE D I J
+DB_ALREADY_ABORTED * I *
+DB_AM_CHKSUM * I *
+DB_AM_CL_WRITER * I *
+DB_AM_COMPENSATE * I *
+DB_AM_CREATED * I *
+DB_AM_CREATED_MSTR * I *
+DB_AM_DBM_ERROR * I *
+DB_AM_DELIMITER * I *
+DB_AM_DIRTY * I *
+DB_AM_DISCARD * I *
+DB_AM_DUP * I *
+DB_AM_DUPSORT * I *
+DB_AM_ENCRYPT * I *
+DB_AM_FIXEDLEN * I *
+DB_AM_INMEM * I *
+DB_AM_IN_RENAME * I *
+DB_AM_OPEN_CALLED * I *
+DB_AM_PAD * I *
+DB_AM_PGDEF * I *
+DB_AM_RDONLY * I *
+DB_AM_RECNUM * I *
+DB_AM_RECOVER * I *
+DB_AM_RENUMBER * I *
+DB_AM_REVSPLITOFF * I *
+DB_AM_SECONDARY * I *
+DB_AM_SNAPSHOT * I *
+DB_AM_SUBDB * I *
+DB_AM_SWAP * I *
+DB_AM_TXN * I *
+DB_AM_VERIFYING * I *
+DB_APPEND D I J
+DB_ARCH_ABS D I J
+DB_ARCH_DATA D I J
+DB_ARCH_LOG D I J
+DB_AUTO_COMMIT D I J
+DB_BEFORE D I J
+DB_BTREE D I C
+DB_BTREEMAGIC * I *
+DB_BTREEOLDVER * I *
+DB_BTREEVERSION * I *
+DB_CACHED_COUNTS * I J
+DB_CDB_ALLDB D I J
+DB_CHKSUM_SHA1 D I J
+DB_CLIENT D I J
+DB_COMMIT * I *
+DB_CONFIG D * *
+DB_CONSUME D I J
+DB_CONSUME_WAIT D I J
+DB_CREATE D I J
+DB_CURRENT D I J
+DB_CXX_NO_EXCEPTIONS D I J
+DB_DBM_HSEARCH * I *
+DB_DBT_APPMALLOC D I *
+DB_DBT_DUPOK * I *
+DB_DBT_ISSET * I *
+DB_DBT_MALLOC D I J
+DB_DBT_PARTIAL D I J
+DB_DBT_REALLOC D I J
+DB_DBT_USERMEM D I J
+DB_DELETED * I *
+DB_DIRECT D I J
+DB_DIRECT_DB D I J
+DB_DIRECT_LOG D I J
+DB_DIRTY_READ D I J
+DB_DONOTINDEX D I C
+DB_DUP D I J
+DB_DUPSORT D I J
+DB_EID_BROADCAST D I J
+DB_EID_INVALID D I J
+DB_ENCRYPT D I J
+DB_ENCRYPT_AES D I J
+DB_ENV_AUTO_COMMIT * I *
+DB_ENV_CDB * I *
+DB_ENV_CDB_ALLDB * I *
+DB_ENV_CREATE * I *
+DB_ENV_DBLOCAL * I *
+DB_ENV_DIRECT_DB * I *
+DB_ENV_DIRECT_LOG * I *
+DB_ENV_FATAL * I *
+DB_ENV_LOCKDOWN * I *
+DB_ENV_NOLOCKING * I *
+DB_ENV_NOMMAP * I *
+DB_ENV_NOPANIC * I *
+DB_ENV_OPEN_CALLED * I *
+DB_ENV_OVERWRITE * I *
+DB_ENV_PRIVATE * I *
+DB_ENV_REGION_INIT * I *
+DB_ENV_REP_CLIENT * I *
+DB_ENV_REP_LOGSONLY * I *
+DB_ENV_REP_MASTER * I *
+DB_ENV_RPCCLIENT * I *
+DB_ENV_RPCCLIENT_GIVEN * I *
+DB_ENV_SYSTEM_MEM * I *
+DB_ENV_THREAD * I *
+DB_ENV_TXN_NOSYNC * I *
+DB_ENV_TXN_WRITE_NOSYNC * I *
+DB_ENV_YIELDCPU * I *
+DB_EXCL D I J
+DB_EXTENT * I *
+DB_FAST_STAT D I J
+DB_FCNTL_LOCKING * I *
+DB_FILE_ID_LEN * I *
+DB_FIRST D I J
+DB_FLUSH D I J
+DB_FORCE D I J
+DB_GET_BOTH D I J
+DB_GET_BOTHC * I *
+DB_GET_BOTH_RANGE D I J
+DB_GET_RECNO D I J
+DB_HANDLE_LOCK * I *
+DB_HASH D I C
+DB_HASHMAGIC * I *
+DB_HASHOLDVER * I *
+DB_HASHVERSION * I *
+DB_HOME D * *
+DB_INIT_CDB D I J
+DB_INIT_LOCK D I J
+DB_INIT_LOG D I J
+DB_INIT_MPOOL D I J
+DB_INIT_TXN D I J
+DB_JAVA_CALLBACK * I *
+DB_JOINENV D I J
+DB_JOIN_ITEM D I J
+DB_JOIN_NOSORT D I J
+DB_KEYEMPTY D I C
+DB_KEYEXIST D I C
+DB_KEYFIRST D I J
+DB_KEYLAST D I J
+DB_LAST D I J
+DB_LOCKDOWN D I J
+DB_LOCKVERSION * I *
+DB_LOCK_DEADLOCK D I C
+DB_LOCK_DEFAULT D I J
+DB_LOCK_DIRTY * I *
+DB_LOCK_DUMP * I *
+DB_LOCK_EXPIRE D I J
+DB_LOCK_FREE_LOCKER * I *
+DB_LOCK_GET D I J
+DB_LOCK_GET_TIMEOUT D I J
+DB_LOCK_INHERIT * I *
+DB_LOCK_IREAD D I J
+DB_LOCK_IWR D I J
+DB_LOCK_IWRITE D I J
+DB_LOCK_MAXLOCKS D I J
+DB_LOCK_MINLOCKS D I J
+DB_LOCK_MINWRITE D I J
+DB_LOCK_NG * I *
+DB_LOCK_NORUN * I *
+DB_LOCK_NOTEXIST * I *
+DB_LOCK_NOTGRANTED D I C
+DB_LOCK_NOWAIT D I J
+DB_LOCK_OLDEST D I J
+DB_LOCK_PUT D I J
+DB_LOCK_PUT_ALL D I J
+DB_LOCK_PUT_OBJ D I J
+DB_LOCK_PUT_READ * I *
+DB_LOCK_RANDOM D I J
+DB_LOCK_READ D I J
+DB_LOCK_RECORD * I *
+DB_LOCK_REMOVE * I *
+DB_LOCK_SET_TIMEOUT * I *
+DB_LOCK_SWITCH * I *
+DB_LOCK_TIMEOUT D I J
+DB_LOCK_TRADE * I *
+DB_LOCK_UPGRADE * I *
+DB_LOCK_UPGRADE_WRITE * I *
+DB_LOCK_WAIT * I *
+DB_LOCK_WRITE D I J
+DB_LOCK_WWRITE * I *
+DB_LOCK_YOUNGEST D I J
+DB_LOGC_BUF_SIZE * I *
+DB_LOGFILEID_INVALID * I *
+DB_LOGMAGIC * I *
+DB_LOGOLDVER * I *
+DB_LOGVERSION * I *
+DB_LOG_DISK * I *
+DB_LOG_LOCKED * I *
+DB_LOG_SILENT_ERR * I *
+DB_LSTAT_ABORTED * I *
+DB_LSTAT_ERR * I *
+DB_LSTAT_EXPIRED * I *
+DB_LSTAT_FREE * I *
+DB_LSTAT_HELD * I *
+DB_LSTAT_NOTEXIST * I *
+DB_LSTAT_PENDING * I *
+DB_LSTAT_WAITING * I *
+DB_MAX_PAGES * I *
+DB_MAX_RECORDS * I *
+DB_MPOOL_CLEAN D I *
+DB_MPOOL_CREATE D I *
+DB_MPOOL_DIRTY D I *
+DB_MPOOL_DISCARD D I *
+DB_MPOOL_LAST D I *
+DB_MPOOL_NEW D I *
+DB_MULTIPLE D I J
+DB_MULTIPLE_INIT D I *
+DB_MULTIPLE_KEY D I J
+DB_MULTIPLE_KEY_NEXT D I *
+DB_MULTIPLE_NEXT D I *
+DB_MULTIPLE_RECNO_NEXT D I *
+DB_NEEDSPLIT * I *
+DB_NEXT D I J
+DB_NEXT_DUP D I J
+DB_NEXT_NODUP D I J
+DB_NOCOPY * I *
+DB_NODUPDATA D I J
+DB_NOLOCKING D I J
+DB_NOMMAP D I J
+DB_NOORDERCHK D I J
+DB_NOOVERWRITE D I J
+DB_NOPANIC D I J
+DB_NOSERVER D I C
+DB_NOSERVER_HOME D I C
+DB_NOSERVER_ID D I C
+DB_NOSYNC D I J
+DB_NOTFOUND D I C
+DB_ODDFILESIZE D I J
+DB_OK_BTREE * I *
+DB_OK_HASH * I *
+DB_OK_QUEUE * I *
+DB_OK_RECNO * I *
+DB_OLD_VERSION D I C
+DB_OPFLAGS_MASK * I *
+DB_ORDERCHKONLY D I J
+DB_OVERWRITE D I J
+DB_PAGE_LOCK * I *
+DB_PAGE_NOTFOUND D I C
+DB_PANIC_ENVIRONMENT D I J
+DB_PERMANENT * I *
+DB_POSITION D I J
+DB_POSITIONI * I *
+DB_PREV D I J
+DB_PREV_NODUP D I J
+DB_PRINTABLE D I J
+DB_PRIORITY_DEFAULT D I J
+DB_PRIORITY_HIGH D I J
+DB_PRIORITY_LOW D I J
+DB_PRIORITY_VERY_HIGH D I J
+DB_PRIORITY_VERY_LOW D I J
+DB_PRIVATE D I J
+DB_PR_PAGE * I *
+DB_PR_RECOVERYTEST * I *
+DB_QAMMAGIC * I *
+DB_QAMOLDVER * I *
+DB_QAMVERSION * I *
+DB_QUEUE D I C
+DB_RDONLY D I J
+DB_RDWRMASTER * I *
+DB_RECNO D I C
+DB_RECNUM D I J
+DB_RECORDCOUNT * I J
+DB_RECORD_LOCK * I *
+DB_RECOVER D I J
+DB_RECOVER_FATAL D I J
+DB_REDO * I *
+DB_REGION_INIT D I J
+DB_REGION_MAGIC * I *
+DB_RENAMEMAGIC * I *
+DB_RENUMBER D I J
+DB_REP_CLIENT D I J
+DB_REP_DUPMASTER D I C
+DB_REP_HOLDELECTION D I C
+DB_REP_LOGSONLY D I J
+DB_REP_MASTER D I J
+DB_REP_NEWMASTER D I C
+DB_REP_NEWSITE D I C
+DB_REP_OUTDATED D I C
+DB_REP_PERMANENT D I J
+DB_REP_UNAVAIL D I J
+DB_REVSPLITOFF D I J
+DB_RMW D I J
+DB_RUNRECOVERY D I C
+DB_SALVAGE D I J
+DB_SECONDARY_BAD D I C
+DB_SET D I J
+DB_SET_LOCK_TIMEOUT D I J
+DB_SET_RANGE D I J
+DB_SET_RECNO D I J
+DB_SET_TXN_NOW * I *
+DB_SET_TXN_TIMEOUT D I J
+DB_SNAPSHOT D I J
+DB_STAT_CLEAR D I J
+DB_SURPRISE_KID * I *
+DB_SWAPBYTES * I *
+DB_SYSTEM_MEM D I J
+DB_TEST_ELECTINIT * I *
+DB_TEST_ELECTSEND * I *
+DB_TEST_ELECTVOTE1 * I *
+DB_TEST_ELECTVOTE2 * I *
+DB_TEST_ELECTWAIT1 * I *
+DB_TEST_ELECTWAIT2 * I *
+DB_TEST_POSTDESTROY * I *
+DB_TEST_POSTLOG * I *
+DB_TEST_POSTLOGMETA * I *
+DB_TEST_POSTOPEN * I *
+DB_TEST_POSTSYNC * I *
+DB_TEST_PREDESTROY * I *
+DB_TEST_PREOPEN * I *
+DB_TEST_SUBDB_LOCKS * I *
+DB_THREAD D I J
+DB_TIMEOUT * I *
+DB_TRUNCATE D I J
+DB_TXNVERSION * I *
+DB_TXN_ABORT D I C
+DB_TXN_APPLY D I C
+DB_TXN_BACKWARD_ALLOC * I *
+DB_TXN_BACKWARD_ROLL D I C
+DB_TXN_CKP * I *
+DB_TXN_FORWARD_ROLL D I C
+DB_TXN_GETPGNOS * I *
+DB_TXN_LOCK * I *
+DB_TXN_NOSYNC D I J
+DB_TXN_NOWAIT D I J
+DB_TXN_OPENFILES * I *
+DB_TXN_POPENFILES * I *
+DB_TXN_PRINT D I C
+DB_TXN_SYNC D I J
+DB_TXN_WRITE_NOSYNC D I J
+DB_UNDO * I *
+DB_UNKNOWN D I C
+DB_UPDATE_SECONDARY * I *
+DB_UPGRADE D I J
+DB_USE_ENVIRON D I J
+DB_USE_ENVIRON_ROOT D I J
+DB_VERB_CHKPOINT D I J
+DB_VERB_DEADLOCK D I J
+DB_VERB_RECOVERY D I J
+DB_VERB_REPLICATION D I J
+DB_VERB_WAITSFOR D I J
+DB_VERIFY D I J
+DB_VERIFY_BAD D I C
+DB_VERIFY_FATAL * I *
+DB_VERSION_MAJOR * I J
+DB_VERSION_MINOR * I J
+DB_VERSION_PATCH * I J
+DB_VERSION_STRING * I *
+DB_WRITECURSOR D I J
+DB_WRITELOCK * I *
+DB_WRITEOPEN * I *
+DB_WRNOSYNC * I *
+DB_XA_CREATE D I J
+DB_XIDDATASIZE D I J
+DB_YIELDCPU D I J
diff --git a/libdb/dist/s_all b/libdb/dist/s_all
new file mode 100755
index 0000000..f90d70c
--- /dev/null
+++ b/libdb/dist/s_all
@@ -0,0 +1,21 @@
+#!/bin/sh -
+# $Id$
+
+sh s_perm # permissions.
+sh s_symlink # symbolic links.
+sh s_readme # db/README file.
+
+#
+# The following order is important: s_include must run last.
+#
+sh s_config # autoconf.
+sh s_recover # logging/recovery files.
+sh s_rpc # RPC files.
+sh s_include # standard include files.
+
+sh s_win32 # Win32 include files.
+sh s_win32_dsp # Win32 build environment.
+sh s_vxworks # VxWorks include files.
+sh s_java # Java support.
+sh s_test # Test suite support.
+sh s_tags # Tags files.
diff --git a/libdb/dist/s_config b/libdb/dist/s_config
new file mode 100755
index 0000000..63aae44
--- /dev/null
+++ b/libdb/dist/s_config
@@ -0,0 +1,45 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the autoconfiguration files.
+
+trap 'rm -f aclocal.m4 ; exit 0' 0 1 2 3 13 15
+
+msgac="# DO NOT EDIT: automatically built by dist/s_config."
+
+. ./RELEASE
+
+echo "Building aclocal.m4"
+(echo "$msgac" &&
+ echo "AC_DEFUN(AM_VERSION_SET, [" &&
+ echo "AC_SUBST(DB_VERSION_MAJOR)" &&
+ echo "DB_VERSION_MAJOR=$DB_VERSION_MAJOR" &&
+ echo "AC_SUBST(DB_VERSION_MINOR)" &&
+ echo "DB_VERSION_MINOR=$DB_VERSION_MINOR" &&
+ echo "AC_SUBST(DB_VERSION_PATCH)" &&
+ echo "DB_VERSION_PATCH=$DB_VERSION_PATCH" &&
+ echo "AC_SUBST(DB_VERSION_UNIQUE_NAME)" &&
+ echo "DB_VERSION_UNIQUE_NAME=$DB_VERSION_UNIQUE_NAME" &&
+ echo "AC_SUBST(DB_VERSION_STRING)" &&
+ echo "DB_VERSION_STRING=\"\\\"$DB_VERSION_STRING\\\"\"" &&
+ echo "])" &&
+ cat aclocal/*.ac aclocal_java/*.ac) > aclocal.m4
+
+echo "Running autoheader to build config.hin"
+rm -f config.hin
+autoheader
+chmod 444 config.hin
+
+echo "Running autoconf to build configure"
+rm -f configure
+autoconf
+
+# Edit version information we couldn't pre-compute.
+(echo "1,\$s/__EDIT_DB_VERSION__/$DB_VERSION/g" &&
+ echo "w" &&
+ echo "q") | ed configure
+
+rm -rf autom4te.cache
+chmod 555 configure
+
+chmod 555 config.guess config.sub install-sh
diff --git a/libdb/dist/s_crypto b/libdb/dist/s_crypto
new file mode 100644
index 0000000..dbe8d9a
--- /dev/null
+++ b/libdb/dist/s_crypto
@@ -0,0 +1,57 @@
+#!/bin/sh -
+# $Id$
+
+# Remove crypto from the DB source tree.
+
+d=..
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+if ! test -d $d/crypto; then
+ echo "s_crypto: no crypto sources found in the source tree."
+ exit 1
+fi
+
+# Remove the crypto.
+rm -rf $d/crypto
+
+# Update the docs.
+f=$d/docs/ref/env/encrypt.html
+chmod 664 $f
+(echo '/DOES/' &&
+ echo 's/DOES/DOES NOT/' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+# Win/32.
+f=win_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+f=srcfiles.in
+chmod 664 $f
+(echo 'g/^crypto\//d' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_win32
+ sh ./s_win32_dsp
+
+# VxWorks
+f=vx_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_vxworks
diff --git a/libdb/dist/s_include b/libdb/dist/s_include
new file mode 100755
index 0000000..2079848
--- /dev/null
+++ b/libdb/dist/s_include
@@ -0,0 +1,160 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the automatically generated function prototype files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_include. */"
+
+. ./RELEASE
+
+head()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ space)
+ echo ""; shift;;
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo "$msgc"
+ echo "#ifndef $name"
+ echo "#define $name"
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "extern \"C\" {"
+ echo "#endif"
+ echo ""
+ fi
+}
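+# For illustration: "head space _DB_EXT_PROT_IN_" prints a leading blank line,
+# the DO-NOT-EDIT banner, an "#ifndef _DB_EXT_PROT_IN_" include guard, and the
+# extern "C" opening block; the matching tail() call closes the guard.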
+
+tail()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "}"
+ echo "#endif"
+ fi
+ echo "#endif /* !$name */"
+}
+
+# We are building several files:
+# 1 external #define file
+# 1 external prototype file
+# 1 internal #define file
+# N internal prototype files
+e_dfile=/tmp/__db_c.$$
+e_pfile=/tmp/__db_a.$$
+i_dfile=/tmp/__db_d.$$
+i_pfile=/tmp/__db_b.$$
+trap 'rm -f $e_dfile $e_pfile $i_dfile $i_pfile; exit 0' 0 1 2 3 13 15
+
+head defonly space _DB_EXT_DEF_IN_ > $e_dfile
+head space _DB_EXT_PROT_IN_ > $e_pfile
+head defonly _DB_INT_DEF_IN_ > $i_dfile
+
+# Process the standard directories, creating per-directory prototype
+# files and adding to the external prototype and #define files.
+for i in db btree clib common crypto dbreg env fileops hash hmac \
+ lock log mp mutex os qam rep rpc_client rpc_server tcl txn xa; do
+ head "_${i}_ext_h_" > $i_pfile
+
+ f="../$i/*.c"
+ [ $i = os ] && f="$f ../os_win32/*.c"
+ [ $i = rpc_server ] && f="../$i/c/*.c"
+ [ $i = crypto ] && f="../$i/*.c ../$i/*/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile=$i_dfile \
+ -v i_pfile=$i_pfile $f
+
+ tail "_${i}_ext_h_" >> $i_pfile
+
+ f=../dbinc_auto/${i}_ext.h
+ cmp $i_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_pfile $f && chmod 444 $f)
+done
+
+# Process directories which only add to the external prototype and #define
+# files.
+for i in dbm hsearch; do
+ f="../$i/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+done
+
+# RPC uses rpcgen to generate a header file; post-process it to add more
+# interfaces to the internal #define file.
+sed -e '/extern bool_t xdr___/{' \
+ -e 's/.* //' \
+ -e 's/();//' \
+ -e 's/.*/#define & &@DB_VERSION_UNIQUE_NAME@/' \
+ -e 'p' \
+ -e '}' \
+ -e d < ../dbinc_auto/db_server.h >> $i_dfile
+
+# There are a few globals in DB -- add them to the external/internal
+# #define files.
+(echo "#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@";
+ echo "#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@") >> $i_dfile
+(echo "#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@") >> $e_dfile
+
+# Wrap up the external #defines/prototypes, and internal #defines.
+tail defonly _DB_EXT_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
+
+tail defonly _DB_INT_DEF_IN_ >> $i_dfile
+f=../dbinc_auto/int_def.in
+cmp $i_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_dfile $f && chmod 444 $f)
+
+# DB185 compatibility support.
+head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile
+head space _DB_EXT_185_PROT_IN_ > $e_pfile
+
+f="../db185/*.c"
+awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+
+tail defonly _DB_EXT_185_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_185_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_185_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_185_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
diff --git a/libdb/dist/s_java b/libdb/dist/s_java
new file mode 100755
index 0000000..0e97cf3
--- /dev/null
+++ b/libdb/dist/s_java
@@ -0,0 +1,273 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the Java files.
+
+msgjava="/* DO NOT EDIT: automatically built by dist/s_java. */"
+
+. RELEASE
+
+t=/tmp/__java
+c=/tmp/__javajnic
+h=/tmp/__javajnih
+trap 'rm -f $t $c $h; exit 0' 0 1 2 3 13 15
+
+# Build DbConstants.java.
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo 'class DbConstants' &&
+ echo '{' &&
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \
+ -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \
+ -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print " static final int " $1 " = " $2 ";" }' &&
+ echo '}' &&
+ echo &&
+ echo '// end of DbConstants.java') > $t
+
+f=../java/src/com/sleepycat/db/DbConstants.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build Db.java.
+f=../java/src/com/sleepycat/db/Db.java
+sed '/BEGIN-JAVA-SPECIAL-CONSTANTS/q' < $f > $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*C$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 " = " $2 ";" }') >> $t
+(for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 ";" }') >> $t
+sed -n \
+ '/END-JAVA-SPECIAL-CONSTANTS/,/BEGIN-JAVA-CONSTANT-INITIALIZATION/p' \
+ < $f >> $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print " " $1 " = DbConstants." $1 ";" }') >> $t
+sed -n '/END-JAVA-CONSTANT-INITIALIZATION/,$p' < $f >> $t
+
+f=../java/src/com/sleepycat/db/Db.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass()
+{
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public String \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public long \1;/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public DbLsn \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public Active \1[];/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public int \1;/p" \
+ -e '}'
+}
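+# For illustration (hypothetical member names): a db.in field such as
+# "u_int32_t bt_nkeys;" is emitted as "public int bt_nkeys;", while
+# "char *home;" becomes "public String home;".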
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass_jni()
+{
+ c=$3
+ h=$4
+ echo "extern int $2(JNIEnv *jnienv, jclass cl, jobject jobj, struct $1 *statp);" >> $h
+ echo "int $2(JNIEnv *jnienv, jclass cl," >> $c
+ echo " jobject jobj, struct $1 *statp) {" >> $c
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_STRING(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, \1);/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_INT(jnienv, cl, jobj, statp, \1);/p" \
+ -e '}' >> $c
+ echo ' return (0);' >> $c
+ echo '}' >> $c
+}
+
+echo "$msgjava" >> $c
+echo "$msgjava" >> $h
+echo '#include "java_util.h"' >> $c
+
+# Build DbBtreeStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbBtreeStat"
+ echo '{'
+ jclass __db_bt_stat &&
+ echo '}' &&
+ echo '// end of DbBtreeStat.java') > $t
+jclass_jni __db_bt_stat __jv_fill_bt_stat $c $h
+f=../java/src/com/sleepycat/db/DbBtreeStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbHashStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbHashStat"
+ echo '{'
+ jclass __db_h_stat &&
+ echo '}' &&
+ echo '// end of DbHashStat.java') > $t
+jclass_jni __db_h_stat __jv_fill_h_stat $c $h
+f=../java/src/com/sleepycat/db/DbHashStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLockStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLockStat"
+ echo '{'
+ jclass __db_lock_stat &&
+ echo '}' &&
+ echo '// end of DbLockStat.java') > $t
+jclass_jni __db_lock_stat __jv_fill_lock_stat $c $h
+f=../java/src/com/sleepycat/db/DbLockStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLogStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLogStat"
+ echo '{'
+ jclass __db_log_stat &&
+ echo '}' &&
+ echo '// end of DbLogStat.java') > $t
+jclass_jni __db_log_stat __jv_fill_log_stat $c $h
+f=../java/src/com/sleepycat/db/DbLogStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbMpoolFStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbMpoolFStat"
+ echo '{'
+ jclass __db_mpool_fstat &&
+ echo '}' &&
+ echo '// end of DbMpoolFStat.java') > $t
+jclass_jni __db_mpool_stat __jv_fill_mpool_stat $c $h
+f=../java/src/com/sleepycat/db/DbMpoolFStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbQueueStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbQueueStat"
+ echo '{'
+ jclass __db_qam_stat &&
+ echo '}' &&
+ echo '// end of DbQueueStat.java') > $t
+jclass_jni __db_qam_stat __jv_fill_qam_stat $c $h
+f=../java/src/com/sleepycat/db/DbQueueStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbRepStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbRepStat"
+ echo '{'
+ jclass __db_rep_stat &&
+ echo '}' &&
+ echo '// end of DbRepStat.java') > $t
+jclass_jni __db_rep_stat __jv_fill_rep_stat $c $h
+f=../java/src/com/sleepycat/db/DbRepStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbTxnStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbTxnStat"
+ echo '{'
+ echo " public static class Active {"
+ jclass __db_txn_active " " &&
+ echo ' };' &&
+ jclass __db_txn_stat &&
+ echo '}' &&
+ echo '// end of DbTxnStat.java') > $t
+jclass_jni __db_txn_stat __jv_fill_txn_stat $c $h
+f=../java/src/com/sleepycat/db/DbTxnStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $c $t
+f=../libdb_java/java_stat_auto.c
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $h $t
+f=../libdb_java/java_stat_auto.h
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/libdb/dist/s_javah b/libdb/dist/s_javah
new file mode 100755
index 0000000..07ffb9b
--- /dev/null
+++ b/libdb/dist/s_javah
@@ -0,0 +1,55 @@
+#!/bin/sh -
+# $Id$
+#
+# Use javah to build the libdb_java/com_*.h header files.
+#
+# To run this, you will need a javac and javah in your PATH.
+# If possible, install tools of a recent vintage; JDK 1.3 or higher is good.
+# Using Sun's JDK rather than some other installation ensures
+# that the header files will not be constantly changed.
+
+. RELEASE
+
+JAVAC=javac
+JAVAH=javah
+export CLASSPATH
+CLASSPATH=
+
+# CLASSES are only those classes for which we have native methods.
+D=com.sleepycat.db
+CLASSES="$D.Dbc $D.DbEnv $D.Db $D.DbLock $D.DbLogc $D.DbLsn $D.Dbt $D.DbTxn $D.xa.DbXAResource"
+
+d=/tmp/__javah
+c=$d/classes
+trap 'rm -rf $d; exit 0' 0 1 2 3 13 15
+
+rm -rf $d
+mkdir $d || exit 1
+mkdir $c || exit 1
+
+# Make skeleton versions of XA classes and interfaces
+# We only need to compile them, not run them.
+pkg="package javax.transaction.xa"
+echo "$pkg; public interface XAResource {}" > $d/XAResource.java
+echo "$pkg; public interface Xid {}" > $d/Xid.java
+echo "$pkg; public class XAException extends Exception {}" \
+ > $d/XAException.java
+
+
+# Create the .class files and use them with javah to create the .h files
+${JAVAC} -d $c $d/*.java \
+ ../java/src/com/sleepycat/db/*.java \
+ ../java/src/com/sleepycat/db/xa/*.java || exit 1
+${JAVAH} -classpath $c -d $d ${CLASSES} || exit 1
+
+for cl in ${CLASSES}; do
+ h=`echo $cl | sed -e 's/\./_/g'`.h
+ t=$d/$h
+ f=../libdb_java/$h
+ if [ ! -f $t ]; then
+ echo "ERROR: $t does not exist"
+ exit 1
+ fi
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
diff --git a/libdb/dist/s_perm b/libdb/dist/s_perm
new file mode 100755
index 0000000..97a365a
--- /dev/null
+++ b/libdb/dist/s_perm
@@ -0,0 +1,47 @@
+#!/bin/sh -
+# $Id$
+
+d=..
+echo 'Updating Berkeley DB source tree permissions...'
+
+run()
+{
+ echo " $1 ($2)"
+ if [ -f "$d/$1" ]; then
+ chmod "$2" "$d/$1"
+ else
+ echo "$d/$1: no such file or directory"
+ exit 1
+ fi
+}
+
+run build_win32/include.tcl 664
+run dist/config.guess 555
+run dist/config.sub 555
+run dist/configure 555
+run dist/install-sh 555
+run dist/s_all 555
+run dist/s_config 555
+run dist/s_include 555
+run dist/s_java 555
+run dist/s_javah 555
+run dist/s_perm 555
+run dist/s_readme 555
+run dist/s_recover 555
+run dist/s_rpc 555
+run dist/s_symlink 555
+run dist/s_tags 555
+run dist/s_test 555
+run dist/s_vxworks 555
+run dist/s_win32 555
+run dist/s_win32_dsp 555
+run dist/vx_buildcd 555
+
+run perl/BerkeleyDB/dbinfo 555
+run perl/BerkeleyDB/mkpod 555
+
+for i in `cd $d && find build_vxworks \
+ -name '*.wsp' -o -name '*.cdf' -o -name '*.wpj'`; do
+ echo " $i (775)"
+ chmod 775 $d/$i
+done
diff --git a/libdb/dist/s_readme b/libdb/dist/s_readme
new file mode 100755
index 0000000..c819ccf
--- /dev/null
+++ b/libdb/dist/s_readme
@@ -0,0 +1,23 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the README.
+
+d=..
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. RELEASE
+
+cat << END_OF_README>$t
+$DB_VERSION_STRING
+
+This is version $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH of Berkeley DB from Sleepycat Software. To view
+the release and installation documentation, load the distribution file
+docs/index.html into your web browser.
+END_OF_README
+
+f=../README
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/libdb/dist/s_recover b/libdb/dist/s_recover
new file mode 100755
index 0000000..faa8582
--- /dev/null
+++ b/libdb/dist/s_recover
@@ -0,0 +1,67 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the automatically generated logging/recovery files.
+
+tmp=/tmp/__db_a
+loglist=/tmp/__db_b
+source=/tmp/__db_c
+header=/tmp/__db_d
+template=/tmp/__db_e
+
+trap 'rm -f $tmp $loglist $source $header $template; exit 1' 1 2 3 13 15
+trap 'rm -f $tmp $loglist $source $header $template; exit 0' 0
+
+DIR="db dbreg btree hash qam txn"
+
+# Check to make sure we haven't duplicated a log record entry, and build
+# the list of log record types that the test suite uses.
+for i in $DIR; do
+ p=none
+ for f in ../$i/*.src; do
+ # Grab the PREFIX; there should only be one per file, and
+ # so it's okay to just take the first.
+ grep '^PREFIX' $f | sed q
+ egrep '^BEGIN[ ]|^IGNORED[ ]|^DEPRECATED[ ]' $f |
+ awk '{print $1 "\t" $2 "\t" $3}'
+
+ done
+done > $loglist
+grep -v '^PREFIX' $loglist |
+ awk '{print $2 "\t" $3}' | sort -n -k 2 | uniq -d -f 1 > $tmp
+[ -s $tmp ] && {
+ echo "DUPLICATE LOG VALUES:"
+ cat $tmp
+ rm -f $tmp
+ exit 1
+}
+f=../test/logtrack.list
+cmp $loglist $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $loglist $f && chmod 444 $f)
+
+# Build DB's recovery routines.
+for i in db dbreg btree fileops hash qam txn; do
+ for f in ../$i/*.src; do
+ subsystem=`basename $f .src`
+ awk -f gen_rec.awk \
+ -v source_file=$source \
+ -v header_file=$header \
+ -v template_file=$template < $f
+
+ f=../dbinc_auto/${subsystem}_auto.h
+ cmp $header $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $header $f && chmod 444 $f)
+ f=../$i/${subsystem}_auto.c
+ cmp $source $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $source $f && chmod 444 $f)
+ f=template/rec_${subsystem}
+ cmp $template $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $template $f && chmod 444 $f)
+ done
+done
+
+# Build the example application's recovery routines.
+(cd ../examples_c/ex_apprec && sh auto_rebuild)
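To make the duplicate-value check above concrete, here is a small, self-contained sketch of the same awk/sort/uniq pipeline; the record names and log values are fabricated for illustration:

list=/tmp/__demo_loglist
printf 'BEGIN\tfoo_create\t41\nBEGIN\tfoo_remove\t42\nBEGIN\tbar_write\t42\n' > $list
awk '{print $2 "\t" $3}' $list | sort -n -k 2 | uniq -d -f 1
# Prints one line for the pair that shares log value 42, flagging the duplicate.
rm -f $list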
diff --git a/libdb/dist/s_rpc b/libdb/dist/s_rpc
new file mode 100755
index 0000000..136c05b
--- /dev/null
+++ b/libdb/dist/s_rpc
@@ -0,0 +1,137 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the automatically generated RPC files
+
+echo "Building RPC client/server files"
+
+. ./RELEASE
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+client_file=../rpc_client/gen_client.c
+ctmpl_file=./template/gen_client_ret
+dbinc_file=../dbinc/db.in
+defs_file=../dbinc_auto/rpc_defs.in
+header_file=../dbinc_auto/db_server.h
+proc_file=../rpc_server/c/db_server_proc.c
+rpcclnt_file=../rpc_client/db_server_clnt.c
+rpcsvc_file=../rpc_server/c/db_server_svc.c
+rpcxdr_file=../rpc_server/c/db_server_xdr.c
+sed_file=../rpc_server/c/db_server_proc.sed
+server_file=../rpc_server/c/gen_db_server.c
+stmpl_file=./template/db_server_proc
+xdr_file=../rpc_server/db_server.x
+
+#
+# NOTE: We do NOT want to remove $proc_file. It is the file we apply $sed_file
+# to; it is not regenerated here, so it must not be removed with the others.
+rm -f $client_file \
+ $ctmpl_file \
+ $header_file \
+ $rpcclnt_file \
+ $rpcsvc_file \
+ $rpcxdr_file \
+ $sed_file \
+ $server_file \
+ $stmpl_file \
+ $xdr_file
+
+#
+# Generate client/server/XDR code
+#
+xidsize=\
+`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' $dbinc_file`
+
+awk -f gen_rpc.awk \
+ -v major=$DB_VERSION_MAJOR \
+ -v minor=$DB_VERSION_MINOR \
+ -v xidsize=$xidsize \
+ -v client_file=$client_file \
+ -v ctmpl_file=$ctmpl_file \
+ -v sed_file=$sed_file \
+ -v server_file=$server_file \
+ -v stmpl_file=$stmpl_file \
+ -v xdr_file=$xdr_file < ../rpc_server/rpc.src
+chmod 444 $client_file $server_file
+
+#
+# Now run rpcgen to generate all our sources from the XDR file
+#
+rpcgen -h $xdr_file > $header_file
+rpcgen -l $xdr_file > $rpcclnt_file
+rpcgen -s tcp $xdr_file > $rpcsvc_file
+rpcgen -c $xdr_file > $rpcxdr_file
+
+#
+# Run various server files through sed.
+#
+cat <<ENDOFSEDTEXT>$t
+s/^#include[ ]"db_server.h"/#include "db_config.h"\\
+\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc\\/rpc.h>\\
+\\#include <rpc\\/pmap_clnt.h>/
+/^#include <netinet.in.h>/a\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"\\
+\\#include "dbinc/db_server_int.h"\\
+\\#include "dbinc_auto/rpc_server_ext.h"
+/^ return;/i\\
+\\ __dbsrv_timeout(0);
+s/svc_sendreply(transp, xdr_void,/svc_sendreply(transp, (xdrproc_t)xdr_void,/
+s/svc_getargs(transp, xdr_argument, &argument)/svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/svc_sendreply(transp, xdr_result, result)/svc_sendreply(transp, (xdrproc_t)xdr_result, result)/
+s/svc_freeargs(transp, xdr_argument, &argument)/svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/^main/void __dbsrv_main/
+ENDOFSEDTEXT
+sed -f $t $rpcsvc_file > ${rpcsvc_file}.new
+mv ${rpcsvc_file}.new $rpcsvc_file
+
+sed -f $sed_file $proc_file > ${proc_file}.new
+mv ${proc_file}.new $proc_file
+
+# Run rpcgen files through sed to add HAVE_RPC ifdef and appropriate
+# includes.
+cat <<ENDOFSEDTEXT>$t
+1i\\
+\\#include "db_config.h"\\
+\\
+\\#ifdef HAVE_RPC
+/^#include "db_server.h"/c\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc/rpc.h>\\
+\\
+\\#include <strings.h>\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"
+\$a\\
+\\#endif /* HAVE_RPC */
+ENDOFSEDTEXT
+
+sed -f $t $rpcxdr_file > ${rpcxdr_file}.new
+mv ${rpcxdr_file}.new $rpcxdr_file
+sed -f $t $rpcclnt_file > ${rpcclnt_file}.new
+mv ${rpcclnt_file}.new $rpcclnt_file
+
+# Copy the DB_RPC_SERVER #defines into a separate file so
+# they can be part of db.h.
+msgc="/* DO NOT EDIT: automatically built by dist/s_rpc. */"
+(echo "" && echo "$msgc" &&
+ sed -n -e "/DB_RPC_SERVER/p" $header_file) > $defs_file
+
+# Fix up the header file:
+# Remove the DB_RPC_SERVER #defines.
+# Remove the <rpc/rpc.h> include; it needs to be included earlier
+# than this header.
+sed -e "/DB_RPC_SERVER/d"\
+ -e "/^#include.*<rpc\/rpc.h>/d" $header_file > ${header_file}.new
+mv ${header_file}.new $header_file
+
+chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file
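As a hedged illustration of the sed editing above, this is the xdrproc_t cast substitution applied to a line of the kind rpcgen emits (the input line here is fabricated):

echo '	svc_sendreply(transp, xdr_void, (char *)NULL);' |
sed 's/svc_sendreply(transp, xdr_void,/svc_sendreply(transp, (xdrproc_t)xdr_void,/'
# Prints:	svc_sendreply(transp, (xdrproc_t)xdr_void, (char *)NULL);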
diff --git a/libdb/dist/s_symlink b/libdb/dist/s_symlink
new file mode 100755
index 0000000..d45d6f0
--- /dev/null
+++ b/libdb/dist/s_symlink
@@ -0,0 +1,58 @@
+#!/bin/sh -
+# $Id$
+
+echo 'Creating Berkeley DB source tree symbolic links...'
+
+build()
+{
+ echo " $1 -> $2"
+ (cd ../`dirname $1` && rm -f `basename $1` && ln -s $2 `basename $1`)
+}
+
+build btree/tags ../dist/tags
+build build_unix/tags ../dist/tags
+build clib/tags ../dist/tags
+build common/tags ../dist/tags
+build crypto/tags ../dist/tags
+build cxx/tags ../dist/tags
+build db/tags ../dist/tags
+build db185/tags ../dist/tags
+build db_archive/tags ../dist/tags
+build db_checkpoint/tags ../dist/tags
+build db_deadlock/tags ../dist/tags
+build db_dump/tags ../dist/tags
+build db_dump185/tags ../dist/tags
+build db_load/tags ../dist/tags
+build db_printlog/tags ../dist/tags
+build db_recover/tags ../dist/tags
+build db_stat/tags ../dist/tags
+build db_upgrade/tags ../dist/tags
+build db_verify/tags ../dist/tags
+build dbinc/tags ../dist/tags
+build dbinc_auto/tags ../dist/tags
+build dbm/tags ../dist/tags
+build dbreg/tags ../dist/tags
+build env/tags ../dist/tags
+build examples_c/tags ../dist/tags
+build examples_cxx/tags ../dist/tags
+build examples_java java/src/com/sleepycat/examples
+build fileops/tags ../dist/tags
+build hash/tags ../dist/tags
+build hmac/tags ../dist/tags
+build hsearch/tags ../dist/tags
+build libdb_java/tags ../dist/tags
+build lock/tags ../dist/tags
+build log/tags ../dist/tags
+build mp/tags ../dist/tags
+build mutex/tags ../dist/tags
+build os/tags ../dist/tags
+build os_vxworks/tags ../dist/tags
+build os_win32/tags ../dist/tags
+build qam/tags ../dist/tags
+build rep/tags ../dist/tags
+build rpc_client/tags ../dist/tags
+build rpc_server/tags ../dist/tags
+build tcl/tags ../dist/tags
+build test_server/tags ../dist/tags
+build txn/tags ../dist/tags
+build xa/tags ../dist/tags
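For reference, each build() call above is a short symlink recipe; expanded by hand, "build btree/tags ../dist/tags" amounts to:

(cd ../btree && rm -f tags && ln -s ../dist/tags tags)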
diff --git a/libdb/dist/s_tags b/libdb/dist/s_tags
new file mode 100755
index 0000000..348af3f
--- /dev/null
+++ b/libdb/dist/s_tags
@@ -0,0 +1,60 @@
+#!/bin/sh -
+# $Id$
+#
+# Build tags files.
+
+files="../dbinc/*.h \
+ ../dbinc/*.in \
+ ../btree/*.[ch] \
+ ../clib/*.[ch] \
+ ../common/*.[ch] \
+ ../crypto/*.[ch] \
+ ../crypto/mersenne/*.[ch] \
+ ../crypto/rijndael/*.[ch] \
+ ../db/*.[ch] \
+ ../db185/*.[ch] \
+ ../dbm/*.[ch] \
+ ../dbreg/*.[ch] \
+ ../env/*.[ch] \
+ ../fileops/*.[ch] \
+ ../hash/*.[ch] \
+ ../hmac/*.[ch] \
+ ../hsearch/*.[ch] \
+ ../lock/*.[ch] \
+ ../log/*.[ch] \
+ ../mp/*.[ch] \
+ ../mutex/*.[ch] \
+ ../os/*.[ch] \
+ ../qam/*.[ch] \
+ ../rep/*.[ch] \
+ ../rpc_client/*.[ch] \
+ ../rpc_server/c/*.[ch] \
+ ../tcl/*.[ch] \
+ ../txn/*.[ch] \
+ ../xa/*.[ch] \
+ ../cxx/*.cpp \
+ ../libdb_java/*.[ch]"
+
+f=tags
+echo "Building $f"
+rm -f $f
+
+# Figure out what flags this ctags accepts.
+flags=""
+if ctags -d ../db/db.c 2>/dev/null; then
+ flags="-d $flags"
+fi
+if ctags -t ../db/db.c 2>/dev/null; then
+ flags="-t $flags"
+fi
+if ctags -w ../db/db.c 2>/dev/null; then
+ flags="-w $flags"
+fi
+
+ctags $flags $files 2>/dev/null
+chmod 444 $f
+
+f=../test_perf/tags
+echo "Building $f"
+(cd ../test_perf && ctags $flags *.[ch] 2>/dev/null)
+chmod 444 $f
diff --git a/libdb/dist/s_test b/libdb/dist/s_test
new file mode 100755
index 0000000..2c48806
--- /dev/null
+++ b/libdb/dist/s_test
@@ -0,0 +1,92 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the Tcl test files.
+
+msg1="# Automatically built by dist/s_test; may require local editing."
+msg2="# Automatically built by dist/s_test; may require local editing."
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. RELEASE
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path @TCL_TCLSH@" && \
+ echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@MODSUFFIX@" && \
+ echo "" && \
+ echo "set rpc_server localhost" && \
+ echo "set rpc_path ." && \
+ echo "set rpc_testdir \$rpc_path/TESTDIR" && \
+ echo "" && \
+ echo "set src_root @srcdir@/.." && \
+ echo "set test_path @srcdir@/../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL \"@db_cv_path_kill@\"") > $t
+
+f=../test/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
+ echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
+ echo "" && \
+ echo "set src_root .." && \
+ echo "set test_path ../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL ./dbkill.exe") > $t
+
+f=../build_win32/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build the test directory TESTS file.
+(echo $msg2;
+cat `egrep -l '^#[ ][ ]*TEST' ../test/*.tcl` |
+sed -e '/^#[ ][ ]*TEST/!{' \
+ -e 's/.*//' \
+ -e '}' |
+cat -s |
+sed -e '/TEST/{' \
+ -e 's/^#[ ][ ]*TEST[ ]*//' \
+ -e 's/^ //' \
+ -e 'H' \
+ -e 'd' \
+ -e '}' \
+ -e 's/.*//' \
+ -e x \
+ -e 's/\n/__LINEBREAK__/g' |
+sort |
+sed -e 's/__LINEBREAK__/\
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\
+/' \
+ -e 's/__LINEBREAK__/\
+ /g' |
+sed -e 's/^[ ][ ]*$//') > $t
+
+f=../test/TESTS
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/libdb/dist/s_vxworks b/libdb/dist/s_vxworks
new file mode 100755
index 0000000..edf265e
--- /dev/null
+++ b/libdb/dist/s_vxworks
@@ -0,0 +1,324 @@
+#!/bin/sh -
+# $Id$
+#
+# Build the VxWorks files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */"
+
+. RELEASE
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+vxfilelist=/tmp/__db_c
+
+trap 'rm -f $s $t $vxfilelist ; exit 0' 0
+trap 'rm -f $s $t $vxfilelist ; exit 1' 1 2 3 13 15
+
+# Build the VxWorks automatically generated files.
+f=../build_vxworks/db.h
+cat <<ENDOFSEDTEXT > $s
+/extern "C" {/{
+n
+n
+i\\
+\\
+/* Tornado 2 does not provide a standard C pre-processor #define. */\\
+#ifndef __vxworks\\
+#define __vxworks\\
+#endif
+}
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@int16_decl@/d
+/@int32_decl@/d
+/@u_char_decl@/d
+/@u_short_decl@/d
+/@u_int_decl@/d
+/@u_long_decl@/d
+/@ssize_t_decl@/d
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
+ENDOFSEDTEXT
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_vxworks/db_int.h
+cat <<ENDOFSEDTEXT > $s
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\/\\\\\\\\\\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_vxworks/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" vx_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build a sed script that will change a "standard" DB utility into
+# VxWorks-compatible code.
+transform()
+{
+ # Build a sed script that will add argument parsing support and
+ # rename all of the functions to be private to this file.
+cat <<ENDOFSEDTEXT
+/^main(argc, argv)$/{
+i\\
+$1(args)\\
+\\ char *args;\\
+{\\
+\\ int argc;\\
+\\ char **argv;\\
+\\
+\\ __db_util_arg("$1", args, &argc, &argv);\\
+\\ return ($1_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);\\
+}\\
+\\
+#include <stdio.h>\\
+#define ERROR_RETURN ERROR\\
+\\
+int\\
+$1_main(argc, argv)
+d
+}
+/^ while ((ch = getopt/i\\
+\\ __db_getopt_reset = 1;
+/^[ ]*extern int optind;/s/;/, __db_getopt_reset;/
+ENDOFSEDTEXT
+
+ # Replace all function names with VxWorks safe names.
+ # Function names are:
+ # Tokens starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ # Replace:
+ # Matches preceded by a non-C-token character and immediately
+ # followed by an opening parenthesis.
+ # Matches preceded by a non-C-token character and immediately
+ # followed by " __P".
+ # Matches starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ for k in `sed -e 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)(.*$/\1/p' -e d $2`; do
+ echo "s/\([^a-zA-Z0-9_]\)\($k(\)/\1$1_\2/g"
+ echo "s/\([^a-zA-Z0-9_]\)\($k[ ]__P\)/\1$1_\2/g"
+ echo "s/^\($k(\)/$1_\1/g"
+ done
+
+ # There are special cases the rules above don't catch:
+ # a txn_compare function used as an argument to qsort(3);
+ # a print_app_record function used as an argument to
+ # dbenv->set_app_dispatch().
+ echo "s/, txn_compare);/, db_stat_txn_compare);/"
+ echo "s/, print_app_record)) /, db_printlog_print_app_record)) /"
+
+ # We convert the ex_access sample into dbdemo for VxWorks.
+ echo 's/progname = "ex_access";/progname = "dbdemo";/'
+
+ # The example programs have to load db_int.h, not db.h -- else
+ # they won't have the right Berkeley DB prototypes for getopt
+ # and friends.
+ echo '/#include.*db.h/c\'
+ echo '#include <db_config.h>\'
+ echo '#include <db_int.h>'
+}
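# Illustrative sketch, not part of the upstream script: the renaming rules
# emitted above, applied by hand with a fabricated target name "dbdemo" and
# a fabricated function name "main_loop":
echo '	if ((ret = main_loop(dbenv)) != 0)' |
sed 's/\([^a-zA-Z0-9_]\)\(main_loop(\)/\1dbdemo_\2/g'
# Prints:	if ((ret = dbdemo_main_loop(dbenv)) != 0)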
+
+PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify ex_access"
+
+# Build VxWorks versions of the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ transform $target $dir/$i.c > $s
+ sed -f $s < $dir/$i.c > $t
+
+ f=../build_vxworks/$target/$target.c
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build VxWorks Tornado 2.0 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ sed "s/__DB_APPLICATION_NAME__/$target/g" < vx_2.0/wpj.in > $t
+ f=../build_vxworks/$target/$target.wpj
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build the list of files VxWorks knows about.
+sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in |
+ egrep -w vx |
+ sed 's/[ ].*//' > $vxfilelist
+
+# Build VxWorks Tornado 2.0 project files for the library itself.
+(cat vx_2.0/BerkeleyDB.wpj
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> FILE_\$(PRJ_DIR)/../$i"
+ echo "${o}_dependDone"
+ echo "TRUE"
+ echo "<END>"
+ echo
+ echo "${o}_dependencies"
+ echo "\$(PRJ_DIR)/db_config.h \\"
+ echo " \$(PRJ_DIR)/db_int.h \\"
+ echo " \$(PRJ_DIR)/db.h"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_tool"
+ echo "C/C++ compiler"
+ echo "<END>"
+ echo
+done
+echo "<BEGIN> PROJECT_FILES"
+sed -e '$!s/$/ \\/' \
+ -e 's/^/$(PRJ_DIR)\/..\//' \
+ -e '1!s/^/ /' < $vxfilelist
+echo "<END>"
+echo
+echo "<BEGIN> userComments"
+echo "BerkeleyDB"
+echo "<END>") > $t
+f=../build_vxworks/BerkeleyDB.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build VxWorks Tornado 3.1 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ cp vx_3.1/Makefile.custom $t
+ f=../build_vxworks/$target/$target/Makefile.custom
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+ for j in component.cdf component.wpj; do
+ #
+ # Some parts of the component files need to have the
+ # name in all capitals. Sigh.
+ #
+ z=`echo $target | tr "a-z" "A-Z"`
+ sed -e "s/__DB_APPLICATION_NAME__/$target/g" \
+ -e "s/__DB_CAPAPPL_NAME__/$z/g" < vx_3.1/$j > $t
+ f=../build_vxworks/$target/$target/$j
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+ done
+done
+
+# Build VxWorks Tornado 3.1 project files for the library itself.
+cp vx_3.1/Makefile.custom $t
+f=../build_vxworks/BerkeleyDB/Makefile.custom
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/cdf.1
+echo -n " MODULES"
+for i in `cat $vxfilelist`; do
+ echo " `basename $i .c`.o"
+done | sort | sed -e '$!s/$/ \\/'
+cat vx_3.1/cdf.2
+for i in `cat $vxfilelist`; do
+ b="`basename $i .c`.o"
+ echo "Module $b {"
+ echo
+ echo " NAME $b"
+ echo " SRC_PATH_NAME \$(PRJ_DIR)/../../$i"
+ echo "}"
+ echo
+done
+cat vx_3.1/cdf.3)> $t
+f=../build_vxworks/BerkeleyDB/component.cdf
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/wpj.1
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.2
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.release_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.3
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUMgnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.4
+sort $vxfilelist |
+sed -e 's/^/$(PRJ_DIR)\/..\/..\//' \
+ -e '1!s/^/ /' \
+ -e '$!s/$/ \\/'
+cat vx_3.1/wpj.5) > $t
+f=../build_vxworks/BerkeleyDB/component.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/libdb/dist/s_win32 b/libdb/dist/s_win32
new file mode 100755
index 0000000..1220755
--- /dev/null
+++ b/libdb/dist/s_win32
@@ -0,0 +1,108 @@
+#!/bin/sh -
+# $Id$
+#
+# Build Windows/32 include files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_win32. */"
+msgw="; DO NOT EDIT: automatically built by dist/s_win32."
+
+. RELEASE
+
+s=/tmp/__db_a$$
+t=/tmp/__db_b$$
+rm -f $s $t
+
+trap 'rm -f $s $t ; exit 1' 1 2 3 13 15
+
+# Build the Win32 automatically generated files.
+f=../build_win32/db.h
+cat <<ENDOFSEDTEXT > $s
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@int16_decl@/typedef short int16_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@int32_decl@/typedef int int32_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@u_char_decl@/{
+ i\\
+#if !defined(_WINSOCKAPI_)
+ s/@u_char_decl@/typedef unsigned char u_char;/
+}
+s/@u_short_decl@/typedef unsigned short u_short;/
+s/@u_int_decl@/typedef unsigned int u_int;/
+/@u_long_decl@/{
+ s/@u_long_decl@/typedef unsigned long u_long;/
+ a\\
+#endif
+}
+/@ssize_t_decl@/{
+ i\\
+#if defined(_WIN64)\\
+typedef __int64 ssize_t;\\
+#else\\
+typedef int ssize_t;\\
+#endif
+ d
+}
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
+ENDOFSEDTEXT
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_cxx.h
+cat <<ENDOFSEDTEXT > $s
+s/@cxx_have_stdheaders@/#define HAVE_CXX_STDHEADERS 1/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_int.h
+cat <<ENDOFSEDTEXT > $s
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\\\\\\\\\\/:\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/libdb.rc
+cat <<ENDOFSEDTEXT > $s
+s/%MAJOR%/$DB_VERSION_MAJOR/
+s/%MINOR%/$DB_VERSION_MINOR/
+s/%PATCH%/$DB_VERSION_PATCH/
+ENDOFSEDTEXT
+sed -f $s ../build_win32/libdbrc.src > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/libdb.def
+(echo $msgw &&
+ echo &&
+ echo \
+ "DESCRIPTION 'Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR Library'" &&
+ echo &&
+ echo EXPORTS;
+a=1
+for i in `sed -e '/^$/d' -e '/^#/d' win_exports.in`; do
+ echo " $i @$a"
+ a=`expr $a + 1`
+done) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+rm -f $s $t
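The export-numbering loop above is easiest to see on a tiny input; a hedged, stand-alone sketch using a fabricated two-symbol exports list:

ex=/tmp/__demo_exports
printf 'db_create\ndb_env_create\n' > $ex
a=1
for i in `sed -e '/^$/d' -e '/^#/d' $ex`; do
	echo "	$i @$a"
	a=`expr $a + 1`
done
rm -f $ex
# Prints:	db_create @1
#		db_env_create @2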
diff --git a/libdb/dist/s_win32_dsp b/libdb/dist/s_win32_dsp
new file mode 100755
index 0000000..7bd30b9
--- /dev/null
+++ b/libdb/dist/s_win32_dsp
@@ -0,0 +1,107 @@
+#!/bin/sh -
+# $Id$
+#
+# Build Windows/32 .dsp files.
+
+. RELEASE
+
+BUILDDIR=../build_win32
+SRCFILES=srcfiles.in
+
+create_dsp()
+{
+ projname="$1" # name of the .dsp file
+ match="$2" # the string used to egrep the $sources file
+ sources="$3" # a modified version of $SRCFILES to facilitate matches
+ dsptemplate="$4" # overall template file for the .dsp
+ srctemplate="$5" # template file for the src file fragments
+
+ dspoutput=$BUILDDIR/$projname.dsp
+
+ rm -f $dspoutput.insert
+ for srcpath in `egrep "$match" $sources | sed -e 's/[ ].*//'`
+ do
+ # Take the path name and break it up, converting / to \\.
+ # So many backslashes are needed because of shell quoting and
+ # sed quoting -- we end up with two backslashes for every
+ # forward slash, but that is what the later sed command
+ # requires.
+ set - `echo $srcpath | sed -e 's;\(.*\)/;../\\1 ;' \
+ -e 's;../build_win32;.;' \
+ -e 's;/;\\\\\\\\;g'`
+ srcdir="$1"
+ srcfile="$2"
+ sed -e "s/@srcdir@/$srcdir/g" \
+ -e "s/@srcfile@/$srcfile/g" \
+ < $srctemplate >> $dspoutput.insert
+ done
+ sed -e "/@SOURCE_FILES@/r$dspoutput.insert" \
+ -e "/@SOURCE_FILES@/d" \
+ -e "s/@project_name@/$projname/g" \
+ -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \
+ < $dsptemplate > $dspoutput.new
+
+ # Set the file mode to 664 because the VC++ IDE needs a writeable file
+ # in our development environment.
+ cmp $dspoutput.new $dspoutput > /dev/null 2>&1 ||
+ (echo "Building $dspoutput" && rm -f $dspoutput &&
+ cp $dspoutput.new $dspoutput && chmod 664 $dspoutput)
+ rm -f $dspoutput.insert $dspoutput.new
+}
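# Illustrative sketch, not part of the upstream script: the path split done
# inside create_dsp above, run by hand on a typical srcfiles.in entry.
# Outside the backquotes only half as many backslashes are needed in the
# last pattern.
echo btree/bt_open.c | sed -e 's;\(.*\)/;../\1 ;' -e 's;/;\\\\;g'
# Prints: ..\\btree bt_open.c  (srcdir and srcfile, separated by the space)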
+
+TMPA=/tmp/swin32dsp$$a
+trap "rm -f $TMPA; exit 1" 1 2 3 15
+
+# Create a copy of the srcfiles with comments and empty lines removed.
+# Add a space at the end of each list of modules so that each module
+# can be unambiguously matched, e.g. ' dynamic '.
+sed -e "s/#.*$//" \
+ -e "/^[ ]*$/d" \
+ -e "s/[ ][ ]*/ /" \
+ -e "s/[ ]*$//" \
+ -e "/[ ]/!d" \
+ -e "s/$/ /" < $SRCFILES > $TMPA
+
+# Get a list of all modules mentioned.
+#
+MODULES="`sed -e 's/^[^ ]* //' < $TMPA \
+ | tr ' ' '\012' | sort | uniq`"
+
+for module in $MODULES
+do
+ case "$module" in
+ dynamic )
+ create_dsp db_dll " $module " $TMPA \
+ $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ java )
+ create_dsp db_java " $module " $TMPA \
+ $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ tcl )
+ create_dsp db_tcl " $module " $TMPA \
+ $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ testutil )
+ create_dsp db_test " $module " $TMPA \
+ $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ static )
+ create_dsp db_static " $module " $TMPA \
+ $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ app=* )
+ appname=`echo $module | sed -e 's/^app=//'`
+ create_dsp $appname " $module " $TMPA \
+ $BUILDDIR/app_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ vx )
+ ;;
+ * )
+ echo "s_win32_dsp: module name $module in $SRCFILES is unknown type"
+ ;;
+ esac
+done
+
+rm -f $TMPA
diff --git a/libdb/dist/srcfiles.in b/libdb/dist/srcfiles.in
new file mode 100644
index 0000000..0f39f47
--- /dev/null
+++ b/libdb/dist/srcfiles.in
@@ -0,0 +1,332 @@
+# $Id$
+#
+# This is an input file for the s_win32_dsp and s_vxworks scripts. It lists
+# the source files in the Berkeley DB tree and notes which are used to build
+# the Win/32 and VxWorks libraries.
+#
+# Please keep this list sorted alphabetically!
+#
+# Each non-blank, non-comment line is of the form
+# filename module [ module ...]
+#
+# The possible modules, including the name of the project (.dsp) file:
+#
+# app=NAME Linked into application NAME.exe (db_NAME.dsp)
+# dynamic File is in the Windows DLL (db_dll.dsp)
+# java File is in the Windows Java DLL (db_java.dsp)
+# static File is in the Windows static library (db_static.dsp)
+# tcl File is in the Windows tcl DLL (db_tcl.dsp)
+# testutil File is used for Windows testing (db_test.dsp)
+# vx File is in the VxWorks library.
+
+btree/bt_compare.c dynamic static vx
+btree/bt_conv.c dynamic static vx
+btree/bt_curadj.c dynamic static vx
+btree/bt_cursor.c dynamic static vx
+btree/bt_delete.c dynamic static vx
+btree/bt_method.c dynamic static vx
+btree/bt_open.c dynamic static vx
+btree/bt_put.c dynamic static vx
+btree/bt_rec.c dynamic static vx
+btree/bt_reclaim.c dynamic static vx
+btree/bt_recno.c dynamic static vx
+btree/bt_rsearch.c dynamic static vx
+btree/bt_search.c dynamic static vx
+btree/bt_split.c dynamic static vx
+btree/bt_stat.c dynamic static vx
+btree/bt_upgrade.c dynamic static vx
+btree/bt_verify.c dynamic static vx
+btree/btree_auto.c dynamic static vx
+build_vxworks/db_archive/db_archive.c
+build_vxworks/db_checkpoint/db_checkpoint.c
+build_vxworks/db_deadlock/db_deadlock.c
+build_vxworks/db_dump/db_dump.c
+build_vxworks/db_load/db_load.c
+build_vxworks/db_printlog/db_printlog.c
+build_vxworks/db_recover/db_recover.c
+build_vxworks/db_stat/db_stat.c
+build_vxworks/db_upgrade/db_upgrade.c
+build_vxworks/db_verify/db_verify.c
+build_vxworks/dbdemo/dbdemo.c
+build_win32/dbkill.cpp testutil
+build_win32/dllmain.c dynamic
+build_win32/libdb.def dynamic
+build_win32/libdb.rc dynamic
+build_win32/libdb_tcl.def tcl
+clib/getcwd.c
+clib/getopt.c vx
+clib/memcmp.c
+clib/memmove.c
+clib/raise.c
+clib/snprintf.c vx
+clib/strcasecmp.c dynamic static vx
+clib/strdup.c vx
+clib/strerror.c
+clib/vsnprintf.c vx
+common/db_byteorder.c dynamic static vx
+common/db_err.c dynamic static vx
+common/db_getlong.c dynamic static vx
+common/db_idspace.c dynamic static vx
+common/db_log2.c dynamic static vx
+common/util_arg.c vx
+common/util_cache.c dynamic static vx
+common/util_log.c dynamic static vx
+common/util_sig.c dynamic static vx
+cxx/cxx_db.cpp dynamic static
+cxx/cxx_dbc.cpp dynamic static
+cxx/cxx_dbt.cpp dynamic static
+cxx/cxx_env.cpp dynamic static
+cxx/cxx_except.cpp dynamic static
+cxx/cxx_lock.cpp dynamic static
+cxx/cxx_logc.cpp dynamic static
+cxx/cxx_mpool.cpp dynamic static
+cxx/cxx_txn.cpp dynamic static
+db/crdel_auto.c dynamic static vx
+db/crdel_rec.c dynamic static vx
+db/db.c dynamic static vx
+db/db_am.c dynamic static vx
+db/db_auto.c dynamic static vx
+db/db_cam.c dynamic static vx
+db/db_conv.c dynamic static vx
+db/db_dispatch.c dynamic static vx
+db/db_dup.c dynamic static vx
+db/db_iface.c dynamic static vx
+db/db_join.c dynamic static vx
+db/db_meta.c dynamic static vx
+db/db_method.c dynamic static vx
+db/db_open.c dynamic static vx
+db/db_overflow.c dynamic static vx
+db/db_pr.c dynamic static vx
+db/db_rec.c dynamic static vx
+db/db_reclaim.c dynamic static vx
+db/db_remove.c dynamic static vx
+db/db_rename.c dynamic static vx
+db/db_ret.c dynamic static vx
+db/db_truncate.c dynamic static vx
+db/db_upg.c dynamic static vx
+db/db_upg_opd.c dynamic static vx
+db/db_vrfy.c dynamic static vx
+db/db_vrfyutil.c dynamic static vx
+db185/db185.c
+db_archive/db_archive.c app=db_archive
+db_checkpoint/db_checkpoint.c app=db_checkpoint
+db_deadlock/db_deadlock.c app=db_deadlock
+db_dump/db_dump.c app=db_dump
+db_dump185/db_dump185.c
+db_load/db_load.c app=db_load
+db_printlog/db_printlog.c app=db_printlog
+db_recover/db_recover.c app=db_recover
+db_stat/db_stat.c app=db_stat
+db_upgrade/db_upgrade.c app=db_upgrade
+db_verify/db_verify.c app=db_verify
+dbm/dbm.c dynamic static
+dbreg/dbreg.c dynamic static vx
+dbreg/dbreg_auto.c dynamic static vx
+dbreg/dbreg_rec.c dynamic static vx
+dbreg/dbreg_util.c dynamic static vx
+env/db_salloc.c dynamic static vx
+env/db_shash.c dynamic static vx
+env/env_file.c dynamic static vx
+env/env_method.c dynamic static vx
+env/env_open.c dynamic static vx
+env/env_recover.c dynamic static vx
+env/env_region.c dynamic static vx
+examples_c/bench_001.c
+examples_c/ex_access.c app=ex_access
+examples_c/ex_apprec/ex_apprec.c
+examples_c/ex_apprec/ex_apprec_auto.c
+examples_c/ex_apprec/ex_apprec_rec.c
+examples_c/ex_btrec.c app=ex_btrec
+examples_c/ex_dbclient.c
+examples_c/ex_env.c app=ex_env
+examples_c/ex_lock.c app=ex_lock
+examples_c/ex_mpool.c app=ex_mpool
+examples_c/ex_repquote/ex_rq_client.c
+examples_c/ex_repquote/ex_rq_main.c
+examples_c/ex_repquote/ex_rq_master.c
+examples_c/ex_repquote/ex_rq_net.c
+examples_c/ex_repquote/ex_rq_util.c
+examples_c/ex_thread.c
+examples_c/ex_tpcb.c app=ex_tpcb
+examples_cxx/AccessExample.cpp app=excxx_access
+examples_cxx/BtRecExample.cpp app=excxx_btrec
+examples_cxx/EnvExample.cpp app=excxx_env
+examples_cxx/LockExample.cpp app=excxx_lock
+examples_cxx/MpoolExample.cpp app=excxx_mpool
+examples_cxx/TpcbExample.cpp app=excxx_tpcb
+fileops/fileops_auto.c dynamic static vx
+fileops/fop_basic.c dynamic static vx
+fileops/fop_rec.c dynamic static vx
+fileops/fop_util.c dynamic static vx
+hash/hash.c dynamic static vx
+hash/hash_auto.c dynamic static vx
+hash/hash_conv.c dynamic static vx
+hash/hash_dup.c dynamic static vx
+hash/hash_func.c dynamic static vx
+hash/hash_meta.c dynamic static vx
+hash/hash_method.c dynamic static vx
+hash/hash_open.c dynamic static vx
+hash/hash_page.c dynamic static vx
+hash/hash_rec.c dynamic static vx
+hash/hash_reclaim.c dynamic static vx
+hash/hash_stat.c dynamic static vx
+hash/hash_upgrade.c dynamic static vx
+hash/hash_verify.c dynamic static vx
+hmac/hmac.c dynamic static vx
+hmac/sha1.c dynamic static vx
+hsearch/hsearch.c dynamic static vx
+libdb_java/java_Db.c java
+libdb_java/java_DbEnv.c java
+libdb_java/java_DbLock.c java
+libdb_java/java_DbLogc.c java
+libdb_java/java_DbLsn.c java
+libdb_java/java_DbTxn.c java
+libdb_java/java_DbUtil.c java
+libdb_java/java_Dbc.c java
+libdb_java/java_Dbt.c java
+libdb_java/java_info.c java
+libdb_java/java_locked.c java
+libdb_java/java_stat_auto.c java
+libdb_java/java_util.c java
+lock/lock.c dynamic static vx
+lock/lock_deadlock.c dynamic static vx
+lock/lock_method.c dynamic static vx
+lock/lock_region.c dynamic static vx
+lock/lock_stat.c dynamic static vx
+lock/lock_util.c dynamic static vx
+log/log.c dynamic static vx
+log/log_archive.c dynamic static vx
+log/log_compare.c dynamic static vx
+log/log_get.c dynamic static vx
+log/log_method.c dynamic static vx
+log/log_put.c dynamic static vx
+mp/mp_alloc.c dynamic static vx
+mp/mp_bh.c dynamic static vx
+mp/mp_fget.c dynamic static vx
+mp/mp_fopen.c dynamic static vx
+mp/mp_fput.c dynamic static vx
+mp/mp_fset.c dynamic static vx
+mp/mp_method.c dynamic static vx
+mp/mp_region.c dynamic static vx
+mp/mp_register.c dynamic static vx
+mp/mp_stat.c dynamic static vx
+mp/mp_sync.c dynamic static vx
+mp/mp_trickle.c dynamic static vx
+mutex/mut_fcntl.c
+mutex/mut_pthread.c
+mutex/mut_tas.c vx
+mutex/mut_win32.c dynamic static
+mutex/mutex.c dynamic static vx
+mutex/tm.c
+os/os_abs.c
+os/os_alloc.c dynamic static vx
+os/os_clock.c vx
+os/os_config.c
+os/os_dir.c vx
+os/os_errno.c vx
+os/os_fid.c vx
+os/os_fsync.c vx
+os/os_handle.c vx
+os/os_id.c dynamic static vx
+os/os_map.c
+os/os_method.c dynamic static vx
+os/os_oflags.c dynamic static vx
+os/os_open.c vx
+os/os_region.c dynamic static vx
+os/os_rename.c vx
+os/os_root.c dynamic static vx
+os/os_rpath.c dynamic static vx
+os/os_rw.c vx
+os/os_seek.c vx
+os/os_sleep.c vx
+os/os_spin.c vx
+os/os_stat.c vx
+os/os_tmpdir.c dynamic static vx
+os/os_unlink.c dynamic static vx
+os_vxworks/os_vx_abs.c vx
+os_vxworks/os_vx_config.c vx
+os_vxworks/os_vx_map.c vx
+os_win32/os_abs.c dynamic static
+os_win32/os_clock.c dynamic static
+os_win32/os_config.c dynamic static
+os_win32/os_dir.c dynamic static
+os_win32/os_errno.c dynamic static
+os_win32/os_fid.c dynamic static
+os_win32/os_fsync.c dynamic static
+os_win32/os_handle.c dynamic static
+os_win32/os_map.c dynamic static
+os_win32/os_open.c dynamic static
+os_win32/os_rename.c dynamic static
+os_win32/os_rw.c dynamic static
+os_win32/os_seek.c dynamic static
+os_win32/os_sleep.c dynamic static
+os_win32/os_spin.c dynamic static
+os_win32/os_stat.c dynamic static
+os_win32/os_type.c dynamic static
+qam/qam.c dynamic static vx
+qam/qam_auto.c dynamic static vx
+qam/qam_conv.c dynamic static vx
+qam/qam_files.c dynamic static vx
+qam/qam_method.c dynamic static vx
+qam/qam_open.c dynamic static vx
+qam/qam_rec.c dynamic static vx
+qam/qam_stat.c dynamic static vx
+qam/qam_upgrade.c dynamic static vx
+qam/qam_verify.c dynamic static vx
+rep/rep_method.c dynamic static vx
+rep/rep_record.c dynamic static vx
+rep/rep_region.c dynamic static vx
+rep/rep_util.c dynamic static vx
+rpc_client/client.c vx
+rpc_client/db_server_clnt.c vx
+rpc_client/gen_client.c vx
+rpc_client/gen_client_ret.c vx
+rpc_server/c/db_server_proc.c
+rpc_server/c/db_server_svc.c
+rpc_server/c/db_server_util.c
+rpc_server/c/db_server_xdr.c vx
+rpc_server/c/gen_db_server.c
+rpc_server/cxx/db_server_cxxproc.cpp
+rpc_server/cxx/db_server_cxxutil.cpp
+tcl/tcl_compat.c tcl
+tcl/tcl_db.c tcl
+tcl/tcl_db_pkg.c tcl
+tcl/tcl_dbcursor.c tcl
+tcl/tcl_env.c tcl
+tcl/tcl_internal.c tcl
+tcl/tcl_lock.c tcl
+tcl/tcl_log.c tcl
+tcl/tcl_mp.c tcl
+tcl/tcl_rep.c tcl
+tcl/tcl_txn.c tcl
+tcl/tcl_util.c tcl
+test_perf/db_perf.c app=db_perf
+test_perf/perf_cache_check.c app=db_perf
+test_perf/perf_checkpoint.c app=db_perf
+test_perf/perf_config.c app=db_perf
+test_perf/perf_dbs.c app=db_perf
+test_perf/perf_debug.c app=db_perf
+test_perf/perf_file.c app=db_perf
+test_perf/perf_key.c app=db_perf
+test_perf/perf_log.c app=db_perf
+test_perf/perf_misc.c app=db_perf
+test_perf/perf_op.c app=db_perf
+test_perf/perf_parse.c app=db_perf
+test_perf/perf_rand.c app=db_perf
+test_perf/perf_spawn.c app=db_perf
+test_perf/perf_thread.c app=db_perf
+test_perf/perf_trickle.c app=db_perf
+test_perf/perf_txn.c app=db_perf
+test_perf/perf_util.c app=db_perf
+test_perf/perf_vx.c
+txn/txn.c dynamic static vx
+txn/txn_auto.c dynamic static vx
+txn/txn_method.c dynamic static vx
+txn/txn_rec.c dynamic static vx
+txn/txn_recover.c dynamic static vx
+txn/txn_region.c dynamic static vx
+txn/txn_stat.c dynamic static vx
+txn/txn_util.c dynamic static vx
+xa/xa.c dynamic static vx
+xa/xa_db.c dynamic static vx
+xa/xa_map.c dynamic static vx
diff --git a/libdb/dist/tags b/libdb/dist/tags
new file mode 100644
index 0000000..e69de29
diff --git a/libdb/dist/template/db_server_proc b/libdb/dist/template/db_server_proc
new file mode 100644
index 0000000..74a6c69
--- /dev/null
+++ b/libdb/dist/template/db_server_proc
@@ -0,0 +1,1392 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/* BEGIN __env_cachesize_proc */
+void
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+void
+__env_close_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_create_proc */
+void
+__env_create_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_proc */
+{
+ int ret;
+
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+void
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __env_dbremove_reply *replyp;
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+void
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, newname, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __env_dbrename_reply *replyp;
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+void
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)
+ long dbenvcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __env_encrypt_reply *replyp;
+/* END __env_encrypt_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+void
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_open_proc */
+void
+__env_open_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+void
+__env_remove_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+void
+__txn_abort_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+void
+__txn_begin_proc(dbenvcl_id, parentcl_id,
+ flags, replyp)
+ long dbenvcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * parent;
+ ct_entry *parent_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+void
+__txn_commit_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+void
+__txn_discard_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+/* END __txn_discard_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+void
+__txn_prepare_proc(txnpcl_id, gid, replyp)
+ long txnpcl_id;
+ u_int8_t *gid;
+ __txn_prepare_reply *replyp;
+/* END __txn_prepare_proc */
+{
+ int ret;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+void
+__txn_recover_proc(dbenvcl_id, count,
+ flags, replyp, freep)
+ long dbenvcl_id;
+ u_int32_t count;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+ int * freep;
+/* END __txn_recover_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+void
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ long sdbpcl_id;
+ u_int32_t flags;
+ __db_associate_reply *replyp;
+/* END __db_associate_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+ DB * sdbp;
+ ct_entry *sdbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (DB *)sdbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+void
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+void
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+void
+__db_close_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_create_proc */
+void
+__db_create_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+/* END __db_create_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+void
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+void
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)
+ long dbpcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __db_encrypt_reply *replyp;
+/* END __db_encrypt_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+void
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+void
+__db_flags_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+void
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+void
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+void
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+void
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+void
+__db_lorder_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+void
+__db_open_proc(dbpcl_id, txnpcl_id, name,
+ subdb, type, flags, mode, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+void
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+void
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,
+ skeydoff, skeyulen, skeyflags, skeydata,
+ skeysize, pkeydlen, pkeydoff, pkeyulen,
+ pkeyflags, pkeydata, pkeysize, datadlen,
+ datadoff, dataulen, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+ int * freep;
+/* END __db_pget_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+void
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+ int * freep;
+/* END __db_put_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+void
+__db_re_delim_proc(dbpcl_id, delim, replyp)
+ long dbpcl_id;
+ u_int32_t delim;
+ __db_re_delim_reply *replyp;
+/* END __db_re_delim_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+void
+__db_re_len_proc(dbpcl_id, len, replyp)
+ long dbpcl_id;
+ u_int32_t len;
+ __db_re_len_reply *replyp;
+/* END __db_re_len_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+void
+__db_re_pad_proc(dbpcl_id, pad, replyp)
+ long dbpcl_id;
+ u_int32_t pad;
+ __db_re_pad_reply *replyp;
+/* END __db_re_pad_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+void
+__db_remove_proc(dbpcl_id, name, subdb,
+ flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+/* END __db_remove_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+void
+__db_rename_proc(dbpcl_id, name, subdb,
+ newname, flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+/* END __db_rename_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+void
+__db_stat_proc(dbpcl_id, flags, replyp, freep)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+ int * freep;
+/* END __db_stat_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_proc */
+void
+__db_sync_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_sync_reply *replyp;
+/* END __db_sync_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+void
+__db_truncate_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_truncate_reply *replyp;
+/* END __db_truncate_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+void
+__db_cursor_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+/* END __db_cursor_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+void
+__db_join_proc(dbpcl_id, curs, curslen,
+ flags, replyp)
+ long dbpcl_id;
+ u_int32_t * curs;
+ u_int32_t curslen;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+/* END __db_join_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_close_proc */
+void
+__dbc_close_proc(dbccl_id, replyp)
+ long dbccl_id;
+ __dbc_close_reply *replyp;
+/* END __dbc_close_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_count_proc */
+void
+__dbc_count_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+/* END __dbc_count_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_del_proc */
+void
+__dbc_del_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_del_reply *replyp;
+/* END __dbc_del_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_proc */
+void
+__dbc_dup_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+/* END __dbc_dup_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_proc */
+void
+__dbc_get_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+ int * freep;
+/* END __dbc_get_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_pget_proc */
+void
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,
+ skeyulen, skeyflags, skeydata, skeysize,
+ pkeydlen, pkeydoff, pkeyulen, pkeyflags,
+ pkeydata, pkeysize, datadlen, datadoff,
+ dataulen, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+ int * freep;
+/* END __dbc_pget_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_proc */
+void
+__dbc_put_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+ int * freep;
+/* END __dbc_put_proc */
+{
+ int ret;
+ DBC * dbc;
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ /*
+ * XXX Code goes here
+ */
+
+ replyp->status = ret;
+ return;
+}
+
diff --git a/libdb/dist/template/gen_client_ret b/libdb/dist/template/gen_client_ret
new file mode 100644
index 0000000..ca4c443
--- /dev/null
+++ b/libdb/dist/template/gen_client_ret
@@ -0,0 +1,750 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __dbcl_env_close_ret __P((DB_ENV *, u_int32_t,
+ * PUBLIC: __env_close_reply *));
+ */
+int
+__dbcl_env_close_ret(dbenv, flags, replyp)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_create_ret __P((DB_ENV *, long,
+ * PUBLIC: __env_create_reply *));
+ */
+int
+__dbcl_env_create_ret(dbenv, timeout, replyp)
+ DB_ENV * dbenv;
+ long timeout;
+ __env_create_reply *replyp;
+{
+ int ret;
+ long env;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ env = replyp->envcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_open_ret __P((DB_ENV *, const char *, u_int32_t, int,
+ * PUBLIC: __env_open_reply *));
+ */
+int
+__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+ __env_open_reply *replyp;
+{
+ int ret;
+ long env;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ env = replyp->envcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_remove_ret __P((DB_ENV *, const char *, u_int32_t,
+ * PUBLIC: __env_remove_reply *));
+ */
+int
+__dbcl_env_remove_ret(dbenv, home, flags, replyp)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+ */
+int
+__dbcl_txn_abort_ret(txnp, replyp)
+ DB_TXN * txnp;
+ __txn_abort_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_begin_ret __P((DB_ENV *, DB_TXN *, DB_TXN **,
+ * PUBLIC: u_int32_t, __txn_begin_reply *));
+ */
+int
+__dbcl_txn_begin_ret(dbenv, parent, txnpp, flags, replyp)
+ DB_ENV * dbenv;
+ DB_TXN * parent;
+ DB_TXN ** txnpp;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+{
+ int ret;
+ long txnid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ txnid = replyp->txnidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_commit_ret __P((DB_TXN *, u_int32_t,
+ * PUBLIC: __txn_commit_reply *));
+ */
+int
+__dbcl_txn_commit_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+int
+__dbcl_txn_discard_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long,
+ * PUBLIC: long *, u_int32_t, __txn_recover_reply *));
+ */
+int
+__dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp)
+ DB_ENV * dbenv;
+ DB_PREPLIST * preplist;
+ long count;
+ long * retp;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+{
+ int ret;
+ u_int32_t *__db_txn;
+ u_int8_t *__db_gid;
+ long retcount;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Handle list
+ */
+
+
+ /*
+ * XXX Handle list
+ */
+
+ retcount = replyp->retcount;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+ */
+int
+__dbcl_db_close_ret(dbp, flags, replyp)
+ DB * dbp;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_create_ret __P((DB *, DB_ENV *, u_int32_t,
+ * PUBLIC: __db_create_reply *));
+ */
+int
+__dbcl_db_create_ret(dbp, dbenv, flags, replyp)
+ DB * dbp;
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+{
+ int ret;
+ long db;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ db = replyp->dbcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_get_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __db_get_reply *));
+ */
+int
+__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_key_range_ret __P((DB *, DB_TXN *, DBT *,
+ * PUBLIC: DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+ */
+int
+__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DB_KEY_RANGE * range;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+{
+ int ret;
+ double less;
+ double equal;
+ double greater;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ less = replyp->less;
+ equal = replyp->equal;
+ greater = replyp->greater;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+ */
+int
+__dbcl_db_open_ret(dbp, txnp, name, subdb, type, flags, mode, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+ __db_open_reply *replyp;
+{
+ int ret;
+ long db;
+	DBTYPE dbtype;
+ u_int32_t dbflags;
+ int lorder;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ db = replyp->dbcl_id;
+	dbtype = replyp->type;
+ dbflags = replyp->dbflags;
+ lorder = replyp->lorder;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t, __db_pget_reply *));
+ */
+int
+__dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+{
+ int ret;
+ /* DBT skey; */
+ /* DBT pkey; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->skeydata; */
+ /* Handle replyp->pkeydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_put_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __db_put_reply *));
+ */
+int
+__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_remove_ret __P((DB *, const char *, const char *,
+ * PUBLIC: u_int32_t, __db_remove_reply *));
+ */
+int
+__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_rename_ret __P((DB *, const char *, const char *,
+ * PUBLIC: const char *, u_int32_t, __db_rename_reply *));
+ */
+int
+__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ const char * newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_stat_ret __P((DB *, void *, u_int32_t,
+ * PUBLIC: __db_stat_reply *));
+ */
+int
+__dbcl_db_stat_ret(dbp, sp, flags, replyp)
+ DB * dbp;
+ void * sp;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+{
+ int ret;
+ u_int32_t *__db_stats;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Handle list
+ */
+
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *,
+ * PUBLIC: u_int32_t, __db_truncate_reply *));
+ */
+int
+__dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ u_int32_t * countp;
+ u_int32_t flags;
+ __db_truncate_reply *replyp;
+{
+ int ret;
+ u_int32_t count;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ count = replyp->count;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_cursor_ret __P((DB *, DB_TXN *, DBC **, u_int32_t,
+ * PUBLIC: __db_cursor_reply *));
+ */
+int
+__dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBC ** dbcpp;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_join_ret __P((DB *, DBC **, DBC **, u_int32_t,
+ * PUBLIC: __db_join_reply *));
+ */
+int
+__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
+ DB * dbp;
+ DBC ** curs;
+ DBC ** dbcp;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+ */
+int
+__dbcl_dbc_close_ret(dbc, replyp)
+ DBC * dbc;
+ __dbc_close_reply *replyp;
+{
+ int ret;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_count_ret __P((DBC *, db_recno_t *, u_int32_t,
+ * PUBLIC: __dbc_count_reply *));
+ */
+int
+__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
+ DBC * dbc;
+ db_recno_t * countp;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+{
+ int ret;
+ db_recno_t dupcount;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dupcount = replyp->dupcount;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_dup_ret __P((DBC *, DBC **, u_int32_t,
+ * PUBLIC: __dbc_dup_reply *));
+ */
+int
+__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
+ DBC * dbc;
+ DBC ** dbcp;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+{
+ int ret;
+ long dbcid;
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbcid = replyp->dbcidcl_id;
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_get_ret __P((DBC *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_get_reply *));
+ */
+int
+__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_pget_reply *));
+ */
+int
+__dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp)
+ DBC * dbc;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+{
+ int ret;
+ /* DBT skey; */
+ /* DBT pkey; */
+ /* DBT data; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->skeydata; */
+ /* Handle replyp->pkeydata; */
+ /* Handle replyp->datadata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_put_ret __P((DBC *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_put_reply *));
+ */
+int
+__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+{
+ int ret;
+ /* DBT key; */
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ /* Handle replyp->keydata; */
+
+ /*
+ * XXX Code goes here
+ */
+
+ return (replyp->status);
+}
+
+#endif /* HAVE_RPC */
diff --git a/libdb/dist/template/rec_btree b/libdb/dist/template/rec_btree
new file mode 100644
index 0000000..f0b49d8
--- /dev/null
+++ b/libdb/dist/template/rec_btree
@@ -0,0 +1,571 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__bam.h"
+#include "dbinc/log.h"
+
+/*
+ * __bam_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __bam_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_split_print);
+ REC_INTRO(__bam_split_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rsplit_recover --
+ * Recovery function for rsplit.
+ *
+ * PUBLIC: int __bam_rsplit_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_rsplit_print);
+ REC_INTRO(__bam_rsplit_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_adj_recover --
+ * Recovery function for adj.
+ *
+ * PUBLIC: int __bam_adj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_adj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_adj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_adj_print);
+ REC_INTRO(__bam_adj_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cadjust_recover --
+ * Recovery function for cadjust.
+ *
+ * PUBLIC: int __bam_cadjust_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cadjust_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_cadjust_print);
+ REC_INTRO(__bam_cadjust_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_cdel_recover --
+ * Recovery function for cdel.
+ *
+ * PUBLIC: int __bam_cdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_cdel_print);
+ REC_INTRO(__bam_cdel_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_repl_recover --
+ * Recovery function for repl.
+ *
+ * PUBLIC: int __bam_repl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_repl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_repl_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_repl_print);
+ REC_INTRO(__bam_repl_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_root_recover --
+ * Recovery function for root.
+ *
+ * PUBLIC: int __bam_root_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_root_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_root_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_root_print);
+ REC_INTRO(__bam_root_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_curadj_recover --
+ * Recovery function for curadj.
+ *
+ * PUBLIC: int __bam_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_curadj_print);
+ REC_INTRO(__bam_curadj_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rcuradj_recover --
+ * Recovery function for rcuradj.
+ *
+ * PUBLIC: int __bam_rcuradj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rcuradj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__bam_rcuradj_print);
+ REC_INTRO(__bam_rcuradj_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/dist/template/rec_crdel b/libdb/dist/template/rec_crdel
new file mode 100644
index 0000000..430806e
--- /dev/null
+++ b/libdb/dist/template/rec_crdel
@@ -0,0 +1,75 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__crdel.h"
+#include "dbinc/log.h"
+
+/*
+ * __crdel_metasub_recover --
+ * Recovery function for metasub.
+ *
+ * PUBLIC: int __crdel_metasub_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metasub_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__crdel_metasub_print);
+ REC_INTRO(__crdel_metasub_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/dist/template/rec_ctemp b/libdb/dist/template/rec_ctemp
new file mode 100644
index 0000000..2951189
--- /dev/null
+++ b/libdb/dist/template/rec_ctemp
@@ -0,0 +1,62 @@
+/*
+ * PREF_FUNC_recover --
+ * Recovery function for FUNC.
+ *
+ * PUBLIC: int PREF_FUNC_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ PREF_FUNC_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(PREF_FUNC_print);
+ REC_INTRO(PREF_FUNC_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/dist/template/rec_db b/libdb/dist/template/rec_db
new file mode 100644
index 0000000..ab83912
--- /dev/null
+++ b/libdb/dist/template/rec_db
@@ -0,0 +1,571 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__db.h"
+#include "dbinc/log.h"
+
+/*
+ * __db_addrem_recover --
+ * Recovery function for addrem.
+ *
+ * PUBLIC: int __db_addrem_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_addrem_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addrem_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_addrem_print);
+ REC_INTRO(__db_addrem_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_big_recover --
+ * Recovery function for big.
+ *
+ * PUBLIC: int __db_big_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_big_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_big_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_big_print);
+ REC_INTRO(__db_big_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_ovref_recover --
+ * Recovery function for ovref.
+ *
+ * PUBLIC: int __db_ovref_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_ovref_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_ovref_print);
+ REC_INTRO(__db_ovref_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_relink_recover --
+ * Recovery function for relink.
+ *
+ * PUBLIC: int __db_relink_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_relink_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_relink_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_relink_print);
+ REC_INTRO(__db_relink_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_debug_recover --
+ * Recovery function for debug.
+ *
+ * PUBLIC: int __db_debug_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_debug_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_debug_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_debug_print);
+ REC_INTRO(__db_debug_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_noop_recover --
+ * Recovery function for noop.
+ *
+ * PUBLIC: int __db_noop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_noop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_noop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_noop_print);
+ REC_INTRO(__db_noop_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __db_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_pg_alloc_print);
+ REC_INTRO(__db_pg_alloc_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __db_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_pg_free_print);
+ REC_INTRO(__db_pg_free_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __db_cksum_recover --
+ * Recovery function for cksum.
+ *
+ * PUBLIC: int __db_cksum_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_cksum_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_cksum_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__db_cksum_print);
+ REC_INTRO(__db_cksum_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/dist/template/rec_dbreg b/libdb/dist/template/rec_dbreg
new file mode 100644
index 0000000..bbdf19d
--- /dev/null
+++ b/libdb/dist/template/rec_dbreg
@@ -0,0 +1,75 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__dbreg.h"
+#include "dbinc/log.h"
+
+/*
+ * __dbreg_register_recover --
+ * Recovery function for register.
+ *
+ * PUBLIC: int __dbreg_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __dbreg_register_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__dbreg_register_print);
+ REC_INTRO(__dbreg_register_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/dist/template/rec_fileops b/libdb/dist/template/rec_fileops
new file mode 100644
index 0000000..c148783
--- /dev/null
+++ b/libdb/dist/template/rec_fileops
@@ -0,0 +1,323 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__fop.h"
+#include "dbinc/log.h"
+
+/*
+ * __fop_create_recover --
+ * Recovery function for create.
+ *
+ * PUBLIC: int __fop_create_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_create_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_create_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_create_print);
+ REC_INTRO(__fop_create_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_remove_recover --
+ * Recovery function for remove.
+ *
+ * PUBLIC: int __fop_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_remove_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_remove_print);
+ REC_INTRO(__fop_remove_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_write_recover --
+ * Recovery function for write.
+ *
+ * PUBLIC: int __fop_write_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_write_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_write_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_write_print);
+ REC_INTRO(__fop_write_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __fop_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_rename_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_rename_print);
+ REC_INTRO(__fop_rename_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_file_remove_recover --
+ * Recovery function for file_remove.
+ *
+ * PUBLIC: int __fop_file_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_file_remove_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_file_remove_print);
+ REC_INTRO(__fop_file_remove_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
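These generated stubs intentionally leave cmp_p unset: the commented-out log_compare() call marks the spot where a real recovery function compares the LSN stored on the page against the LSN the log record remembers. A minimal sketch of how the redo/undo branches might look once filled in, assuming the record's *_args struct carries a DB_LSN field named pagelsn and using hypothetical apply_redo()/apply_undo() helpers for the actual page change:

```c
	/*
	 * Sketch only -- argp->pagelsn, apply_redo() and apply_undo() are
	 * assumptions for illustration, not part of the patch above.
	 */
	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);

	if (cmp_p == 0 && DB_REDO(op)) {
		/* Page still shows the pre-update state: reapply the change. */
		apply_redo(file_dbp, pagep, argp);
		LSN(pagep) = *lsnp;		/* Page now reflects this record. */
		modified = 1;
	} else if (cmp_n == 0 && !DB_REDO(op)) {
		/* Page shows this record's state: roll the change back. */
		apply_undo(file_dbp, pagep, argp);
		LSN(pagep) = argp->pagelsn;	/* Restore the earlier LSN. */
		modified = 1;
	}
```

The mpf->put() call that follows then flushes the page as dirty only when one of the two branches actually changed it.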
diff --git a/libdb/dist/template/rec_hash b/libdb/dist/template/rec_hash
new file mode 100644
index 0000000..b5160d5
--- /dev/null
+++ b/libdb/dist/template/rec_hash
@@ -0,0 +1,571 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__ham.h"
+#include "dbinc/log.h"
+
+/*
+ * __ham_insdel_recover --
+ * Recovery function for insdel.
+ *
+ * PUBLIC: int __ham_insdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_insdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_insdel_print);
+ REC_INTRO(__ham_insdel_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_newpage_recover --
+ * Recovery function for newpage.
+ *
+ * PUBLIC: int __ham_newpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_newpage_print);
+ REC_INTRO(__ham_newpage_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_splitdata_recover --
+ * Recovery function for splitdata.
+ *
+ * PUBLIC: int __ham_splitdata_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitdata_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_splitdata_print);
+ REC_INTRO(__ham_splitdata_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_replace_recover --
+ * Recovery function for replace.
+ *
+ * PUBLIC: int __ham_replace_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_replace_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_replace_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_replace_print);
+ REC_INTRO(__ham_replace_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_copypage_recover --
+ * Recovery function for copypage.
+ *
+ * PUBLIC: int __ham_copypage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_copypage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_copypage_print);
+ REC_INTRO(__ham_copypage_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_metagroup_recover --
+ * Recovery function for metagroup.
+ *
+ * PUBLIC: int __ham_metagroup_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_metagroup_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_metagroup_print);
+ REC_INTRO(__ham_metagroup_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc_recover --
+ * Recovery function for groupalloc.
+ *
+ * PUBLIC: int __ham_groupalloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_groupalloc_print);
+ REC_INTRO(__ham_groupalloc_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_curadj_recover --
+ * Recovery function for curadj.
+ *
+ * PUBLIC: int __ham_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_curadj_print);
+ REC_INTRO(__ham_curadj_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_chgpg_recover --
+ * Recovery function for chgpg.
+ *
+ * PUBLIC: int __ham_chgpg_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_chgpg_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__ham_chgpg_print);
+ REC_INTRO(__ham_chgpg_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
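Every one of these functions orders LSNs with log_compare(), which compares the (file, offset) pair in a DB_LSN lexicographically: an LSN in an earlier log file, or earlier within the same file, sorts first. A standalone sketch of that ordering (illustrative only, not the library's implementation):

```c
#include "db.h"		/* DB_LSN: u_int32_t file, offset */

/* Mirrors the documented ordering used by log_compare(). */
static int
lsn_compare_sketch(const DB_LSN *a, const DB_LSN *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}
```

So in the stubs above, cmp_p == 0 means the page's LSN is exactly the one this log record expects, and cmp_n == 0 means the page already carries this record's LSN.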
diff --git a/libdb/dist/template/rec_qam b/libdb/dist/template/rec_qam
new file mode 100644
index 0000000..ffe0d63
--- /dev/null
+++ b/libdb/dist/template/rec_qam
@@ -0,0 +1,323 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__qam.h"
+#include "dbinc/log.h"
+
+/*
+ * __qam_incfirst_recover --
+ * Recovery function for incfirst.
+ *
+ * PUBLIC: int __qam_incfirst_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_incfirst_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_incfirst_print);
+ REC_INTRO(__qam_incfirst_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_mvptr_recover --
+ * Recovery function for mvptr.
+ *
+ * PUBLIC: int __qam_mvptr_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_mvptr_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_mvptr_print);
+ REC_INTRO(__qam_mvptr_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_del_recover --
+ * Recovery function for del.
+ *
+ * PUBLIC: int __qam_del_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_del_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_del_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_del_print);
+ REC_INTRO(__qam_del_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_add_recover --
+ * Recovery function for add.
+ *
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_add_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_add_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_add_print);
+ REC_INTRO(__qam_add_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_delext_recover --
+ * Recovery function for delext.
+ *
+ * PUBLIC: int __qam_delext_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delext_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delext_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__qam_delext_print);
+ REC_INTRO(__qam_delext_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
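The DB_REDO(op) test splits the db_recops values into a redo pass (recovery rolling forward, or a replication client applying a master's log) and an undo pass (transaction abort or the backward pass of recovery). Spelled out as a plain function, under the assumption that the internal macro covers exactly these cases:

```c
/*
 * Illustrative only: the real DB_REDO()/DB_UNDO() macros live in libdb's
 * internal headers; this just names the intent of each db_recops value a
 * recovery function can be called with.
 */
static int
is_redo_pass(db_recops op)
{
	switch (op) {
	case DB_TXN_FORWARD_ROLL:	/* Redo phase of recovery. */
	case DB_TXN_APPLY:		/* Replication client applying log records. */
		return (1);
	case DB_TXN_ABORT:		/* Undoing a live transaction. */
	case DB_TXN_BACKWARD_ROLL:	/* Undo phase of recovery. */
	default:
		return (0);
	}
}
```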
diff --git a/libdb/dist/template/rec_txn b/libdb/dist/template/rec_txn
new file mode 100644
index 0000000..e3ee4c8
--- /dev/null
+++ b/libdb/dist/template/rec_txn
@@ -0,0 +1,323 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__txn.h"
+#include "dbinc/log.h"
+
+/*
+ * __txn_regop_recover --
+ * Recovery function for regop.
+ *
+ * PUBLIC: int __txn_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_regop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_regop_print);
+ REC_INTRO(__txn_regop_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_ckp_recover --
+ * Recovery function for ckp.
+ *
+ * PUBLIC: int __txn_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_ckp_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_ckp_print);
+ REC_INTRO(__txn_ckp_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_child_recover --
+ * Recovery function for child.
+ *
+ * PUBLIC: int __txn_child_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_child_print);
+ REC_INTRO(__txn_child_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_xa_regop_recover --
+ * Recovery function for xa_regop.
+ *
+ * PUBLIC: int __txn_xa_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_xa_regop_print);
+ REC_INTRO(__txn_xa_regop_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __txn_recycle_recover --
+ * Recovery function for recycle.
+ *
+ * PUBLIC: int __txn_recycle_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_recycle_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_recycle_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__txn_recycle_print);
+ REC_INTRO(__txn_recycle_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
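Each stub finishes by rewinding *lsnp to argp->prev_lsn, the backward pointer that chains all of a transaction's log records together; transaction abort follows exactly that chain from the newest record back to a zero LSN. A sketch of such a walk using the public DB_LOGC log-cursor interface, where dispatch_record() is a hypothetical stand-in for picking and calling the right *_recover() function and is expected to rewrite the LSN to the record's prev_lsn:

```c
#include <string.h>
#include "db.h"

/* Hypothetical dispatcher: calls the right *_recover() and sets *lsnp
 * to the record's prev_lsn, just as the stubs above do. */
static int dispatch_record(DB_ENV *, DBT *, DB_LSN *, db_recops);

/* Sketch only: follow one transaction's records backwards through the log. */
static int
undo_txn_chain_sketch(DB_ENV *dbenv, DB_LSN last_lsn)
{
	DB_LOGC *logc;
	DBT rec;
	DB_LSN lsn;
	int ret, t_ret;

	memset(&rec, 0, sizeof(rec));
	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);

	for (lsn = last_lsn; lsn.file != 0 || lsn.offset != 0;) {
		if ((ret = logc->get(logc, &lsn, &rec, DB_SET)) != 0)
			break;
		if ((ret = dispatch_record(dbenv, &rec, &lsn, DB_TXN_ABORT)) != 0)
			break;
	}

	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}
```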
diff --git a/libdb/dist/vx_2.0/BerkeleyDB.wpj b/libdb/dist/vx_2.0/BerkeleyDB.wpj
new file mode 100644
index 0000000..78684d9
--- /dev/null
+++ b/libdb/dist/vx_2.0/BerkeleyDB.wpj
@@ -0,0 +1,251 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/.. \
+ -DDIAGNOSTIC \
+ -DDEBUG
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/..
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB.out
+
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM_debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM_release PENTIUM_debug
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
diff --git a/libdb/dist/vx_2.0/wpj.in b/libdb/dist/vx_2.0/wpj.in
new file mode 100644
index 0000000..2b942bb
--- /dev/null
+++ b/libdb/dist/vx_2.0/wpj.in
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+__DB_APPLICATION_NAME__.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/__DB_APPLICATION_NAME__.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE___DB_APPLICATION_NAME__.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependencies
+
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/__DB_APPLICATION_NAME__.c
+<END>
+
+<BEGIN> userComments
+__DB_APPLICATION_NAME__
+<END>
diff --git a/libdb/dist/vx_3.1/Makefile.custom b/libdb/dist/vx_3.1/Makefile.custom
new file mode 100644
index 0000000..ca781f7
--- /dev/null
+++ b/libdb/dist/vx_3.1/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/libdb/dist/vx_3.1/cdf.1 b/libdb/dist/vx_3.1/cdf.1
new file mode 100644
index 0000000..17db06f
--- /dev/null
+++ b/libdb/dist/vx_3.1/cdf.1
@@ -0,0 +1,12 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
diff --git a/libdb/dist/vx_3.1/cdf.2 b/libdb/dist/vx_3.1/cdf.2
new file mode 100644
index 0000000..76f123a
--- /dev/null
+++ b/libdb/dist/vx_3.1/cdf.2
@@ -0,0 +1,9 @@
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
diff --git a/libdb/dist/vx_3.1/cdf.3 b/libdb/dist/vx_3.1/cdf.3
new file mode 100644
index 0000000..a3146ce
--- /dev/null
+++ b/libdb/dist/vx_3.1/cdf.3
@@ -0,0 +1,2 @@
+/* Parameter information */
+
diff --git a/libdb/dist/vx_3.1/component.cdf b/libdb/dist/vx_3.1/component.cdf
new file mode 100644
index 0000000..91edaa8
--- /dev/null
+++ b/libdb/dist/vx_3.1/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE___DB_CAPAPPL_NAME__ {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES __DB_APPLICATION_NAME__.o
+ NAME __DB_APPLICATION_NAME__
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module __DB_APPLICATION_NAME__.o {
+
+ NAME __DB_APPLICATION_NAME__.o
+ SRC_PATH_NAME $PRJ_DIR/../__DB_APPLICATION_NAME__.c
+}
+
+/* Parameter information */
+
diff --git a/libdb/dist/vx_3.1/component.wpj b/libdb/dist/vx_3.1/component.wpj
new file mode 100644
index 0000000..01c51c1
--- /dev/null
+++ b/libdb/dist/vx_3.1/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/dist/vx_3.1/wpj.1 b/libdb/dist/vx_3.1/wpj.1
new file mode 100644
index 0000000..414b4e8
--- /dev/null
+++ b/libdb/dist/vx_3.1/wpj.1
@@ -0,0 +1,22 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
diff --git a/libdb/dist/vx_3.1/wpj.2 b/libdb/dist/vx_3.1/wpj.2
new file mode 100644
index 0000000..0294f76
--- /dev/null
+++ b/libdb/dist/vx_3.1/wpj.2
@@ -0,0 +1,130 @@
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
diff --git a/libdb/dist/vx_3.1/wpj.3 b/libdb/dist/vx_3.1/wpj.3
new file mode 100644
index 0000000..f06e625
--- /dev/null
+++ b/libdb/dist/vx_3.1/wpj.3
@@ -0,0 +1,128 @@
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../..
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
diff --git a/libdb/dist/vx_3.1/wpj.4 b/libdb/dist/vx_3.1/wpj.4
new file mode 100644
index 0000000..84de6eb
--- /dev/null
+++ b/libdb/dist/vx_3.1/wpj.4
@@ -0,0 +1,135 @@
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
diff --git a/libdb/dist/vx_3.1/wpj.5 b/libdb/dist/vx_3.1/wpj.5
new file mode 100644
index 0000000..f4056e7
--- /dev/null
+++ b/libdb/dist/vx_3.1/wpj.5
@@ -0,0 +1,22 @@
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/libdb/dist/vx_buildcd b/libdb/dist/vx_buildcd
new file mode 100755
index 0000000..2400e69
--- /dev/null
+++ b/libdb/dist/vx_buildcd
@@ -0,0 +1,119 @@
+#!/bin/sh
+# $Id$
+#
+# Build the Setup SDK CD image on the VxWorks host machine.
+
+. ./RELEASE
+
+B=`pwd`
+B=$B/..
+D=$B/dist/vx_setup
+C=$D/db.CD
+Q=/export/home/sue/SetupSDK
+S=$Q/resource/mfg/setup
+W=sun4-solaris2
+
+symdoc=$D/docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+symdb=$D/windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+rm -rf $D/docs $D/windlink
+mkdir $D/docs $D/windlink $D/windlink/sleepycat
+ln -s $B/docs $symdoc
+ln -s $B $symdb
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+
+#
+# Remove the old CD directory if it is there.
+if test -d $C; then
+ echo "$C cannot exist."
+ echo "As root, execute 'rm -rf $C'"
+ echo "and then rerun the script"
+ exit 1
+fi
+
+#
+# Check for absolute pathnames in the project files.
+# That is bad, but Tornado insists on putting them in
+# whenever you add new files.
+#
+rm -f $t
+f=`find $B/build_vxworks -name \*.wpj -print`
+for i in $f; do
+ grep -l -- "$B" $i >> $t
+done
+if test -s $t; then
+ echo "The following files contain absolute pathnames."
+ echo "They must be fixed before building the CD image:"
+ cat $t
+ exit 1
+fi
+
+#
+# NOTE: We reuse the same sed script over several files.
+#
+cat <<ENDOFSEDTEXT > $s
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/g
+s#@DB_SETUP_DIR@#$D#g
+ENDOFSEDTEXT
+
+f=$D/setup.pool
+(sed -f $s $D/vx_setup.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/README.TXT
+(sed -f $s $D/README.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/CONFIG.TCL
+(sed -f $s $D/CONFIG.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/filelist.demo
+(sed -f $s $D/vx_demofile.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+# Copy the Sleepycat specific files into the SetupSDK area.
+(cd $D && cp README.TXT $S)
+(cd $D && cp LICENSE.TXT $S)
+(cd $D && cp CONFIG.TCL $S/RESOURCE/TCL)
+(cd $D && cp SETUP.BMP $S/RESOURCE/BITMAPS)
+
+#
+# NOTE: The contents of LIB must be on one, long, single line.
+# Even preserving it with a \ doesn't work for htmlBook.
+#
+f=../docs/LIB
+(echo "Building $f" && rm -f $f)
+cat <<ENDOFLIBTEXT >> $f
+{BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat Software Berkeley DB} {<b>BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</b>} {<b><a href="./index.html">BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</a></b>} {Sleepycat BerkeleyDB} {} {} {}
+ENDOFLIBTEXT
+
+#
+# Start generating the file list.
+f=$D/filelist.all
+
+#
+# Just put everything into the image. But we only want to find regular
+# files; we cannot have all the directories listed too.
+#
+# NOTE: This find is overly aggressive in getting files, particularly
+# for the 'windlink/sleepycat' files. We actually end up with 3 sets of the
+# documentation, the "real" ones in 'docs/BerkeleyDB*', the set found
+# via 'windlink/sleepycat/Berk*/docs' and the one found via our symlink in
+# 'windlink/sleepycat/Berk*/dist/vx_setup/docs/Berk*'.
+#
+# However, we waste a little disk space so that the expression below
+# is trivial and we don't have to maintain it as new files/directories
+# are added to DB.
+#
+(cd $D && find docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name \* -type f -print) > $t
+(cd $D && find windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name docs -prune -o -type f -print) >> $t
+(echo "Building $f" && rm -f $f && cp $t $f)
+#
+# Finally build the CD image!
+#
+env PATH=$Q/$W/bin:$PATH QMS_BASE=$Q WIND_HOST_TYPE=$W \
+pool mfg -d $C -v -nokey BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR < $D/setup.pool
diff --git a/libdb/dist/vx_config.in b/libdb/dist/vx_config.in
new file mode 100644
index 0000000..43fc8eb
--- /dev/null
+++ b/libdb/dist/vx_config.in
@@ -0,0 +1,381 @@
+/* !!!
+ * The CONFIG_TEST option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* !!!
+ * The DEBUG option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* !!!
+ * The DIAGNOSTIC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+#define HAVE_MUTEX_VXWORKS 1
+
+/* Define to 1 to use Windows mutexes. */
+/* #undef HAVE_MUTEX_WIN32 */
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+#define HAVE_SCHED_YIELD 1
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+/* #undef HAVE_SNPRINTF */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+/* #undef HAVE_STRDUP */
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+/* #undef HAVE_SYS_STAT_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #undef HAVE_SYS_TYPES_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#define HAVE_UNLINK_WITH_OPEN_FAILURE 1
+
+/* Define to 1 if you have the `vsnprintf' function. */
+/* #undef HAVE_VSNPRINTF */
+
+/* Define to 1 if building VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+/* #undef HAVE__FSTATI64 */
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#define optarg __db_Coptarg
+#define opterr __db_Copterr
+#define optind __db_Coptind
+#define optopt __db_Coptopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
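+
+/*
+ * Illustrative note (not part of the original file): on VxWorks, where
+ * HAVE_GETOPT is left undefined above, a utility source line such as
+ *     while ((ch = getopt(argc, argv, "h:")) != EOF)
+ * resolves to the private __db_Cgetopt, so a getopt implementation
+ * supplied by another library on the target is never overridden.
+ */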
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on VxWorks.
+ */
+#include "vxWorks.h"
diff --git a/libdb/dist/vx_setup/CONFIG.in b/libdb/dist/vx_setup/CONFIG.in
new file mode 100644
index 0000000..6ccceee
--- /dev/null
+++ b/libdb/dist/vx_setup/CONFIG.in
@@ -0,0 +1,10 @@
+#
+# Install configuration file.
+#
+# Note: This file may be modified during the pool manufacturing process to
+# add additional configuration statements. This file is sourced by
+# INSTW32.TCL.
+#
+
+cdromDescSet "Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@"
+
diff --git a/libdb/dist/vx_setup/LICENSE.TXT b/libdb/dist/vx_setup/LICENSE.TXT
new file mode 100644
index 0000000..7814c67
--- /dev/null
+++ b/libdb/dist/vx_setup/LICENSE.TXT
@@ -0,0 +1,3 @@
+Copyright (c) 1996-2002
+ Sleepycat Software. All rights reserved.
+See the file LICENSE for redistribution information.
diff --git a/libdb/dist/vx_setup/MESSAGES.TCL b/libdb/dist/vx_setup/MESSAGES.TCL
new file mode 100644
index 0000000..718a67f
--- /dev/null
+++ b/libdb/dist/vx_setup/MESSAGES.TCL
@@ -0,0 +1,651 @@
+# MESSAGES.TCL - All setup strings.
+
+# modification history
+# --------------------
+# 03q,20apr99,bjl added release notes message for backward compatibility
+# page.
+# 03p,12apr99,wmd Add word about simulator in message about the drivers
+# object product.
+# 03o,03mar99,tcy Adjust setup directory size based on platform (fix for
+# SPR 25228)
+# 03n,24feb99,tcy modified DLL update messages
+# 03m,22feb99,tcy modified to align messages
+# 03l,17feb99,tcy modified message in the finish page for program group
+# installation
+# 03k,11feb99,tcy added messages for backward compatibility page
+# 03j,25jan99,tcy added messages from INSTW32.TCL
+# 03i,25jan99,wmd Reword the message for 5010_DRIVERS_INFO.
+# 03h,09dec98,bjl added messages about manufacturers updating patches.
+# 03g,01dec98,wmd Fix typos.
+# 03f,23nov98,tcy warn user to disable virus protection on Welcome screen
+# 03e,19nov98,wmd fixed minor nits in wording.
+# 03d,19nov98,bjl added web site locations for patchinfo.
+# 03c,18nov98,bjl added formatted patch messages for patchinfo file.
+# 03b,12nov98,tcy added message for not saving installation key
+# 03a,10nov98,tcy added warning message for space in destination directory
+# removed message for checking temporary disk space
+# 02z,27oct98,bjl added recommended patch messages, modified required msg.
+# 02y,26oct98,tcy added message for checking temporary disk space
+# 02x,22oct98,wmd fix messages for clarity.
+# 02w,21oct98,wmd fix message for drv/obj.
+# 02v,20oct98,tcy added message for updating system and changed dcom message
+# 02u,20oct98,bjl added tornado registry name entry message.
+# 02t,19oct98,bjl added tornado registry description message.
+# 02s,16oct98,wmd add new message for driver product warning.
+# 02r,16oct98,wmd fixed README.TXT description.
+# 02q,12oct98,tcy removed extraneous "the" from messages
+# 02p,06oct98,tcy added CD description to Welcome page
+# 02o,29sep98,bjl added required patches message 5000_PATCHES_TEXT.
+# 02n,29sep98,wmd add text for readme page
+# 02m,29sep98,tcy refined DLL registration page text
+# 02l,29sep98,tcy changed message for DCOM
+# 02k,26sep98,tcy added messages for DLL and DCOM pages
+# 02j,24sep98,tcy removed "following" from 1080_WARN_4 message.
+# 02i,17sep98,tcy added comment on size of SETUP files to 1140_COMP_SELECT.
+# 02h,17sep98,wmd reword message 1080_WARN_4.
+# 02g,14sep98,tcy changed 1210_FINISH and 1550_USAGE messages
+# 02f,08sep98,tcy warn user library update may take several minutes
+# 02e,01sep98,wmd reword message for installing over tree.
+# added new messages for license agreement pages.
+# 02d,20aug98,wmd added message for license agreement.
+# 02c,18aug98,tcy added message for zip-file dialog box
+# 02d,04aug98,wmd added newer/older duplicate file warnings.
+# 02c,24jul98,tcy added system check messages
+# 02b,16jul98,wmd add new messages for T-2.
+# 02a,22jul98,tcy moved license messages to LICW32.TCL;
+# removed portMapper messages
+# 01n,09feb98,pdn updated string 1080_WARN_4
+# 01m,08apr97,pdn added new string for remote icon installing
+# fixed spr#8334
+# 01l,08mar97,tcy fixed language in string id 3340
+# 01k,07mar97,tcy added string id 3340
+# 01j,10feb97,pdn added more license messages.
+# 01i,09feb97,pdn implemented variable argument list for strTableGet(),
+# clean up.
+# 01h,17jan97,jmo fixed language in strings
+# 01g,12dec96,tcy merged in TEXT-only strings
+# 01f,12dec96,pdn added 1080_WARN_4 string warning that CD-ROM
+# revision is older than expected.
+# 01e,27nov96,sj added string for warning against installing in
+# the root of windows drive.
+# 01d,18nov96,tcy added strings for text-based installation script
+# 01c,14nov96,pdn substituted function for some global variables
+# 01b,14nov96,sj added strings from Windows installation script
+# 01a,11nov96,pdn written
+
+proc strTableGet {strId args} {
+ global strTable
+ global setupVals
+ global current_file
+
+ if [regexp {^format.*$} $strTable($strId) junk] {
+ return [eval $strTable($strId)]
+ } {
+ return $strTable($strId)
+ }
+}
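+
+# Illustrative note (not part of the original file): table entries whose
+# value begins with "format" are re-evaluated at lookup time, so bracketed
+# commands such as [cdromDescGet] expand when the string is fetched, e.g.
+#     set msg [strTableGet 1000_WELCOME1]
+# Entries that do not begin with "format" are returned verbatim.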
+
+set strTable(1000_WELCOME_CD) \
+ "format %s \"[cdNameGet description]\""
+
+set strTable(1000_WELCOME1) \
+ "format %s \"Welcome to the SETUP program. This program will\
+ install \[cdromDescGet\] on your computer.\""
+
+set strTable(1010_WELCOME2) \
+ "It is strongly recommended that you exit all programs and disable virus\
+ protection before running this SETUP program."
+
+set strTable(1020_WELCOME3) \
+ "At any time, you can quit the SETUP program by clicking the <Cancel>\
+ button. You also can go back to previous dialog boxes by clicking the\
+ <Back> button. To accept the current settings for a dialog box and go on\
+ with the installation process, click the <Next> button."
+
+set strTable(3020_WELCOME3) \
+ "format %s \"At any prompt, you can cancel installation \[cdromDescGet\]\
+ by typing \'exit\'. You can also go to the previous question\
+ by typing \'-\'. To accept current settings and go on with\
+ the installation process, press <Return>.\""
+
+set strTable(1030_WELCOME4) \
+ "WARNING: This program is protected by copyright law and international\
+ treaties."
+
+set strTable(1040_WELCOME5) \
+ "Unauthorized reproduction or distribution of this program, or any portion\
+ of it, may result in severe civil and criminal penalties, and will be\
+ prosecuted to the maximum extent possible under law."
+
+set strTable(1050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\] is not\
+ recommended. We suggest that you log off and log on as a normal\
+ user before running this program.\
+ \n\nClick Next to continue with SETUP anyway.\""
+
+set strTable(3050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\]\
+ is not recommended. We suggest that you log off and \
+ log on as a normal user before running this program.\
+ \n\nPress <Return> to continue with SETUP anyway.\""
+
+set strTable(1051_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] without System Administrator\
+ privileges is not recommended. Under your present privileges,\
+ SETUP will not offer certain installation options, such as \
+ the installation of some services, etc. Also, the software\
+ will be installed as a personal copy and will not be visible\
+ to other users on this machine.\
+ \n\nTo install \[cdromDescGet\] with access to all its\
+ installation features and options, we suggest that you exit\
+ the installation now and rerun it later with System\
+ Administrator\'s privileges.\n\nClick <Next> to continue with\
+ SETUP anyway.\""
+
+set strTable(1060_REGISTRATION) \
+ "Below, type your name, the name of your company."
+
+set strTable(1070_WARN_1) \
+ "The installation key you entered is invalid. Please enter a valid\
+ installation key."
+
+set strTable(1071_WARN_1) \
+ "Please enter the requested information."
+
+set strTable(1080_WARN_2) \
+ "You entered a key that was not created for this CD-ROM. Please verify\
+ that you are using the appropriate key. If this problem persists, contact\
+ Wind River Systems Sales department for help."
+
+set strTable(1080_WARN_3) \
+ "The installation key you entered is meant for other vendor's CD-ROM.\
+ Please contact the vendor who issued the CD-ROM for a proper key."
+
+set strTable(1085_WARN_4) \
+ "This CD-ROM does not require an installation key. Click the \"Next\"\
+ button to continue the installation."
+
+set strTable(1090_WARN_3) \
+ "format %s \"Can\'t initiate SETUP: \[lindex \$args 0\]. Please correct\
+ the problem then run SETUP again.\""
+
+set strTable(1095_WARN_NO_TCPIP) \
+ "SETUP has detected that your system does not have TCP-IP installed.\
+ To correct the problem, please contact your administrator and then\
+ run SETUP again.\nAborting setup."
+
+set strTable(1097_WARN_NO_LONGFILENAME_SUP) \
+ "SETUP has detected that your system does not have long filename\
+ support. To correct the problem, please contact your administrator\
+ and then run SETUP again.\nAborting setup."
+
+set strTable(1105_FULL_INSTALL) \
+ "Installs the Tornado products, tools, compilers, and other optional\
+ components that you may have purchased."
+
+set strTable(1107_PROGRAM_GROUP) \
+"Installs only the Tornado program group and tools icons for access to\
+ Tornado tools installed on a remote server."
+
+set strTable(1100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP to\
+ install \[cdromDescGet\].\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(1100_REMOTE_DIR) \
+ "format %s \"Please type the name of the directory where Tornado has\
+ already been installed.\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(3100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP\
+ to install \[cdromDescGet\].\""
+
+set strTable(1110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist.\
+ \nDo you want to create it now?"
+
+set strTable(3110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist."
+
+set strTable(3115_DEST_DIR_QUESTION) \
+ "Do you want to create it now? \[y\]"
+
+set strTable(1111_DEST_DIR_WARN) \
+ "format %s \"Installing \[cdromDescGet\] in the root directory is not\
+ recommended.\nClick <Yes> to select another directory.\""
+
+set strTable(1120_DEST_DIR_WARN2) \
+ "format %s \"Creating \[destDirGet\] failed: file exists.\""
+
+set strTable(1121_DEST_DIR_WARN2) \
+ "format %s \"Installing in \[destDirGet\] is not recommended.\
+ \nDo you want to change the installation directory?\""
+
+set strTable(1122_DEST_DIR_WARN2) \
+ "format %s \"Unable to create \[destDirGet\].\""
+
+set strTable(1130_DEST_DIR_WARN3) \
+ "You do not have permission to write files into the installation directory\
+ you entered.\
+ \n\nPlease choose a writable directory."
+
+set strTable(1135_DEST_DIR_WARN4) \
+ "format %s \"The installation directory you entered contains white\
+ space(s). Please select another directory.\""
+
+set strTable(1137_DUP_PRODUCT_WARN) \
+ "format %s \"Reinstalling products may potentially destroy any\
+ modifications you may have made to previously installed files.\
+ Do you wish to continue with the installation or go back to the\
+ '\[strTableGet 1450_TITLE_OPTION\]' page to reconsider your choices?\""
+
+set strTable(3155_COMP_SELECT_QUESTION) \
+ "Do you want to go back and specify a directory on a bigger partition?\
+ \[y\]"
+
+set strTable(1140_COMP_SELECT) \
+ "format %s \"In the option list below, please check all items you wish\
+ to install. SETUP files will be copied to your selected directory and\
+ take up \[setupSizeGet\] MB of disk space.\n\""
+
+set strTable(3140_COMP_SELECT) \
+ "In the option list below, select the item(s) you want to install."
+
+set strTable(3145_COMP_SELECT_CHANGE) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(3145_COMP_SELECT_CHANGE_INVALID) \
+ "The item number(s) you entered is not valid."
+
+set strTable(1150_COMP_SELECT_WARN) \
+ "There is not enough disk space to install the selected component(s).\
+ \n\nDo you want to go back and specify a directory on a bigger disk or\
+ partition?"
+
+set strTable(3150_COMP_SELECT_WARN) \
+ "There is not enough space to install the selected component(s)."
+
+set strTable(1151_COMP_SELECT_WARN) \
+ "At least one component must be selected to continue installation."
+
+set strTable(1160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested.\
+ \n\nThe selected button(s) below indicate the file permissions which\
+ will be set during the installation process.\
+ \n\nPlease adjust these to suit your site requirements."
+
+set strTable(3160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested."
+
+set strTable(3162_PERMISSION) \
+ "The list below indicates the file permissions which will be set during\
+ the installation process. Please adjust these to suit your site\
+ requirements."
+
+set strTable(3165_PERMISSION_QUESTION) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(1161_FOLDER_SELECT) \
+ "SETUP will add program icons to the Program Folder listed below. You may\
+ type a new folder name, or select one from the existing Folders list."
+
+set strTable(1162_FOLDER_SELECT) \
+ "Please enter a valid folder name."
+
+set strTable(1170_FILE_COPY) \
+ "format %s \"SETUP is copying the selected component(s) to the directory\
+ \[destDirGet\].\""
+
+set strTable(1171_FILE_COPY) \
+ "format %s \"SETUP cannot read \[setupFileNameGet 0\] from the CD-ROM.\
+ Please ensure that the CD-ROM is properly mounted.\""
+
+set strTable(1180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries. We recommend that you let\
+ SETUP finish this step, or the libraries will be in an inconsistent\
+ state. Please be patient as the process may take several minutes. \
+ If you want to quit the SETUP program, click <Cancel> and run\
+ the SETUP program again at a later time."
+
+set strTable(3180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries."
+
+set strTable(1190_REGISTRY_HOST) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host.\
+ \n\nPlease enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1191_REGISTRY_DESC) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host."
+
+set strTable(1192_REGISTRY_NAME) \
+ "Please enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1200_FINISH_WARN) \
+ "format %s \"However, there were \[errorCountGet\] error(s) which occured\
+ during the process. Please review the log file\
+ \[destDirDispGet\]/setup.log for more information.\""
+
+set strTable(1210_FINISH) \
+ "format %s \"SETUP has completed installing the selected product(s).\""
+
+set strTable(1212_FINISH) \
+ "SETUP has completed installing the program folders and icons."
+
+set strTable(1213_FINISH) \
+ "Terminating SETUP program."
+
+set strTable(1360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run\
+ the SETUP program at a later time to complete the\
+ installation.\
+ \n\nTo continue installing the program, click <Resume>. \
+ To quit the SETUP program, click <Exit SETUP>.\""
+
+set strTable(3360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run the\
+ SETUP program at a later time to complete the installation.\
+ \n\nTo continue installing the program, Press <Return>. \
+ To quit the SETUP program, type \'exit\'.\""
+
+set strTable(1370_FILE_ACCESS_ERROR) \
+ "format %s \"SETUP cannot create/update file \[lindex \$args 0\]:\
+ \[lindex \$args 1\]\""
+
+set strTable(1380_DEFLATE_ERROR) \
+ "format %s \"SETUP isn\'t able to deflate \[setupFileNameGet 0\]\
+ \n\nPlease select one of the following options\
+ to continue with the SETUP process.\""
+
+set strTable(1390_MEMORY_LOW) \
+ "The system is running out of memory. To continue, close applications\
+ or increase the system swap space."
+
+set strTable(1400_DISK_FULL) \
+ "No disk space left. To continue, free up some disk space."
+
+set strTable(1550_USAGE) \
+ "Usage: SETUP /I\[con\]\]\t\n\
+ /I : Add standard Tornado icons \n\
+ from a remote installation"
+
+set strTable(1410_TITLE_WELCOME) "Welcome"
+set strTable(1420_TITLE_WARNING) "Warning"
+set strTable(1430_TITLE_REGISTRATION) "User Registration"
+set strTable(1440_TITLE_DESTDIR) "Select Directory"
+set strTable(1450_TITLE_OPTION) "Select Products"
+set strTable(1460_TITLE_PERMISSION) "Permission"
+set strTable(1470_TITLE_FILECOPY) "Copying Files"
+set strTable(1480_TITLE_LIBUPDATE) "Update Libraries"
+set strTable(1490_TITLE_REGISTRY_HOST) "Tornado Registry"
+set strTable(1495_TITLE_BACKWARD_COMPATIBILITY) "Backward Compatibility"
+set strTable(1500_TITLE_FINISH) "Finish"
+set strTable(1560_TITLE_FOLDER) "Select Folder"
+set strTable(1563_TITLE_DLL_REG) "Software Registration"
+set strTable(1567_TITLE_DCOM) "DCOM Installation"
+
+set strTable(1570_OPTION_SELECT) \
+ "Choose one of the options listed below, then click the\
+ <Next> button to continue the installation."
+
+set strTable(1576_OPTION_MANUAL) \
+ "Install Tornado Registry manually"
+
+set strTable(1577_OPTION_STARTUP) \
+ "Install Tornado Registry locally in the Startup Group"
+
+set strTable(1578_OPTION_SERVICE) \
+ "Install Tornado Registry locally as a Service"
+
+set strTable(1579_OPTION_REMOTE) \
+ "Configure to use a remote Tornado Registry"
+
+set strTable(1580_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group or as an\
+ NT Service. For more information, consult your Tornado User\'s Guide."
+
+set strTable(1581_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group. For more\
+ information, consult your Tornado User\'s Guide."
+
+set strTable(3000_RETURN_QUESTION) \
+ "Press <Return> to continue"
+
+set strTable(3055_EXIT_QUESTION) \
+ "Type \'exit\' to quit the program or press <Return> to continue"
+
+set strTable(3370_BACK_CALLBACK) \
+ "Cannot go back further."
+
+set strTable(1080_WARN_4) \
+ "The installation key you entered attempted to unlock one or more \
+ products that may have been removed from our product line. \
+ Please compare the unlocked product list on the\
+ \"[strTableGet 1450_TITLE_OPTION]\" screen with your purchased order\
+ list, and contact us if you discover any differences."
+
+set strTable(4000_BASE_INSTALL_WARN) \
+ "format %s \"Warning! Re-installing Tornado over an existing \
+ tree will overwrite any installed patches. \
+ If you proceed with the installation, please \
+ re-install patches if any.\""
+
+set strTable(4000_BASE_INSTALL_WARN_1) \
+ "Select <Install> to overwrite existing Tornado installation,\
+ or choose <Select Path> to enable you to back up to the \'Select\
+ Directory\' page to enter an alternate path."
+
+set strTable(4010_FILE_EXISTS_OLDER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is older. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_NEWER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is newer. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_WARN_1) \
+ "Overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_2) \
+ "Do not overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_3) \
+ "Overwrite ALL files, do not show this dialog again."
+
+set strTable(4020_ANALYZING_BANNER) \
+ "Analyzing installation files, please wait..."
+
+set strTable(4030_NO_ZIP_FILE) \
+ "format %s \"SETUP cannot find the ZIP files for installing\
+ \[cdromDescGet\] in the default directory.\n\n\
+ Please type the name of the WIND\
+ directory containing the ZIP files.\n\nClick the\
+ <Browse> button to choose the directory interactively.\""
+
+set strTable(4040_LIC_TEXT) \
+ "Attention: By clicking on the \"I accept\" button or by\
+ installing the software, you are consenting to be bound by\
+ the terms of this agreement (this \"Agreement\"). If you do\
+ not agree to all of the terms, click the \"I don't Accept\" button\
+ and do not install this software. A copy of this Agreement can be viewed\
+ in the Setup directory under the destination path that you have\
+ designated after the installation is completed."
+
+set strTable(4050_PROJECT_TEXT) \
+ "Please enter your project name, and the number of licensed\
+ users on the project in the spaces below."
+
+set strTable(4060_LICENSE_TEXT) \
+ "By clicking on the \"I accept\" button \
+ you are consenting to be bound by the terms of this agreement.\
+ If you do not agree to all of the terms, click the \"Cancel\"\
+ button and do not install this software."
+
+set strTable(4070_DLL_TEXT) \
+ "SETUP is registering software on your machine. This will take a few\
+ minutes."
+
+set strTable(4080_DCOM_TEXT) \
+ "Setup has detected that your COM/DCOM DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"DCOM95\" now?"
+
+set strTable(4082_DCOM95_AND_COMCTL_TEXT) \
+ "Setup has detected that your COM/DCOM and Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You must reboot your system after DLL files have been\
+ installed. After rebooting, please rerun SETUP to continue with\
+ installation.\
+ \n\n\
+ Note: 401comupd.exe and DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"401comupd.exe\" and \"DCOM95\" now?"
+
+set strTable(4085_COMCTL_UPDATE_TEXT) \
+ "Setup has detected that your Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The 401comupd.exe installation program updates your system DLLs. You\
+ should save all open documents and close all programs before installing\
+ 401comupd.exe.\
+ \n\nWould you like to install \"401comupd.exe\" now?"
+
+set strTable(4090_README_TEXT) \
+ "Please read the README file contents that are displayed below.\
+ It contains important information that will enable you to install\
+ and successfully run the BerkeleyDB product. For your convenience\
+ this file is copied to your installation directory path."
+
+set strTable(5000_PATCHES_REQUIRED_TEXT) \
+ "SETUP has detected that required operating system patches\
+ have not been installed on this machine. These patches are\
+ necessary for the correct operation of SETUP and Tornado. Please refer\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\
+ you can continue with installation:\n\n"
+
+set strTable(5001_PATCHES_RECOMMENDED_TEXT) \
+ "\n\nSETUP has also detected that recommended operating system patches\
+ have not been installed. It is recommended that these patches are\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5002_PATCHES_RECOMMENDED_TEXT) \
+ "SETUP has detected that some operating system patches have not been\
+ installed on this machine. It is recommended that these\
+ patches are installed before starting Tornado to ensure correct\
+ operation. Please refer to the Tornado Release Notes\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5003_PATCHES_REQUIRED_FORMATTED_TEXT) \
+ "\n SETUP has detected that required operating system patches\n\
+ have not been installed on this machine. These patches are\n\
+ necessary for the correct operation of SETUP and Tornado. Please refer\n\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\n\
+ you can continue with installation:\n\n"
+
+set strTable(5004_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n\n SETUP has also detected that recommended operating system patches\n\
+ have not been installed. It is recommended that these patches are\n\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5005_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n SETUP has detected that some operating system patches have not been\n\
+ installed on this machine. It is recommended that these\n\
+ patches are installed before starting Tornado to ensure correct\n\
+ operation. Please refer to the Tornado Release Notes\n\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5006_PATCHES_SUN_LOCATION) \
+ "\nPatches for Sun machines are available at http://sunsolve.sun.com.\n"
+
+set strTable(5007_PATCHES_HP_LOCATION) \
+ "\nPatches for HP machines are available at:\n\
+ http://us-support.external.hp.com (US, Canada, Asia-Pacific, and\
+ Latin-America)\n\
+ http://europe-support.external.hp.com (Europe)\n"
+
+set strTable(5008_PATCHES_UPDATE) \
+ "\nNote: System vendors very frequently update and replace patches.\
+ If a specific patch is no longer available, please use the\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5009_PATCHES_UPDATE_FORMATTED) \
+ "\n Note: System vendors very frequently update and replace patches.\n\
+ If a specific patch is no longer available, please use the\n\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5010_DRIVERS_INFO) \
+ "The installation of the Driver component is required because\n\
+ you have selected the basic Tornado product for installation.\n\n\
+ If you wish to uncheck this item you must uncheck either the\n\
+ basic Tornado and/or Tornado Simulator product(s) or go to the\n\
+ 'Details' button for Tornado and uncheck both the Simulator and\n\
+ the Tornado Object parts."
+
+set strTable(5020_DO_NOT_SAVE_KEY_FOR_FAE) \
+ "The installation key you are about to enter will NOT\
+ be saved in the system registry.\nIs this what you want?"
+
+set strTable(5030_BACKWARD_COMPATIBILITY) \
+ "While the portmapper is not needed for Tornado 2.0, it is\
+ included in this release for development environments in\
+ which both Tornado 2.0 and Tornado 1.0.1 are in use.\
+ \n\nWould you like to use your Tornado 1.0.x tools with Tornado 2.0?"
+
+set strTable(5040_BACKWARD_COMPATIBILITY) \
+ "Note:\
+ \n\nIf you have selected to install the Tornado Registry as\
+ a service, there is no way to retain backward compatibility\
+ with Tornado 1.0.x."
+
+set strTable(5050_BACKWARD_COMPATIBILITY) \
+ "For more information on backward compatibility,\
+ please consult the Tornado 2.0 Release Notes."
diff --git a/libdb/dist/vx_setup/README.in b/libdb/dist/vx_setup/README.in
new file mode 100644
index 0000000..f96948c
--- /dev/null
+++ b/libdb/dist/vx_setup/README.in
@@ -0,0 +1,7 @@
+README.TXT: Sleepycat Software Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@ Release v@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Information on known problems, changes introduced with the
+current revision of the CD-ROM, and other product bulletins
+can be obtained from the Sleepycat Software web site:
+
+ http://www.sleepycat.com/
diff --git a/libdb/dist/vx_setup/SETUP.BMP b/libdb/dist/vx_setup/SETUP.BMP
new file mode 100644
index 0000000..2918480
Binary files /dev/null and b/libdb/dist/vx_setup/SETUP.BMP differ
diff --git a/libdb/dist/vx_setup/vx_allfile.in b/libdb/dist/vx_setup/vx_allfile.in
new file mode 100644
index 0000000..61a1b8e
--- /dev/null
+++ b/libdb/dist/vx_setup/vx_allfile.in
@@ -0,0 +1,5 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wsp
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_config.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_int.h
diff --git a/libdb/dist/vx_setup/vx_demofile.in b/libdb/dist/vx_setup/vx_demofile.in
new file mode 100644
index 0000000..42a698e
--- /dev/null
+++ b/libdb/dist/vx_setup/vx_demofile.in
@@ -0,0 +1,3 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/README
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.c
diff --git a/libdb/dist/vx_setup/vx_setup.in b/libdb/dist/vx_setup/vx_setup.in
new file mode 100644
index 0000000..7bc3f51
--- /dev/null
+++ b/libdb/dist/vx_setup/vx_setup.in
@@ -0,0 +1,13 @@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.all
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-Demo
+@DB_SETUP_DIR@
+BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ Demo program
+demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.demo
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
diff --git a/libdb/dist/win_config.in b/libdb/dist/win_config.in
new file mode 100644
index 0000000..09acab2
--- /dev/null
+++ b/libdb/dist/win_config.in
@@ -0,0 +1,439 @@
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+#if defined(_DEBUG)
+#if !defined(DEBUG)
+#define DEBUG 1
+#endif
+#endif
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_DIRENT_H */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+/* #undef HAVE_MUTEX_VXWORKS */
+
+/* Define to 1 to use Windows mutexes. */
+#define HAVE_MUTEX_WIN32 1
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+/* #undef HAVE_SCHED_YIELD */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef HAVE_SELECT */
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+#define HAVE_SNPRINTF 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef HAVE_UNISTD_H */
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
+
+/* Define to 1 if you have the `vsnprintf' function. */
+#define HAVE_VSNPRINTF 1
+
+/* Define to 1 if building VxWorks. */
+/* #undef HAVE_VXWORKS */
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+#define HAVE__FSTATI64 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * All of the necessary includes have been included; ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+/*
+ * Win32 does not define getopt and friends in any header file, so we must.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern int optind;
+extern char *optarg;
+extern int getopt(int, char * const *, const char *);
+#if defined(__cplusplus)
+}
+#endif
+
+/*
+ * We use DB_WIN32 much as one would use _WIN32, to determine that we're
+ * using an operating system environment that supports Win32 calls
+ * and semantics. We don't use _WIN32 because cygwin/gcc also defines
+ * that, even though it closely emulates the Unix environment.
+ */
+#define DB_WIN32 1
+
+/*
+ * This is a grievous hack -- once we've included windows.h, we have no choice
+ * but to use ANSI-style varargs (because it pulls in stdarg.h for us). DB's
+ * code decides which type of varargs to use based on the state of __STDC__.
+ * Sensible. Unfortunately, Microsoft's compiler _doesn't_ define __STDC__
+ * unless you invoke it with arguments turning OFF all vendor extensions. Even
+ * more unfortunately, if we do that, it fails to parse windows.h!!!!! So, we
+ * define __STDC__ here, after windows.h comes in. Note: the compiler knows
+ * we've defined it, and starts enforcing strict ANSI compliance from this point
+ * on.
+ */
+#define __STDC__ 1
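
As context for the hand-written Win32 db_config.h above, here is a minimal sketch (not part of the patch, and not in the imported tree) of how the name mappings are meant to be exercised: portable code that calls getcwd() and snprintf() compiles unchanged under DB_WIN32 because the macros map those names onto the CRT equivalents. The helper name below is hypothetical.

    #include "db_config.h"

    static void
    example_report_cwd(void)	/* hypothetical helper, for illustration only */
    {
    	char cwd[_MAX_PATH], msg[_MAX_PATH + 16];

    	/* getcwd() expands to _getcwd() under DB_WIN32 (see the mapping above). */
    	if (getcwd(cwd, sizeof(cwd)) != NULL)
    		/* snprintf() expands to _snprintf() under DB_WIN32. */
    		(void)snprintf(msg, sizeof(msg), "cwd: %s", cwd);
    }
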
diff --git a/libdb/dist/win_exports.in b/libdb/dist/win_exports.in
new file mode 100644
index 0000000..d031b4b
--- /dev/null
+++ b/libdb/dist/win_exports.in
@@ -0,0 +1,134 @@
+# $Id$
+
+# Standard interfaces.
+ db_create
+ db_env_create
+ db_strerror
+ db_version
+ db_xa_switch
+ log_compare
+ txn_abort
+ txn_begin
+ txn_commit
+
+# Library configuration interfaces.
+ db_env_set_func_close
+ db_env_set_func_dirfree
+ db_env_set_func_dirlist
+ db_env_set_func_exists
+ db_env_set_func_free
+ db_env_set_func_fsync
+ db_env_set_func_ioinfo
+ db_env_set_func_malloc
+ db_env_set_func_map
+ db_env_set_func_open
+ db_env_set_func_read
+ db_env_set_func_realloc
+ db_env_set_func_rename
+ db_env_set_func_seek
+ db_env_set_func_sleep
+ db_env_set_func_unlink
+ db_env_set_func_unmap
+ db_env_set_func_write
+ db_env_set_func_yield
+
+# Needed for application-specific logging and recovery routines.
+ __db_add_recovery
+
+# These are needed to link the tcl library.
+ __db_dbm_close
+ __db_dbm_delete
+ __db_dbm_fetch
+ __db_dbm_firstkey
+ __db_dbm_init
+ __db_dbm_nextkey
+ __db_dbm_store
+ __db_hcreate
+ __db_hdestroy
+ __db_hsearch
+ __db_loadme
+ __db_ndbm_clearerr
+ __db_ndbm_close
+ __db_ndbm_delete
+ __db_ndbm_dirfno
+ __db_ndbm_error
+ __db_ndbm_fetch
+ __db_ndbm_firstkey
+ __db_ndbm_nextkey
+ __db_ndbm_open
+ __db_ndbm_pagfno
+ __db_ndbm_rdonly
+ __db_ndbm_store
+ __db_panic
+ __db_r_attach
+ __db_r_detach
+ __db_win32_mutex_init
+ __db_win32_mutex_lock
+ __db_win32_mutex_unlock
+ __ham_func2
+ __ham_func3
+ __ham_func4
+ __ham_func5
+ __ham_test
+ __lock_dump_region
+ __memp_dump_region
+ __os_calloc
+ __os_closehandle
+ __os_free
+ __os_ioinfo
+ __os_malloc
+ __os_open
+ __os_openhandle
+ __os_read
+ __os_realloc
+ __os_strdup
+ __os_umalloc
+ __os_write
+
+# These are needed for linking tools or Java.
+ __bam_init_print
+ __bam_pgin
+ __bam_pgout
+ __crdel_init_print
+ __db_dispatch
+ __db_dump
+ __db_e_stat
+ __db_err
+ __db_getlong
+ __db_getulong
+ __db_global_values
+ __db_init_print
+ __db_inmemdbflags
+ __db_isbigendian
+ __db_omode
+ __db_overwrite
+ __db_pgin
+ __db_pgout
+ __db_prdbt
+ __db_prfooter
+ __db_prheader
+ __db_rpath
+ __db_util_cache
+ __db_util_interrupted
+ __db_util_logset
+ __db_util_siginit
+ __db_util_sigresend
+ __db_verify_callback
+ __db_verify_internal
+ __dbreg_init_print
+ __fop_init_print
+ __ham_get_meta
+ __ham_init_print
+ __ham_pgin
+ __ham_pgout
+ __ham_release_meta
+ __os_clock
+ __os_get_errno
+ __os_id
+ __os_set_errno
+ __os_sleep
+ __os_ufree
+ __os_yield
+ __qam_init_print
+ __qam_pgin_out
+ __txn_init_print
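
The "standard interfaces" at the top of win_exports.in are the entry points an application linked against the Windows DLL calls directly. As a hedged illustration (not part of the patch), a minimal sketch assuming the 4.1-era C API; the caller name and file name are hypothetical.

    #include <stdio.h>
    #include <db.h>

    int
    example_open(void)	/* hypothetical caller, for illustration only */
    {
    	DB *dbp;
    	int ret;

    	/* db_create and db_strerror are among the exported entry points. */
    	if ((ret = db_create(&dbp, NULL, 0)) != 0) {
    		fprintf(stderr, "db_create: %s\n", db_strerror(ret));
    		return (ret);
    	}
    	if ((ret = dbp->open(dbp,
    	    NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
    		fprintf(stderr, "open: %s\n", db_strerror(ret));
    	(void)dbp->close(dbp, 0);
    	return (ret);
    }
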
diff --git a/libdb/docs/api_c/c_index.html b/libdb/docs/api_c/c_index.html
new file mode 100644
index 0000000..33ee6d0
--- /dev/null
+++ b/libdb/docs/api_c/c_index.html
@@ -0,0 +1,164 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: C Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: C Interface</h1>
+<p><table border=1 align=center>
+<tr><th>Section</th><th>Method</th><th>Description</th></tr>
+<tr><td><b>Database Environment</b></td><td><a href="../api_c/env_create.html">db_env_create</a></td><td>Create an environment handle</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_close.html">DB_ENV-&gt;close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_err.html">DB_ENV-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_err.html">DB_ENV-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_open.html">DB_ENV-&gt;open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_remove.html">DB_ENV-&gt;remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_strerror.html">db_strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_version.html">db_version</a></td><td>Return version information</td></tr>
+<tr><td><b>Environment Configuration</b></td><td><a href="../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_encrypt.html">DB_ENV-&gt;set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_feedback.html">DB_ENV-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><b>Database Operations</b></td><td><a href="../api_c/db_create.html">db_create</a></td><td>Create a database handle</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_associate.html">DB-&gt;associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_close.html">DB-&gt;close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_del.html">DB-&gt;del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_err.html">DB-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_err.html">DB-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_fd.html">DB-&gt;fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_get.html">DB-&gt;get</a>, <a href="../api_c/db_get.html">DB-&gt;pget</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_get_type.html">DB-&gt;get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_join.html">DB-&gt;join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_key_range.html">DB-&gt;key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_open.html">DB-&gt;open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_put.html">DB-&gt;put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_remove.html">DB-&gt;remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_rename.html">DB-&gt;rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_stat.html">DB-&gt;stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_sync.html">DB-&gt;sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_truncate.html">DB-&gt;truncate</a></td><td>Empty a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_verify.html">DB-&gt;verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><td><b>Database Configuration</b></td><td><a href="../api_c/db_set_alloc.html">DB-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_cache_priority.html">DB-&gt;set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_encrypt.html">DB-&gt;set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_feedback.html">DB-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><b>Btree/Recno Configuration</b></td><td><a href="../api_c/db_set_append_recno.html">DB-&gt;set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><b>Hash Configuration</b></td><td><a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><b>Queue Configuration</b></td><td><a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><b>Database Cursor Operations</b></td><td><a href="../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>, <a href="../api_c/dbc_get.html">DBcursor-&gt;c_pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a></td><td>Store by cursor</td></tr>
+<tr><td><b>Key/Data Pairs</b></td><td><a href="../api_c/dbt_class.html">DBT</a></td><td><br></td></tr>
+<tr><td><b>Bulk Retrieval</b></td><td><a href="../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a></td><td><br></td></tr>
+<tr><td><b>Lock Subsystem</b></td><td><a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><b>Log Subsystem</b></td><td><a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><br></td><td><a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_c/log_file.html">DB_ENV-&gt;log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><b>Log Cursor Operations</b></td><td><a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_c/logc_close.html">DB_LOGC-&gt;close</a></td><td>Close a log cursor</td></tr>
+<tr><td><br></td><td><a href="../api_c/logc_get.html">DB_LOGC-&gt;get</a></td><td>Retrieve a log record</td></tr>
+<tr><td><b>Log Sequence Numbers</b></td><td><a href="../api_c/lsn_class.html">DB_LSN</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_c/log_compare.html">log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><b>Memory Pool Subsystem</b></td><td><a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><b>Memory Pool Files</b></td><td><a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><br></td><td><a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+<tr><td><b>Transaction Subsystem</b></td><td><a href="../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><b>Transactions</b></td><td><a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_discard.html">DB_TXN-&gt;discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_id.html">DB_TXN-&gt;id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><br></td><td><a href="../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a></td><td>Set transaction timeout</td></tr>
+<tr><td><b>Replication</b></td><td><a href="../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><br></td><td><a href="../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><br></td><td><a href="../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><br></td><td><a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><br></td><td><a href="../api_c/rep_start.html">DB_ENV-&gt;rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><br></td><td><a href="../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><b>Historic Interfaces</b></td><td><a href="../api_c/dbm.html">dbm</a></td><td>UNIX Dbm/Ndbm Interfaces</td></tr>
+<tr><td><br></td><td><a href="../api_c/hsearch.html">hsearch</a></td><td>UNIX Hsearch Interfaces</td></tr>
+<tr><td><b>System Configuration</b></td><td><a href="../ref/program/runtime.html">Run-time configuration</a></td><td>Replace underlying Berkeley DB system interfaces</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/c_pindex.html b/libdb/docs/api_c/c_pindex.html
new file mode 100644
index 0000000..1baa229
--- /dev/null
+++ b/libdb/docs/api_c/c_pindex.html
@@ -0,0 +1,727 @@
+<html>
+<head>
+<title>Berkeley DB: C Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right>configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#4">1.85</a> API compatibility</td></tr>
+<tr><td align=right>building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#6">1.85</a> databases</td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.3/intro.html#2">3.3</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.0/intro.html#2">4.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.1/intro.html#2">4.1</a></td></tr>
+<tr><td align=right>selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#2">access</a> method FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/tune.html#2">access</a> method tuning</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right>data </td><td><a href="../ref/am_misc/align.html#2">alignment</a></td></tr>
+<tr><td align=right>programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/archival.html#4">backup</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#4">buffer</a> pool subsystem</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_c/memp_fopen.html#3">buffering</a></td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_c/env_set_flags.html#4">buffering</a> for database files</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_c/env_set_flags.html#5">buffering</a> for log files</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#3">building</a> for QNX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/introae.html#2">building</a> for VxWorks AE</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbt_bulk.html#3">bulk</a> retrieval</td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#5">C++</a> API</td></tr>
+<tr><td align=right>flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right>introduction to the memory </td><td><a href="../ref/mp/intro.html#3">cache</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right>utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right>database page </td><td><a href="../api_c/db_set_flags.html#3">checksum</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/faq.html#3">compaction</a></td></tr>
+<tr><td align=right>specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right>changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right></td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right>database environment </td><td><a href="../ref/env/db_config.html#3">configuration</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right>salvaging </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right>closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right>deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right>duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right>retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right>storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#data">data</a></td></tr>
+<tr><td align=right>utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right>utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_class.html#2">DB</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/region.html#2">__db.001</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a></td></tr>
+<tr><td align=right>DB-&gt;verify </td><td><a href="../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>DB-&gt;put </td><td><a href="../api_c/db_put.html#DB_APPEND">DB_APPEND</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;log_archive </td><td><a href="../api_c/log_archive.html#DB_ARCH_ABS">DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;log_archive </td><td><a href="../api_c/log_archive.html#DB_ARCH_DATA">DB_ARCH_DATA</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;log_archive </td><td><a href="../api_c/log_archive.html#DB_ARCH_LOG">DB_ARCH_LOG</a></td></tr>
+<tr><td align=right>DB-&gt;associate </td><td><a href="../api_c/db_associate.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB-&gt;del </td><td><a href="../api_c/db_del.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB-&gt;put </td><td><a href="../api_c/db_put.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB-&gt;truncate </td><td><a href="../api_c/db_truncate.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;dbremove </td><td><a href="../api_c/env_dbremove.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;dbrename </td><td><a href="../api_c/env_dbrename.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_BTREE">DB_BTREE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_class.html#2">DBC</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_CHKSUM_SHA1">DB_CHKSUM_SHA1</a></td></tr>
+<tr><td align=right>db_env_create </td><td><a href="../api_c/env_create.html#DB_CLIENT">DB_CLIENT</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/db_config.html#2">DB_CONFIG</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_CONSUME">DB_CONSUME</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_CONSUME_WAIT">DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right>DB-&gt;associate </td><td><a href="../api_c/db_associate.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_create.html#2">db_create</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;open </td><td><a href="../api_c/memp_fopen.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_close.html#2">DBcursor-&gt;c_close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_count.html#2">DBcursor-&gt;c_count</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_del.html#2">DBcursor-&gt;c_del</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_dup.html#2">DBcursor-&gt;c_dup</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_get.html#2">DBcursor-&gt;c_get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbc_put.html#2">DBcursor-&gt;c_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_associate.html#3">DB_DBT_APPMALLOC</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;open </td><td><a href="../api_c/memp_fopen.html#DB_DIRECT">DB_DIRECT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_DIRECT_DB">DB_DIRECT_DB</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_DIRECT_LOG">DB_DIRECT_LOG</a></td></tr>
+<tr><td align=right>DB-&gt;cursor </td><td><a href="../api_c/db_cursor.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DB-&gt;join </td><td><a href="../api_c/db_join.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_begin </td><td><a href="../api_c/txn_begin.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_associate.html#4">DB_DONOTINDEX</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_DUP">DB_DUP</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right>DB-&gt;upgrade </td><td><a href="../api_c/db_upgrade.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_transport.html#3">DB_EID_BROADCAST</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_ENCRYPT">DB_ENCRYPT</a></td></tr>
+<tr><td align=right>DB-&gt;set_encrypt </td><td><a href="../api_c/db_set_encrypt.html#DB_ENCRYPT_AES">DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_encrypt </td><td><a href="../api_c/env_set_encrypt.html#DB_ENCRYPT_AES">DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_class.html#2">DB_ENV</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_create.html#2">db_env_create</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_close.html#2">DB_ENV-&gt;close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_dbremove.html#2">DB_ENV-&gt;dbremove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_dbrename.html#2">DB_ENV-&gt;dbrename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_err.html#2">DB_ENV-&gt;err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_detect.html#2">DB_ENV-&gt;lock_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_get.html#2">DB_ENV-&gt;lock_get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_id.html#2">DB_ENV-&gt;lock_id</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_id_free.html#2">DB_ENV-&gt;lock_id_free</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_put.html#2">DB_ENV-&gt;lock_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_stat.html#2">DB_ENV-&gt;lock_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_vec.html#2">DB_ENV-&gt;lock_vec</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_archive.html#2">DB_ENV-&gt;log_archive</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_cursor.html#2">DB_ENV-&gt;log_cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_file.html#2">DB_ENV-&gt;log_file</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_flush.html#2">DB_ENV-&gt;log_flush</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_put.html#2">DB_ENV-&gt;log_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_stat.html#2">DB_ENV-&gt;log_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fcreate.html#2">DB_ENV-&gt;memp_fcreate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_register.html#2">DB_ENV-&gt;memp_register</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_stat.html#2">DB_ENV-&gt;memp_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_sync.html#2">DB_ENV-&gt;memp_sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_trickle.html#2">DB_ENV-&gt;memp_trickle</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_open.html#2">DB_ENV-&gt;open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_remove.html#2">DB_ENV-&gt;remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_elect.html#2">DB_ENV-&gt;rep_elect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_message.html#2">DB_ENV-&gt;rep_process_message</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_start.html#2">DB_ENV-&gt;rep_start</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_stat.html#2">DB_ENV-&gt;rep_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_alloc.html#2">DB_ENV-&gt;set_alloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_app_dispatch.html#2">DB_ENV-&gt;set_app_dispatch</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_cachesize.html#2">DB_ENV-&gt;set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_data_dir.html#2">DB_ENV-&gt;set_data_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_encrypt.html#2">DB_ENV-&gt;set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_errcall.html#2">DB_ENV-&gt;set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_errfile.html#2">DB_ENV-&gt;set_errfile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_errpfx.html#2">DB_ENV-&gt;set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_feedback.html#2">DB_ENV-&gt;set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_flags.html#2">DB_ENV-&gt;set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lg_bsize.html#2">DB_ENV-&gt;set_lg_bsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lg_dir.html#2">DB_ENV-&gt;set_lg_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lg_max.html#2">DB_ENV-&gt;set_lg_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lg_regionmax.html#2">DB_ENV-&gt;set_lg_regionmax</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lk_conflicts.html#2">DB_ENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lk_detect.html#2">DB_ENV-&gt;set_lk_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lk_max_lockers.html#2">DB_ENV-&gt;set_lk_max_lockers</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lk_max_locks.html#2">DB_ENV-&gt;set_lk_max_locks</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_lk_max_objects.html#2">DB_ENV-&gt;set_lk_max_objects</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_mp_mmapsize.html#2">DB_ENV-&gt;set_mp_mmapsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_paniccall.html#2">DB_ENV-&gt;set_paniccall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_limit.html#2">DB_ENV-&gt;set_rep_limit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_transport.html#2">DB_ENV-&gt;set_rep_transport</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_rpc_server.html#2">DB_ENV-&gt;set_rpc_server</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_shm_key.html#2">DB_ENV-&gt;set_shm_key</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_tas_spins.html#2">DB_ENV-&gt;set_tas_spins</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_timeout.html#2">DB_ENV-&gt;set_timeout</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_tmp_dir.html#2">DB_ENV-&gt;set_tmp_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_tx_max.html#2">DB_ENV-&gt;set_tx_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_tx_timestamp.html#2">DB_ENV-&gt;set_tx_timestamp</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_verbose.html#2">DB_ENV-&gt;set_verbose</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_begin.html#2">DB_ENV-&gt;txn_begin</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_checkpoint.html#2">DB_ENV-&gt;txn_checkpoint</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_recover.html#2">DB_ENV-&gt;txn_recover</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_stat.html#2">DB_ENV-&gt;txn_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_close.html#2">db_env_set_func_close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_dirfree.html#2">db_env_set_func_dirfree</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_dirlist.html#2">db_env_set_func_dirlist</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_exists.html#2">db_env_set_func_exists</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_free.html#2">db_env_set_func_free</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_fsync.html#2">db_env_set_func_fsync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_ioinfo.html#2">db_env_set_func_ioinfo</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_malloc.html#2">db_env_set_func_malloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_map.html#2">db_env_set_func_map</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_open.html#2">db_env_set_func_open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_read.html#2">db_env_set_func_read</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_realloc.html#2">db_env_set_func_realloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_rename.html#2">db_env_set_func_rename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_seek.html#2">db_env_set_func_seek</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_sleep.html#2">db_env_set_func_sleep</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_unlink.html#2">db_env_set_func_unlink</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_unmap.html#2">db_env_set_func_unmap</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_write.html#2">db_env_set_func_write</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/set_func_yield.html#2">db_env_set_func_yield</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_EXCL">DB_EXCL</a></td></tr>
+<tr><td align=right>DB-&gt;stat </td><td><a href="../api_c/db_stat.html#DB_FAST_STAT">DB_FAST_STAT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_recover </td><td><a href="../api_c/txn_recover.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;log_put </td><td><a href="../api_c/log_put.html#DB_FLUSH">DB_FLUSH</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;remove </td><td><a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_checkpoint </td><td><a href="../api_c/txn_checkpoint.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_GET_BOTH_RANGE">DB_GET_BOTH_RANGE</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_associate.html#2">DB-&gt;associate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_close.html#2">DB-&gt;close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_cursor.html#2">DB-&gt;cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_del.html#2">DB-&gt;del</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_err.html#2">DB-&gt;err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_fd.html#2">DB-&gt;fd</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_get.html#2">DB-&gt;get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_get_byteswapped.html#2">DB-&gt;get_byteswapped</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_get_type.html#2">DB-&gt;get_type</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_join.html#2">DB-&gt;join</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_key_range.html#2">DB-&gt;key_range</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_open.html#2">DB-&gt;open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_put.html#2">DB-&gt;put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_remove.html#2">DB-&gt;remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_rename.html#2">DB-&gt;rename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_alloc.html#2">DB-&gt;set_alloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_append_recno.html#2">DB-&gt;set_append_recno</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_bt_compare.html#2">DB-&gt;set_bt_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_bt_minkey.html#2">DB-&gt;set_bt_minkey</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_bt_prefix.html#2">DB-&gt;set_bt_prefix</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_cache_priority.html#2">DB-&gt;set_cache_priority</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_cachesize.html#2">DB-&gt;set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_dup_compare.html#2">DB-&gt;set_dup_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_encrypt.html#2">DB-&gt;set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_errcall.html#2">DB-&gt;set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_errfile.html#2">DB-&gt;set_errfile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_errpfx.html#2">DB-&gt;set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_feedback.html#2">DB-&gt;set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_flags.html#2">DB-&gt;set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_h_ffactor.html#2">DB-&gt;set_h_ffactor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_h_hash.html#2">DB-&gt;set_h_hash</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_h_nelem.html#2">DB-&gt;set_h_nelem</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_lorder.html#2">DB-&gt;set_lorder</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_pagesize.html#2">DB-&gt;set_pagesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_paniccall.html#2">DB-&gt;set_paniccall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_q_extentsize.html#2">DB-&gt;set_q_extentsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_re_delim.html#2">DB-&gt;set_re_delim</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_re_len.html#2">DB-&gt;set_re_len</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_re_pad.html#2">DB-&gt;set_re_pad</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_re_source.html#2">DB-&gt;set_re_source</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_stat.html#2">DB-&gt;stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_sync.html#2">DB-&gt;sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_truncate.html#2">DB-&gt;truncate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_upgrade.html#2">DB-&gt;upgrade</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_verify.html#2">DB-&gt;verify</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_HASH">DB_HASH</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_JOINENV">DB_JOINENV</a></td></tr>
+<tr><td align=right>DB-&gt;join </td><td><a href="../api_c/db_join.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>DB-&gt;join </td><td><a href="../api_c/db_join.html#DB_JOIN_NOSORT">DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEXIST">DB_KEYEXIST</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lock_class.html#2">DB_LOCK</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#4">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_GET">DB_LOCK_GET</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_GET_TIMEOUT">DB_LOCK_GET_TIMEOUT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_IREAD">DB_LOCK_IREAD</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_IWR">DB_LOCK_IWR</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_IWRITE">DB_LOCK_IWRITE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_get </td><td><a href="../api_c/lock_get.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT">DB_LOCK_PUT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_READ">DB_LOCK_READ</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_TIMEOUT">DB_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#DB_LOCK_WRITE">DB_LOCK_WRITE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_lk_detect </td><td><a href="../api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_detect </td><td><a href="../api_c/lock_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/logc_class.html#2">DB_LOGC</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/logc_close.html#2">DB_LOGC-&gt;close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/logc_get.html#2">DB_LOGC-&gt;get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/lsn_class.html#2">DB_LSN</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbm.html#2">dbm/ndbm</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;put </td><td><a href="../api_c/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;set </td><td><a href="../api_c/memp_fset.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;get </td><td><a href="../api_c/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;put </td><td><a href="../api_c/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;set </td><td><a href="../api_c/memp_fset.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;put </td><td><a href="../api_c/memp_fput.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;set </td><td><a href="../api_c/memp_fset.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/mempfile_class.html#2">DB_MPOOLFILE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fclose.html#2">DB_MPOOLFILE-&gt;close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fget.html#2">DB_MPOOLFILE-&gt;get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fopen.html#2">DB_MPOOLFILE-&gt;open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fput.html#2">DB_MPOOLFILE-&gt;put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fset.html#2">DB_MPOOLFILE-&gt;set</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_set_clear_len.html#2">DB_MPOOLFILE-&gt;set_clear_len</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_set_fileid.html#2">DB_MPOOLFILE-&gt;set_fileid</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_set_ftype.html#2">DB_MPOOLFILE-&gt;set_ftype</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_set_lsn_offset.html#2">DB_MPOOLFILE-&gt;set_lsn_offset</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_set_pgcookie.html#2">DB_MPOOLFILE-&gt;set_pgcookie</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fsync.html#2">DB_MPOOLFILE-&gt;sync</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;get </td><td><a href="../api_c/memp_fget.html#DB_MPOOL_LAST">DB_MPOOL_LAST</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;get </td><td><a href="../api_c/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_MULTIPLE">DB_MULTIPLE</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_recover </td><td><a href="../api_c/txn_recover.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>DB-&gt;put </td><td><a href="../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_put </td><td><a href="../api_c/dbc_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;open </td><td><a href="../api_c/memp_fopen.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DB-&gt;verify </td><td><a href="../api_c/db_verify.html#DB_NOORDERCHK">DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>DB-&gt;put </td><td><a href="../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_rpc_server.html#3">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_rpc_server </td><td><a href="../api_c/env_set_rpc_server.html#DB_NOSERVER">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_rpc_server </td><td><a href="../api_c/env_set_rpc_server.html#DB_NOSERVER_HOME">DB_NOSERVER_HOME</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_rpc_server.html#4">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_rpc_server </td><td><a href="../api_c/env_set_rpc_server.html#DB_NOSERVER_ID">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>DB-&gt;close </td><td><a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;open </td><td><a href="../api_c/memp_fopen.html#DB_ODDFILESIZE">DB_ODDFILESIZE</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>DB-&gt;upgrade </td><td><a href="../api_c/db_upgrade.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>DB-&gt;verify </td><td><a href="../api_c/db_verify.html#DB_ORDERCHKONLY">DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_OVERWRITE">DB_OVERWRITE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/memp_fget.html#3">DB_PAGE_NOTFOUND</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_dup </td><td><a href="../api_c/dbc_dup.html#DB_POSITION">DB_POSITION</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a></td></tr>
+<tr><td align=right>DB-&gt;verify </td><td><a href="../api_c/db_verify.html#DB_PRINTABLE">DB_PRINTABLE</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right>DB-&gt;set_cache_priority </td><td><a href="../api_c/db_set_cache_priority.html#DB_PRIORITY_DEFAULT">DB_PRIORITY_DEFAULT</a></td></tr>
+<tr><td align=right>DB-&gt;set_cache_priority </td><td><a href="../api_c/db_set_cache_priority.html#DB_PRIORITY_HIGH">DB_PRIORITY_HIGH</a></td></tr>
+<tr><td align=right>DB-&gt;set_cache_priority </td><td><a href="../api_c/db_set_cache_priority.html#DB_PRIORITY_LOW">DB_PRIORITY_LOW</a></td></tr>
+<tr><td align=right>DB-&gt;set_cache_priority </td><td><a href="../api_c/db_set_cache_priority.html#DB_PRIORITY_VERY_HIGH">DB_PRIORITY_VERY_HIGH</a></td></tr>
+<tr><td align=right>DB-&gt;set_cache_priority </td><td><a href="../api_c/db_set_cache_priority.html#DB_PRIORITY_VERY_LOW">DB_PRIORITY_VERY_LOW</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_QUEUE">DB_QUEUE</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>DB_MPOOLFILE-&gt;open </td><td><a href="../api_c/memp_fopen.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_RECNO">DB_RECNO</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_feedback </td><td><a href="../api_c/env_set_feedback.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;rep_start </td><td><a href="../api_c/rep_start.html#DB_REP_CLIENT">DB_REP_CLIENT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;rep_start </td><td><a href="../api_c/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;rep_start </td><td><a href="../api_c/rep_start.html#DB_REP_MASTER">DB_REP_MASTER</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_rep_transport </td><td><a href="../api_c/rep_transport.html#DB_REP_PERMANENT">DB_REP_PERMANENT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/rep_elect.html#3">DB_REP_UNAVAIL</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>DB-&gt;join </td><td><a href="../api_c/db_join.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right>DB-&gt;verify </td><td><a href="../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>DB_LOGC-&gt;get </td><td><a href="../api_c/logc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_timeout </td><td><a href="../api_c/env_set_timeout.html#DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DB_TXN-&gt;set_timeout </td><td><a href="../api_c/txn_set_timeout.html#DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a></td></tr>
+<tr><td align=right>DB-&gt;get </td><td><a href="../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>DBcursor-&gt;c_get </td><td><a href="../api_c/dbc_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_timeout </td><td><a href="../api_c/env_set_timeout.html#DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>DB_TXN-&gt;set_timeout </td><td><a href="../api_c/txn_set_timeout.html#DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>DB-&gt;set_flags </td><td><a href="../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_stat </td><td><a href="../api_c/lock_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;log_stat </td><td><a href="../api_c/log_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;memp_stat </td><td><a href="../api_c/memp_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;rep_stat </td><td><a href="../api_c/rep_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_stat </td><td><a href="../api_c/txn_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_strerror.html#2">db_strerror</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbt_bulk.html#2">DBT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbt_class.html#2">DBT</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_class.html#2">DB_TXN</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_app_dispatch </td><td><a href="../api_c/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_app_dispatch </td><td><a href="../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_app_dispatch </td><td><a href="../api_c/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_app_dispatch </td><td><a href="../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_abort.html#2">DB_TXN-&gt;abort</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_commit.html#2">DB_TXN-&gt;commit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_discard.html#2">DB_TXN-&gt;discard</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_id.html#2">DB_TXN-&gt;id</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_prepare.html#2">DB_TXN-&gt;prepare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_set_timeout.html#2">DB_TXN-&gt;set_timeout</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_begin </td><td><a href="../api_c/txn_begin.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DB_TXN-&gt;commit </td><td><a href="../api_c/txn_commit.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_begin </td><td><a href="../api_c/txn_begin.html#DB_TXN_NOWAIT">DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_app_dispatch </td><td><a href="../api_c/env_set_app_dispatch.html#DB_TXN_PRINT">DB_TXN_PRINT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;txn_begin </td><td><a href="../api_c/txn_begin.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DB_TXN-&gt;commit </td><td><a href="../api_c/txn_commit.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a></td></tr>
+<tr><td align=right>DB-&gt;open </td><td><a href="../api_c/db_open.html#DB_UNKNOWN">DB_UNKNOWN</a></td></tr>
+<tr><td align=right>DB-&gt;set_feedback </td><td><a href="../api_c/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;remove </td><td><a href="../api_c/env_remove.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;open </td><td><a href="../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;remove </td><td><a href="../api_c/env_remove.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_verbose </td><td><a href="../api_c/env_set_verbose.html#DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_verbose </td><td><a href="../api_c/env_set_verbose.html#DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_verbose </td><td><a href="../api_c/env_set_verbose.html#DB_VERB_RECOVERY">DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_verbose </td><td><a href="../api_c/env_set_verbose.html#DB_VERB_REPLICATION">DB_VERB_REPLICATION</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_verbose </td><td><a href="../api_c/env_set_verbose.html#DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>DB-&gt;set_feedback </td><td><a href="../api_c/db_set_feedback.html#DB_VERIFY">DB_VERIFY</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_verify.html#3">DB_VERIFY_BAD</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_version.html#2">db_version</a></td></tr>
+<tr><td align=right>DB-&gt;cursor </td><td><a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>db_create </td><td><a href="../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/txn_prepare.html#3">DB_XIDDATASIZE</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;set_flags </td><td><a href="../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right>utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/debug/intro.html#2">debugging</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#4">degrees</a> of isolation</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#4">dirty</a> reads</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-shared">--disable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-static">--disable-static</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#2">Distributed</a> Transactions</td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#dlen">dlen</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#doff">doff</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#5">double</a> buffering</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_flags.html#5">duplicate</a> data items</td></tr>
+<tr><td align=right>sorted </td><td><a href="../api_c/db_set_flags.html#6">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/embedix.html#2">Embedix</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#3">emptying</a> a database</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right>database </td><td><a href="../api_c/db_set_flags.html#4">encryption</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/encrypt.html#2">encryption</a></td></tr>
+<tr><td align=right>turn off access to a database </td><td><a href="../api_c/env_set_flags.html#9">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/faq.html#2">environment</a> FAQ</td></tr>
+<tr><td align=right>fault database </td><td><a href="../api_c/env_set_flags.html#10">environment</a> in during open</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right>use </td><td><a href="../api_c/env_open.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>use </td><td><a href="../api_c/env_remove.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>introduction to database </td><td><a href="../ref/env/intro.html#2">environments</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#2">equality</a> join</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/error.html#2">error</a> handling</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right>selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/hotfail.html#2">failover</a></td></tr>
+<tr><td align=right>Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>configuring without large </td><td><a href="../ref/build_unix/conf.html#8">file</a> support</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>returning pages to the </td><td><a href="../ref/am_misc/faq.html#4">filesystem</a></td></tr>
+<tr><td align=right>recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right>remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right>page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/hsearch.html#2">hsearch</a></td></tr>
+<tr><td align=right>secondary </td><td><a href="../ref/am/second.html#3">indices</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right>degrees of </td><td><a href="../ref/am_misc/stability.html#5">isolation</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#7">Java</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right>equality </td><td><a href="../ref/am/join.html#3">join</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/dbt_class.html#3">key/data</a> pairs</td></tr>
+<tr><td align=right>retrieved </td><td><a href="../ref/am_misc/perm.html#3">key/data</a> permanence</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right>changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#lock">lock</a></td></tr>
+<tr><td align=right>standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right>ignore </td><td><a href="../api_c/env_set_flags.html#6">locking</a></td></tr>
+<tr><td align=right>page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right>two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right>Berkeley DB Transactional Data Store </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>configure </td><td><a href="../api_c/env_set_flags.html#3">locking</a> for Berkeley DB Concurrent Data Store</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/page.html#3">locking</a> granularity</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/lock/intro.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right>sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right>utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right></td><td><a href="../api_c/log_compare.html#2">log_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/log/intro.html#2">logging</a> subsystem</td></tr>
+<tr><td align=right>retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#3">logical</a> record number</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/macosx.html#2">Mac</a> OS X</td></tr>
+<tr><td align=right>turn off database file </td><td><a href="../api_c/env_set_flags.html#7">memory</a> mapping</td></tr>
+<tr><td align=right></td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#2">memory</a> pool subsystem</td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#mode">mode</a></td></tr>
+<tr><td align=right>Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right>file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#4">natural</a> join</td></tr>
+<tr><td align=right>retrieving Btree records by logical record </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#obj">obj</a></td></tr>
+<tr><td align=right>DB_ENV-&gt;lock_vec </td><td><a href="../api_c/lock_vec.html#op">op</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right>ignore database environment </td><td><a href="../api_c/env_set_flags.html#8">panic</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right></td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right>retrieved key/data </td><td><a href="../ref/am_misc/perm.html#2">permanence</a></td></tr>
+<tr><td align=right>task/thread </td><td><a href="../ref/program/faq.html#2">priority</a></td></tr>
+<tr><td align=right>Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#5">QNX</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right>dirty </td><td><a href="../ref/transapp/read.html#3">reads</a></td></tr>
+<tr><td align=right>accessing Btree records by </td><td><a href="../api_c/db_set_flags.html#7">record</a> number</td></tr>
+<tr><td align=right>logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right>managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right>logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right></td><td><a href="../api_c/db_set_flags.html#10">renumbering</a> records in Recno databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#2">repeatable</a> read</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rep/intro.html#2">replication</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/xa_intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/get_bulk.html#2">retrieving</a> records in bulk</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right>turn off </td><td><a href="../api_c/db_set_flags.html#8">reverse</a> splits in Btree databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right>configuring a </td><td><a href="../ref/build_unix/conf.html#9">RPC</a> client/server</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rpc/intro.html#2">rpc</a> client/server</td></tr>
+<tr><td align=right>utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/faq.html#2">RPC</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/rpm.html#2">RPM</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right>Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/second.html#2">secondary</a> indices</td></tr>
+<tr><td align=right></td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#10">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#size">size</a></td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right>turn off reverse </td><td><a href="../api_c/db_set_flags.html#9">splits</a> in Btree databases</td></tr>
+<tr><td align=right>cursor </td><td><a href="../ref/am_misc/stability.html#3">stability</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#11">static</a> libraries</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right>utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right>configure for </td><td><a href="../api_c/env_set_flags.html#13">stress</a> testing</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right>loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right>using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../api_c/env_set_tmp_dir.html#3">temporary</a> files</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right>pre-loading </td><td><a href="../api_c/db_set_flags.html#11">text</a> files into Recno databases</td></tr>
+<tr><td align=right>loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right>dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right>building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right>lock </td><td><a href="../ref/lock/timeout.html#2">timeouts</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/lock/timeout.html#3">timeouts</a></td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_c/env_set_flags.html#11">transaction</a> commit</td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_c/env_set_flags.html#12">transaction</a> commit</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/faq.html#2">transaction</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/build.html#2">Transaction</a> Manager</td></tr>
+<tr><td align=right>administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/txn/intro.html#2">transaction</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/tune.html#2">transaction</a> tuning</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right>nested </td><td><a href="../ref/transapp/nested.html#2">transactions</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#2">truncating</a> a database</td></tr>
+<tr><td align=right>access method </td><td><a href="../ref/am_misc/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/transapp/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>simple </td><td><a href="../ref/simple_tut/intro.html#2">tutorial</a></td></tr>
+<tr><td align=right>configuring Berkeley DB with the </td><td><a href="../ref/xa/xa_config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_c/dbt_class.html#ulen">ulen</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#4">UNIX</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right>configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right>running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-embedix=DIR">--with-embedix=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-mutex=MUTEX">--with-mutex=MUTEX</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-rpm=DIR">--with-rpm=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename=NAME</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/faq.html#3">XA</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/xa_intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_associate.html b/libdb/docs/api_c/db_associate.html
new file mode 100644
index 0000000..eeae58c
--- /dev/null
+++ b/libdb/docs/api_c/db_associate.html
@@ -0,0 +1,134 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;associate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;associate</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;associate(DB *primary, DB_TXN *txnid, DB *secondary,
+ int (*callback)(DB *, const DBT *, const DBT *, DBT *),
+ u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;associate function is used to declare one database a
+secondary index for a primary database. After a secondary database has
+been "associated" with a primary database, all updates to the primary
+will be automatically reflected in the secondary and all reads from the
+secondary will return corresponding data from the primary. Note that
+as primary keys must be unique for secondary indices to work, the
+primary database must be configured without support for duplicate data
+items. See <a href="../ref/am/second.html">Secondary indices</a> for
+more information.
+<p>The <b>primary</b> argument should be a database handle for the primary
+database that is to be indexed.
+The <b>secondary</b> argument should be an open database handle of
+either a newly created and empty database that is to be used to store
+a secondary index, or of a database that was previously associated with
+the same primary and contains a secondary index. Note that it is not
+safe to associate as a secondary database a handle that is in use by
+another thread of control or has open cursors. If the handle was opened
+with the <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag it is safe to use it in multiple threads
+of control after the DB-&gt;associate method has returned. Note also
+that either secondary keys must be unique or the secondary database must
+be configured with support for duplicate data items.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>; otherwise, NULL.
+<p>The <b>callback</b> argument should refer to a callback function that
+creates a secondary key from a given primary key and data pair. When
+called, the first argument will be the secondary <a href="../api_c/db_class.html">DB</a> handle; the
+second and third arguments will be <a href="../api_c/dbt_class.html">DBT</a>s containing a primary
+key and datum respectively; and the fourth argument will be a zeroed
+DBT in which the callback function should fill in <b>data</b> and
+<b>size</b> fields that describe the secondary key.
+<a name="3"><!--meow--></a>
+<p>If the callback function needs to allocate memory for the <b>data</b>
+field rather than simply pointing into the primary key or datum, the
+<b>flags</b> field of the returned <a href="../api_c/dbt_class.html">DBT</a> should be set to
+DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the
+memory when it is done with it.
+<a name="4"><!--meow--></a>
+<p>If any key/data pair in the primary yields a null secondary key and
+should be left out of the secondary index, the callback function may
+optionally return DB_DONOTINDEX. Otherwise, the callback
+function should return 0 in case of success or any other integer error
+code in case of failure; the error code will be returned from the Berkeley DB
+interface call that initiated the callback. Note that if the callback
+function returns DB_DONOTINDEX for any key/data pairs in the
+primary database, the secondary index will not contain any reference to
+those key/data pairs, and such operations as cursor iterations and range
+queries will reflect only the corresponding subset of the database. If
+this is not desirable, the application should ensure that the callback
+function is well-defined for all possible values and never returns
+DB_DONOTINDEX.
+<p>The callback argument may be NULL if and only if both the primary and
+secondary database handles were opened with the <a href="../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a> flag.
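+<p>As an illustrative sketch only (the <b>struct customer</b> record layout
+and the function name <b>getname</b> are assumptions made for this example,
+not part of the interface), a callback that builds the secondary key from a
+fixed-size name field stored in the primary datum might look as follows:
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+<p>
+struct customer {
+	char last_name[64];		/* field used as the secondary key */
+	char first_name[64];
+};
+<p>
+/*
+ * getname --
+ *	Extract the secondary key (the last name) from a primary
+ *	key/data pair.  The result points into the primary datum, so
+ *	no memory is allocated and DB_DBT_APPMALLOC is not needed.
+ */
+int
+getname(DB *secondary, const DBT *pkey, const DBT *pdata, DBT *skey)
+{
+	struct customer *cust = (struct customer *)pdata-&gt;data;
+<p>
+	memset(skey, 0, sizeof(DBT));
+	skey-&gt;data = cust-&gt;last_name;
+	skey-&gt;size = (u_int32_t)strlen(cust-&gt;last_name) + 1;
+	return (0);
+}
+</pre></blockquote>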
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>If the secondary database is empty, walk through the primary and create
+an index to it in the empty secondary. This operation is potentially
+very expensive.
+<p>If the secondary database has been opened in an environment configured
+with transactions, each put necessary for its creation will be done in
+the context of a transaction created for the purpose.
+<p>Care should be taken not to use a newly-populated secondary database in
+another thread of control until the DB-&gt;associate call has
+returned successfully in the first thread.
+<p>If transactions are not being used, care should be taken not to modify,
+from another thread of control, a primary database that is being used to
+populate a secondary database until the DB-&gt;associate call has
+returned successfully in the first thread. If transactions are being
+used, Berkeley DB will perform appropriate locking and the application need
+not do any special operation ordering.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB-&gt;associate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DB-&gt;associate method returns a non-zero error value on failure and 0 on success.
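+<p>As a minimal sketch (error handling is abbreviated, and the database
+names and the <b>getname</b> callback sketched above are assumptions made
+for this example only), a primary database and its secondary index might
+be set up as follows:
+<p><blockquote><pre>
+DB *pri, *sec;
+int ret;
+<p>
+/* Create and open the primary database. */
+if ((ret = db_create(&amp;pri, NULL, 0)) != 0)
+	return (ret);
+if ((ret = pri-&gt;open(pri, NULL,
+    "customers.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+	return (ret);
+<p>
+/* Secondary keys (last names) may repeat, so allow sorted duplicates. */
+if ((ret = db_create(&amp;sec, NULL, 0)) != 0)
+	return (ret);
+if ((ret = sec-&gt;set_flags(sec, DB_DUP | DB_DUPSORT)) != 0)
+	return (ret);
+if ((ret = sec-&gt;open(sec, NULL,
+    "lastname.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+	return (ret);
+<p>
+/*
+ * Associate the secondary with the primary.  DB_CREATE requests that
+ * an empty secondary be populated from the records already present
+ * in the primary.
+ */
+if ((ret = pri-&gt;associate(pri, NULL, sec, getname, DB_CREATE)) != 0)
+	return (ret);
+</pre></blockquote>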
+<h1>Errors</h1>
+<p>The DB-&gt;associate method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The secondary database handle has already been associated with this or
+another database handle.
+<p>The secondary database handle is not open.
+<p>The primary database has been configured to allow duplicates.
+</dl>
+<p>The DB-&gt;associate method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;associate method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_class.html b/libdb/docs/api_c/db_class.html
new file mode 100644
index 0000000..48c3c49
--- /dev/null
+++ b/libdb/docs/api_c/db_class.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db DB;
+</pre></h3>
+<h1>Description</h1>
+<p>The DB handle is the handle for a Berkeley DB database, which may or
+may not be part of a database environment. DB handles are
+free-threaded if the <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the
+<a href="../api_c/db_open.html">DB-&gt;open</a> method when the database is opened or if the database
+environment in which the database is opened is free-threaded. The
+handle should not be closed while any other handle that refers to the
+database is in use; for example, database handles must not be closed
+while cursor handles into the database remain open, or transactions that
+include operations on the database have not yet been committed or
+aborted. Once the <a href="../api_c/db_close.html">DB-&gt;close</a>, <a href="../api_c/db_remove.html">DB-&gt;remove</a>, or
+<a href="../api_c/db_rename.html">DB-&gt;rename</a> methods are called, the handle may not be accessed again,
+regardless of the function's return.
+<h1>Class</h1>
+DB
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_close.html b/libdb/docs/api_c/db_close.html
new file mode 100644
index 0000000..c954b80
--- /dev/null
+++ b/libdb/docs/api_c/db_close.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;close(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;close method flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Because key/data pairs are cached in memory, failing
+to sync the file with the DB-&gt;close or <a href="../api_c/db_sync.html">DB-&gt;sync</a> method may
+result in inconsistent or lost information.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk. The <a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag is
+a dangerous option. It should be set only if the application is doing
+logging (with transactions) so that the database is recoverable after
+a system or application crash, or if the database is always generated
+from scratch after any system or application crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+DB-&gt;close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the <a href="../api_c/db_class.html">DB</a> concurrently, only a single
+thread may call the DB-&gt;close method.
+<p>The <a href="../api_c/db_class.html">DB</a> handle may not be accessed again after DB-&gt;close is
+called, regardless of its return.
+<p>The DB-&gt;close method returns a non-zero error value on failure and 0 on success.
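+<p>For illustration, a minimal sketch of closing a handle and reporting any
+failure might be as follows; the helper name and the use of stderr are
+assumptions made for the example:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: flush cached pages, close the handle, and report any error. */
+int
+close_database(DB *dbp)
+{
+    int ret;
+
+    if ((ret = dbp-&gt;close(dbp, 0)) != 0)
+        fprintf(stderr, "DB-&gt;close: %s\n", db_strerror(ret));
+    /* dbp may not be used again, regardless of the return value. */
+    return (ret);
+}</pre></blockquote>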
+<h1>Errors</h1>
+<p>The DB-&gt;close method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_create.html b/libdb/docs/api_c/db_create.html
new file mode 100644
index 0000000..8aee9dd
--- /dev/null
+++ b/libdb/docs/api_c/db_create.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_create</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_create</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_create(DB **dbp, DB_ENV *dbenv, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_create method creates a <a href="../api_c/db_class.html">DB</a> structure that is the
+handle for a Berkeley DB database. A pointer to this structure is returned
+in the memory to which <b>dbp</b> refers. Calling the
+<a href="../api_c/db_close.html">DB-&gt;close</a>, <a href="../api_c/db_remove.html">DB-&gt;remove</a> or <a href="../api_c/db_rename.html">DB-&gt;rename</a> methods will
+discard the returned handle.
+<p>If the <b>dbenv</b> argument is NULL, the database is standalone; that
+is, it is not part of any Berkeley DB environment.
+<p>If the <b>dbenv</b> argument is not NULL, the database is created
+within the specified Berkeley DB environment. The database access methods
+automatically make calls to the other subsystems in Berkeley DB, based on the
+enclosing environment. For example, if the environment has been
+configured to use locking, the access methods will automatically acquire
+the correct locks when reading and writing pages of the database.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_XA_CREATE">DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended
+to be accessed via applications running under an X/Open conformant
+Transaction Manager. The database will be opened in the environment
+specified by the OPENINFO parameter of the GROUPS section of the
+ubbconfig file. See the <a href="../ref/xa/xa_intro.html">XA
+Introduction</a> section in the Berkeley DB Reference Guide for more information.
+</dl>
+<p>The <a href="../api_c/db_class.html">DB</a> handle contains a special field, "app_private", which
+is declared as type "void *". This field is provided for the use of
+the application program. It is initialized to NULL and is not further
+used by Berkeley DB in any way.
+<p>The db_create method returns a non-zero error value on failure and 0 on success.
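+<p>For illustration, a minimal sketch of creating a standalone handle might be
+as follows; the helper name is an assumption made for the example:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: create a standalone database handle (no environment). */
+DB *
+new_handle(void)
+{
+    DB *dbp;
+    int ret;
+
+    if ((ret = db_create(&amp;dbp, NULL, 0)) != 0) {
+        fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+        return (NULL);
+    }
+    return (dbp);
+}</pre></blockquote>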
+<h1>Errors</h1>
+<p>The db_create method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_create method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_cursor.html b/libdb/docs/api_c/db_cursor.html
new file mode 100644
index 0000000..90ab900
--- /dev/null
+++ b/libdb/docs/api_c/db_cursor.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;cursor</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;cursor(DB *db,
+ DB_TXN *txnid, DBC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;cursor method
+creates a cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>;
+otherwise, NULL.
+<p>To transaction-protect cursor operations, cursors must be opened and
+closed within the context of a transaction, and the <b>txnid</b>
+parameter specifies the transaction context in which the cursor may be
+used.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>All read operations performed by the cursor may return modified but not
+yet committed data. Silently ignored if the <a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag
+was not specified when the underlying database was opened.
+<p><dt><a name="DB_WRITECURSOR">DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. The
+underlying database environment must have been opened using the
+<a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag.
+</dl>
+<p>The DB-&gt;cursor method returns a non-zero error value on failure and 0 on success.
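+<p>For illustration, a minimal sketch of iterating over a database with a
+cursor might be as follows; the helper name is an assumption made for the
+example:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: walk every key/data pair in the database, then close the cursor. */
+int
+walk_database(DB *dbp)
+{
+    DBC *dbc;
+    DBT key, data;
+    int ret, t_ret;
+
+    if ((ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, 0)) != 0)
+        return (ret);
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+        ;                               /* process key and data here */
+    if (ret == DB_NOTFOUND)
+        ret = 0;                        /* normal end of iteration */
+
+    if ((t_ret = dbc-&gt;c_close(dbc)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>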
+<h1>Errors</h1>
+<p>The DB-&gt;cursor method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;cursor method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;cursor method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_del.html b/libdb/docs/api_c/db_del.html
new file mode 100644
index 0000000..f0a73e6
--- /dev/null
+++ b/libdb/docs/api_c/db_del.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;del</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;del(DB *db, DB_TXN *txnid, DBT *key, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;del method removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method, the DB-&gt;del method deletes the
+key/data pair from the primary database and all secondary indices.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB-&gt;del call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>
+If the specified key is not in the database, the DB-&gt;del method will return DB_NOTFOUND.
+Otherwise, the DB-&gt;del method returns a non-zero error value on failure and 0 on success.
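+<p>For illustration, a minimal sketch of a non-transactional delete might be as
+follows; the helper name and the nul-terminated string key are assumptions
+made for the example:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: delete all records stored under a string key. */
+int
+delete_record(DB *dbp, const char *name)
+{
+    DBT key;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    key.data = (void *)name;
+    key.size = strlen(name) + 1;        /* key includes the trailing nul */
+
+    ret = dbp-&gt;del(dbp, NULL, &amp;key, 0);
+    return (ret == DB_NOTFOUND ? 0 : ret);  /* absent key is not an error here */
+}</pre></blockquote>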
+<h1>Errors</h1>
+<p>The DB-&gt;del method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;del method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;del method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_err.html b/libdb/docs/api_c/db_err.html
new file mode 100644
index 0000000..7d98557
--- /dev/null
+++ b/libdb/docs/api_c/db_err.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;err</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;err(DB *db, int error, const char *fmt, ...);
+<p>
+void
+DB-&gt;errx(DB *db, const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The <a href="../api_c/env_err.html">DB_ENV-&gt;err</a>, <a href="../api_c/env_err.html">DB_ENV-&gt;errx</a>, DB-&gt;err and
+DB-&gt;errx methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The <a href="../api_c/env_err.html">DB_ENV-&gt;err</a> method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback function has been set using the
+<a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a> method, any prefix string specified using the
+<a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, in which the
+ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent arguments
+are converted for output.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_c/env_strerror.html">db_strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback function has been set (see <a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>
+and <a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>), that function is called with two
+arguments: any prefix string specified (see <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> and
+<a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>) and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> and
+<a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a>), the error message is written to that output
+stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard
+error output stream.</blockquote>
+<p>The <a href="../api_c/env_err.html">DB_ENV-&gt;errx</a> and DB-&gt;errx methods perform identically to the
+<a href="../api_c/env_err.html">DB_ENV-&gt;err</a> and DB-&gt;err methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
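+<p>For illustration, a minimal sketch of the two calls might be as follows; the
+helper name and message text are assumptions made for the example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Sketch: report a failed operation on a named file. */
+void
+report_failure(DB *dbp, const char *file, int ret)
+{
+    /* Prefix (if any), formatted message, and the error string for ret. */
+    dbp-&gt;err(dbp, ret, "operation on %s failed", file);
+    /* Prefix (if any) and formatted message only; no error string. */
+    dbp-&gt;errx(dbp, "giving up on %s", file);
+}</pre></blockquote>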
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_fd.html b/libdb/docs/api_c/db_fd.html
new file mode 100644
index 0000000..fc507bd
--- /dev/null
+++ b/libdb/docs/api_c/db_fd.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;fd</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;fd(DB *db, int *fdp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;fd method
+copies a file descriptor representative of the underlying database into
+the memory to which <b>fdp</b> refers. A file descriptor referring to
+the same file will be returned to all processes that call
+<a href="../api_c/db_open.html">DB-&gt;open</a> with the same <b>file</b> argument. This file
+descriptor may be safely used as an argument to the <b>fcntl</b>(2)
+and <b>flock</b>(2) locking functions. The file descriptor is not
+necessarily associated with any of the underlying files actually used
+by the access method.
+<p>The DB-&gt;fd method only supports a coarse-grained form of locking.
+Applications should use the lock manager where possible.
+<p>The DB-&gt;fd method returns a non-zero error value on failure and 0 on success.
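+<p>For illustration, a minimal sketch of using the descriptor for coarse-grained
+advisory locking might be as follows; the helper name and the use of
+<b>flock</b>(2) are assumptions made for the example:
+<p><blockquote><pre>#include &lt;sys/file.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: take an exclusive advisory lock on the file backing the database. */
+int
+lock_database_file(DB *dbp)
+{
+    int fd, ret;
+
+    if ((ret = dbp-&gt;fd(dbp, &amp;fd)) != 0)
+        return (ret);
+    return (flock(fd, LOCK_EX) == -1 ? errno : 0);
+}</pre></blockquote>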
+<h1>Errors</h1>
+<p>The DB-&gt;fd method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;fd method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_get.html b/libdb/docs/api_c/db_get.html
new file mode 100644
index 0000000..77b1d71
--- /dev/null
+++ b/libdb/docs/api_c/db_get.html
@@ -0,0 +1,152 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;get(DB *db,
+ DB_TXN *txnid, DBT *key, DBT *data, u_int32_t flags);
+int
+DB-&gt;pget(DB *db,
+ DB_TXN *txnid, DBT *key, DBT *pkey, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get method retrieves key/data pairs from the database. The
+address
+and length of the data associated with the specified <b>key</b> are
+returned in the structure to which <b>data</b> refers.
+<p>In the presence of duplicate key values, DB-&gt;get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order, except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> for details.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method, the DB-&gt;get and
+DB-&gt;pget methods return the key from the secondary index and the data
+item from the primary database. In addition, the DB-&gt;pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the DB-&gt;pget interface will always fail and
+return EINVAL.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CONSUME">DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue, and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b>, as described in <a href="../api_c/dbt_class.html">DBT</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for DB_CONSUME to be specified.
+<p><dt><a name="DB_CONSUME_WAIT">DB_CONSUME_WAIT</a><dd>The DB_CONSUME_WAIT flag is the same as the DB_CONSUME
+flag, except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p>When used with the DB-&gt;pget version of this interface
+on a secondary index handle, return the secondary key/primary key/data
+tuple only if both the primary and secondary keys match the arguments.
+It is an error to use the DB_GET_BOTH flag with the DB-&gt;get
+version of this interface and a secondary index handle.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database. Upon
+return, both the <b>key</b> and <b>data</b> items will have been
+filled in.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a pointer to a logical record number (that is, a <b>db_recno_t</b>).
+This record number determines the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_MULTIPLE">DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If all
+of the data items associated with the key cannot fit into the buffer,
+the size field of the <b>data</b> argument is set to the length needed
+for the specified items, and the error ENOMEM is returned. The buffer
+to which the <b>data</b> argument refers should be large relative to
+the page size of the underlying database, aligned for unsigned integer
+access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE flag may only be used alone, or with the
+DB_GET_BOTH and DB_SET_RECNO options. The
+DB_MULTIPLE flag may not be used when accessing databases made
+into secondary indices using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method.
+<p>See <a href="../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+<p>Because the DB-&gt;get interface will not hold locks
+across Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag to the DB-&gt;get call is meaningful only in
+the presence of transactions.
+</dl>
+<p>
+If the specified key is not in the database, the DB-&gt;get method will return DB_NOTFOUND.
+If the database is a Queue or Recno database and the specified key
+exists, but was never explicitly created by the application or was
+later deleted, the DB-&gt;get method will return DB_KEYEMPTY.
+Otherwise, the DB-&gt;get method returns a non-zero error value on failure and 0 on success.
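+<p>For illustration, a minimal sketch of a simple lookup might be as follows;
+the helper name and the nul-terminated string key are assumptions made for the
+example:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: print the first data item stored under a string key. */
+int
+print_record(DB *dbp, const char *name)
+{
+    DBT key, data;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = (void *)name;
+    key.size = strlen(name) + 1;
+
+    switch (ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) {
+    case 0:
+        printf("%s: %.*s\n", name, (int)data.size, (char *)data.data);
+        return (0);
+    case DB_NOTFOUND:
+        printf("%s: not found\n", name);
+        return (0);
+    default:
+        dbp-&gt;err(dbp, ret, "DB-&gt;get(%s)", name);
+        return (ret);
+    }
+}</pre></blockquote>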
+<h1>Errors</h1>
+<p>The DB-&gt;get method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the <a href="../api_c/db_open.html">DB-&gt;open</a> method and
+none of the <a href="../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or
+<a href="../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags were set in the <a href="../api_c/dbt_class.html">DBT</a>.
+<p>The DB-&gt;pget interface was called with a <a href="../api_c/db_class.html">DB</a> handle that
+does not refer to a secondary index.
+</dl>
+<p>The DB-&gt;get method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;get method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_get_byteswapped.html b/libdb/docs/api_c/db_get_byteswapped.html
new file mode 100644
index 0000000..f20dcd0
--- /dev/null
+++ b/libdb/docs/api_c/db_get_byteswapped.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;get_byteswapped</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;get_byteswapped(DB *db, int *isswapped);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get_byteswapped method
+stores 0 into the memory location referenced by <b>isswapped</b>
+if the underlying database files were created on an architecture of the
+same byte order as the current one, and
+stores 1 into the memory location referenced by <b>isswapped</b>
+if they were not (that is, big-endian on a little-endian machine, or
+vice versa). This field may be used to determine whether application
+data needs to be adjusted for this architecture or not.
+<p>The DB-&gt;get_byteswapped interface may not be called before the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface has been called.
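+<p>For illustration, a minimal sketch might be as follows; the helper name is an
+assumption made for the example, and the handle is assumed to be open:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Sketch: return non-zero if stored integers are in the opposite byte order. */
+int
+stored_in_foreign_order(DB *dbp)
+{
+    int swapped;
+
+    if (dbp-&gt;get_byteswapped(dbp, &amp;swapped) != 0)
+        return (0);                     /* treat failure as native order here */
+    return (swapped);
+}</pre></blockquote>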
+<h1>Errors</h1>
+<p>The DB-&gt;get_byteswapped method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;get_byteswapped method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;get_byteswapped method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_get_type.html b/libdb/docs/api_c/db_get_type.html
new file mode 100644
index 0000000..066af6e
--- /dev/null
+++ b/libdb/docs/api_c/db_get_type.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;get_type</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;get_type(DB *db, DBTYPE *type);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;get_type method
+stores the type of the underlying access method (and file format) into
+the memory referenced by <b>type</b>.
+The returned value is one of DB_BTREE, DB_HASH,
+DB_RECNO, or DB_QUEUE. This value may be used to
+determine the type of the database after a return from <a href="../api_c/db_open.html">DB-&gt;open</a>
+with the <b>type</b> argument set to DB_UNKNOWN.
+<p>The DB-&gt;get_type interface may not be called before the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface has been called.
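+<p>For illustration, a minimal sketch might be as follows; the helper name is an
+assumption made for the example, and the handle is assumed to have been opened
+with the <b>type</b> argument set to DB_UNKNOWN:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Sketch: name the access method of an already-open database. */
+const char *
+access_method_name(DB *dbp)
+{
+    DBTYPE type;
+
+    if (dbp-&gt;get_type(dbp, &amp;type) != 0)
+        return ("unknown");
+    switch (type) {
+    case DB_BTREE:
+        return ("Btree");
+    case DB_HASH:
+        return ("Hash");
+    case DB_QUEUE:
+        return ("Queue");
+    case DB_RECNO:
+        return ("Recno");
+    default:
+        return ("unknown");
+    }
+}</pre></blockquote>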
+<h1>Errors</h1>
+<p>The DB-&gt;get_type method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;get_type method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;get_type method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_join.html b/libdb/docs/api_c/db_join.html
new file mode 100644
index 0000000..0cd9692
--- /dev/null
+++ b/libdb/docs/api_c/db_join.html
@@ -0,0 +1,123 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;join</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;join(DB *primary,
+ DBC **curslist, DBC **dbcp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;join method creates a specialized cursor for use in performing
+equality or natural joins on secondary indices. For information on how
+to organize your data to use this functionality, see
+<a href="../ref/am/join.html">Equality join</a>.
+<p>The <b>primary</b> argument contains the <a href="../api_c/db_class.html">DB</a> handle of the primary
+database, which is keyed by the data values found in entries in the
+<b>curslist</b>.
+<p>The <b>curslist</b> argument contains a NULL terminated array of cursors.
+Each cursor must have been initialized to refer to the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> call with the <a href="../api_c/dbc_get.html#DB_SET">DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+refers to the least number of data items to the one that refers to the
+most. By default, DB-&gt;join does this sort on behalf of its caller.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_NOSORT">DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items to which they
+refer. If the data are structured so that cursors with many data items
+also share many common elements, higher performance will result from
+listing those cursors before cursors with fewer data items; that is, a
+sort order other than the default. The DB_JOIN_NOSORT flag
+permits applications to perform join optimization prior to calling
+DB-&gt;join.
+</dl>
+<p>A newly created cursor is returned in the memory location to which
+<b>dbcp</b> refers. It
+supports only the <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> and <a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a> cursor
+functions:
+<p><dl compact>
+<p><dt><a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> was initialized. Any data value that appears in all
+items specified by the <b>curslist</b> argument is then used as a key
+into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all the cursors as a lookup key for
+the <b>primary</b>, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p><dt><a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction-protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The DB-&gt;join method returns a non-zero error value on failure and 0 on success.
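+<p>For illustration, a minimal sketch of joining two secondary indices against a
+primary database might be as follows; the helper name, index names, and key
+values are assumptions made for the example:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: find primary records that are both "blue" and "large". */
+int
+join_blue_large(DB *primary, DB *by_color, DB *by_size)
+{
+    DBC *curslist[3], *join_curs;
+    DBT key, data;
+    int ret, t_ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+
+    key.data = "blue";
+    key.size = sizeof("blue");
+    if ((ret = by_color-&gt;cursor(by_color, NULL, &amp;curslist[0], 0)) != 0)
+        return (ret);
+    if ((ret = curslist[0]-&gt;c_get(curslist[0], &amp;key, &amp;data, DB_SET)) != 0)
+        goto err1;
+
+    key.data = "large";
+    key.size = sizeof("large");
+    if ((ret = by_size-&gt;cursor(by_size, NULL, &amp;curslist[1], 0)) != 0)
+        goto err1;
+    if ((ret = curslist[1]-&gt;c_get(curslist[1], &amp;key, &amp;data, DB_SET)) != 0)
+        goto err2;
+    curslist[2] = NULL;                 /* curslist is NULL-terminated */
+
+    if ((ret = primary-&gt;join(primary, curslist, &amp;join_curs, 0)) != 0)
+        goto err2;
+    while ((ret = join_curs-&gt;c_get(join_curs, &amp;key, &amp;data, 0)) == 0)
+        ;                               /* process each matching pair here */
+    if (ret == DB_NOTFOUND)
+        ret = 0;
+    if ((t_ret = join_curs-&gt;c_close(join_curs)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+
+err2:
+    if ((t_ret = curslist[1]-&gt;c_close(curslist[1])) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+err1:
+    if ((t_ret = curslist[0]-&gt;c_close(curslist[0])) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>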
+<h1>Errors</h1>
+<p>The DB-&gt;join method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Cursor functions other than <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> or <a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a> were
+called.
+</dl>
+<p>The DB-&gt;join method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;join method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_key_range.html b/libdb/docs/api_c/db_key_range.html
new file mode 100644
index 0000000..c3ca7e0
--- /dev/null
+++ b/libdb/docs/api_c/db_key_range.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;key_range</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;key_range(DB *db, DB_TXN *txnid,
+ DBT *key, DB_KEY_RANGE *key_range, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;key_range method returns an estimate of the proportion of keys
+that are less than, equal to, and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double: <b>less</b>, <b>equal</b>,
+and <b>greater</b>. Values are in the range of 0 to 1; for example,
+if the field <b>less</b> is 0.05, 5% of the keys in the database are
+less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key, and will be non-zero otherwise.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>;
+otherwise, NULL.
+The DB-&gt;key_range method does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB-&gt;key_range method returns a non-zero error value on failure and 0 on success.
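+<p>For illustration, a minimal sketch might be as follows; the helper name and
+the nul-terminated string key are assumptions made for the example:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: report roughly where a key falls within a Btree database. */
+int
+report_key_position(DB *dbp, const char *name)
+{
+    DB_KEY_RANGE range;
+    DBT key;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    key.data = (void *)name;
+    key.size = strlen(name) + 1;
+
+    if ((ret = dbp-&gt;key_range(dbp, NULL, &amp;key, &amp;range, 0)) != 0)
+        return (ret);
+    printf("%.0f%% less than, %.0f%% equal to, %.0f%% greater than \"%s\"\n",
+        range.less * 100.0, range.equal * 100.0, range.greater * 100.0, name);
+    return (0);
+}</pre></blockquote>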
+<h1>Errors</h1>
+<p>The DB-&gt;key_range method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>The DB-&gt;key_range method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;key_range method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_list.html b/libdb/docs/api_c/db_list.html
new file mode 100644
index 0000000..01b8fc2
--- /dev/null
+++ b/libdb/docs/api_c/db_list.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Databases and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Databases and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Databases and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/db_create.html">db_create</a></td><td>Create a database handle</td></tr>
+<tr><td><a href="../api_c/db_associate.html">DB-&gt;associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><a href="../api_c/db_close.html">DB-&gt;close</a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_c/db_del.html">DB-&gt;del</a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_c/db_err.html">DB-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_c/db_err.html">DB-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_c/db_fd.html">DB-&gt;fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><a href="../api_c/db_get.html">DB-&gt;get</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_c/db_get_type.html">DB-&gt;get_type</a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_c/db_join.html">DB-&gt;join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_c/db_key_range.html">DB-&gt;key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><a href="../api_c/db_open.html">DB-&gt;open</a></td><td>Open a database</td></tr>
+<tr><td><a href="../api_c/db_get.html">DB-&gt;pget</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_c/db_put.html">DB-&gt;put</a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_c/db_remove.html">DB-&gt;remove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_c/db_rename.html">DB-&gt;rename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_c/db_set_alloc.html">DB-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../api_c/db_set_append_recno.html">DB-&gt;set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_cache_priority.html">DB-&gt;set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><a href="../api_c/db_set_encrypt.html">DB-&gt;set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_c/db_set_feedback.html">DB-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><a href="../api_c/db_stat.html">DB-&gt;stat</a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_c/db_sync.html">DB-&gt;sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../api_c/db_truncate.html">DB-&gt;truncate</a></td><td>Empty a database</td></tr>
+<tr><td><a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><a href="../api_c/db_verify.html">DB-&gt;verify</a></td><td>Verify/salvage a database</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_open.html b/libdb/docs/api_c/db_open.html
new file mode 100644
index 0000000..6340351
--- /dev/null
+++ b/libdb/docs/api_c/db_open.html
@@ -0,0 +1,161 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;open</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;open(DB *db, DB_TXN *txnid, const char *file,
+ const char *database, DBTYPE type, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue, and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally backed by a
+flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs; see <a href="../api_c/dbt_class.html">DBT</a> for more information.
+<p>The DB-&gt;open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and
+writing. The <b>file</b> argument is used as the name of an underlying
+file that will be used to back the database. The <b>database</b>
+argument is optional, and allows applications to have multiple databases
+in a single file. Although no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b>
+name. Further, the <b>database</b> argument is not supported by the
+Queue format. Finally, when opening multiple databases in the same
+physical file, it is important to consider locking and memory cache
+issues; see <a href="../ref/am/opensub.html">Opening multiple databases
+in a single file</a> for more information.
+<p>In-memory databases never intended to be preserved on disk may be
+created by setting both the <b>file</b> and <b>database</b> arguments
+to NULL. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type DBTYPE, and must be set to one of <a name="DB_BTREE">DB_BTREE</a>,
+<a name="DB_HASH">DB_HASH</a>, <a name="DB_QUEUE">DB_QUEUE</a>,
+<a name="DB_RECNO">DB_RECNO</a>, or <a name="DB_UNKNOWN">DB_UNKNOWN</a>. If
+<b>type</b> is DB_UNKNOWN, the database must already exist
+and DB-&gt;open will automatically determine its type. The
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a> method may be used to determine the underlying type of
+databases opened using DB_UNKNOWN.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB-&gt;open call within a transaction. If the call succeeds,
+the open operation will be recoverable. If the call fails, no database will
+have been created.
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create the database. If the database does not already exist and the DB_CREATE
+flag is not specified, the DB-&gt;open will fail.
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Support dirty reads; that is, read operations on the database may request the
+return of modified but not yet committed data.
+<p><dt><a name="DB_EXCL">DB_EXCL</a><dd>Return an error if the database already exists. The DB_EXCL flag is
+only meaningful when specified with the DB_CREATE flag.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the database
+will fail, regardless of the actual permissions of any underlying files.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_c/db_class.html">DB</a> handle returned by DB-&gt;open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+<p><dt><a name="DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous
+databases it might have held. Underlying filesystem primitives are used
+to implement this flag. For this reason, it is applicable only to the
+file and cannot be used to discard databases within a file.
+<p>The DB_TRUNCATE flag cannot be transaction-protected, and it is
+an error to specify it in a transaction-protected environment.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+the database open are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, the database open will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>Calling DB-&gt;open is a reasonably expensive operation, and maintaining
+a set of open databases will normally be preferable to repeatedly opening
+and closing the database for each new query.
+<p>The DB-&gt;open method returns a non-zero error value on failure and 0 on success.
+If DB-&gt;open fails, the <a href="../api_c/db_close.html">DB-&gt;close</a> method should be called to discard the
+<a href="../api_c/db_class.html">DB</a> handle.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB-&gt;open is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to DB-&gt;open are
+NULL, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create temporary backing files.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;open method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the database exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified
+(for example, an unknown database type, page size, hash function, pad byte,
+or byte order), or a flag value or parameter that is incompatible with the
+specified database.
+<p>
+The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified and fast mutexes are not
+available for this architecture.
+<p>The <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to DB-&gt;open, but was not
+specified to the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> call for the environment in which the
+<a href="../api_c/db_class.html">DB</a> handle was created.
+<p>A backing flat text file was specified in conjunction with either the <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+flag or a database environment that supports transaction
+processing.
+<p><dt>ENOENT<dd>A nonexistent <b>re_source</b> file was specified.
+</dl>
+<p>The DB-&gt;open method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;open method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_put.html b/libdb/docs/api_c/db_put.html
new file mode 100644
index 0000000..da8b70c
--- /dev/null
+++ b/libdb/docs/api_c/db_put.html
@@ -0,0 +1,108 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;put</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;put(DB *db,
+ DB_TXN *txnid, DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;put method stores key/data pairs in the database. The default
+behavior of the DB-&gt;put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the DB-&gt;put method adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_APPEND">DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the DB_APPEND flag. If a transaction enclosing a
+DB-&gt;put operation with the DB_APPEND flag aborts, the
+record number may be decremented (and later reallocated by a subsequent
+DB_APPEND operation) by the Recno access method, but will not be
+decremented or reallocated by the Queue access method.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is
+returned. The DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to DB-&gt;put with the DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB-&gt;put call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>
+Otherwise, the DB-&gt;put method returns a non-zero error value on failure and 0 on success.
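+<p>As a brief sketch (assuming <b>dbp</b> is an open database handle and
+&lt;string.h&gt; has been included; the key and data contents are
+illustrative), a simple non-transactional store looks like the following:
+<p><pre>
+DBT key, data;
+int ret;
+
+memset(&amp;key, 0, sizeof(DBT));
+memset(&amp;data, 0, sizeof(DBT));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.data = "apple";
+data.size = sizeof("apple");
+
+/* No transaction handle and no special flags. */
+ret = dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0);
+</pre>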
+<h1>Errors</h1>
+<p>The DB-&gt;put method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>The DB-&gt;put method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;put method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_remove.html b/libdb/docs/api_c/db_remove.html
new file mode 100644
index 0000000..ae84d3b
--- /dev/null
+++ b/libdb/docs/api_c/db_remove.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;remove</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;remove(DB *db,
+ const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;remove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_c/db_class.html">DB</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_c/db_class.html">DB</a> handle may not be accessed again after DB-&gt;remove is
+called, regardless of its return.
+<p>The DB-&gt;remove method returns a non-zero error value on failure and 0 on success.
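+<p>Because the handle is consumed by the call, a sketch of removing a
+database (the file name is illustrative) uses a handle that has never
+been opened:
+<p><pre>
+DB *dbp;
+int ret;
+
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+    return (ret);
+/* The handle may not be used again, whether or not the call succeeds. */
+ret = dbp-&gt;remove(dbp, "access.db", NULL, 0);
+</pre>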
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB-&gt;remove is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;remove method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;remove method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;remove method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_rename.html b/libdb/docs/api_c/db_rename.html
new file mode 100644
index 0000000..ebe6290
--- /dev/null
+++ b/libdb/docs/api_c/db_rename.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;rename</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;rename(DB *db, const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;rename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DB-&gt;rename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_c/db_class.html">DB</a> handle may not be accessed again after DB-&gt;rename is
+called, regardless of its return.
+<p>The DB-&gt;rename method returns a non-zero error value on failure and 0 on success.
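+<p>As with <a href="../api_c/db_remove.html">DB-&gt;remove</a>, a sketch of
+renaming an underlying file (file names are illustrative) uses a fresh,
+never-opened handle:
+<p><pre>
+DB *dbp;
+int ret;
+
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+    return (ret);
+/* The handle may not be used again, whether or not the call succeeds. */
+ret = dbp-&gt;rename(dbp, "access.db", NULL, "archive.db", 0);
+</pre>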
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB-&gt;rename is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;rename method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;rename method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;rename method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_alloc.html b/libdb/docs/api_c/db_set_alloc.html
new file mode 100644
index 0000000..a4a48c4
--- /dev/null
+++ b/libdb/docs/api_c/db_set_alloc.html
@@ -0,0 +1,86 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_alloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_alloc</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_alloc(DB *db,
+ void *(*app_malloc)(size_t),
+ void *(*app_realloc)(void *, size_t),
+ void (*app_free)(void *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation functions used by the <a href="../api_c/env_class.html">DB_ENV</a> and <a href="../api_c/db_class.html">DB</a>
+methods to allocate or free memory owned by the application.
+<p>There are a number of interfaces in Berkeley DB where memory is allocated by
+the library and then given to the application. For example, the
+<a href="../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_c/dbt_class.html">DBT</a> object,
+will cause the <a href="../api_c/db_class.html">DB</a> methods to allocate and reallocate memory
+which then becomes the responsibility of the calling application. (See
+<a href="../api_c/dbt_class.html">DBT</a> for more information.) Other examples are the Berkeley DB
+interfaces which return statistical information to the application:
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>, <a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>, <a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>,
+<a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>, <a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>, and <a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>. There is
+one interface in Berkeley DB where memory is allocated by the application
+and then given to the library: <a href="../api_c/db_associate.html">DB-&gt;associate</a>.
+<p>On systems in which there may be multiple library versions of the
+standard allocation routines (notably Windows NT), transferring memory
+between the library and the application will fail because the Berkeley DB
+library allocates memory from a different heap than the application uses
+to free it. To avoid this problem, the <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> and
+DB-&gt;set_alloc methods can be used to pass Berkeley DB references to the
+application's allocation routines.
+<p>It is not an error to specify only one or two of the possible allocation
+function arguments to these interfaces; however, in that case the
+specified interfaces must be compatible with the standard library
+interfaces, as they will be used together. The functions specified
+must match the calling conventions of the ANSI C X3.159-1989 (ANSI C) library routines
+of the same name.
+<p>Because databases opened within Berkeley DB environments use the allocation
+interfaces specified to the environment, it is an error to attempt to
+set those interfaces in a database created within an environment.
+<p>The DB-&gt;set_alloc interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<p>The DB-&gt;set_alloc method returns a non-zero error value on failure and 0 on success.
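+<p>As a brief sketch, a standalone database (one created outside any
+environment) might simply route these allocations through the C library,
+assuming &lt;stdlib.h&gt; has been included:
+<p><pre>
+DB *dbp;
+int ret;
+
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+    return (ret);
+/* Use the application's malloc/realloc/free for memory the
+ * library allocates on behalf of the application. */
+ret = dbp-&gt;set_alloc(dbp, malloc, realloc, free);
+</pre>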
+<h1>Errors</h1>
+<p>The DB-&gt;set_alloc method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called in a database environment.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_alloc method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_alloc method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_append_recno.html b/libdb/docs/api_c/db_set_append_recno.html
new file mode 100644
index 0000000..68e1872
--- /dev/null
+++ b/libdb/docs/api_c/db_set_append_recno.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_append_recno</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_append_recno(DB *,
+ int (*db_append_recno_fcn)(DB *dbp, DBT *data, db_recno_t recno));
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_c/db_put.html#DB_APPEND">DB_APPEND</a> option of the <a href="../api_c/db_put.html">DB-&gt;put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback function is specified using the
+DB-&gt;set_append_recno method, it will be called after the record number
+has been selected, but before the data has been stored.
+The callback function must return 0 on success and <b>errno</b> or
+a value outside of the Berkeley DB error name space on failure.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle; the data <a href="../api_c/dbt_class.html">DBT</a> to be stored; and the
+selected record number. The called function may then modify the data
+<a href="../api_c/dbt_class.html">DBT</a>.
+<p>If the callback function needs to allocate memory for the <b>data</b>
+field, the <b>flags</b> field of the returned <a href="../api_c/dbt_class.html">DBT</a> should be
+set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free
+the memory when it is done with it.
+<p>The DB-&gt;set_append_recno method configures operations performed using the specified
+<a href="../api_c/db_class.html">DB</a> handle, not all operations performed on the underlying
+database.
+<p>The DB-&gt;set_append_recno interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<p>The DB-&gt;set_append_recno method returns a non-zero error value on failure and 0 on success.
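+<p>A sketch of a callback (assuming &lt;string.h&gt; has been included; the
+record layout is purely illustrative) that copies the generated record
+number into the start of each appended data item:
+<p><pre>
+int
+stamp_recno(DB *dbp, DBT *data, db_recno_t recno)
+{
+    /* Illustrative only: store the record number chosen by
+     * Berkeley DB in the first bytes of the data item. */
+    if (data-&gt;size &gt;= sizeof(recno))
+        memcpy(data-&gt;data, &amp;recno, sizeof(recno));
+    return (0);
+}
+
+/* Registered before DB-&gt;open: */
+ret = dbp-&gt;set_append_recno(dbp, stamp_recno);
+</pre>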
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_bt_compare.html b/libdb/docs/api_c/db_set_bt_compare.html
new file mode 100644
index 0000000..ba557e1
--- /dev/null
+++ b/libdb/docs/api_c/db_set_bt_compare.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_compare</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_compare(DB *db,
+ int (*bt_compare_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_c/dbt_class.html">DBT</a> representing the
+application supplied key; the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_c/dbt_class.html">DBT</a> are the only fields that may be used for the purposes of
+this comparison, and no particular alignment of the memory to which
+the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys.
+<p>The DB-&gt;set_bt_compare method configures operations performed using the specified
+<a href="../api_c/db_class.html">DB</a> handle, not all operations performed on the underlying
+database.
+<p>The DB-&gt;set_bt_compare interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_bt_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The DB-&gt;set_bt_compare method returns a non-zero error value on failure and 0 on success.
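+<p>For example, a sketch of a comparison function for keys that hold a
+native <b>int</b> (copied first because key memory may not be aligned),
+assuming &lt;string.h&gt; has been included:
+<p><pre>
+int
+compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+    int ai, bi;
+
+    memcpy(&amp;ai, a-&gt;data, sizeof(int));
+    memcpy(&amp;bi, b-&gt;data, sizeof(int));
+    return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+
+/* Registered before DB-&gt;open: */
+ret = dbp-&gt;set_bt_compare(dbp, compare_int);
+</pre>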
+<h1>Errors</h1>
+<p>The DB-&gt;set_bt_compare method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_bt_compare method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_bt_compare method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_bt_minkey.html b/libdb/docs/api_c/db_set_bt_minkey.html
new file mode 100644
index 0000000..0d7abde
--- /dev/null
+++ b/libdb/docs/api_c/db_set_bt_minkey.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_minkey</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_minkey(DB *db, u_int32_t bt_minkey);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of key/data pairs intended to be stored on any
+single Btree leaf page.
+<p>This value is used to determine if key or data items will be stored on
+overflow pages instead of Btree leaf pages. For more information on
+the specific algorithm used, see <a href="../ref/am_conf/bt_minkey.html">Minimum keys per page</a>. The <b>bt_minkey</b> value specified must
+be at least 2; if <b>bt_minkey</b> is not explicitly set, a value of
+2 is used.
+<p>The DB-&gt;set_bt_minkey method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_bt_minkey interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_bt_minkey will
+be ignored.
+<p>The DB-&gt;set_bt_minkey method returns a non-zero error value on failure and 0 on success.
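+<p>For example, requesting that at least four key/data pairs fit on each
+Btree leaf page is a single call made before <a href="../api_c/db_open.html">DB-&gt;open</a>
+(assuming <b>dbp</b> is an unopened database handle):
+<p><pre>
+ret = dbp-&gt;set_bt_minkey(dbp, 4);
+</pre>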
+<h1>Errors</h1>
+<p>The DB-&gt;set_bt_minkey method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_bt_minkey method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_bt_minkey method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_bt_prefix.html b/libdb/docs/api_c/db_set_bt_prefix.html
new file mode 100644
index 0000000..cb8cdbc
--- /dev/null
+++ b/libdb/docs/api_c/db_set_bt_prefix.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_bt_prefix</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_bt_prefix(DB *db,
+ size_t (*bt_prefix_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_c/dbt_class.html">DBT</a> are the only fields that may be
+used for the purposes of this determination, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Berkeley DB Reference Guide for more details
+about how this works. The usefulness of this is data-dependent, but
+can produce significantly reduced tree sizes and search times in some
+data sets.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The DB-&gt;set_bt_prefix method configures operations performed using the specified
+<a href="../api_c/db_class.html">DB</a> handle, not all operations performed on the underlying
+database.
+<p>The DB-&gt;set_bt_prefix interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_bt_prefix must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The DB-&gt;set_bt_prefix method returns a non-zero error value on failure and 0 on success.
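+<p>A sketch of one possible prefix function for keys treated as simple
+byte strings, returning the number of bytes of the second key needed to
+determine its ordering with respect to the first:
+<p><pre>
+size_t
+prefix_bytes(DB *dbp, const DBT *a, const DBT *b)
+{
+    size_t cnt, len;
+    u_int8_t *p1, *p2;
+
+    cnt = 1;
+    len = a-&gt;size &lt; b-&gt;size ? a-&gt;size : b-&gt;size;
+    for (p1 = a-&gt;data, p2 = b-&gt;data; len--; ++p1, ++p2, ++cnt)
+        if (*p1 != *p2)
+            return (cnt);
+    /* One key is a prefix of the other, or they are equal. */
+    if (a-&gt;size &lt; b-&gt;size)
+        return (a-&gt;size + 1);
+    if (b-&gt;size &lt; a-&gt;size)
+        return (b-&gt;size + 1);
+    return (b-&gt;size);
+}
+</pre>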
+<h1>Errors</h1>
+<p>The DB-&gt;set_bt_prefix method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_bt_prefix method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_bt_prefix method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_cache_priority.html b/libdb/docs/api_c/db_set_cache_priority.html
new file mode 100644
index 0000000..262e700
--- /dev/null
+++ b/libdb/docs/api_c/db_set_cache_priority.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_cache_priority</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_cache_priority</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_cache_priority(DB *db, DB_CACHE_PRIORITY priority);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the cache priority for pages from the specified database. The
+priority of a page biases the replacement algorithm to be more or less
+likely to discard a page when space is needed in the buffer pool. The
+bias is temporary, and pages will eventually be discarded if they are
+not referenced again. The DB-&gt;set_cache_priority interface is
+only advisory, and does not guarantee pages will be treated in a specific
+way.
+<p>The <b>priority</b> argument must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_PRIORITY_VERY_LOW">DB_PRIORITY_VERY_LOW</a><dd>The lowest priority: pages are the most likely to be discarded.
+<dt><a name="DB_PRIORITY_LOW">DB_PRIORITY_LOW</a><dd>The next lowest priority.
+<dt><a name="DB_PRIORITY_DEFAULT">DB_PRIORITY_DEFAULT</a><dd>The default priority.
+<dt><a name="DB_PRIORITY_HIGH">DB_PRIORITY_HIGH</a><dd>The next highest priority.
+<dt><a name="DB_PRIORITY_VERY_HIGH">DB_PRIORITY_VERY_HIGH</a><dd>The highest priority: pages are the least likely to be discarded.
+</dl>
+<p>The DB-&gt;set_cache_priority method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_cache_priority interface may be called at any time during the life of
+the application.
+<p>The DB-&gt;set_cache_priority method returns a non-zero error value on failure and 0 on success.
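+<p>For example, hinting that pages from a small, frequently used database
+should stay in the cache as long as possible (assuming <b>dbp</b> is a
+database handle):
+<p><pre>
+ret = dbp-&gt;set_cache_priority(dbp, DB_PRIORITY_VERY_HIGH);
+</pre>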
+<h1>Errors</h1>
+<p>The DB-&gt;set_cache_priority method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_cache_priority method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_cachesize.html b/libdb/docs/api_c/db_set_cachesize.html
new file mode 100644
index 0000000..31ff83e
--- /dev/null
+++ b/libdb/docs/api_c/db_set_cachesize.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_cachesize(DB *db,
+ u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>Because databases opened within Berkeley DB environments use the cache
+specified to the environment, it is an error to attempt to set a cache
+in a database created within an environment.
+<p>The DB-&gt;set_cachesize interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<p>The DB-&gt;set_cachesize method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
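+<p>For example, a sketch requesting a 64MB cache in a single contiguous
+region for a standalone database, before <a href="../api_c/db_open.html">DB-&gt;open</a>
+is called (assuming <b>dbp</b> is an unopened database handle):
+<p><pre>
+/* 0 gigabytes plus 64MB, in one contiguous piece of memory. */
+ret = dbp-&gt;set_cachesize(dbp, 0, 64 * 1024 * 1024, 1);
+</pre>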
+<h1>Errors</h1>
+<p>The DB-&gt;set_cachesize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called in a database environment.
+<p>Called after
+<a href="../api_c/db_open.html">DB-&gt;open</a>
+was called.
+</dl>
+<p>The DB-&gt;set_cachesize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_cachesize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_dup_compare.html b/libdb/docs/api_c/db_set_dup_compare.html
new file mode 100644
index 0000000..afa443d
--- /dev/null
+++ b/libdb/docs/api_c/db_set_dup_compare.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_dup_compare</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_dup_compare(DB *db,
+ int (*dup_compare_fcn)(DB *, const DBT *, const DBT *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_c/dbt_class.html">DBT</a> representing the
+application's data item; the second is the current tree's data item.
+Calling DB-&gt;set_dup_compare implies calling <a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>
+with the <a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_c/dbt_class.html">DBT</a> are the only fields that may be
+used for the purposes of this comparison, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+<p>The DB-&gt;set_dup_compare interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_dup_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The DB-&gt;set_dup_compare method returns a non-zero error value on failure and 0 on success.
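+<p>A sketch for data items holding a native <b>int</b> (copied first
+because the memory may not be aligned), assuming &lt;string.h&gt; has been
+included; registering the function implies the <a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a>
+flag as described above:
+<p><pre>
+int
+dup_compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+    int ai, bi;
+
+    memcpy(&amp;ai, a-&gt;data, sizeof(int));
+    memcpy(&amp;bi, b-&gt;data, sizeof(int));
+    return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+
+/* Registered before DB-&gt;open: */
+ret = dbp-&gt;set_dup_compare(dbp, dup_compare_int);
+</pre>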
+<h1>Errors</h1>
+<p>The DB-&gt;set_dup_compare method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;set_dup_compare method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_dup_compare method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_encrypt.html b/libdb/docs/api_c/db_set_encrypt.html
new file mode 100644
index 0000000..addc058
--- /dev/null
+++ b/libdb/docs/api_c/db_set_encrypt.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_encrypt(DB *db, const char *passwd, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_c/env_class.html">DB_ENV</a> and <a href="../api_c/db_class.html">DB</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_ENCRYPT_AES">DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>Because databases opened within Berkeley DB environments use the password
+specified to the environment, it is an error to attempt to set a
+password in a database created within an environment.
+<p>The DB-&gt;set_encrypt interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<p>The DB-&gt;set_encrypt method returns a non-zero error value on failure and 0 on success.
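+<p>For illustration, a standalone database (one not created within a
+database environment) might be encrypted by supplying a password before
+the handle is opened; the password shown is hypothetical:
+<p><blockquote><pre>
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+		return (ret);
+	/* Must be called before DB-&gt;open. */
+	if ((ret = dbp-&gt;set_encrypt(dbp, "example-password", DB_ENCRYPT_AES)) != 0)
+		return (ret);
+</pre></blockquote>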
+<h1>Errors</h1>
+<p>The DB-&gt;set_encrypt method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_c/db_open.html">DB-&gt;open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The DB-&gt;set_encrypt method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_encrypt method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_errcall.html b/libdb/docs/api_c/db_set_errcall.html
new file mode 100644
index 0000000..ea89fd6
--- /dev/null
+++ b/libdb/docs/api_c/db_set_errcall.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errcall(DB *,
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The <a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a> and DB-&gt;set_errcall methods are used to
+enhance the mechanism for reporting error messages to the application.
+In some cases, when an error occurs, Berkeley DB will call
+<b>db_errcall_fcn</b> with additional error information. The function
+must be declared with two arguments; the first will be the prefix string
+(as previously set by <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>);
+the second will be the error message string. It is up to the
+<b>db_errcall_fcn</b> function to display the error message in an
+appropriate manner.
+<p>Alternatively, you can use the <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> or
+<a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a> methods to display the additional information via
+a C library FILE *.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_c/db_class.html">DB</a> handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errcall method affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a> method.
+<p>The DB-&gt;set_errcall interface may be called at any time during the life of
+the application.
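+<p>For illustration, an application might route Berkeley DB error messages
+through a simple callback such as the hypothetical function below:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+
+/* Hypothetical error callback: write "prefix: message" to stderr. */
+void
+my_errcall(const char *errpfx, char *msg)
+{
+	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
+}
+	...
+	dbp-&gt;set_errcall(dbp, my_errcall);
+</pre></blockquote>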
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_errfile.html b/libdb/docs/api_c/db_set_errfile.html
new file mode 100644
index 0000000..49f8a04
--- /dev/null
+++ b/libdb/docs/api_c/db_set_errfile.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_errfile</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errfile(DB *db, FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The <a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a> and DB-&gt;set_errfile methods are used to
+enhance the mechanism for reporting error messages to the application
+by setting a C library FILE * to be used for displaying additional Berkeley DB
+error messages. In some cases, when an error occurs, Berkeley DB will output
+an additional error message to the specified file reference.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_c/db_class.html">DB</a> handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errfile method affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a> method.
+<p>The DB-&gt;set_errfile interface may be called at any time during the life of
+the application.
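+<p>For illustration, additional error messages might simply be directed to
+the standard error stream:
+<p><blockquote><pre>
+	/* Send additional Berkeley DB error messages to stderr. */
+	dbp-&gt;set_errfile(dbp, stderr);
+</pre></blockquote>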
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_errpfx.html b/libdb/docs/api_c/db_set_errpfx.html
new file mode 100644
index 0000000..9506c77
--- /dev/null
+++ b/libdb/docs/api_c/db_set_errpfx.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB-&gt;set_errpfx(DB *db, const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DB-&gt;set_errpfx and <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a> methods do not copy
+the memory to which the <b>errpfx</b> argument refers; rather, they
+maintain a reference to it. Although this allows applications to modify
+the error message prefix at any time (without repeatedly calling the
+interfaces), it means the memory must be maintained until the handle is
+closed.
+<p>For <a href="../api_c/db_class.html">DB</a> handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_errpfx method affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a> method.
+<p>The DB-&gt;set_errpfx interface may be called at any time during the life of
+the application.
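+<p>For illustration, because the prefix string is referenced rather than
+copied, it is commonly stored in static storage; the program name below is
+hypothetical:
+<p><blockquote><pre>
+	/* Static storage keeps the prefix valid until the handle is closed. */
+	static const char progname[] = "my_app";
+	dbp-&gt;set_errpfx(dbp, progname);
+</pre></blockquote>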
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_feedback.html b/libdb/docs/api_c/db_set_feedback.html
new file mode 100644
index 0000000..0ee3b7e
--- /dev/null
+++ b/libdb/docs/api_c/db_set_feedback.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_feedback(DB *,
+ void (*db_feedback_fcn)(DB *dbp, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DB-&gt;set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback function. This function must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle; the second a flag value; and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback function to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_UPGRADE">DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="DB_VERIFY">DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The DB-&gt;set_feedback interface may be called at any time during the life of
+the application.
+<p>The DB-&gt;set_feedback method returns a non-zero error value on failure and 0 on success.
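+<p>For illustration, a callback might report upgrade or verification
+progress on the standard output; the function name is hypothetical:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+
+/* Hypothetical feedback callback: report percent complete. */
+void
+my_feedback(DB *dbp, int opcode, int pct)
+{
+	const char *op;
+
+	op = opcode == DB_UPGRADE ? "upgrade" :
+	    (opcode == DB_VERIFY ? "verify" : "operation");
+	printf("%s: %d%% complete\n", op, pct);
+}
+	...
+	(void)dbp-&gt;set_feedback(dbp, my_feedback);
+</pre></blockquote>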
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_flags.html b/libdb/docs/api_c/db_set_flags.html
new file mode 100644
index 0000000..72942cf
--- /dev/null
+++ b/libdb/docs/api_c/db_set_flags.html
@@ -0,0 +1,226 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_flags(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Calling DB-&gt;set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<h3>General</h3>
+<p>The following flags may be specified for any Berkeley DB access method:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_CHKSUM_SHA1">DB_CHKSUM_SHA1</a><dd>Do checksum verification of pages read into the cache from the backing
+filestore, using the SHA1 Secure Hash Algorithm.
+<p>Calling DB-&gt;set_flags with the DB_CHKSUM_SHA1 flag only affects the
+specified <a href="../api_c/db_class.html">DB</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_CHKSUM_SHA1
+flag
+will be ignored.
+If creating additional databases in a file, the checksum behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+<a name="4"><!--meow--></a>
+<p><dt><a name="DB_ENCRYPT">DB_ENCRYPT</a><dd>Encrypt the database using the cryptographic password specified to the
+<a href="../api_c/env_set_encrypt.html">DB_ENV-&gt;set_encrypt</a> or <a href="../api_c/db_set_encrypt.html">DB-&gt;set_encrypt</a> methods.
+<p>Calling DB-&gt;set_flags with the DB_ENCRYPT flag only affects the
+specified <a href="../api_c/db_class.html">DB</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_ENCRYPT
+flag
+must be the same as the existing database or an error
+will be returned.
+If creating additional databases in a file, the encryption behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+</dl>
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<a name="5"><!--meow--></a>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both DB_DUP
+and DB_RECNUM.
+<p>Calling DB-&gt;set_flags with the DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="6"><!--meow--></a>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> method, a default lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p>Calling DB-&gt;set_flags with the DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="7"><!--meow--></a>
+<p><dt><a name="DB_RECNUM">DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the <a href="../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag to the <a href="../api_c/db_get.html">DB-&gt;get</a>
+and <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the DB_RENUMBER flag in the
+Recno access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are
+stored. In addition, the entire tree must be locked during both
+insertions and deletions, effectively single-threading the tree for
+those operations. Specifying DB_RECNUM can result in serious
+performance degradation for some applications and data sets.
+<p>It is an error to specify both DB_DUP and DB_RECNUM.
+<p>Calling DB-&gt;set_flags with the DB_RECNUM flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_RECNUM
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="8"><!--meow--></a><a name="9"><!--meow--></a>
+<p><dt><a name="DB_REVSPLITOFF">DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands; that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of two pages is far
+smaller than that in a database of 100 pages, so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+<p>Calling DB-&gt;set_flags with the DB_REVSPLITOFF flag only affects the
+specified <a href="../api_c/db_class.html">DB</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both DB_DUP
+and DB_RECNUM.
+<p>Calling DB-&gt;set_flags with the DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> method, a default lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p>Calling DB-&gt;set_flags with the DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<a name="10"><!--meow--></a>
+<p><dt><a name="DB_RENUMBER">DB_RENUMBER</a><dd>Specifying the DB_RENUMBER flag causes the logical record
+numbers to be mutable, and change as records are added to and deleted
+from the database. For example, the deletion of record number 4 causes
+records numbered 5 and greater to be renumbered downward by one. If a
+cursor was positioned to record number 4 before the deletion, it will
+refer to the new record number 4, if any such record exists, after the
+deletion. If a cursor was positioned after record number 4 before the
+deletion, it will be shifted downward one logical record, continuing to
+refer to the same record as it did before.
+<p>Using the <a href="../api_c/db_put.html">DB-&gt;put</a> or <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by one.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by one. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward one logical record, continuing to refer to the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p>Calling DB-&gt;set_flags with the DB_RENUMBER flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_c/db_open.html">DB-&gt;open</a> is called, the DB_RENUMBER
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="11"><!--meow--></a>
+<p><dt><a name="DB_SNAPSHOT">DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read
+in its entirety when <a href="../api_c/db_open.html">DB-&gt;open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+<p>Calling DB-&gt;set_flags with the DB_SNAPSHOT flag only affects the
+specified <a href="../api_c/db_class.html">DB</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<p>The DB-&gt;set_flags interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<p>The DB-&gt;set_flags method returns a non-zero error value on failure and 0 on success.
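+<p>For illustration, a Btree database permitting sorted duplicates might
+be configured as follows before <a href="../api_c/db_open.html">DB-&gt;open</a> is called; the handle name is
+hypothetical and the default lexical duplicate ordering is assumed:
+<p><blockquote><pre>
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+		return (ret);
+	/* Sorted duplicates; configure before calling DB-&gt;open. */
+	if ((ret = dbp-&gt;set_flags(dbp, DB_DUPSORT)) != 0)
+		return (ret);
+</pre></blockquote>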
+<h1>Errors</h1>
+<p>The DB-&gt;set_flags method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;set_flags method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_flags method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_h_ffactor.html b/libdb/docs/api_c/db_set_h_ffactor.html
new file mode 100644
index 0000000..ddbf217
--- /dev/null
+++ b/libdb/docs/api_c/db_set_h_ffactor.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_ffactor</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_ffactor(DB *db, u_int32_t h_ffactor);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+data set, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to the following:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
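+<p>For illustration, with a hypothetical 4096-byte page size, an average
+key size of 16 bytes, and an average data size of 64 bytes, the rule above
+gives (4096 - 32) / (16 + 64 + 8), or roughly 46:
+<p><blockquote><pre>
+	/* Fill factor computed from the assumed average key/data sizes. */
+	if ((ret = dbp-&gt;set_h_ffactor(dbp, 46)) != 0)
+		return (ret);
+</pre></blockquote>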
+<p>The DB-&gt;set_h_ffactor method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_h_ffactor interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_h_ffactor will
+be ignored.
+<p>The DB-&gt;set_h_ffactor method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB-&gt;set_h_ffactor method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_h_ffactor method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_h_ffactor method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_h_hash.html b/libdb/docs/api_c/db_set_h_hash.html
new file mode 100644
index 0000000..87fe5a0
--- /dev/null
+++ b/libdb/docs/api_c/db_set_h_hash.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_hash</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_hash(DB *db,
+ u_int32_t (*h_hash_fcn)(DB *, const void *bytes, u_int32_t length));
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Because no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash functions must
+take a pointer to a byte string and a length as arguments, and return a
+value of type
+<b>u_int32_t</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_c/db_open.html">DB-&gt;open</a> will attempt to determine
+whether the hash method specified is the same as the one with which the
+database was created, and will fail if it detects that it is not.
+<p>The DB-&gt;set_h_hash method configures operations performed using the specified
+<a href="../api_c/db_class.html">DB</a> handle, not all operations performed on the underlying
+database.
+<p>The DB-&gt;set_h_hash interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_h_hash must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The DB-&gt;set_h_hash method returns a non-zero error value on failure and 0 on success.
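+<p>For illustration, a simple user-defined hash function over the key
+bytes might look like the hypothetical function below:
+<p><blockquote><pre>
+/* Hypothetical multiplicative hash over the key bytes. */
+u_int32_t
+my_hash(DB *dbp, const void *bytes, u_int32_t length)
+{
+	const u_int8_t *p;
+	u_int32_t hash;
+
+	hash = 0;
+	for (p = bytes; length &gt; 0; ++p, --length)
+		hash = hash * 31 + *p;
+	return (hash);
+}
+	...
+	/* Must be called before DB-&gt;open. */
+	if ((ret = dbp-&gt;set_h_hash(dbp, my_hash)) != 0)
+		return (ret);
+</pre></blockquote>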
+<h1>Errors</h1>
+<p>The DB-&gt;set_h_hash method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_h_hash method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_h_hash method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_h_nelem.html b/libdb/docs/api_c/db_set_h_nelem.html
new file mode 100644
index 0000000..3d88827
--- /dev/null
+++ b/libdb/docs/api_c/db_set_h_nelem.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_h_nelem</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_h_nelem(DB *db, u_int32_t h_nelem);
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>In order for the estimate to be used when creating the database,
+the <a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a> method must also be called.
+If the estimate or fill factor is not set, or is set too low,
+hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The DB-&gt;set_h_nelem method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_h_nelem interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_h_nelem will
+be ignored.
+<p>The DB-&gt;set_h_nelem method returns a non-zero error value on failure and 0 on success.
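+<p>For illustration, the element estimate is typically paired with a fill
+factor; the values below are hypothetical:
+<p><blockquote><pre>
+	/* Estimate one million elements with an assumed fill factor of 46. */
+	if ((ret = dbp-&gt;set_h_ffactor(dbp, 46)) != 0)
+		return (ret);
+	if ((ret = dbp-&gt;set_h_nelem(dbp, 1000000)) != 0)
+		return (ret);
+</pre></blockquote>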
+<h1>Errors</h1>
+<p>The DB-&gt;set_h_nelem method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_h_nelem method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_h_nelem method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_lorder.html b/libdb/docs/api_c/db_set_lorder.html
new file mode 100644
index 0000000..fe961ce
--- /dev/null
+++ b/libdb/docs/api_c/db_set_lorder.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_lorder</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_lorder(DB *db, int lorder);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big endian
+order is the value 4,321, and little endian order is the value 1,234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The DB-&gt;set_lorder method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_lorder interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_lorder will
+be ignored.
+If creating additional databases in a file, the byte order specified must
+be consistent with the existing databases in the file or an error will be
+returned.
+<p>The DB-&gt;set_lorder method returns a non-zero error value on failure and 0 on success.
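+<p>For illustration, a database whose metadata should be stored in
+big-endian order regardless of the creating host might be configured as
+follows:
+<p><blockquote><pre>
+	/* 4321 selects big-endian byte order for database metadata. */
+	if ((ret = dbp-&gt;set_lorder(dbp, 4321)) != 0)
+		return (ret);
+</pre></blockquote>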
+<h1>Errors</h1>
+<p>The DB-&gt;set_lorder method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_lorder method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_lorder method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_pagesize.html b/libdb/docs/api_c/db_set_pagesize.html
new file mode 100644
index 0000000..0bf5132
--- /dev/null
+++ b/libdb/docs/api_c/db_set_pagesize.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_pagesize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_pagesize(DB *db, u_int32_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes, and the maximum page size is 64K
+bytes. If the page size is not explicitly set, one is selected based
+on the underlying filesystem I/O block size. The automatically selected
+size has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The DB-&gt;set_pagesize method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_pagesize interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_pagesize will
+be ignored.
+If creating additional databases in a file, the page size specified must
+be consistent with the existing databases in the file or an error will
+be returned.
+<p>The DB-&gt;set_pagesize method returns a non-zero error value on failure and 0 on success.
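+<p>For illustration, a database holding comparatively large items might
+select a larger page size before the handle is opened; the value is
+hypothetical:
+<p><blockquote><pre>
+	/* 8KB pages, within the documented 512-byte to 64KB range. */
+	if ((ret = dbp-&gt;set_pagesize(dbp, 8 * 1024)) != 0)
+		return (ret);
+</pre></blockquote>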
+<h1>Errors</h1>
+<p>The DB-&gt;set_pagesize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_pagesize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_pagesize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_paniccall.html b/libdb/docs/api_c/db_set_paniccall.html
new file mode 100644
index 0000000..5787edb
--- /dev/null
+++ b/libdb/docs/api_c/db_set_paniccall.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_paniccall</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_paniccall(DB *db,
+ void (*paniccall)(DB_ENV *, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). In these cases, the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>
+is returned by Berkeley DB.
+<p>When such errors occur, it is often simpler to shut down the
+application than to try to gracefully return up the
+stack. The <a href="../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a> and DB-&gt;set_paniccall methods
+are used to specify functions to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB interface.
+When called, the <b>dbenv</b> argument will be a reference to the
+current environment, and the <b>errval</b> argument is the error value
+that would have been returned to the calling function.
+<p>For <a href="../api_c/db_class.html">DB</a> handles opened inside of Berkeley DB environments, calling the
+DB-&gt;set_paniccall method affects the entire environment and is equivalent to calling
+the <a href="../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a> method.
+<p>The DB-&gt;set_paniccall interface may be called at any time during the life of
+the application.
+<p>The DB-&gt;set_paniccall method returns a non-zero error value on failure and 0 on success.
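+<p>For illustration, a panic callback might log the failure and terminate
+the process; the function name is hypothetical:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Hypothetical panic callback: log the error and abort. */
+void
+my_paniccall(DB_ENV *dbenv, int errval)
+{
+	fprintf(stderr, "fatal Berkeley DB error %d; run recovery\n", errval);
+	abort();
+}
+	...
+	(void)dbp-&gt;set_paniccall(dbp, my_paniccall);
+</pre></blockquote>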
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_q_extentsize.html b/libdb/docs/api_c/db_set_q_extentsize.html
new file mode 100644
index 0000000..4f587e5
--- /dev/null
+++ b/libdb/docs/api_c/db_set_q_extentsize.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_q_extentsize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_q_extentsize(DB *db, u_int32_t extentsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting a extent size</a>.
+<p>The DB-&gt;set_q_extentsize method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_q_extentsize interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_q_extentsize will
+be ignored.
+<p>The DB-&gt;set_q_extentsize method returns a non-zero error value on failure and 0 on success.
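+<p>For illustration, a Queue database might be split into extent files of
+a hypothetical 16,384 pages each:
+<p><blockquote><pre>
+	/* Each extent file will hold 16384 pages. */
+	if ((ret = dbp-&gt;set_q_extentsize(dbp, 16384)) != 0)
+		return (ret);
+</pre></blockquote>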
+<h1>Errors</h1>
+<p>The DB-&gt;set_q_extentsize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_q_extentsize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_q_extentsize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_re_delim.html b/libdb/docs/api_c/db_set_re_delim.html
new file mode 100644
index 0000000..8e14034
--- /dev/null
+++ b/libdb/docs/api_c/db_set_re_delim.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_delim</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_delim(DB *db, int re_delim);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable length records if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte was specified, &lt;newline&gt; characters (that
+is, ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The DB-&gt;set_re_delim method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_re_delim interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_re_delim will
+be ignored.
+<p>The DB-&gt;set_re_delim method returns a non-zero error value on failure and 0 on success.
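+<p>For illustration, a backing source file whose records end with a colon
+rather than a &lt;newline&gt; character might be configured as follows; the
+delimiter is hypothetical:
+<p><blockquote><pre>
+	/* Records in the re_source file are terminated by ':'. */
+	if ((ret = dbp-&gt;set_re_delim(dbp, ':')) != 0)
+		return (ret);
+</pre></blockquote>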
+<h1>Errors</h1>
+<p>The DB-&gt;set_re_delim method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_re_delim method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_re_delim method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_re_len.html b/libdb/docs/api_c/db_set_re_len.html
new file mode 100644
index 0000000..209b124
--- /dev/null
+++ b/libdb/docs/api_c/db_set_re_len.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_len</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_len(DB *db, u_int32_t re_len);
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>. For the Queue access method, the record length must be
+sufficiently smaller than the database's page size that at least one record
+plus the database page's metadata information can fit on each database
+page.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte-delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The DB-&gt;set_re_len method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_re_len interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_re_len will
+be ignored.
+<p>The DB-&gt;set_re_len method returns a non-zero error value on failure and 0 on success.
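+<p>For example, a Recno database might be configured to store 64-byte,
+space-padded fixed-length records before it is opened (the file name,
+record length, and <a href="../api_c/db_open.html">DB-&gt;open</a> argument list shown here are
+illustrative only):
+<p><pre>
+DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+/* Both configuration calls must precede DB-&gt;open. */
+if ((ret = dbp-&gt;set_re_len(dbp, 64)) != 0 ||
+    (ret = dbp-&gt;set_re_pad(dbp, ' ')) != 0 ||
+    (ret = dbp-&gt;open(dbp, NULL,
+    "fixed.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+}
+</pre>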
+<h1>Errors</h1>
+<p>The DB-&gt;set_re_len method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_re_len method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_re_len method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_re_pad.html b/libdb/docs/api_c/db_set_re_pad.html
new file mode 100644
index 0000000..311312d
--- /dev/null
+++ b/libdb/docs/api_c/db_set_re_pad.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_pad</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_pad(DB *db, int re_pad);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (that
+is, ASCII 0x20) are used for padding.
+<p>The DB-&gt;set_re_pad method configures a database, not only operations performed
+using the specified <a href="../api_c/db_class.html">DB</a> handle.
+<p>The DB-&gt;set_re_pad interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_re_pad will
+be ignored.
+<p>The DB-&gt;set_re_pad method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB-&gt;set_re_pad method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_re_pad method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_re_pad method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_set_re_source.html b/libdb/docs/api_c/db_set_re_source.html
new file mode 100644
index 0000000..c8a0fa4
--- /dev/null
+++ b/libdb/docs/api_c/db_set_re_source.html
@@ -0,0 +1,102 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;set_re_source</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;set_re_source(DB *db, char *re_source);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are
+separated, as specified by <a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>. For example,
+standard UNIX byte stream files can be interpreted as a sequence of
+variable length records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (for example, when the <a href="../api_c/db_close.html">DB-&gt;close</a> or
+<a href="../api_c/db_sync.html">DB-&gt;sync</a> methods are called), the in-memory copy of the database
+will be written back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily; that is, records
+are not read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently, and are either inserting or deleting records, the backing
+source file must be read in its entirety before more than a single
+process accesses the database, and only that process should specify the
+backing source file as part of the <a href="../api_c/db_open.html">DB-&gt;open</a> call. See the
+<a href="../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a> flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transaction-protected because it involves filesystem
+operations that are not part of the Berkeley DB transaction methodology.</b> For
+this reason, if a temporary database is used to hold the records, it is
+possible to lose the contents of the <b>re_source</b> file, for
+example, if the system crashes at the right instant. If a file is used
+to hold the database, normal database recovery on that file can be used
+to prevent information loss, although it is still possible that the
+contents of <b>re_source</b> will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_c/db_sync.html">DB-&gt;sync</a> or <a href="../api_c/db_close.html">DB-&gt;close</a> methods will fail, of course.
+Specify the <a href="../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the <a href="../api_c/db_close.html">DB-&gt;close</a> method to stop it
+from attempting to write the changes to the backing file; instead, they
+will be silently discarded.
+<p>For all of the previous reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for Berkeley DB applications;
+and that are either generated on the fly by software tools or modified
+using a different mechanism -- for example, a text editor.
+<p>The DB-&gt;set_re_source method configures operations performed using the specified
+<a href="../api_c/db_class.html">DB</a> handle, not all operations performed on the underlying
+database.
+<p>The DB-&gt;set_re_source interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+If the database already exists when
+<a href="../api_c/db_open.html">DB-&gt;open</a> is called, the information specified to DB-&gt;set_re_source must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The DB-&gt;set_re_source method returns a non-zero error value on failure and 0 on success.
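+<p>For example, a Recno database backed by a newline-delimited flat text
+file might be configured as follows (the file names and the
+<a href="../api_c/db_open.html">DB-&gt;open</a> argument list are illustrative only):
+<p><pre>
+DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+/* Records in the backing text file are separated by &lt;newline&gt;. */
+if ((ret = dbp-&gt;set_re_source(dbp, "records.txt")) != 0 ||
+    (ret = dbp-&gt;set_re_delim(dbp, '\n')) != 0 ||
+    (ret = dbp-&gt;open(dbp, NULL,
+    "records.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+}
+</pre>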
+<h1>Errors</h1>
+<p>The DB-&gt;set_re_source method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/db_open.html">DB-&gt;open</a> was called.
+</dl>
+<p>The DB-&gt;set_re_source method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;set_re_source method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_stat.html b/libdb/docs/api_c/db_stat.html
new file mode 100644
index 0000000..338ed3d
--- /dev/null
+++ b/libdb/docs/api_c/db_stat.html
@@ -0,0 +1,173 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;stat(DB *db, void *sp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;stat method creates a statistical structure and
+copies a pointer to it into the user-specified memory location.
+Specifically, if <b>sp</b> is non-NULL, a pointer to the statistics
+for the database is copied into the memory location to which it refers.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FAST_STAT">DB_FAST_STAT</a><dd>Return only the values which do not require traversal of the database.
+Fields returned when this flag is set are noted with an asterisk (*)
+below.
+<p>Among other things, this flag makes it possible for applications to
+request key and record counts without incurring the performance penalty
+of traversing the entire database. If the underlying database is of
+type Recno, or of type Btree and the database was created with the
+<a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag, the count of keys will be exact. Otherwise,
+the count of keys will be the value saved the last time the database
+was traversed, or 0 if no count of keys has ever been made. If the
+underlying database is of type Recno, the count of data items will be
+exact, otherwise, the count of data items will be the value saved the
+last time the database was traversed, or 0 if no count of data items
+has ever been done.
+</dl>
+<p>If the DB_FAST_STAT flag has not been specified, the
+DB-&gt;stat method will access some or all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by DB-&gt;stat may be out-of-date.
+<p>If the database was not opened read-only and the DB_FAST_STAT
+flag was not specified, the cached key and record numbers will be
+updated after the statistical information has been gathered.
+<p>The DB-&gt;stat method cannot be transaction-protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The DB-&gt;stat method returns a non-zero error value on failure and 0 on success.
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are stored in a structure of type DB_HASH_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t hash_magic*<dd>Magic number that identifies the file as a Hash file.
+<dt>u_int32_t hash_version*<dd>The version of the Hash database.
+<dt>u_int32_t hash_nkeys*<dd>The number of unique keys in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t hash_ndata*<dd>The number of key/data pairs in the database. If DB_FAST_STAT
+was specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t hash_pagesize*<dd>The underlying Hash database page (and bucket) size, in bytes.
+<dt>u_int32_t hash_ffactor*<dd>The desired fill factor (number of items per bucket) specified at
+database-creation time.
+<dt>u_int32_t hash_buckets*<dd>The number of hash buckets.
+<dt>u_int32_t hash_free<dd>The number of pages on the free list.
+<dt>u_int32_t hash_bfree<dd>The number of bytes free on bucket pages.
+<dt>u_int32_t hash_bigpages<dd>The number of big key/data pages.
+<dt>u_int32_t hash_big_bfree<dd>The number of bytes free on big item pages.
+<dt>u_int32_t hash_overflows<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>u_int32_t hash_ovfl_free<dd>The number of bytes free on overflow pages.
+<dt>u_int32_t hash_dup<dd>The number of duplicate pages.
+<dt>u_int32_t hash_dup_free<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are stored in a structure of type DB_BTREE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t bt_magic*<dd>Magic number that identifies the file as a Btree database.
+<dt>u_int32_t bt_version*<dd>The version of the Btree database.
+<dt>u_int32_t bt_nkeys*<dd>For the Btree Access Method, the number of unique keys in the database.
+If DB_FAST_STAT was specified and the database was created with
+the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag, the count will be exact, otherwise, the
+count will be the last saved value unless it has never been calculated,
+in which case it will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database.
+<dt>u_int32_t bt_ndata*<dd>For the Btree Access Method, the number of key/data pairs in the
+database. If DB_FAST_STAT was specified the count will be the
+last saved value unless it has never been calculated, in which case it
+will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database. If the database has been configured to not renumber records
+during deletion, the count of records will only reflect undeleted
+records.
+<dt>u_int32_t bt_pagesize*<dd>Underlying database page size, in bytes.
+<dt>u_int32_t bt_minkey*<dd>The minimum keys per page.
+<dt>u_int32_t bt_re_len*<dd>The length of fixed-length records.
+<dt>u_int32_t bt_re_pad*<dd>The padding byte value for fixed-length records.
+<dt>u_int32_t bt_levels<dd>Number of levels in the database.
+<dt>u_int32_t bt_int_pg<dd>Number of database internal pages.
+<dt>u_int32_t bt_leaf_pg<dd>Number of database leaf pages.
+<dt>u_int32_t bt_dup_pg<dd>Number of database duplicate pages.
+<dt>u_int32_t bt_over_pg<dd>Number of database overflow pages.
+<dt>u_int32_t bt_free<dd>Number of pages on the free list.
+<dt>u_int32_t bt_int_pgfree<dd>Number of bytes free in database internal pages.
+<dt>u_int32_t bt_leaf_pgfree<dd>Number of bytes free in database leaf pages.
+<dt>u_int32_t bt_dup_pgfree<dd>Number of bytes free in database duplicate pages.
+<dt>u_int32_t bt_over_pgfree<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are stored in a structure of type DB_QUEUE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t qs_magic*<dd>Magic number that identifies the file as a Queue file.
+<dt>u_int32_t qs_version*<dd>The version of the Queue file type.
+<dt>u_int32_t qs_nkeys*<dd>The number of records in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t qs_ndata*<dd>The number of records in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t qs_pagesize*<dd>Underlying database page size, in bytes.
+<dt>u_int32_t qs_extentsize*<dd>Underlying database extent size, in pages.
+<dt>u_int32_t qs_pages<dd>Number of pages in the database.
+<dt>u_int32_t qs_re_len*<dd>The length of the records.
+<dt>u_int32_t qs_re_pad*<dd>The padding byte value for the records.
+<dt>u_int32_t qs_pgfree<dd>Number of bytes free in database pages.
+<dt>u_int32_t qs_first_recno*<dd>First undeleted record in the database.
+<dt>u_int32_t qs_cur_recno*<dd>Next available record number.
+</dl>
+<p>The DB-&gt;stat method returns a non-zero error value on failure and 0 on success.
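+<p>For example, an application might retrieve the inexpensive Btree
+statistics and then release the returned structure (assuming <b>dbp</b>
+is a handle already open on a Btree database):
+<p><pre>
+DB_BTREE_STAT *sp;
+int ret;
+<p>
+if ((ret = dbp-&gt;stat(dbp, &amp;sp, DB_FAST_STAT)) != 0)
+        return (ret);
+printf("keys: %lu\n", (unsigned long)sp-&gt;bt_nkeys);
+/* The structure was allocated on the caller's behalf; only the
+ * top-level reference needs to be freed. */
+free(sp);
+</pre>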
+<h1>Errors</h1>
+<p>The DB-&gt;stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_sync.html b/libdb/docs/api_c/db_sync.html
new file mode 100644
index 0000000..49c9202
--- /dev/null
+++ b/libdb/docs/api_c/db_sync.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;sync</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;sync(DB *db, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;sync method flushes any cached information to disk.
+<p>If the database is in memory only, the DB-&gt;sync method has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+<a href="../api_c/db_close.html">DB-&gt;close</a>, atomically replace the original database with the
+updated copy.
+<p>The DB-&gt;sync method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB-&gt;sync method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB-&gt;sync method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;sync method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_truncate.html b/libdb/docs/api_c/db_truncate.html
new file mode 100644
index 0000000..eea8c35
--- /dev/null
+++ b/libdb/docs/api_c/db_truncate.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;truncate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;truncate</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;truncate(DB *db,
+ DB_TXN *txnid, u_int32_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;truncate interface empties the database, discarding all
+records it contains.
+The number of records discarded from the database is returned in
+<b>countp</b>.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB-&gt;truncate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DB-&gt;truncate method returns a non-zero error value on failure and 0 on success.
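+<p>For example, assuming <b>dbp</b> is an open handle and the operation
+is not transaction-protected:
+<p><pre>
+u_int32_t count;
+int ret;
+<p>
+/* Discard every record; the number removed is returned in count. */
+if ((ret = dbp-&gt;truncate(dbp, NULL, &amp;count, 0)) != 0)
+        return (ret);
+printf("discarded %lu records\n", (unsigned long)count);
+</pre>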
+<h1>Errors</h1>
+<p>The DB-&gt;truncate method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p>The DB-&gt;truncate method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;truncate method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_upgrade.html b/libdb/docs/api_c/db_upgrade.html
new file mode 100644
index 0000000..8aff59b
--- /dev/null
+++ b/libdb/docs/api_c/db_upgrade.html
@@ -0,0 +1,100 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;upgrade</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;upgrade(DB *db, const char *file, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;upgrade method upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+DB-&gt;upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive. For example,
+if pages need to be allocated and no disk space is available, the
+database may be left corrupted. Backups should be made before databases
+are upgraded. See <a href="../ref/am/upgrade.html">Upgrading databases</a>
+for more information.</b>
+<p>Unlike all other database operations, DB-&gt;upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release,
+the on-disk format of duplicate data items changed. To correctly
+upgrade the format requires applications to specify whether duplicate
+data items in the database are sorted or not. Specifying the
+DB_DUPSORT flag informs DB-&gt;upgrade that the duplicates
+are sorted; otherwise they are assumed to be unsorted. Incorrectly
+specifying the value of this flag may lead to database corruption.
+<p>Further, because the DB-&gt;upgrade method upgrades a physical file
+(including all the databases it contains), it is not possible to use
+DB-&gt;upgrade to upgrade files in which some of the databases it
+includes have sorted duplicate data items, and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, if the databases do not support duplicate
+data items, or if all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+DB-&gt;upgrade will work correctly as long as the
+DB_DUPSORT flag is correctly specified. Otherwise, the file
+cannot be upgraded using DB-&gt;upgrade; it must be upgraded
+manually by dumping and reloading the databases.
+</dl>
+<p>The DB-&gt;upgrade method returns a non-zero error value on failure and 0 on success.
+<p>The DB-&gt;upgrade method is the underlying interface used by the <a href="../utility/db_upgrade.html">db_upgrade</a> utility.
+See the <a href="../utility/db_upgrade.html">db_upgrade</a> utility source code for an example of using DB-&gt;upgrade
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
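+<p>For example (the file name is illustrative only, and a backup should
+be made before the call, as described above):
+<p><pre>
+DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+/* Upgrade in place; a no-op if the file format is already current. */
+ret = dbp-&gt;upgrade(dbp, "mydb.db", 0);
+(void)dbp-&gt;close(dbp, 0);
+return (ret);
+</pre>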
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB-&gt;upgrade is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;upgrade method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The DB-&gt;upgrade method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;upgrade method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/db_verify.html b/libdb/docs/api_c/db_verify.html
new file mode 100644
index 0000000..5d0c811
--- /dev/null
+++ b/libdb/docs/api_c/db_verify.html
@@ -0,0 +1,131 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB-&gt;verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB-&gt;verify</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB-&gt;verify(DB *db, const char *file,
+ const char *database, FILE *outfile, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB-&gt;verify method verifies the integrity of all databases in the
+file specified by the <b>file</b> argument, and optionally outputs the
+databases' key/data pairs to the file stream specified by the
+<b>outfile</b> argument.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_SALVAGE">DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility, and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using DB-&gt;verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AGGRESSIVE">DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, DB-&gt;verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="DB_PRINTABLE">DB_PRINTABLE</a><dd>When using the DB_SALVAGE flag, if characters in either the key
+or data items are printing characters (as defined by <b>isprint</b>(3)), use printing characters to represent them. This flag permits users
+to use standard text editors and tools to modify the contents of
+databases or selectively remove data from salvager output.
+<p>Note: different systems may have different notions about what characters
+are considered <i>printing characters</i>, and databases dumped in
+this manner may be less portable to external systems.
+<p><dt><a name="DB_NOORDERCHK">DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The DB-&gt;verify method normally verifies that btree keys and duplicate
+items are correctly sorted, and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification because only one sort order or hash function can be
+specified before DB-&gt;verify is called. To verify files with
+multiple databases having differing sorting orders or hashing functions,
+first perform verification of the file as a whole by using the
+DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+DB_ORDERCHKONLY flag.
+<p><dt><a name="DB_ORDERCHKONLY">DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using DB-&gt;verify with the
+DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to NULL except when the
+DB_ORDERCHKONLY flag is set.
+<p><b>The DB-&gt;verify method does not perform any locking, even in Berkeley DB
+environments that are configured with a locking subsystem. As such, it
+should only be used on files that are not being modified by another
+thread of control.</b>
+<p>The DB-&gt;verify interface may not be called after the <a href="../api_c/db_open.html">DB-&gt;open</a>
+interface is called.
+<a name="3"><!--meow--></a>
+<p>The DB-&gt;verify method returns a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> if a database is corrupted. When the
+DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
+<p>The DB-&gt;verify method is the underlying interface used by the <a href="../utility/db_verify.html">db_verify</a> utility.
+See the <a href="../utility/db_verify.html">db_verify</a> utility source code for an example of using DB-&gt;verify
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
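+<p>For example, a possibly damaged file might be salvaged into
+<a href="../utility/db_load.html">db_load</a>-compatible output (the file names are illustrative
+only, and further handle cleanup is elided):
+<p><pre>
+DB *dbp;
+FILE *fp;
+int ret;
+<p>
+if ((ret = db_create(&amp;dbp, NULL, 0)) != 0)
+        return (ret);
+if ((fp = fopen("salvage.out", "w")) == NULL) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (errno);
+}
+/* Write every key/data pair that can be found. */
+ret = dbp-&gt;verify(dbp, "mydb.db", NULL, fp, DB_SALVAGE | DB_AGGRESSIVE);
+(void)fclose(fp);
+return (ret);
+</pre>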
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB-&gt;verify is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB-&gt;verify method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>DB-&gt;verify was called after <a href="../api_c/db_open.html">DB-&gt;open</a>.
+</dl>
+<p>The DB-&gt;verify method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB-&gt;verify method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/db_class.html">DB</a>
+<h1>See Also</h1>
+<a href="../api_c/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_class.html b/libdb/docs/api_c/dbc_class.html
new file mode 100644
index 0000000..53ec2d2
--- /dev/null
+++ b/libdb/docs/api_c/dbc_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBC</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBC</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __dbc DBC;
+</pre></h3>
+<h1>Description</h1>
+<p>The DBC object is the handle for a cursor into a Berkeley DB database.
+The handle is not free-threaded: a cursor may not be used by more than
+a single thread of control. If the cursor is to
+be used to perform operations on behalf of a transaction, the cursor
+must be opened and closed within the context of that single transaction.
+Once <a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a> has been called, the handle may not be accessed
+again, regardless of the method's return.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_close.html b/libdb/docs/api_c/dbc_close.html
new file mode 100644
index 0000000..8c5fa19
--- /dev/null
+++ b/libdb/docs/api_c/dbc_close.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_close(DBC *cursor);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_close method discards the cursor.
+<p>It is possible for the DBcursor-&gt;c_close method to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>After DBcursor-&gt;c_close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The DBcursor-&gt;c_close method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_close method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DBcursor-&gt;c_close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_count.html b/libdb/docs/api_c/dbc_count.html
new file mode 100644
index 0000000..db0c3bf
--- /dev/null
+++ b/libdb/docs/api_c/dbc_count.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_count</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_count(DBC *cursor, db_recno_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_count method returns a count of the number of duplicate data
+items for the key to which the
+cursor refers, into the memory location to which <b>countp</b> refers.
+If the underlying database does not support duplicate data items, the
+call will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>
+If the <b>cursor</b> argument is not yet initialized, the DBcursor-&gt;c_count method will return EINVAL.
+Otherwise, the DBcursor-&gt;c_count method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_count method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_count method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_del.html b/libdb/docs/api_c/dbc_del.html
new file mode 100644
index 0000000..28c5e41
--- /dev/null
+++ b/libdb/docs/api_c/dbc_del.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_del</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_del(DBC *cursor, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_del method deletes the key/data pair to which the cursor
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method, the DBcursor-&gt;c_del method
+deletes the key/data pair from the primary database and all secondary
+indices.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to refer to an existing key will
+fail.
+<p>
+If the element has already been deleted, the DBcursor-&gt;c_del method will return DB_KEYEMPTY.
+If the cursor is not yet initialized, the DBcursor-&gt;c_del method will return EINVAL.
+Otherwise, the DBcursor-&gt;c_del method returns a non-zero error value on failure and 0 on success.
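+<p>For example, a cursor might be positioned on a key with the
+DB_SET flag of <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> and the pair then deleted
+(the key shown is illustrative only; <b>dbc</b> is assumed to be an open
+cursor):
+<p><pre>
+DBT key, data;
+int ret;
+<p>
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "somekey";
+key.size = sizeof("somekey") - 1;
+<p>
+/* Position the cursor, then delete the key/data pair it refers to. */
+if ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_SET)) == 0)
+        ret = dbc-&gt;c_del(dbc, 0);
+</pre>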
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_del method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+<p>The DBcursor-&gt;c_del method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_del method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_dup.html b/libdb/docs/api_c/dbc_dup.html
new file mode 100644
index 0000000..5180df0
--- /dev/null
+++ b/libdb/docs/api_c/dbc_dup.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_dup</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_dup(DBC *cursor, DBC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_dup method creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_POSITION">DB_POSITION</a><dd>The newly created cursor is initialized to refer to the same position
+in the database as the original cursor and hold the same locks. If the
+DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>
+If the <b>cursor</b> argument is not yet initialized, the DBcursor-&gt;c_dup method will return EINVAL.
+Otherwise, the DBcursor-&gt;c_dup method returns a non-zero error value on failure and 0 on success.
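+<p>For example, a positioned cursor might be duplicated so that a second
+cursor in the same transaction starts from the same place:
+<p><pre>
+DBC *dbc2;
+int ret;
+<p>
+/* The new cursor shares the original cursor's transaction and locker
+ * ID and, because DB_POSITION is specified, refers to the same
+ * position and holds the same locks. */
+if ((ret = dbc-&gt;c_dup(dbc, &amp;dbc2, DB_POSITION)) != 0)
+        return (ret);
+/* ... operate on dbc2 ... */
+(void)dbc2-&gt;c_close(dbc2);
+</pre>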
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_dup method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>cursor</b> argument was created using the
+<a href="../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The DBcursor-&gt;c_dup method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_dup method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_get.html b/libdb/docs/api_c/dbc_get.html
new file mode 100644
index 0000000..ce1208d
--- /dev/null
+++ b/libdb/docs/api_c/dbc_get.html
@@ -0,0 +1,231 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_get(DBC *cursor,
+ DBT *key, DBT *data, u_int32_t flags);
+int
+DBcursor-&gt;c_pget(DBC *cursor,
+ DBT *key, DBT *pkey, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_get method retrieves key/data pairs from the database. The
+address and length of the key
+are returned in the object to which <b>key</b> refers (except for the
+case of the DB_SET flag, in which the <b>key</b> object is
+unchanged), and the address
+and length of the data are returned in the object to which <b>data</b>
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method, the DBcursor-&gt;c_get
+and DBcursor-&gt;c_pget methods return the key from the secondary index and the
+data item from the primary database. In addition, the DBcursor-&gt;c_pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the DBcursor-&gt;c_pget interface will always fail and
+return EINVAL.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan; that is, records inserted behind a cursor will not be
+returned while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (that is, entries that
+were never explicitly created or that were created and then deleted)
+will be skipped during a sequential scan.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the key/data pair to which the cursor refers.
+<p>
+If the cursor key/data pair was deleted, the DBcursor-&gt;c_get method will return DB_KEYEMPTY.
+If the cursor is not yet initialized, the DBcursor-&gt;c_get method will return EINVAL.
+<p><dt><a name="DB_FIRST">DB_FIRST</a>, <a name="DB_LAST">DB_LAST</a><dd>The cursor is set to refer to the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_FIRST (DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application, or were created and
+later deleted.
+<p>
+If the database is empty, the DBcursor-&gt;c_get method will return DB_NOTFOUND.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>The DB_GET_BOTH flag is identical to the DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p>When used with the DBcursor-&gt;c_pget version of this interface on a
+secondary index handle, both the secondary and primary keys must be
+matched by the secondary and primary key item in the database. It is
+an error to use the DB_GET_BOTH flag with the DBcursor-&gt;c_get
+version of this interface and a cursor that has been opened on a
+secondary index handle.
+<p><dt><a name="DB_GET_BOTH_RANGE">DB_GET_BOTH_RANGE</a><dd>The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH
+flag, except that, in the case of any database supporting sorted
+duplicate sets, the returned key/data pair is the smallest data item
+greater than or equal to the specified data item (as determined by the
+comparison function), permitting partial matches and range searches in
+duplicate data sets.
+<p><dt><a name="DB_GET_RECNO">DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b>, as described in <a href="../api_c/dbt_class.html">DBT</a>. The
+<b>key</b> parameter is ignored.
+<p>For DB_GET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_c/db_join.html">DB-&gt;join</a> method.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>If the cursor is not yet initialized, DB_NEXT (DB_PREV)
+is identical to DB_FIRST (DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_NEXT (DB_PREV) flag will skip any keys that exist
+but were never explicitly created by the application, or those that were
+created and later deleted.
+<p>
+If the cursor is already on the last (first) record in the database, the DBcursor-&gt;c_get method will return DB_NOTFOUND.
+<p><dt><a name="DB_NEXT_DUP">DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate data record for
+the current key/data pair, the cursor is moved to the next key/data pair
+of the database, and that pair is returned.
+If the next key/data pair of the database is not a duplicate data record
+for the current key/data pair, the DBcursor-&gt;c_get method will return DB_NOTFOUND.
+If the cursor is not yet initialized, the DBcursor-&gt;c_get method will return EINVAL.
+<p><dt><a name="DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a name="DB_PREV_NODUP">DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, DB_NEXT_NODUP
+(DB_PREV_NODUP) is identical to DB_FIRST
+(DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key of the database, and that key/data pair is returned.
+<p>If the database is a Queue or Recno database, DBcursor-&gt;c_get using the
+DB_NEXT_NODUP (DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application, or those
+that were created and later deleted.
+<p>
+If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, the DBcursor-&gt;c_get method will return DB_NOTFOUND.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, DBcursor-&gt;c_get will return the
+first data item for the given key.
+If no matching keys are found, the DBcursor-&gt;c_get method will return DB_NOTFOUND.
+If the database is a Queue or Recno database, and the specified key exists,
+but was never explicitly created by the application or was later deleted, the DBcursor-&gt;c_get method will return DB_KEYEMPTY.
+<p><dt><a name="DB_SET_RANGE">DB_SET_RANGE</a><dd>The DB_SET_RANGE flag is identical to the DB_SET flag,
+except that in the case of the Btree access method, the key is returned
+as well as the data item and the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison function), permitting partial key matches and range
+searches.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a pointer to a memory location from which a <a href="../api_c/dbt_class.html#db_recno_t">db_recno_t</a>
+may be read, as described in <a href="../api_c/dbt_class.html">DBT</a>. This memory location will be
+read to determine the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_MULTIPLE">DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If the
+first data item associated with the key cannot fit into the buffer, the
+size field of the <b>data</b> argument is set to the length needed to
+retrieve it, and the error ENOMEM is returned. Subsequent calls with both the
+DB_NEXT_DUP and DB_MULTIPLE flags specified will return
+additional data items associated with the current key or
+<a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a> if there are no additional data items to return.
+<p>If DB_MULTIPLE is specified for the Queue and Recno access
+methods, the buffer will be filled with as many data records as
+possible. The record number of the first record will be returned in
+the <b>key</b> argument. The record number of each subsequent returned
+record must be calculated from this value.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE flag may only be used with the
+DB_CURRENT, DB_FIRST, DB_GET_BOTH,
+DB_NEXT, DB_NEXT_DUP, DB_NEXT_NODUP,
+DB_SET, DB_SET_RANGE, and DB_SET_RECNO
+options.
+<p>The DB_MULTIPLE flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method.
+<p>See <a href="../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a><dd>Return multiple key and data pairs. The buffer to which the
+<b>data</b> argument refers is filled with key and data pairs. If the
+first key and data pair cannot fit into the buffer, the size field of
+the <b>data</b> argument is set to the length needed to retrieve them,
+and the error ENOMEM is returned.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE_KEY flag may only be used with the
+DB_CURRENT, DB_FIRST, DB_GET_BOTH,
+DB_NEXT, DB_NEXT_NODUP, DB_SET,
+DB_SET_RANGE, and DB_SET_RECNO options. The
+DB_MULTIPLE_KEY flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_c/db_associate.html">DB-&gt;associate</a> method.
+<p>See <a href="../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p>
+Otherwise, the DBcursor-&gt;c_get method returns a non-zero error value on failure and 0 on success.
+<p>If DBcursor-&gt;c_get fails for any reason, the state of the cursor will be
+unchanged.
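+<p>As a minimal illustrative sketch (not part of the original text), the
+following loop retrieves every key/data pair in a database using the
+DB_NEXT flag; the handle <b>dbc</b> is assumed to be an open cursor, and
+error handling is abbreviated:
+<p><blockquote><pre>DBT key, data;
+int ret;
+/* The DBT structures must be cleared before first use. */
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+/* Step through the database; DB_NOTFOUND marks the end. */
+while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+        printf("key size %lu, data size %lu\n",
+            (u_long)key.size, (u_long)data.size);
+if (ret != DB_NOTFOUND) {
+        /* A real error occurred; handle it here. */
+}</pre></blockquote>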
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_get method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+<p>The DBcursor-&gt;c_pget interface was called with a cursor that does not
+refer to a secondary index.
+</dl>
+<p>The DBcursor-&gt;c_get method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_get method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_list.html b/libdb/docs/api_c/dbc_list.html
new file mode 100644
index 0000000..a54b66a
--- /dev/null
+++ b/libdb/docs/api_c/dbc_list.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Cursors and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Cursors and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Cursors and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_c/dbc_close.html">DBcursor-&gt;c_close</a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_c/dbc_count.html">DBcursor-&gt;c_count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><a href="../api_c/dbc_del.html">DBcursor-&gt;c_del</a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_c/dbc_get.html">DBcursor-&gt;c_pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a></td><td>Store by cursor</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbc_put.html b/libdb/docs/api_c/dbc_put.html
new file mode 100644
index 0000000..5d31abb
--- /dev/null
+++ b/libdb/docs/api_c/dbc_put.html
@@ -0,0 +1,153 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBcursor-&gt;c_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBcursor-&gt;c_put</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DBcursor-&gt;c_put(DBC *, DBT *key, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DBcursor-&gt;c_put method stores key/data pairs into the database.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AFTER">DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately after the current cursor position.
+It is an error to specify DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure to which the <b>key</b> argument refers. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_c/db_open.html">DB-&gt;open</a>
+for more information.
+<p>The DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, the DBcursor-&gt;c_put function will return EINVAL.
+<p><dt><a name="DB_BEFORE">DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately before the current cursor position.
+It is an error to specify DB_BEFORE if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure to which the <b>key</b> argument refers.
+The initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_c/db_open.html">DB-&gt;open</a> for more information.
+<p>The DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, DBcursor-&gt;c_put will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Overwrite the data of the key/data pair to which the cursor refers with
+the specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+referenced key/data pair does not compare equally to the <b>data</b>
+parameter, DBcursor-&gt;c_put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, DBcursor-&gt;c_put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue, or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, DBcursor-&gt;c_put will return EINVAL.
+<p><dt><a name="DB_KEYFIRST">DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_KEYLAST">DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database, unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>
+is returned. The DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>
+Otherwise, the DBcursor-&gt;c_put method returns a non-zero error value on failure and 0 on success.
+<p>If DBcursor-&gt;c_put fails for any reason, the state of the cursor will be
+unchanged. If DBcursor-&gt;c_put succeeds and an item is inserted into the
+database, the cursor is always positioned to refer to the newly inserted
+item.
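+<p>As an illustrative sketch only (not part of the original text), the
+following fragment inserts a key/data pair using the DB_KEYFIRST flag;
+the handle <b>dbc</b> is assumed to be an open write cursor on a Btree or
+Hash database, and the key and data values are hypothetical:
+<p><blockquote><pre>DBT key, data;
+int ret;
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+data.data = "apple";
+data.size = sizeof("apple");
+/* Insert the pair; the cursor is left positioned on the new item. */
+if ((ret = dbc-&gt;c_put(dbc, &amp;key, &amp;data, DB_KEYFIRST)) != 0) {
+        /* Handle the error here. */
+}</pre></blockquote>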
+<h1>Errors</h1>
+<p>The DBcursor-&gt;c_put method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_BEFORE or DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+<p>The DBcursor-&gt;c_put method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DBcursor-&gt;c_put method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/dbc_class.html">DBC</a>
+<h1>See Also</h1>
+<a href="../api_c/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbm.html b/libdb/docs/api_c/dbm.html
new file mode 100644
index 0000000..b875b5f
--- /dev/null
+++ b/libdb/docs/api_c/dbm.html
@@ -0,0 +1,221 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbm/ndbm</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>dbm/ndbm</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+<p>
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+<hr size=1 noshade>
+<h3>Dbm Functions</h3>
+int
+dbminit(char *file);
+<p>
+int
+dbmclose();
+<p>
+datum
+fetch(datum key);
+<p>
+int
+store(datum key, datum content);
+<p>
+int
+delete(datum key);
+<p>
+datum
+firstkey(void);
+<p>
+datum
+nextkey(datum key);
+<hr size=1 noshade>
+<h3>Ndbm Functions</h3>
+DBM *
+dbm_open(char *file, int flags, int mode);
+<p>
+void
+dbm_close(DBM *db);
+<p>
+datum
+dbm_fetch(DBM *db, datum key);
+<p>
+int
+dbm_store(DBM *db, datum key, datum content, int flags);
+<p>
+int
+dbm_delete(DBM *db, datum key);
+<p>
+datum
+dbm_firstkey(DBM *db);
+<p>
+datum
+dbm_nextkey(DBM *db);
+<p>
+int
+dbm_error(DBM *db);
+<p>
+int
+dbm_clearerr(DBM *db);
+</pre></h3>
+<h1>Description</h1>
+<p>The dbm interfaces to the Berkeley DB library are intended to provide
+high-performance implementations and source code compatibility for
+applications written to historic interfaces. They are not recommended
+for any other purpose. The historic dbm database format
+<b>is not supported</b>, and databases previously built using the real
+dbm libraries cannot be read by the Berkeley DB functions.
+<p>To compile dbm applications, replace the application's
+<b>#include</b> of the dbm or ndbm include file (for example,
+<b>#include &lt;dbm.h&gt;</b> or <b>#include &lt;ndbm.h&gt;</b>)
+with the following two lines:
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;</pre></blockquote>
+<p>and recompile. If the application attempts to load against a dbm library
+(for example, <b>-ldbm</b>), remove the library from the load line.
+<p><b>Key</b> and <b>content</b> arguments are objects described by the
+<b>datum</b> typedef. A <b>datum</b> specifies a string of
+<b>dsize</b> bytes pointed to by <b>dptr</b>. Arbitrary binary data,
+as well as normal text strings, are allowed.
+<h3>Dbm Functions</h3>
+<p>Before a database can be accessed, it must be opened by dbminit.
+This will open and/or create the database <b>file</b>.db. If created,
+the database file is created read/write by owner only (as described in
+<b>chmod</b>(2)) and modified by the process' umask value at the time
+of creation (see <b>umask</b>(2)). The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>A database may be closed, and any held resources released, by calling
+dbmclose.
+<p>Once open, the data stored under a key is accessed by fetch, and
+data is placed under a key by store. A key (and its associated
+contents) are deleted by delete. A linear pass through all keys
+in a database may be made, in an (apparently) random order, by using
+firstkey and nextkey. The firstkey method will return
+the first key in the database. The nextkey method will return the next
+key in the database.
+<p>The following code will traverse the database:
+<p><blockquote><pre>for (key = firstkey();
+ key.dptr != NULL; key = nextkey(key)) {
+ ...
+}</pre></blockquote>
+<h3>Ndbm Functions</h3>
+<p>Before a database can be accessed, it must be opened by dbm_open.
+This will open and/or create the database file <b>file.db</b>, depending
+on the flags parameter (see <b>open</b>(2)). If created, the database
+file is created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by
+Berkeley DB.
+<p>Once open, the data stored under a key is accessed by dbm_fetch,
+and data is placed under a key by dbm_store. The <b>flags</b>
+field can be either <b>DBM_INSERT</b> or <b>DBM_REPLACE</b>.
+<b>DBM_INSERT</b> will only insert new entries into the database, and will
+not change an existing entry with the same key. <b>DBM_REPLACE</b> will
+replace an existing entry if it has the same key. A key (and its
+associated contents) are deleted by dbm_delete. A linear pass
+through all keys in a database may be made, in an (apparently) random
+order, by using dbm_firstkey and dbm_nextkey. The
+dbm_firstkey method will return the first key in the database. The
+dbm_nextkey method will return the next key in the database.
+<p>The following code will traverse the database:
+<p><blockquote><pre>for (key = dbm_firstkey(db);
+ key.dptr != NULL; key = dbm_nextkey(db)) {
+ ...
+}</pre></blockquote>
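+<p>As an illustrative sketch only (not part of the original text), the
+following fragment opens an ndbm database, stores a record, and fetches
+it back; the file name and key/content values are hypothetical, and
+error handling is abbreviated:
+<p><blockquote><pre>DBM *db;
+datum key, content, result;
+/* Open (creating if necessary) the database file.db. */
+if ((db = dbm_open("file", O_CREAT | O_RDWR, 0644)) == NULL) {
+        /* Handle the error here. */
+}
+key.dptr = "key1";
+key.dsize = sizeof("key1");
+content.dptr = "value1";
+content.dsize = sizeof("value1");
+/* Insert the record only if the key is not already present. */
+if (dbm_store(db, key, content, DBM_INSERT) &lt; 0) {
+        /* Handle the error here. */
+}
+result = dbm_fetch(db, key);
+dbm_close(db);</pre></blockquote>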
+<h3>Compatibility Notes</h3>
+<p>The historic dbm library created two underlying database files,
+traditionally named <b>file.dir</b> and <b>file.pag</b>. The Berkeley DB
+library creates a single database file named <b>file.db</b>.
+Applications that are aware of the underlying database filenames may
+require additional source code modifications.
+<p>The historic dbminit interface required that the underlying
+<b>.dir</b> and <b>.pag</b> files already exist (empty databases were
+created by first manually creating zero-length <b>.dir</b> and
+<b>.pag</b> files). Applications that expect to create databases using
+this method may require additional source code modifications.
+<p>The historic dbm_dirfno and dbm_pagfno macros are
+supported, but will return identical file descriptors because there is
+only a single underlying file used by the Berkeley DB hashing access method.
+Applications using both file descriptors for locking may require
+additional source code modifications.
+<p>If an application using the dbm interface exits without first
+closing the database, it may lose updates because the Berkeley DB library
+buffers writes to underlying databases. Such applications will require
+additional source code modifications to work correctly with the Berkeley DB
+library.
+<h3>Dbm Diagnostics</h3>
+<p>The dbminit function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The fetch function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The store function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The delete function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The firstkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The nextkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<h1>Errors</h1>
+<p>The dbminit, fetch, store, delete, firstkey, and nextkey functions may
+fail and return an error for errors specified for other Berkeley DB and C
+library or system functions.
+<h3>Ndbm Diagnostics</h3>
+<p>The dbm_error method returns non-zero when an error has occurred reading or
+writing the database.
+<p>The dbm_clearerr method resets the error condition on the named database.
+<p>The dbm_open function returns NULL on failure, setting <b>errno</b>,
+and a DBM reference on success.
+<p>The dbm_fetch function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_store function returns -1 on failure, setting <b>errno</b>,
+0 on success, and 1 if DBM_INSERT was set and the specified key already
+existed in the database.
+<p>The dbm_delete function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<p>The dbm_firstkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_nextkey function sets the <b>dptr</b> field of the returned
+<b>datum</b> to NULL on failure, setting <b>errno</b>,
+and returns a non-NULL <b>dptr</b> on success.
+<p>The dbm_close function returns -1 on failure, setting <b>errno</b>,
+and 0 on success.
+<h1>Errors</h1>
+<p>The dbm_open, dbm_close, dbm_fetch, dbm_store, dbm_delete, dbm_firstkey,
+and dbm_nextkey functions may fail and return an error for errors
+specified for other Berkeley DB and C library or system functions.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbt_bulk.html b/libdb/docs/api_c/dbt_bulk.html
new file mode 100644
index 0000000..d024263
--- /dev/null
+++ b/libdb/docs/api_c/dbt_bulk.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBT</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBT: Bulk Retrieval</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<a name="3"><!--meow--></a>
+<p>If either of the <a href="../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> or <a href="../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flags
+were specified to the <a href="../api_c/db_get.html">DB-&gt;get</a> or <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods, the data
+<a href="../api_c/dbt_class.html">DBT</a> returned by those interfaces will refer to a buffer that
+is filled with data. Access to that data is through the following
+macros:
+<p><dl compact>
+<p><dt><a name="DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a><dd><pre>DB_MULTIPLE_INIT(void *pointer, <a href="../api_c/dbt_class.html">DBT</a> *data);</pre>
+<p>Initialize the retrieval. The <b>pointer</b> argument is a variable
+to be initialized. The <b>data</b> argument is a <a href="../api_c/dbt_class.html">DBT</a> structure
+returned from a successful call to <a href="../api_c/db_get.html">DB-&gt;get</a> or <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a>
+for which one of the <a href="../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> or <a href="../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a>
+flags was specified.
+<p><dt><a name="DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a><dd><pre>DB_MULTIPLE_NEXT(void *pointer, <a href="../api_c/dbt_class.html">DBT</a> *data, void *retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_c/dbt_class.html">DBT</a> structure returned from a
+successful call to <a href="../api_c/db_get.html">DB-&gt;get</a> or <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> for which the
+<a href="../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>retdata</b> argument is set to
+refer to the next data element in the returned set, and the
+<b>retdlen</b> argument is set to the length, in bytes, of that data
+element. When used with the Queue and Recno access methods,
+<b>retdata</b> will be set to NULL for deleted records. The
+<b>pointer</b> argument is set to NULL if there are no more data
+elements in the returned set.
+<p><dt><a name="DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a><dd><pre>DB_MULTIPLE_KEY_NEXT(void *pointer, <a href="../api_c/dbt_class.html">DBT</a> *data,
+ void *retkey, size_t retklen, void *retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_c/dbt_class.html">DBT</a> structure returned from a
+successful call to <a href="../api_c/db_get.html">DB-&gt;get</a> or <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> for which the
+<a href="../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>retkey</b> argument is set to
+refer to the next key element in the returned set, and the
+<b>retklen</b> argument is set to the length, in bytes, of that key
+element. The <b>retdata</b> argument is set to refer to the next data
+element in the returned set, and the <b>retdlen</b> argument is set to
+the length, in bytes, of that data element. The <b>pointer</b>
+argument is set to NULL if there are no more key/data pairs in the
+returned set.
+<p><dt><a name="DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a><dd><pre>DB_MULTIPLE_RECNO_NEXT(void *pointer, <a href="../api_c/dbt_class.html">DBT</a> *data,
+ db_recno_t recno, void * retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_c/dbt_class.html">DBT</a> structure returned from a
+successful call to <a href="../api_c/db_get.html">DB-&gt;get</a> or <a href="../api_c/dbc_get.html">DBcursor-&gt;c_get</a> for which the
+<a href="../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>recno</b> argument is set to the
+record number of the next record in the returned set. The
+<b>retdata</b> argument is set to refer to the next data element in
+the returned set, and the <b>retdlen</b> argument is set to the length,
+in bytes, of that data element. When used with the Queue and Recno
+access methods, <b>retdata</b> will be set to NULL for deleted
+records. The <b>pointer</b> argument is set to NULL if there are
+no more key/data pairs in the returned set.
+</dl>
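+<p>As an illustrative sketch only (not part of the original text), the
+following fragment retrieves a key's data items in bulk and walks the
+returned buffer; the handle <b>dbc</b> is assumed to be an open cursor,
+the one-megabyte buffer size is arbitrary, and error handling is
+abbreviated:
+<p><blockquote><pre>DBT key, data;
+void *p, *retdata;
+size_t retdlen;
+int ret;
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+/* Supply a large, aligned buffer for the bulk return. */
+data.data = malloc(1024 * 1024);
+data.ulen = 1024 * 1024;
+data.flags = DB_DBT_USERMEM;
+/* Fill the buffer with the first key's data items. */
+if ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_FIRST | DB_MULTIPLE)) != 0) {
+        /* Handle the error here. */
+}
+/* Walk the returned buffer one data item at a time. */
+for (DB_MULTIPLE_INIT(p, &amp;data);;) {
+        DB_MULTIPLE_NEXT(p, &amp;data, retdata, retdlen);
+        if (p == NULL)
+                break;
+        /* retdata now refers to retdlen bytes of data. */
+}</pre></blockquote>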
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/dbt_class.html b/libdb/docs/api_c/dbt_class.html
new file mode 100644
index 0000000..ec37768
--- /dev/null
+++ b/libdb/docs/api_c/dbt_class.html
@@ -0,0 +1,137 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBT</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBT: Key/Data Pairs</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<a name="3"><!--meow--></a>
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs. Both key and data items are represented by the DBT data
+structure. (The name <i>DBT</i> is a mnemonic for <i>data
+base thang</i>, and was used because no one could think of a reasonable
+name that wasn't already in use somewhere else.) Key and data byte
+strings may refer to strings of zero length up to strings of
+essentially unlimited length. See <a href="../ref/am_misc/dbsizes.html">Database limits</a> for more information.
+<p><blockquote><pre>typedef struct {
+ void *data;
+ u_int32_t size;
+ u_int32_t ulen;
+ u_int32_t dlen;
+ u_int32_t doff;
+ u_int32_t flags;
+} DBT;</pre></blockquote>
+<p>In order to ensure compatibility with future releases of Berkeley DB, all
+fields of the DBT structure that are not explicitly set should be
+initialized to nul bytes before the first time the structure is used.
+Do this by declaring the structure external or static, or by calling
+the C library routine <b>bzero</b>(3) or <b>memset</b>(3).
+<p>By default, the <b>flags</b> structure element is expected to be set
+to 0. In this default case, when the application is providing Berkeley DB a
+key or data item to store into the database, Berkeley DB expects the
+<b>data</b> structure element to point to a byte string of <b>size</b>
+bytes. When returning a key/data item to the application, Berkeley DB will
+store into the <b>data</b> structure element a pointer to a byte string
+of <b>size</b> bytes, and the memory to which the pointer refers will be
+allocated and managed by Berkeley DB.
+<p>The elements of the DBT structure are defined as follows:
+<p><dl compact>
+<p><dt>void *<a name="data">data</a>;<dd>A pointer to a byte string.
+<p><dt>u_int32_t <a name="size">size</a>;<dd>The length of <b>data</b>, in bytes.
+<p><dt>u_int32_t <a name="ulen">ulen</a>;<dd>The size of the user's buffer (to which <b>data</b> refers), in bytes.
+This location is not written by the Berkeley DB functions.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> field to 0 and checking the return value in the
+<b>size</b> field. See the DB_DBT_USERMEM flag for more information.
+<p><dt>u_int32_t <a name="dlen">dlen</a>;<dd>The length of the partial record being read or written by the application,
+in bytes. See the DB_DBT_PARTIAL flag for more information.
+<p><dt>u_int32_t <a name="doff">doff</a>;<dd>The offset of the partial record being read or written by the application,
+in bytes. See the DB_DBT_PARTIAL flag for more information.
+<p><dt>u_int32_t flags;<dd>
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DBT_MALLOC">DB_DBT_MALLOC</a><dd>When this flag is set, Berkeley DB will allocate memory for the returned key
+or data item (using <b>malloc</b>(3), or the user-specified malloc
+function), and return a pointer to it in the <b>data</b> field of the
+key or data DBT structure. Because any allocated memory becomes the
+responsibility of the calling application, the caller must determine
+whether memory was allocated using the returned value of the
+<b>data</b> field.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_REALLOC">DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB will allocate memory for the returned key
+or data item (using <b>realloc</b>(3), or the user-specified realloc
+function), and return a pointer to it in the <b>data</b> field of the
+key or data DBT structure. Because any allocated memory becomes the
+responsibility of the calling application, the caller must determine
+whether memory was allocated using the returned value of the
+<b>data</b> field.
+<p>The difference between DB_DBT_MALLOC and DB_DBT_REALLOC
+is that the latter will call <b>realloc</b>(3) instead of
+<b>malloc</b>(3), so the allocated memory will be grown as necessary
+instead of the application doing repeated free/malloc calls.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_USERMEM">DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data structure must refer to
+memory that is at least <b>ulen</b> bytes in length. If the length of
+the requested item is less than or equal to that number of bytes, the
+item is copied into the memory to which the <b>data</b> field refers.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_PARTIAL">DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful, and any existing bytes
+are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a DBT having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would refer to the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the specified key's
+data record are replaced by the data specified by the <b>data</b> and
+<b>size</b> structure elements. If <b>dlen</b> is smaller than
+<b>size</b>, the record will grow; if <b>dlen</b> is larger than
+<b>size</b>, the record will shrink. If the specified bytes do not
+exist, the record will be extended using nul bytes as necessary, and
+the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_c/db_put.html">DB-&gt;put</a> function
+in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_c/dbc_put.html">DBcursor-&gt;c_put</a> function.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a DBT having a <b>dlen</b> field of 20,
+a <b>doff</b> field of 85, and a <b>size</b> field of 30, the resulting
+record would be 115 bytes in length, where the last 30 bytes would be
+those specified by the put call.
+</dl>
+</dl>
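+<p>As an illustrative sketch only (not part of the original text), the
+following fragment initializes a pair of DBT structures and retrieves a
+data item into an application-owned buffer using DB_DBT_USERMEM; the
+handle <b>dbp</b> is assumed to be an open <a href="../api_c/db_class.html">DB</a>
+handle, and the key value is hypothetical:
+<p><blockquote><pre>DBT key, data;
+char databuf[1024];
+int ret;
+/* Clear the DBT structures before first use. */
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "fruit";
+key.size = sizeof("fruit");
+/* Have Berkeley DB copy the data item into the application's buffer. */
+data.data = databuf;
+data.ulen = sizeof(databuf);
+data.flags = DB_DBT_USERMEM;
+if ((ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) != 0) {
+        /* ENOMEM here means databuf was too small; data.size holds
+         * the length needed to retrieve the item. */
+}</pre></blockquote>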
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_class.html b/libdb/docs/api_c/env_class.html
new file mode 100644
index 0000000..d1f6172
--- /dev/null
+++ b/libdb/docs/api_c/env_class.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_env DB_ENV;
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV object is the handle for a Berkeley DB environment -- a
+collection including support for some or all of caching, locking,
+logging and transaction subsystems, as well as databases and log files.
+Methods off the DB_ENV handle are used to configure the
+environment as well as to operate on subsystems and databases in the
+environment.
+<p>DB_ENV handles are free-threaded if the <a href="../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag
+is specified to the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> method when the environment is opened.
+The DB_ENV handle should not be closed while any other handle
+remains open that is using it as a reference (for example, <a href="../api_c/db_class.html">DB</a>
+or <a href="../api_c/txn_class.html">DB_TXN</a>). Once either the <a href="../api_c/env_close.html">DB_ENV-&gt;close</a> or
+<a href="../api_c/env_remove.html">DB_ENV-&gt;remove</a> methods are called, the handle may not be accessed again,
+regardless of the method's return.
+<h1>Class</h1>
+DB_ENV
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_close.html b/libdb/docs/api_c/env_close.html
new file mode 100644
index 0000000..a2249a7
--- /dev/null
+++ b/libdb/docs/api_c/env_close.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;close(DB_ENV *dbenv, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;close method closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DB_ENV-&gt;close does not imply closing any databases that
+were opened in the environment, and all databases opened in the
+environment should be closed before the environment is closed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> flag,
+calling DB_ENV-&gt;close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+Processes that want to have all their locks
+released can do so by issuing the appropriate <a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> call.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>
+flag, calling DB_ENV-&gt;close implies calls to <a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>. It does not imply a call to
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag,
+calling DB_ENV-&gt;close aborts any unresolved transactions.
+Applications should not depend on this behavior for transactions
+involving Berkeley DB databases; all such transactions should be explicitly
+resolved. The problem with depending on this semantic is that aborting
+an unresolved transaction involving database operations requires a
+database handle. Because the database handles should have been closed before
+calling DB_ENV-&gt;close, it will not be possible to abort the
+transaction, and recovery will have to be run on the Berkeley DB environment
+before further operations are done.
+<p>Where log cursors were created using the <a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> method, calling
+DB_ENV-&gt;close does not imply closing those cursors.
+<p>In multithreaded applications, only a single thread may call
+DB_ENV-&gt;close.
+<p>After DB_ENV-&gt;close has been called, regardless of its return, the
+Berkeley DB environment handle may not be accessed again.
+<p>The DB_ENV-&gt;close method returns a non-zero error value on failure and 0 on success.
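+<p>As an illustrative sketch only (not part of the original text), the
+following fragment shows the recommended shutdown order, closing an open
+database handle before the environment itself; the handles <b>dbp</b> and
+<b>dbenv</b> are assumed to be open:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret, t_ret;
+/* Close all database handles opened in the environment first. */
+ret = dbp-&gt;close(dbp, 0);
+/* Then close the environment, preserving the first error seen. */
+if ((t_ret = dbenv-&gt;close(dbenv, 0)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;</pre></blockquote>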
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_create.html b/libdb/docs/api_c/env_create.html
new file mode 100644
index 0000000..5e7bfee
--- /dev/null
+++ b/libdb/docs/api_c/env_create.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_create</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_create</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_create(DB_ENV **dbenvp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_env_create method creates a <a href="../api_c/env_class.html">DB_ENV</a> structure that is the
+handle for a Berkeley DB environment. A pointer to this structure is returned
+in the memory to which <b>dbenvp</b> refers. Calling the
+<a href="../api_c/env_close.html">DB_ENV-&gt;close</a> or <a href="../api_c/env_remove.html">DB_ENV-&gt;remove</a> methods will discard the returned
+handle.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_CLIENT">DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the application
+must call the <a href="../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> function to establish the
+connection to the server.
+</dl>
+<p>The <a href="../api_c/env_class.html">DB_ENV</a> handle contains a special field, "app_private", which
+is declared as type "void *". This field is provided for the use of
+the application program. It is initialized to NULL and is not further
+used by Berkeley DB in any way.
+<p>The db_env_create method returns a non-zero error value on failure and 0 on success.
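+<p>For example, a handle might be created and prepared for configuration as
+follows; the function name is illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Illustrative sketch: allocate a DB_ENV handle for later configuration. */
+int
+create_env_handle(DB_ENV **dbenvp)
+{
+	int ret;
+
+	if ((ret = db_env_create(dbenvp, 0)) != 0) {
+		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
+		return (ret);
+	}
+	/* The handle may now be configured and then opened. */
+	(*dbenvp)-&gt;app_private = NULL;	/* field reserved for the application */
+	return (0);
+}
+</pre></blockquote>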
+<h1>Errors</h1>
+<p>The db_env_create method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_create method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_dbremove.html b/libdb/docs/api_c/env_dbremove.html
new file mode 100644
index 0000000..1c9bdbc
--- /dev/null
+++ b/libdb/docs/api_c/env_dbremove.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;dbremove</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;dbremove(DB_ENV *dbenv, DB_TXN *txnid,
+ const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;dbremove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_c/db_class.html">DB</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB_ENV-&gt;dbremove call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DB_ENV-&gt;dbremove method returns a non-zero error value on failure and 0 on success.
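+<p>For example, a database might be removed under the protection of a
+transaction as follows; the file, database, and function names are hypothetical:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: transactionally remove one database from a file. */
+int
+remove_stale_db(DB_ENV *dbenv)
+{
+	int ret;
+
+	ret = dbenv-&gt;dbremove(dbenv,
+	    NULL, "inventory.db", "stale-index", DB_AUTO_COMMIT);
+	if (ret != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;dbremove: inventory.db");
+	return (ret);
+}
+</pre></blockquote>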
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB_ENV-&gt;dbremove is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;dbremove method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;dbremove method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;dbremove method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_dbrename.html b/libdb/docs/api_c/env_dbrename.html
new file mode 100644
index 0000000..59fb99d
--- /dev/null
+++ b/libdb/docs/api_c/env_dbrename.html
@@ -0,0 +1,86 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;dbrename</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;dbrename(DB_ENV *dbenv, DB_TXN *txnid, const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;dbrename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DB_ENV-&gt;dbrename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DB_ENV-&gt;dbrename call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DB_ENV-&gt;dbrename method returns a non-zero error value on failure and 0 on success.
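+<p>For example, a database might be renamed under the protection of a
+transaction as follows; the file, database, and function names are hypothetical:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: transactionally rename a database within a file. */
+int
+rename_parts_db(DB_ENV *dbenv)
+{
+	int ret;
+
+	ret = dbenv-&gt;dbrename(dbenv,
+	    NULL, "inventory.db", "parts", "parts-archive", DB_AUTO_COMMIT);
+	if (ret != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;dbrename: parts");
+	return (ret);
+}
+</pre></blockquote>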
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DB_ENV-&gt;dbrename is affected by any database directory specified using the
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;dbrename method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;dbrename method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;dbrename method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_err.html b/libdb/docs/api_c/env_err.html
new file mode 100644
index 0000000..d6cd2d1
--- /dev/null
+++ b/libdb/docs/api_c/env_err.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;err</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB_ENV-&gt;err(DB_ENV *dbenv, int error, const char *fmt, ...);
+<p>
+void
+DB_ENV-&gt;errx(DB_ENV *dbenv, const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;err, DB_ENV-&gt;errx, <a href="../api_c/db_err.html">DB-&gt;err</a> and
+<a href="../api_c/db_err.html">DB-&gt;errx</a> methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The DB_ENV-&gt;err method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback function has been set using the
+<a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a> method, any prefix string specified using the
+<a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, in which the
+ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent arguments
+are converted for output.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_c/env_strerror.html">db_strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback function has been set (see <a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>
+and <a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>), that function is called with two
+arguments: any prefix string specified (see <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> and
+<a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>) and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> and
+<a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a>), the error message is written to that output
+stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard
+error output stream.</blockquote>
+<p>The DB_ENV-&gt;errx and <a href="../api_c/db_err.html">DB-&gt;errx</a> methods perform identically to the
+DB_ENV-&gt;err and <a href="../api_c/db_err.html">DB-&gt;err</a> methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
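+<p>For example, a failure might be reported through the environment's
+configured error channels as follows; the function and argument names are
+illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: report an open failure for a database file. */
+void
+report_failure(DB_ENV *dbenv, const char *name, int ret)
+{
+	/* Appends the standard error string for "ret" to the message. */
+	dbenv-&gt;err(dbenv, ret, "open of %s failed", name);
+
+	/* errx omits the trailing separator and error string. */
+	dbenv-&gt;errx(dbenv, "giving up on %s", name);
+}
+</pre></blockquote>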
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_list.html b/libdb/docs/api_c/env_list.html
new file mode 100644
index 0000000..5b2777d
--- /dev/null
+++ b/libdb/docs/api_c/env_list.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Environments and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Environments and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Environments and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_create.html">db_env_create</a></td><td>Create an environment handle</td></tr>
+<tr><td><a href="../api_c/env_close.html">DB_ENV-&gt;close</a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_c/env_err.html">DB_ENV-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_c/env_err.html">DB_ENV-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><a href="../api_c/log_file.html">DB_ENV-&gt;log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_c/env_open.html">DB_ENV-&gt;open</a></td><td>Open an environment</td></tr>
+<tr><td><a href="../api_c/env_remove.html">DB_ENV-&gt;remove</a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_c/rep_start.html">DB_ENV-&gt;rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><a href="../api_c/env_set_encrypt.html">DB_ENV-&gt;set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_c/env_set_feedback.html">DB_ENV-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><a href="../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><a href="../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><a href="../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_open.html b/libdb/docs/api_c/env_open.html
new file mode 100644
index 0000000..74e05f4
--- /dev/null
+++ b/libdb/docs/api_c/env_open.html
@@ -0,0 +1,188 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;open</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;open(DB_ENV *, char *db_home, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;open method is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DB_ENV-&gt;open (and filename
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p>Because there are a large number of flags that can be specified, they
+have been grouped together by functionality. The first group of flags
+indicates which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="DB_JOINENV">DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="DB_INIT_CDB">DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+DB_INIT_CDB flag is DB_INIT_MPOOL.
+<p><dt><a name="DB_INIT_LOCK">DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a
+Berkeley DB database, so that they do not interfere with each other. If all
+threads are accessing the database(s) read-only, locking is unnecessary.
+When the DB_INIT_LOCK flag is specified, it is usually necessary
+to run a deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> for more information.
+<p><dt><a name="DB_INIT_LOG">DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem should be used when
+recovery from application or system failure is necessary. If the log
+region is being created and log files are already present, the log files
+are reviewed; subsequent log writes are appended to the end of the log,
+rather than overwriting current log entries.
+<p><dt><a name="DB_INIT_MPOOL">DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem
+should be used whenever an application is using any Berkeley DB access
+method.
+<p><dt><a name="DB_INIT_TXN">DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem should be used
+when recovery and atomicity of multiple operations are important. The
+DB_INIT_TXN flag implies the DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags governs what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal
+use. If this flag is set, the DB_CREATE flag must also be set
+because the regions will be removed and re-created.
+<p><dt><a name="DB_RECOVER_FATAL">DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for
+normal use. If this flag is set, the DB_CREATE flag must also
+be set because the regions will be removed and re-created.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct
+region initialization information (for example, the correct memory pool
+cache size), the result can be an application running in an environment
+with incorrect cache and other subsystem sizes. For this reason, the
+thread of control performing recovery should specify correct
+configuration information before calling the DB_ENV-&gt;open method; or it
+should remove the environment after recovery is completed, leaving
+creation of the correctly sized environment to a subsequent call to
+DB_ENV-&gt;open.
+<p>All Berkeley DB recovery processing must be single-threaded; that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. Because it is not an
+error to specify DB_RECOVER for an environment for which no
+recovery is required, it is reasonable programming practice for the
+thread of control responsible for performing recovery and creating the
+environment to always specify the DB_CREATE and
+DB_RECOVER flags during startup.
+<p>The DB_ENV-&gt;open function returns successfully if DB_RECOVER
+or DB_RECOVER_FATAL is specified and no log files exist, so it
+is necessary to ensure that all necessary log files are present before
+running recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a>
+and <a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags governs file-naming extensions in the environment:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>Finally, there are a few additional unrelated flags:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="DB_LOCKDOWN">DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory-mapped databases into
+memory.
+<p><dt><a name="DB_PRIVATE">DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multithreaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment because it is likely to cause database
+corruption and unpredictable behavior. For example, if both a server
+application and the Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> are expected to access
+the environment, the DB_PRIVATE flag should not be
+specified.
+<p><dt><a name="DB_SYSTEM_MEM">DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_c/env_class.html">DB_ENV</a> handle returned by DB_ENV-&gt;open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+Berkeley DB are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, Berkeley DB will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>The DB_ENV-&gt;open method returns a non-zero error value on failure and 0 on success.
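+<p>For example, a transaction-protected environment might be created or
+joined, running recovery as necessary, as follows; the home directory and
+function name are illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: open a transactional environment. */
+int
+open_txn_env(DB_ENV *dbenv)
+{
+	u_int32_t flags;
+	int ret;
+
+	flags = DB_CREATE | DB_RECOVER |
+	    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+	if ((ret = dbenv-&gt;open(dbenv, "/var/myapp/env", flags, 0)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;open: /var/myapp/env");
+	return (ret);
+}
+</pre></blockquote>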
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;open method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The DB_THREAD flag was specified and fast mutexes are not
+available for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set, but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>The DB_ENV-&gt;open method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;open method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_remove.html b/libdb/docs/api_c/env_remove.html
new file mode 100644
index 0000000..a89536b
--- /dev/null
+++ b/libdb/docs/api_c/env_remove.html
@@ -0,0 +1,110 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;remove</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;remove(DB_ENV *, char *db_home, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;remove method destroys a Berkeley DB environment if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DB_ENV-&gt;remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> without
+calling <a href="../api_c/env_close.html">DB_ENV-&gt;close</a> (that is, there are processes currently
+using the environment), DB_ENV-&gt;remove will fail without further
+action unless the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, in which case
+DB_ENV-&gt;remove will attempt to remove the environment, regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed, and processes that have already
+joined the region will continue to run in that region without change.
+However, processes attempting to join the environment will either fail
+or create new regions. On other systems in which the <b>unlink</b>(2) system call will fail if any process has an open file descriptor for
+the file (for example Windows/NT), the region removal will fail.
+<p>Calling DB_ENV-&gt;remove should not be necessary for most applications
+because the Berkeley DB environment is cleaned up as part of normal database
+recovery procedures. However, applications may want to call
+DB_ENV-&gt;remove as part of application shut down to free up system
+resources. For example, if the <a href="../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was specified
+to <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>, it may be useful to call DB_ENV-&gt;remove in
+order to release system shared memory segments that have been allocated.
+Or, on architectures in which mutexes require allocation of underlying
+system resources, it may be useful to call DB_ENV-&gt;remove in order
+to release those resources. Alternatively, if recovery is not required
+because no database state is maintained across failures, and no system
+resources need to be released, it is possible to clean up an environment
+by simply removing all the Berkeley DB files in the database environment's
+directories.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>If the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, the environment is removed, regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_c/env_remove.html#DB_FORCE">DB_FORCE</a> flag is
+specified only when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>In multithreaded applications, only a single thread may call
+DB_ENV-&gt;remove.
+<p>A <a href="../api_c/env_class.html">DB_ENV</a> handle that has already been used to open an environment
+should not be used to call the DB_ENV-&gt;remove method; a new
+<a href="../api_c/env_class.html">DB_ENV</a> handle should be created for that purpose.
+<p>After DB_ENV-&gt;remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DB_ENV-&gt;remove method returns a non-zero error value on failure and 0 on success.
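+<p>For example, an environment's regions might be discarded during application
+shutdown as follows; the function name is illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Illustrative sketch: remove an unused environment's regions. */
+int
+discard_env(char *home)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	/* A handle that has not been used to open an environment is required. */
+	if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+		return (ret);
+	if ((ret = dbenv-&gt;remove(dbenv, home, 0)) != 0)
+		fprintf(stderr, "DB_ENV-&gt;remove: %s\n", db_strerror(ret));
+	return (ret);
+}
+</pre></blockquote>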
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>The DB_ENV-&gt;remove method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;remove method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_alloc.html b/libdb/docs/api_c/env_set_alloc.html
new file mode 100644
index 0000000..58f3d31
--- /dev/null
+++ b/libdb/docs/api_c/env_set_alloc.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_alloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_alloc</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_alloc(DB_ENV *db,
+ void *(*app_malloc)(size_t),
+ void *(*app_realloc)(void *, size_t),
+ void (*app_free)(void *));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation functions used by the <a href="../api_c/env_class.html">DB_ENV</a> and <a href="../api_c/db_class.html">DB</a>
+methods to allocate or free memory owned by the application.
+<p>There are a number of interfaces in Berkeley DB where memory is allocated by
+the library and then given to the application. For example, the
+<a href="../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_c/dbt_class.html">DBT</a> object,
+will cause the <a href="../api_c/db_class.html">DB</a> methods to allocate and reallocate memory
+which then becomes the responsibility of the calling application. (See
+<a href="../api_c/dbt_class.html">DBT</a> for more information.) Other examples are the Berkeley DB
+interfaces which return statistical information to the application:
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>, <a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>, <a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>,
+<a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>, <a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>, and <a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>. There is
+one interface in the Berkeley DB where memory is allocated by the application
+and then given to the library: <a href="../api_c/db_associate.html">DB-&gt;associate</a>.
+<p>On systems in which there may be multiple library versions of the
+standard allocation routines (notably Windows NT), transferring memory
+between the library and the application will fail because the Berkeley DB
+library allocates memory from a different heap than the application uses
+to free it. To avoid this problem, the DB_ENV-&gt;set_alloc and
+<a href="../api_c/db_set_alloc.html">DB-&gt;set_alloc</a> methods can be used to pass Berkeley DB references to the
+application's allocation routines.
+<p>It is not an error to specify only one or two of the possible allocation
+function arguments to these interfaces; however, in that case the
+specified interfaces must be compatible with the standard library
+interfaces, as they will be used together. The functions specified
+must match the calling conventions of the ANSI C X3.159-1989 (ANSI C) library routines
+of the same name.
+<p>The DB_ENV-&gt;set_alloc method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_alloc interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+<p>The DB_ENV-&gt;set_alloc method returns a non-zero error value on failure and 0 on success.
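+<p>For example, Berkeley DB can be directed to use the same C library allocator
+the application itself links against (the usual concern on Windows NT, where
+multiple heaps may be present); the function name is illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Illustrative sketch: share the application's allocator with Berkeley DB.
+ * Must be called before DB_ENV-&gt;open. */
+int
+configure_allocators(DB_ENV *dbenv)
+{
+	return (dbenv-&gt;set_alloc(dbenv, malloc, realloc, free));
+}
+</pre></blockquote>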
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_alloc method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_alloc method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_alloc method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_app_dispatch.html b/libdb/docs/api_c/env_set_app_dispatch.html
new file mode 100644
index 0000000..c05a35a
--- /dev/null
+++ b/libdb/docs/api_c/env_set_app_dispatch.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_app_dispatch</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_app_dispatch</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_app_dispatch(DB_ENV *dbenv,
+ int (*tx_recover)(DB_ENV *dbenv,
+ DBT *log_rec, DB_LSN *lsn, db_recops op));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's function to be called during transaction abort
+and recovery. This function must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not; undo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward; redo the operation described by the log
+record.
+<p><dt><a name="DB_TXN_ABORT">DB_TXN_ABORT</a><dd>The log is being read backward during a transaction abort; undo the
+operation described by the log record.
+<p><dt><a name="DB_TXN_APPLY">DB_TXN_APPLY</a><dd>The log is being applied on a replica site; redo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_PRINT">DB_TXN_PRINT</a><dd>The log is being printed for debugging purposes; print the contents of
+this log record in the desired format.
+</dl>
+</dl>
+<p>The DB_TXN_FORWARD_ROLL and DB_TXN_APPLY operations
+frequently imply the same actions, redoing changes that appear in the
+log record, although if a recovery function is to be used on a
+replication client where reads may be taking place concurrently with
+the processing of incoming messages, DB_TXN_APPLY operations
+should also perform appropriate locking. The macro DB_REDO(op) checks
+that the operation is one of DB_TXN_FORWARD_ROLL or
+DB_TXN_APPLY, and should be used in the recovery code to refer
+to the conditions under which operations should be redone. Similarly,
+the macro DB_UNDO(op) checks if the operation is one of
+DB_TXN_BACKWARD_ROLL or DB_TXN_ABORT.
+<p>The DB_ENV-&gt;set_app_dispatch method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_app_dispatch interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_app_dispatch
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DB_ENV-&gt;set_app_dispatch method returns a non-zero error value on failure and 0 on success.
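+<p>For example, a skeletal dispatch function might be structured as follows;
+the function name is illustrative, and a real application would decode
+<b>log_rec</b> and apply its own redo and undo logic where the comments appear:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: application recovery dispatch function. */
+int
+app_dispatch(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op)
+{
+	if (DB_REDO(op)) {
+		/* DB_TXN_FORWARD_ROLL or DB_TXN_APPLY: redo the change. */
+	} else if (DB_UNDO(op)) {
+		/* DB_TXN_BACKWARD_ROLL or DB_TXN_ABORT: undo the change. */
+	} else if (op == DB_TXN_PRINT) {
+		/* Print the record contents for debugging. */
+	}
+	return (0);
+}
+</pre></blockquote>
+<p>The function is then registered before the environment is opened:
+<p><blockquote><pre>
+	ret = dbenv-&gt;set_app_dispatch(dbenv, app_dispatch);
+</pre></blockquote>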
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_app_dispatch method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_app_dispatch method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_app_dispatch method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_cachesize.html b/libdb/docs/api_c/env_set_cachesize.html
new file mode 100644
index 0000000..cff9cd8
--- /dev/null
+++ b/libdb/docs/api_c/env_set_cachesize.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_cachesize(DB_ENV *dbenv,
+ u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>The DB_ENV-&gt;set_cachesize method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_cachesize interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_cachesize
+will be ignored.
+<p>The DB_ENV-&gt;set_cachesize method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
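+<p>For example, a 64MB cache allocated as a single contiguous region might be
+configured before the environment is opened; the size and function name are
+illustrative only:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Illustrative sketch: 0 gigabytes plus 64MB, in one cache region. */
+int
+configure_cache(DB_ENV *dbenv)
+{
+	return (dbenv-&gt;set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1));
+}
+</pre></blockquote>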
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_cachesize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called after
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+was called.
+</dl>
+<p>The DB_ENV-&gt;set_cachesize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_cachesize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_data_dir.html b/libdb/docs/api_c/env_set_data_dir.html
new file mode 100644
index 0000000..f6303f4
--- /dev/null
+++ b/libdb/docs/api_c/env_set_data_dir.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_data_dir</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_data_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_c/db_open.html">DB-&gt;open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can exist only
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DB_ENV-&gt;set_data_dir method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_data_dir interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_data_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DB_ENV-&gt;set_data_dir method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
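+<p>As an illustration (the directory name is hypothetical), a data directory
+might be added before the environment is opened:
+<p><blockquote><pre>/* Database files are searched for, and created in, DB_HOME/data. */
+if ((ret = dbenv-&gt;set_data_dir(dbenv, "data")) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_data_dir: data");</pre></blockquote>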
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_data_dir method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_data_dir method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_data_dir method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_encrypt.html b/libdb/docs/api_c/env_set_encrypt.html
new file mode 100644
index 0000000..61af2b4
--- /dev/null
+++ b/libdb/docs/api_c/env_set_encrypt.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_encrypt(DB_ENV *dbenv, const char *passwd, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_c/env_class.html">DB_ENV</a> and <a href="../api_c/db_class.html">DB</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_ENCRYPT_AES">DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>The DB_ENV-&gt;set_encrypt method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_encrypt interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_encrypt
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DB_ENV-&gt;set_encrypt method returns a non-zero error value on failure and 0 on success.
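+<p>For example (the password is illustrative only), AES encryption might be
+configured before the environment is opened:
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_encrypt(dbenv, "example passwd", DB_ENCRYPT_AES)) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_encrypt");</pre></blockquote>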
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_encrypt method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The DB_ENV-&gt;set_encrypt method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_encrypt method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_errcall.html b/libdb/docs/api_c/env_set_errcall.html
new file mode 100644
index 0000000..76ee0ab
--- /dev/null
+++ b/libdb/docs/api_c/env_set_errcall.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB_ENV-&gt;set_errcall(DB_ENV *dbenv,
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DB_ENV-&gt;set_errcall and <a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a> methods are used to
+enhance the mechanism for reporting error messages to the application.
+In some cases, when an error occurs, Berkeley DB will call
+<b>db_errcall_fcn</b> with additional error information. The function
+must be declared with two arguments; the first will be the prefix string
+(as previously set by <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>);
+the second will be the error message string. It is up to the
+<b>db_errcall_fcn</b> function to display the error message in an
+appropriate manner.
+<p>Alternatively, you can use the <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> or
+<a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a> methods to display the additional information via
+a C library FILE *.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DB_ENV-&gt;set_errcall interface may be called at any time during the life of
+the application.
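+<p>A minimal callback sketch (the function name is an example, not part of the
+Berkeley DB API) might be:
+<p><blockquote><pre>void
+my_errcall(const char *errpfx, char *msg)
+{
+	/* The prefix is NULL if no prefix string has been set. */
+	fprintf(stderr, "%s: %s\n", errpfx == NULL ? "bdb" : errpfx, msg);
+}
+
+/* ... after creating the DB_ENV handle: */
+dbenv-&gt;set_errcall(dbenv, my_errcall);</pre></blockquote>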
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_errfile.html b/libdb/docs/api_c/env_set_errfile.html
new file mode 100644
index 0000000..5c1c76c
--- /dev/null
+++ b/libdb/docs/api_c/env_set_errfile.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_errfile</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB_ENV-&gt;set_errfile(DB_ENV *dbenv, FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however,
+the <b>errno</b> value may be insufficient to completely describe
+the cause of the error, especially during initial application debugging.
+<p>The DB_ENV-&gt;set_errfile and <a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> methods are used to
+enhance the mechanism for reporting error messages to the application
+by setting a C library FILE * to be used for displaying additional Berkeley DB
+error messages. In some cases, when an error occurs, Berkeley DB will output
+an additional error message to the specified file reference.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> or <a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DB_ENV-&gt;set_errfile interface may be called at any time during the life of
+the application.
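+<p>For example, additional error messages might simply be directed to the
+standard error output:
+<p><blockquote><pre>dbenv-&gt;set_errfile(dbenv, stderr);</pre></blockquote>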
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_errpfx.html b/libdb/docs/api_c/env_set_errpfx.html
new file mode 100644
index 0000000..7b99a29
--- /dev/null
+++ b/libdb/docs/api_c/env_set_errpfx.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+void
+DB_ENV-&gt;set_errpfx(DB_ENV *dbenv, const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The <a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> and DB_ENV-&gt;set_errpfx methods do not copy
+the memory to which the <b>errpfx</b> argument refers; rather, they
+maintain a reference to it. Although this allows applications to modify
+the error message prefix at any time (without repeatedly calling the
+interfaces), it means the memory must be maintained until the handle is
+closed.
+<p>The DB_ENV-&gt;set_errpfx interface may be called at any time during the life of
+the application.
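+<p>Because the prefix is not copied, it should refer to storage that remains
+valid for the life of the handle; a string literal, as in this illustrative
+fragment, is sufficient:
+<p><blockquote><pre>dbenv-&gt;set_errpfx(dbenv, "myapp");</pre></blockquote>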
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_feedback.html b/libdb/docs/api_c/env_set_feedback.html
new file mode 100644
index 0000000..0e77056
--- /dev/null
+++ b/libdb/docs/api_c/env_set_feedback.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_feedback(DB_ENV *dbenv,
+    void (*db_feedback_fcn)(DB_ENV *dbenv, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DB_ENV-&gt;set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback function. This function must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback function to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DB_ENV-&gt;set_feedback interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_feedback method returns a non-zero error value on failure and 0 on success.
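+<p>A callback sketch (the function name is an example) that reports recovery
+progress might look like:
+<p><blockquote><pre>void
+my_feedback(DB_ENV *dbenv, int opcode, int pct)
+{
+	if (opcode == DB_RECOVER)
+		printf("recovery %d%% complete\n", pct);
+}
+
+/* ... after creating the DB_ENV handle: */
+dbenv-&gt;set_feedback(dbenv, my_feedback);</pre></blockquote>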
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_flags.html b/libdb/docs/api_c/env_set_flags.html
new file mode 100644
index 0000000..b2e00b5
--- /dev/null
+++ b/libdb/docs/api_c/env_set_flags.html
@@ -0,0 +1,240 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_flags(DB_ENV *dbenv, u_int32_t flags, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>If <b>onoff</b> is zero, the specified flags are cleared; otherwise, they
+are set. The <b>flags</b> value must be set to 0 or by bitwise inclusively
+<b>OR</b>'ing together one or more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>If set, operations for which no explicit transaction handle was
+specified, and which modify databases in the database environment, will
+be automatically enclosed within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+<p>Calling DB_ENV-&gt;set_flags with the <a href="../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the <a href="../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The <a href="../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_CDB_ALLDB">DB_CDB_ALLDB</a><dd>If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide
+basis rather than on a per-database basis.
+<p>Calling DB_ENV-&gt;set_flags with the DB_CDB_ALLDB flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_CDB_ALLDB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_CDB_ALLDB flag may be used to configure Berkeley DB only before the
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> interface is called.
+<a name="4"><!--meow--></a>
+<p><dt><a name="DB_DIRECT_DB">DB_DIRECT_DB</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB database files to avoid double caching.
+<p>Calling DB_ENV-&gt;set_flags with the DB_DIRECT_DB flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_DIRECT_DB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_DIRECT_DB flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="5"><!--meow--></a>
+<p><dt><a name="DB_DIRECT_LOG">DB_DIRECT_LOG</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB log files to avoid double caching.
+<p>Calling DB_ENV-&gt;set_flags with the DB_DIRECT_LOG flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_DIRECT_LOG flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_DIRECT_LOG flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="6"><!--meow--></a>
+<p><dt><a name="DB_NOLOCKING">DB_NOLOCKING</a><dd>If set, Berkeley DB will grant all requested mutual exclusion mutexes and
+database locks without regard for their actual availability. This
+functionality should never be used for purposes other than debugging.
+<p>Calling DB_ENV-&gt;set_flags with the DB_NOLOCKING flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_NOLOCKING flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="7"><!--meow--></a>
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>If set, Berkeley DB will copy read-only database files into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a> method for further
+information).
+<p>Calling DB_ENV-&gt;set_flags with the DB_NOMMAP flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_NOMMAP flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_NOMMAP flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="8"><!--meow--></a>
+<p><dt><a name="DB_NOPANIC">DB_NOPANIC</a><dd>If set, Berkeley DB will ignore any panic state in the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>.) This
+functionality should never be used for purposes other than debugging.
+<p>Calling DB_ENV-&gt;set_flags with the DB_NOPANIC flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_NOPANIC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<p><dt><a name="DB_OVERWRITE">DB_OVERWRITE</a><dd>Overwrite files stored in encrypted formats before deleting them. Berkeley DB
+overwrites files using alternating 0xff, 0x00 and 0xff byte patterns.
+For file overwriting to be effective, the underlying file must be stored
+on a fixed-block filesystem. Systems with journaling or logging filesystems
+will require operating system support and probably modification of the
+Berkeley DB sources.
+<p>Calling DB_ENV-&gt;set_flags with the DB_OVERWRITE flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_OVERWRITE flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="9"><!--meow--></a>
+<p><dt><a name="DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a><dd>If set, Berkeley DB will set the panic state for the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>.) This flag may
+not be specified using the environment's <b>DB_CONFIG</b> file. This
+flag may be used to configure Berkeley DB only after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+<p>Calling DB_ENV-&gt;set_flags with the DB_PANIC_ENVIRONMENT flag affects the
+database environment, including all threads of control accessing the
+database environment.
+<p>The DB_PANIC_ENVIRONMENT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="10"><!--meow--></a>
+<p><dt><a name="DB_REGION_INIT">DB_REGION_INIT</a><dd>In some applications, the expense of page-faulting the underlying shared
+memory regions can affect performance. (For example, if the page-fault
+occurs while holding a lock, other lock requests can convoy, and overall
+throughput may decrease.) If set, Berkeley DB will page-fault shared regions
+into memory when initially creating or joining a Berkeley DB environment. In
+addition, Berkeley DB will write the shared regions when creating an
+environment, forcing the underlying virtual memory and filesystems to
+instantiate both the necessary memory and the necessary disk space.
+This can also avoid out-of-disk space failures later on.
+<p>Calling DB_ENV-&gt;set_flags with the DB_REGION_INIT flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_REGION_INIT flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_REGION_INIT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="11"><!--meow--></a>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>If set, Berkeley DB will not write or synchronously flush the log on transaction
+commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the application or system fails,
+it is possible some number of the most recently committed transactions
+may be undone during recovery. The number of transactions at risk is
+governed by how many log updates can fit into the log buffer, how often
+the operating system flushes dirty buffers to disk, and how often the
+log is checkpointed.
+<p>Calling DB_ENV-&gt;set_flags with the DB_TXN_NOSYNC flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_TXN_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_TXN_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="12"><!--meow--></a>
+<p><dt><a name="DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a><dd>If set, Berkeley DB will write, but will not synchronously flush, the log on
+transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the system fails, it is possible
+some number of the most recently committed transactions may be undone
+during recovery. The number of transactions at risk is governed by how
+often the system flushes dirty buffers to disk and how often the log is
+checkpointed.
+<p>Calling DB_ENV-&gt;set_flags with the DB_TXN_WRITE_NOSYNC flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_TXN_WRITE_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_TXN_WRITE_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="13"><!--meow--></a>
+<p><dt><a name="DB_YIELDCPU">DB_YIELDCPU</a><dd>If set, Berkeley DB will yield the processor immediately after each page or
+mutex acquisition. This functionality should never be used for purposes
+other than stress testing.
+<p>Calling DB_ENV-&gt;set_flags with the DB_YIELDCPU flag only affects
+the specified <a href="../api_c/env_class.html">DB_ENV</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_c/env_class.html">DB_ENV</a>
+handles opened in the environment must either set the DB_YIELDCPU flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_YIELDCPU flag may be used to configure Berkeley DB at any time during
+the life of the application.
+</dl>
+<p>The DB_ENV-&gt;set_flags method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string; for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
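+<p>For example (a sketch; error handling abbreviated), an application might
+enable automatically wrapped transactions and trade durability for throughput:
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_flags(dbenv, DB_AUTO_COMMIT | DB_TXN_NOSYNC, 1)) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_flags");</pre></blockquote>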
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_flags method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;set_flags method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_flags method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lg_bsize.html b/libdb/docs/api_c/env_set_lg_bsize.html
new file mode 100644
index 0000000..935de04
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lg_bsize.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lg_bsize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lg_bsize(DB_ENV *dbenv, u_int32_t lg_bsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used. The size of the log file
+(see <a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a>) must be at least four times the size of
+the in-memory log buffer.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DB_ENV-&gt;set_lg_bsize method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lg_bsize interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lg_bsize
+will be ignored.
+<p>The DB_ENV-&gt;set_lg_bsize method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
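+<p>For example, a 256KB log buffer might be configured before the environment
+is opened (the log file size must then be at least 1MB; see
+<a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a>):
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_lg_bsize(dbenv, 256 * 1024)) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_lg_bsize");</pre></blockquote>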
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lg_bsize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+</dl>
+<p>The DB_ENV-&gt;set_lg_bsize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lg_bsize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lg_dir.html b/libdb/docs/api_c/env_set_lg_dir.html
new file mode 100644
index 0000000..d12a78d
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lg_dir.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lg_dir</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lg_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DB_ENV-&gt;set_lg_dir method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_lg_dir interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lg_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DB_ENV-&gt;set_lg_dir method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
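+<p>As an illustration (the directory name is hypothetical), log files might be
+placed on a device separate from the database files:
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_lg_dir(dbenv, "/logdisk/myapp")) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_lg_dir");</pre></blockquote>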
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lg_dir method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lg_dir method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lg_dir method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lg_max.html b/libdb/docs/api_c/env_set_lg_max.html
new file mode 100644
index 0000000..c2b6b7d
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lg_max.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lg_max</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lg_max(DB_ENV *dbenv, u_int32_t lg_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. By default,
+or if the <b>lg_max</b> argument is set to 0, a size of 10MB is used.
+Because <a href="../api_c/lsn_class.html">DB_LSN</a> file offsets are unsigned four-byte values, the
+set value may not be larger than the maximum unsigned four-byte value.
+The size of the log file must be at least four times the size of the
+in-memory log buffer (see <a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a>).
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DB_ENV-&gt;set_lg_max method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lg_max interface may be called at any time during the life of
+the application.
+If no size is specified by the application, the size last specified for
+the database region will be used, or if no database region previously
+existed, the default will be used.
+<p>The DB_ENV-&gt;set_lg_max method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
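+<p>For example (a sketch; error handling abbreviated), a 20MB maximum log file
+size might be configured as follows:
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_lg_max(dbenv, 20 * 1024 * 1024)) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_lg_max");</pre></blockquote>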
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lg_max method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+<p>The specified log file size was too large.
+</dl>
+<p>The DB_ENV-&gt;set_lg_max method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lg_max method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lg_regionmax.html b/libdb/docs/api_c/env_set_lg_regionmax.html
new file mode 100644
index 0000000..7364c0e
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lg_regionmax.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lg_regionmax</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lg_regionmax</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lg_regionmax(DB_ENV *dbenv, u_int32_t lg_regionmax);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the underlying logging subsystem region, in bytes. By
+default, or if the value is set to 0, the base region size is 60KB.
+The log region is used to store filenames, and so may need to be
+increased in size if a large number of files will be opened and
+registered with the specified Berkeley DB environment's log manager.
+<p>The DB_ENV-&gt;set_lg_regionmax method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lg_regionmax interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lg_regionmax
+will be ignored.
+<p>The DB_ENV-&gt;set_lg_regionmax method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's log region size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_regionmax", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lg_regionmax method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lg_regionmax method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lg_regionmax method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lk_conflicts.html b/libdb/docs/api_c/env_set_lk_conflicts.html
new file mode 100644
index 0000000..c295d3c
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lk_conflicts.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lk_conflicts</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lk_conflicts(DB_ENV *dbenv,
+ u_int8_t *conflicts, int nmodes);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+The <b>conflicts</b> argument
+is an <b>nmodes</b> by <b>nmodes</b> array.
+A non-0 value for the array element indicates that requested_mode and
+held_mode conflict:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>The <i>not-granted</i> mode must be represented by 0.
+<p>If DB_ENV-&gt;set_lk_conflicts is never called, a standard conflicts
+array is used; see <a href="../ref/lock/stdmode.html">Standard Lock
+Modes</a> for more information.
+<p>The DB_ENV-&gt;set_lk_conflicts method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lk_conflicts interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lk_conflicts
+will be ignored.
+<p>The DB_ENV-&gt;set_lk_conflicts method returns a non-zero error value on failure and 0 on success.
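+<p>A sketch of a minimal two-mode matrix, where mode 0 is the required
+not-granted mode and mode 1 is an illustrative exclusive mode that conflicts
+only with itself:
+<p><blockquote><pre>u_int8_t conflicts[2][2] = {
+	/*          N  X */
+	/* N */   { 0, 0 },
+	/* X */   { 0, 1 }
+};
+if ((ret = dbenv-&gt;set_lk_conflicts(dbenv, (u_int8_t *)conflicts, 2)) != 0)
+	dbenv-&gt;err(dbenv, ret, "set_lk_conflicts");</pre></blockquote>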
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lk_conflicts method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<p>The DB_ENV-&gt;set_lk_conflicts method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lk_conflicts method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lk_detect.html b/libdb/docs/api_c/env_set_lk_detect.html
new file mode 100644
index 0000000..fc4228d
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lk_detect.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lk_detect</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lk_detect(DB_ENV *dbenv, u_int32_t detect);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify whether the deadlock detector is to be run whenever a lock conflict
+occurs, and which lock request(s) should be rejected. Because
+transactions acquire locks on behalf of a single locker ID, rejecting
+a lock request associated with a transaction normally requires that the
+transaction be aborted. The specified value must be one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to DB_LOCK_RANDOM.
+<dt><a name="DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest number of
+locks.
+<dt><a name="DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest number of
+write locks.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>The DB_ENV-&gt;set_lk_detect method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lk_detect interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lk_detect
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DB_ENV-&gt;set_lk_detect method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string; for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
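+<p>As an illustration, a minimal sketch of enabling the detector before the
+environment is opened might look as follows; the DB_LOCK_MINWRITE policy, the
+function name, and the environment home "/var/dbenv" are assumptions made for
+the example only:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+open_env_with_detector(DB_ENV **dbenvp)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+		return (ret);
+	/* Run the detector on each conflict, rejecting the request of the
+	 * locker holding the fewest write locks. */
+	if ((ret = dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_MINWRITE)) != 0)
+		goto err;
+	if ((ret = dbenv-&gt;open(dbenv, "/var/dbenv", DB_CREATE | DB_INIT_LOCK |
+	    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+		goto err;
+	*dbenvp = dbenv;
+	return (0);
+
+err:	(void)dbenv-&gt;close(dbenv, 0);
+	return (ret);
+}</pre></blockquote>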
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lk_detect method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lk_detect method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lk_detect method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lk_max_lockers.html b/libdb/docs/api_c/env_set_lk_max_lockers.html
new file mode 100644
index 0000000..c43bb6d
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lk_max_lockers.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lk_max_lockers</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lk_max_lockers(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 lockers. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DB_ENV-&gt;set_lk_max_lockers method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lk_max_lockers interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lk_max_lockers
+will be ignored.
+<p>The DB_ENV-&gt;set_lk_max_lockers method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
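+<p>As an illustration, a hypothetical helper might raise the limit before the
+environment is opened; the function name and the value 5000 are assumptions
+for the example, not recommendations:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Allow up to 5000 simultaneous lockers; dbenv must not yet be open. */
+int
+raise_locker_limit(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_lk_max_lockers(dbenv, 5000)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_lk_max_lockers");
+	return (ret);
+}</pre></blockquote>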
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lk_max_lockers method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lk_max_lockers method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lk_max_lockers method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lk_max_locks.html b/libdb/docs/api_c/env_set_lk_max_locks.html
new file mode 100644
index 0000000..79106b8
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lk_max_locks.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lk_max_locks</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lk_max_locks(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> to estimate how much space to
+allocate for various lock-table data structures. The default value is
+1000 locks. For specific information on configuring the size of the lock
+subsystem, see <a href="../ref/lock/max.html">Configuring locking:
+sizing the system</a>.
+<p>The DB_ENV-&gt;set_lk_max_locks method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lk_max_locks interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lk_max_locks
+will be ignored.
+<p>The DB_ENV-&gt;set_lk_max_locks method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
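+<p>As an illustration, a hypothetical helper might raise the limit before the
+environment is opened; the function name and the value 10000 are assumptions
+for the example, not recommendations:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Allow up to 10000 concurrently held locks; dbenv must not yet be open. */
+int
+raise_lock_limit(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_lk_max_locks(dbenv, 10000)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_lk_max_locks");
+	return (ret);
+}</pre></blockquote>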
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lk_max_locks method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lk_max_locks method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lk_max_locks method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_lk_max_objects.html b/libdb/docs/api_c/env_set_lk_max_objects.html
new file mode 100644
index 0000000..78c49b9
--- /dev/null
+++ b/libdb/docs/api_c/env_set_lk_max_objects.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_lk_max_objects</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_lk_max_objects(DB_ENV *dbenv, u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 objects. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DB_ENV-&gt;set_lk_max_objects method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_lk_max_objects interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_lk_max_objects
+will be ignored.
+<p>The DB_ENV-&gt;set_lk_max_objects method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
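+<p>As an illustration, the object limit is usually sized together with the
+lock and locker limits; the function name and the values below are assumptions
+for the example, not recommendations:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Size the lock table for the application; dbenv must not yet be open. */
+int
+size_lock_table(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_lk_max_objects(dbenv, 10000)) != 0 ||
+	    (ret = dbenv-&gt;set_lk_max_locks(dbenv, 10000)) != 0 ||
+	    (ret = dbenv-&gt;set_lk_max_lockers(dbenv, 5000)) != 0)
+		dbenv-&gt;err(dbenv, ret, "lock table sizing");
+	return (ret);
+}</pre></blockquote>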
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_lk_max_objects method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_lk_max_objects method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_lk_max_objects method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_mp_mmapsize.html b/libdb/docs/api_c/env_set_mp_mmapsize.html
new file mode 100644
index 0000000..c1d2250
--- /dev/null
+++ b/libdb/docs/api_c/env_set_mp_mmapsize.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_mp_mmapsize</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_mp_mmapsize(DB_ENV *dbenv, size_t mp_mmapsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few
+other criteria) are, by default, mapped into the process address space
+instead of being copied into the local cache. This can result in
+better-than-usual performance because available virtual memory is
+normally much larger than the local cache, and page faults are faster
+than page copying on many systems. However, it can cause resource
+starvation in the presence of limited virtual memory, and it can result
+in immense process sizes in the presence of large databases.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DB_ENV-&gt;set_mp_mmapsize method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_mp_mmapsize interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_mp_mmapsize method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
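+<p>As an illustration, the limit may be adjusted on an existing handle at any
+time; the function name and the 16MB figure are assumptions for the example
+only:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Memory-map read-only files of up to 16MB; copy larger files into the cache. */
+int
+limit_mmap_size(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_mp_mmapsize(dbenv, 16 * 1024 * 1024)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_mp_mmapsize");
+	return (ret);
+}</pre></blockquote>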
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_mp_mmapsize method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_mp_mmapsize method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_mp_mmapsize method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_paniccall.html b/libdb/docs/api_c/env_set_paniccall.html
new file mode 100644
index 0000000..cb80b59
--- /dev/null
+++ b/libdb/docs/api_c/env_set_paniccall.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_paniccall</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_paniccall(DB_ENV *dbenv,
+ void (*paniccall)(DB_ENV *, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). In these cases, the value <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>
+is returned by Berkeley DB.
+<p>In these cases, it is also often simpler to shut down the application
+when such errors occur rather than to try to gracefully return up the
+stack. The DB_ENV-&gt;set_paniccall and <a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a> methods
+are used to specify functions to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB interface.
+When called, the <b>dbenv</b> argument will be a reference to the
+current environment, and the <b>errval</b> argument is the error value
+that would have been returned to the calling function.
+<p>The DB_ENV-&gt;set_paniccall interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_paniccall method returns a non-zero error value on failure and 0 on success.
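+<p>As an illustration, a hypothetical handler might log the failure and
+terminate, leaving recovery to be run when the application is restarted; all
+names below are assumptions for the example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Called just before DB_RUNRECOVERY would be returned to the application. */
+static void
+panic_handler(DB_ENV *dbenv, int errval)
+{
+	fprintf(stderr, "fatal Berkeley DB error: %s\n", db_strerror(errval));
+	exit(1);
+}
+
+int
+install_panic_handler(DB_ENV *dbenv)
+{
+	return (dbenv-&gt;set_paniccall(dbenv, panic_handler));
+}</pre></blockquote>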
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_rpc_server.html b/libdb/docs/api_c/env_set_rpc_server.html
new file mode 100644
index 0000000..7733b67
--- /dev/null
+++ b/libdb/docs/api_c/env_set_rpc_server.html
@@ -0,0 +1,82 @@
+<!--"@(#)env_set_rpc_server.so 10.1 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_rpc_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_rpc_server</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_rpc_server(DB_ENV *dbenv, CLIENT *client, char *host,
+ long cl_timeout, long sv_timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Establishes a connection for this <b>dbenv</b> to an RPC server. If
+the <b>client</b> argument is NULL, this call creates a connection to
+the Berkeley DB server on the indicated hostname and sets up a channel for
+communication.
+If the <b>client</b> channel has been provided by the
+application, Berkeley DB will use it as its connection, and the <b>host</b> and
+<b>cl_timeout</b> fields are ignored.
+<a name="3"><!--meow--></a>
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<a name="4"><!--meow--></a>
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that the
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+DB_NOSERVER_ID, indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DB_ENV-&gt;set_rpc_server method has been called, subsequent calls
+to Berkeley DB library interfaces may return or throw exceptions encapsulating
+<a name="DB_NOSERVER">DB_NOSERVER</a>, <a name="DB_NOSERVER_ID">DB_NOSERVER_ID</a>, or
+<a name="DB_NOSERVER_HOME">DB_NOSERVER_HOME</a>.
+<p>The DB_ENV-&gt;set_rpc_server method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_rpc_server interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+<p>The DB_ENV-&gt;set_rpc_server method returns a non-zero error value on failure and 0 on success.
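+<p>As an illustration (assuming the library was built with RPC support), a
+client handle might be pointed at a server before the environment is opened;
+the host name and the use of default timeouts are assumptions for the example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Let Berkeley DB create the connection itself and use default timeouts. */
+int
+use_rpc_server(DB_ENV *dbenv)
+{
+	char host[] = "db.example.com";
+	int ret;
+
+	if ((ret = dbenv-&gt;set_rpc_server(dbenv, NULL, host, 0, 0, 0)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_rpc_server");
+	return (ret);
+}</pre></blockquote>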
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_rpc_server method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;set_rpc_server method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_rpc_server method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_shm_key.html b/libdb/docs/api_c/env_set_shm_key.html
new file mode 100644
index 0000000..62fa5e0
--- /dev/null
+++ b/libdb/docs/api_c/env_set_shm_key.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_shm_key</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_shm_key(DB_ENV *dbenv, long shm_key);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces; for example, UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented by a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35,
+and the next one will have a segment ID between 36 and 40 or so. A
+Berkeley DB environment always creates a master shared memory region; an
+additional shared memory region for each of the subsystems supported by
+the environment (Locking, Logging, Memory Pool and Transaction); plus
+an additional shared memory region for each additional memory pool cache
+that is supported. Already existing regions with the same segment IDs
+will be removed. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p>The intent behind this interface is two-fold: without it, applications
+have no way to ensure that two Berkeley DB applications don't attempt to use
+the same segment IDs when creating different Berkeley DB environments. In
+addition, by using the same segment IDs each time the environment is
+created, previously created segments will be removed, and the set of
+segments on the system will not grow without bound.
+<p>The DB_ENV-&gt;set_shm_key method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_shm_key interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_shm_key
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DB_ENV-&gt;set_shm_key method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
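+<p>As an illustration, the base segment ID is set before the environment is
+opened with system memory regions; the key 664 and the home directory are
+assumptions that every application sharing the environment would have to
+agree on:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Open an environment whose regions live in system (System V) memory. */
+int
+open_system_memory_env(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_shm_key(dbenv, 664)) != 0)
+		return (ret);
+	return (dbenv-&gt;open(dbenv, "/var/dbenv",
+	    DB_CREATE | DB_SYSTEM_MEM | DB_INIT_LOCK | DB_INIT_MPOOL, 0));
+}</pre></blockquote>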
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_shm_key method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_shm_key method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_shm_key method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_tas_spins.html b/libdb/docs/api_c/env_set_tas_spins.html
new file mode 100644
index 0000000..0ea1c7c
--- /dev/null
+++ b/libdb/docs/api_c/env_set_tas_spins.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_tas_spins</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_tas_spins(DB_ENV *dbenv, u_int32_t tas_spins);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The DB_ENV-&gt;set_tas_spins method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_tas_spins interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_tas_spins method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
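+<p>As an illustration, the spin count can be tuned at any time; the value 100
+is an assumption for the example, not a recommendation:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Spin 100 times on a busy test-and-set mutex before blocking. */
+int
+tune_spin_count(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_tas_spins(dbenv, 100)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_tas_spins");
+	return (ret);
+}</pre></blockquote>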
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_tas_spins method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;set_tas_spins method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_tas_spins method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_timeout.html b/libdb/docs/api_c/env_set_timeout.html
new file mode 100644
index 0000000..522563a
--- /dev/null
+++ b/libdb/docs/api_c/env_set_timeout.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_timeout(DB_ENV *dbenv, db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;set_timeout method sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> and
+<a href="../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a> for more information.
+<p>The DB_ENV-&gt;set_timeout method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_timeout interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_timeout method returns a non-zero error value on failure and 0 on success.
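+<p>As an illustration, both timeouts can be set on the same handle; the
+one-second lock timeout and five-second transaction timeout (expressed in
+microseconds) are assumptions for the example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Time out individual lock requests after 1 second and transactions after 5. */
+int
+set_env_timeouts(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_timeout(dbenv, 1000000, DB_SET_LOCK_TIMEOUT)) != 0)
+		return (ret);
+	return (dbenv-&gt;set_timeout(dbenv, 5000000, DB_SET_TXN_TIMEOUT));
+}</pre></blockquote>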
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_timeout method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;set_timeout method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_timeout method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_tmp_dir.html b/libdb/docs/api_c/env_set_tmp_dir.html
new file mode 100644
index 0000000..4053129
--- /dev/null
+++ b/libdb/docs/api_c/env_set_tmp_dir.html
@@ -0,0 +1,90 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_tmp_dir</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_tmp_dir(DB_ENV *dbenv, const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags were
+specified.
+<p>Note: the GetTempPath interface is only checked on Win32 platforms.
+<p>The DB_ENV-&gt;set_tmp_dir method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_tmp_dir interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_tmp_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DB_ENV-&gt;set_tmp_dir method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
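+<p>As an illustration, the directory is configured before the environment is
+opened; the path below is an assumption for the example and must already
+exist:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Back in-memory databases with files under /var/tmp/dbtmp. */
+int
+set_temp_directory(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_tmp_dir(dbenv, "/var/tmp/dbtmp")) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_tmp_dir");
+	return (ret);
+}</pre></blockquote>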
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_tmp_dir method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_tmp_dir method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_tmp_dir method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_tx_max.html b/libdb/docs/api_c/env_set_tx_max.html
new file mode 100644
index 0000000..f48f1c9
--- /dev/null
+++ b/libdb/docs/api_c/env_set_tx_max.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_tx_max</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_tx_max(DB_ENV *dbenv, u_int32_t tx_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DB_ENV-&gt;set_tx_max method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_tx_max interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a> is called, the information specified to DB_ENV-&gt;set_tx_max
+will be ignored.
+<p>The DB_ENV-&gt;set_tx_max method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
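+<p>As an illustration, the limit is raised before the environment is opened;
+the value 200 is an assumption sized to the example application's expected
+concurrency, including not-yet-resolved child transactions:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Support up to 200 concurrently active transactions. */
+int
+size_transaction_table(DB_ENV *dbenv)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_tx_max(dbenv, 200)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_tx_max");
+	return (ret);
+}</pre></blockquote>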
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_tx_max method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_tx_max method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_tx_max method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_tx_timestamp.html b/libdb/docs/api_c/env_set_tx_timestamp.html
new file mode 100644
index 0000000..6702377
--- /dev/null
+++ b/libdb/docs/api_c/env_set_tx_timestamp.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_tx_timestamp</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_tx_timestamp(DB_ENV *dbenv, time_t *timestamp);
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+The <b>timestamp</b> argument should be the number of seconds since 0
+hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal
+Time; that is, the Epoch.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DB_ENV-&gt;set_tx_timestamp method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_tx_timestamp interface may not be called after the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>
+interface is called.
+<p>The DB_ENV-&gt;set_tx_timestamp method returns a non-zero error value on failure and 0 on success.
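+<p>As an illustration, a handle might be configured to recover to a point one
+hour in the past; the home directory and the choice of recovery point are
+assumptions for the example, and the timestamp only takes effect when the
+environment is subsequently opened with the DB_RECOVER or DB_RECOVER_FATAL
+flag:
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;time.h&gt;
+
+/* Run recovery back to one hour before the current time. */
+int
+recover_to_one_hour_ago(DB_ENV *dbenv)
+{
+	time_t timestamp;
+	int ret;
+
+	timestamp = time(NULL) - 60 * 60;
+	if ((ret = dbenv-&gt;set_tx_timestamp(dbenv, &amp;timestamp)) != 0)
+		return (ret);
+	return (dbenv-&gt;open(dbenv, "/var/dbenv", DB_CREATE | DB_RECOVER |
+	    DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>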
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_tx_timestamp method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the log files
+currently present in the environment.
+</dl>
+<p>The DB_ENV-&gt;set_tx_timestamp method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_tx_timestamp method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_set_verbose.html b/libdb/docs/api_c/env_set_verbose.html
new file mode 100644
index 0000000..518d398
--- /dev/null
+++ b/libdb/docs/api_c/env_set_verbose.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_verbose</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_verbose(DB_ENV *dbenv, u_int32_t which, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;set_verbose method turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to non-zero, the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="DB_VERB_RECOVERY">DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="DB_VERB_REPLICATION">DB_VERB_REPLICATION</a><dd>Display additional information when processing replication messages.
+<p><dt><a name="DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DB_ENV-&gt;set_verbose method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_verbose interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_verbose method returns a non-zero error value on failure and 0 on success.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string; for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
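+<p>As an illustration, verbosity can be toggled at any time; singling out the
+deadlock-detection messages is an assumption for the example:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Turn deadlock-detection diagnostics on (onoff != 0) or off (onoff == 0). */
+void
+toggle_deadlock_messages(DB_ENV *dbenv, int onoff)
+{
+	int ret;
+
+	if ((ret = dbenv-&gt;set_verbose(dbenv, DB_VERB_DEADLOCK, onoff)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_verbose");
+}</pre></blockquote>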
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_verbose method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;set_verbose method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_verbose method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_strerror.html b/libdb/docs/api_c/env_strerror.html
new file mode 100644
index 0000000..77a74c0
--- /dev/null
+++ b/libdb/docs/api_c/env_strerror.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_strerror</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+char *
+db_strerror(int error);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_strerror method returns an error message string corresponding
+to the error number <b>error</b>. This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>error</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
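+<p>For example, a small sketch of reporting an error return as text
+(the helper name <b>report_error</b> is illustrative only) might look
+as follows:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+/* Print the message corresponding to a Berkeley DB or system error number. */
+void
+report_error(int error)
+{
+	fprintf(stderr, "operation failed: %s\n", db_strerror(error));
+}</pre></blockquote>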
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/env_version.html b/libdb/docs/api_c/env_version.html
new file mode 100644
index 0000000..e176b03
--- /dev/null
+++ b/libdb/docs/api_c/env_version.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_version</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+char *
+db_version(int *major, int *minor, int *patch);
+</pre></h3>
+<h1>Description</h1>
+<p>The db_version method returns a pointer to a string containing
+Berkeley DB version information. If <b>major</b> is non-NULL, the major
+version of the Berkeley DB release is stored in the memory to which it refers.
+If <b>minor</b> is non-NULL, the minor version of the Berkeley DB release
+is stored in the memory to which it refers. If <b>patch</b> is
+non-NULL, the patch version of the Berkeley DB release is stored in the
+memory to which it refers.
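+<p>A minimal sketch of displaying the version information might look as
+follows:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+	int major, minor, patch;
+
+	/* The string and the individual numbers describe the same release. */
+	printf("%s\n", db_version(&amp;major, &amp;minor, &amp;patch));
+	printf("%d.%d.%d\n", major, minor, patch);
+	return (0);
+}</pre></blockquote>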
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/hsearch.html b/libdb/docs/api_c/hsearch.html
new file mode 100644
index 0000000..7ff3e6d
--- /dev/null
+++ b/libdb/docs/api_c/hsearch.html
@@ -0,0 +1,106 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: hsearch</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>hsearch</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+<p>
+typedef enum {
+ FIND, ENTER
+} ACTION;
+<p>
+typedef struct entry {
+ char *key;
+ void *data;
+} ENTRY;
+<p>
+ENTRY *
+hsearch(ENTRY item, ACTION action);
+<p>
+int
+hcreate(size_t nelem);
+<p>
+void
+hdestroy(void);
+</pre></h3>
+<h1>Description</h1>
+<p>The hsearch interface to the Berkeley DB library is intended to
+provide a high-performance implementation and source code compatibility
+for applications written to the historic hsearch interface.
+It is not recommended for any other purpose.
+<p>To compile hsearch applications, replace the application's
+<b>#include</b> of the hsearch include
+file (for example, <b>#include &lt;search.h&gt;</b>)
+with the following two lines:
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;</pre></blockquote>
+<p>and recompile.
+<p>The hcreate function creates an in-memory database. The
+<b>nelem</b> argument is an estimate of the maximum number of key/data
+pairs that will be stored in the database.
+<p>The <b>hdestroy</b> function discards the database.
+<p>Database elements are structures of type <b>ENTRY</b>, which contain at
+least two fields: <b>key</b> and <b>data</b>. The field <b>key</b> is
+declared to be of type <b>char *</b>, and is the key used for storage
+and retrieval. The field <b>data</b> is declared to be of type
+<b>void *</b>, and is its associated data.
+<p>The hsearch function retrieves key/data pairs from, and stores
+key/data pairs into the database.
+<p>The <b>action</b> argument must be set to one of two values:
+<p><dl compact>
+<p><dt>ENTER<dd>If the key does not already appear in the database, insert the key/data
+pair into the database. If the key already appears in the database,
+return a reference to an <b>ENTRY</b> structure which refers to the
+existing key and its associated data element.
+<p><dt>FIND<dd>Retrieve the specified key/data pair from the database.
+</dl>
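+<p>An illustrative sketch of storing and retrieving a single key/data
+pair (the key and data values, and the table size of 100, are arbitrary
+assumptions) might look as follows:
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+
+int
+main()
+{
+	ENTRY item, *found;
+
+	(void)hcreate(100);		/* Create the in-memory database. */
+
+	item.key = "fruit";
+	item.data = "apple";
+	(void)hsearch(item, ENTER);	/* Store the key/data pair. */
+
+	found = hsearch(item, FIND);	/* Retrieve it again. */
+
+	hdestroy();			/* Discard the database. */
+	return (found == NULL);
+}</pre></blockquote>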
+<h3>Compatibility Notes</h3>
+<p>Historically, hsearch required applications to maintain the keys
+and data in the application's memory for as long as the <b>hsearch</b>
+database existed. Because Berkeley DB handles key and data management
+internally, there is no requirement that applications maintain local
+copies of key and data items, although the only effect of doing so
+should be the allocation of additional memory.
+<h3>Hsearch Diagnostics</h3>
+<p>The <b>hcreate</b> function returns 0 on failure, setting
+<b>errno</b>, and non-zero on success.
+<p>The <b>hsearch</b> function returns a pointer to an ENTRY structure on
+success, and NULL, setting <b>errno</b>, if the <b>action</b>
+specified was FIND and the item did not appear in the database.
+<h1>Errors</h1>
+<p>The hcreate method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the hcreate method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<p>The hsearch method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the hsearch method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<p>In addition, the <b>hsearch</b> function will fail, setting <b>errno</b>
+to 0, if the <b>action</b> specified was FIND and the item did not appear in
+the database.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_class.html b/libdb/docs/api_c/lock_class.html
new file mode 100644
index 0000000..81903c2
--- /dev/null
+++ b/libdb/docs/api_c/lock_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOCK</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOCK</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_lock_u DB_LOCK;
+</pre></h3>
+<h1>Description</h1>
+<p>The locking interfaces for the Berkeley DB database environment are methods
+of the <a href="../api_c/env_class.html">DB_ENV</a> handle. The DB_LOCK object is the handle
+for a single lock, and has no methods of its own.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, DB_LOCK
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_detect.html b/libdb/docs/api_c/lock_detect.html
new file mode 100644
index 0000000..b159c9d
--- /dev/null
+++ b/libdb/docs/api_c/lock_detect.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_detect</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_detect(DB_ENV *env,
+ u_int32_t flags, u_int32_t atype, int *aborted);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_detect method runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table and marks one of the
+participating lock requesters for rejection in each deadlock it finds.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <b>atype</b> parameter specifies which lock request(s) to reject.
+It must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to DB_LOCK_RANDOM.
+<dt><a name="DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest
+locks.
+<dt><a name="DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest
+write locks.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>If the <b>aborted</b> parameter is non-NULL, the memory location to
+which it refers will be set to the number of lock requests that were
+rejected.
+<p>The DB_ENV-&gt;lock_detect method is the underlying interface used by the <a href="../utility/db_deadlock.html">db_deadlock</a> utility.
+See the <a href="../utility/db_deadlock.html">db_deadlock</a> utility source code for an example of using DB_ENV-&gt;lock_detect
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The DB_ENV-&gt;lock_detect method returns a non-zero error value on failure and 0 on success.
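+<p>A sketch of running one iteration of the detector from application
+code (assuming an already-opened environment handle <b>dbenv</b>) might
+look as follows:
+<p><blockquote><pre>int
+run_deadlock_detector(DB_ENV *dbenv)
+{
+	int aborted, ret;
+
+	/* Reject a random lock request in each deadlock found. */
+	if ((ret = dbenv-&gt;lock_detect(dbenv, 0, DB_LOCK_RANDOM, &amp;aborted)) != 0)
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;lock_detect");
+	return (ret);
+}</pre></blockquote>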
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_detect method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_detect method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_get.html b/libdb/docs/api_c/lock_get.html
new file mode 100644
index 0000000..026a512
--- /dev/null
+++ b/libdb/docs/api_c/lock_get.html
@@ -0,0 +1,90 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_get(DB_ENV *env, u_int32_t locker,
+ u_int32_t flags, const DBT *obj,
+ const db_lockmode_t lock_mode, DB_LOCK *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_get method acquires a lock from the lock table, returning
+information about it in
+the <b>lock</b> argument.
+<p>The <b>locker</b> argument specified to DB_ENV-&gt;lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock,
+return DB_LOCK_NOTGRANTED immediately instead of waiting
+for the lock to become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released. Applications using the locking
+subsystem directly while also doing locking via the Berkeley DB access methods
+must take care not to inadvertently lock objects that happen to be equal
+to the unique file IDs used to lock files. See
+<a href="../ref/lock/am_conv.html">Access method locking conventions</a>
+for more information.
+<p>The <b>lock_mode</b> argument is used as an index into the environment's
+lock conflict matrix. When using the default lock conflict matrix,
+<b>lock_mode</b> must be set to one of the following values:
+<p><dl compact>
+<dt>DB_LOCK_READ<dd>read (shared)
+<dt>DB_LOCK_WRITE<dd>write (exclusive)
+<dt>DB_LOCK_IWRITE<dd>intention to write (shared)
+<dt>DB_LOCK_IREAD<dd>intention to read (shared)
+<dt>DB_LOCK_IWR<dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p>If the DB_LOCK_NOWAIT flag was specified and the lock could not be
+granted immediately, the DB_ENV-&gt;lock_get method returns DB_LOCK_NOTGRANTED.
+Otherwise, the DB_ENV-&gt;lock_get method returns a non-zero error value on failure and 0 on success.
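+<p>The following sketch acquires and then releases a single read lock on
+an application-defined object; the object name and the surrounding
+helper are assumptions, and the environment handle <b>dbenv</b> is
+presumed to have been opened with the locking subsystem:
+<p><blockquote><pre>int
+lock_example(DB_ENV *dbenv)
+{
+	DBT obj;
+	DB_LOCK lock;
+	u_int32_t locker;
+	int ret;
+
+	memset(&amp;obj, 0, sizeof(obj));
+	obj.data = "my-object";
+	obj.size = sizeof("my-object") - 1;
+
+	if ((ret = dbenv-&gt;lock_id(dbenv, &amp;locker)) != 0)
+		return (ret);
+	if ((ret = dbenv-&gt;lock_get(dbenv,
+	    locker, 0, &amp;obj, DB_LOCK_READ, &amp;lock)) != 0)
+		goto err;
+
+	/* ... operate on the object protected by the lock ... */
+
+	ret = dbenv-&gt;lock_put(dbenv, &amp;lock);
+
+err:	(void)dbenv-&gt;lock_id_free(dbenv, locker);
+	return (ret);
+}</pre></blockquote>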
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_get method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The DB_ENV-&gt;lock_get method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_get method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_id.html b/libdb/docs/api_c/lock_id.html
new file mode 100644
index 0000000..5079c73
--- /dev/null
+++ b/libdb/docs/api_c/lock_id.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_id</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_id(DB_ENV *env, u_int32_t *idp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_id method
+copies a locker ID, which is guaranteed to be unique in the specified lock
+table, into the memory location to which <b>idp</b> refers.
+<p>The <a href="../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a> method should be called to return the locker ID to
+the Berkeley DB library when it is no longer needed.
+<p>The DB_ENV-&gt;lock_id method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_id method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_id method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_id_free.html b/libdb/docs/api_c/lock_id_free.html
new file mode 100644
index 0000000..738e052
--- /dev/null
+++ b/libdb/docs/api_c/lock_id_free.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_id_free(DB_ENV *env, u_int32_t id);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_id_free method frees a locker ID allocated by the
+<a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a> method.
+<p>The DB_ENV-&gt;lock_id_free method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DB_ENV-&gt;lock_id_free method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_id_free method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_list.html b/libdb/docs/api_c/lock_list.html
new file mode 100644
index 0000000..22734cf
--- /dev/null
+++ b/libdb/docs/api_c/lock_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Locking Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Locking Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Locking Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td><td>Acquire/release locks</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_put.html b/libdb/docs/api_c/lock_put.html
new file mode 100644
index 0000000..ec853e2
--- /dev/null
+++ b/libdb/docs/api_c/lock_put.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_put</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_put(DB_ENV *env, DB_LOCK *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_put method releases <b>lock</b> from the lock table.
+<p>The DB_ENV-&gt;lock_put method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_put method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;lock_put method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_put method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_stat.html b/libdb/docs/api_c/lock_stat.html
new file mode 100644
index 0000000..8cea1b6
--- /dev/null
+++ b/libdb/docs/api_c/lock_stat.html
@@ -0,0 +1,93 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_stat(DB_ENV *env, DB_LOCK_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_stat method returns the locking subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DB_ENV-&gt;lock_stat method creates a statistical structure of type
+DB_LOCK_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_LOCK_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_id;<dd>The last allocated locker ID.
+<dt>u_int32_t st_cur_maxid;<dd>The current maximum unused locker ID.
+<dt>u_int32_t st_nmodes;<dd>The number of lock modes.
+<dt>u_int32_t st_maxlocks;<dd>The maximum number of locks possible.
+<dt>u_int32_t st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>u_int32_t st_maxobjects;<dd>The maximum number of lock objects possible.
+<dt>u_int32_t st_nlocks;<dd>The number of current locks.
+<dt>u_int32_t st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>u_int32_t st_nlockers;<dd>The number of current lockers.
+<dt>u_int32_t st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>u_int32_t st_nobjects;<dd>The number of current lock objects.
+<dt>u_int32_t st_maxnobjects;<dd>The maximum number of lock objects at any one time.
+<dt>u_int32_t st_nrequests;<dd>The total number of locks requested.
+<dt>u_int32_t st_nreleases;<dd>The total number of locks released.
+<dt>u_int32_t st_nnowaits;<dd>The total number of lock requests failing because
+<a href="../api_c/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> was set.
+<dt>u_int32_t st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>u_int32_t st_ndeadlocks;<dd>The number of deadlocks.
+<dt>u_int32_t st_locktimeout;<dd>Lock timeout value.
+<dt>u_int32_t st_nlocktimeouts;<dd>The number of locks that have timed out.
+<dt>u_int32_t st_txntimeout;<dd>Transaction timeout value.
+<dt>u_int32_t st_ntxntimeouts;<dd>The number of transactions that have timed out. This value is also a
+component of <b>st_ndeadlocks</b>, the total number of deadlocks
+detected.
+<dt>u_int32_t st_regsize;<dd>The size of the lock region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DB_ENV-&gt;lock_stat method returns a non-zero error value on failure and 0 on success.
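+<p>A sketch of retrieving, printing, and releasing the statistics
+(assuming an open environment handle <b>dbenv</b>; the helper name is
+illustrative only) might look as follows:
+<p><blockquote><pre>int
+print_lock_stats(DB_ENV *dbenv)
+{
+	DB_LOCK_STAT *sp;
+	int ret;
+
+	if ((ret = dbenv-&gt;lock_stat(dbenv, &amp;sp, 0)) != 0)
+		return (ret);
+	printf("current locks: %lu, deadlocks: %lu\n",
+	    (unsigned long)sp-&gt;st_nlocks, (unsigned long)sp-&gt;st_ndeadlocks);
+	free(sp);	/* A single free releases the returned structure. */
+	return (0);
+}</pre></blockquote>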
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/lock_vec.html b/libdb/docs/api_c/lock_vec.html
new file mode 100644
index 0000000..7169598
--- /dev/null
+++ b/libdb/docs/api_c/lock_vec.html
@@ -0,0 +1,145 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_vec</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_vec(DB_ENV *env, u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_vec method atomically obtains and releases one or more locks
+from the lock table. The DB_ENV-&gt;lock_vec method is intended to support
+acquisition or trading of multiple locks under one lock table semaphore,
+as is needed for lock coupling or in multigranularity locking for lock
+escalation.
+<p>The <b>locker</b> argument specified to DB_ENV-&gt;lock_vec is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the locks.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with
+an existing lock,
+return DB_LOCK_NOTGRANTED immediately instead of waiting
+for the lock to become available. In this case, if non-NULL,
+<b>elistp</b> identifies the request that was not granted.
+</dl>
+<p>The <b>list</b> array provided to DB_ENV-&gt;lock_vec is typedef'd as
+DB_LOCKREQ. A DB_LOCKREQ structure has at least the following fields.
+In order to ensure compatibility with future releases of Berkeley DB, all
+fields of the DB_LOCKREQ structure that are not explicitly set should
+be initialized to 0 before the first time the structure is used. Do
+this by declaring the structure external or static, or by calling
+<b>memset</b>(3).
+<p><dl compact>
+<p><dt>lockop_t <a name="op">op</a>;<dd>The operation to be performed, which must be set to one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_GET">DB_LOCK_GET</a><dd>Get the lock defined by the values of the <b>mode</b> and <b>obj</b>
+structure fields, for the specified <b>locker</b>. Upon return from
+DB_ENV-&gt;lock_vec, if the <b>lock</b> field is non-NULL, a reference
+to the acquired lock is stored there. (This reference is invalidated
+by any call to DB_ENV-&gt;lock_vec or <a href="../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> that releases the
+lock.)
+<p><dt><a name="DB_LOCK_GET_TIMEOUT">DB_LOCK_GET_TIMEOUT</a><dd>Identical to DB_LOCK_GET except that the value in the <b>timeout</b>
+structure field overrides any previously specified timeout value for
+this lock. A value of 0 turns off any previously specified timeout.
+<p><dt><a name="DB_LOCK_PUT">DB_LOCK_PUT</a><dd>The lock to which the <b>lock</b> structure field refers is released.
+The <b>locker</b> argument, and <b>mode</b> and <b>obj</b> fields
+are ignored.
+<p><dt><a name="DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a><dd>All locks held by the specified <b>locker</b> are released. The
+<b>lock</b>, <b>mode</b>, and <b>obj</b> structure fields are
+ignored. Locks acquired in operations performed by the current call to
+DB_ENV-&gt;lock_vec which appear before the DB_LOCK_PUT_ALL
+operation are released; those acquired in operations appearing after
+the DB_LOCK_PUT_ALL operation are not released.
+<p><dt><a name="DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a><dd>All locks held on the object <b>obj</b> are released. The
+<b>locker</b> argument and the <b>lock</b> and <b>mode</b> structure
+fields are ignored. Locks acquired in operations performed by the
+current call to DB_ENV-&gt;lock_vec that appear before the
+DB_LOCK_PUT_OBJ operation are released; those acquired in
+operations appearing after the DB_LOCK_PUT_OBJ operation are
+not released.
+<p><dt><a name="DB_LOCK_TIMEOUT">DB_LOCK_TIMEOUT</a><dd>Cause the specified <b>locker</b> to time out immediately. If the
+database environment has not configured automatic deadlock detection,
+the transaction will time out the next time deadlock detection is
+performed. As transactions acquire locks on behalf of a single locker
+ID, timing out the locker ID associated with a transaction will time
+out the transaction itself.
+</dl>
+<p><dt>DB_LOCK <a name="lock">lock</a>;<dd>A lock reference.
+<p><dt>const lockmode_t <a name="mode">mode</a>;<dd>The lock mode, used as an index into the environment's lock conflict matrix.
+When using the default lock conflict matrix, <b>mode</b> must be set to one
+of the following values:
+<p><dl compact>
+<dt><a name="DB_LOCK_READ">DB_LOCK_READ</a><dd>read (shared)
+<dt><a name="DB_LOCK_WRITE">DB_LOCK_WRITE</a><dd>write (exclusive)
+<dt><a name="DB_LOCK_IWRITE">DB_LOCK_IWRITE</a><dd>intention to write (shared)
+<dt><a name="DB_LOCK_IREAD">DB_LOCK_IREAD</a><dd>intention to read (shared)
+<dt><a name="DB_LOCK_IWR">DB_LOCK_IWR</a><dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p><dt>const DBT <a name="obj">obj</a>;<dd>An untyped byte string that specifies the object to be locked or
+released. Applications using the locking subsystem directly while also
+doing locking via the Berkeley DB access methods must take care not to
+inadvertently lock objects that happen to be equal to the unique file
+IDs used to lock files. See <a href="../ref/lock/am_conv.html">Access
+method locking conventions</a> for more information.
+<p><dt>u_int32_t timeout;<dd>The lock timeout value.
+</dl>
+<p>The <b>nlist</b> argument specifies the number of elements in the
+<b>list</b> array.
+<p>If any of the requested locks cannot be acquired, or any of the locks to
+be released cannot be released, the operations before the failing
+operation are guaranteed to have completed successfully, and
+DB_ENV-&gt;lock_vec returns a non-zero value. In addition, if <b>elistp</b>
+is not NULL, it is set to point to the DB_LOCKREQ entry that was being
+processed when the error occurred.
+<p>
+Otherwise, the DB_ENV-&gt;lock_vec method returns a non-zero error value on failure and 0 on success.
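+<p>As an illustrative sketch, the following helper releases every lock
+held by a locker in a single call; the function name is an assumption,
+and the <b>locker</b> value is presumed to have been allocated with
+<a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a>:
+<p><blockquote><pre>int
+release_all_locks(DB_ENV *dbenv, u_int32_t locker)
+{
+	DB_LOCKREQ req;
+
+	/* DB_LOCK_PUT_ALL ignores the lock, mode, and obj fields. */
+	memset(&amp;req, 0, sizeof(req));
+	req.op = DB_LOCK_PUT_ALL;
+
+	return (dbenv-&gt;lock_vec(dbenv, locker, 0, &amp;req, 1, NULL));
+}</pre></blockquote>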
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;lock_vec method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_LOCK_DEADLOCK<dd>The operation was selected to resolve a deadlock.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>The DB_ENV-&gt;lock_vec method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_vec method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/lock_class.html">DB_LOCK</a>
+<h1>See Also</h1>
+<a href="../api_c/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_archive.html b/libdb/docs/api_c/log_archive.html
new file mode 100644
index 0000000..3d394ff
--- /dev/null
+++ b/libdb/docs/api_c/log_archive.html
@@ -0,0 +1,102 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_archive</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_archive(DB_ENV *env, char *(*listp)[], u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_archive method
+creates a NULL-terminated array of log or database filenames, and copies
+a pointer to them into the user-specified memory location <b>listp</b>.
+<p>By default, DB_ENV-&gt;log_archive returns the names of all of the log
+files that are no longer in use (for example, that are no longer
+involved in active transactions), and that may safely be archived for
+catastrophic recovery and then removed from the system. If there are
+no filenames to return,
+the memory location to which <b>listp</b> refers will be set to NULL.
+<p>Arrays of log filenames are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_ARCH_ABS">DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames, instead of relative
+to the database home directory.
+<p><dt><a name="DB_ARCH_DATA">DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+DB_ENV-&gt;log_archive will not include them in this list. It is also
+possible that some of the files referred to by the log have since been
+deleted from the system.
+<p><dt><a name="DB_ARCH_LOG">DB_ARCH_LOG</a><dd>Return all the log filenames, regardless of whether or not they are in
+use.
+</dl>
+<p>The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually
+exclusive.
+<p>Log cursor handles (returned by the <a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> method) may have open
+file descriptors for log files in the database environment. Also, the
+Berkeley DB interfaces to the database environment logging subsystem (for
+example, <a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a> and <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>) may allocate log cursors
+and have open file descriptors for log files as well. On operating
+systems where filesystem related system calls (for example, rename and
+unlink on Windows/NT) can fail if a process has an open file descriptor
+for the affected file, attempting to move or remove the log files listed
+by DB_ENV-&gt;log_archive may fail. All Berkeley DB internal use of log cursors
+operates on active log files only and furthermore, is short-lived in
+nature. So, an application seeing such a failure should be restructured
+to close any open log cursors it may have, and otherwise to retry the
+operation until it succeeds. (Although the latter is not likely to be
+necessary; it is hard to imagine a reason to move or rename a log file
+in which transactions are being logged or aborted.)
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
+<p>The DB_ENV-&gt;log_archive method is the underlying interface used by the <a href="../utility/db_archive.html">db_archive</a> utility.
+See the <a href="../utility/db_archive.html">db_archive</a> utility source code for an example of using DB_ENV-&gt;log_archive
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The DB_ENV-&gt;log_archive method returns a non-zero error value on failure and 0 on success.
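+<p>A sketch of listing and then releasing the names of the log files
+that are no longer needed (assuming an open environment handle
+<b>dbenv</b>; the helper name is illustrative only) might look as
+follows:
+<p><blockquote><pre>int
+show_unused_logs(DB_ENV *dbenv)
+{
+	char **list, **p;
+	int ret;
+
+	if ((ret = dbenv-&gt;log_archive(dbenv, &amp;list, DB_ARCH_ABS)) != 0)
+		return (ret);
+	if (list != NULL) {
+		for (p = list; *p != NULL; ++p)
+			printf("%s\n", *p);
+		free(list);	/* One free releases the whole array. */
+	}
+	return (0);
+}</pre></blockquote>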
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_archive method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The DB_ENV-&gt;log_archive method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_archive method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_compare.html b/libdb/docs/api_c/log_compare.html
new file mode 100644
index 0000000..3432b5b
--- /dev/null
+++ b/libdb/docs/api_c/log_compare.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>log_compare</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+log_compare(const DB_LSN *lsn0, const DB_LSN *lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The log_compare method allows the caller to compare two
+DB_LSN structures,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
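+<p>For example, a small helper (the name is illustrative only) deciding
+whether one log record precedes another might be written as follows:
+<p><blockquote><pre>/* Return non-zero if lsn0 names an earlier log record than lsn1. */
+int
+lsn_precedes(const DB_LSN *lsn0, const DB_LSN *lsn1)
+{
+	return (log_compare(lsn0, lsn1) &lt; 0);
+}</pre></blockquote>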
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_cursor.html b/libdb/docs/api_c/log_cursor.html
new file mode 100644
index 0000000..d0c8cae
--- /dev/null
+++ b/libdb/docs/api_c/log_cursor.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_cursor(DB_ENV *dbenv, DB_LOGC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_cursor method
+creates a log cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_ENV-&gt;log_cursor method returns a non-zero error value on failure and 0 on success.
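+<p>A sketch of creating a cursor, positioning it on the first log
+record, and closing it again (assuming an open environment handle
+<b>dbenv</b>, and using the log cursor's <b>get</b> and <b>close</b>
+methods) might look as follows:
+<p><blockquote><pre>int
+read_first_record(DB_ENV *dbenv)
+{
+	DB_LOGC *logc;
+	DB_LSN lsn;
+	DBT data;
+	int ret;
+
+	if ((ret = dbenv-&gt;log_cursor(dbenv, &amp;logc, 0)) != 0)
+		return (ret);
+	memset(&amp;data, 0, sizeof(data));
+	ret = logc-&gt;get(logc, &amp;lsn, &amp;data, DB_FIRST);	/* First record. */
+	(void)logc-&gt;close(logc, 0);
+	return (ret);
+}</pre></blockquote>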
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_cursor method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;log_cursor method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_cursor method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_file.html b/libdb/docs/api_c/log_file.html
new file mode 100644
index 0000000..05d8609
--- /dev/null
+++ b/libdb/docs/api_c/log_file.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_file</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_file(DB_ENV *env,
+ const DB_LSN *lsn, char *namep, size_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_file method maps
+DB_LSN structures
+to filenames,
+copying the name of the file containing the record named by <b>lsn</b>
+into the memory location to which <b>namep</b> refers.
+<p>The <b>len</b> argument is the length of the <b>namep</b> buffer in bytes.
+If <b>namep</b> is too short to hold the filename, DB_ENV-&gt;log_file will
+return ENOMEM.
+(Log filenames are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+DB_LSN structures
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+DB_LSN
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+DB_LSN
+entries before the earliest one needed for restart.
+<p>The DB_ENV-&gt;log_file method returns a non-zero error value on failure and 0 on success.
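+<p>A sketch of mapping a log record to the file that contains it
+(assuming an open environment handle <b>dbenv</b> and an <b>lsn</b>
+obtained elsewhere) might look as follows:
+<p><blockquote><pre>int
+print_log_name(DB_ENV *dbenv, const DB_LSN *lsn)
+{
+	char name[100];	/* Log filenames are normally quite short. */
+	int ret;
+
+	if ((ret = dbenv-&gt;log_file(dbenv, lsn, name, sizeof(name))) != 0)
+		return (ret);
+	printf("record is in %s\n", name);
+	return (0);
+}</pre></blockquote>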
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_file method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The supplied buffer was too small to hold the log filename.
+</dl>
+<p>The DB_ENV-&gt;log_file method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_file method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_flush.html b/libdb/docs/api_c/log_flush.html
new file mode 100644
index 0000000..4b7fe65
--- /dev/null
+++ b/libdb/docs/api_c/log_flush.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_flush</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_flush(DB_ENV *env, const DB_LSN *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_flush method guarantees that all log records whose
+DB_LSN values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is NULL, all records in the
+log are flushed.
+<p>The DB_ENV-&gt;log_flush method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_flush method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;log_flush method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_flush method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
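A short sketch of forcing the log to disk with DB_ENV->log_flush, assuming dbenv is an environment opened with logging initialized; passing a NULL LSN asks for every record to be flushed, as described above.

#include <db.h>

/* Flush all buffered log records to disk (sketch). */
int
flush_whole_log(DB_ENV *dbenv)
{
	int ret;

	/* A NULL LSN means "flush every record currently in the log". */
	if ((ret = dbenv->log_flush(dbenv, NULL)) != 0)
		dbenv->err(dbenv, ret, "DB_ENV->log_flush");
	return (ret);
}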
diff --git a/libdb/docs/api_c/log_list.html b/libdb/docs/api_c/log_list.html
new file mode 100644
index 0000000..50217ed
--- /dev/null
+++ b/libdb/docs/api_c/log_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Logging Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Logging Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Logging Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><a href="../api_c/log_file.html">DB_ENV-&gt;log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_c/log_compare.html">log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_c/logc_close.html">DB_LOGC-&gt;close</a></td><td>Close a log cursor</td></tr>
+<tr><td><a href="../api_c/logc_get.html">DB_LOGC-&gt;get</a></td><td>Retrieve a log record</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/log_put.html b/libdb/docs/api_c/log_put.html
new file mode 100644
index 0000000..d5d19fb
--- /dev/null
+++ b/libdb/docs/api_c/log_put.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_put</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_put(DB_ENV *env,
+ DB_LSN *lsn, const DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_put method appends records to the log. The <a href="../api_c/lsn_class.html">DB_LSN</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FLUSH">DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_c/lsn_class.html">DB_LSN</a> values less than or equal to the
+one being "put" are on disk before DB_ENV-&gt;log_put returns.
+</dl>
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_c/lsn_class.html">DB_LSN</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+<p>The DB_ENV-&gt;log_put method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_put method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The DB_ENV-&gt;log_put method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_put method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
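A hedged sketch of appending an application record with DB_ENV->log_put. The record layout used here (a NUL-terminated string) is purely illustrative, since the structure of the data DBT is entirely up to the caller; dbenv is assumed to be an environment opened with logging initialized.

#include <string.h>
#include <db.h>

/* Append an opaque record to the log and force it to disk (sketch). */
int
log_app_record(DB_ENV *dbenv, const char *msg, DB_LSN *lsnp)
{
	DBT rec;
	int ret;

	memset(&rec, 0, sizeof(rec));
	rec.data = (void *)msg;
	rec.size = (u_int32_t)strlen(msg) + 1;

	/* DB_FLUSH: this record and all earlier ones are on disk on return. */
	if ((ret = dbenv->log_put(dbenv, lsnp, &rec, DB_FLUSH)) != 0)
		dbenv->err(dbenv, ret, "DB_ENV->log_put");
	return (ret);
}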
diff --git a/libdb/docs/api_c/log_stat.html b/libdb/docs/api_c/log_stat.html
new file mode 100644
index 0000000..d58b08d
--- /dev/null
+++ b/libdb/docs/api_c/log_stat.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_stat(DB_ENV *env, DB_LOG_STAT **spp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_stat method returns the logging subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DB_ENV-&gt;log_stat method creates a statistical structure of type
+DB_LOG_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_LOG_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>u_int32_t st_version;<dd>The version of the log file type.
+<dt>int st_mode;<dd>The mode of any created log files.
+<dt>u_int32_t st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>u_int32_t st_lg_size;<dd>The current log file size.
+<dt>u_int32_t st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>u_int32_t st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>u_int32_t st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>u_int32_t st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>u_int32_t st_wcount;<dd>The number of times the log has been written to disk.
+<dt>u_int32_t st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>u_int32_t st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>u_int32_t st_cur_file;<dd>The current log file number.
+<dt>u_int32_t st_cur_offset;<dd>The byte offset in the current log file.
+<dt>u_int32_t st_disk_file;<dd>The log file number of the last record known to be on disk.
+<dt>u_int32_t st_disk_offset;<dd>The byte offset of the last record known to be on disk.
+<dt>u_int32_t st_maxcommitperflush;<dd>The maximum number of commits contained in a single log flush.
+<dt>u_int32_t st_mincommitperflush;<dd>The minimum number of commits contained in a single log flush that
+contained a commit.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DB_ENV-&gt;log_stat method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
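A sketch of retrieving and releasing the logging statistics described above. It assumes no application-specific allocator was installed with DB_ENV->set_alloc, so the single returned structure is released with the C library free(); the fields printed are a small, arbitrary selection.

#include <stdio.h>
#include <stdlib.h>
#include <db.h>

/* Print a few logging statistics and release the structure (sketch). */
int
show_log_stats(DB_ENV *dbenv)
{
	DB_LOG_STAT *sp;
	int ret;

	if ((ret = dbenv->log_stat(dbenv, &sp, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->log_stat");
		return (ret);
	}
	printf("current log file: %lu, offset: %lu, flushes: %lu\n",
	    (unsigned long)sp->st_cur_file,
	    (unsigned long)sp->st_cur_offset,
	    (unsigned long)sp->st_scount);
	free(sp);	/* one free releases the whole statistical structure */
	return (0);
}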
diff --git a/libdb/docs/api_c/logc_class.html b/libdb/docs/api_c/logc_class.html
new file mode 100644
index 0000000..48ee255
--- /dev/null
+++ b/libdb/docs/api_c/logc_class.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOGC</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOGC</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_log_cursor DB_LOGC;
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LOGC object is the handle for a cursor into the log files,
+supporting sequential access to the records stored in log files. The
+handle is not free-threaded. Once the <a href="../api_c/logc_close.html">DB_LOGC-&gt;close</a> method is called,
+the handle may not be accessed again, regardless of that method's
+return.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, DB_LOGC, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/logc_close.html b/libdb/docs/api_c/logc_close.html
new file mode 100644
index 0000000..2f889e3
--- /dev/null
+++ b/libdb/docs/api_c/logc_close.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOGC-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOGC-&gt;close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_LOGC-&gt;close(DB_LOGC *cursor, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LOGC-&gt;close method discards the log cursor. After DB_LOGC-&gt;close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_LOGC-&gt;close method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_LOGC-&gt;close method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DB_LOGC-&gt;close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_LOGC-&gt;close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/logc_get.html b/libdb/docs/api_c/logc_get.html
new file mode 100644
index 0000000..149c513
--- /dev/null
+++ b/libdb/docs/api_c/logc_get.html
@@ -0,0 +1,94 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOGC-&gt;get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOGC-&gt;get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_LOGC-&gt;get(DB_LOGC *logc, DB_LSN *lsn, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LOGC-&gt;get method retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_c/dbt_class.html">DBT</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> and <a href="../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_c/dbt_class.html">DBT</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/lsn_class.html">DB_LSN</a> of the
+record returned.
+<p>
+If the log is empty, the DB_LOGC-&gt;get method will return DB_NOTFOUND.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/lsn_class.html">DB_LSN</a> of the
+record returned.
+<p>
+If the log is empty, the DB_LOGC-&gt;get method will return DB_NOTFOUND.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_c/lsn_class.html">DB_LSN</a> of the record
+returned.
+<p>If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DB_LOGC-&gt;get will return the first (last) record
+in the log.
+If the last (first) log record has already been returned or the log is
+empty, the DB_LOGC-&gt;get method will return DB_NOTFOUND.
+If the log was opened with the DB_THREAD flag set, calls to
+DB_LOGC-&gt;get with the DB_NEXT (DB_PREV) flag set will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+If the log cursor has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, the DB_LOGC-&gt;get method will return EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument.
+If the specified <a href="../api_c/lsn_class.html">DB_LSN</a> is invalid (for example, it does not
+appear in the log), the DB_LOGC-&gt;get method will return EINVAL.
+</dl>
+<p>
+Otherwise, the DB_LOGC-&gt;get method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_LOGC-&gt;get method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DB_LOGC-&gt;get method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_LOGC-&gt;get method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, <a href="../api_c/lsn_class.html">DB_LSN</a>
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
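A sketch of walking the whole log, oldest record first, using DB_LOGC->get with DB_NEXT. The DB_ENV->log_cursor call used to allocate the cursor is listed in the logging method table above; its exact use here is an assumption for illustration. DB_DBT_REALLOC lets the library size the record buffer, which the caller then frees.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Iterate over every log record, oldest first (sketch). */
int
dump_log(DB_ENV *dbenv)
{
	DB_LOGC *logc;
	DB_LSN lsn;
	DBT data;
	int ret, t_ret;

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
		return (ret);
	}
	memset(&data, 0, sizeof(data));
	data.flags = DB_DBT_REALLOC;	/* library grows the buffer as needed */

	/* On an uninitialized cursor, DB_NEXT returns the first record. */
	while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0)
		printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
		    (unsigned long)lsn.offset, (unsigned long)data.size);
	if (ret == DB_NOTFOUND)		/* end of log is not an error */
		ret = 0;

	if (data.data != NULL)
		free(data.data);
	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}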
diff --git a/libdb/docs/api_c/lsn_class.html b/libdb/docs/api_c/lsn_class.html
new file mode 100644
index 0000000..4201aac
--- /dev/null
+++ b/libdb/docs/api_c/lsn_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LSN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LSN</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_lsn DB_LSN;
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LSN object is a <b>log sequence number</b> which
+specifies a unique location in a log file. It has no methods and
+its data may not be manipulated by an application.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/logc_class.html">DB_LOGC</a>, DB_LSN
+<h1>See Also</h1>
+<a href="../api_c/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_fclose.html b/libdb/docs/api_c/memp_fclose.html
new file mode 100644
index 0000000..b5c64d8
--- /dev/null
+++ b/libdb/docs/api_c/memp_fclose.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;close(DB_MPOOLFILE *mpf, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;close method closes the source file indicated by the
+<a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> structure. Calling DB_MPOOLFILE-&gt;close does not imply a call
+to <a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>; that is, no pages are written to the source file
+as a result of calling DB_MPOOLFILE-&gt;close.
+<p>If the <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> was temporary, any underlying files created
+for this <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> will be removed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>After DB_MPOOLFILE-&gt;close has been called, regardless of its return, the
+<a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle may not be accessed again.
+<p>The DB_MPOOLFILE-&gt;close method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_fcreate.html b/libdb/docs/api_c/memp_fcreate.html
new file mode 100644
index 0000000..ce5e95f
--- /dev/null
+++ b/libdb/docs/api_c/memp_fcreate.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_fcreate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_fcreate</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_fcreate(DB_ENV *dbenvp, DB_MPOOLFILE **dbmfp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_fcreate method creates a <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> structure that
+is the handle for a Berkeley DB shared memory buffer pool file. A pointer to
+this structure is returned in the memory to which <b>dbmfp</b> refers.
+Calling the <a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method will discard the returned handle.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_ENV-&gt;memp_fcreate method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_fcreate method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_fcreate method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_fget.html b/libdb/docs/api_c/memp_fget.html
new file mode 100644
index 0000000..879162e
--- /dev/null
+++ b/libdb/docs/api_c/memp_fget.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;get(DB_MPOOLFILE *mpf,
+ db_pgno_t *pgnoaddr, u_int32_t flags, void **pagep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;get method copies a pointer to the page with the page number
+specified by <b>pgnoaddr</b>, from the source file in the <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>,
+into the memory location to which <b>pagep</b> refers. If the page
+does not exist or cannot be retrieved, DB_MPOOLFILE-&gt;get will fail.
+<p><b>Page numbers begin at 0; that is, the first page in the file is page
+number 0, not page number 1.</b>
+<p>The returned page is <b>size_t</b> type aligned.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CREATE">DB_MPOOL_CREATE</a><dd>If the specified page does not exist, create it. In this case, the
+<a href="memp_register.html#pgin">pgin</a> function, if specified, is
+called.
+<p><dt><a name="DB_MPOOL_LAST">DB_MPOOL_LAST</a><dd>Return the last page of the source file, and copy its page number into
+the memory location to which <b>pgnoaddr</b> refers.
+<p><dt><a name="DB_MPOOL_NEW">DB_MPOOL_NEW</a><dd>Create a new page in the file, and copy its page number into the memory
+location to which <b>pgnoaddr</b> refers. In this case, the
+<a href="memp_register.html#pgin">pgin</a> function, if specified, is
+<b>not</b> called.
+</dl>
+<p>The DB_MPOOL_CREATE, DB_MPOOL_LAST, and
+DB_MPOOL_NEW flags are mutually exclusive.
+<p>Fully or partially created pages have all their bytes set to a nul byte,
+unless the <a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a> method was called to specify other
+behavior before the file was opened.
+<p>All pages returned by DB_MPOOLFILE-&gt;get will be retained (that is,
+<i>pinned</i>) in the pool until a subsequent call to
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>.
+<a name="3"><!--meow--></a>
+<p>The DB_MPOOLFILE-&gt;get method returns a non-zero error value on failure, 0 on success, and returns DB_PAGE_NOTFOUND if the requested page does not
+exist and DB_MPOOL_CREATE was not set.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;get method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The page reference count has overflowed. (This should never happen unless
+there's a bug in the application.)
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_MPOOL_NEW flag was set, and the source file was not
+opened for writing.
+<p>More than one of DB_MPOOL_CREATE, DB_MPOOL_LAST, and
+DB_MPOOL_NEW was set.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The cache is full, and no more pages will fit in the pool.
+</dl>
+<p>The DB_MPOOLFILE-&gt;get method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;get method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
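A hedged sketch of pinning a page with DB_MPOOLFILE->get and releasing it again. It assumes mpf is a DB_MPOOLFILE already opened in the pool and that the caller actually modifies the page, which is why the release marks it dirty; the helper name and the separate dbenv parameter used for error reporting are illustrative.

#include <db.h>

/* Pin page `pgno`, creating it if necessary, then return it to the pool (sketch). */
int
touch_page(DB_MPOOLFILE *mpf, DB_ENV *dbenv, db_pgno_t pgno)
{
	void *page;
	int ret;

	if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &page)) != 0) {
		dbenv->err(dbenv, ret, "DB_MPOOLFILE->get");
		return (ret);
	}

	/* ... read or modify the pinned page here ... */

	/* The page stays pinned until it is handed back to the pool. */
	if ((ret = mpf->put(mpf, page, DB_MPOOL_DIRTY)) != 0)
		dbenv->err(dbenv, ret, "DB_MPOOLFILE->put");
	return (ret);
}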
diff --git a/libdb/docs/api_c/memp_fopen.html b/libdb/docs/api_c/memp_fopen.html
new file mode 100644
index 0000000..bd9a845
--- /dev/null
+++ b/libdb/docs/api_c/memp_fopen.html
@@ -0,0 +1,94 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;open</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;open(DB_MPOOLFILE *mpf,
+ char *file, u_int32_t flags, int mode, size_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;open method opens a file in the shared memory buffer pool.
+The <b>file</b> argument is the name of the file to be opened. If
+<b>file</b> is NULL, a private temporary file is created that
+cannot be shared with any other process (although it may be shared with
+other threads).
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will
+fail.
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_DIRECT">DB_DIRECT</a><dd>If set and supported by the system, turn off system buffering of the
+file to avoid double caching.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Always copy this file into the local cache instead of potentially mapping
+it into process memory (see the description of the
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_ODDFILESIZE">DB_ODDFILESIZE</a><dd>Attempts to open files which are not a multiple of the page size in
+length will fail, by default. If the DB_ODDFILESIZE flag is
+set, any partial page at the end of the file will be ignored and the
+open will proceed.
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open any underlying files for reading only. Any attempt to write the file
+using the pool functions will fail, regardless of the actual permissions
+of the file.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+function DB_MPOOLFILE-&gt;open are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, function DB_MPOOLFILE-&gt;open will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>The <b>pagesize</b> argument is the size, in bytes, of the unit of transfer
+between the application and the pool, although it is not necessarily the
+unit of transfer between the pool and the source file.
+<p>The DB_MPOOLFILE-&gt;open method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;open method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The file has already been entered into the pool, and the <b>pagesize</b>
+value is not the same as when the file was entered into the pool, or the
+length of the file is not zero or a multiple of the <b>pagesize</b>.
+<p>The DB_RDONLY flag was specified for an in-memory pool.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of open files has been reached.
+</dl>
+<p>The DB_MPOOLFILE-&gt;open method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;open method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
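A sketch combining DB_ENV->memp_fcreate and DB_MPOOLFILE->open to bring a file into the environment's buffer pool. The file name "data.file", the 0 (default) mode, and the 4096-byte page size are assumptions chosen only for illustration; dbenv is assumed to be an environment opened with a memory pool.

#include <db.h>

/* Open a named file in the environment's buffer pool (sketch). */
int
open_pool_file(DB_ENV *dbenv, DB_MPOOLFILE **mpfp)
{
	DB_MPOOLFILE *mpf;
	int ret;

	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->memp_fcreate");
		return (ret);
	}
	/* DB_CREATE: create the file if missing; 0: default mode; 4096: page size. */
	if ((ret = mpf->open(mpf, "data.file", DB_CREATE, 0, 4096)) != 0) {
		dbenv->err(dbenv, ret, "DB_MPOOLFILE->open");
		(void)mpf->close(mpf, 0);
		return (ret);
	}
	*mpfp = mpf;
	return (0);
}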
diff --git a/libdb/docs/api_c/memp_fput.html b/libdb/docs/api_c/memp_fput.html
new file mode 100644
index 0000000..86ac86b
--- /dev/null
+++ b/libdb/docs/api_c/memp_fput.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;put</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;put(DB_MPOOLFILE *mpf, void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;put method indicates that the page to which <b>pgaddr</b>
+refers can be evicted from the pool. The <b>pgaddr</b> argument must
+be an address previously returned by <a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (that is, don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file before
+being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future, and should be
+discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DB_MPOOLFILE-&gt;put method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;put method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>The <a href="../api_c/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a> flag was set and the source file was not
+opened for writing.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>pgaddr</b> argument does not refer to a page returned by
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>.
+<p>More than one of the <a href="../api_c/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a> and <a href="../api_c/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a>
+flags was set.
+</dl>
+<p>The DB_MPOOLFILE-&gt;put method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;put method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
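A small sketch of the other common release pattern: returning a page that was only read, hinting with DB_MPOOL_DISCARD that it will not be needed again soon. The page pointer is assumed to have come from a prior DB_MPOOLFILE->get.

#include <db.h>

/* Release a read-only page and hint that it can be evicted early (sketch). */
int
release_read_page(DB_MPOOLFILE *mpf, DB_ENV *dbenv, void *page)
{
	int ret;

	if ((ret = mpf->put(mpf, page, DB_MPOOL_DISCARD)) != 0)
		dbenv->err(dbenv, ret, "DB_MPOOLFILE->put");
	return (ret);
}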
diff --git a/libdb/docs/api_c/memp_fset.html b/libdb/docs/api_c/memp_fset.html
new file mode 100644
index 0000000..fcfba18
--- /dev/null
+++ b/libdb/docs/api_c/memp_fset.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set(DB_MPOOLFILE *mpf, void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set method sets the flags associated with the page to which
+<b>pgaddr</b> refers without unpinning it from the pool. The
+<b>pgaddr</b> argument must be an address previously returned by
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (that is, don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file before
+being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future, and should be
+discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DB_MPOOLFILE-&gt;set method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_MPOOLFILE-&gt;set method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_fsync.html b/libdb/docs/api_c/memp_fsync.html
new file mode 100644
index 0000000..58e9d69
--- /dev/null
+++ b/libdb/docs/api_c/memp_fsync.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;sync</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;sync(DB_MPOOLFILE *mpf);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;sync method writes all pages associated with the
+<a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>, which were marked as modified using
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a> or <a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>, back to the source file. If
+any of the modified pages are also <i>pinned</i> (that is, this or
+another process currently refers to them), DB_MPOOLFILE-&gt;sync will
+ignore them.
+<p>The DB_MPOOLFILE-&gt;sync method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;sync method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;sync method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_list.html b/libdb/docs/api_c/memp_list.html
new file mode 100644
index 0000000..a5b4dc3
--- /dev/null
+++ b/libdb/docs/api_c/memp_list.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Memory Pools and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Memory Pools and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Memory Pools and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_register.html b/libdb/docs/api_c/memp_register.html
new file mode 100644
index 0000000..5d064f1
--- /dev/null
+++ b/libdb/docs/api_c/memp_register.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_register</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_register(DB_ENV *env, int ftype,
+ int (*pgin_fcn)(DB_ENV *, db_pgno_t pgno, void *pgaddr, DBT *pgcookie),
+ int (*pgout_fcn)(DB_ENV *, db_pgno_t pgno, void *pgaddr, DBT *pgcookie));
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_register method registers page-in and page-out
+functions for files of type <b>ftype</b> in the specified pool.
+<p>If the <b>pgin_fcn</b> function is non-NULL, it is called each time
+a page is read into the memory pool from a file of type <b>ftype</b>, or
+a page is created for a file of type <b>ftype</b> (see the
+DB_MPOOL_CREATE flag for the <a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a> method).
+<p>If the <b>pgout_fcn</b> function is non-NULL, it is called each time
+a page is written to a file of type <b>ftype</b>.
+<p>Both the <b>pgin_fcn</b> and <b>pgout_fcn</b> functions are called
+with a reference to the current environment, the page number, a pointer
+to the page being read or written, and any argument <b>pgcookie</b>
+that was specified to the <a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a> method. The
+<b>pgin_fcn</b> and <b>pgout_fcn</b> functions should return 0 on
+success, and an applicable non-zero <b>errno</b> value on failure, in
+which case the shared memory pool interface routine (and, by extension,
+any Berkeley DB library function) calling it will also fail, returning that
+<b>errno</b> value.
+<p>The purpose of the DB_ENV-&gt;memp_register function is to support processing
+when pages are entered into, or flushed from, the pool. A file type must
+be specified to make it possible for unrelated threads or processes that
+are sharing a pool, to evict each other's pages from the pool. During
+initialization, applications should call DB_ENV-&gt;memp_register for each
+type of file requiring input or output processing that will be sharing
+the underlying pool. (No registry is necessary for the standard Berkeley DB
+access method types because <a href="../api_c/db_open.html">DB-&gt;open</a> registers them separately.)
+<p>If a thread or process does not call DB_ENV-&gt;memp_register for a file
+type, it is impossible for it to evict pages for any file requiring input
+or output processing from the pool. For this reason,
+DB_ENV-&gt;memp_register should always be called by each application sharing
+a pool for each type of file included in the pool, regardless of whether
+or not the application itself uses files of that type.
+<p>There are no standard values for <b>ftype</b>, <b>pgin_fcn</b>,
+<b>pgout_fcn</b>, and <b>pgcookie</b>, except that the <b>ftype</b>
+value for a file must be a non-zero positive number less than 128
+(0 and negative numbers are reserved for internal use by the Berkeley DB
+library). For this reason, applications sharing a pool must coordinate
+the values among themselves.
+<p>The DB_ENV-&gt;memp_register method returns a non-zero error value on failure and 0 on success.
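+<p>The following fragment is illustrative only and is not part of the
+Berkeley DB interface: it sketches how an application might register
+byte-swapping input and output functions for its own page format, where
+the <b>MY_FTYPE</b> value and the <b>my_pgin</b>/<b>my_pgout</b> names
+are hypothetical.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+#define MY_FTYPE 1    /* Application-chosen; positive and less than 128. */
+
+/* Convert a page to host byte order after it is read into the pool. */
+int
+my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* ... byte-swap the fields of the page at pgaddr ... */
+    return (0);
+}
+
+/* Convert a page to on-disk byte order before it is written out. */
+int
+my_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+    /* ... byte-swap the fields of the page at pgaddr ... */
+    return (0);
+}
+
+int
+register_my_ftype(DB_ENV *dbenv)
+{
+    /* Register the callbacks for all files of type MY_FTYPE. */
+    return (dbenv-&gt;memp_register(dbenv, MY_FTYPE, my_pgin, my_pgout));
+}
+</pre></blockquote>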
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_register method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_register method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_set_clear_len.html b/libdb/docs/api_c/memp_set_clear_len.html
new file mode 100644
index 0000000..609d79f
--- /dev/null
+++ b/libdb/docs/api_c/memp_set_clear_len.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_clear_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_clear_len</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_clear_len(DB_MPOOLFILE *mpf, u_int32_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_clear_len method sets the number of initial bytes in a
+page that should be set to nul when the page is created as a result of
+the <a href="../api_c/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a> or <a href="../api_c/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a> flags being
+specified to <a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>. If no value is specified, or <b>len</b>
+is 0, the entire page is cleared.
+<p>The DB_MPOOLFILE-&gt;set_clear_len method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p>The DB_MPOOLFILE-&gt;set_clear_len interface may not be called after the <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> is called, the information specified to DB_MPOOLFILE-&gt;set_clear_len
+must be consistent with the existing file or an error will be
+returned.
+<p>The DB_MPOOLFILE-&gt;set_clear_len method returns a non-zero error value on failure and 0 on success.
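+<p>The following fragment is illustrative only: it sketches clearing just a
+hypothetical 32-byte application page header on newly created pages, where
+<b>dbenv</b> and <b>mpf</b> are assumed to be a DB_ENV handle and a
+not-yet-opened <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+configure_clear_len(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* Clear only the first 32 bytes of newly created pages. */
+    if ((ret = mpf-&gt;set_clear_len(mpf, 32)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_clear_len");
+    return (ret);
+}
+</pre></blockquote>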
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_clear_len method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_clear_len method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_set_fileid.html b/libdb/docs/api_c/memp_set_fileid.html
new file mode 100644
index 0000000..a74468e
--- /dev/null
+++ b/libdb/docs/api_c/memp_set_fileid.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_fileid</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_fileid</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_fileid(DB_MPOOLFILE *mpf, u_int8_t *fileid);
+</pre></h3>
+<h1>Description</h1>
+<p>The shared memory buffer pool functions must be able to uniquely
+identify files in order that multiple processes wanting to share a file
+will correctly identify it in the pool. The DB_MPOOLFILE-&gt;set_fileid method
+specifies a unique identifier for the file. Unique file identifiers
+must be a DB_FILE_ID_LEN length array of bytes.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to
+be set, and the memory pool functions will use the file's device and
+inode numbers for this purpose. On Windows systems, the memory pool
+functions use the values returned by GetFileInformationByHandle() by
+default -- these values are known to be constant between processes and
+over reboot in the case of NTFS (in which they are the NTFS MFT
+indices).
+<p>On other filesystems (for example, FAT or NFS), these default values
+are not necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, in which the pool contains
+pages from files stored on such filesystems, must specify a unique file
+identifier using the DB_MPOOLFILE-&gt;set_fileid method, and each process opening
+the file must provide the same unique identifier.</b>
+<p>This call should not be necessary for most applications. Specifically,
+it is not necessary if the memory pool is not shared between processes
+and is reinstantiated after each system reboot, if the application is
+using the Berkeley DB access methods instead of calling the pool functions
+explicitly, or if the files in the memory pool are stored on filesystems
+in which the default values as described previously are invariant
+between processes and across system reboots.
+<p>The DB_MPOOLFILE-&gt;set_fileid method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p>The DB_MPOOLFILE-&gt;set_fileid interface may not be called after the <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>
+interface is called.
+<p>The DB_MPOOLFILE-&gt;set_fileid method returns a non-zero error value on failure and 0 on success.
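+<p>The following fragment is illustrative only: it sketches how every
+cooperating process might derive the same identifier from an agreed-upon
+string before calling <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>;
+the string and the <b>dbenv</b>/<b>mpf</b> handles are hypothetical.
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;string.h&gt;
+
+int
+configure_fileid(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    u_int8_t fileid[DB_FILE_ID_LEN];
+    int ret;
+
+    /* Every process sharing the pool must build the identical identifier. */
+    memset(fileid, 0, sizeof(fileid));
+    memcpy(fileid, "my-shared-file", sizeof("my-shared-file") - 1);
+
+    if ((ret = mpf-&gt;set_fileid(mpf, fileid)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_fileid");
+    return (ret);
+}
+</pre></blockquote>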
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_fileid method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_fileid method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_set_ftype.html b/libdb/docs/api_c/memp_set_ftype.html
new file mode 100644
index 0000000..d328553
--- /dev/null
+++ b/libdb/docs/api_c/memp_set_ftype.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_ftype</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_ftype</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_ftype(DB_MPOOLFILE *mpf, int ftype);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_ftype method specifies a file type for the purposes of
+input or output processing of the file's pages as they are read from or
+written to, the backing filesystem store. The <b>ftype</b> argument
+must be the same as a <b>ftype</b> argument previously specified to
+the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a> method. (See the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>
+documentation for more information.)
+<p>The DB_MPOOLFILE-&gt;set_ftype method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p>The DB_MPOOLFILE-&gt;set_ftype interface may not be called after the <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> is called, the information specified to DB_MPOOLFILE-&gt;set_ftype
+will replace the existing information.
+<p>The DB_MPOOLFILE-&gt;set_ftype method returns a non-zero error value on failure and 0 on success.
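+<p>The following fragment is illustrative only: it sketches associating a
+file with the hypothetical <b>MY_FTYPE</b> type previously passed to
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>, where
+<b>dbenv</b> and <b>mpf</b> are assumed, not-yet-opened handles.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+#define MY_FTYPE 1    /* Same value given to DB_ENV-&gt;memp_register. */
+
+int
+configure_ftype(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    if ((ret = mpf-&gt;set_ftype(mpf, MY_FTYPE)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_ftype");
+    return (ret);
+}
+</pre></blockquote>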
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_ftype method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_ftype method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_set_lsn_offset.html b/libdb/docs/api_c/memp_set_lsn_offset.html
new file mode 100644
index 0000000..c79fb63
--- /dev/null
+++ b/libdb/docs/api_c/memp_set_lsn_offset.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_lsn_offset</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_lsn_offset</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_lsn_offset(DB_MPOOLFILE *mpf, int32_t lsn_offset);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset method specifies the zero-based byte offset
+of a log sequence number (<a href="../api_c/lsn_class.html">DB_LSN</a>) on the file's pages, for the
+purposes of page-flushing as part of transaction checkpoint. (See the
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> documentation for more information.)
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset interface may not be called after the <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> is called, the information specified to DB_MPOOLFILE-&gt;set_lsn_offset
+must be consistent with the existing file or an error will be
+returned.
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset method returns a non-zero error value on failure and 0 on success.
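+<p>The following fragment is illustrative only: it sketches a file whose
+pages store the <a href="../api_c/lsn_class.html">DB_LSN</a> at byte offset
+0, where <b>dbenv</b> and <b>mpf</b> are assumed, not-yet-opened handles.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+configure_lsn_offset(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* The LSN is stored at the start of every page in this file. */
+    if ((ret = mpf-&gt;set_lsn_offset(mpf, 0)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_lsn_offset");
+    return (ret);
+}
+</pre></blockquote>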
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_lsn_offset method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_set_pgcookie.html b/libdb/docs/api_c/memp_set_pgcookie.html
new file mode 100644
index 0000000..bf33832
--- /dev/null
+++ b/libdb/docs/api_c/memp_set_pgcookie.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_pgcookie</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_pgcookie</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_pgcookie(DB_MPOOLFILE *mpf, DBT *pgcookie);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_pgcookie method specifies a byte string that is provided
+to the functions registered to do input or output processing of the
+file's pages as they are read from or written to, the backing filesystem
+store. (See the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a> documentation for more
+information.)
+<p>The DB_MPOOLFILE-&gt;set_pgcookie method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle.
+<p>The DB_MPOOLFILE-&gt;set_pgcookie interface may not be called after the <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> is called, the information specified to DB_MPOOLFILE-&gt;set_pgcookie
+will replace the existing information.
+<p>The DB_MPOOLFILE-&gt;set_pgcookie method returns a non-zero error value on failure and 0 on success.
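+<p>The following fragment is illustrative only: it sketches passing a page
+size to the registered input and output functions through the cookie, where
+<b>dbenv</b> and <b>mpf</b> are assumed, not-yet-opened handles and the
+4096-byte value is hypothetical.
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;string.h&gt;
+
+int
+configure_pgcookie(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    static size_t pagesize = 4096;    /* Illustrative value. */
+    DBT pgcookie;
+    int ret;
+
+    /* Hand the page size to the pgin/pgout functions via the cookie. */
+    memset(&amp;pgcookie, 0, sizeof(pgcookie));
+    pgcookie.data = &amp;pagesize;
+    pgcookie.size = sizeof(pagesize);
+
+    if ((ret = mpf-&gt;set_pgcookie(mpf, &amp;pgcookie)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_pgcookie");
+    return (ret);
+}
+</pre></blockquote>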
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_pgcookie method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_pgcookie method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_stat.html b/libdb/docs/api_c/memp_stat.html
new file mode 100644
index 0000000..fed5654
--- /dev/null
+++ b/libdb/docs/api_c/memp_stat.html
@@ -0,0 +1,129 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_stat(DB_ENV *env, DB_MPOOL_STAT **gsp,
+ DB_MPOOL_FSTAT *(*fsp)[], u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_stat and <a href="../api_cxx/memp_stat.html">DbEnv::memp_fstat</a> methods return the memory pool
+subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DB_ENV-&gt;memp_stat and <a href="../api_cxx/memp_stat.html">DbEnv::memp_fstat</a> methods create statistical
+structures of type DB_MPOOL_STAT and DB_MPOOL_FSTAT, and copy pointers
+to them into user-specified memory locations. The memory pool region
+statistics are stored in the DB_MPOOL_STAT structure and the per-file
+memory pool statistics are stored in the DB_MPOOL_FSTAT structure.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>If <b>gsp</b> is non-NULL, the global statistics for the memory pool
+<b>mp</b> are copied into the memory location to which it refers. The
+following DB_MPOOL_STAT fields will be filled in:
+<p><dl compact>
+<dt>size_t st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>size_t st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>u_int32_t st_ncache;<dd>Number of caches.
+<dt>u_int32_t st_regsize;<dd>Individual cache size.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information about whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>u_int32_t st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>u_int32_t st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>u_int32_t st_page_trickle;<dd>Dirty pages written using the <a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a> interface.
+<dt>u_int32_t st_pages;<dd>Pages in the cache.
+<dt>u_int32_t st_page_clean;<dd>Clean pages currently in the cache.
+<dt>u_int32_t st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>u_int32_t st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>u_int32_t st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>u_int32_t st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>u_int32_t st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>u_int32_t st_hash_nowait;<dd>The number of times that a thread of control was able to obtain a hash
+bucket lock without waiting.
+<dt>u_int32_t st_hash_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a hash bucket lock.
+<dt>u_int32_t st_hash_max_wait;<dd>The maximum number of times any hash bucket lock was waited for by a
+thread of control.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain a region
+lock without waiting.
+<dt>u_int32_t st_alloc;<dd>Number of page allocations.
+<dt>u_int32_t st_alloc_buckets;<dd>Number of hash buckets checked during allocation.
+<dt>u_int32_t st_alloc_max_buckets;<dd>Maximum number of hash buckets checked during an allocation.
+<dt>u_int32_t st_alloc_pages;<dd>Number of pages checked during allocation.
+<dt>u_int32_t st_alloc_max_pages;<dd>Maximum number of pages checked during an allocation.
+</dl>
+<p>If <b>fsp</b> is non-NULL, a pointer to a NULL-terminated
+variable length array of statistics for individual files, in the memory
+pool <b>mp</b>, is copied into the memory location to which it refers.
+If no individual files currently exist in the memory pool, <b>fsp</b>
+will be set to NULL.
+<p>The per-file statistics are stored in structures of type DB_MPOOL_FSTAT.
+The following DB_MPOOL_FSTAT fields will be filled in for each file in
+the pool; that is, each element of the array:
+<p><dl compact>
+<dt>char *file_name;<dd>The name of the file.
+<dt>size_t st_pagesize;<dd>Page size in bytes.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
+<p>The DB_ENV-&gt;memp_stat method returns a non-zero error value on failure and 0 on success.
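+<p>The following fragment is illustrative only: it sketches displaying the
+cache hit and miss counts and then releasing the returned memory; only the
+global statistics are requested, and <b>dbenv</b> is an assumed open DB_ENV
+handle.
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+void
+show_mpool_stats(DB_ENV *dbenv)
+{
+    DB_MPOOL_STAT *gsp;
+    int ret;
+
+    /* Request only the global statistics; skip the per-file statistics. */
+    if ((ret = dbenv-&gt;memp_stat(dbenv, &amp;gsp, NULL, 0)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;memp_stat");
+        return;
+    }
+    printf("cache hits: %lu, cache misses: %lu\n",
+        (unsigned long)gsp-&gt;st_cache_hit, (unsigned long)gsp-&gt;st_cache_miss);
+
+    /* The structure was allocated on the caller's behalf; free the reference. */
+    free(gsp);
+}
+</pre></blockquote>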
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_stat method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;memp_stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_sync.html b/libdb/docs/api_c/memp_sync.html
new file mode 100644
index 0000000..135775f
--- /dev/null
+++ b/libdb/docs/api_c/memp_sync.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_sync</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_sync(DB_ENV *env, DB_LSN *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_sync method ensures that any modified pages in the pool with
+a log sequence number (<a href="../api_c/lsn_class.html">DB_LSN</a>) less than the <b>lsn</b> argument
+are written to disk. The purpose of the <b>lsn</b> argument is to
+enable a transaction manager to ensure, as part of a checkpoint, that
+all pages modified by a certain time have been written to disk. Pages
+in the pool that cannot be immediately written back to disk (for
+example, pages that are currently in use by another thread of control)
+are waited for and written to disk as soon as it is possible to do so.
+If <b>lsn</b> is NULL, all modified pages in the pool are written
+to disk.
+<p>To support the DB_ENV-&gt;memp_sync functionality, it is necessary that the
+pool functions know the location of the log sequence number on the page
+for each file type. This location should be specified when the file is
+opened using the <a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a> method. It is not required that
+the log sequence number be aligned on the page in any way.
+<p>The DB_ENV-&gt;memp_sync method returns a non-zero error value on failure and 0 on success.
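+<p>The following fragment is illustrative only: it sketches a full flush of
+the pool (rather than a flush up to a particular LSN) by passing a NULL
+<b>lsn</b>, assuming <b>dbenv</b> is an open DB_ENV handle in an environment
+with logging initialized.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+flush_pool(DB_ENV *dbenv)
+{
+    int ret;
+
+    /* Write every modified page in the pool to its backing file. */
+    if ((ret = dbenv-&gt;memp_sync(dbenv, NULL)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;memp_sync");
+    return (ret);
+}
+</pre></blockquote>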
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_sync method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_ENV-&gt;memp_sync function was called without logging having been
+initialized in the environment.
+</dl>
+<p>The DB_ENV-&gt;memp_sync method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_sync method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/memp_trickle.html b/libdb/docs/api_c/memp_trickle.html
new file mode 100644
index 0000000..e9af067
--- /dev/null
+++ b/libdb/docs/api_c/memp_trickle.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_trickle</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_trickle(DB_ENV *env, int pct, int *nwrotep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_trickle method ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+If the <b>nwrotep</b> argument is non-NULL, the number of pages that
+were written to reach the correct percentage is returned in the memory
+location to which it refers.
+<p>The purpose of the DB_ENV-&gt;memp_trickle function is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
+<p>The DB_ENV-&gt;memp_trickle method returns a non-zero error value on failure and 0 on success.
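+<p>The following fragment is illustrative only: it sketches a background
+thread keeping at least 20 percent of the pool clean and reporting how many
+pages it wrote, where <b>dbenv</b> is an assumed open DB_ENV handle.
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+trickle_pool(DB_ENV *dbenv)
+{
+    int nwrote, ret;
+
+    /* Keep at least 20 percent of the buffer pool pages clean. */
+    if ((ret = dbenv-&gt;memp_trickle(dbenv, 20, &amp;nwrote)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;memp_trickle");
+    else
+        printf("trickle: wrote %d pages\n", nwrote);
+    return (ret);
+}
+</pre></blockquote>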
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_trickle method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;memp_trickle method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_trickle method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/mempfile_class.html b/libdb/docs/api_c/mempfile_class.html
new file mode 100644
index 0000000..65b8ec1
--- /dev/null
+++ b/libdb/docs/api_c/mempfile_class.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_mpoolfile DB_MPOOLFILE;
+</pre></h3>
+<h1>Description</h1>
+<p>The memory pool interfaces for the Berkeley DB database environment are
+methods of the <a href="../api_c/env_class.html">DB_ENV</a> handle. The <a href="../api_c/env_class.html">DB_ENV</a> memory pool
+methods and the DB_MPOOLFILE class provide general-purpose,
+page-oriented buffer management of files. Although designed to work
+with the other <a href="../api_c/db_class.html">DB</a> classes, they are also useful for more general
+purposes. The memory pools are referred to in this document as simply
+<i>pools</i>.
+<p>Pools may be shared between processes. Pools are usually filled by
+pages from one or more files. Pages in the pool are replaced in LRU
+(least-recently-used) order, with each new page replacing the page that
+has been unused the longest. Pages retrieved from the pool using
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a> are <i>pinned</i> in the pool until they are
+returned to the control of the buffer pool using the <a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>
+method.
+<p>The DB_MPOOLFILE object is the handle for a file in the memory
+pool. The handle is not free-threaded. Once the <a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method
+is called, the handle may not be accessed again, regardless of that
+method's return.
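+<p>The following fragment is illustrative only: it sketches the pin/unpin
+cycle described above, using a hypothetical file name and page size and
+abbreviated error handling; <b>dbenv</b> is an assumed open DB_ENV handle.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+touch_first_page(DB_ENV *dbenv)
+{
+    DB_MPOOLFILE *mpf;
+    db_pgno_t pgno;
+    void *pagep;
+    int ret;
+
+    if ((ret = dbenv-&gt;memp_fcreate(dbenv, &amp;mpf, 0)) != 0)
+        return (ret);
+    if ((ret = mpf-&gt;open(mpf, "my.pool.file", DB_CREATE, 0, 1024)) != 0)
+        goto err;
+
+    /* The page is pinned in the pool from get until put. */
+    pgno = 0;
+    if ((ret = mpf-&gt;get(mpf, &amp;pgno, DB_MPOOL_CREATE, &amp;pagep)) != 0)
+        goto err;
+    /* ... examine the page through pagep ... */
+    ret = mpf-&gt;put(mpf, pagep, 0);
+
+err:    (void)mpf-&gt;close(mpf, 0);
+    return (ret);
+}
+</pre></blockquote>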
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, DB_MPOOLFILE
+<h1>See Also</h1>
+<a href="../api_c/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/pindex.src b/libdb/docs/api_c/pindex.src
new file mode 100644
index 0000000..1d50c60
--- /dev/null
+++ b/libdb/docs/api_c/pindex.src
@@ -0,0 +1,434 @@
+__APIREL__/api_c/db_associate.html__OCT__2 @DB-__GT__associate
+__APIREL__/api_c/db_associate.html__OCT__3 @DB_DBT_APPMALLOC
+__APIREL__/api_c/db_associate.html__OCT__4 @DB_DONOTINDEX
+__APIREL__/api_c/db_associate.html__OCT__DB_CREATE DB-__GT__associate@DB_CREATE
+__APIREL__/api_c/db_associate.html__OCT__DB_AUTO_COMMIT DB-__GT__associate@DB_AUTO_COMMIT
+__APIREL__/api_c/db_class.html__OCT__2 @DB
+__APIREL__/api_c/db_close.html__OCT__2 @DB-__GT__close
+__APIREL__/api_c/db_close.html__OCT__DB_NOSYNC DB-__GT__close@DB_NOSYNC
+__APIREL__/api_c/db_cursor.html__OCT__2 @DB-__GT__cursor
+__APIREL__/api_c/db_cursor.html__OCT__DB_DIRTY_READ DB-__GT__cursor@DB_DIRTY_READ
+__APIREL__/api_c/db_cursor.html__OCT__DB_WRITECURSOR DB-__GT__cursor@DB_WRITECURSOR
+__APIREL__/api_c/db_del.html__OCT__2 @DB-__GT__del
+__APIREL__/api_c/db_del.html__OCT__DB_AUTO_COMMIT DB-__GT__del@DB_AUTO_COMMIT
+__APIREL__/api_c/db_err.html__OCT__2 @DB-__GT__err
+__APIREL__/api_c/db_fd.html__OCT__2 @DB-__GT__fd
+__APIREL__/api_c/db_get.html__OCT__2 @DB-__GT__get
+__APIREL__/api_c/db_get.html__OCT__DB_CONSUME DB-__GT__get@DB_CONSUME
+__APIREL__/api_c/db_get.html__OCT__DB_CONSUME_WAIT DB-__GT__get@DB_CONSUME_WAIT
+__APIREL__/api_c/db_get.html__OCT__DB_GET_BOTH DB-__GT__get@DB_GET_BOTH
+__APIREL__/api_c/db_get.html__OCT__DB_SET_RECNO DB-__GT__get@DB_SET_RECNO
+__APIREL__/api_c/db_get.html__OCT__DB_DIRTY_READ DB-__GT__get@DB_DIRTY_READ
+__APIREL__/api_c/db_get.html__OCT__DB_MULTIPLE DB-__GT__get@DB_MULTIPLE
+__APIREL__/api_c/db_get.html__OCT__DB_RMW DB-__GT__get@DB_RMW
+__APIREL__/api_c/db_get_byteswapped.html__OCT__2 @DB-__GT__get_byteswapped
+__APIREL__/api_c/db_get_type.html__OCT__2 @DB-__GT__get_type
+__APIREL__/api_c/db_join.html__OCT__2 @DB-__GT__join
+__APIREL__/api_c/db_join.html__OCT__DB_JOIN_NOSORT DB-__GT__join@DB_JOIN_NOSORT
+__APIREL__/api_c/db_join.html__OCT__DB_JOIN_ITEM DB-__GT__join@DB_JOIN_ITEM
+__APIREL__/api_c/db_join.html__OCT__DB_DIRTY_READ DB-__GT__join@DB_DIRTY_READ
+__APIREL__/api_c/db_join.html__OCT__DB_RMW DB-__GT__join@DB_RMW
+__APIREL__/api_c/db_key_range.html__OCT__2 @DB-__GT__key_range
+__APIREL__/api_c/db_open.html__OCT__2 @DB-__GT__open
+__APIREL__/api_c/db_open.html__OCT__DB_BTREE DB-__GT__open@DB_BTREE
+__APIREL__/api_c/db_open.html__OCT__DB_HASH DB-__GT__open@DB_HASH
+__APIREL__/api_c/db_open.html__OCT__DB_QUEUE DB-__GT__open@DB_QUEUE
+__APIREL__/api_c/db_open.html__OCT__DB_RECNO DB-__GT__open@DB_RECNO
+__APIREL__/api_c/db_open.html__OCT__DB_UNKNOWN DB-__GT__open@DB_UNKNOWN
+__APIREL__/api_c/db_open.html__OCT__DB_AUTO_COMMIT DB-__GT__open@DB_AUTO_COMMIT
+__APIREL__/api_c/db_open.html__OCT__DB_CREATE DB-__GT__open@DB_CREATE
+__APIREL__/api_c/db_open.html__OCT__DB_DIRTY_READ DB-__GT__open@DB_DIRTY_READ
+__APIREL__/api_c/db_open.html__OCT__DB_EXCL DB-__GT__open@DB_EXCL
+__APIREL__/api_c/db_open.html__OCT__DB_NOMMAP DB-__GT__open@DB_NOMMAP
+__APIREL__/api_c/db_open.html__OCT__DB_RDONLY DB-__GT__open@DB_RDONLY
+__APIREL__/api_c/db_open.html__OCT__DB_THREAD DB-__GT__open@DB_THREAD
+__APIREL__/api_c/db_open.html__OCT__DB_TRUNCATE DB-__GT__open@DB_TRUNCATE
+__APIREL__/api_c/db_open.html__OCT__DB_OLD_VERSION DB-__GT__open@DB_OLD_VERSION
+__APIREL__/api_c/db_put.html__OCT__2 @DB-__GT__put
+__APIREL__/api_c/db_put.html__OCT__DB_APPEND DB-__GT__put@DB_APPEND
+__APIREL__/api_c/db_put.html__OCT__DB_NODUPDATA DB-__GT__put@DB_NODUPDATA
+__APIREL__/api_c/db_put.html__OCT__DB_NOOVERWRITE DB-__GT__put@DB_NOOVERWRITE
+__APIREL__/api_c/db_put.html__OCT__DB_AUTO_COMMIT DB-__GT__put@DB_AUTO_COMMIT
+__APIREL__/api_c/db_remove.html__OCT__2 @DB-__GT__remove
+__APIREL__/api_c/db_rename.html__OCT__2 @DB-__GT__rename
+__APIREL__/api_c/db_set_append_recno.html__OCT__2 @DB-__GT__set_append_recno
+__APIREL__/api_c/db_set_bt_compare.html__OCT__2 @DB-__GT__set_bt_compare
+__APIREL__/api_c/db_set_bt_minkey.html__OCT__2 @DB-__GT__set_bt_minkey
+__APIREL__/api_c/db_set_bt_prefix.html__OCT__2 @DB-__GT__set_bt_prefix
+__APIREL__/api_c/db_set_cache_priority.html__OCT__2 @DB-__GT__set_cache_priority
+__APIREL__/api_c/db_set_cache_priority.html__OCT__DB_PRIORITY_VERY_LOW DB-__GT__set_cache_priority@DB_PRIORITY_VERY_LOW
+__APIREL__/api_c/db_set_cache_priority.html__OCT__DB_PRIORITY_LOW DB-__GT__set_cache_priority@DB_PRIORITY_LOW
+__APIREL__/api_c/db_set_cache_priority.html__OCT__DB_PRIORITY_DEFAULT DB-__GT__set_cache_priority@DB_PRIORITY_DEFAULT
+__APIREL__/api_c/db_set_cache_priority.html__OCT__DB_PRIORITY_HIGH DB-__GT__set_cache_priority@DB_PRIORITY_HIGH
+__APIREL__/api_c/db_set_cache_priority.html__OCT__DB_PRIORITY_VERY_HIGH DB-__GT__set_cache_priority@DB_PRIORITY_VERY_HIGH
+__APIREL__/api_c/db_set_cachesize.html__OCT__2 @DB-__GT__set_cachesize
+__APIREL__/api_c/db_set_dup_compare.html__OCT__2 @DB-__GT__set_dup_compare
+__APIREL__/api_c/db_set_encrypt.html__OCT__2 @DB-__GT__set_encrypt
+__APIREL__/api_c/db_set_encrypt.html__OCT__DB_ENCRYPT_AES DB-__GT__set_encrypt@DB_ENCRYPT_AES
+__APIREL__/api_c/db_set_errcall.html__OCT__2 @DB-__GT__set_errcall
+__APIREL__/api_c/db_set_errpfx.html__OCT__2 @DB-__GT__set_errpfx
+__APIREL__/api_c/db_set_feedback.html__OCT__2 @DB-__GT__set_feedback
+__APIREL__/api_c/db_set_feedback.html__OCT__DB_UPGRADE DB-__GT__set_feedback@DB_UPGRADE
+__APIREL__/api_c/db_set_feedback.html__OCT__DB_VERIFY DB-__GT__set_feedback@DB_VERIFY
+__APIREL__/api_c/db_set_flags.html__OCT__2 @DB-__GT__set_flags
+__APIREL__/api_c/db_set_flags.html__OCT__3 database page @checksum
+__APIREL__/api_c/db_set_flags.html__OCT__DB_CHKSUM_SHA1 DB-__GT__set_flags@DB_CHKSUM_SHA1
+__APIREL__/api_c/db_set_flags.html__OCT__4 database @encryption
+__APIREL__/api_c/db_set_flags.html__OCT__DB_ENCRYPT DB-__GT__set_flags@DB_ENCRYPT
+__APIREL__/api_c/db_set_flags.html__OCT__5 @duplicate data items
+__APIREL__/api_c/db_set_flags.html__OCT__DB_DUP DB-__GT__set_flags@DB_DUP
+__APIREL__/api_c/db_set_flags.html__OCT__6 sorted @duplicate data items
+__APIREL__/api_c/db_set_flags.html__OCT__DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT
+__APIREL__/api_c/db_set_flags.html__OCT__7 accessing Btree records by @record number
+__APIREL__/api_c/db_set_flags.html__OCT__DB_RECNUM DB-__GT__set_flags@DB_RECNUM
+__APIREL__/api_c/db_set_flags.html__OCT__8 turn off @reverse splits in Btree databases
+__APIREL__/api_c/db_set_flags.html__OCT__9 turn off reverse @splits in Btree databases
+__APIREL__/api_c/db_set_flags.html__OCT__DB_REVSPLITOFF DB-__GT__set_flags@DB_REVSPLITOFF
+__APIREL__/api_c/db_set_flags.html__OCT__DB_DUP DB-__GT__set_flags@DB_DUP
+__APIREL__/api_c/db_set_flags.html__OCT__DB_DUPSORT DB-__GT__set_flags@DB_DUPSORT
+__APIREL__/api_c/db_set_flags.html__OCT__10 @renumbering records in Recno databases
+__APIREL__/api_c/db_set_flags.html__OCT__DB_RENUMBER DB-__GT__set_flags@DB_RENUMBER
+__APIREL__/api_c/db_set_flags.html__OCT__11 pre-loading @text files into Recno databases
+__APIREL__/api_c/db_set_flags.html__OCT__DB_SNAPSHOT DB-__GT__set_flags@DB_SNAPSHOT
+__APIREL__/api_c/db_set_h_ffactor.html__OCT__2 @DB-__GT__set_h_ffactor
+__APIREL__/api_c/db_set_h_hash.html__OCT__2 @DB-__GT__set_h_hash
+__APIREL__/api_c/db_set_h_nelem.html__OCT__2 @DB-__GT__set_h_nelem
+__APIREL__/api_c/db_set_lorder.html__OCT__2 @DB-__GT__set_lorder
+__APIREL__/api_c/db_set_pagesize.html__OCT__2 @DB-__GT__set_pagesize
+__APIREL__/api_c/db_set_q_extentsize.html__OCT__2 @DB-__GT__set_q_extentsize
+__APIREL__/api_c/db_set_re_delim.html__OCT__2 @DB-__GT__set_re_delim
+__APIREL__/api_c/db_set_re_len.html__OCT__2 @DB-__GT__set_re_len
+__APIREL__/api_c/db_set_re_pad.html__OCT__2 @DB-__GT__set_re_pad
+__APIREL__/api_c/db_set_re_source.html__OCT__2 @DB-__GT__set_re_source
+__APIREL__/api_c/db_stat.html__OCT__2 @DB-__GT__stat
+__APIREL__/api_c/db_stat.html__OCT__DB_FAST_STAT DB-__GT__stat@DB_FAST_STAT
+__APIREL__/api_c/db_sync.html__OCT__2 @DB-__GT__sync
+__APIREL__/api_c/db_truncate.html__OCT__2 @DB-__GT__truncate
+__APIREL__/api_c/db_truncate.html__OCT__DB_AUTO_COMMIT DB-__GT__truncate@DB_AUTO_COMMIT
+__APIREL__/api_c/db_upgrade.html__OCT__2 @DB-__GT__upgrade
+__APIREL__/api_c/db_upgrade.html__OCT__DB_DUPSORT DB-__GT__upgrade@DB_DUPSORT
+__APIREL__/api_c/db_upgrade.html__OCT__DB_OLD_VERSION DB-__GT__upgrade@DB_OLD_VERSION
+__APIREL__/api_c/db_verify.html__OCT__2 @DB-__GT__verify
+__APIREL__/api_c/db_verify.html__OCT__DB_SALVAGE DB-__GT__verify@DB_SALVAGE
+__APIREL__/api_c/db_verify.html__OCT__DB_AGGRESSIVE DB-__GT__verify@DB_AGGRESSIVE
+__APIREL__/api_c/db_verify.html__OCT__DB_PRINTABLE DB-__GT__verify@DB_PRINTABLE
+__APIREL__/api_c/db_verify.html__OCT__DB_NOORDERCHK DB-__GT__verify@DB_NOORDERCHK
+__APIREL__/api_c/db_verify.html__OCT__DB_ORDERCHKONLY DB-__GT__verify@DB_ORDERCHKONLY
+__APIREL__/api_c/db_verify.html__OCT__3 @DB_VERIFY_BAD
+__APIREL__/api_c/dbt_class.html__OCT__2 @DBT
+__APIREL__/api_c/dbt_class.html__OCT__3 @key/data pairs
+__APIREL__/api_c/dbt_class.html__OCT__data DBT@data
+__APIREL__/api_c/dbt_class.html__OCT__size DBT@size
+__APIREL__/api_c/dbt_class.html__OCT__ulen DBT@ulen
+__APIREL__/api_c/dbt_class.html__OCT__dlen DBT@dlen
+__APIREL__/api_c/dbt_class.html__OCT__doff DBT@doff
+__APIREL__/api_c/dbt_class.html__OCT__DB_DBT_MALLOC DBT@DB_DBT_MALLOC
+__APIREL__/api_c/dbt_class.html__OCT__DB_DBT_REALLOC DBT@DB_DBT_REALLOC
+__APIREL__/api_c/dbt_class.html__OCT__DB_DBT_USERMEM DBT@DB_DBT_USERMEM
+__APIREL__/api_c/dbt_class.html__OCT__DB_DBT_PARTIAL DBT@DB_DBT_PARTIAL
+__APIREL__/api_c/db_create.html__OCT__2 @db_create
+__APIREL__/api_c/db_create.html__OCT__DB_XA_CREATE db_create@DB_XA_CREATE
+__APIREL__/api_c/db_set_alloc.html__OCT__2 @DB-__GT__set_alloc
+__APIREL__/api_c/db_set_errfile.html__OCT__2 @DB-__GT__set_errfile
+__APIREL__/api_c/db_set_paniccall.html__OCT__2 @DB-__GT__set_paniccall
+__APIREL__/api_c/dbt_bulk.html__OCT__2 @DBT
+__APIREL__/api_c/dbt_bulk.html__OCT__3 @bulk retrieval
+__APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_INIT DBT@DB_MULTIPLE_INIT
+__APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_NEXT DBT@DB_MULTIPLE_NEXT
+__APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_KEY_NEXT DBT@DB_MULTIPLE_KEY_NEXT
+__APIREL__/api_c/dbt_bulk.html__OCT__DB_MULTIPLE_RECNO_NEXT DBT@DB_MULTIPLE_RECNO_NEXT
+__APIREL__/api_c/dbc_class.html__OCT__2 @DBC
+__APIREL__/api_c/dbc_close.html__OCT__2 @DBcursor-__GT__c_close
+__APIREL__/api_c/dbc_count.html__OCT__2 @DBcursor-__GT__c_count
+__APIREL__/api_c/dbc_del.html__OCT__2 @DBcursor-__GT__c_del
+__APIREL__/api_c/dbc_dup.html__OCT__2 @DBcursor-__GT__c_dup
+__APIREL__/api_c/dbc_dup.html__OCT__DB_POSITION DBcursor-__GT__c_dup@DB_POSITION
+__APIREL__/api_c/dbc_get.html__OCT__2 @DBcursor-__GT__c_get
+__APIREL__/api_c/dbc_get.html__OCT__DB_CURRENT DBcursor-__GT__c_get@DB_CURRENT
+__APIREL__/api_c/dbc_get.html__OCT__DB_FIRST DBcursor-__GT__c_get@DB_FIRST
+__APIREL__/api_c/dbc_get.html__OCT__DB_LAST DBcursor-__GT__c_get@DB_LAST
+__APIREL__/api_c/dbc_get.html__OCT__DB_GET_BOTH DBcursor-__GT__c_get@DB_GET_BOTH
+__APIREL__/api_c/dbc_get.html__OCT__DB_GET_BOTH_RANGE DBcursor-__GT__c_get@DB_GET_BOTH_RANGE
+__APIREL__/api_c/dbc_get.html__OCT__DB_GET_RECNO DBcursor-__GT__c_get@DB_GET_RECNO
+__APIREL__/api_c/dbc_get.html__OCT__DB_JOIN_ITEM DBcursor-__GT__c_get@DB_JOIN_ITEM
+__APIREL__/api_c/dbc_get.html__OCT__DB_NEXT DBcursor-__GT__c_get@DB_NEXT
+__APIREL__/api_c/dbc_get.html__OCT__DB_PREV DBcursor-__GT__c_get@DB_PREV
+__APIREL__/api_c/dbc_get.html__OCT__DB_NEXT_DUP DBcursor-__GT__c_get@DB_NEXT_DUP
+__APIREL__/api_c/dbc_get.html__OCT__DB_NEXT_NODUP DBcursor-__GT__c_get@DB_NEXT_NODUP
+__APIREL__/api_c/dbc_get.html__OCT__DB_PREV_NODUP DBcursor-__GT__c_get@DB_PREV_NODUP
+__APIREL__/api_c/dbc_get.html__OCT__DB_SET DBcursor-__GT__c_get@DB_SET
+__APIREL__/api_c/dbc_get.html__OCT__DB_SET_RANGE DBcursor-__GT__c_get@DB_SET_RANGE
+__APIREL__/api_c/dbc_get.html__OCT__DB_SET_RECNO DBcursor-__GT__c_get@DB_SET_RECNO
+__APIREL__/api_c/dbc_get.html__OCT__DB_DIRTY_READ DBcursor-__GT__c_get@DB_DIRTY_READ
+__APIREL__/api_c/dbc_get.html__OCT__DB_MULTIPLE DBcursor-__GT__c_get@DB_MULTIPLE
+__APIREL__/api_c/dbc_get.html__OCT__DB_MULTIPLE_KEY DBcursor-__GT__c_get@DB_MULTIPLE_KEY
+__APIREL__/api_c/dbc_get.html__OCT__DB_RMW DBcursor-__GT__c_get@DB_RMW
+__APIREL__/api_c/dbc_put.html__OCT__2 @DBcursor-__GT__c_put
+__APIREL__/api_c/dbc_put.html__OCT__DB_AFTER DBcursor-__GT__c_put@DB_AFTER
+__APIREL__/api_c/dbc_put.html__OCT__DB_BEFORE DBcursor-__GT__c_put@DB_BEFORE
+__APIREL__/api_c/dbc_put.html__OCT__DB_CURRENT DBcursor-__GT__c_put@DB_CURRENT
+__APIREL__/api_c/dbc_put.html__OCT__DB_KEYFIRST DBcursor-__GT__c_put@DB_KEYFIRST
+__APIREL__/api_c/dbc_put.html__OCT__DB_KEYLAST DBcursor-__GT__c_put@DB_KEYLAST
+__APIREL__/api_c/dbc_put.html__OCT__DB_NODUPDATA DBcursor-__GT__c_put@DB_NODUPDATA
+__APIREL__/api_c/env_class.html__OCT__2 @DB_ENV
+__APIREL__/api_c/env_close.html__OCT__2 @DB_ENV-__GT__close
+__APIREL__/api_c/env_dbremove.html__OCT__2 @DB_ENV-__GT__dbremove
+__APIREL__/api_c/env_dbremove.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__dbremove@DB_AUTO_COMMIT
+__APIREL__/api_c/env_dbrename.html__OCT__2 @DB_ENV-__GT__dbrename
+__APIREL__/api_c/env_dbrename.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__dbrename@DB_AUTO_COMMIT
+__APIREL__/api_c/env_err.html__OCT__2 @DB_ENV-__GT__err
+__APIREL__/api_c/env_open.html__OCT__2 @DB_ENV-__GT__open
+__APIREL__/api_c/env_open.html__OCT__DB_JOINENV DB_ENV-__GT__open@DB_JOINENV
+__APIREL__/api_c/env_open.html__OCT__DB_INIT_CDB DB_ENV-__GT__open@DB_INIT_CDB
+__APIREL__/api_c/env_open.html__OCT__DB_INIT_LOCK DB_ENV-__GT__open@DB_INIT_LOCK
+__APIREL__/api_c/env_open.html__OCT__DB_INIT_LOG DB_ENV-__GT__open@DB_INIT_LOG
+__APIREL__/api_c/env_open.html__OCT__DB_INIT_MPOOL DB_ENV-__GT__open@DB_INIT_MPOOL
+__APIREL__/api_c/env_open.html__OCT__DB_INIT_TXN DB_ENV-__GT__open@DB_INIT_TXN
+__APIREL__/api_c/env_open.html__OCT__DB_RECOVER DB_ENV-__GT__open@DB_RECOVER
+__APIREL__/api_c/env_open.html__OCT__DB_RECOVER_FATAL DB_ENV-__GT__open@DB_RECOVER_FATAL
+__APIREL__/api_c/env_open.html__OCT__3 use @environment variables in naming
+__APIREL__/api_c/env_open.html__OCT__DB_USE_ENVIRON DB_ENV-__GT__open@DB_USE_ENVIRON
+__APIREL__/api_c/env_open.html__OCT__DB_USE_ENVIRON_ROOT DB_ENV-__GT__open@DB_USE_ENVIRON_ROOT
+__APIREL__/api_c/env_open.html__OCT__DB_CREATE DB_ENV-__GT__open@DB_CREATE
+__APIREL__/api_c/env_open.html__OCT__DB_LOCKDOWN DB_ENV-__GT__open@DB_LOCKDOWN
+__APIREL__/api_c/env_open.html__OCT__DB_PRIVATE DB_ENV-__GT__open@DB_PRIVATE
+__APIREL__/api_c/env_open.html__OCT__DB_SYSTEM_MEM DB_ENV-__GT__open@DB_SYSTEM_MEM
+__APIREL__/api_c/env_open.html__OCT__DB_THREAD DB_ENV-__GT__open@DB_THREAD
+__APIREL__/api_c/env_remove.html__OCT__2 @DB_ENV-__GT__remove
+__APIREL__/api_c/env_remove.html__OCT__DB_FORCE DB_ENV-__GT__remove@DB_FORCE
+__APIREL__/api_c/env_remove.html__OCT__3 use @environment variables in naming
+__APIREL__/api_c/env_remove.html__OCT__DB_USE_ENVIRON DB_ENV-__GT__remove@DB_USE_ENVIRON
+__APIREL__/api_c/env_remove.html__OCT__DB_USE_ENVIRON_ROOT DB_ENV-__GT__remove@DB_USE_ENVIRON_ROOT
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__2 @DB_ENV-__GT__set_app_dispatch
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_BACKWARD_ROLL DB_ENV-__GT__set_app_dispatch@DB_TXN_BACKWARD_ROLL
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_FORWARD_ROLL DB_ENV-__GT__set_app_dispatch@DB_TXN_FORWARD_ROLL
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_ABORT DB_ENV-__GT__set_app_dispatch@DB_TXN_ABORT
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_APPLY DB_ENV-__GT__set_app_dispatch@DB_TXN_APPLY
+__APIREL__/api_c/env_set_app_dispatch.html__OCT__DB_TXN_PRINT DB_ENV-__GT__set_app_dispatch@DB_TXN_PRINT
+__APIREL__/api_c/env_set_cachesize.html__OCT__2 @DB_ENV-__GT__set_cachesize
+__APIREL__/api_c/env_set_data_dir.html__OCT__2 @DB_ENV-__GT__set_data_dir
+__APIREL__/api_c/env_set_encrypt.html__OCT__2 @DB_ENV-__GT__set_encrypt
+__APIREL__/api_c/env_set_encrypt.html__OCT__DB_ENCRYPT_AES DB_ENV-__GT__set_encrypt@DB_ENCRYPT_AES
+__APIREL__/api_c/env_set_errcall.html__OCT__2 @DB_ENV-__GT__set_errcall
+__APIREL__/api_c/env_set_errpfx.html__OCT__2 @DB_ENV-__GT__set_errpfx
+__APIREL__/api_c/env_set_feedback.html__OCT__2 @DB_ENV-__GT__set_feedback
+__APIREL__/api_c/env_set_feedback.html__OCT__DB_RECOVER DB_ENV-__GT__set_feedback@DB_RECOVER
+__APIREL__/api_c/env_set_flags.html__OCT__2 @DB_ENV-__GT__set_flags
+__APIREL__/api_c/env_set_flags.html__OCT__DB_AUTO_COMMIT DB_ENV-__GT__set_flags@DB_AUTO_COMMIT
+__APIREL__/api_c/env_set_flags.html__OCT__3 configure @locking for Berkeley DB Concurrent Data Store
+__APIREL__/api_c/env_set_flags.html__OCT__DB_CDB_ALLDB DB_ENV-__GT__set_flags@DB_CDB_ALLDB
+__APIREL__/api_c/env_set_flags.html__OCT__4 turn off system @buffering for database files
+__APIREL__/api_c/env_set_flags.html__OCT__DB_DIRECT_DB DB_ENV-__GT__set_flags@DB_DIRECT_DB
+__APIREL__/api_c/env_set_flags.html__OCT__5 turn off system @buffering for log files
+__APIREL__/api_c/env_set_flags.html__OCT__DB_DIRECT_LOG DB_ENV-__GT__set_flags@DB_DIRECT_LOG
+__APIREL__/api_c/env_set_flags.html__OCT__6 ignore @locking
+__APIREL__/api_c/env_set_flags.html__OCT__DB_NOLOCKING DB_ENV-__GT__set_flags@DB_NOLOCKING
+__APIREL__/api_c/env_set_flags.html__OCT__7 turn off database file @memory mapping
+__APIREL__/api_c/env_set_flags.html__OCT__DB_NOMMAP DB_ENV-__GT__set_flags@DB_NOMMAP
+__APIREL__/api_c/env_set_flags.html__OCT__8 ignore database environment @panic
+__APIREL__/api_c/env_set_flags.html__OCT__DB_NOPANIC DB_ENV-__GT__set_flags@DB_NOPANIC
+__APIREL__/api_c/env_set_flags.html__OCT__DB_OVERWRITE DB_ENV-__GT__set_flags@DB_OVERWRITE
+__APIREL__/api_c/env_set_flags.html__OCT__9 turn off access to a database @environment
+__APIREL__/api_c/env_set_flags.html__OCT__DB_PANIC_ENVIRONMENT DB_ENV-__GT__set_flags@DB_PANIC_ENVIRONMENT
+__APIREL__/api_c/env_set_flags.html__OCT__10 fault database @environment in during open
+__APIREL__/api_c/env_set_flags.html__OCT__DB_REGION_INIT DB_ENV-__GT__set_flags@DB_REGION_INIT
+__APIREL__/api_c/env_set_flags.html__OCT__11 turn off synchronous @transaction commit
+__APIREL__/api_c/env_set_flags.html__OCT__DB_TXN_NOSYNC DB_ENV-__GT__set_flags@DB_TXN_NOSYNC
+__APIREL__/api_c/env_set_flags.html__OCT__12 turn off synchronous @transaction commit
+__APIREL__/api_c/env_set_flags.html__OCT__DB_TXN_WRITE_NOSYNC DB_ENV-__GT__set_flags@DB_TXN_WRITE_NOSYNC
+__APIREL__/api_c/env_set_flags.html__OCT__13 configure for @stress testing
+__APIREL__/api_c/env_set_flags.html__OCT__DB_YIELDCPU DB_ENV-__GT__set_flags@DB_YIELDCPU
+__APIREL__/api_c/env_set_lg_bsize.html__OCT__2 @DB_ENV-__GT__set_lg_bsize
+__APIREL__/api_c/env_set_lg_dir.html__OCT__2 @DB_ENV-__GT__set_lg_dir
+__APIREL__/api_c/env_set_lg_max.html__OCT__2 @DB_ENV-__GT__set_lg_max
+__APIREL__/api_c/env_set_lg_regionmax.html__OCT__2 @DB_ENV-__GT__set_lg_regionmax
+__APIREL__/api_c/env_set_lk_conflicts.html__OCT__2 @DB_ENV-__GT__set_lk_conflicts
+__APIREL__/api_c/env_set_lk_detect.html__OCT__2 @DB_ENV-__GT__set_lk_detect
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_DEFAULT DB_ENV-__GT__set_lk_detect@DB_LOCK_DEFAULT
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_EXPIRE DB_ENV-__GT__set_lk_detect@DB_LOCK_EXPIRE
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MAXLOCKS DB_ENV-__GT__set_lk_detect@DB_LOCK_MAXLOCKS
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MINLOCKS DB_ENV-__GT__set_lk_detect@DB_LOCK_MINLOCKS
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_MINWRITE DB_ENV-__GT__set_lk_detect@DB_LOCK_MINWRITE
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_OLDEST DB_ENV-__GT__set_lk_detect@DB_LOCK_OLDEST
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_RANDOM DB_ENV-__GT__set_lk_detect@DB_LOCK_RANDOM
+__APIREL__/api_c/env_set_lk_detect.html__OCT__DB_LOCK_YOUNGEST DB_ENV-__GT__set_lk_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_c/env_set_lk_max_lockers.html__OCT__2 @DB_ENV-__GT__set_lk_max_lockers
+__APIREL__/api_c/env_set_lk_max_locks.html__OCT__2 @DB_ENV-__GT__set_lk_max_locks
+__APIREL__/api_c/env_set_lk_max_objects.html__OCT__2 @DB_ENV-__GT__set_lk_max_objects
+__APIREL__/api_c/env_set_mp_mmapsize.html__OCT__2 @DB_ENV-__GT__set_mp_mmapsize
+__APIREL__/api_c/env_set_rpc_server.html__OCT__2 @DB_ENV-__GT__set_rpc_server
+__APIREL__/api_c/env_set_rpc_server.html__OCT__3 @DB_NOSERVER
+__APIREL__/api_c/env_set_rpc_server.html__OCT__4 @DB_NOSERVER_ID
+__APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER DB_ENV-__GT__set_rpc_server@DB_NOSERVER
+__APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER_ID DB_ENV-__GT__set_rpc_server@DB_NOSERVER_ID
+__APIREL__/api_c/env_set_rpc_server.html__OCT__DB_NOSERVER_HOME DB_ENV-__GT__set_rpc_server@DB_NOSERVER_HOME
+__APIREL__/api_c/env_set_shm_key.html__OCT__2 @DB_ENV-__GT__set_shm_key
+__APIREL__/api_c/env_set_tas_spins.html__OCT__2 @DB_ENV-__GT__set_tas_spins
+__APIREL__/api_c/env_set_timeout.html__OCT__2 @DB_ENV-__GT__set_timeout
+__APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_LOCK_TIMEOUT
+__APIREL__/api_c/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DB_ENV-__GT__set_timeout@DB_SET_TXN_TIMEOUT
+__APIREL__/api_c/env_set_tmp_dir.html__OCT__2 @DB_ENV-__GT__set_tmp_dir
+__APIREL__/api_c/env_set_tmp_dir.html__OCT__3 @temporary files
+__APIREL__/api_c/env_set_tx_max.html__OCT__2 @DB_ENV-__GT__set_tx_max
+__APIREL__/api_c/env_set_tx_timestamp.html__OCT__2 @DB_ENV-__GT__set_tx_timestamp
+__APIREL__/api_c/env_set_verbose.html__OCT__2 @DB_ENV-__GT__set_verbose
+__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_CHKPOINT DB_ENV-__GT__set_verbose@DB_VERB_CHKPOINT
+__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_DEADLOCK DB_ENV-__GT__set_verbose@DB_VERB_DEADLOCK
+__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_RECOVERY DB_ENV-__GT__set_verbose@DB_VERB_RECOVERY
+__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_REPLICATION DB_ENV-__GT__set_verbose@DB_VERB_REPLICATION
+__APIREL__/api_c/env_set_verbose.html__OCT__DB_VERB_WAITSFOR DB_ENV-__GT__set_verbose@DB_VERB_WAITSFOR
+__APIREL__/api_c/env_strerror.html__OCT__2 @db_strerror
+__APIREL__/api_c/env_version.html__OCT__2 @db_version
+__APIREL__/api_c/env_create.html__OCT__2 @db_env_create
+__APIREL__/api_c/env_create.html__OCT__DB_CLIENT db_env_create@DB_CLIENT
+__APIREL__/api_c/env_set_alloc.html__OCT__2 @DB_ENV-__GT__set_alloc
+__APIREL__/api_c/env_set_errfile.html__OCT__2 @DB_ENV-__GT__set_errfile
+__APIREL__/api_c/env_set_paniccall.html__OCT__2 @DB_ENV-__GT__set_paniccall
+__APIREL__/api_c/dbm.html__OCT__2 @dbm/ndbm
+__APIREL__/api_c/hsearch.html__OCT__2 @hsearch
+__APIREL__/api_c/lock_class.html__OCT__2 @DB_LOCK
+__APIREL__/api_c/lock_detect.html__OCT__2 @DB_ENV-__GT__lock_detect
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_DEFAULT DB_ENV-__GT__lock_detect@DB_LOCK_DEFAULT
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_EXPIRE DB_ENV-__GT__lock_detect@DB_LOCK_EXPIRE
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MAXLOCKS DB_ENV-__GT__lock_detect@DB_LOCK_MAXLOCKS
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MINLOCKS DB_ENV-__GT__lock_detect@DB_LOCK_MINLOCKS
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_MINWRITE DB_ENV-__GT__lock_detect@DB_LOCK_MINWRITE
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_OLDEST DB_ENV-__GT__lock_detect@DB_LOCK_OLDEST
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_RANDOM DB_ENV-__GT__lock_detect@DB_LOCK_RANDOM
+__APIREL__/api_c/lock_detect.html__OCT__DB_LOCK_YOUNGEST DB_ENV-__GT__lock_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_c/lock_get.html__OCT__2 @DB_ENV-__GT__lock_get
+__APIREL__/api_c/lock_get.html__OCT__DB_LOCK_NOWAIT DB_ENV-__GT__lock_get@DB_LOCK_NOWAIT
+__APIREL__/api_c/lock_id.html__OCT__2 @DB_ENV-__GT__lock_id
+__APIREL__/api_c/lock_id_free.html__OCT__2 @DB_ENV-__GT__lock_id_free
+__APIREL__/api_c/lock_put.html__OCT__2 @DB_ENV-__GT__lock_put
+__APIREL__/api_c/lock_stat.html__OCT__2 @DB_ENV-__GT__lock_stat
+__APIREL__/api_c/lock_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__lock_stat@DB_STAT_CLEAR
+__APIREL__/api_c/lock_vec.html__OCT__2 @DB_ENV-__GT__lock_vec
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_NOWAIT DB_ENV-__GT__lock_vec@DB_LOCK_NOWAIT
+__APIREL__/api_c/lock_vec.html__OCT__op DB_ENV-__GT__lock_vec@op
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_GET DB_ENV-__GT__lock_vec@DB_LOCK_GET
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_GET_TIMEOUT DB_ENV-__GT__lock_vec@DB_LOCK_GET_TIMEOUT
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_PUT DB_ENV-__GT__lock_vec@DB_LOCK_PUT
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_PUT_ALL DB_ENV-__GT__lock_vec@DB_LOCK_PUT_ALL
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_PUT_OBJ DB_ENV-__GT__lock_vec@DB_LOCK_PUT_OBJ
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_TIMEOUT DB_ENV-__GT__lock_vec@DB_LOCK_TIMEOUT
+__APIREL__/api_c/lock_vec.html__OCT__lock DB_ENV-__GT__lock_vec@lock
+__APIREL__/api_c/lock_vec.html__OCT__mode DB_ENV-__GT__lock_vec@mode
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_READ DB_ENV-__GT__lock_vec@DB_LOCK_READ
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_WRITE DB_ENV-__GT__lock_vec@DB_LOCK_WRITE
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IWRITE DB_ENV-__GT__lock_vec@DB_LOCK_IWRITE
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IREAD DB_ENV-__GT__lock_vec@DB_LOCK_IREAD
+__APIREL__/api_c/lock_vec.html__OCT__DB_LOCK_IWR DB_ENV-__GT__lock_vec@DB_LOCK_IWR
+__APIREL__/api_c/lock_vec.html__OCT__obj DB_ENV-__GT__lock_vec@obj
+__APIREL__/api_c/log_archive.html__OCT__2 @DB_ENV-__GT__log_archive
+__APIREL__/api_c/log_archive.html__OCT__DB_ARCH_ABS DB_ENV-__GT__log_archive@DB_ARCH_ABS
+__APIREL__/api_c/log_archive.html__OCT__DB_ARCH_DATA DB_ENV-__GT__log_archive@DB_ARCH_DATA
+__APIREL__/api_c/log_archive.html__OCT__DB_ARCH_LOG DB_ENV-__GT__log_archive@DB_ARCH_LOG
+__APIREL__/api_c/log_compare.html__OCT__2 @log_compare
+__APIREL__/api_c/log_cursor.html__OCT__2 @DB_ENV-__GT__log_cursor
+__APIREL__/api_c/log_file.html__OCT__2 @DB_ENV-__GT__log_file
+__APIREL__/api_c/log_flush.html__OCT__2 @DB_ENV-__GT__log_flush
+__APIREL__/api_c/log_put.html__OCT__2 @DB_ENV-__GT__log_put
+__APIREL__/api_c/log_put.html__OCT__DB_FLUSH DB_ENV-__GT__log_put@DB_FLUSH
+__APIREL__/api_c/log_stat.html__OCT__2 @DB_ENV-__GT__log_stat
+__APIREL__/api_c/log_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__log_stat@DB_STAT_CLEAR
+__APIREL__/api_c/logc_class.html__OCT__2 @DB_LOGC
+__APIREL__/api_c/logc_close.html__OCT__2 @DB_LOGC-__GT__close
+__APIREL__/api_c/logc_get.html__OCT__2 @DB_LOGC-__GT__get
+__APIREL__/api_c/logc_get.html__OCT__DB_FIRST DB_LOGC-__GT__get@DB_FIRST
+__APIREL__/api_c/logc_get.html__OCT__DB_LAST DB_LOGC-__GT__get@DB_LAST
+__APIREL__/api_c/logc_get.html__OCT__DB_NEXT DB_LOGC-__GT__get@DB_NEXT
+__APIREL__/api_c/logc_get.html__OCT__DB_PREV DB_LOGC-__GT__get@DB_PREV
+__APIREL__/api_c/logc_get.html__OCT__DB_CURRENT DB_LOGC-__GT__get@DB_CURRENT
+__APIREL__/api_c/logc_get.html__OCT__DB_SET DB_LOGC-__GT__get@DB_SET
+__APIREL__/api_c/lsn_class.html__OCT__2 @DB_LSN
+__APIREL__/api_c/memp_fclose.html__OCT__2 @DB_MPOOLFILE-__GT__close
+__APIREL__/api_c/memp_fopen.html__OCT__2 @DB_MPOOLFILE-__GT__open
+__APIREL__/api_c/memp_fopen.html__OCT__DB_CREATE DB_MPOOLFILE-__GT__open@DB_CREATE
+__APIREL__/api_c/memp_fopen.html__OCT__3 turn off system @buffering
+__APIREL__/api_c/memp_fopen.html__OCT__DB_DIRECT DB_MPOOLFILE-__GT__open@DB_DIRECT
+__APIREL__/api_c/memp_fopen.html__OCT__DB_NOMMAP DB_MPOOLFILE-__GT__open@DB_NOMMAP
+__APIREL__/api_c/memp_fopen.html__OCT__DB_ODDFILESIZE DB_MPOOLFILE-__GT__open@DB_ODDFILESIZE
+__APIREL__/api_c/memp_fopen.html__OCT__DB_RDONLY DB_MPOOLFILE-__GT__open@DB_RDONLY
+__APIREL__/api_c/memp_fsync.html__OCT__2 @DB_MPOOLFILE-__GT__sync
+__APIREL__/api_c/memp_register.html__OCT__2 @DB_ENV-__GT__memp_register
+__APIREL__/api_c/memp_stat.html__OCT__2 @DB_ENV-__GT__memp_stat
+__APIREL__/api_c/memp_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__memp_stat@DB_STAT_CLEAR
+__APIREL__/api_c/memp_sync.html__OCT__2 @DB_ENV-__GT__memp_sync
+__APIREL__/api_c/memp_trickle.html__OCT__2 @DB_ENV-__GT__memp_trickle
+__APIREL__/api_c/mempfile_class.html__OCT__2 @DB_MPOOLFILE
+__APIREL__/api_c/memp_fcreate.html__OCT__2 @DB_ENV-__GT__memp_fcreate
+__APIREL__/api_c/memp_fget.html__OCT__2 @DB_MPOOLFILE-__GT__get
+__APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_CREATE DB_MPOOLFILE-__GT__get@DB_MPOOL_CREATE
+__APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_LAST DB_MPOOLFILE-__GT__get@DB_MPOOL_LAST
+__APIREL__/api_c/memp_fget.html__OCT__DB_MPOOL_NEW DB_MPOOLFILE-__GT__get@DB_MPOOL_NEW
+__APIREL__/api_c/memp_fget.html__OCT__3 @DB_PAGE_NOTFOUND
+__APIREL__/api_c/memp_fput.html__OCT__2 @DB_MPOOLFILE-__GT__put
+__APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_CLEAN DB_MPOOLFILE-__GT__put@DB_MPOOL_CLEAN
+__APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_DIRTY DB_MPOOLFILE-__GT__put@DB_MPOOL_DIRTY
+__APIREL__/api_c/memp_fput.html__OCT__DB_MPOOL_DISCARD DB_MPOOLFILE-__GT__put@DB_MPOOL_DISCARD
+__APIREL__/api_c/memp_fset.html__OCT__2 @DB_MPOOLFILE-__GT__set
+__APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_CLEAN DB_MPOOLFILE-__GT__set@DB_MPOOL_CLEAN
+__APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_DIRTY DB_MPOOLFILE-__GT__set@DB_MPOOL_DIRTY
+__APIREL__/api_c/memp_fset.html__OCT__DB_MPOOL_DISCARD DB_MPOOLFILE-__GT__set@DB_MPOOL_DISCARD
+__APIREL__/api_c/memp_set_clear_len.html__OCT__2 @DB_MPOOLFILE-__GT__set_clear_len
+__APIREL__/api_c/memp_set_fileid.html__OCT__2 @DB_MPOOLFILE-__GT__set_fileid
+__APIREL__/api_c/memp_set_ftype.html__OCT__2 @DB_MPOOLFILE-__GT__set_ftype
+__APIREL__/api_c/memp_set_lsn_offset.html__OCT__2 @DB_MPOOLFILE-__GT__set_lsn_offset
+__APIREL__/api_c/memp_set_pgcookie.html__OCT__2 @DB_MPOOLFILE-__GT__set_pgcookie
+__APIREL__/api_c/rep_elect.html__OCT__2 @DB_ENV-__GT__rep_elect
+__APIREL__/api_c/rep_elect.html__OCT__3 @DB_REP_UNAVAIL
+__APIREL__/api_c/rep_limit.html__OCT__2 @DB_ENV-__GT__set_rep_limit
+__APIREL__/api_c/rep_message.html__OCT__2 @DB_ENV-__GT__rep_process_message
+__APIREL__/api_c/rep_start.html__OCT__2 @DB_ENV-__GT__rep_start
+__APIREL__/api_c/rep_start.html__OCT__DB_REP_CLIENT DB_ENV-__GT__rep_start@DB_REP_CLIENT
+__APIREL__/api_c/rep_start.html__OCT__DB_REP_LOGSONLY DB_ENV-__GT__rep_start@DB_REP_LOGSONLY
+__APIREL__/api_c/rep_start.html__OCT__DB_REP_MASTER DB_ENV-__GT__rep_start@DB_REP_MASTER
+__APIREL__/api_c/rep_stat.html__OCT__2 @DB_ENV-__GT__rep_stat
+__APIREL__/api_c/rep_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__rep_stat@DB_STAT_CLEAR
+__APIREL__/api_c/rep_transport.html__OCT__2 @DB_ENV-__GT__set_rep_transport
+__APIREL__/api_c/rep_transport.html__OCT__3 @DB_EID_BROADCAST
+__APIREL__/api_c/rep_transport.html__OCT__DB_REP_PERMANENT DB_ENV-__GT__set_rep_transport@DB_REP_PERMANENT
+__APIREL__/api_c/set_func_close.html__OCT__2 @db_env_set_func_close
+__APIREL__/api_c/set_func_dirfree.html__OCT__2 @db_env_set_func_dirfree
+__APIREL__/api_c/set_func_dirlist.html__OCT__2 @db_env_set_func_dirlist
+__APIREL__/api_c/set_func_exists.html__OCT__2 @db_env_set_func_exists
+__APIREL__/api_c/set_func_free.html__OCT__2 @db_env_set_func_free
+__APIREL__/api_c/set_func_fsync.html__OCT__2 @db_env_set_func_fsync
+__APIREL__/api_c/set_func_ioinfo.html__OCT__2 @db_env_set_func_ioinfo
+__APIREL__/api_c/set_func_malloc.html__OCT__2 @db_env_set_func_malloc
+__APIREL__/api_c/set_func_map.html__OCT__2 @db_env_set_func_map
+__APIREL__/api_c/set_func_open.html__OCT__2 @db_env_set_func_open
+__APIREL__/api_c/set_func_read.html__OCT__2 @db_env_set_func_read
+__APIREL__/api_c/set_func_realloc.html__OCT__2 @db_env_set_func_realloc
+__APIREL__/api_c/set_func_rename.html__OCT__2 @db_env_set_func_rename
+__APIREL__/api_c/set_func_seek.html__OCT__2 @db_env_set_func_seek
+__APIREL__/api_c/set_func_sleep.html__OCT__2 @db_env_set_func_sleep
+__APIREL__/api_c/set_func_unlink.html__OCT__2 @db_env_set_func_unlink
+__APIREL__/api_c/set_func_unmap.html__OCT__2 @db_env_set_func_unmap
+__APIREL__/api_c/set_func_write.html__OCT__2 @db_env_set_func_write
+__APIREL__/api_c/set_func_yield.html__OCT__2 @db_env_set_func_yield
+__APIREL__/api_c/txn_abort.html__OCT__2 @DB_TXN-__GT__abort
+__APIREL__/api_c/txn_begin.html__OCT__2 @DB_ENV-__GT__txn_begin
+__APIREL__/api_c/txn_begin.html__OCT__DB_DIRTY_READ DB_ENV-__GT__txn_begin@DB_DIRTY_READ
+__APIREL__/api_c/txn_begin.html__OCT__DB_TXN_NOSYNC DB_ENV-__GT__txn_begin@DB_TXN_NOSYNC
+__APIREL__/api_c/txn_begin.html__OCT__DB_TXN_NOWAIT DB_ENV-__GT__txn_begin@DB_TXN_NOWAIT
+__APIREL__/api_c/txn_begin.html__OCT__DB_TXN_SYNC DB_ENV-__GT__txn_begin@DB_TXN_SYNC
+__APIREL__/api_c/txn_checkpoint.html__OCT__2 @DB_ENV-__GT__txn_checkpoint
+__APIREL__/api_c/txn_checkpoint.html__OCT__DB_FORCE DB_ENV-__GT__txn_checkpoint@DB_FORCE
+__APIREL__/api_c/txn_class.html__OCT__2 @DB_TXN
+__APIREL__/api_c/txn_commit.html__OCT__2 @DB_TXN-__GT__commit
+__APIREL__/api_c/txn_commit.html__OCT__DB_TXN_NOSYNC DB_TXN-__GT__commit@DB_TXN_NOSYNC
+__APIREL__/api_c/txn_commit.html__OCT__DB_TXN_SYNC DB_TXN-__GT__commit@DB_TXN_SYNC
+__APIREL__/api_c/txn_discard.html__OCT__2 @DB_TXN-__GT__discard
+__APIREL__/api_c/txn_id.html__OCT__2 @DB_TXN-__GT__id
+__APIREL__/api_c/txn_prepare.html__OCT__2 @DB_TXN-__GT__prepare
+__APIREL__/api_c/txn_prepare.html__OCT__3 @DB_XIDDATASIZE
+__APIREL__/api_c/txn_recover.html__OCT__2 @DB_ENV-__GT__txn_recover
+__APIREL__/api_c/txn_recover.html__OCT__DB_FIRST DB_ENV-__GT__txn_recover@DB_FIRST
+__APIREL__/api_c/txn_recover.html__OCT__DB_NEXT DB_ENV-__GT__txn_recover@DB_NEXT
+__APIREL__/api_c/txn_set_timeout.html__OCT__2 @DB_TXN-__GT__set_timeout
+__APIREL__/api_c/txn_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DB_TXN-__GT__set_timeout@DB_SET_LOCK_TIMEOUT
+__APIREL__/api_c/txn_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DB_TXN-__GT__set_timeout@DB_SET_TXN_TIMEOUT
+__APIREL__/api_c/txn_stat.html__OCT__2 @DB_ENV-__GT__txn_stat
+__APIREL__/api_c/txn_stat.html__OCT__DB_STAT_CLEAR DB_ENV-__GT__txn_stat@DB_STAT_CLEAR
diff --git a/libdb/docs/api_c/rep_elect.html b/libdb/docs/api_c/rep_elect.html
new file mode 100644
index 0000000..9ec85ce
--- /dev/null
+++ b/libdb/docs/api_c/rep_elect.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;rep_elect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;rep_elect</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;rep_elect(DB_ENV *env, int nsites,
+ int priority, u_int32_t timeout, int *envid);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;rep_elect method holds an election for the master of a replication
+group, returning the new master's ID in the <b>envid</b> parameter.
+<p>The <b>nsites</b> parameter indicates the number of environments that
+the application believes are in the replication group. This number is
+used by Berkeley DB to avoid having two masters active simultaneously, even
+in the case of a network partition. During an election, a new master
+cannot be elected unless more than half of <b>nsites</b> agree on
+the new master. Thus, in the face of a network partition, the side of
+the partition with more than half the environments will elect a new
+master and continue, while the environments communicating with fewer
+than half the other environments will fail to find a new master.
+<p>The <b>priority</b> parameter is the priority of this environment. It
+must be a positive integer, or 0 if this environment is not permitted
+to become a master (see <a href="../ref/rep/pri.html">Replication
+environment priorities</a> for more information).
+<a name="3"><!--meow--></a>
+<p>The <b>timeout</b> parameter specifies a timeout period for an
+election. If the election has not completed after <b>timeout</b>
+microseconds, the thread will return DB_REP_UNAVAIL.
+<p>The DB_ENV-&gt;rep_elect method either returns successfully, with the new
+master's environment ID in the memory pointed to by the <b>envid</b>
+parameter, or it will return DB_REP_UNAVAIL if the participating
+group members were unable to elect a new master for any reason. In the
+event of a successful return, the new master's ID may be the ID of the
+previous master, or the ID of the current environment. The application
+is responsible for adjusting its usage of the other environments in the
+replication group, including directing all database updates to the newly
+selected master, in accordance with the results of this election.
+<p>The thread of control that calls the DB_ENV-&gt;rep_elect method must not be the
+thread of control that processes incoming messages; processing the
+incoming messages is necessary to successfully complete an election.
+<p>The DB_ENV-&gt;rep_elect method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;rep_elect method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_REP_UNAVAIL<dd>The replication group was unable to elect a master.
+</dl>
+<p>The DB_ENV-&gt;rep_elect method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;rep_elect method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
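A minimal C sketch of driving an election through this interface, assuming a DB_ENV handle named dbenv that has already been opened and configured for replication; the site count, priority, and timeout values are purely illustrative:

    #include <db.h>
    #include <stdio.h>

    /*
     * Ask the replication group to elect a master.  dbenv is assumed to be
     * open and already configured with DB_ENV->set_rep_transport and
     * DB_ENV->rep_start.
     */
    int
    elect_master(DB_ENV *dbenv)
    {
        int newmaster, ret;

        /* 5 sites believed in the group, priority 100 for this site,
         * give up after 2,000,000 microseconds. */
        ret = dbenv->rep_elect(dbenv, 5, 100, 2000000, &newmaster);
        switch (ret) {
        case 0:
            printf("environment ID %d elected master\n", newmaster);
            break;
        case DB_REP_UNAVAIL:
            fprintf(stderr, "election did not complete in time\n");
            break;
        default:
            dbenv->err(dbenv, ret, "DB_ENV->rep_elect");
            break;
        }
        return (ret);
    }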
diff --git a/libdb/docs/api_c/rep_limit.html b/libdb/docs/api_c/rep_limit.html
new file mode 100644
index 0000000..8af0ddd
--- /dev/null
+++ b/libdb/docs/api_c/rep_limit.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_rep_limit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_rep_limit</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_rep_limit(DB_ENV *env, u_int32_t gbytes, u_int32_t bytes);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;set_rep_limit method imposes a limit on the amount of data that will
+be transmitted from a site during the course of a single call to
+<a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method.
+<p>The <b>gbytes</b> and <b>bytes</b> parameters together represent the
+maximum number of bytes that can be sent during a single call to
+<a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method.
+<p>The DB_ENV-&gt;set_rep_limit method configures a database environment, not only operations
+performed using the specified <a href="../api_c/env_class.html">DB_ENV</a> handle.
+<p>The DB_ENV-&gt;set_rep_limit interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_rep_limit method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_rep_limit method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_rep_limit method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
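For illustration only, a short C sketch that caps the data sent in response to a single message at roughly 10 megabytes, using the gbytes/bytes split described above; the dbenv handle and the chosen limit are assumptions:

    #include <db.h>

    /*
     * Limit data transmitted per DB_ENV->rep_process_message call to
     * 0 gigabytes plus 10 * 1024 * 1024 bytes.
     */
    int
    limit_rep_transfer(DB_ENV *dbenv)
    {
        int ret;

        if ((ret = dbenv->set_rep_limit(dbenv, 0, 10 * 1024 * 1024)) != 0)
            dbenv->err(dbenv, ret, "DB_ENV->set_rep_limit");
        return (ret);
    }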
diff --git a/libdb/docs/api_c/rep_list.html b/libdb/docs/api_c/rep_list.html
new file mode 100644
index 0000000..4c3c4f9
--- /dev/null
+++ b/libdb/docs/api_c/rep_list.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Replication and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Replication and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Replication and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_c/rep_start.html">DB_ENV-&gt;rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a></td><td>Replication statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/rep_message.html b/libdb/docs/api_c/rep_message.html
new file mode 100644
index 0000000..1e7f270
--- /dev/null
+++ b/libdb/docs/api_c/rep_message.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;rep_process_message</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;rep_process_message</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;rep_process_message(DB_ENV *env,
+ DBT *control, DBT *rec, int *envid)
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;rep_process_message method processes an incoming replication
+message sent by a member of the replication group to the local database
+environment.
+<p>The <b>rec</b> and <b>control</b> parameters should reference a copy
+of the parameters specified by Berkeley DB for the <b>rec</b> and
+<b>control</b> parameters on the sending environment.
+<p>The <b>envid</b> parameter should contain the local identifier that
+corresponds to the environment that sent the message to be processed
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>For implementation reasons, all incoming replication messages must be
+processed using the same <a href="../api_c/env_class.html">DB_ENV</a> handle. It is not required that
+a single thread of control process all messages, only that all threads
+of control processing messages use the same handle.
+<p>
+If a new master has been elected, the DB_ENV-&gt;rep_process_message method will return DB_REP_NEWMASTER.
+The <b>envid</b> parameter contains the environment ID of the new
+master. If the recipient of this error return has been made master, it
+is the application's responsibility to begin acting as the master
+environment.
+<p>
+If the system received contact information from a new environment, the DB_ENV-&gt;rep_process_message method will return DB_REP_NEWSITE.
+The <b>rec</b> parameter contains the opaque data specified in the
+<b>cdata</b> parameter to the <a href="../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>. The application
+should take whatever action is needed to establish a communication
+channel with this new environment.
+<p>
+If the replication group has more than one master, the DB_ENV-&gt;rep_process_message method will return DB_REP_DUPMASTER.
+The application should reconfigure itself as a client by calling the
+<a href="../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method, and then call for an election by calling
+<a href="../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>.
+<p>
+If an election is needed, the DB_ENV-&gt;rep_process_message method will return DB_REP_HOLDELECTION.
+The application should call for an election by
+calling <a href="../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>.
+<p>
+If the current environment's logs are too far out of date with respect
+to the master to be automatically synchronized, the DB_ENV-&gt;rep_process_message method will return DB_REP_OUTDATED. The
+application should copy over a hot backup of the environment, run
+recovery, and restart the client.
+<p>
+Otherwise, the DB_ENV-&gt;rep_process_message method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;rep_process_message method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;rep_process_message method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
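One plausible shape for a message-handling loop built on the return values listed above is sketched below in C; the receive_message helper, the dbenv handle, and the recovery actions are application placeholders, not Berkeley DB functions:

    #include <db.h>
    #include <string.h>

    /* Hypothetical transport hook: fills in the next control/rec pair and
     * the local ID of the environment that sent them. */
    extern int receive_message(DBT *control, DBT *rec, int *eidp);

    void
    message_loop(DB_ENV *dbenv)
    {
        DBT control, rec;
        int eid, ret;

        memset(&control, 0, sizeof(control));
        memset(&rec, 0, sizeof(rec));

        while (receive_message(&control, &rec, &eid) == 0) {
            ret = dbenv->rep_process_message(dbenv, &control, &rec, &eid);
            switch (ret) {
            case 0:
                break;
            case DB_REP_NEWMASTER:
                /* eid is the new master; if it is this environment,
                 * begin acting as the master. */
                break;
            case DB_REP_NEWSITE:
                /* rec holds the new site's cdata; connect to it. */
                break;
            case DB_REP_DUPMASTER:
                /* Downgrade to client, then ask for an election. */
                (void)dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT);
                /* FALLTHROUGH */
            case DB_REP_HOLDELECTION:
                /* Call DB_ENV->rep_elect here. */
                break;
            case DB_REP_OUTDATED:
                /* Restore a hot backup, run recovery, restart. */
                break;
            default:
                dbenv->err(dbenv, ret, "DB_ENV->rep_process_message");
                break;
            }
        }
    }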
diff --git a/libdb/docs/api_c/rep_start.html b/libdb/docs/api_c/rep_start.html
new file mode 100644
index 0000000..05eed91
--- /dev/null
+++ b/libdb/docs/api_c/rep_start.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;rep_start</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;rep_start</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;rep_start(DB_ENV *env, DBT *cdata, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;rep_start method configures the database environment as a client
+or master in a group of replicated database environments. Replication
+master environments are the only database environments where replicated
+databases may be modified. Replication client environments are
+read-only as long as they are clients. Replication client environments
+may be upgraded to be replication master environments in the case that
+the current master fails or there is no master present.
+<p>The enclosing database environment must already have been opened by
+calling the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> method and must already have been configured
+to send replication messages by calling the <a href="../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method.
+<p>The <b>cdata</b> parameter is an opaque data item that is sent over
+the communication infrastructure when the client or master comes online
+(see <a href="../ref/rep/newsite.html">Connecting to a new site</a> for
+more information). If no such information is useful, <b>cdata</b>
+should be NULL.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_REP_CLIENT">DB_REP_CLIENT</a><dd>Configure the environment as a replication client.
+<p><dt><a name="DB_REP_LOGSONLY">DB_REP_LOGSONLY</a><dd>Configure the environment as a log files-only client.
+<p><dt><a name="DB_REP_MASTER">DB_REP_MASTER</a><dd>Configure the environment as a replication master.
+</dl>
+<p>The DB_ENV-&gt;rep_start method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;rep_start method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The environment was not already configured to communicate with a
+replication group by a call to <a href="../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a>.
+<p>The environment was not already opened.
+</dl>
+<p>The DB_ENV-&gt;rep_start method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;rep_start method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
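A hedged C sketch of starting replication once the environment has been opened and the transport registered; the host string used as cdata and the master/client choice are application decisions shown only for illustration:

    #include <db.h>
    #include <string.h>

    /*
     * Configure dbenv as a replication master or client.  local_info is
     * opaque connection data (for example "host:port") made available to
     * other sites when this one comes online; it may be NULL.
     */
    int
    start_replication(DB_ENV *dbenv, const char *local_info, int is_master)
    {
        DBT cdata;
        int ret;

        memset(&cdata, 0, sizeof(cdata));
        if (local_info != NULL) {
            cdata.data = (void *)local_info;
            cdata.size = (u_int32_t)strlen(local_info) + 1;
        }

        ret = dbenv->rep_start(dbenv,
            local_info == NULL ? NULL : &cdata,
            is_master ? DB_REP_MASTER : DB_REP_CLIENT);
        if (ret != 0)
            dbenv->err(dbenv, ret, "DB_ENV->rep_start");
        return (ret);
    }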
diff --git a/libdb/docs/api_c/rep_stat.html b/libdb/docs/api_c/rep_stat.html
new file mode 100644
index 0000000..4671cbc
--- /dev/null
+++ b/libdb/docs/api_c/rep_stat.html
@@ -0,0 +1,106 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;rep_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;rep_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;rep_stat(DB_ENV *env, DB_REP_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;rep_stat method returns the replication subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DB_ENV-&gt;rep_stat method creates a statistical structure of type
+DB_REP_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_REP_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_stat;<dd>The current replication mode. Set to <a href="../api_c/rep_start.html#DB_REP_MASTER">DB_REP_MASTER</a> if the
+environment is a replication master, <a href="../api_c/rep_start.html#DB_REP_CLIENT">DB_REP_CLIENT</a> if the
+environment is a replication client, <a href="../api_c/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a> if the
+environment is a log-files-only replica, or 0 if replication is not
+configured.
+<dt>DB_LSN st_next_lsn;<dd>In replication environments configured as masters, the next LSN expected.
+In replication environments configured as clients, the next LSN to be used.
+<dt>DB_LSN st_waiting_lsn;<dd>The LSN of the first missed log record being waited for, or 0 if no log
+records are currently missing.
+<dt>u_int32_t st_dupmasters;<dd>The number of duplicate master conditions detected.
+<dt>u_int32_t st_env_id;<dd>The current environment ID.
+<dt>u_int32_t st_env_priority;<dd>The current environment priority.
+<dt>u_int32_t st_gen;<dd>The current generation number.
+<dt>u_int32_t st_log_duplicated;<dd>The number of duplicate log records received.
+<dt>u_int32_t st_log_queued;<dd>The number of log records currently queued.
+<dt>u_int32_t st_log_queued_max;<dd>The maximum number of log records ever queued at once.
+<dt>u_int32_t st_log_queued_total;<dd>The total number of log records queued.
+<dt>u_int32_t st_log_records;<dd>The number of log records received and appended to the log.
+<dt>u_int32_t st_log_requested;<dd>The number of log records missed and requested.
+<dt>u_int32_t st_master;<dd>The current master environment ID.
+<dt>u_int32_t st_master_changes;<dd>The number of times the master has changed.
+<dt>u_int32_t st_msgs_badgen;<dd>The number of messages received with a bad generation number.
+<dt>u_int32_t st_msgs_processed;<dd>The number of messages received and processed.
+<dt>u_int32_t st_msgs_recover;<dd>The number of messages ignored due to pending recovery.
+<dt>u_int32_t st_msgs_send_failures;<dd>The number of failed message sends.
+<dt>u_int32_t st_msgs_sent;<dd>The number of messages sent.
+<dt>u_int32_t st_newsites;<dd>The number of new site messages received.
+<dt>u_int32_t st_outdated;<dd>The number of outdated conditions detected.
+<dt>u_int32_t st_txns_applied;<dd>The number of transactions applied.
+<dt>u_int32_t st_elections;<dd>The number of elections held.
+<dt>u_int32_t st_elections_won;<dd>The number of elections won.
+<dt>u_int32_t st_election_status;<dd>The current election phase (0 if no election is in progress).
+<dt>u_int32_t st_election_cur_winner;<dd>The election winner.
+<dt>u_int32_t st_election_gen;<dd>The election generation number.
+<dt>DB_LSN st_election_lsn;<dd>The maximum LSN of the election winner.
+<dt>u_int32_t st_election_nsites;<dd>The number of sites expected to participate in elections.
+<dt>u_int32_t st_nthrottles;<dd>Transmission limited. This indicates the number of times that data
+transmission was stopped to limit the amount of data sent in response
+to a single call to <a href="../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.
+<dt>u_int32_t st_election_priority;<dd>The election priority.
+<dt>u_int32_t st_election_tiebreaker;<dd>The election tiebreaker value.
+<dt>u_int32_t st_election_votes;<dd>The votes received this election round.
+</dl>
+<p>The DB_ENV-&gt;rep_stat method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;rep_stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;rep_stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
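Because the statistics structure is returned in memory the caller must free, a short C sketch of that pattern may be useful; the handful of fields printed here is an arbitrary illustrative subset:

    #include <db.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Print a few replication statistics, then release the structure
     * allocated by DB_ENV->rep_stat. */
    int
    print_rep_stats(DB_ENV *dbenv)
    {
        DB_REP_STAT *sp;
        int ret;

        if ((ret = dbenv->rep_stat(dbenv, &sp, 0)) != 0) {
            dbenv->err(dbenv, ret, "DB_ENV->rep_stat");
            return (ret);
        }

        printf("master environment ID: %lu\n", (unsigned long)sp->st_master);
        printf("messages processed:    %lu\n", (unsigned long)sp->st_msgs_processed);
        printf("transactions applied:  %lu\n", (unsigned long)sp->st_txns_applied);

        /* A single free of the returned pointer releases everything. */
        free(sp);
        return (0);
    }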
diff --git a/libdb/docs/api_c/rep_transport.html b/libdb/docs/api_c/rep_transport.html
new file mode 100644
index 0000000..e575b46
--- /dev/null
+++ b/libdb/docs/api_c/rep_transport.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_rep_transport</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_rep_transport</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_rep_transport(DB_ENV *env, int envid,
+ int (*send)(DB_ENV *dbenv,
+ const DBT *control, const DBT *rec, int envid, u_int32_t flags));
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;set_rep_transport method initializes the communication infrastructure
+for a database environment participating in a replicated application.
+<p>The <b>envid</b> parameter is the local environment's ID. It must be
+a positive integer and uniquely identify this Berkeley DB database environment
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>The <b>send</b> parameter is a callback interface used to transmit data
+using the replication application's communication infrastructure. The
+parameters to <b>send</b> are as follows:
+<p><dl compact>
+<p><dt>dbenv<dd>The enclosing database environment.
+<p><dt>control<dd>The control parameter is the first of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>rec<dd>The rec parameter is the second of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>envid<dd>The <b>envid</b> parameter is a positive integer identifier that
+specifies the replication environment to which the message should be
+sent (see <a href="../ref/rep/id.html">Replication environment IDs</a> for
+more information).
+<p><a name="3"><!--meow--></a>
+The special identifier DB_EID_BROADCAST indicates that a message
+should be broadcast to every environment in the replication group. The
+application may use a true broadcast protocol, or may send the message
+in sequence to each machine with which it is in communication.
+<p><dt>flags<dd>
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_REP_PERMANENT">DB_REP_PERMANENT</a><dd>The record being sent is critical for maintaining database integrity
+(for example, the message includes a transaction commit). The
+application should take appropriate action to enforce the reliability
+guarantees it has chosen, such as waiting for acknowledgement from one
+or more clients.
+</dl>
+</dl>
+<p>The <b>send</b> interface must return 0 on success and non-zero on
+failure. If the <b>send</b> interface fails, the message being sent
+is necessary to maintain database integrity, and the local log is not
+configured for synchronous flushing, the local log will be flushed;
+otherwise, any error from the <b>send</b> interface will be ignored.
+<p>It may sometimes be useful to pass application-specific data to the
+<b>send</b> interface; see <a href="../ref/env/faq.html">Environment
+FAQ</a> for a discussion on how to do this.
+<p>The DB_ENV-&gt;set_rep_transport method configures operations performed using the specified
+<a href="../api_c/env_class.html">DB_ENV</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DB_ENV-&gt;set_rep_transport interface may be called at any time during the life of
+the application.
+<p>The DB_ENV-&gt;set_rep_transport method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_rep_transport method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_rep_transport method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>
+<h1>See Also</h1>
+<a href="../api_c/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
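A hedged C sketch of the callback shape this method expects; send_to_site and broadcast stand in for whatever transport the application actually uses and are not Berkeley DB functions, and the local ID of 1 is arbitrary:

    #include <db.h>

    /* Hypothetical application transport routines. */
    extern int send_to_site(int eid, const DBT *control, const DBT *rec);
    extern int broadcast(const DBT *control, const DBT *rec);

    /*
     * Callback matching the send interface described above.  Returning
     * non-zero tells Berkeley DB the message could not be delivered.
     */
    static int
    app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
        int envid, u_int32_t flags)
    {
        int ret;

        (void)dbenv;    /* unused in this sketch */

        if (envid == DB_EID_BROADCAST)
            ret = broadcast(control, rec);
        else
            ret = send_to_site(envid, control, rec);

        if (ret == 0 && (flags & DB_REP_PERMANENT)) {
            /* Application policy: for example, wait for client
             * acknowledgements before reporting success. */
        }
        return (ret);
    }

    int
    configure_transport(DB_ENV *dbenv)
    {
        return (dbenv->set_rep_transport(dbenv, 1, app_send));
    }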
diff --git a/libdb/docs/api_c/set_func_close.html b/libdb/docs/api_c/set_func_close.html
new file mode 100644
index 0000000..9e9d283
--- /dev/null
+++ b/libdb/docs/api_c/set_func_close.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_close(int (*func_close)(int fd));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>close</b> interface
+with <b>func_close</b>, which must conform to the standard interface.
+<p>The db_env_set_func_close method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_close interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_close method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_close method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_close method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_close method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
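As one illustration of this run-time configuration hook, a hedged sketch that substitutes a close(2) wrapper which logs every close; the wrapper itself is an assumption, not part of Berkeley DB:

    #include <db.h>
    #include <stdio.h>
    #include <unistd.h>

    /* A close(2) replacement conforming to the standard interface, as the
     * description above requires; it merely logs the descriptor first. */
    static int
    logging_close(int fd)
    {
        fprintf(stderr, "closing fd %d\n", fd);
        return (close(fd));
    }

    int
    install_close_hook(void)
    {
        /* Process-wide: install before db_env_create or db_create. */
        return (db_env_set_func_close(logging_close));
    }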
diff --git a/libdb/docs/api_c/set_func_dirfree.html b/libdb/docs/api_c/set_func_dirfree.html
new file mode 100644
index 0000000..89b6659
--- /dev/null
+++ b/libdb/docs/api_c/set_func_dirfree.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_dirfree</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_dirfree</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_dirfree(void (*func_dirfree)(char **namesp, int cnt));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to return any memory allocated as part
+of the routine which reads through a directory and creates a list of files
+that the directory contains (see <a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>).
+The <b>func_dirfree</b> argument must conform to the following interface:
+<p><blockquote><pre>int dirfree(char **namesp, int cnt);</pre></blockquote>
+<p>The <b>namesp</b> and <b>cnt</b> arguments are the same values as were
+returned by the <a href="../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a> function.
+<p>The <b>func_dirfree</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_dirfree method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_dirfree interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_dirfree method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_dirfree method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_dirfree method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_dirfree method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_dirlist.html b/libdb/docs/api_c/set_func_dirlist.html
new file mode 100644
index 0000000..8cbfca1
--- /dev/null
+++ b/libdb/docs/api_c/set_func_dirlist.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_dirlist</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_dirlist</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_dirlist(
+ int (*func_dirlist)(const char *dir, char ***namesp, int *cntp));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to read through a directory and
+create a list of files that the directory contains. The
+<b>func_dirlist</b> argument must conform to the following interface:
+<p><blockquote><pre>int dirlist(const char *dir, char ***namesp, int *cntp);</pre></blockquote>
+<p>The <b>dir</b> argument is the name of the directory to be searched.
+The function must return a pointer to an array of nul-terminated file
+names into the memory location to which the <b>namesp</b> argument
+refers, and a count of the number of elements in the array into the
+memory location to which <b>cntp</b> refers.
+<p>The <b>func_dirlist</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_dirlist method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_dirlist interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_dirlist method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_dirlist method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_dirlist method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_dirlist method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
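A possible POSIX implementation of the dirlist interface described above, shown as a hedged sketch (error-path cleanup is abbreviated and the growth strategy is arbitrary):

    #include <db.h>
    #include <dirent.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Fill *namesp with an allocated array of allocated file names and
     * *cntp with the entry count, returning errno on failure and 0 on
     * success, as the interface above requires.
     */
    static int
    posix_dirlist(const char *dir, char ***namesp, int *cntp)
    {
        DIR *dirp;
        struct dirent *dp;
        char **names;
        int cnt, max;

        if ((dirp = opendir(dir)) == NULL)
            return (errno);

        cnt = 0;
        max = 32;
        if ((names = malloc(max * sizeof(char *))) == NULL) {
            (void)closedir(dirp);
            return (ENOMEM);
        }
        while ((dp = readdir(dirp)) != NULL) {
            if (cnt == max) {
                max *= 2;
                if ((names = realloc(names, max * sizeof(char *))) == NULL) {
                    (void)closedir(dirp);
                    return (ENOMEM);
                }
            }
            if ((names[cnt] = strdup(dp->d_name)) == NULL) {
                (void)closedir(dirp);
                return (ENOMEM);
            }
            ++cnt;
        }
        (void)closedir(dirp);

        *namesp = names;
        *cntp = cnt;
        return (0);
    }

    int
    install_dirlist_hook(void)
    {
        return (db_env_set_func_dirlist(posix_dirlist));
    }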
diff --git a/libdb/docs/api_c/set_func_exists.html b/libdb/docs/api_c/set_func_exists.html
new file mode 100644
index 0000000..97e9ddd
--- /dev/null
+++ b/libdb/docs/api_c/set_func_exists.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_exists</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_exists</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_exists(int (*func_exists)(const char *path, int *isdirp));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to determine whether a file
+exists and whether it is a file of type directory. The <b>func_exists</b>
+argument must conform to the following interface:
+<p><blockquote><pre>int exists(const char *path, int *isdirp);</pre></blockquote>
+<p>The <b>path</b> argument is the pathname of the file to be checked.
+<p>If the <b>isdirp</b> argument is non-NULL, it must be set to non-0 if
+<b>path</b> is a directory, and 0 if <b>path</b> is not a directory.
+<p>The <b>func_exists</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_exists method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_exists interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_exists method returns a non-zero error value on failure and 0 on success.
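+<p>As a minimal sketch, a replacement built on the POSIX <b>stat</b> call
+might look like the following (the name <b>my_exists</b> is hypothetical):
+<p><blockquote><pre>#include &lt;sys/stat.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_exists(const char *path, int *isdirp)
+{
+    struct stat sb;
+
+    if (stat(path, &amp;sb) != 0)
+        return (errno);
+    if (isdirp != NULL)
+        *isdirp = S_ISDIR(sb.st_mode);
+    return (0);
+}
+<p>
+(void)db_env_set_func_exists(my_exists);</pre></blockquote>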
+<h1>Errors</h1>
+<p>The db_env_set_func_exists method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_exists method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_exists method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_free.html b/libdb/docs/api_c/set_func_free.html
new file mode 100644
index 0000000..6c57ecc
--- /dev/null
+++ b/libdb/docs/api_c/set_func_free.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_free</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_free(void (*func_free)(void *ptr));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>free</b> interface with <b>func_free</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_free method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_free interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_free method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_free method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_free method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_free method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_fsync.html b/libdb/docs/api_c/set_func_fsync.html
new file mode 100644
index 0000000..2d7efc4
--- /dev/null
+++ b/libdb/docs/api_c/set_func_fsync.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_fsync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_fsync</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_fsync(int (*func_fsync)(int fd));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>fsync</b> interface
+with <b>func_fsync</b>, which must conform to the standard interface.
+<p>The db_env_set_func_fsync method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_fsync interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_fsync method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_fsync method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_fsync method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_fsync method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_ioinfo.html b/libdb/docs/api_c/set_func_ioinfo.html
new file mode 100644
index 0000000..ccca3bd
--- /dev/null
+++ b/libdb/docs/api_c/set_func_ioinfo.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_ioinfo</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_ioinfo</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_ioinfo(int (*func_ioinfo)(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to determine the size and I/O
+characteristics of a file. The <b>func_ioinfo</b> argument must conform
+to the following interface:
+<p><blockquote><pre>int ioinfo(const char *path, int fd,
+u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep);</pre></blockquote>
+<p>The <b>path</b> argument is the pathname of the file to be checked, and the
+<b>fd</b> argument is an open file descriptor on the file.
+<p>If the <b>mbytesp</b> and <b>bytesp</b> arguments are non-NULL, the
+<b>ioinfo</b> function must return in them the size of the file: the
+number of megabytes in the file into the memory location to which the
+<b>mbytesp</b> argument refers, and the number of bytes over and above
+that number of megabytes into the memory location to which the
+<b>bytesp</b> argument refers.
+<p>In addition, if the <b>iosizep</b> argument is non-NULL, the <b>ioinfo</b>
+function must return the optimum granularity for I/O operations to the file
+into the memory location to which it refers.
+<p>The <b>func_ioinfo</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_ioinfo method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_ioinfo interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_ioinfo method returns a non-zero error value on failure and 0 on success.
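+<p>As a minimal sketch, a replacement built on the POSIX <b>fstat</b> call
+might look like the following (the name <b>my_ioinfo</b> is hypothetical):
+<p><blockquote><pre>#include &lt;sys/stat.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_ioinfo(const char *path, int fd,
+    u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+    struct stat sb;
+
+    /* The open descriptor is sufficient; path is unused here. */
+    if (fstat(fd, &amp;sb) != 0)
+        return (errno);
+    if (mbytesp != NULL)
+        *mbytesp = (u_int32_t)(sb.st_size / (1024 * 1024));
+    if (bytesp != NULL)
+        *bytesp = (u_int32_t)(sb.st_size % (1024 * 1024));
+    if (iosizep != NULL)
+        *iosizep = (u_int32_t)sb.st_blksize;
+    return (0);
+}
+<p>
+(void)db_env_set_func_ioinfo(my_ioinfo);</pre></blockquote>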
+<h1>Errors</h1>
+<p>The db_env_set_func_ioinfo method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_ioinfo method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_ioinfo method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_malloc.html b/libdb/docs/api_c/set_func_malloc.html
new file mode 100644
index 0000000..4c71a93
--- /dev/null
+++ b/libdb/docs/api_c/set_func_malloc.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_malloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_malloc</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_malloc(void *(*func_malloc)(size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>malloc</b> interface with <b>func_malloc</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_malloc method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_malloc interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_malloc method returns a non-zero error value on failure and 0 on success.
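+<p>One reason to replace the allocator is to instrument the library's
+allocations.  For example, a counting wrapper might look like the
+following (the names <b>my_malloc</b> and <b>allocation_count</b> are
+hypothetical):
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+<p>
+static unsigned long allocation_count;
+<p>
+/* Count allocations, then delegate to the standard malloc. */
+static void *
+my_malloc(size_t size)
+{
+    ++allocation_count;
+    return (malloc(size));
+}
+<p>
+(void)db_env_set_func_malloc(my_malloc);</pre></blockquote>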
+<h1>Errors</h1>
+<p>The db_env_set_func_malloc method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_malloc method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_malloc method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_map.html b/libdb/docs/api_c/set_func_map.html
new file mode 100644
index 0000000..20aface
--- /dev/null
+++ b/libdb/docs/api_c/set_func_map.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_map</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_map</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_map(int (*func_map)(char *path,
+ size_t len, int is_region, int is_rdonly, void **addr));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to map a file into memory and to
+create shared memory regions (which may or may not be backed by files).
+The <b>func_map</b> argument must conform to the following interface:
+<p><blockquote><pre>int map(char *path, size_t len,
+int is_region, int is_rdonly, void **addr);</pre></blockquote>
+<p>The <b>path</b> argument is the name of a file.
+<p>The <b>is_region</b> argument will be zero if the intention is to map
+a file into shared memory. In this case, the <b>map</b> function must
+map the first <b>len</b> bytes of the file into memory and return a
+pointer to the mapped location into the memory location to which the
+argument <b>addr</b> refers. The <b>is_rdonly</b> argument will be
+non-zero if the file is considered read-only by the caller.
+<p>The <b>is_region</b> argument will be non-zero if the memory is
+intended to be used as a shared memory region for synchronization
+between Berkeley DB threads/processes. In this case, the returned memory may
+be of any kind (for example, anonymous memory), but must be able to
+support semaphores. In this case, the <b>path</b> argument may be
+ignored (although future <b>map</b> calls using the same <b>path</b>
+must return the same memory), and the <b>is_rdonly</b> argument will
+always be zero.
+<p>The <b>func_map</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_map method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_map interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_map method returns a non-zero error value on failure and 0 on success.
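+<p>As a simplified sketch only (the name <b>my_map</b> is hypothetical, the
+named file always backs the mapping, and the anonymous-memory case
+described above is not handled), a replacement built on the POSIX
+<b>mmap</b> call might look like this:
+<p><blockquote><pre>#include &lt;sys/mman.h&gt;
+#include &lt;sys/types.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;fcntl.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_map(char *path, size_t len, int is_region, int is_rdonly, void **addr)
+{
+    void *p;
+    int fd, ret;
+
+    if ((fd = open(path,
+        is_rdonly ? O_RDONLY : O_RDWR | O_CREAT, 0600)) == -1)
+        return (errno);
+    /* Region files may have to be grown to the requested length. */
+    if (is_region &amp;&amp; !is_rdonly &amp;&amp; ftruncate(fd, (off_t)len) != 0) {
+        ret = errno;
+        (void)close(fd);
+        return (ret);
+    }
+    p = mmap(NULL, len,
+        is_rdonly ? PROT_READ : PROT_READ | PROT_WRITE,
+        MAP_SHARED, fd, 0);
+    ret = errno;
+    (void)close(fd);
+    if (p == MAP_FAILED)
+        return (ret);
+    *addr = p;
+    return (0);
+}
+<p>
+(void)db_env_set_func_map(my_map);</pre></blockquote>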
+<h1>Errors</h1>
+<p>The db_env_set_func_map method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_map method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_map method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_open.html b/libdb/docs/api_c/set_func_open.html
new file mode 100644
index 0000000..b56b245
--- /dev/null
+++ b/libdb/docs/api_c/set_func_open.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_open</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_open(int (*func_open)(const char *path, int flags, int mode));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>open</b> interface
+with <b>func_open</b>, which must conform to the standard interface.
+<p>The db_env_set_func_open method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_open interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_open method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_open method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_open method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_open method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_read.html b/libdb/docs/api_c/set_func_read.html
new file mode 100644
index 0000000..3d35a58
--- /dev/null
+++ b/libdb/docs/api_c/set_func_read.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_read</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_read</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_read(ssize_t (*func_read)(int fd, void *buf, size_t nbytes));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>read</b> interface
+with <b>func_read</b>, which must conform to the standard interface.
+<p>The db_env_set_func_read method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_read interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_read method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_read method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_read method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_read method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_realloc.html b/libdb/docs/api_c/set_func_realloc.html
new file mode 100644
index 0000000..ea236d6
--- /dev/null
+++ b/libdb/docs/api_c/set_func_realloc.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_realloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_realloc</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_realloc(void *(*func_realloc)(void *ptr, size_t size));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the ANSI C X3.159-1989 (ANSI C) standard
+<b>realloc</b> interface with <b>func_realloc</b>, which must conform to
+the standard interface.
+<p>The db_env_set_func_realloc method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_realloc interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_realloc method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_realloc method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_realloc method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_realloc method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_rename.html b/libdb/docs/api_c/set_func_rename.html
new file mode 100644
index 0000000..aabb9e3
--- /dev/null
+++ b/libdb/docs/api_c/set_func_rename.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_rename</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_rename(int (*func_rename)(const char *from, const char *to));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>rename</b> interface
+with <b>func_rename</b>, which must conform to the standard interface.
+<p>The db_env_set_func_rename method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_rename interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_rename method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_rename method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_rename method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_rename method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_seek.html b/libdb/docs/api_c/set_func_seek.html
new file mode 100644
index 0000000..8ce7cae
--- /dev/null
+++ b/libdb/docs/api_c/set_func_seek.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_seek</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_seek</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_seek(int (*func_seek)(int fd, size_t pgsize,
+ db_pgno_t pageno, u_int32_t relative, int rewind, int whence));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to specify that a subsequent read
+from or write to a file will occur at a specific location in that file.
+The <b>func_seek</b> argument must conform to the following interface:
+<p><blockquote><pre>int seek(int fd, size_t pgsize, db_pgno_t pageno,
+u_int32_t relative, int rewind, int whence);</pre></blockquote>
+<p>The <b>fd</b> argument is an open file descriptor on the file.
+<p>The <b>seek</b> function must cause a subsequent read from or write to
+the file to occur at a byte offset specified by the calculation:
+<p><blockquote><pre>(pgsize * pageno) + relative</pre></blockquote>
+<p>If <b>rewind</b> is non-zero, the byte offset is treated as a backward
+seek, not a forward one.
+<p>The <b>whence</b> argument specifies where in the file the byte offset
+is relative to, as described by the IEEE/ANSI Std 1003.1 (POSIX) <b>lseek</b> system
+call.
+<p>The <b>func_seek</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_seek method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_seek interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_seek method returns a non-zero error value on failure and 0 on success.
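+<p>As a minimal sketch, a replacement built on the POSIX <b>lseek</b> call
+might look like the following (the name <b>my_seek</b> is hypothetical):
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_seek(int fd, size_t pgsize, db_pgno_t pageno,
+    u_int32_t relative, int rewind, int whence)
+{
+    off_t offset;
+
+    /* The offset is (pgsize * pageno) + relative, possibly negated. */
+    offset = (off_t)pgsize * pageno + relative;
+    if (rewind)
+        offset = -offset;
+    return (lseek(fd, offset, whence) == -1 ? errno : 0);
+}
+<p>
+(void)db_env_set_func_seek(my_seek);</pre></blockquote>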
+<h1>Errors</h1>
+<p>The db_env_set_func_seek method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_seek method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_seek method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_sleep.html b/libdb/docs/api_c/set_func_sleep.html
new file mode 100644
index 0000000..aec393c
--- /dev/null
+++ b/libdb/docs/api_c/set_func_sleep.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_sleep</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_sleep</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_sleep(int (*func_sleep)(u_long seconds, u_long microseconds));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to cause a thread of control to
+suspend itself for a period of time, relinquishing control of the
+processor to any other waiting thread of control. The
+<b>func_sleep</b> argument must conform to the following interface:
+<p><blockquote><pre>int sleep(u_long seconds, u_long microseconds);</pre></blockquote>
+<p>The <b>seconds</b> and <b>microseconds</b> arguments specify the amount
+of time to wait until the suspending thread of control should run again.
+<p>The <b>seconds</b> and <b>microseconds</b> arguments may not be
+normalized when the <b>sleep</b> function is called; that is, the
+<b>microseconds</b> argument may be greater than 1000000.
+<p>The <b>func_sleep</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_sleep method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_sleep interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_sleep method returns a non-zero error value on failure and 0 on success.
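+<p>As a minimal sketch, a replacement built on the POSIX <b>select</b> call
+might look like the following (the name <b>my_sleep</b> is hypothetical);
+note that it normalizes the arguments, since <b>microseconds</b> may be
+greater than 1000000:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;sys/time.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_sleep(u_long seconds, u_long microseconds)
+{
+    struct timeval tv;
+
+    tv.tv_sec = seconds + microseconds / 1000000;
+    tv.tv_usec = microseconds % 1000000;
+    return (select(0, NULL, NULL, NULL, &amp;tv) == -1 ? errno : 0);
+}
+<p>
+(void)db_env_set_func_sleep(my_sleep);</pre></blockquote>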
+<h1>Errors</h1>
+<p>The db_env_set_func_sleep method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_sleep method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_sleep method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_unlink.html b/libdb/docs/api_c/set_func_unlink.html
new file mode 100644
index 0000000..d70131d
--- /dev/null
+++ b/libdb/docs/api_c/set_func_unlink.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_unlink</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_unlink</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_unlink(int (*func_unlink)(const char *path));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>unlink</b> interface
+with <b>func_unlink</b>, which must conform to the standard interface.
+<p>The db_env_set_func_unlink method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_unlink interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_unlink method returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The db_env_set_func_unlink method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_unlink method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_unlink method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_unmap.html b/libdb/docs/api_c/set_func_unmap.html
new file mode 100644
index 0000000..76a67a4
--- /dev/null
+++ b/libdb/docs/api_c/set_func_unmap.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_unmap</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_unmap</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_unmap(int (*func_unmap)(void *addr, size_t len));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to unmap a file or shared memory
+region from memory. The <b>func_unmap</b> argument must conform to the
+following interface:
+<p><blockquote><pre>int unmap(void *addr, size_t len);</pre></blockquote>
+<p>The <b>addr</b> argument is the argument returned by the
+<a href="../api_c/set_func_map.html">db_env_set_func_map</a> function when the file or region was mapped
+into memory, and the <b>len</b> argument is the same as the <b>len</b>
+argument specified to the <a href="../api_c/set_func_map.html">db_env_set_func_map</a> function when the
+file or region was mapped into memory.
+<p>The <b>func_unmap</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_unmap method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_unmap interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_unmap method returns a non-zero error value on failure and 0 on success.
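+<p>As a minimal sketch, a replacement built on the POSIX <b>munmap</b> call
+might look like the following (the name <b>my_unmap</b> is hypothetical):
+<p><blockquote><pre>#include &lt;sys/mman.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_unmap(void *addr, size_t len)
+{
+    return (munmap(addr, len) == -1 ? errno : 0);
+}
+<p>
+(void)db_env_set_func_unmap(my_unmap);</pre></blockquote>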
+<h1>Errors</h1>
+<p>The db_env_set_func_unmap method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_unmap method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_unmap method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_write.html b/libdb/docs/api_c/set_func_write.html
new file mode 100644
index 0000000..fe36c7f
--- /dev/null
+++ b/libdb/docs/api_c/set_func_write.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_write</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_write</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_write(
+ ssize_t (*func_write)(int fd, const void *buffer, size_t nbytes));
+</pre></h3>
+<h1>Description</h1>
+<p>Replace Berkeley DB calls to the IEEE/ANSI Std 1003.1 (POSIX) <b>write</b> interface
+with <b>func_write</b>, which must conform to the standard interface.
+<p>The db_env_set_func_write method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_write interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_write method returns a non-zero error value on failure and 0 on success.
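+<p>One reason to replace the interface is to instrument the library's I/O.
+For example, a wrapper that counts the bytes written before delegating to
+the standard call might look like the following (the names <b>my_write</b>
+and <b>total_bytes_written</b> are hypothetical):
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+<p>
+static unsigned long total_bytes_written;
+<p>
+static ssize_t
+my_write(int fd, const void *buffer, size_t nbytes)
+{
+    ssize_t nw;
+
+    if ((nw = write(fd, buffer, nbytes)) &gt; 0)
+        total_bytes_written += (unsigned long)nw;
+    return (nw);
+}
+<p>
+(void)db_env_set_func_write(my_write);</pre></blockquote>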
+<h1>Errors</h1>
+<p>The db_env_set_func_write method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_write method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_write method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/set_func_yield.html b/libdb/docs/api_c/set_func_yield.html
new file mode 100644
index 0000000..09dca01
--- /dev/null
+++ b/libdb/docs/api_c/set_func_yield.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_env_set_func_yield</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_env_set_func_yield</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+db_env_set_func_yield(int (*func_yield)(void));
+</pre></h3>
+<h1>Description</h1>
+<p>The Berkeley DB library requires the ability to yield the processor from the current
+thread of control to any other waiting threads of control.
+The <b>func_yield</b> argument must conform to the following interface:
+<p><blockquote><pre>int yield(void);</pre></blockquote>
+<p>The <b>func_yield</b> function must be able to cause the rescheduling
+of all participants in the current Berkeley DB environment, whether threaded
+or not. It may be incorrect to supply a thread <b>yield</b> function
+if more than a single process is operating in the Berkeley DB environment.
+This is because many thread-yield functions will not allow other
+processes to run, and the contested lock may be held by another process,
+not by another thread.
+<p>If no <b>func_yield</b> function is specified, or if the <b>yield</b>
+function returns an error, the function specified by the
+<a href="../api_c/set_func_sleep.html">db_env_set_func_sleep</a> entry will be used instead or subsequently;
+that is, if no <b>yield</b> function is specified, or if it is possible
+for the <b>yield</b> function to fail, the <b>sleep</b> function
+<b>must</b> cause the processor to reschedule any waiting threads of
+control for execution.
+<p>The <b>func_yield</b> interface must return the value of <b>errno</b> on
+failure and 0 on success.
+<p>The db_env_set_func_yield method configures all operations performed by a process and
+all of its threads of control, not operations confined to a single
+database environment.
+<p>Although the db_env_set_func_yield interface may be called at any time during the
+life of the application, it should normally be called before making
+calls to the <a href="../api_c/env_create.html">db_env_create</a> or <a href="../api_c/db_create.html">db_create</a> methods.
+<p>The db_env_set_func_yield method returns a non-zero error value on failure and 0 on success.
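+<p>As a minimal sketch for a single-process, POSIX-threaded application, a
+replacement built on <b>sched_yield</b> might look like the following (the
+name <b>my_yield</b> is hypothetical):
+<p><blockquote><pre>#include &lt;sched.h&gt;
+#include &lt;errno.h&gt;
+#include &lt;db.h&gt;
+<p>
+static int
+my_yield(void)
+{
+    return (sched_yield() == -1 ? errno : 0);
+}
+<p>
+(void)db_env_set_func_yield(my_yield);</pre></blockquote>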
+<h1>Errors</h1>
+<p>The db_env_set_func_yield method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The db_env_set_func_yield method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the db_env_set_func_yield method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>See Also</h1>
+<a href="../ref/program/runtime.html">Run-time configuration</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_abort.html b/libdb/docs/api_c/txn_abort.html
new file mode 100644
index 0000000..a1f5f22
--- /dev/null
+++ b/libdb/docs/api_c/txn_abort.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;abort</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;abort(DB_TXN *tid);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;abort method causes an abnormal termination of the transaction.
+The log is played backward, and any necessary recovery operations are
+initiated through the <b>recover</b> function specified to
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>All cursors opened within the transaction must be closed before the
+transaction is aborted.
+<p>After DB_TXN-&gt;abort has been called, regardless of its return, the
+<a href="../api_c/txn_class.html">DB_TXN</a> handle may not be accessed again.
+<p>The DB_TXN-&gt;abort method returns a non-zero error value on failure and 0 on success.
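+<p>A typical use, sketched here with hypothetical <b>dbenv</b> and <b>db</b>
+handles the application is assumed to have opened elsewhere, is to abort
+the transaction when an operation performed under it fails:
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+int
+delete_record(DB_ENV *dbenv, DB *db, DBT *key)
+{
+    DB_TXN *tid;
+    int ret;
+
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+        return (ret);
+    if ((ret = db-&gt;del(db, tid, key, 0)) != 0) {
+        /* Roll back; the DB_TXN handle may not be used again. */
+        (void)tid-&gt;abort(tid);
+        return (ret);
+    }
+    return (tid-&gt;commit(tid, 0));
+}</pre></blockquote>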
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;abort method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;abort method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_begin.html b/libdb/docs/api_c/txn_begin.html
new file mode 100644
index 0000000..9dc764d
--- /dev/null
+++ b/libdb/docs/api_c/txn_begin.html
@@ -0,0 +1,99 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;txn_begin</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;txn_begin(DB_ENV *env,
+ DB_TXN *parent, DB_TXN **tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;txn_begin method creates a new transaction in the environment
+and copies a pointer to a <a href="../api_c/txn_class.html">DB_TXN</a> that uniquely identifies it into
+the memory to which <b>tid</b> refers.
+Calling the <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>,
+<a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a> or <a href="../api_c/txn_discard.html">DB_TXN-&gt;discard</a> methods will discard the returned
+handle.
+<p>If the <b>parent</b> argument is non-NULL, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+In the presence of distributed transactions and two-phase commit,
+only the parental transaction, that is, a transaction without
+a <b>parent</b> specified, should be passed as an argument to
+<a href="../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a>.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>All read operations performed by the transaction will read modified but
+not yet committed data. Silently ignored if the <a href="../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a>
+flag was not specified when the underlying database was opened.
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency, and isolation) properties, but not D (durability); that is,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. Any value specified in this
+interface overrides that setting.
+<p><dt><a name="DB_TXN_NOWAIT">DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction,
+return DB_LOCK_NOTGRANTED
+immediately instead of blocking on the lock.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a>
+interface. Any value specified in this interface overrides that
+setting.
+</dl>
+<p><b>Note: A transaction may not span threads; that is, each transaction must
+begin and end in the same thread, and each transaction may be used only
+by a single thread.</b>
+<p><b>Note: Cursors may not span transactions; that is, each cursor must be
+opened and closed within a single transaction.</b>
+<p><b>Note: A parent transaction may not issue any Berkeley DB operations -- except for
+DB_ENV-&gt;txn_begin, <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a> and <a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a> -- while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
+<p>The DB_ENV-&gt;txn_begin method returns a non-zero error value on failure and 0 on success.
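+<p>For example, the following minimal sketch begins a parent transaction and a
+nested child transaction, resolving the child before the parent; it assumes an
+already-opened transactional environment <b>dbenv</b>:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Begin a parent transaction and a nested child transaction. */
+int
+run_nested(DB_ENV *dbenv)
+{
+    DB_TXN *parent, *child;
+    int ret;
+
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &amp;parent, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;txn_begin(dbenv, parent, &amp;child, 0)) != 0) {
+        (void)parent-&gt;abort(parent);
+        return (ret);
+    }
+
+    /* ... perform database operations using the child handle ... */
+
+    /* Resolve the child before resolving the parent. */
+    if ((ret = child-&gt;commit(child, 0)) != 0) {
+        (void)parent-&gt;abort(parent);
+        return (ret);
+    }
+    return (parent-&gt;commit(parent, 0));
+}
+</pre></blockquote>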
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;txn_begin method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The DB_ENV-&gt;txn_begin method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;txn_begin method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_checkpoint.html b/libdb/docs/api_c/txn_checkpoint.html
new file mode 100644
index 0000000..e9cd91f
--- /dev/null
+++ b/libdb/docs/api_c/txn_checkpoint.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;txn_checkpoint</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;txn_checkpoint(const DB_ENV *env,
+ u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>If there has been database environment activity since the last checkpoint,
+the DB_ENV-&gt;txn_checkpoint method flushes the underlying memory pool, writes a
+checkpoint record to the log, and then flushes the log.
+<p>If <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is done only
+if more than <b>min</b> minutes have passed since the last checkpoint
+or if more than <b>kbyte</b> kilobytes of log data have been written
+since the last checkpoint.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>Force a checkpoint record, even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The DB_ENV-&gt;txn_checkpoint method returns a non-zero error value on failure and 0 on success.
+<p>The DB_ENV-&gt;txn_checkpoint method is the underlying interface used by the <a href="../utility/db_checkpoint.html">db_checkpoint</a> utility.
+See the <a href="../utility/db_checkpoint.html">db_checkpoint</a> utility source code for an example of using DB_ENV-&gt;txn_checkpoint
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
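+<p>For example, a minimal sketch of a periodic checkpoint call, using the
+log-volume and elapsed-time thresholds described above:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/*
+ * Checkpoint only if at least 64KB of log has been written, or 5 minutes
+ * have passed, since the last checkpoint.  Passing DB_FORCE (with 0 for
+ * kbyte and min) would write a checkpoint record unconditionally.
+ */
+int
+maybe_checkpoint(DB_ENV *dbenv)
+{
+    return (dbenv-&gt;txn_checkpoint(dbenv, 64, 5, 0));
+}
+</pre></blockquote>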
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;txn_checkpoint method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;txn_checkpoint method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;txn_checkpoint method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_class.html b/libdb/docs/api_c/txn_class.html
new file mode 100644
index 0000000..6d6ae78
--- /dev/null
+++ b/libdb/docs/api_c/txn_class.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+typedef struct __db_txn DB_TXN;
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN object is the handle for a transaction. Methods of
+the DB_TXN handle are used to configure, abort and commit the
+transaction. DB_TXN handles are provided to <a href="../api_c/db_class.html">DB</a> methods
+in order to transactionally protect those database operations.
+<p>DB_TXN handles are not free-threaded; transaction handles may
+be used by multiple threads, but only serially; that is, the application
+must serialize access to the DB_TXN handle. Once the
+<a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a> or <a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a> methods are called, the handle may
+not be accessed again, regardless of the method's return. In addition,
+parent transactions may not issue any Berkeley DB operations while they have
+active child transactions (child transactions that have not yet been
+committed or aborted) except for <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>
+and <a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a>.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, DB_TXN
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_commit.html b/libdb/docs/api_c/txn_commit.html
new file mode 100644
index 0000000..650a5f1
--- /dev/null
+++ b/libdb/docs/api_c/txn_commit.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;commit</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;commit(DB_TXN *tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;commit method ends the transaction.
+<p>In the case of nested transactions, if the transaction is a parent
+transaction, committing the parent transaction causes all unresolved
+children of the parent to be committed. If the transaction is a child
+transaction, its locks are not released, but are acquired by its
+parent. Although the commit of the
+child transaction will succeed, the actual resolution of the child
+transaction is postponed until the parent transaction is committed or
+aborted; that is, if its parent transaction commits, it will be
+committed; and if its parent transaction aborts, it will be aborted.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency, and isolation) properties, but
+not D (durability); that is, database integrity will be maintained, but
+it is possible that this transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface or for a single transaction using the
+<a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation, and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a>
+interface. This behavior may also be set for a single transaction using
+the <a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+</dl>
+<p>All cursors opened within the transaction must be closed before the
+transaction is committed.
+<p>After DB_TXN-&gt;commit has been called, regardless of its return, the
+<a href="../api_c/txn_class.html">DB_TXN</a> handle may not be accessed again. If DB_TXN-&gt;commit
+encounters an error, the transaction and all child transactions of the
+transaction are aborted.
+<p>The DB_TXN-&gt;commit method returns a non-zero error value on failure and 0 on success.
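+<p>For example, a minimal sketch of committing a transaction without waiting
+for the log flush, trading durability for speed as described above:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Commit with the ACI properties only; the commit may be undone by recovery. */
+int
+commit_nosync(DB_TXN *txn)
+{
+    return (txn-&gt;commit(txn, DB_TXN_NOSYNC));
+}
+</pre></blockquote>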
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;commit method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;commit method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_discard.html b/libdb/docs/api_c/txn_discard.html
new file mode 100644
index 0000000..2244672
--- /dev/null
+++ b/libdb/docs/api_c/txn_discard.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;discard</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;discard</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;discard(DB_TXN *tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;discard method frees up all the per-process resources
+associated with the specified <a href="../api_c/txn_class.html">DB_TXN</a> handle, neither committing
+nor aborting the transaction. This call may be used only after calls
+to <a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> when there are multiple global transaction
+managers recovering transactions in a single Berkeley DB environment. Any
+transactions returned by <a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> that are not handled by
+the current global transaction manager should be discarded using
+DB_TXN-&gt;discard.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_TXN-&gt;discard method returns EINVAL if the transaction handle does
+not refer to a transaction that was recovered into a prepared but not
+yet completed state.
+Otherwise, the DB_TXN-&gt;discard method returns a non-zero error value on failure and 0 on success.
+<p>After DB_TXN-&gt;discard has been called, regardless of its return, the
+<a href="../api_c/txn_class.html">DB_TXN</a> handle may not be accessed again.
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;discard method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The transaction handle does not refer to a transaction that was
+recovered into a prepared but not yet completed state.
+</dl>
+<p>The DB_TXN-&gt;discard method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;discard method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_id.html b/libdb/docs/api_c/txn_id.html
new file mode 100644
index 0000000..e07edd6
--- /dev/null
+++ b/libdb/docs/api_c/txn_id.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;id</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+u_int32_t
+DB_TXN-&gt;id(DB_TXN *tid);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;id method returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from DB_TXN-&gt;id as the locker parameter
+to the <a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> or <a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> calls.
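+<p>For example, a minimal sketch of requesting a write lock on an
+application-defined object on behalf of an existing transaction, using the
+transaction's ID as the locker (see <a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a>
+for the details of the lock request itself):
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Lock an application object on behalf of an existing transaction. */
+int
+lock_for_txn(DB_ENV *dbenv, DB_TXN *txn, DB_LOCK *lockp)
+{
+    DBT obj;
+
+    memset(&amp;obj, 0, sizeof(obj));
+    obj.data = "my-object";
+    obj.size = sizeof("my-object");
+
+    /* Use the transaction's ID as the locker for the request. */
+    return (dbenv-&gt;lock_get(dbenv,
+        txn-&gt;id(txn), 0, &amp;obj, DB_LOCK_WRITE, lockp));
+}
+</pre></blockquote>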
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_list.html b/libdb/docs/api_c/txn_list.html
new file mode 100644
index 0000000..e8517f9
--- /dev/null
+++ b/libdb/docs/api_c/txn_list.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Transaction Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Transaction Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Transaction Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><a href="../api_c/txn_discard.html">DB_TXN-&gt;discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><a href="../api_c/txn_id.html">DB_TXN-&gt;id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><a href="../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a></td><td>Set transaction timeout</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_prepare.html b/libdb/docs/api_c/txn_prepare.html
new file mode 100644
index 0000000..a065196
--- /dev/null
+++ b/libdb/docs/api_c/txn_prepare.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;prepare</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;prepare(DB_TXN *tid, u_int8_t gid[DB_XIDDATASIZE]);
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The DB_TXN-&gt;prepare method initiates the first phase of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a DB_TXN-&gt;prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing the parent
+causes all unresolved children of the parent transaction to be committed.
+Child transactions should never be explicitly prepared.
+Their fate will be resolved along with their parent's during
+global recovery.
+<p>The <b>gid</b> parameter specifies the global transaction ID by which this
+transaction will be known. This global transaction ID will be returned
+in calls to <a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>, telling the application which global
+transactions must be resolved.
+<p>The DB_TXN-&gt;prepare method returns a non-zero error value on failure and 0 on success.
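+<p>For example, a minimal sketch of a local transaction manager preparing a
+transaction under a caller-supplied global transaction ID:
+<p><blockquote><pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Prepare a transaction under a caller-supplied global transaction ID. */
+int
+prepare_txn(DB_TXN *txn, const char *global_id)
+{
+    u_int8_t gid[DB_XIDDATASIZE];
+
+    /*
+     * The GID is an opaque byte array; the application is responsible
+     * for ensuring it is globally unique.
+     */
+    memset(gid, 0, sizeof(gid));
+    strncpy((char *)gid, global_id, sizeof(gid));
+    return (txn-&gt;prepare(txn, gid));
+}
+</pre></blockquote>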
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;prepare method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;prepare method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_recover.html b/libdb/docs/api_c/txn_recover.html
new file mode 100644
index 0000000..61ddf23
--- /dev/null
+++ b/libdb/docs/api_c/txn_recover.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;txn_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;txn_recover</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;txn_recover(DB_ENV *dbenv, DB_PREPLIST preplist[],
+ long count, long *retp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;txn_recover interface returns a list of prepared but not
+yet resolved transactions. The DB_ENV-&gt;txn_recover method should only be
+called after the environment has been recovered. Because database
+environment state must be preserved between recovery and the application
+calling DB_ENV-&gt;txn_recover, applications must either call
+DB_ENV-&gt;txn_recover using the same environment handle used when recovery
+is done, or the database environment must not be configured using the
+<a href="../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag.
+<p>On return from DB_ENV-&gt;txn_recover, the <b>preplist</b> argument will
+be filled in with a list of transactions that must be resolved by the
+application (committed, aborted or discarded). The <b>preplist</b>
+argument is a structure of type DB_PREPLIST; the following DB_PREPLIST
+fields will be filled in:
+<p><dl compact>
+<p><dt>DB_TXN *txn;<dd>The transaction handle for the transaction.
+<p><dt>u_int8_t gid[<a href="../api_c/txn_prepare.html#DB_XIDDATASIZE">DB_XIDDATASIZE</a>];<dd>The global transaction ID for the transaction. The global transaction
+ID is the one specified when the transaction was prepared. The
+application is responsible for ensuring uniqueness among global
+transaction IDs.
+</dl>
+<p>The application must call <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>, <a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a> or
+<a href="../api_c/txn_discard.html">DB_TXN-&gt;discard</a> on each returned <a href="../api_c/txn_class.html">DB_TXN</a> handle before
+starting any new operations.
+<p>The <b>count</b> parameter specifies the number of available entries
+in the passed-in <b>preplist</b> array. The <b>retp</b> parameter
+returns the number of array entries that DB_ENV-&gt;txn_recover has
+filled in.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>Begin returning a list of prepared, but not yet resolved transactions.
+<p><dt><a name="DB_NEXT">DB_NEXT</a><dd>Continue returning a list of prepared, but not yet resolved transactions,
+starting where the last call to DB_ENV-&gt;txn_recover left off.
+</dl>
+<p>The DB_ENV-&gt;txn_recover method returns a non-zero error value on failure and 0 on success.
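+<p>For example, a minimal sketch of resolving prepared transactions after
+recovery, where <b>is_ours</b> stands for a hypothetical application predicate
+that recognizes this transaction manager's global transaction IDs:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+#define PREP_MAX 16
+
+int is_ours(u_int8_t gid[DB_XIDDATASIZE]);  /* hypothetical application test */
+
+/* Commit our own prepared transactions and discard all others. */
+int
+resolve_prepared(DB_ENV *dbenv)
+{
+    DB_PREPLIST preplist[PREP_MAX];
+    DB_TXN *txn;
+    long count, i;
+    int ret;
+    u_int32_t flags;
+
+    for (flags = DB_FIRST;; flags = DB_NEXT) {
+        if ((ret = dbenv-&gt;txn_recover(dbenv,
+            preplist, PREP_MAX, &amp;count, flags)) != 0)
+            return (ret);
+        if (count == 0)
+            break;
+        for (i = 0; i &lt; count; i++) {
+            txn = preplist[i].txn;
+            ret = is_ours(preplist[i].gid) ?
+                txn-&gt;commit(txn, 0) : txn-&gt;discard(txn, 0);
+            if (ret != 0)
+                return (ret);
+        }
+    }
+    return (0);
+}
+</pre></blockquote>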
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;txn_recover method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;txn_recover method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_set_timeout.html b/libdb/docs/api_c/txn_set_timeout.html
new file mode 100644
index 0000000..87cc728
--- /dev/null
+++ b/libdb/docs/api_c/txn_set_timeout.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;set_timeout(DB_TXN *tid, db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;set_timeout method sets timeout values for locks or
+transactions for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a> for more information.
+<p>The DB_TXN-&gt;set_timeout method configures operations performed on the underlying
+transaction, not only operations performed using the specified
+<a href="../api_c/txn_class.html">DB_TXN</a> handle.
+<p>The DB_TXN-&gt;set_timeout interface may be called at any time during the life of
+the application.
+<p>The DB_TXN-&gt;set_timeout method returns a non-zero error value on failure and 0 on success.
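+<p>For example, a minimal sketch of giving a transaction a one-second lock
+timeout:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Time out lock requests made on behalf of this transaction after 1 second. */
+int
+set_lock_timeout(DB_TXN *txn)
+{
+    /* The db_timeout_t value is expressed in microseconds. */
+    return (txn-&gt;set_timeout(txn, 1000000, DB_SET_LOCK_TIMEOUT));
+}
+</pre></blockquote>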
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;set_timeout method may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_TXN-&gt;set_timeout method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;set_timeout method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_c/txn_stat.html b/libdb/docs/api_c/txn_stat.html
new file mode 100644
index 0000000..1196e85
--- /dev/null
+++ b/libdb/docs/api_c/txn_stat.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;txn_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;txn_stat(DB_ENV *env, DB_TXN_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;txn_stat method returns the transaction subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DB_ENV-&gt;txn_stat method creates a statistical structure of type
+DB_TXN_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_TXN_STAT fields will be filled in:
+<p><dl compact>
+<dt><a href="../api_c/lsn_class.html">DB_LSN</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt>time_t st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>u_int32_t st_last_txnid;<dd>The last transaction ID allocated.
+<dt>u_int32_t st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>u_int32_t st_nactive;<dd>The number of transactions that are currently active.
+<dt>u_int32_t st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>u_int32_t st_nbegins;<dd>The number of transactions that have begun.
+<dt>u_int32_t st_naborts;<dd>The number of transactions that have aborted.
+<dt>u_int32_t st_ncommits;<dd>The number of transactions that have committed.
+<dt>u_int32_t st_nrestores;<dd>The number of transactions that have been restored.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>DB_TXN_ACTIVE *st_txnarray;<dd>A pointer to an array of <b>st_nactive</b> DB_TXN_ACTIVE structures,
+describing the currently active transactions. The following fields of
+the DB_TXN_ACTIVE structure will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t txnid;<dd>The transaction ID of the transaction.
+<dt>u_int32_t parentid;<dd>The transaction ID of the parent transaction (or 0, if no parent).
+<dt><a href="../api_c/lsn_class.html">DB_LSN</a> lsn;<dd>The current log sequence number when the transaction was begun.
+</dl>
+</dl>
+<p>The DB_ENV-&gt;txn_stat method returns a non-zero error value on failure and 0 on success.
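+<p>For example, a minimal sketch of retrieving, printing and freeing the
+transaction statistics, assuming the library's default <b>malloc</b>(3)
+allocator is in use:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+/* Print a few transaction statistics and free the returned structure. */
+int
+print_txn_stats(DB_ENV *dbenv)
+{
+    DB_TXN_STAT *sp;
+    int ret;
+
+    if ((ret = dbenv-&gt;txn_stat(dbenv, &amp;sp, 0)) != 0)
+        return (ret);
+    printf("active transactions: %lu\n",
+        (unsigned long)sp-&gt;st_nactive);
+    printf("commits: %lu, aborts: %lu\n",
+        (unsigned long)sp-&gt;st_ncommits, (unsigned long)sp-&gt;st_naborts);
+    /* The caller is responsible for freeing the structure. */
+    free(sp);
+    return (0);
+}
+</pre></blockquote>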
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;txn_stat method may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;txn_stat method may fail and
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_c/env_class.html">DB_ENV</a>, <a href="../api_c/txn_class.html">DB_TXN</a>
+<h1>See Also</h1>
+<a href="../api_c/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/c_index.html b/libdb/docs/api_cxx/c_index.html
new file mode 100644
index 0000000..6f73744
--- /dev/null
+++ b/libdb/docs/api_cxx/c_index.html
@@ -0,0 +1,172 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: C++ Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: C++ Interface</h1>
+<p><table border=1 align=center>
+<tr><th>Section</th><th>Class/Method</th><th>Description</th></tr>
+<tr><td><b>Database Environment</b></td><td><a href="../api_cxx/env_class.html">DbEnv</a></td><td>Create an environment handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_close.html">DbEnv::close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_dbremove.html">DbEnv::dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_dbrename.html">DbEnv::dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_err.html">DbEnv::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_err.html">DbEnv::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_open.html">DbEnv::open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_remove.html">DbEnv::remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_strerror.html">DbEnv::strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_version.html">DbEnv::version</a></td><td>Return version information</td></tr>
+<tr><td><b>Environment Configuration</b></td><td><a href="../api_cxx/env_set_app_dispatch.html">DbEnv::set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_encrypt.html">DbEnv::set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_feedback.html">DbEnv::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_rpc_server.html">DbEnv::set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_shm_key.html">DbEnv::set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tas_spins.html">DbEnv::set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tmp_dir.html">DbEnv::set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><b>Database Operations</b></td><td><a href="../api_cxx/db_class.html">Db</a></td><td>Create a database handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_associate.html">Db::associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_close.html">Db::close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_del.html">Db::del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">Db::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">Db::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_fd.html">Db::fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get.html">Db::get</a>, <a href="../api_cxx/db_get.html">Db::pget</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_type.html">Db::get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_join.html">Db::join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_key_range.html">Db::key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_open.html">Db::open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_put.html">Db::put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_remove.html">Db::remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_rename.html">Db::rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_stat.html">Db::stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_sync.html">Db::sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_truncate.html">Db::truncate</a></td><td>Empty a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_upgrade.html">Db::upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_verify.html">Db::verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><td><b>Database Configuration</b></td><td><a href="../api_cxx/db_set_alloc.html">Db::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_cache_priority.html">Db::set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_encrypt.html">Db::set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_feedback.html">Db::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_flags.html">Db::set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><b>Btree/Recno Configuration</b></td><td><a href="../api_cxx/db_set_append_recno.html">Db::set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><b>Hash Configuration</b></td><td><a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><b>Queue Configuration</b></td><td><a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><b>Database Cursor Operations</b></td><td><a href="../api_cxx/dbc_class.html">Dbc</a></td><td><b>Cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_cursor.html">Db::cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_close.html">Dbc::close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_count.html">Dbc::count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_del.html">Dbc::del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_dup.html">Dbc::dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_get.html">Dbc::get</a>, <a href="../api_cxx/dbc_get.html">Dbc::pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_put.html">Dbc::put</a></td><td>Store by cursor</td></tr>
+<tr><td><b>Key/Data Pairs</b></td><td><a href="../api_cxx/dbt_class.html">Dbt</a></td><td><br></td></tr>
+<tr><td><b>Bulk Retrieval</b></td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a></td><td><br></td></tr>
+<tr><td><b>Lock Subsystem</b></td><td><a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_get.html">DbEnv::lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_id.html">DbEnv::lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_put.html">DbEnv::lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><b>Log Subsystem</b></td><td><a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_archive.html">DbEnv::log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_file.html">DbEnv::log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_flush.html">DbEnv::log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_put.html">DbEnv::log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_stat.html">DbEnv::log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><b>Log Cursor Operations</b></td><td><a href="../api_cxx/logc_class.html">DbLogc</a></td><td><b>Log cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/logc_close.html">DbLogc::close</a></td><td>Close a log cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/logc_get.html">DbLogc::get</a></td><td>Retrieve a log record</td></tr>
+<tr><td><b>Log Sequence Numbers</b></td><td><a href="../api_cxx/lsn_class.html">DbLsn</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_compare.html">DbEnv::log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><b>Memory Pool Subsystem</b></td><td><a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_register.html">DbEnv::memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><b>Memory Pool Files</b></td><td><a href="../api_cxx/mempfile_class.html">DbMpoolFile</a></td><td><b>Memory Pool File class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+<tr><td><b>Transaction Subsystem</b></td><td><a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><b>Transactions</b></td><td><a href="../api_cxx/txn_class.html">DbTxn</a></td><td><b>Transaction class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_abort.html">DbTxn::abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_commit.html">DbTxn::commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_discard.html">DbTxn::discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_id.html">DbTxn::id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a></td><td>Set transaction timeout</td></tr>
+<tr><td><b>Replication</b></td><td><a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_limit.html">DbEnv::set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_start.html">DbEnv::rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_stat.html">DbEnv::rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><b>Exceptions</b></td><td><a href="../api_cxx/except_class.html">DbException</a></td><td><b>Exception Class for Berkeley DB Activity</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/deadlock_class.html">DbDeadlockException</a></td><td><b>Exception Class for deadlocks</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lockng_class.html">DbLockNotGrantedException</a></td><td><b>Exception Class for lock request failures</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_class.html">DbMemoryException</a></td><td><b>Exception Class for insufficient memory</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a></td><td><b>Exception Class for failures requiring recovery</b></td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
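Not part of the vendored cxx_index.html above: a minimal sketch of the transactional call flow that the summary table enumerates (DbEnv::open, DbEnv::txn_begin, Db::open, Db::put, DbTxn::commit), assuming the Berkeley DB 4.1 C++ API bundled under libdb/ and linking against libdb_cxx. The database name and the key/data pair are hypothetical, chosen only for illustration.

```cpp
#include <db_cxx.h>

int main() {
    // Create and open a transactional environment in the current directory.
    DbEnv env(0);
    env.open(".", DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
                  DB_INIT_LOG | DB_INIT_TXN, 0);

    // Open a Btree database inside its own transaction (4.1 allows a
    // DbTxn handle as the first argument to Db::open).
    Db db(&env, 0);
    DbTxn *txn = NULL;
    env.txn_begin(NULL, &txn, 0);
    db.open(txn, "example.db", NULL, DB_BTREE, DB_CREATE, 0);
    txn->commit(0);

    // Store one key/data pair under a second transaction.
    env.txn_begin(NULL, &txn, 0);
    Dbt key((void *)"fruit", 6);   // 5 characters plus terminating NUL
    Dbt data((void *)"apple", 6);
    db.put(txn, &key, &data, 0);
    txn->commit(0);

    // Close the database and the environment.
    db.close(0);
    env.close(0);
    return 0;
}
```

By default the C++ API reports errors by throwing DbException (listed under "Exceptions" in the table above), so a production program would wrap these calls in a try/catch block; the sketch omits that for brevity.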
diff --git a/libdb/docs/api_cxx/cxx_pindex.html b/libdb/docs/api_cxx/cxx_pindex.html
new file mode 100644
index 0000000..b01f720
--- /dev/null
+++ b/libdb/docs/api_cxx/cxx_pindex.html
@@ -0,0 +1,709 @@
+<html>
+<head>
+<title>Berkeley DB: C++ Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C++ Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right>configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#4">1.85</a> API compatibility</td></tr>
+<tr><td align=right>building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#6">1.85</a> databases</td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.3/intro.html#2">3.3</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.0/intro.html#2">4.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.1/intro.html#2">4.1</a></td></tr>
+<tr><td align=right>selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#2">access</a> method FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/tune.html#2">access</a> method tuning</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right>data </td><td><a href="../ref/am_misc/align.html#2">alignment</a></td></tr>
+<tr><td align=right>programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/archival.html#4">backup</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#4">buffer</a> pool subsystem</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_cxx/memp_fopen.html#3">buffering</a></td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_cxx/env_set_flags.html#4">buffering</a> for database files</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_cxx/env_set_flags.html#5">buffering</a> for log files</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#3">building</a> for QNX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/introae.html#2">building</a> for VxWorks AE</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbt_bulk.html#3">bulk</a> retrieval</td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#5">C++</a> API</td></tr>
+<tr><td align=right>flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right>introduction to the memory </td><td><a href="../ref/mp/intro.html#3">cache</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right>utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right>database page </td><td><a href="../api_cxx/db_set_flags.html#3">checksum</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/faq.html#3">compaction</a></td></tr>
+<tr><td align=right>specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right>changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right></td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right>database environment </td><td><a href="../ref/env/db_config.html#3">configuration</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right>salvaging </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right>closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right>deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right>duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right>retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right>storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_cxx/dbt_class.html#data">data</a></td></tr>
+<tr><td align=right>utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right>utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_class.html#2">Db</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/region.html#2">__db.001</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_AFTER">DB_AFTER</a></td></tr>
+<tr><td align=right>Db::verify </td><td><a href="../api_cxx/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>Db::put </td><td><a href="../api_cxx/db_put.html#DB_APPEND">DB_APPEND</a></td></tr>
+<tr><td align=right>DbEnv::log_archive </td><td><a href="../api_cxx/log_archive.html#DB_ARCH_ABS">DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>DbEnv::log_archive </td><td><a href="../api_cxx/log_archive.html#DB_ARCH_DATA">DB_ARCH_DATA</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right>DbEnv::log_archive </td><td><a href="../api_cxx/log_archive.html#DB_ARCH_LOG">DB_ARCH_LOG</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_associate.html#2">Db::associate</a></td></tr>
+<tr><td align=right>Db::associate </td><td><a href="../api_cxx/db_associate.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db::del </td><td><a href="../api_cxx/db_del.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db::put </td><td><a href="../api_cxx/db_put.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db::truncate </td><td><a href="../api_cxx/db_truncate.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv::dbremove </td><td><a href="../api_cxx/env_dbremove.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv::dbrename </td><td><a href="../api_cxx/env_dbrename.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_BEFORE">DB_BEFORE</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_BTREE">DB_BTREE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_class.html#2">Dbc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_close.html#2">Dbc::close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_count.html#2">Dbc::count</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_del.html#2">Dbc::del</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_dup.html#2">Dbc::dup</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_get.html#2">Dbc::get</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_CHKSUM_SHA1">DB_CHKSUM_SHA1</a></td></tr>
+<tr><td align=right>DbEnv </td><td><a href="../api_cxx/env_class.html#DB_CLIENT">DB_CLIENT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_close.html#2">Db::close</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/db_config.html#2">DB_CONFIG</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_CONSUME">DB_CONSUME</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_CONSUME_WAIT">DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbc_put.html#2">Dbc::put</a></td></tr>
+<tr><td align=right>Db::associate </td><td><a href="../api_cxx/db_associate.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>DbMpoolFile::open </td><td><a href="../api_cxx/memp_fopen.html#DB_CREATE">DB_CREATE</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_CURRENT">DB_CURRENT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_cursor.html#2">Db::cursor</a></td></tr>
+<tr><td align=right>Db </td><td><a href="../api_cxx/db_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a></td></tr>
+<tr><td align=right>DbEnv </td><td><a href="../api_cxx/env_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_associate.html#3">DB_DBT_APPMALLOC</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_cxx/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/deadlock_class.html#2">DbDeadlockException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_del.html#2">Db::del</a></td></tr>
+<tr><td align=right>DbMpoolFile::open </td><td><a href="../api_cxx/memp_fopen.html#DB_DIRECT">DB_DIRECT</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_DIRECT_DB">DB_DIRECT_DB</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_DIRECT_LOG">DB_DIRECT_LOG</a></td></tr>
+<tr><td align=right>Db::cursor </td><td><a href="../api_cxx/db_cursor.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db::join </td><td><a href="../api_cxx/db_join.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin </td><td><a href="../api_cxx/txn_begin.html#DB_DIRTY_READ">DB_DIRTY_READ</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_associate.html#4">DB_DONOTINDEX</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_DUP">DB_DUP</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db::upgrade </td><td><a href="../api_cxx/db_upgrade.html#DB_DUPSORT">DB_DUPSORT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_transport.html#3">DB_EID_BROADCAST</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_ENCRYPT">DB_ENCRYPT</a></td></tr>
+<tr><td align=right>Db::set_encrypt </td><td><a href="../api_cxx/db_set_encrypt.html#DB_ENCRYPT_AES">DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right>DbEnv::set_encrypt </td><td><a href="../api_cxx/env_set_encrypt.html#DB_ENCRYPT_AES">DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_class.html#2">DbEnv</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_close.html#2">DbEnv::close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_dbremove.html#2">DbEnv::dbremove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_dbrename.html#2">DbEnv::dbrename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_err.html#2">DbEnv::err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_detect.html#2">DbEnv::lock_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_get.html#2">DbEnv::lock_get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_id.html#2">DbEnv::lock_id</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_id_free.html#2">DbEnv::lock_id_free</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_put.html#2">DbEnv::lock_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_stat.html#2">DbEnv::lock_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_vec.html#2">DbEnv::lock_vec</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_archive.html#2">DbEnv::log_archive</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_compare.html#2">DbEnv::log_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_cursor.html#2">DbEnv::log_cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_file.html#2">DbEnv::log_file</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_flush.html#2">DbEnv::log_flush</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_put.html#2">DbEnv::log_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/log_stat.html#2">DbEnv::log_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fcreate.html#2">DbEnv::memp_fcreate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_register.html#2">DbEnv::memp_register</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_stat.html#2">DbEnv::memp_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_sync.html#2">DbEnv::memp_sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_trickle.html#2">DbEnv::memp_trickle</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_open.html#2">DbEnv::open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_remove.html#2">DbEnv::remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_elect.html#2">DbEnv::rep_elect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_message.html#2">DbEnv::rep_process_message</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_start.html#2">DbEnv::rep_start</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_stat.html#2">DbEnv::rep_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_alloc.html#2">DbEnv::set_alloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_app_dispatch.html#2">DbEnv::set_app_dispatch</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_cachesize.html#2">DbEnv::set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_data_dir.html#2">DbEnv::set_data_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_encrypt.html#2">DbEnv::set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_errcall.html#2">DbEnv::set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_errfile.html#2">DbEnv::set_errfile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_error_stream.html#2">DbEnv::set_error_stream</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_errpfx.html#2">DbEnv::set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_feedback.html#2">DbEnv::set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_flags.html#2">DbEnv::set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lg_bsize.html#2">DbEnv::set_lg_bsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lg_dir.html#2">DbEnv::set_lg_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lg_max.html#2">DbEnv::set_lg_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lg_regionmax.html#2">DbEnv::set_lg_regionmax</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lk_conflicts.html#2">DbEnv::set_lk_conflicts</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lk_detect.html#2">DbEnv::set_lk_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lk_max_lockers.html#2">DbEnv::set_lk_max_lockers</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lk_max_locks.html#2">DbEnv::set_lk_max_locks</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_lk_max_objects.html#2">DbEnv::set_lk_max_objects</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_mp_mmapsize.html#2">DbEnv::set_mp_mmapsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_paniccall.html#2">DbEnv::set_paniccall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_limit.html#2">DbEnv::set_rep_limit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_transport.html#2">DbEnv::set_rep_transport</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_rpc_server.html#2">DbEnv::set_rpc_server</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_shm_key.html#2">DbEnv::set_shm_key</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_tas_spins.html#2">DbEnv::set_tas_spins</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_timeout.html#2">DbEnv::set_timeout</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_tmp_dir.html#2">DbEnv::set_tmp_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_tx_max.html#2">DbEnv::set_tx_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_tx_timestamp.html#2">DbEnv::set_tx_timestamp</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_verbose.html#2">DbEnv::set_verbose</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_strerror.html#2">DbEnv::strerror</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_begin.html#2">DbEnv::txn_begin</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_checkpoint.html#2">DbEnv::txn_checkpoint</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_recover.html#2">DbEnv::txn_recover</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_stat.html#2">DbEnv::txn_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_version.html#2">DbEnv::version</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_err.html#2">Db::err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/except_class.html#2">DbException</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_EXCL">DB_EXCL</a></td></tr>
+<tr><td align=right>Db::stat </td><td><a href="../api_cxx/db_stat.html#DB_FAST_STAT">DB_FAST_STAT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_fd.html#2">Db::fd</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv::txn_recover </td><td><a href="../api_cxx/txn_recover.html#DB_FIRST">DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv::log_put </td><td><a href="../api_cxx/log_put.html#DB_FLUSH">DB_FLUSH</a></td></tr>
+<tr><td align=right>DbEnv::remove </td><td><a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right>DbEnv::txn_checkpoint </td><td><a href="../api_cxx/txn_checkpoint.html#DB_FORCE">DB_FORCE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_get.html#2">Db::get</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_GET_BOTH">DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_GET_BOTH_RANGE">DB_GET_BOTH_RANGE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_get_byteswapped.html#2">Db::get_byteswapped</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_get_type.html#2">Db::get_type</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_HASH">DB_HASH</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_join.html#2">Db::join</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_JOINENV">DB_JOINENV</a></td></tr>
+<tr><td align=right>Db::join </td><td><a href="../api_cxx/db_join.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_JOIN_ITEM">DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Db::join </td><td><a href="../api_cxx/db_join.html#DB_JOIN_NOSORT">DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEXIST">DB_KEYEXIST</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_key_range.html#2">Db::key_range</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_LAST">DB_LAST</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lock_class.html#2">DbLock</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#4">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_GET">DB_LOCK_GET</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_GET_TIMEOUT">DB_LOCK_GET_TIMEOUT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_IREAD">DB_LOCK_IREAD</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_IWR">DB_LOCK_IWR</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_IWRITE">DB_LOCK_IWRITE</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lockng_class.html#2">DbLockNotGrantedException</a></td></tr>
+<tr><td align=right>DbEnv::lock_get </td><td><a href="../api_cxx/lock_get.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_OLDEST">DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT">DB_LOCK_PUT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_RANDOM">DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_READ">DB_LOCK_READ</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_TIMEOUT">DB_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#DB_LOCK_WRITE">DB_LOCK_WRITE</a></td></tr>
+<tr><td align=right>DbEnv::set_lk_detect </td><td><a href="../api_cxx/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right>DbEnv::lock_detect </td><td><a href="../api_cxx/lock_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/logc_class.html#2">DbLogc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/logc_close.html#2">DbLogc::close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/logc_get.html#2">DbLogc::get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/lsn_class.html#2">DbLsn</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_class.html#2">DbMemoryException</a></td></tr>
+<tr><td align=right>DbMpoolFile::put </td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DbMpoolFile::set </td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a></td></tr>
+<tr><td align=right>DbMpoolFile::get </td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a></td></tr>
+<tr><td align=right>DbMpoolFile::put </td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DbMpoolFile::set </td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a></td></tr>
+<tr><td align=right>DbMpoolFile::put </td><td><a href="../api_cxx/memp_fput.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right>DbMpoolFile::set </td><td><a href="../api_cxx/memp_fset.html#DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/mempfile_class.html#2">DbMpoolFile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fclose.html#2">DbMpoolFile::close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fget.html#2">DbMpoolFile::get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fopen.html#2">DbMpoolFile::open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fput.html#2">DbMpoolFile::put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fset.html#2">DbMpoolFile::set</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_set_clear_len.html#2">DbMpoolFile::set_clear_len</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_set_fileid.html#2">DbMpoolFile::set_fileid</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_set_ftype.html#2">DbMpoolFile::set_ftype</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_set_lsn_offset.html#2">DbMpoolFile::set_lsn_offset</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_set_pgcookie.html#2">DbMpoolFile::set_pgcookie</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fsync.html#2">DbMpoolFile::sync</a></td></tr>
+<tr><td align=right>DbMpoolFile::get </td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_LAST">DB_MPOOL_LAST</a></td></tr>
+<tr><td align=right>DbMpoolFile::get </td><td><a href="../api_cxx/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_MULTIPLE">DB_MULTIPLE</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a></td></tr>
+<tr><td align=right>DBT </td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>DbEnv::txn_recover </td><td><a href="../api_cxx/txn_recover.html#DB_NEXT">DB_NEXT</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>Db::put </td><td><a href="../api_cxx/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Dbc::put </td><td><a href="../api_cxx/dbc_put.html#DB_NODUPDATA">DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbMpoolFile::open </td><td><a href="../api_cxx/memp_fopen.html#DB_NOMMAP">DB_NOMMAP</a></td></tr>
+<tr><td align=right>Db::verify </td><td><a href="../api_cxx/db_verify.html#DB_NOORDERCHK">DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>Db::put </td><td><a href="../api_cxx/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_rpc_server.html#3">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DbEnv::set_rpc_server </td><td><a href="../api_cxx/env_set_rpc_server.html#DB_NOSERVER">DB_NOSERVER</a></td></tr>
+<tr><td align=right>DbEnv::set_rpc_server </td><td><a href="../api_cxx/env_set_rpc_server.html#DB_NOSERVER_HOME">DB_NOSERVER_HOME</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_rpc_server.html#4">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>DbEnv::set_rpc_server </td><td><a href="../api_cxx/env_set_rpc_server.html#DB_NOSERVER_ID">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>Db::close </td><td><a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right>DbMpoolFile::open </td><td><a href="../api_cxx/memp_fopen.html#DB_ODDFILESIZE">DB_ODDFILESIZE</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db::upgrade </td><td><a href="../api_cxx/db_upgrade.html#DB_OLD_VERSION">DB_OLD_VERSION</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_open.html#2">Db::open</a></td></tr>
+<tr><td align=right>Db::verify </td><td><a href="../api_cxx/db_verify.html#DB_ORDERCHKONLY">DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_OVERWRITE">DB_OVERWRITE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/memp_fget.html#3">DB_PAGE_NOTFOUND</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a></td></tr>
+<tr><td align=right>Dbc::dup </td><td><a href="../api_cxx/dbc_dup.html#DB_POSITION">DB_POSITION</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_PREV">DB_PREV</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a></td></tr>
+<tr><td align=right>Db::verify </td><td><a href="../api_cxx/db_verify.html#DB_PRINTABLE">DB_PRINTABLE</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right>Db::set_cache_priority </td><td><a href="../api_cxx/db_set_cache_priority.html#DB_PRIORITY_DEFAULT">DB_PRIORITY_DEFAULT</a></td></tr>
+<tr><td align=right>Db::set_cache_priority </td><td><a href="../api_cxx/db_set_cache_priority.html#DB_PRIORITY_HIGH">DB_PRIORITY_HIGH</a></td></tr>
+<tr><td align=right>Db::set_cache_priority </td><td><a href="../api_cxx/db_set_cache_priority.html#DB_PRIORITY_LOW">DB_PRIORITY_LOW</a></td></tr>
+<tr><td align=right>Db::set_cache_priority </td><td><a href="../api_cxx/db_set_cache_priority.html#DB_PRIORITY_VERY_HIGH">DB_PRIORITY_VERY_HIGH</a></td></tr>
+<tr><td align=right>Db::set_cache_priority </td><td><a href="../api_cxx/db_set_cache_priority.html#DB_PRIORITY_VERY_LOW">DB_PRIORITY_VERY_LOW</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_PRIVATE">DB_PRIVATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_put.html#2">Db::put</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_QUEUE">DB_QUEUE</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>DbMpoolFile::open </td><td><a href="../api_cxx/memp_fopen.html#DB_RDONLY">DB_RDONLY</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_RECNO">DB_RECNO</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv::set_feedback </td><td><a href="../api_cxx/env_set_feedback.html#DB_RECOVER">DB_RECOVER</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_remove.html#2">Db::remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_rename.html#2">Db::rename</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a></td></tr>
+<tr><td align=right>DbEnv::rep_start </td><td><a href="../api_cxx/rep_start.html#DB_REP_CLIENT">DB_REP_CLIENT</a></td></tr>
+<tr><td align=right>DbEnv::rep_start </td><td><a href="../api_cxx/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a></td></tr>
+<tr><td align=right>DbEnv::rep_start </td><td><a href="../api_cxx/rep_start.html#DB_REP_MASTER">DB_REP_MASTER</a></td></tr>
+<tr><td align=right>DbEnv::set_rep_transport </td><td><a href="../api_cxx/rep_transport.html#DB_REP_PERMANENT">DB_REP_PERMANENT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/rep_elect.html#3">DB_REP_UNAVAIL</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Db::join </td><td><a href="../api_cxx/db_join.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_RMW">DB_RMW</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/runrec_class.html#2">DbRunRecoveryException</a></td></tr>
+<tr><td align=right>Db::verify </td><td><a href="../api_cxx/db_verify.html#DB_SALVAGE">DB_SALVAGE</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right>DbLogc::get </td><td><a href="../api_cxx/logc_get.html#DB_SET">DB_SET</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_alloc.html#2">Db::set_alloc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_append_recno.html#2">Db::set_append_recno</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_bt_compare.html#2">Db::set_bt_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_bt_minkey.html#2">Db::set_bt_minkey</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_bt_prefix.html#2">Db::set_bt_prefix</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_cache_priority.html#2">Db::set_cache_priority</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_cachesize.html#2">Db::set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_dup_compare.html#2">Db::set_dup_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_encrypt.html#2">Db::set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_errcall.html#2">Db::set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_errfile.html#2">Db::set_errfile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_error_stream.html#2">Db::set_error_stream</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_errpfx.html#2">Db::set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_feedback.html#2">Db::set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_flags.html#2">Db::set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_h_ffactor.html#2">Db::set_h_ffactor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_h_hash.html#2">Db::set_h_hash</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_h_nelem.html#2">Db::set_h_nelem</a></td></tr>
+<tr><td align=right>DbEnv::set_timeout </td><td><a href="../api_cxx/env_set_timeout.html#DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DbTxn::set_timeout </td><td><a href="../api_cxx/txn_set_timeout.html#DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_lorder.html#2">Db::set_lorder</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_pagesize.html#2">Db::set_pagesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_paniccall.html#2">Db::set_paniccall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_q_extentsize.html#2">Db::set_q_extentsize</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a></td></tr>
+<tr><td align=right>Db::get </td><td><a href="../api_cxx/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right>Dbc::get </td><td><a href="../api_cxx/dbc_get.html#DB_SET_RECNO">DB_SET_RECNO</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_re_delim.html#2">Db::set_re_delim</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_re_len.html#2">Db::set_re_len</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_re_pad.html#2">Db::set_re_pad</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_re_source.html#2">Db::set_re_source</a></td></tr>
+<tr><td align=right>DbEnv::set_timeout </td><td><a href="../api_cxx/env_set_timeout.html#DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>DbTxn::set_timeout </td><td><a href="../api_cxx/txn_set_timeout.html#DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>Db::set_flags </td><td><a href="../api_cxx/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_stat.html#2">Db::stat</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right>DbEnv::lock_stat </td><td><a href="../api_cxx/lock_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv::log_stat </td><td><a href="../api_cxx/log_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv::memp_stat </td><td><a href="../api_cxx/memp_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv::rep_stat </td><td><a href="../api_cxx/rep_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv::txn_stat </td><td><a href="../api_cxx/txn_stat.html#DB_STAT_CLEAR">DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_sync.html#2">Db::sync</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbt_bulk.html#2">DBT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbt_class.html#2">Dbt</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_truncate.html#2">Db::truncate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_class.html#2">DbTxn</a></td></tr>
+<tr><td align=right>DbEnv::set_app_dispatch </td><td><a href="../api_cxx/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_abort.html#2">DbTxn::abort</a></td></tr>
+<tr><td align=right>DbEnv::set_app_dispatch </td><td><a href="../api_cxx/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a></td></tr>
+<tr><td align=right>DbEnv::set_app_dispatch </td><td><a href="../api_cxx/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_commit.html#2">DbTxn::commit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_discard.html#2">DbTxn::discard</a></td></tr>
+<tr><td align=right>DbEnv::set_app_dispatch </td><td><a href="../api_cxx/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_id.html#2">DbTxn::id</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin </td><td><a href="../api_cxx/txn_begin.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbTxn::commit </td><td><a href="../api_cxx/txn_commit.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin </td><td><a href="../api_cxx/txn_begin.html#DB_TXN_NOWAIT">DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_prepare.html#2">DbTxn::prepare</a></td></tr>
+<tr><td align=right>DbEnv::set_app_dispatch </td><td><a href="../api_cxx/env_set_app_dispatch.html#DB_TXN_PRINT">DB_TXN_PRINT</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_set_timeout.html#2">DbTxn::set_timeout</a></td></tr>
+<tr><td align=right>DbEnv::txn_begin </td><td><a href="../api_cxx/txn_begin.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbTxn::commit </td><td><a href="../api_cxx/txn_commit.html#DB_TXN_SYNC">DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a></td></tr>
+<tr><td align=right>Db::open </td><td><a href="../api_cxx/db_open.html#DB_UNKNOWN">DB_UNKNOWN</a></td></tr>
+<tr><td align=right>Db::set_feedback </td><td><a href="../api_cxx/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_upgrade.html#2">Db::upgrade</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv::remove </td><td><a href="../api_cxx/env_remove.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv::open </td><td><a href="../api_cxx/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv::remove </td><td><a href="../api_cxx/env_remove.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose </td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose </td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose </td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_RECOVERY">DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose </td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_REPLICATION">DB_VERB_REPLICATION</a></td></tr>
+<tr><td align=right>DbEnv::set_verbose </td><td><a href="../api_cxx/env_set_verbose.html#DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>Db::set_feedback </td><td><a href="../api_cxx/db_set_feedback.html#DB_VERIFY">DB_VERIFY</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_verify.html#2">Db::verify</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_verify.html#3">DB_VERIFY_BAD</a></td></tr>
+<tr><td align=right>Db::cursor </td><td><a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>Db </td><td><a href="../api_cxx/db_class.html#DB_XA_CREATE">DB_XA_CREATE</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/txn_prepare.html#3">DB_XIDDATASIZE</a></td></tr>
+<tr><td align=right>DbEnv::set_flags </td><td><a href="../api_cxx/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right>utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/debug/intro.html#2">debugging</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#4">degrees</a> of isolation</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#4">dirty</a> reads</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-shared">--disable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-static">--disable-static</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#2">Distributed</a> Transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#5">double</a> buffering</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_flags.html#5">duplicate</a> data items</td></tr>
+<tr><td align=right>sorted </td><td><a href="../api_cxx/db_set_flags.html#6">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/embedix.html#2">Embedix</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#3">emptying</a> a database</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right>database </td><td><a href="../api_cxx/db_set_flags.html#4">encryption</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/encrypt.html#2">encryption</a></td></tr>
+<tr><td align=right>turn off access to a database </td><td><a href="../api_cxx/env_set_flags.html#9">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/faq.html#2">environment</a> FAQ</td></tr>
+<tr><td align=right>fault database </td><td><a href="../api_cxx/env_set_flags.html#10">environment</a> in during open</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right>use </td><td><a href="../api_cxx/env_open.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>use </td><td><a href="../api_cxx/env_remove.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>introduction to database </td><td><a href="../ref/env/intro.html#2">environments</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#2">equality</a> join</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/error.html#2">error</a> handling</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right>selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/hotfail.html#2">failover</a></td></tr>
+<tr><td align=right>Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>configuring without large </td><td><a href="../ref/build_unix/conf.html#8">file</a> support</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>returning pages to the </td><td><a href="../ref/am_misc/faq.html#4">filesystem</a></td></tr>
+<tr><td align=right>recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right>remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right>page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right>secondary </td><td><a href="../ref/am/second.html#3">indices</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right>degrees of </td><td><a href="../ref/am_misc/stability.html#5">isolation</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#7">Java</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right>equality </td><td><a href="../ref/am/join.html#3">join</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/dbt_class.html#3">key/data</a> pairs</td></tr>
+<tr><td align=right>retrieved </td><td><a href="../ref/am_misc/perm.html#3">key/data</a> permanence</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right>changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#lock">lock</a></td></tr>
+<tr><td align=right>standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right>ignore </td><td><a href="../api_cxx/env_set_flags.html#6">locking</a></td></tr>
+<tr><td align=right>page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right>two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right>Berkeley DB Transactional Data Store </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>configure </td><td><a href="../api_cxx/env_set_flags.html#3">locking</a> for Berkeley DB Concurrent Data Store</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/page.html#3">locking</a> granularity</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/lock/intro.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right>sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right>utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/log/intro.html#2">logging</a> subsystem</td></tr>
+<tr><td align=right>retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#3">logical</a> record number</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/macosx.html#2">Mac</a> OS X</td></tr>
+<tr><td align=right>turn off database file </td><td><a href="../api_cxx/env_set_flags.html#7">memory</a> mapping</td></tr>
+<tr><td align=right></td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#2">memory</a> pool subsystem</td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#mode">mode</a></td></tr>
+<tr><td align=right>Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right>file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#4">natural</a> join</td></tr>
+<tr><td align=right>retrieving Btree records by logical record </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#obj">obj</a></td></tr>
+<tr><td align=right>DbEnv::lock_vec </td><td><a href="../api_cxx/lock_vec.html#op">op</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right>ignore database environment </td><td><a href="../api_cxx/env_set_flags.html#8">panic</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right></td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right>retrieved key/data </td><td><a href="../ref/am_misc/perm.html#2">permanence</a></td></tr>
+<tr><td align=right>task/thread </td><td><a href="../ref/program/faq.html#2">priority</a></td></tr>
+<tr><td align=right>Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#5">QNX</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right>dirty </td><td><a href="../ref/transapp/read.html#3">reads</a></td></tr>
+<tr><td align=right>accessing Btree records by </td><td><a href="../api_cxx/db_set_flags.html#7">record</a> number</td></tr>
+<tr><td align=right>logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right>managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right>logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/db_set_flags.html#10">renumbering</a> records in Recno databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#2">repeatable</a> read</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rep/intro.html#2">replication</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/xa_intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/get_bulk.html#2">retrieving</a> records in bulk</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right>turn off </td><td><a href="../api_cxx/db_set_flags.html#8">reverse</a> splits in Btree databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right>configuring a </td><td><a href="../ref/build_unix/conf.html#9">RPC</a> client/server</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rpc/intro.html#2">RPC</a> client/server</td></tr>
+<tr><td align=right>utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/faq.html#2">RPC</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/rpm.html#2">RPM</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right>Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/second.html#2">secondary</a> indices</td></tr>
+<tr><td align=right></td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#10">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right>turn off reverse </td><td><a href="../api_cxx/db_set_flags.html#9">splits</a> in Btree databases</td></tr>
+<tr><td align=right>cursor </td><td><a href="../ref/am_misc/stability.html#3">stability</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#11">static</a> libraries</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right>utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right>configure for </td><td><a href="../api_cxx/env_set_flags.html#13">stress</a> testing</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right>loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right>using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../api_cxx/env_set_tmp_dir.html#3">temporary</a> files</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right>pre-loading </td><td><a href="../api_cxx/db_set_flags.html#11">text</a> files into Recno databases</td></tr>
+<tr><td align=right>loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right>dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right>building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right>lock </td><td><a href="../ref/lock/timeout.html#2">timeouts</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/lock/timeout.html#3">timeouts</a></td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_cxx/env_set_flags.html#11">transaction</a> commit</td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_cxx/env_set_flags.html#12">transaction</a> commit</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/faq.html#2">transaction</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/build.html#2">Transaction</a> Manager</td></tr>
+<tr><td align=right>administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/txn/intro.html#2">transaction</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/tune.html#2">transaction</a> tuning</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right>nested </td><td><a href="../ref/transapp/nested.html#2">transactions</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#2">truncating</a> a database</td></tr>
+<tr><td align=right>access method </td><td><a href="../ref/am_misc/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/transapp/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>simple </td><td><a href="../ref/simple_tut/intro.html#2">tutorial</a></td></tr>
+<tr><td align=right>configuring Berkeley DB with the </td><td><a href="../ref/xa/xa_config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#4">UNIX</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right>configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right>running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-embedix=DIR">--with-embedix=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-mutex=MUTEX">--with-mutex=MUTEX</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-rpm=DIR">--with-rpm=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename=NAME</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/faq.html#3">XA</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/xa_intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_associate.html b/libdb/docs/api_cxx/db_associate.html
new file mode 100644
index 0000000..af997aa
--- /dev/null
+++ b/libdb/docs/api_cxx/db_associate.html
@@ -0,0 +1,136 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::associate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::associate</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::associate(DbTxn *txnid, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::associate function is used to declare one database a
+secondary index for a primary database. After a secondary database has
+been "associated" with a primary database, all updates to the primary
+will be automatically reflected in the secondary and all reads from the
+secondary will return corresponding data from the primary. Note that
+as primary keys must be unique for secondary indices to work, the
+primary database must be configured without support for duplicate data
+items. See <a href="../ref/am/second.html">Secondary indices</a> for
+more information.
+<p>The Db::associate method should be called on the database handle for
+the primary database that is to be indexed.
+The <b>secondary</b> argument should be an open database handle of
+either a newly created and empty database that is to be used to store
+a secondary index, or of a database that was previously associated with
+the same primary and contains a secondary index. Note that it is not
+safe to associate as a secondary database a handle that is in use by
+another thread of control or has open cursors. If the handle was opened
+with the <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag it is safe to use it in multiple threads
+of control after the Db::associate method has returned. Note also
+that either secondary keys must be unique or the secondary database must
+be configured with support for duplicate data items.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>; otherwise, NULL.
+<p>The <b>callback</b> argument should refer to a callback function that
+creates a secondary key from a given primary key and data pair. When
+called, the first argument will be the secondary <a href="../api_cxx/db_class.html">Db</a> handle; the
+second and third arguments will be <a href="../api_cxx/dbt_class.html">Dbt</a>s containing a primary
+key and datum respectively; and the fourth argument will be a zeroed
+DBT in which the callback function should fill in <b>data</b> and
+<b>size</b> fields that describe the secondary key.
+<a name="3"><!--meow--></a>
+<p>If the callback function needs to allocate memory for the <b>data</b>
+field rather than simply pointing into the primary key or datum, the
+<b>flags</b> field of the returned <a href="../api_cxx/dbt_class.html">Dbt</a> should be set to
+DB_DBT_APPMALLOC, which indicates that Berkeley DB should free the
+memory when it is done with it.
+<a name="4"><!--meow--></a>
+<p>If any key/data pair in the primary yields a null secondary key and
+should be left out of the secondary index, the callback function may
+optionally return DB_DONOTINDEX. Otherwise, the callback
+function should return 0 in case of success or any other integer error
+code in case of failure; the error code will be returned from the Berkeley DB
+interface call that initiated the callback. Note that if the callback
+function returns DB_DONOTINDEX for any key/data pairs in the
+primary database, the secondary index will not contain any reference to
+those key/data pairs, and such operations as cursor iterations and range
+queries will reflect only the corresponding subset of the database. If
+this is not desirable, the application should ensure that the callback
+function is well-defined for all possible values and never returns
+DB_DONOTINDEX.
+<p>The callback argument may be NULL if and only if both the primary and
+secondary database handles were opened with the <a href="../api_cxx/db_open.html#DB_RDONLY">DB_RDONLY</a> flag.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>If the secondary database is empty, walk through the primary and create
+an index to it in the empty secondary. This operation is potentially
+very expensive.
+<p>If the secondary database has been opened in an environment configured
+with transactions, each put necessary for its creation will be done in
+the context of a transaction created for the purpose.
+<p>Care should be taken not to use a newly-populated secondary database in
+another thread of control until the Db::associate call has
+returned successfully in the first thread.
+<p>If transactions are not being used, care should be taken not to modify
+a primary database being used to populate a secondary database, in
+another thread of control, until the Db::associate call has
+returned successfully in the first thread. If transactions are being
+used, Berkeley DB will perform appropriate locking and the application need
+not do any special operation ordering.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the Db::associate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The Db::associate method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
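+<p>The following fragment is illustrative only: it shows one possible
+secondary-key callback and a Db::associate call that uses it. The record
+layout, the callback name, and the <b>primary</b> and <b>secondary</b>
+handle names are assumptions made for this example.
+<p><pre>
+struct record {                     /* assumed layout of the primary datum */
+	char last_name[64];
+	char first_name[64];
+};
+
+/* Build the secondary key (the fixed-width last-name field) from a
+ * primary key/datum pair. */
+int
+getname(Db *secondary, const Dbt *pkey, const Dbt *pdata, Dbt *skey)
+{
+	struct record *r = (struct record *)pdata->get_data();
+	skey->set_data(r->last_name);
+	skey->set_size(sizeof(r->last_name));
+	return (0);                 /* or DB_DONOTINDEX to skip this pair */
+}
+
+/* primary and secondary are open Db handles; DB_CREATE populates the
+ * (empty) secondary from the existing primary records. */
+primary->associate(NULL, secondary, getname, DB_CREATE);
+</pre>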
+<h1>Errors</h1>
+<p>The Db::associate method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The secondary database handle has already been associated with this or
+another database handle.
+<p>The secondary database handle is not open.
+<p>The primary database has been configured to allow duplicates.
+</dl>
+<p>The Db::associate method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::associate method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_class.html b/libdb/docs/api_cxx/db_class.html
new file mode 100644
index 0000000..df48bd7
--- /dev/null
+++ b/libdb/docs/api_cxx/db_class.html
@@ -0,0 +1,105 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Db {
+public:
+ Db(DbEnv *dbenv, u_int32_t flags);
+ ~Db();
+<p>
+ DB *Db::get_DB();
+ const DB *Db::get_const_DB() const;
+ static Db *Db::get_Db(DB *db);
+ static const Db *Db::get_const_Db(const DB *db);
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The Db handle is the handle for a Berkeley DB database, which may or
+may not be part of a database environment. Db handles are
+free-threaded if the <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the
+<a href="../api_cxx/db_open.html">Db::open</a> method when the database is opened or if the database
+environment in which the database is opened is free-threaded. The
+handle should not be closed while any other handle that refers to the
+database is in use; for example, database handles must not be closed
+while cursor handles into the database remain open, or transactions that
+include operations on the database have not yet been committed or
+aborted. Once the <a href="../api_cxx/db_close.html">Db::close</a>, <a href="../api_cxx/db_remove.html">Db::remove</a>, or
+<a href="../api_cxx/db_rename.html">Db::rename</a> methods are called, the handle may not be accessed again,
+regardless of the method's return.
+<p>The constructor creates a Db object that is the handle for a
+Berkeley DB database. The constructor allocates memory internally; calling
+the <a href="../api_cxx/db_close.html">Db::close</a>, <a href="../api_cxx/db_remove.html">Db::remove</a> or <a href="../api_cxx/db_rename.html">Db::rename</a> methods will
+free that memory.
+<p>If no <b>dbenv</b> value is specified, the database is standalone; that
+is, it is not part of any Berkeley DB environment.
+<p>If a <b>dbenv</b> value is specified, the database is created within
+the specified Berkeley DB environment. The database access methods
+automatically make calls to the other subsystems in Berkeley DB based on the
+enclosing environment. For example, if the environment has been
+configured to use locking, the access methods will automatically acquire
+the correct locks when reading and writing pages of the database.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a><dd>The Berkeley DB C++ API supports two different error behaviors. By default,
+whenever an error occurs, an exception is thrown that encapsulates the
+error information. This generally allows for cleaner logic for
+transaction processing because a try block can surround a single
+transaction. However, if DB_CXX_NO_EXCEPTIONS is specified,
+exceptions are not thrown; instead, each individual function returns an
+error code.
+<p>If <b>dbenv</b> is not null, this flag is ignored, and the error behavior
+of the specified environment is used instead.
+<p><dt><a name="DB_XA_CREATE">DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended to
+be accessed via applications running under an X/Open conformant Transaction
+Manager. The database will be opened in the environment specified by the
+OPENINFO parameter of the GROUPS section of the ubbconfig file. See the
+<a href="../ref/xa/intro.html">XA Resource Manager</a> chapter in the
+Reference Guide for more information.
+</dl>
+<p>Each Db object has an associated DB struct, which is
+used by the underlying implementation of Berkeley DB and its C-language API.
+The Db::get_DB method returns a pointer to this struct. Given a const
+Db object, Db::get_const_DB returns a const pointer to the
+same struct.
+<p>Given a DB struct, the Db::get_Db method returns the
+corresponding Db object, if there is one. If the DB
+object was not associated with a Db (that is, it was not
+returned from a call to Db::get_DB), then the result of Db::get_Db is
+undefined. Given a const DB struct, Db::get_const_Db returns
+the associated const Db object, if there is one.
+<p>These methods may be useful for Berkeley DB applications including both C
+and C++ language software. It should not be necessary to use these
+calls in a purely C++ application.
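+<p>As a brief, illustrative sketch (the file name is an assumption made for
+the example): create a standalone handle, open a Btree database, and close
+it. Because the handle is created without DB_CXX_NO_EXCEPTIONS, errors are
+reported by throwing exceptions.
+<p><pre>
+Db db(NULL, 0);                 /* standalone handle: no DbEnv */
+db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+/* ... Db::put, Db::get, Db::del operations ... */
+db.close(0);                    /* the handle may not be used again */
+</pre>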
+<h1>Class</h1>
+Db
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_close.html b/libdb/docs/api_cxx/db_close.html
new file mode 100644
index 0000000..be0c8c6
--- /dev/null
+++ b/libdb/docs/api_cxx/db_close.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::close method flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Because key/data pairs are cached in memory, failing
+to sync the file with the Db::close or <a href="../api_cxx/db_sync.html">Db::sync</a> method may
+result in inconsistent or lost information.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk. The <a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag is
+a dangerous option. It should be set only if the application is doing
+logging (with transactions) so that the database is recoverable after
+a system or application crash, or if the database is always generated
+from scratch after any system or application crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+Db::close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the <a href="../api_cxx/db_class.html">Db</a> concurrently, only a single
+thread may call the Db::close method.
+<p>The <a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again after Db::close is
+called, regardless of its return.
+<p>The Db::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
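+<p>As a short illustration, the default call flushes cached pages before
+closing; DB_NOSYNC skips the flush and is appropriate only under the
+conditions described above (transactional logging, or a database that is
+regenerated from scratch after a crash).
+<p><pre>
+db.close(0);               /* flush cached pages, then close */
+/* db.close(DB_NOSYNC);       skip the flush; safe only with logging/recovery */
+</pre>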
+<h1>Errors</h1>
+<p>The Db::close method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::close method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_cursor.html b/libdb/docs/api_cxx/db_cursor.html
new file mode 100644
index 0000000..094bfec
--- /dev/null
+++ b/libdb/docs/api_cxx/db_cursor.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::cursor</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::cursor method
+creates a cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>;
+otherwise, NULL.
+<p>To transaction-protect cursor operations, cursors must be opened and
+closed within the context of a transaction, and the <b>txnid</b>
+parameter specifies the transaction context in which the cursor may be
+used.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>All read operations performed by the cursor may return modified but not
+yet committed data. Silently ignored if the <a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag
+was not specified when the underlying database was opened.
+<p><dt><a name="DB_WRITECURSOR">DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. The
+underlying database environment must have been opened using the
+<a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag.
+</dl>
+<p>The Db::cursor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
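+<p>A minimal, illustrative iteration sketch (the <b>db</b> handle and the
+variable names are assumptions for the example): open a non-transactional
+cursor, walk the database with DB_NEXT, and close the cursor.
+<p><pre>
+Dbc *dbc;
+db.cursor(NULL, &amp;dbc, 0);          /* no transaction, default flags */
+Dbt key, data;
+while (dbc->get(&amp;key, &amp;data, DB_NEXT) == 0) {
+	/* examine key.get_data()/key.get_size() and data ... */
+}
+dbc->close();                      /* cursors must be closed explicitly */
+</pre>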
+<h1>Errors</h1>
+<p>The Db::cursor method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::cursor method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::cursor method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_del.html b/libdb/docs/api_cxx/db_del.html
new file mode 100644
index 0000000..5594af4
--- /dev/null
+++ b/libdb/docs/api_cxx/db_del.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::del</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::del method removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_cxx/db_associate.html">Db::associate</a> method, the Db::del method deletes the
+key/data pair from the primary database and all secondary indices.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the Db::del call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>
+If the specified key is not in the database, the Db::del method will return DB_NOTFOUND.
+Otherwise, the Db::del method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
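+<p>A small, illustrative sketch (the key contents are an assumption for the
+example), relying on the default exception behavior so that only 0 or
+DB_NOTFOUND is returned:
+<p><pre>
+Dbt key((void *)"somekey", sizeof("somekey") - 1);
+if (db.del(NULL, &amp;key, 0) == DB_NOTFOUND) {
+	/* the key was not present; often not an error */
+}
+</pre>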
+<h1>Errors</h1>
+<p>The Db::del method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db::del method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db::del method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::del method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_err.html b/libdb/docs/api_cxx/db_err.html
new file mode 100644
index 0000000..d3fb55b
--- /dev/null
+++ b/libdb/docs/api_cxx/db_err.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::err</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+Db::err(int error, const char *fmt, ...);
+<p>
+Db::errx(const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The <a href="../api_cxx/env_err.html">DbEnv::err</a>, <a href="../api_cxx/env_err.html">DbEnv::errx</a>, Db::err and
+Db::errx methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The <a href="../api_cxx/env_err.html">DbEnv::err</a> method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback method has been set using the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method, any prefix string specified using the
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, in which the
+ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent arguments
+are converted for output.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_cxx/env_strerror.html">DbEnv::strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback method has been set (see <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>
+and <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>), that method is called with two
+arguments: any prefix string specified (see <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> and
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>) and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a> and
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>), the error message is written to that output
+stream.
+<p>If a C++ ostream has been set
+(see <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> and <a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a>),
+the error message is written to that stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard
+error output stream.</blockquote>
+<p>The <a href="../api_cxx/env_err.html">DbEnv::errx</a> and Db::errx methods perform identically to the
+<a href="../api_cxx/env_err.html">DbEnv::err</a> and Db::err methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
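+<p>An illustrative fragment (the file name is an assumption for the example),
+assuming the handle was created with DB_CXX_NO_EXCEPTIONS so that errors are
+returned rather than thrown:
+<p><pre>
+int ret = db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0664);
+if (ret != 0)
+	db.err(ret, "open of %s failed", "example.db");
+/* output resembles: "prefix: open of example.db failed: &lt;error string&gt;" */
+</pre>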
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_fd.html b/libdb/docs/api_cxx/db_fd.html
new file mode 100644
index 0000000..3c5ffd4
--- /dev/null
+++ b/libdb/docs/api_cxx/db_fd.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::fd</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::fd(int *fdp);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::fd method
+copies a file descriptor representative of the underlying database into
+the memory to which <b>fdp</b> refers. A file descriptor referring to
+the same file will be returned to all processes that call
+<a href="../api_cxx/db_open.html">Db::open</a> with the same <b>file</b> argument. This file
+descriptor may be safely used as an argument to the <b>fcntl</b>(2)
+and <b>flock</b>(2) locking functions. The file descriptor is not
+necessarily associated with any of the underlying files actually used
+by the access method.
+<p>The Db::fd method only supports a coarse-grained form of locking.
+Applications should use the lock manager where possible.
+<p>The Db::fd method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
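+<p>As an illustrative sketch (the handle is assumed to be open, and the
+whole-file locking policy shown is purely an example), the descriptor might
+be used with <b>flock</b>(2) as follows:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;sys/file.h&gt;
+
+/* Place an advisory exclusive lock on the file underlying an open handle. */
+int
+lock_backing_file(Db *db)
+{
+    int fd, ret;
+    if ((ret = db-&gt;fd(&amp;fd)) != 0)
+        return (ret);
+    return (flock(fd, LOCK_EX));    /* see flock(2) */
+}
+</pre></blockquote>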
+<h1>Errors</h1>
+<p>The Db::fd method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::fd method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_get.html b/libdb/docs/api_cxx/db_get.html
new file mode 100644
index 0000000..d90333d
--- /dev/null
+++ b/libdb/docs/api_cxx/db_get.html
@@ -0,0 +1,157 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+int
+Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get method retrieves key/data pairs from the database. The
+address
+and length of the data associated with the specified <b>key</b> are
+returned in the structure to which <b>data</b> refers.
+<p>In the presence of duplicate key values, Db::get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order, except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_cxx/dbc_get.html">Dbc::get</a> for details.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_cxx/db_associate.html">Db::associate</a> method, the Db::get and
+Db::pget methods return the key from the secondary index and the data
+item from the primary database. In addition, the Db::pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the Db::pget interface will always fail and
+return EINVAL.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CONSUME">DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue, and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b>, as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for DB_CONSUME to be specified.
+<p><dt><a name="DB_CONSUME_WAIT">DB_CONSUME_WAIT</a><dd>The DB_CONSUME_WAIT flag is the same as the DB_CONSUME
+flag, except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p>When used with the Db::pget version of this interface
+on a secondary index handle, return the secondary key/primary key/data
+tuple only if both the primary and secondary keys match the arguments.
+It is an error to use the DB_GET_BOTH flag with the Db::get
+version of this interface and a secondary index handle.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database. Upon
+return, both the <b>key</b> and <b>data</b> items will have been
+filled in.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a pointer to a logical record number (that is, a <b>db_recno_t</b>).
+This record number determines the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_MULTIPLE">DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If all
+of the data items associated with the key cannot fit into the buffer,
+the size field of the <b>data</b> argument is set to the length needed
+for the specified items, and the error ENOMEM is returned. The buffer
+to which the <b>data</b> argument refers should be large relative to
+the page size of the underlying database, aligned for unsigned integer
+access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE flag may only be used alone, or with the
+DB_GET_BOTH and DB_SET_RECNO options. The
+DB_MULTIPLE flag may not be used when accessing databases made
+into secondary indices using the <a href="../api_cxx/db_associate.html">Db::associate</a> method.
+<p>See <a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+<p>Because the Db::get interface will not hold locks
+across Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_cxx/dbc_get.html#DB_RMW">DB_RMW</a> flag to the Db::get call is meaningful only in
+the presence of transactions.
+</dl>
+<p>
+If the specified key is not in the database, the Db::get method will return DB_NOTFOUND.
+If the database is a Queue or Recno database and the specified key
+exists, but was never explicitly created by the application or was
+later deleted, the Db::get method will return DB_KEYEMPTY.
+Otherwise, the Db::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
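+<p>The following is an illustrative sketch only (the key string is hypothetical,
+stored data items are assumed to be NUL-terminated strings, and the handle is
+assumed to have been opened as described in
+<a href="../api_cxx/db_open.html">Db::open</a>); it retrieves and prints a single data item:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;iostream&gt;
+
+int
+fetch(Db *db, const char *k)
+{
+    Dbt key((void *)k, (u_int32_t)strlen(k) + 1);
+    Dbt data;
+    data.set_flags(DB_DBT_MALLOC);      /* the library allocates the result */
+
+    int ret = db-&gt;get(NULL, &amp;key, &amp;data, 0);
+    if (ret == 0) {
+        std::cout &lt;&lt; (char *)data.get_data() &lt;&lt; std::endl;
+        free(data.get_data());          /* DB_DBT_MALLOC: the caller frees */
+    }
+    return (ret);                       /* 0, DB_NOTFOUND, or an error */
+}
+</pre></blockquote>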
+<h1>Errors</h1>
+<p>The Db::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the <a href="../api_cxx/db_open.html">Db::open</a> method and
+none of the <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or
+<a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags were set in the <a href="../api_cxx/dbt_class.html">Dbt</a>.
+<p>The Db::pget interface was called with a <a href="../api_cxx/db_class.html">Db</a> handle that
+does not refer to a secondary index.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db::get method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Db::get method will fail and
+either return ENOMEM or
+throw a <a href="../api_cxx/memp_class.html">DbMemoryException</a> exception.
+<p>The Db::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::get method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_get_byteswapped.html b/libdb/docs/api_cxx/db_get_byteswapped.html
new file mode 100644
index 0000000..738cee2
--- /dev/null
+++ b/libdb/docs/api_cxx/db_get_byteswapped.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::get_byteswapped</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::get_byteswapped(int *isswapped);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get_byteswapped method
+stores 0 into the memory location referenced by <b>isswapped</b>
+if the underlying database files were created on an architecture of the
+same byte order as the current one, and
+stores 1 into the memory location referenced by <b>isswapped</b>
+if they were not (that is, big-endian on a little-endian machine, or
+vice versa). This field may be used to determine whether application
+data needs to be adjusted for this architecture or not.
+<p>The Db::get_byteswapped interface may not be called before the <a href="../api_cxx/db_open.html">Db::open</a>
+interface has been called.
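+<p>As an illustrative sketch (the handle is assumed to be open), an application
+storing integers in native byte order might check whether conversion is
+needed before interpreting retrieved data:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+stored_in_other_byte_order(Db *db)
+{
+    int isswapped;
+    (void)db-&gt;get_byteswapped(&amp;isswapped);
+    return (isswapped);     /* non-zero: created on an architecture of the other byte order */
+}
+</pre></blockquote>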
+<h1>Errors</h1>
+<p>The Db::get_byteswapped method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::get_byteswapped method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::get_byteswapped method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_get_type.html b/libdb/docs/api_cxx/db_get_type.html
new file mode 100644
index 0000000..d716db5
--- /dev/null
+++ b/libdb/docs/api_cxx/db_get_type.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::get_type</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::get_type(DBTYPE *type);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::get_type method
+stores the type of the underlying access method (and file format) into
+the memory referenced by <b>type</b>.
+The returned value is one of DB_BTREE, DB_HASH,
+DB_RECNO, or DB_QUEUE. This value may be used to
+determine the type of the database after a return from <a href="../api_cxx/db_open.html">Db::open</a>
+with the <b>type</b> argument set to DB_UNKNOWN.
+<p>The Db::get_type interface may not be called before the <a href="../api_cxx/db_open.html">Db::open</a>
+interface has been called.
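+<p>For example (a minimal sketch; the handle is assumed to have been opened,
+possibly with the <b>type</b> argument set to DB_UNKNOWN):
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+is_btree(Db *db)
+{
+    DBTYPE type;
+    (void)db-&gt;get_type(&amp;type);
+    return (type == DB_BTREE);
+}
+</pre></blockquote>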
+<h1>Errors</h1>
+<p>The Db::get_type method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::get_type method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::get_type method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_join.html b/libdb/docs/api_cxx/db_join.html
new file mode 100644
index 0000000..1b8871c
--- /dev/null
+++ b/libdb/docs/api_cxx/db_join.html
@@ -0,0 +1,124 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::join</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::join method creates a specialized cursor for use in performing
+equality or natural joins on secondary indices. For information on how
+to organize your data to use this functionality, see
+<a href="../ref/am/join.html">Equality join</a>.
+<p>The <a href="../api_cxx/db_class.html">Db</a> handle on which Db::join is called must be the primary
+database, which is keyed by the data values found in entries in the
+<b>curslist</b>.
+<p>The <b>curslist</b> argument contains a NULL terminated array of cursors.
+Each cursor must have been initialized to refer to the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_cxx/dbc_get.html">Dbc::get</a> call with the <a href="../api_cxx/dbc_get.html#DB_SET">DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+refers to the least number of data items to the one that refers to the
+most. By default, Db::join does this sort on behalf of its caller.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_NOSORT">DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items to which they
+refer. If the data are structured so that cursors with many data items
+also share many common elements, higher performance will result from
+listing those cursors before cursors with fewer data items; that is, a
+sort order other than the default. The DB_JOIN_NOSORT flag
+permits applications to perform join optimization prior to calling
+Db::join.
+</dl>
+<p>A newly created cursor is returned in the memory location to which
+<b>dbcp</b> refers. It
+supports only the <a href="../api_cxx/dbc_get.html">Dbc::get</a> and <a href="../api_cxx/dbc_close.html">Dbc::close</a> cursor
+functions:
+<p><dl compact>
+<p><dt><a href="../api_cxx/dbc_get.html">Dbc::get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> was initialized. Any data value that appears in all
+items specified by the <b>curslist</b> argument is then used as a key
+into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all the cursors as a lookup key for
+the <b>primary</b>, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p><dt><a href="../api_cxx/dbc_close.html">Dbc::close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction-protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The Db::join method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
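+<p>The following is an illustrative sketch only (the secondary index handles and
+key strings are hypothetical, and error paths are not fully cleaned up); it
+intersects two secondary indices and iterates over the matching primary
+records:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+join_example(Db *primary, Db *by_color, Db *by_size)
+{
+    Dbc *curs[3], *join_curs;
+    Dbt key, data;
+    int ret;
+
+    /* Position one cursor per secondary index on the wanted key. */
+    by_color-&gt;cursor(NULL, &amp;curs[0], 0);
+    key.set_data((void *)"red");
+    key.set_size(sizeof("red"));
+    if ((ret = curs[0]-&gt;get(&amp;key, &amp;data, DB_SET)) != 0)
+        return (ret);
+
+    by_size-&gt;cursor(NULL, &amp;curs[1], 0);
+    key.set_data((void *)"large");
+    key.set_size(sizeof("large"));
+    if ((ret = curs[1]-&gt;get(&amp;key, &amp;data, DB_SET)) != 0)
+        return (ret);
+
+    curs[2] = NULL;                     /* NULL-terminate the cursor array */
+
+    if ((ret = primary-&gt;join(curs, &amp;join_curs, 0)) != 0)
+        return (ret);
+    /* Each Dbc::get on the join cursor returns one matching record. */
+    while ((ret = join_curs-&gt;get(&amp;key, &amp;data, 0)) == 0)
+        ;                               /* process key/data here */
+
+    join_curs-&gt;close();
+    curs[0]-&gt;close();
+    curs[1]-&gt;close();
+    return (ret == DB_NOTFOUND ? 0 : ret);
+}
+</pre></blockquote>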
+<h1>Errors</h1>
+<p>The Db::join method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Cursor functions other than <a href="../api_cxx/dbc_get.html">Dbc::get</a> or <a href="../api_cxx/dbc_close.html">Dbc::close</a> were
+called.
+</dl>
+<p>The Db::join method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::join method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_key_range.html b/libdb/docs/api_cxx/db_key_range.html
new file mode 100644
index 0000000..0a35e8d
--- /dev/null
+++ b/libdb/docs/api_cxx/db_key_range.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::key_range</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::key_range(DbTxn *txnid,
+ Dbt *key, DB_KEY_RANGE *key_range, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::key_range method returns an estimate of the proportion of keys
+that are less than, equal to, and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double: <b>less</b>, <b>equal</b>,
+and <b>greater</b>. Values are in the range of 0 to 1; for example,
+if the field <b>less</b> is 0.05, 5% of the keys in the database are
+less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key, and will be non-zero otherwise.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>;
+otherwise, NULL.
+The Db::key_range method does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db::key_range method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
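+<p>For example (an illustrative sketch; the key string is hypothetical, and the
+handle is assumed to be an open Btree database):
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;string.h&gt;
+#include &lt;iostream&gt;
+
+void
+print_position(Db *db, const char *k)
+{
+    Dbt key((void *)k, (u_int32_t)strlen(k) + 1);
+    DB_KEY_RANGE kr;
+    if (db-&gt;key_range(NULL, &amp;key, &amp;kr, 0) == 0)
+        std::cout &lt;&lt; kr.less &lt;&lt; " of the keys sort before "
+            &lt;&lt; k &lt;&lt; std::endl;
+}
+</pre></blockquote>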
+<h1>Errors</h1>
+<p>The Db::key_range method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db::key_range method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db::key_range method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::key_range method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_list.html b/libdb/docs/api_cxx/db_list.html
new file mode 100644
index 0000000..3905cce
--- /dev/null
+++ b/libdb/docs/api_cxx/db_list.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Berkeley DB: Databases and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Databases and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Databases and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/db_associate.html">Db::associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><a href="../api_cxx/db_close.html">Db::close</a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_cxx/db_cursor.html">Db::cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_cxx/db_del.html">Db::del</a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_cxx/db_err.html">Db::err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_cxx/db_err.html">Db::errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_cxx/db_fd.html">Db::fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><a href="../api_cxx/db_get.html">Db::get</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_cxx/db_get_type.html">Db::get_type</a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_cxx/db_join.html">Db::join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_cxx/db_key_range.html">Db::key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><a href="../api_cxx/db_open.html">Db::open</a></td><td>Open a database</td></tr>
+<tr><td><a href="../api_cxx/db_get.html">Db::pget</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_cxx/db_put.html">Db::put</a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_cxx/db_remove.html">Db::remove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_cxx/db_rename.html">Db::rename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_cxx/db_set_alloc.html">Db::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../api_cxx/db_set_append_recno.html">Db::set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><a href="../api_cxx/db_set_cache_priority.html">Db::set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><a href="../api_cxx/db_set_encrypt.html">Db::set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_cxx/db_set_feedback.html">Db::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_cxx/db_set_flags.html">Db::set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><a href="../api_cxx/db_stat.html">Db::stat</a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_cxx/db_sync.html">Db::sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../api_cxx/db_truncate.html">Db::truncate</a></td><td>Empty a database</td></tr>
+<tr><td><a href="../api_cxx/db_upgrade.html">Db::upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><a href="../api_cxx/db_verify.html">Db::verify</a></td><td>Verify/salvage a database</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_open.html b/libdb/docs/api_cxx/db_open.html
new file mode 100644
index 0000000..140ddf1
--- /dev/null
+++ b/libdb/docs/api_cxx/db_open.html
@@ -0,0 +1,160 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::open</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::open(DbTxn *txnid, const char *file,
+ const char *database, DBTYPE type, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue, and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally backed by a
+flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs; see <a href="../api_cxx/dbt_class.html">Dbt</a> for more information.
+<p>The Db::open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and
+writing. The <b>file</b> argument is used as the name of an underlying
+file that will be used to back the database. The <b>database</b>
+argument is optional, and allows applications to have multiple databases
+in a single file. Although no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b>
+name. Further, the <b>database</b> argument is not supported by the
+Queue format. Finally, when opening multiple databases in the same
+physical file, it is important to consider locking and memory cache
+issues; see <a href="../ref/am/opensub.html">Opening multiple databases
+in a single file</a> for more information.
+<p>In-memory databases never intended to be preserved on disk may be
+created by setting both the <b>file</b> and <b>database</b> arguments
+to NULL. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type DBTYPE, and must be set to one of <a name="DB_BTREE">DB_BTREE</a>,
+<a name="DB_HASH">DB_HASH</a>, <a name="DB_QUEUE">DB_QUEUE</a>,
+<a name="DB_RECNO">DB_RECNO</a>, or <a name="DB_UNKNOWN">DB_UNKNOWN</a>. If
+<b>type</b> is DB_UNKNOWN, the database must already exist
+and Db::open will automatically determine its type. The
+<a href="../api_cxx/db_get_type.html">Db::get_type</a> method may be used to determine the underlying type of
+databases opened using DB_UNKNOWN.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the Db::open call within a transaction. If the call succeeds,
+the open operation will be recoverable. If the call fails, no database will
+have been created.
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create the database. If the database does not already exist and the DB_CREATE
+flag is not specified, the Db::open will fail.
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Support dirty reads; that is, read operations on the database may request the
+return of modified but not yet committed data.
+<p><dt><a name="DB_EXCL">DB_EXCL</a><dd>Return an error if the database already exists. The DB_EXCL flag is
+only meaningful when specified with the DB_CREATE flag.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the database
+will fail, regardless of the actual permissions of any underlying files.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_cxx/db_class.html">Db</a> handle returned by Db::open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+<p><dt><a name="DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous
+databases it might have held. Underlying filesystem primitives are used
+to implement this flag. For this reason, it is applicable only to the
+file and cannot be used to discard databases within a file.
+<p>The DB_TRUNCATE flag cannot be transaction-protected, and it is
+an error to specify it in a transaction-protected environment.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+the database open are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, the database open will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>Calling Db::open is a reasonably expensive operation, and maintaining
+a set of open databases will normally be preferable to repeatedly opening
+and closing the database for each new query.
+<p>The Db::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+If Db::open fails, the <a href="../api_cxx/db_close.html">Db::close</a> method should be called to discard the
+<a href="../api_cxx/db_class.html">Db</a> handle.
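+<p>The following is an illustrative sketch only (the file name, flags, and mode
+are hypothetical); it creates a free-threaded Btree database outside any
+database environment:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;iostream&gt;
+
+int
+open_db(Db **dbpp)
+{
+    Db *db = new Db(NULL, 0);       /* no environment, no special flags */
+    try {
+        db-&gt;open(NULL, "access.db", NULL,
+            DB_BTREE, DB_CREATE | DB_THREAD, 0664);
+    } catch (DbException &amp;e) {
+        std::cerr &lt;&lt; "open: " &lt;&lt; e.what() &lt;&lt; std::endl;
+        db-&gt;close(0);               /* discard the handle on failure */
+        delete db;
+        return (1);
+    }
+    *dbpp = db;
+    return (0);
+}
+</pre></blockquote>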
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db::open is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to Db::open are
+NULL, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create temporary backing files.
+</dl>
+<h1>Errors</h1>
+<p>The Db::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the database exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified
+(for example, an unknown database type, page size, hash function, pad byte,
+or byte order), or a flag value or parameter that is incompatible with the
+specified database.
+<p>
+The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified and fast mutexes are not
+available for this architecture.
+<p>The <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to Db::open, but was not
+specified to the <a href="../api_cxx/env_open.html">DbEnv::open</a> call for the environment in which the
+<a href="../api_cxx/db_class.html">Db</a> handle was created.
+<p>A backing flat text file was specified together with either the <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a>
+flag or a database environment that supports transaction
+processing.
+<p><dt>ENOENT<dd>A nonexistent <b>re_source</b> file was specified.
+</dl>
+<p>The Db::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::open method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_put.html b/libdb/docs/api_cxx/db_put.html
new file mode 100644
index 0000000..30caf92
--- /dev/null
+++ b/libdb/docs/api_cxx/db_put.html
@@ -0,0 +1,110 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::put</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::put(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::put method stores key/data pairs in the database. The default
+behavior of the Db::put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the Db::put method adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_APPEND">DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the DB_APPEND flag. If a transaction enclosing a
+Db::put operation with the DB_APPEND flag aborts, the
+record number may be decremented (and later reallocated by a subsequent
+DB_APPEND operation) by the Recno access method, but will not be
+decremented or reallocated by the Queue access method.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is
+returned. The DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to Db::put with the DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the Db::put call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>
+Otherwise, the Db::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
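+<p>The following is an illustrative sketch only (the key and data strings are
+hypothetical, and the handle is assumed to be open); it stores a pair unless
+the key already exists:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;string.h&gt;
+
+int
+store(Db *db, const char *k, const char *v)
+{
+    Dbt key((void *)k, (u_int32_t)strlen(k) + 1);
+    Dbt data((void *)v, (u_int32_t)strlen(v) + 1);
+
+    int ret = db-&gt;put(NULL, &amp;key, &amp;data, DB_NOOVERWRITE);
+    if (ret == DB_KEYEXIST)
+        db-&gt;err(ret, "key %s already present", k);
+    return (ret);
+}
+</pre></blockquote>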
+<h1>Errors</h1>
+<p>The Db::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db::put method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::put method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_remove.html b/libdb/docs/api_cxx/db_remove.html
new file mode 100644
index 0000000..25fdc28
--- /dev/null
+++ b/libdb/docs/api_cxx/db_remove.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::remove</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::remove(const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::remove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_cxx/db_class.html">Db</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again after Db::remove is
+called, regardless of its return.
+<p>The Db::remove method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
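+<p>As an illustrative sketch (the file name is hypothetical), a database file
+might be removed using a handle that has never been opened:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+remove_db(const char *fname)
+{
+    Db db(NULL, 0);                 /* handle used only for the remove */
+    return (db.remove(fname, NULL, 0));
+}
+</pre></blockquote>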
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db::remove is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db::remove method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the Db::remove method will
+fail and
+either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The Db::remove method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::remove method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_rename.html b/libdb/docs/api_cxx/db_rename.html
new file mode 100644
index 0000000..0960e32
--- /dev/null
+++ b/libdb/docs/api_cxx/db_rename.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::rename</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::rename(const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::rename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db::rename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_cxx/db_class.html">Db</a> handle may not be accessed again after Db::rename is
+called, regardless of its return.
+<p>The Db::rename method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
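+<p>As an illustration (the file names are placeholders, and error handling
+is reduced to the return value), a standalone database file could be
+renamed as follows; the <a href="../api_cxx/db_class.html">Db</a> handle is created only for this call
+because it may not be used again afterward:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+rename_addressbook_db(void)
+{
+        /* DB_CXX_NO_EXCEPTIONS makes the handle report errors as return values. */
+        Db db(NULL, DB_CXX_NO_EXCEPTIONS);
+        return (db.rename("addressbook.db", NULL, "addressbook.db.old", 0));
+}
+</pre>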
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db::rename is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db::rename method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the Db::rename method will
+fail and either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The Db::rename method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::rename method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_alloc.html b/libdb/docs/api_cxx/db_set_alloc.html
new file mode 100644
index 0000000..5b9769c
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_alloc.html
@@ -0,0 +1,93 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_alloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_alloc</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+ typedef void *(*db_realloc_fcn_type)(void *, size_t);
+ typedef void (*db_free_fcn_type)(void *);
+};
+<p>
+int
+Db::set_alloc(db_malloc_fcn_type app_malloc,
+ db_realloc_fcn_type app_realloc,
+ db_free_fcn_type app_free);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation functions used by the <a href="../api_cxx/env_class.html">DbEnv</a> and <a href="../api_cxx/db_class.html">Db</a>
+methods to allocate or free memory owned by the application.
+<p>There are a number of interfaces in Berkeley DB where memory is allocated by
+the library and then given to the application. For example, the
+<a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_cxx/dbt_class.html">Dbt</a> object,
+will cause the <a href="../api_cxx/db_class.html">Db</a> methods to allocate and reallocate memory
+which then becomes the responsibility of the calling application. (See
+<a href="../api_cxx/dbt_class.html">Dbt</a> for more information.) Other examples are the Berkeley DB
+interfaces which return statistical information to the application:
+<a href="../api_cxx/db_stat.html">Db::stat</a>, <a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>, <a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>, <a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>, and <a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>. There is
+one interface in the Berkeley DB where memory is allocated by the application
+and then given to the library: <a href="../api_cxx/db_associate.html">Db::associate</a>.
+<p>On systems in which there may be multiple library versions of the
+standard allocation routines (notably Windows NT), transferring memory
+between the library and the application will fail because the Berkeley DB
+library allocates memory from a different heap than the application uses
+to free it. To avoid this problem, the <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> and
+Db::set_alloc methods can be used to pass Berkeley DB references to the
+application's allocation routines.
+<p>It is not an error to specify only one or two of the possible allocation
+function arguments to these interfaces; however, in that case the
+specified interfaces must be compatible with the standard library
+interfaces, as they will be used together. The methods specified
+must match the calling conventions of the ANSI C X3.159-1989 (ANSI C) library routines
+of the same name.
+<p>Because databases opened within Berkeley DB environments use the allocation
+interfaces specified to the environment, it is an error to attempt to
+set those interfaces in a database created within an environment.
+<p>The Db::set_alloc interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<p>The Db::set_alloc method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
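+<p>A minimal sketch for a standalone database (databases created within an
+environment must use <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> instead): the C library
+allocators are passed through so that memory handed back by the library,
+for example under <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, comes from the same heap the
+application will later free it to:
+<p><pre>
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+configure_allocators(Db *db)
+{
+        /* Must be called before Db::open. */
+        return (db->set_alloc(malloc, realloc, free));
+}
+</pre>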
+<h1>Errors</h1>
+<p>The Db::set_alloc method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called in a database environment.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_alloc method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_alloc method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_append_recno.html b/libdb/docs/api_cxx/db_set_append_recno.html
new file mode 100644
index 0000000..3e2af95
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_append_recno.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_append_recno</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_append_recno(
+ int (*db_append_recno_fcn)(DB *dbp, Dbt *data, db_recno_t recno));
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_cxx/db_put.html#DB_APPEND">DB_APPEND</a> option of the <a href="../api_cxx/db_put.html">Db::put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback method is specified using the
+Db::set_append_recno method, it will be called after the record number
+has been selected, but before the data has been stored.
+The callback function must return 0 on success and <b>errno</b> or
+a value outside of the Berkeley DB error name space on failure.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle; the data <a href="../api_cxx/dbt_class.html">Dbt</a> to be stored; and the
+selected record number. The called function may then modify the data
+<a href="../api_cxx/dbt_class.html">Dbt</a>.
+<p>If the callback function needs to allocate memory for the <b>data</b>
+field, the <b>flags</b> field of the returned <a href="../api_cxx/dbt_class.html">Dbt</a> should be
+set to DB_DBT_APPMALLOC, which indicates that Berkeley DB should free
+the memory when it is done with it.
+<p>The Db::set_append_recno method configures operations performed using the specified
+<a href="../api_cxx/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db::set_append_recno interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<p>The Db::set_append_recno method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
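+<p>As a sketch (the record layout is an assumption made for the example,
+not something the library requires), the following callback stamps the
+selected record number into the first bytes of the data item before it
+is stored:
+<p><pre>
+#include &lt;errno.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+stamp_recno(DB *dbp, Dbt *data, db_recno_t recno)
+{
+        /* The example assumes the data item begins with a db_recno_t. */
+        if (data->get_size() &lt; sizeof(recno))
+                return (EINVAL);
+        memcpy(data->get_data(), &amp;recno, sizeof(recno));
+        return (0);
+}
+</pre>
+<p>The callback would be registered with "db.set_append_recno(stamp_recno)"
+before <a href="../api_cxx/db_open.html">Db::open</a> is called.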
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_bt_compare.html b/libdb/docs/api_cxx/db_set_bt_compare.html
new file mode 100644
index 0000000..4337971
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_bt_compare.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_bt_compare</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*bt_compare_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_bt_compare(bt_compare_fcn_type bt_compare_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_cxx/dbt_class.html">Dbt</a> representing the
+application supplied key; the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be used for the purposes of
+this comparison, and no particular alignment of the memory to which
+the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys.
+<p>The Db::set_bt_compare method configures operations performed using the specified
+<a href="../api_cxx/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db::set_bt_compare interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_bt_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db::set_bt_compare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
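+<p>For example, the following comparison routine (a sketch assuming every
+key in the database holds a native <b>int</b>) sorts keys numerically
+rather than lexically; the key bytes are copied out before use because
+no particular alignment may be assumed:
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+extern "C" int
+compare_int(DB *dbp, const DBT *a, const DBT *b)
+{
+        int ai, bi;
+
+        memcpy(&amp;ai, a->data, sizeof(int));
+        memcpy(&amp;bi, b->data, sizeof(int));
+        return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+</pre>
+<p>The routine would be installed with "db.set_bt_compare(compare_int)"
+before the database is first created.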
+<h1>Errors</h1>
+<p>The Db::set_bt_compare method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_bt_compare method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_bt_compare method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_bt_minkey.html b/libdb/docs/api_cxx/db_set_bt_minkey.html
new file mode 100644
index 0000000..0dfd729
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_bt_minkey.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_bt_minkey</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_bt_minkey(u_int32_t bt_minkey);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of key/data pairs intended to be stored on any
+single Btree leaf page.
+<p>This value is used to determine if key or data items will be stored on
+overflow pages instead of Btree leaf pages. For more information on
+the specific algorithm used, see <a href="../ref/am_conf/bt_minkey.html">Minimum keys per page</a>. The <b>bt_minkey</b> value specified must
+be at least 2; if <b>bt_minkey</b> is not explicitly set, a value of
+2 is used.
+<p>The Db::set_bt_minkey method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_bt_minkey interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_bt_minkey will
+be ignored.
+<p>The Db::set_bt_minkey method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_bt_minkey method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_bt_minkey method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_bt_minkey method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_bt_prefix.html b/libdb/docs/api_cxx/db_set_bt_prefix.html
new file mode 100644
index 0000000..1149caf
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_bt_prefix.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_bt_prefix</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef size_t (*bt_prefix_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_bt_prefix(bt_prefix_fcn_type bt_prefix_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this determination, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Berkeley DB Reference Guide for more details
+about how this works. The usefulness of this is data-dependent, but
+can produce significantly reduced tree sizes and search times in some
+data sets.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The Db::set_bt_prefix method configures operations performed using the specified
+<a href="../api_cxx/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db::set_bt_prefix interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_bt_prefix must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db::set_bt_prefix method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
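+<p>A sketch of a prefix routine for keys that sort lexically (the
+application must also have specified a key comparison function, as noted
+above); it returns the number of bytes of the second key needed to
+determine its ordering with respect to the first:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+extern "C" size_t
+prefix_bytes(DB *dbp, const DBT *a, const DBT *b)
+{
+        size_t cnt, len;
+        unsigned char *p1, *p2;
+
+        cnt = 1;
+        len = a->size &gt; b->size ? b->size : a->size;
+        for (p1 = (unsigned char *)a->data,
+            p2 = (unsigned char *)b->data; len--; ++p1, ++p2, ++cnt)
+                if (*p1 != *p2)
+                        return (cnt);
+        /* One key is a prefix of the other; collate the longer after. */
+        if (a->size &lt; b->size)
+                return (a->size + 1);
+        if (b->size &lt; a->size)
+                return (b->size + 1);
+        return (b->size);
+}
+</pre>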
+<h1>Errors</h1>
+<p>The Db::set_bt_prefix method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_bt_prefix method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_bt_prefix method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_cache_priority.html b/libdb/docs/api_cxx/db_set_cache_priority.html
new file mode 100644
index 0000000..eefc6c4
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_cache_priority.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_cache_priority</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_cache_priority</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_cache_priority(DB_CACHE_PRIORITY priority);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the cache priority for pages from the specified database. The
+priority of a page biases the replacement algorithm to be more or less
+likely to discard a page when space is needed in the buffer pool. The
+bias is temporary, and pages will eventually be discarded if they are
+not referenced again. The Db::set_cache_priority interface is
+only advisory, and does not guarantee pages will be treated in a specific
+way.
+<p>The <b>priority</b> argument must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_PRIORITY_VERY_LOW">DB_PRIORITY_VERY_LOW</a><dd>The lowest priority: pages are the most likely to be discarded.
+<dt><a name="DB_PRIORITY_LOW">DB_PRIORITY_LOW</a><dd>The next lowest priority.
+<dt><a name="DB_PRIORITY_DEFAULT">DB_PRIORITY_DEFAULT</a><dd>The default priority.
+<dt><a name="DB_PRIORITY_HIGH">DB_PRIORITY_HIGH</a><dd>The next highest priority.
+<dt><a name="DB_PRIORITY_VERY_HIGH">DB_PRIORITY_VERY_HIGH</a><dd>The highest priority: pages are the least likely to be discarded.
+</dl>
+<p>The Db::set_cache_priority method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_cache_priority interface may be called at any time during the life of
+the application.
+<p>The Db::set_cache_priority method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_cache_priority method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_cache_priority method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_cachesize.html b/libdb/docs/api_cxx/db_set_cachesize.html
new file mode 100644
index 0000000..a138fbd
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_cachesize.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>Because databases opened within Berkeley DB environments use the cache
+specified to the environment, it is an error to attempt to set a cache
+in a database created within an environment.
+<p>The Db::set_cachesize interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<p>The Db::set_cachesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
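+<p>A minimal sketch, assuming a standalone database that should use a 64MB
+cache allocated as a single contiguous region (databases opened within an
+environment must instead size the cache through the environment):
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_cache(Db *db)
+{
+        /* 0 gigabytes plus 64MB in one cache region; call before Db::open. */
+        return (db->set_cachesize(0, 64 * 1024 * 1024, 1));
+}
+</pre>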
+<h1>Errors</h1>
+<p>The Db::set_cachesize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called in a database environment.
+<p>Called after
+<a href="../api_cxx/db_open.html">Db::open</a>
+was called.
+</dl>
+<p>The Db::set_cachesize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_cachesize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_dup_compare.html b/libdb/docs/api_cxx/db_set_dup_compare.html
new file mode 100644
index 0000000..3cb67b6
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_dup_compare.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_dup_compare</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*dup_compare_fcn_type)(DB *, const DBT *, const DBT *);
+};
+int
+Db::set_dup_compare(dup_compare_fcn_type dup_compare_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_cxx/dbt_class.html">Dbt</a> representing the
+application's data item; the second is the current tree's data item.
+Calling Db::set_dup_compare implies calling <a href="../api_cxx/db_set_flags.html">Db::set_flags</a>
+with the <a href="../api_cxx/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_cxx/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this comparison, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+<p>The Db::set_dup_compare interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_dup_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db::set_dup_compare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
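+<p>The callback has the same shape as a Btree key comparison function. As
+a sketch (the leading 32-bit timestamp is an invented layout, used here
+only for illustration), duplicate data items could be kept in timestamp
+order:
+<p><pre>
+#include &lt;string.h&gt;
+#include &lt;db_cxx.h&gt;
+
+extern "C" int
+compare_by_timestamp(DB *dbp, const DBT *a, const DBT *b)
+{
+        u_int32_t ta, tb;
+
+        /* Copy the values out; the data items may not be aligned. */
+        memcpy(&amp;ta, a->data, sizeof(ta));
+        memcpy(&amp;tb, b->data, sizeof(tb));
+        return (ta &lt; tb ? -1 : (ta &gt; tb ? 1 : 0));
+}
+</pre>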
+<h1>Errors</h1>
+<p>The Db::set_dup_compare method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::set_dup_compare method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_dup_compare method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_encrypt.html b/libdb/docs/api_cxx/db_set_encrypt.html
new file mode 100644
index 0000000..15b5ab8
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_encrypt.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_encrypt(const char *passwd, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_cxx/env_class.html">DbEnv</a> and <a href="../api_cxx/db_class.html">Db</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_ENCRYPT_AES">DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>Because databases opened within Berkeley DB environments use the password
+specified to the environment, it is an error to attempt to set a
+password in a database created within an environment.
+<p>The Db::set_encrypt interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<p>The Db::set_encrypt method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
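+<p>A minimal sketch for a standalone database (the passphrase is a
+placeholder): the password is set with DB_ENCRYPT_AES, and the database
+itself is then marked for encryption with the <a href="../api_cxx/db_set_flags.html#DB_ENCRYPT">DB_ENCRYPT</a> flag,
+both before <a href="../api_cxx/db_open.html">Db::open</a>:
+<p><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_encryption(Db *db)
+{
+        int ret;
+
+        if ((ret = db->set_encrypt("example passphrase", DB_ENCRYPT_AES)) != 0)
+                return (ret);
+        return (db->set_flags(DB_ENCRYPT));
+}
+</pre>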
+<h1>Errors</h1>
+<p>The Db::set_encrypt method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_cxx/db_open.html">Db::open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The Db::set_encrypt method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_encrypt method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_errcall.html b/libdb/docs/api_cxx/db_set_errcall.html
new file mode 100644
index 0000000..28e9474
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_errcall.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errcall(
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> and Db::set_errcall methods are used to
+enhance the mechanism for reporting error messages to the application.
+In some cases, when an error occurs, Berkeley DB will call
+<b>db_errcall_fcn</b> with additional error information. The function
+must be defined with two arguments; the first will be the prefix string
+(as previously set by <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>);
+the second will be the error message string. It is up to the
+<b>db_errcall_fcn</b> method to display the error message in an
+appropriate manner.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> and
+<a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a> methods to display the additional information via
+an output stream, or the <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a> or
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> methods to display the additional information via a C
+library FILE *. You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errcall method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method.
+<p>The Db::set_errcall interface may be called at any time during the life of
+the application.
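+<p>For example, a callback of the following form (a sketch; the destination
+and fallback prefix are the application's choice) routes the additional
+error text to the standard error stream:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+report_db_error(const char *errpfx, char *msg)
+{
+        /* errpfx is NULL when no prefix has been set. */
+        fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
+}
+</pre>
+<p>The callback would be installed with "db.set_errcall(report_db_error)",
+typically together with <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>.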
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_errfile.html b/libdb/docs/api_cxx/db_set_errfile.html
new file mode 100644
index 0000000..02938b2
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_errfile.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_errfile</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errfile(FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> and <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a> methods are used to
+enhance the mechanism for reporting error messages to the application
+by setting a C library FILE * to be used for displaying additional Berkeley DB
+error messages. In some cases, when an error occurs, Berkeley DB will output
+an additional error message to the specified file reference.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errfile method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> method.
+<p>The Db::set_errfile interface may be called at any time during the life of
+the application.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_error_stream.html b/libdb/docs/api_cxx/db_set_error_stream.html
new file mode 100644
index 0000000..76f8dec
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_error_stream.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_error_stream</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_error_stream(class ostream*);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+<b>errno</b> value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> and Db::set_error_stream methods
+are used to enhance the mechanism for reporting error messages to the
+application by setting the C++ ostream used for displaying additional
+Berkeley DB error messages. In some cases, when an error occurs, Berkeley DB will
+output an additional error message to the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> method to display
+the additional information via a C library FILE *, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_error_stream method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_errpfx.html b/libdb/docs/api_cxx/db_set_errpfx.html
new file mode 100644
index 0000000..595e7eb
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_errpfx.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void Db::set_errpfx(const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The Db::set_errpfx and <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> methods do not copy
+the memory to which the <b>errpfx</b> argument refers; rather, they
+maintain a reference to it. Although this allows applications to modify
+the error message prefix at any time (without repeatedly calling the
+interfaces), it means the memory must be maintained until the handle is
+closed.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_errpfx method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> method.
+<p>The Db::set_errpfx interface may be called at any time during the life of
+the application.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_feedback.html b/libdb/docs/api_cxx/db_set_feedback.html
new file mode 100644
index 0000000..2c0c1b3
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_feedback.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_feedback(
+ void (*db_feedback_fcn)(DB *dbp, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The Db::set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle; the second a flag value; and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_UPGRADE">DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="DB_VERIFY">DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The Db::set_feedback interface may be called at any time during the life of
+the application.
+<p>The Db::set_feedback method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
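+<p>A sketch of such a callback, printing a simple progress line during
+long-running verify or upgrade operations:
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+show_progress(DB *dbp, int opcode, int pct)
+{
+        const char *op;
+
+        op = opcode == DB_VERIFY ? "verify" :
+            opcode == DB_UPGRADE ? "upgrade" : "operation";
+        printf("%s: %d%% complete\n", op, pct);
+}
+</pre>
+<p>The callback would be registered with "db.set_feedback(show_progress)".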
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_flags.html b/libdb/docs/api_cxx/db_set_flags.html
new file mode 100644
index 0000000..34b0edd
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_flags.html
@@ -0,0 +1,228 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_flags(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Calling Db::set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<h3>General</h3>
+<p>The following flags may be specified for any Berkeley DB access method:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_CHKSUM_SHA1">DB_CHKSUM_SHA1</a><dd>Do checksum verification of pages read into the cache from the backing
+filestore, using the SHA1 Secure Hash Algorithm.
+<p>Calling Db::set_flags with the DB_CHKSUM_SHA1 flag only affects the
+specified <a href="../api_cxx/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_CHKSUM_SHA1
+flag
+will be ignored.
+If creating additional databases in a file, the checksum behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+<a name="4"><!--meow--></a>
+<p><dt><a name="DB_ENCRYPT">DB_ENCRYPT</a><dd>Encrypt the database using the cryptographic password specified to the
+<a href="../api_cxx/env_set_encrypt.html">DbEnv::set_encrypt</a> or <a href="../api_cxx/db_set_encrypt.html">Db::set_encrypt</a> methods.
+<p>Calling Db::set_flags with the DB_ENCRYPT flag only affects the
+specified <a href="../api_cxx/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_ENCRYPT
+flag
+must be the same as the existing database or an error
+will be returned.
+If creating additional databases in a file, the encryption behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+</dl>
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<a name="5"><!--meow--></a>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both DB_DUP
+and DB_RECNUM.
+<p>Calling Db::set_flags with the DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="6"><!--meow--></a>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a> method, a default lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p>Calling Db::set_flags with the DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="7"><!--meow--></a>
+<p><dt><a name="DB_RECNUM">DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the <a href="../api_cxx/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag to the <a href="../api_cxx/db_get.html">Db::get</a>
+and <a href="../api_cxx/dbc_get.html">Dbc::get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the DB_RENUMBER flag in the
+Recno access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are
+stored. In addition, the entire tree must be locked during both
+insertions and deletions, effectively single-threading the tree for
+those operations. Specifying DB_RECNUM can result in serious
+performance degradation for some applications and data sets.
+<p>It is an error to specify both DB_DUP and DB_RECNUM.
+<p>Calling Db::set_flags with the DB_RECNUM flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_RECNUM
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="8"><!--meow--></a><a name="9"><!--meow--></a>
+<p><dt><a name="DB_REVSPLITOFF">DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands; that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of two pages is far
+smaller than that in a database of 100 pages, so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+<p>Calling Db::set_flags with the DB_REVSPLITOFF flag only affects the
+specified <a href="../api_cxx/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="DB_DUP">DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both DB_DUP
+and DB_RECNUM.
+<p>Calling Db::set_flags with the DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function.
+If the application does not specify a comparison function using the
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a> method, a default lexical comparison will be
+used.
+It is an error to specify both DB_DUPSORT and DB_RECNUM.
+<p>Calling Db::set_flags with the DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<a name="10"><!--meow--></a>
+<p><dt><a name="DB_RENUMBER">DB_RENUMBER</a><dd>Specifying the DB_RENUMBER flag causes the logical record
+numbers to be mutable, and change as records are added to and deleted
+from the database. For example, the deletion of record number 4 causes
+records numbered 5 and greater to be renumbered downward by one. If a
+cursor was positioned to record number 4 before the deletion, it will
+refer to the new record number 4, if any such record exists, after the
+deletion. If a cursor was positioned after record number 4 before the
+deletion, it will be shifted downward one logical record, continuing to
+refer to the same record as it did before.
+<p>Using the <a href="../api_cxx/db_put.html">Db::put</a> or <a href="../api_cxx/dbc_put.html">Dbc::put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by one.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by one. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward one logical record, continuing to refer to the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p>Calling Db::set_flags with the DB_RENUMBER flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_cxx/db_open.html">Db::open</a> is called, the DB_RENUMBER
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="11"><!--meow--></a>
+<p><dt><a name="DB_SNAPSHOT">DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read
+in its entirety when <a href="../api_cxx/db_open.html">Db::open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+<p>Calling Db::set_flags with the DB_SNAPSHOT flag only affects the
+specified <a href="../api_cxx/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<p>The Db::set_flags interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<p>The Db::set_flags method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
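+<p>For example, to permit sorted duplicate data items in a Btree database, an
+application might configure the handle as follows before calling
+<a href="../api_cxx/db_open.html">Db::open</a> (a minimal sketch):
+<p><blockquote><pre>Db db(0, 0);
+/* Flags are additive; later calls could OR in further values. */
+db.set_flags(DB_DUPSORT);</pre></blockquote>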
+<h1>Errors</h1>
+<p>The Db::set_flags method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The <a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a> method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the <a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a> method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_h_ffactor.html b/libdb/docs/api_cxx/db_set_h_ffactor.html
new file mode 100644
index 0000000..b7fc75e
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_h_ffactor.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_h_ffactor</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_h_ffactor(u_int32_t h_ffactor);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+data set, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to the following:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
+<p>The Db::set_h_ffactor method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_h_ffactor interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_h_ffactor will
+be ignored.
+<p>The Db::set_h_ffactor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
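+<p>As an illustrative sketch using assumed sizes (8KB pages, 24-byte keys, and
+128-byte data items), the rule above yields (8192 - 32) / (24 + 128 + 8) = 51:
+<p><blockquote><pre>Db db(0, 0);
+db.set_pagesize(8192);
+db.set_h_ffactor(51);      /* (8192 - 32) / (24 + 128 + 8) */
+db.set_h_nelem(100000);    /* estimate of the final number of keys */</pre></blockquote>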
+<h1>Errors</h1>
+<p>The Db::set_h_ffactor method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_h_ffactor method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_h_ffactor method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_h_hash.html b/libdb/docs/api_cxx/db_set_h_hash.html
new file mode 100644
index 0000000..aa7a2ac
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_h_hash.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_h_hash</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef u_int32_t (*h_hash_fcn_type)
+ (DB *, const void *bytes, u_int32_t length);
+};
+int
+Db::set_h_hash(h_hash_fcn_type h_hash_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Because no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash functions must
+take a pointer to a byte string and a length as arguments, and return a
+value of type
+<b>u_int32_t</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_cxx/db_open.html">Db::open</a> will attempt to determine
+whether the hash method specified is the same as the one with which the
+database was created, and will fail if it detects that it is not.
+<p>The Db::set_h_hash method configures operations performed using the specified
+<a href="../api_cxx/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db::set_h_hash interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_h_hash must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db::set_h_hash method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
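+<p>A minimal sketch of a user-defined hash function (the <b>my_hash</b> name and
+the multiply-by-33 scheme are illustrative only):
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+extern "C" u_int32_t
+my_hash(DB *dbp, const void *bytes, u_int32_t length)
+{
+    const unsigned char *p = (const unsigned char *)bytes;
+    u_int32_t h = 5381;
+
+    while (length--)
+        h = h * 33 + *p++;
+    return (h);
+}
+
+...
+Db db(0, 0);
+db.set_h_hash(my_hash);</pre></blockquote>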
+<h1>Errors</h1>
+<p>The Db::set_h_hash method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_h_hash method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_h_hash method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_h_nelem.html b/libdb/docs/api_cxx/db_set_h_nelem.html
new file mode 100644
index 0000000..f750bc7
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_h_nelem.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_h_nelem</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_h_nelem(u_int32_t h_nelem);
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>In order for the estimate to be used when creating the database,
+the <a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a> method must also be called.
+If the estimate or fill factor are not set or are set too low,
+hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The Db::set_h_nelem method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_h_nelem interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_h_nelem will
+be ignored.
+<p>The Db::set_h_nelem method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_h_nelem method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_h_nelem method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_h_nelem method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_lorder.html b/libdb/docs/api_cxx/db_set_lorder.html
new file mode 100644
index 0000000..1f6be88
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_lorder.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_lorder</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_lorder(int lorder);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big endian
+order is the value 4,321, and little endian order is the value 1,234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The Db::set_lorder method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_lorder interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_lorder will
+be ignored.
+If creating additional databases in a file, the byte order specified must
+be consistent with the existing databases in the file or an error will be
+returned.
+<p>The Db::set_lorder method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
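+<p>For example, to store the database metadata in big-endian byte order
+regardless of the machine on which the database is created (a minimal sketch):
+<p><blockquote><pre>Db db(0, 0);
+db.set_lorder(4321);    /* big-endian; 1234 would select little-endian */</pre></blockquote>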
+<h1>Errors</h1>
+<p>The Db::set_lorder method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_lorder method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_lorder method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_pagesize.html b/libdb/docs/api_cxx/db_set_pagesize.html
new file mode 100644
index 0000000..4d80fa0
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_pagesize.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_pagesize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_pagesize(u_int32_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes, and the maximum page size is 64K
+bytes. If the page size is not explicitly set, one is selected based
+on the underlying filesystem I/O block size. The automatically selected
+size has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The Db::set_pagesize method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_pagesize interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_pagesize will
+be ignored.
+If creating additional databases in a file, the page size specified must
+be consistent with the existing databases in the file or an error will
+be returned.
+<p>The Db::set_pagesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
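+<p>For example, an application tuned for larger items might select 8KB pages
+(a minimal sketch; the value is illustrative and must lie between 512 bytes
+and 64K bytes):
+<p><blockquote><pre>Db db(0, 0);
+db.set_pagesize(8 * 1024);</pre></blockquote>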
+<h1>Errors</h1>
+<p>The Db::set_pagesize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_pagesize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_pagesize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_paniccall.html b/libdb/docs/api_cxx/db_set_paniccall.html
new file mode 100644
index 0000000..808cc29
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_paniccall.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_paniccall</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_paniccall(
+ void (*db_paniccall_fcn)(DbEnv *dbenv, int errval));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). In these cases, when the C++ error model has
+been configured so that the individual Berkeley DB methods return error codes
+(see <a href="../api_cxx/except_class.html">DbException</a> for more information), the value
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB methods.
+<p>In these cases, it is also often simpler to shut down the application
+when such errors occur rather than to try to gracefully return up the
+stack. The <a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a> and Db::set_paniccall methods
+are used to specify methods to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB interface.
+When called, the <b>dbenv</b> argument will be a reference to the
+current environment, and the <b>errval</b> argument is the error value
+that would have been returned to the calling method.
+<p>For <a href="../api_cxx/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db::set_paniccall method affects the entire environment and is equivalent to calling
+the <a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a> method.
+<p>The Db::set_paniccall interface may be called at any time during the life of
+the application.
+<p>The Db::set_paniccall method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
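+<p>An illustrative sketch of a panic callback that reports the error and shuts
+the application down (the <b>panic_handler</b> name is hypothetical):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+panic_handler(DbEnv *dbenv, int errval)
+{
+    fprintf(stderr, "Berkeley DB panic, error %d; run recovery\n", errval);
+    abort();
+}
+
+...
+Db db(0, 0);
+db.set_paniccall(panic_handler);</pre></blockquote>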
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_q_extentsize.html b/libdb/docs/api_cxx/db_set_q_extentsize.html
new file mode 100644
index 0000000..554cbbf
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_q_extentsize.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_q_extentsize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_q_extentsize(u_int32_t extentsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting a extent size</a>.
+<p>The Db::set_q_extentsize method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_q_extentsize interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_q_extentsize will
+be ignored.
+<p>The Db::set_q_extentsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_q_extentsize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_q_extentsize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_q_extentsize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_re_delim.html b/libdb/docs/api_cxx/db_set_re_delim.html
new file mode 100644
index 0000000..76ef555
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_re_delim.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_re_delim</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_delim(int re_delim);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable length records if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte was specified, &lt;newline&gt; characters (that
+is, ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The Db::set_re_delim method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_re_delim interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_re_delim will
+be ignored.
+<p>The Db::set_re_delim method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_re_delim method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_re_delim method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_re_delim method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_re_len.html b/libdb/docs/api_cxx/db_set_re_len.html
new file mode 100644
index 0000000..2218687
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_re_len.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_re_len</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_len(u_int32_t re_len);
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>. For the Queue access method, the record length must be
+sufficiently smaller than the database's page size that at least one record
+plus the database page's metadata information can fit on each database
+page.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte-delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The Db::set_re_len method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_re_len interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_re_len will
+be ignored.
+<p>The Db::set_re_len method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
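+<p>For example, a Queue or Recno database holding fixed-length, 120-byte records
+padded with '.' characters might be configured as follows before calling
+<a href="../api_cxx/db_open.html">Db::open</a> (a minimal sketch; the sizes are illustrative):
+<p><blockquote><pre>Db db(0, 0);
+db.set_re_len(120);
+db.set_re_pad('.');    /* see Db::set_re_pad; the default pad is &lt;space&gt; */</pre></blockquote>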
+<h1>Errors</h1>
+<p>The Db::set_re_len method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_re_len method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_re_len method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_re_pad.html b/libdb/docs/api_cxx/db_set_re_pad.html
new file mode 100644
index 0000000..e82b6ea
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_re_pad.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_re_pad</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_pad(int re_pad);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (that
+is, ASCII 0x20) are used for padding.
+<p>The Db::set_re_pad method configures a database, not only operations performed
+using the specified <a href="../api_cxx/db_class.html">Db</a> handle.
+<p>The Db::set_re_pad interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_re_pad will
+be ignored.
+<p>The Db::set_re_pad method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::set_re_pad method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_re_pad method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_re_pad method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_set_re_source.html b/libdb/docs/api_cxx/db_set_re_source.html
new file mode 100644
index 0000000..9677be2
--- /dev/null
+++ b/libdb/docs/api_cxx/db_set_re_source.html
@@ -0,0 +1,104 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::set_re_source</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::set_re_source(char *re_source);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are
+separated, as specified by <a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>. For example,
+standard UNIX byte stream files can be interpreted as a sequence of
+variable length records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (for example, the <a href="../api_cxx/db_close.html">Db::close</a> or
+<a href="../api_cxx/db_sync.html">Db::sync</a> methods are called), the in-memory copy of the database
+will be written back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily; that is, records
+are not read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently, and are either inserting or deleting records, the backing
+source file must be read in its entirety before more than a single
+process accesses the database, and only that process should specify the
+backing source file as part of the <a href="../api_cxx/db_open.html">Db::open</a> call. See the
+<a href="../api_cxx/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a> flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transaction-protected because it involves filesystem
+operations that are not part of the Db transaction methodology.</b> For
+this reason, if a temporary database is used to hold the records, it is
+possible to lose the contents of the <b>re_source</b> file, for
+example, if the system crashes at the right instant. If a file is used
+to hold the database, normal database recovery on that file can be used
+to prevent information loss, although it is still possible that the
+contents of <b>re_source</b> will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_cxx/db_open.html">Db::open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_cxx/db_sync.html">Db::sync</a> or <a href="../api_cxx/db_close.html">Db::close</a> methods will fail, of course.
+Specify the <a href="../api_cxx/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the <a href="../api_cxx/db_close.html">Db::close</a> method to stop it
+from attempting to write the changes to the backing file; instead, they
+will be silently discarded.
+<p>For all of the previous reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for Berkeley DB applications,
+and that are either generated on the fly by software tools or modified
+using a different mechanism -- for example, a text editor.
+<p>The Db::set_re_source method configures operations performed using the specified
+<a href="../api_cxx/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db::set_re_source interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+If the database already exists when
+<a href="../api_cxx/db_open.html">Db::open</a> is called, the information specified to Db::set_re_source must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db::set_re_source method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
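+<p>A minimal sketch, assuming a flat-text backing file named
+<b>inventory.txt</b> (the name is hypothetical) whose variable-length records
+are separated by &lt;newline&gt; characters:
+<p><blockquote><pre>Db db(0, 0);
+char source[] = "inventory.txt";
+
+db.set_re_source(source);
+db.set_flags(DB_SNAPSHOT);    /* read the file in its entirety at Db::open */</pre></blockquote>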
+<h1>Errors</h1>
+<p>The Db::set_re_source method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/db_open.html">Db::open</a> was called.
+</dl>
+<p>The Db::set_re_source method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::set_re_source method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_stat.html b/libdb/docs/api_cxx/db_stat.html
new file mode 100644
index 0000000..e315654
--- /dev/null
+++ b/libdb/docs/api_cxx/db_stat.html
@@ -0,0 +1,176 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::stat(void *sp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::stat method creates a statistical structure and
+copies a pointer to it into user-specified memory locations.
+Specifically, if <b>sp</b> is non-NULL, a pointer to the statistics
+for the database is copied into the memory location to which it refers.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FAST_STAT">DB_FAST_STAT</a><dd>Return only the values which do not require traversal of the database.
+Fields returned when this flag is set are noted with an asterisk (*)
+below.
+<p>Among other things, this flag makes it possible for applications to
+request key and record counts without incurring the performance penalty
+of traversing the entire database. If the underlying database is of
+type Recno, or of type Btree and the database was created with the
+<a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag, the count of keys will be exact. Otherwise,
+the count of keys will be the value saved the last time the database
+was traversed, or 0 if no count of keys has ever been made. If the
+underlying database is of type Recno, the count of data items will be
+exact; otherwise, the count of data items will be the value saved the
+last time the database was traversed, or 0 if no count of data items
+has ever been made.
+</dl>
+<p>If the DB_FAST_STAT flag has not been specified, the
+Db::stat method will access some or all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by Db::stat may be out-of-date.
+<p>If the database was not opened read-only and the DB_FAST_STAT
+flag was not specified, the cached key and record numbers will be
+updated after the statistical information has been gathered.
+<p>The Db::stat method cannot be transaction-protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The Db::stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
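+<p>A minimal, hypothetical sketch requesting the fast, cached counts; an open
+Btree handle <b>db</b> is assumed:
+<p><pre>
+DB_BTREE_STAT *statp;
+int ret;
+/* DB_FAST_STAT avoids traversing the database. */
+if ((ret = db.stat(&amp;statp, DB_FAST_STAT)) == 0) {
+        /* statp-&gt;bt_nkeys and statp-&gt;bt_ndata hold the cached counts. */
+        free(statp);        /* The caller owns the returned memory. */
+}
+</pre>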
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are stored in a structure of type DB_HASH_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t hash_magic*<dd>Magic number that identifies the file as a Hash file.
+<dt>u_int32_t hash_version*<dd>The version of the Hash database.
+<dt>u_int32_t hash_nkeys*<dd>The number of unique keys in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t hash_ndata*<dd>The number of key/data pairs in the database. If DB_FAST_STAT
+was specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t hash_pagesize*<dd>The underlying Hash database page (and bucket) size, in bytes.
+<dt>u_int32_t hash_ffactor*<dd>The desired fill factor (number of items per bucket) specified at
+database-creation time.
+<dt>u_int32_t hash_buckets*<dd>The number of hash buckets.
+<dt>u_int32_t hash_free<dd>The number of pages on the free list.
+<dt>u_int32_t hash_bfree<dd>The number of bytes free on bucket pages.
+<dt>u_int32_t hash_bigpages<dd>The number of big key/data pages.
+<dt>u_int32_t hash_big_bfree<dd>The number of bytes free on big item pages.
+<dt>u_int32_t hash_overflows<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>u_int32_t hash_ovfl_free<dd>The number of bytes free on overflow pages.
+<dt>u_int32_t hash_dup<dd>The number of duplicate pages.
+<dt>u_int32_t hash_dup_free<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are stored in a structure of type DB_BTREE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t bt_magic*<dd>Magic number that identifies the file as a Btree database.
+<dt>u_int32_t bt_version*<dd>The version of the Btree database.
+<dt>u_int32_t bt_nkeys*<dd>For the Btree Access Method, the number of unique keys in the database.
+If DB_FAST_STAT was specified and the database was created with
+the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag, the count will be exact, otherwise, the
+count will be the last saved value unless it has never been calculated,
+in which case it will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database.
+<dt>u_int32_t bt_ndata*<dd>For the Btree Access Method, the number of key/data pairs in the
+database. If DB_FAST_STAT was specified the count will be the
+last saved value unless it has never been calculated, in which case it
+will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database. If the database has been configured to not renumber records
+during deletion, the count of records will only reflect undeleted
+records.
+<dt>u_int32_t bt_pagesize*<dd>Underlying database page size, in bytes.
+<dt>u_int32_t bt_minkey*<dd>The minimum keys per page.
+<dt>u_int32_t bt_re_len*<dd>The length of fixed-length records.
+<dt>u_int32_t bt_re_pad*<dd>The padding byte value for fixed-length records.
+<dt>u_int32_t bt_levels<dd>Number of levels in the database.
+<dt>u_int32_t bt_int_pg<dd>Number of database internal pages.
+<dt>u_int32_t bt_leaf_pg<dd>Number of database leaf pages.
+<dt>u_int32_t bt_dup_pg<dd>Number of database duplicate pages.
+<dt>u_int32_t bt_over_pg<dd>Number of database overflow pages.
+<dt>u_int32_t bt_free<dd>Number of pages on the free list.
+<dt>u_int32_t bt_int_pgfree<dd>Number of bytes free in database internal pages.
+<dt>u_int32_t bt_leaf_pgfree<dd>Number of bytes free in database leaf pages.
+<dt>u_int32_t bt_dup_pgfree<dd>Number of bytes free in database duplicate pages.
+<dt>u_int32_t bt_over_pgfree<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are stored in a structure of type DB_QUEUE_STAT. The
+following fields will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t qs_magic*<dd>Magic number that identifies the file as a Queue file.
+<dt>u_int32_t qs_version*<dd>The version of the Queue file type.
+<dt>u_int32_t qs_nkeys*<dd>The number of records in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t qs_ndata*<dd>The number of records in the database. If DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>u_int32_t qs_pagesize*<dd>Underlying database page size, in bytes.
+<dt>u_int32_t qs_extentsize*<dd>Underlying database extent size, in pages.
+<dt>u_int32_t qs_pages<dd>Number of pages in the database.
+<dt>u_int32_t qs_re_len*<dd>The length of the records.
+<dt>u_int32_t qs_re_pad*<dd>The padding byte value for the records.
+<dt>u_int32_t qs_pgfree<dd>Number of bytes free in database pages.
+<dt>u_int32_t qs_first_recno*<dd>First undeleted record in the database.
+<dt>u_int32_t qs_cur_recno*<dd>Next available record number.
+</dl>
+<p>The Db::stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The Db::stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_sync.html b/libdb/docs/api_cxx/db_sync.html
new file mode 100644
index 0000000..8879abb
--- /dev/null
+++ b/libdb/docs/api_cxx/db_sync.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::sync</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::sync(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::sync method flushes any cached information to disk.
+<p>If the database is in memory only, the Db::sync method has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+<a href="../api_cxx/db_close.html">Db::close</a>, atomically replace the original database with the
+updated copy.
+<p>The Db::sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
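+<p>A minimal, hypothetical sketch; an open handle <b>db</b> is assumed:
+<p><pre>
+int ret;
+/* Flush any pages cached for this handle to the backing file. */
+if ((ret = db.sync(0)) != 0)
+        db.err(ret, "Db::sync");
+</pre>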
+<h1>Errors</h1>
+<p>The Db::sync method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db::sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::sync method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_truncate.html b/libdb/docs/api_cxx/db_truncate.html
new file mode 100644
index 0000000..13aeb86
--- /dev/null
+++ b/libdb/docs/api_cxx/db_truncate.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::truncate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::truncate</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::truncate(DbTxn *txnid, u_int32_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::truncate interface empties the database, discarding all
+records it contains.
+The number of records discarded from the database is returned in
+<b>countp</b>.
+<p>If the operation is to be transaction-protected (other than by specifying
+the DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>; otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the Db::truncate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The Db::truncate method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
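+<p>A minimal, hypothetical sketch; an open, transaction-enabled handle
+<b>db</b> is assumed:
+<p><pre>
+u_int32_t count;
+int ret;
+/* DB_AUTO_COMMIT wraps the truncate in its own transaction. */
+ret = db.truncate(NULL, &amp;count, DB_AUTO_COMMIT);
+/* On success, count holds the number of records discarded. */
+</pre>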
+<h1>Errors</h1>
+<p>The Db::truncate method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::truncate method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_upgrade.html b/libdb/docs/api_cxx/db_upgrade.html
new file mode 100644
index 0000000..99437c9
--- /dev/null
+++ b/libdb/docs/api_cxx/db_upgrade.html
@@ -0,0 +1,99 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::upgrade</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::upgrade(const char *file, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::upgrade method upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+Db::upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive. For example,
+if pages need to be allocated and no disk space is available, the
+database may be left corrupted. Backups should be made before databases
+are upgraded. See <a href="../ref/am/upgrade.html">Upgrading databases</a>
+for more information.</b>
+<p>Unlike all other database operations, Db::upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_DUPSORT">DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release,
+the on-disk format of duplicate data items changed. To correctly
+upgrade the format requires applications to specify whether duplicate
+data items in the database are sorted or not. Specifying the
+DB_DUPSORT flag informs Db::upgrade that the duplicates
+are sorted; otherwise they are assumed to be unsorted. Incorrectly
+specifying the value of this flag may lead to database corruption.
+<p>Further, because the Db::upgrade method upgrades a physical file
+(including all the databases it contains), it is not possible to use
+Db::upgrade to upgrade files in which some of the databases it
+includes have sorted duplicate data items, and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, if the databases do not support duplicate
+data items, or if all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+Db::upgrade will work correctly as long as the
+DB_DUPSORT flag is correctly specified. Otherwise, the file
+cannot be upgraded using Db::upgrade; it must be upgraded
+manually by dumping and reloading the databases.
+</dl>
+<p>The Db::upgrade method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
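+<p>A minimal, hypothetical sketch; the file name is assumed, and a backup is
+assumed to have been made first:
+<p><pre>
+Db db(NULL, 0);
+int ret;
+/* Upgrade every database in the file in place, if an upgrade is needed. */
+if ((ret = db.upgrade("access.db", 0)) != 0)
+        db.err(ret, "Db::upgrade: access.db");
+</pre>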
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db::upgrade is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db::upgrade method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="DB_OLD_VERSION">DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The Db::upgrade method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::upgrade method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/db_verify.html b/libdb/docs/api_cxx/db_verify.html
new file mode 100644
index 0000000..416b179
--- /dev/null
+++ b/libdb/docs/api_cxx/db_verify.html
@@ -0,0 +1,130 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db::verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db::verify</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Db::verify(const char *file,
+ const char *database, ostream *outfile, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Db::verify method verifies the integrity of all databases in the
+file specified by the <b>file</b> argument, and optionally outputs the
+databases' key/data pairs to the file stream specified by the
+<b>outfile</b> argument.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_SALVAGE">DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility, and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using Db::verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_AGGRESSIVE">DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, Db::verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="DB_PRINTABLE">DB_PRINTABLE</a><dd>When using the DB_SALVAGE flag, if characters in either the key
+or data items are printing characters (as defined by <b>isprint</b>(3)), use printing characters to represent them. This flag permits users
+to use standard text editors and tools to modify the contents of
+databases or selectively remove data from salvager output.
+<p>Note: different systems may have different notions about what characters
+are considered <i>printing characters</i>, and databases dumped in
+this manner may be less portable to external systems.
+<p><dt><a name="DB_NOORDERCHK">DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The Db::verify method normally verifies that btree keys and duplicate
+items are correctly sorted, and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification because only one sort order or hash function can be
+specified before Db::verify is called. To verify files with
+multiple databases having differing sorting orders or hashing functions,
+first perform verification of the file as a whole by using the
+DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+DB_ORDERCHKONLY flag.
+<p><dt><a name="DB_ORDERCHKONLY">DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using Db::verify with the
+DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to NULL except when the
+DB_ORDERCHKONLY flag is set.
+<p><b>The Db::verify method does not perform any locking, even in Berkeley DB
+environments that are configured with a locking subsystem. As such, it
+should only be used on files that are not being modified by another
+thread of control.</b>
+<p>The Db::verify interface may not be called after the <a href="../api_cxx/db_open.html">Db::open</a>
+interface is called.
+<a name="3"><!--meow--></a>
+<p>The Db::verify method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, 0 on success, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> if a database is corrupted. When the
+DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
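+<p>A minimal, hypothetical sketch of a salvage pass; the file names are
+assumed, as is a build using standard C++ iostreams:
+<p><pre>
+#include &lt;fstream&gt;
+
+Db db(NULL, 0);
+std::ofstream dump("salvage.out");
+/* Salvage whatever key/data pairs can be found, in db_dump format. */
+int ret = db.verify("access.db", NULL, &amp;dump, DB_SALVAGE | DB_PRINTABLE);
+/* A DB_VERIFY_BAD return means some pairs may not have been recovered. */
+</pre>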
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db::verify is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db::verify method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Db::verify was called after <a href="../api_cxx/db_open.html">Db::open</a>.
+</dl>
+<p>The Db::verify method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db::verify method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_class.html b/libdb/docs/api_cxx/dbc_class.html
new file mode 100644
index 0000000..5120987
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Dbc { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc object is the handle for a cursor into a Berkeley DB database.
+The handle is not free-threaded: a cursor may be used by only a single
+thread of control at a time. If the cursor is to
+be used to perform operations on behalf of a transaction, the cursor
+must be opened and closed within the context of that single transaction.
+Once <a href="../api_cxx/dbc_close.html">Dbc::close</a> has been called, the handle may not be accessed
+again, regardless of the method's return.
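+<p>A minimal, hypothetical sketch of a cursor opened and closed within a
+single transaction; open <b>dbenv</b> and <b>db</b> handles are assumed:
+<p><pre>
+DbTxn *txn;
+Dbc *dbc;
+dbenv-&gt;txn_begin(NULL, &amp;txn, 0);
+db-&gt;cursor(txn, &amp;dbc, 0);
+/* ... cursor operations performed on behalf of the transaction ... */
+dbc-&gt;close();        /* Close the cursor before resolving the transaction. */
+txn-&gt;commit(0);
+</pre>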
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_close.html b/libdb/docs/api_cxx/dbc_close.html
new file mode 100644
index 0000000..574f42d
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_close.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::close(void);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::close method discards the cursor.
+<p>It is possible for the Dbc::close method to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>After Dbc::close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The Dbc::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
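+<p>A minimal, hypothetical sketch of the deadlock handling described above;
+the cursor <b>dbc</b> and its enclosing transaction <b>txn</b> are assumed:
+<p><pre>
+try {
+        dbc-&gt;close();
+} catch (DbDeadlockException &amp;) {
+        /* The close was chosen as a deadlock victim; abort the transaction. */
+        txn-&gt;abort();
+}
+</pre>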
+<h1>Errors</h1>
+<p>The Dbc::close method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc::close method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::close method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_count.html b/libdb/docs/api_cxx/dbc_count.html
new file mode 100644
index 0000000..64929b4
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_count.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::count</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::count(db_recno_t *countp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::count method copies a count of the number of duplicate data
+items for the key to which the
+cursor refers into the memory location to which <b>countp</b> refers.
+If the underlying database does not support duplicate data items, the
+call will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>
+If the <b>cursor</b> argument is not yet initialized, the Dbc::count method either returns EINVAL or throws an exception that encapsulates EINVAL.
+Otherwise, the Dbc::count method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
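+<p>A minimal, hypothetical sketch; the cursor <b>dbc</b> is assumed to be
+positioned on a key:
+<p><pre>
+db_recno_t count;
+int ret;
+/* Count the duplicate data items for the key under the cursor;
+ * count is 1 if the database does not support duplicates. */
+ret = dbc-&gt;count(&amp;count, 0);
+</pre>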
+<h1>Errors</h1>
+<p>The Dbc::count method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::count method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_del.html b/libdb/docs/api_cxx/dbc_del.html
new file mode 100644
index 0000000..07bb7c4
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_del.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::del</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::del(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::del method deletes the key/data pair to which the cursor
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_cxx/db_associate.html">Db::associate</a> method, the <a href="../api_cxx/db_del.html">Db::del</a> method
+deletes the key/data pair from the primary database and all secondary
+indices.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to refer to an existing key will
+fail.
+<p>
+If the element has already been deleted, the Dbc::del method will return DB_KEYEMPTY.
+If the cursor is not yet initialized, the Dbc::del method either returns EINVAL or throws an exception that encapsulates EINVAL.
+Otherwise, the Dbc::del method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
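+<p>A minimal, hypothetical sketch; the cursor <b>dbc</b> and the key value are
+assumed:
+<p><pre>
+Dbt key((void *)"fruit", sizeof("fruit") - 1);
+Dbt data;
+int ret;
+/* Position the cursor on the key, then delete that key/data pair. */
+if ((ret = dbc-&gt;get(&amp;key, &amp;data, DB_SET)) == 0)
+        ret = dbc-&gt;del(0);
+</pre>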
+<h1>Errors</h1>
+<p>The Dbc::del method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_cxx/env_open.html">DbEnv::open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc::del method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc::del method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::del method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_dup.html b/libdb/docs/api_cxx/dbc_dup.html
new file mode 100644
index 0000000..b1a9f1e
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_dup.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::dup</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::dup(Dbc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::dup method creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_POSITION">DB_POSITION</a><dd>The newly created cursor is initialized to refer to the same position
+in the database as the original cursor and hold the same locks. If the
+DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>
+If the <b>cursor</b> argument is not yet initialized, the Dbc::dup method either returns EINVAL or throws an exception that encapsulates EINVAL.
+Otherwise, the Dbc::dup method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
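+<p>A minimal, hypothetical sketch; an initialized cursor <b>dbc</b> is
+assumed:
+<p><pre>
+Dbc *dupc;
+int ret;
+/* The duplicate starts at the same position and holds the same locks. */
+if ((ret = dbc-&gt;dup(&amp;dupc, DB_POSITION)) == 0) {
+        /* ... use both cursors ... */
+        dupc-&gt;close();
+}
+</pre>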
+<h1>Errors</h1>
+<p>The Dbc::dup method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>cursor</b> argument was created using the
+<a href="../api_cxx/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The Dbc::dup method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::dup method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_get.html b/libdb/docs/api_cxx/dbc_get.html
new file mode 100644
index 0000000..d1287f7
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_get.html
@@ -0,0 +1,236 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::get(Dbt *key, Dbt *data, u_int32_t flags);
+int
+Dbc::pget(Dbt *key, Dbt *pkey, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::get method retrieves key/data pairs from the database. The
+address and length of the key
+are returned in the object to which <b>key</b> refers (except for the
+case of the DB_SET flag, in which the <b>key</b> object is
+unchanged), and the address
+and length of the data are returned in the object to which <b>data</b>
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_cxx/db_associate.html">Db::associate</a> method, the Dbc::get
+and Dbc::pget methods return the key from the secondary index and the
+data item from the primary database. In addition, the Dbc::pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the Dbc::pget interface will always fail and
+return EINVAL.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan; that is, records inserted behind a cursor will not be
+returned, while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (that is, entries that
+were never explicitly created or that were created and then deleted)
+will be skipped during a sequential scan.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the key/data pair to which the cursor refers.
+<p>
+If the cursor key/data pair was deleted, the Dbc::get method will return DB_KEYEMPTY.
+If the cursor is not yet initialized, the Dbc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_FIRST">DB_FIRST</a>, <a name="DB_LAST">DB_LAST</a><dd>The cursor is set to refer to the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_FIRST (DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application, or were created and
+later deleted.
+<p>
+If the database is empty, the Dbc::get method will return DB_NOTFOUND.
+<p><dt><a name="DB_GET_BOTH">DB_GET_BOTH</a><dd>The DB_GET_BOTH flag is identical to the DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p>When used with the Dbc::pget version of this interface on a
+secondary index handle, both the secondary and primary keys must be
+matched by the secondary and primary key item in the database. It is
+an error to use the DB_GET_BOTH flag with the Dbc::get
+version of this interface and a cursor that has been opened on a
+secondary index handle.
+<p><dt><a name="DB_GET_BOTH_RANGE">DB_GET_BOTH_RANGE</a><dd>The DB_GET_BOTH_RANGE flag is identical to the DB_GET_BOTH
+flag, except that, in the case of any database supporting sorted
+duplicate sets, the returned key/data pair is the smallest data item
+greater than or equal to the specified data item (as determined by the
+comparison function), permitting partial matches and range searches in
+duplicate data sets.
+<p><dt><a name="DB_GET_RECNO">DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b>, as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. The
+<b>key</b> parameter is ignored.
+<p>For DB_GET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+<p><dt><a name="DB_JOIN_ITEM">DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_cxx/db_join.html">Db::join</a> method.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>If the cursor is not yet initialized, DB_NEXT (DB_PREV)
+is identical to DB_FIRST (DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_NEXT (DB_PREV) flag will skip any keys that exist
+but were never explicitly created by the application, or those that were
+created and later deleted.
+<p>
+If the cursor is already on the last (first) record in the database, the Dbc::get method will return DB_NOTFOUND.
+<p><dt><a name="DB_NEXT_DUP">DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate data record for
+the current key/data pair, the cursor is moved to the next key/data pair
+of the database, and that pair is returned.
+If the next key/data pair of the database is not a duplicate data record
+for the current key/data pair, the Dbc::get method will return DB_NOTFOUND.
+If the cursor is not yet initialized, the Dbc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a name="DB_PREV_NODUP">DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, DB_NEXT_NODUP
+(DB_PREV_NODUP) is identical to DB_FIRST
+(DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key of the database, and that key/data pair is returned.
+<p>If the database is a Queue or Recno database, Dbc::get using the
+DB_NEXT_NODUP (DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application, or those
+that were created and later deleted.
+<p>
+If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, the Dbc::get method will return DB_NOTFOUND.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, Dbc::get will return the
+first data item for the given key.
+If no matching keys are found, the Dbc::get method will return DB_NOTFOUND.
+If the database is a Queue or Recno database, and the specified key exists,
+but was never explicitly created by the application or was later deleted, the Dbc::get method will return DB_KEYEMPTY.
+<p><dt><a name="DB_SET_RANGE">DB_SET_RANGE</a><dd>The DB_SET_RANGE flag is identical to the DB_SET flag,
+except that in the case of the Btree access method, the key is returned
+as well as the data item and the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison method), permitting partial key matches and range
+searches.
+<p><dt><a name="DB_SET_RECNO">DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a pointer to a memory location from which a <a href="../api_cxx/dbt_class.html#db_recno_t">db_recno_t</a>
+may be read, as described in <a href="../api_cxx/dbt_class.html">Dbt</a>. This memory location will be
+read to determine the record to be retrieved.
+<p>For DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_cxx/db_set_flags.html#DB_RECNUM">DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="DB_MULTIPLE">DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If the
+first data item associated with the key cannot fit into the buffer, the
+size field of the <b>data</b> argument is set to the length needed to
+retrieve it, and the error ENOMEM is returned. Subsequent calls with both the
+DB_NEXT_DUP and DB_MULTIPLE flags specified will return
+additional data items associated with the current key or
+<a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a> if there is no additional data items to return.
+<p>If DB_MULTIPLE is specified for the Queue and Recno access
+methods, the buffer will be filled with as many data records as
+possible. The record number of the first record will be returned in
+the <b>key</b> argument. The record number of each subsequent returned
+record must be calculated from this value.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE flag may only be used with the
+DB_CURRENT, DB_FIRST, DB_GET_BOTH,
+DB_NEXT, DB_NEXT_DUP, DB_NEXT_NODUP,
+DB_SET, DB_SET_RANGE, and DB_SET_RECNO
+options.
+<p>The DB_MULTIPLE flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_cxx/db_associate.html">Db::associate</a> method.
+<p>See <a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a><dd>Return multiple key and data pairs. The buffer to which the
+<b>data</b> argument refers is filled with key and data pairs. If the
+first key and data pair cannot fit into the buffer, the size field of
+the <b>data</b> argument is set to the length needed to retrieve them,
+and the error ENOMEM is returned.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The DB_MULTIPLE_KEY flag may only be used with the
+DB_CURRENT, DB_FIRST, DB_GET_BOTH,
+DB_NEXT, DB_NEXT_NODUP, DB_SET,
+DB_SET_RANGE, and DB_SET_RECNO options. The
+DB_MULTIPLE_KEY flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_cxx/db_associate.html">Db::associate</a> method.
+<p>See <a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> for more information.
+<p><dt><a name="DB_RMW">DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that a deadlock does not result when another thread of control, in its
+own read-modify-write cycle, acquires a read lock for the same item.
+</dl>
+<p>
+Otherwise, the Dbc::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>If Dbc::get fails for any reason, the state of the cursor will be
+unchanged.
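+<p>For example, a minimal sketch of a range search using DB_SET_RANGE
+followed by DB_NEXT iteration (assuming &lt;db_cxx.h&gt; has been included
+and <b>db</b> is an open Btree database handle; the key contents are
+illustrative only):
+<pre>
+char kbuf[] = "smith";
+Dbt key(kbuf, sizeof(kbuf) - 1);
+Dbt data;
+Dbc *cursorp;
+if (db->cursor(NULL, &cursorp, 0) == 0) {
+        /* Position at the smallest key >= "smith", then walk forward. */
+        for (int ret = cursorp->get(&key, &data, DB_SET_RANGE);
+            ret == 0;
+            ret = cursorp->get(&key, &data, DB_NEXT)) {
+                /* key and data refer to the current pair. */
+        }
+        cursorp->close();
+}
+</pre>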
+<h1>Errors</h1>
+<p>The Dbc::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>There was insufficient memory to return the requested item.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+<p>The Dbc::pget interface was called with a cursor that does not
+refer to a secondary index.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc::get method will fail and either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Dbc::get method will fail and
+either return ENOMEM or
+throw a <a href="../api_cxx/memp_class.html">DbMemoryException</a> exception.
+<p>The Dbc::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::get method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_list.html b/libdb/docs/api_cxx/dbc_list.html
new file mode 100644
index 0000000..f2da2a5
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_list.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Cursors and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Cursors and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Cursors and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/db_cursor.html">Db::cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_cxx/dbc_close.html">Dbc::close</a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_cxx/dbc_count.html">Dbc::count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><a href="../api_cxx/dbc_del.html">Dbc::del</a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_cxx/dbc_dup.html">Dbc::dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_cxx/dbc_get.html">Dbc::get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_cxx/dbc_get.html">Dbc::pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_cxx/dbc_put.html">Dbc::put</a></td><td>Store by cursor</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbc_put.html b/libdb/docs/api_cxx/dbc_put.html
new file mode 100644
index 0000000..b9aecfa
--- /dev/null
+++ b/libdb/docs/api_cxx/dbc_put.html
@@ -0,0 +1,156 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc::put</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+Dbc::put(Dbt *key, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc::put method stores key/data pairs into the database.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AFTER">DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately after the current cursor position.
+It is an error to specify DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure to which the <b>key</b> argument refers. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_cxx/db_open.html">Db::open</a>
+for more information.
+<p>The DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, Dbc::put will return EINVAL.
+<p><dt><a name="DB_BEFORE">DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately before the current cursor position.
+It is an error to specify DB_BEFORE if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag. If the <a href="../api_cxx/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure to which the <b>key</b> argument refers.
+The initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_cxx/db_open.html">Db::open</a> for more information.
+<p>The DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, Dbc::put will return EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Overwrite the data of the key/data pair to which the cursor refers with
+the specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+referenced key/data pair does not compare equally to the <b>data</b>
+parameter, Dbc::put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc::put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue, or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, Dbc::put will return EINVAL.
+<p><dt><a name="DB_KEYFIRST">DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_KEYLAST">DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="DB_NODUPDATA">DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database, unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_cxx/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>
+is returned. The DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>
+Otherwise, the Dbc::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>If Dbc::put fails for any reason, the state of the cursor will be
+unchanged. If Dbc::put succeeds and an item is inserted into the
+database, the cursor is always positioned to refer to the newly inserted
+item.
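+<p>For example, a minimal sketch of storing a key/data pair through a
+cursor with DB_KEYFIRST (assuming &lt;db_cxx.h&gt; has been included and
+<b>db</b> is an open Btree or Hash database handle; key and data
+contents are illustrative only):
+<pre>
+char kbuf[] = "fruit", dbuf[] = "apple";
+Dbt key(kbuf, sizeof(kbuf) - 1);
+Dbt data(dbuf, sizeof(dbuf) - 1);
+Dbc *cursorp;
+if (db->cursor(NULL, &cursorp, 0) == 0) {
+        if (cursorp->put(&key, &data, DB_KEYFIRST) == 0) {
+                /* The cursor now refers to the newly inserted item. */
+        }
+        cursorp->close();
+}
+</pre>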
+<h1>Errors</h1>
+<p>The Dbc::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_BEFORE or DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_cxx/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag was
+specified to <a href="../api_cxx/env_open.html">DbEnv::open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc::put method will fail and either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc::put method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_cxx/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbt_bulk.html b/libdb/docs/api_cxx/dbt_bulk.html
new file mode 100644
index 0000000..0045dbe
--- /dev/null
+++ b/libdb/docs/api_cxx/dbt_bulk.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DBT</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DBT: Bulk Retrieval</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<a name="3"><!--meow--></a>
+<p>If either of the <a href="../api_cxx/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> or <a href="../api_cxx/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flags
+were specified to the <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a> methods, the data
+<a href="../api_cxx/dbt_class.html">Dbt</a> returned by those interfaces will refer to a buffer that
+is filled with data. Access to that data is through the following
+macros:
+<p><dl compact>
+<p><dt><a name="DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a><dd><pre>DB_MULTIPLE_INIT(void *pointer, <a href="../api_cxx/dbt_class.html">Dbt</a> *data);</pre>
+<p>Initialize the retrieval. The <b>pointer</b> argument is a variable
+to be initialized. The <b>data</b> argument is a <a href="../api_cxx/dbt_class.html">Dbt</a> structure
+returned from a successful call to <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a>
+for which one of the <a href="../api_cxx/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> or <a href="../api_cxx/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a>
+flags was specified.
+<p><dt><a name="DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a><dd><pre>DB_MULTIPLE_NEXT(void *pointer, <a href="../api_cxx/dbt_class.html">Dbt</a> *data, void *retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_cxx/dbt_class.html">Dbt</a> structure returned from a
+successful call to <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a> for which the
+<a href="../api_cxx/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>retdata</b> argument is set to
+refer to the next data element in the returned set, and the
+<b>retdlen</b> argument is set to the length, in bytes, of that data
+element. When used with the Queue and Recno access methods,
+<b>retdata</b> will be set to NULL for deleted records. The
+<b>pointer</b> argument is set to NULL if there are no more data
+elements in the returned set.
+<p><dt><a name="DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a><dd><pre>DB_MULTIPLE_KEY_NEXT(void *pointer, <a href="../api_cxx/dbt_class.html">Dbt</a> *data,
+ void *retkey, size_t retklen, void *retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_cxx/dbt_class.html">Dbt</a> structure returned from a
+successful call to <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a> for which the
+<a href="../api_cxx/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>retkey</b> argument is set to
+refer to the next key element in the returned set, and the
+<b>retklen</b> argument is set to the length, in bytes, of that key
+element. The <b>retdata</b> argument is set to refer to the next data
+element in the returned set, and the <b>retdlen</b> argument is set to
+the length, in bytes, of that data element. The <b>pointer</b>
+argument is set to NULL if there are no more key/data pairs in the
+returned set.
+<p><dt><a name="DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a><dd><pre>DB_MULTIPLE_RECNO_NEXT(void *pointer, <a href="../api_cxx/dbt_class.html">Dbt</a> *data,
+ db_recno_t recno, void * retdata, size_t retdlen);</pre>
+<p>The <b>data</b> argument is a <a href="../api_cxx/dbt_class.html">Dbt</a> structure returned from a
+successful call to <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a> for which the
+<a href="../api_cxx/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag was specified. The <b>pointer</b> and
+<b>data</b> arguments must have been previously initialized by a call
+to DB_MULTIPLE_INIT. The <b>recno</b> argument is set to the
+record number of the next record in the returned set. The
+<b>retdata</b> argument is set to refer to the next data element in
+the returned set, and the <b>retdlen</b> argument is set to the length,
+in bytes, of that data element. When used with the Queue and Recno
+access methods, <b>retdata</b> will be set to NULL for deleted
+records. The <b>pointer</b> argument is set to NULL if there are
+no more key/data pairs in the returned set.
+</dl>
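+<p>For example, a minimal bulk-retrieval sketch using these macros
+(assuming &lt;db_cxx.h&gt; and &lt;stdlib.h&gt; have been included and
+<b>db</b> is an open database handle; the buffer size and key contents
+are illustrative only):
+<pre>
+char kbuf[] = "fruit";
+Dbt key(kbuf, sizeof(kbuf) - 1);
+
+u_int32_t buflen = 64 * 1024;            /* a multiple of 1024 bytes */
+void *buf = malloc(buflen);              /* aligned for integer access */
+Dbt data;
+data.set_data(buf);
+data.set_ulen(buflen);
+data.set_flags(DB_DBT_USERMEM);
+
+Dbc *cursorp;
+if (db->cursor(NULL, &cursorp, 0) == 0) {
+        if (cursorp->get(&key, &data, DB_SET | DB_MULTIPLE) == 0) {
+                void *p, *retdata;
+                size_t retdlen;
+                for (DB_MULTIPLE_INIT(p, &data);;) {
+                        DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+                        if (p == NULL)
+                                break;
+                        /* retdata/retdlen describe one data item. */
+                }
+        }
+        cursorp->close();
+}
+free(buf);
+</pre>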
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/dbt_class.html b/libdb/docs/api_cxx/dbt_class.html
new file mode 100644
index 0000000..91ea90d
--- /dev/null
+++ b/libdb/docs/api_cxx/dbt_class.html
@@ -0,0 +1,217 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbt</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class Dbt {
+public:
+ Dbt(void *data, size_t size);
+ Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+ ~Dbt();
+<p>
+ void *get_data() const;
+ void set_data(void *);
+<p>
+ u_int32_t get_size() const;
+ void set_size(u_int32_t);
+<p>
+ u_int32_t get_ulen() const;
+ void set_ulen(u_int32_t);
+<p>
+ u_int32_t get_dlen() const;
+ void set_dlen(u_int32_t);
+<p>
+ u_int32_t get_doff() const;
+ void set_doff(u_int32_t);
+<p>
+ u_int32_t get_flags() const;
+ void set_flags(u_int32_t);
+<p>
+ DBT *Dbt::get_DBT();
+ const DBT *Dbt::get_const_DBT() const;
+ static Dbt *Dbt::get_Dbt(DBT *dbt);
+ static const Dbt *Dbt::get_const_Dbt(const DBT *dbt);
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbt class,
+used to encode keys and data items in a database.
+<a name="3"><!--meow--></a>
+<h3>Key/Data Pairs</h3>
+<p>Storage and retrieval for the <a href="../api_cxx/db_class.html">Db</a> access methods are based on
+key/data pairs. Both key and data items are represented by Dbt
+objects. Key and data byte strings may refer to strings of zero length
+up to strings of essentially unlimited length. See
+<a href="../ref/am_misc/dbsizes.html">Database limits</a> for more
+information.
+<p>The Dbt class provides simple access to an underlying data
+structure, whose elements can be examined or changed using the
+<b>set_</b> or <b>get_</b> methods. The remainder of the manual
+page sometimes refers to these accesses using the underlying name; for
+example, <b>ulen</b> rather than Dbt::get_ulen and
+Dbt::set_ulen. Dbt can be subclassed, providing a way
+to associate with it additional data or references to other
+structures.
+<p>The constructors set all elements of the underlying structure to zero.
+The constructor with two arguments has the effect of setting all elements
+to zero except for the <b>data</b> and <b>size</b> elements.
+<p>In the case in which the <b>flags</b> structure element is set to 0, when
+the application is providing Berkeley DB a key or data item to store into the
+database, Berkeley DB expects the <b>data</b> object to point to a byte
+string of <b>size</b> bytes. When returning a key/data item to the
+application, Berkeley DB will store into the <b>data</b> object a pointer to
+a byte string of <b>size</b> bytes, and the memory to which the pointer
+refers will be allocated and managed by Berkeley DB.
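+<p>For example, a minimal sketch of this default (<b>flags</b> set to 0)
+behavior (assuming &lt;db_cxx.h&gt; has been included and <b>db</b> is an
+open, non-transactional database handle; key and data contents are
+illustrative only):
+<pre>
+char kbuf[] = "fruit", dbuf[] = "apple";
+Dbt key(kbuf, sizeof(kbuf) - 1);         /* application-owned bytes */
+Dbt data(dbuf, sizeof(dbuf) - 1);
+db->put(NULL, &key, &data, 0);
+
+Dbt result;                              /* all elements zero */
+if (db->get(NULL, &key, &result, 0) == 0) {
+        /* result.get_data() points to size bytes of memory that is
+         * allocated and managed by Berkeley DB. */
+}
+</pre>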
+<p>Access to Dbt objects is not re-entrant. In particular, if
+multiple threads simultaneously access the same Dbt object using
+<a href="../api_cxx/db_class.html">Db</a> API calls, the results are undefined, and may result in a
+crash. One easy way to avoid problems is to use Dbt objects
+that are
+constructed as stack variables.
+<p>The elements of the structure underlying the Dbt class are defined as follows:
+<p><dl compact>
+<p><dt>void *<a name="data">data</a>;<dd>A pointer to a byte string.
+This element is accessed using Dbt::get_data and
+Dbt::set_data, and may be initialized using one
+of the constructors.
+<p><dt>u_int32_t size;<dd>The length of <b>data</b>, in bytes.
+This element is accessed using Dbt::get_size and
+Dbt::set_size, and may be initialized
+using the constructor with two arguments.
+<p><dt>u_int32_t ulen;<dd>The size of the user's buffer (referred to by <b>data</b>), in bytes.
+This location is not written by the <a href="../api_cxx/db_class.html">Db</a> methods.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> to 0 and checking the return value found in <b>size</b>.
+See the DB_DBT_USERMEM flag for more information.
+<p>This element is accessed using
+Dbt::get_ulen and Dbt::set_ulen.
+<p><dt>u_int32_t dlen;<dd>The length of the partial record being read or written by the application,
+in bytes.
+See the DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt::get_dlen, and Dbt::set_dlen.
+<p><dt>u_int32_t doff;<dd>The offset of the partial record being read or written by the application,
+in bytes.
+See the DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt::get_doff and Dbt::set_doff.
+<p><dt>u_int32_t flags;<dd>This element is accessed using Dbt::get_flags and
+Dbt::set_flags.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DBT_MALLOC">DB_DBT_MALLOC</a><dd>When this flag is set, Berkeley DB will allocate memory for the returned key
+or data item
+(using <b>malloc</b>(3) or the user-specified malloc method), and
+return a pointer to it in the <b>data</b> field of the key or data
+Dbt object. Because any allocated memory becomes the
+responsibility of the calling application, the caller must determine
+whether memory was allocated using the returned value of the
+<b>data</b> field.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_REALLOC">DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB
+will allocate memory for the returned key or data item (using
+<b>realloc</b>(3) or the user-specified realloc method), and return
+a pointer to it in the <b>data</b> field of the key or data Dbt
+object. Because any allocated memory becomes the responsibility of the
+calling application, the caller must determine whether memory was
+allocated using the returned value of the <b>data</b> field.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+<p><dt><a name="DB_DBT_USERMEM">DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data object must refer to memory
+that is at least <b>ulen</b> bytes in length. If the length of the
+requested item is less than or equal to that number of bytes, the item
+is copied into the memory referred to by the <b>data</b> field.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>It is an error to specify more than one of DB_DBT_MALLOC,
+DB_DBT_REALLOC, and DB_DBT_USERMEM.
+</dl>
+<p>If DB_DBT_MALLOC or DB_DBT_REALLOC is specified, Berkeley DB
+allocates a properly sized byte array to contain the data. This can be
+convenient if you know little about the nature of the data, specifically
+the size of data in the database. However, if your application makes
+repeated calls to retrieve keys or data, you may notice increased garbage
+collection due to this allocation. If you know the maximum size of data
+you are retrieving, you might decrease the memory burden and speed your
+application by allocating your own byte array and using
+DB_DBT_USERMEM. Even if you don't know the maximum size, you can
+use this option and reallocate your array whenever your retrieval API call
+returns an ENOMEM error or throws an exception encapsulating an ENOMEM.
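+<p>For example, a minimal sketch of retrieval into a caller-owned buffer
+with DB_DBT_USERMEM (assuming &lt;db_cxx.h&gt; and &lt;errno.h&gt; have
+been included, <b>db</b> is an open database handle, and errors are
+returned rather than thrown; buffer size and key contents are
+illustrative only):
+<pre>
+char kbuf[] = "fruit";
+char dbuf[1024];
+Dbt key(kbuf, sizeof(kbuf) - 1);
+Dbt data;
+data.set_data(dbuf);
+data.set_ulen(sizeof(dbuf));             /* usable buffer length */
+data.set_flags(DB_DBT_USERMEM);
+
+int ret = db->get(NULL, &key, &data, 0);
+if (ret == ENOMEM) {
+        /* data.get_size() reports the length needed; allocate a larger
+         * buffer, reset data and ulen, and retry the call. */
+}
+</pre>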
+<p><dl compact>
+<p><dt><a name="DB_DBT_PARTIAL">DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful, and any existing bytes
+are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a Dbt having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would refer to the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes starting
+<b>doff</b> bytes from the beginning of the specified key's data record
+are replaced by the data specified by the <b>data</b> and <b>size</b>
+objects.
+If <b>dlen</b> is smaller than <b>size</b>, the record will grow; if
+<b>dlen</b> is larger than <b>size</b>, the record will shrink.
+If the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_cxx/db_put.html">Db::put</a>
+method in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_cxx/dbc_class.html">Dbc</a> method.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a Dbt having a <b>dlen</b>
+field of 20, a <b>doff</b> field of 85, and a <b>size</b> field of 30,
+the resulting record would be 115 bytes in length, where the last 30
+bytes would be those specified by the put call.
+</dl>
+</dl>
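+<p>For example, a minimal sketch of a partial retrieval with
+DB_DBT_PARTIAL (assuming &lt;db_cxx.h&gt; has been included and <b>db</b>
+is an open database handle; the offset, length, and key contents are
+illustrative only):
+<pre>
+char kbuf[] = "fruit";
+Dbt key(kbuf, sizeof(kbuf) - 1);
+Dbt data;
+data.set_flags(DB_DBT_PARTIAL);
+data.set_doff(85);                       /* start 85 bytes into the record */
+data.set_dlen(20);                       /* return at most 20 bytes */
+
+if (db->get(NULL, &key, &data, 0) == 0) {
+        /* data.get_data() refers to the requested range; data.get_size()
+         * reports how many of those bytes actually existed. */
+}
+</pre>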
+<p>Each Dbt object has an associated DBT struct, which is used by
+the underlying implementation of Berkeley DB and its C-language API. The
+Dbt::get_DBT method returns a pointer to this struct. Given a const
+Dbt object, Dbt::get_const_DBT returns a const pointer to the
+same struct.
+<p>Given a DBT struct, the Dbt::get_Dbt method returns the corresponding
+Dbt object, if there is one. If the DBT object was not
+associated with a Dbt (that is, it was not returned from a call
+to Dbt::get_DBT), then the result of Dbt::get_Dbt is undefined. Given
+a const DBT struct, Dbt::get_const_Dbt returns the associated const
+Dbt object, if there is one.
+<p>These methods may be useful for Berkeley DB applications including both C
+and C++ language software. It should not be necessary to use these
+calls in a purely C++ application.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/deadlock_class.html b/libdb/docs/api_cxx/deadlock_class.html
new file mode 100644
index 0000000..0ca1198
--- /dev/null
+++ b/libdb/docs/api_cxx/deadlock_class.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbDeadlockException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbDeadlockException</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbDeadlockException : public DbException { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbDeadlockException class and
+how it is used by the various Db* classes.
+<p>A DbDeadlockException is thrown when multiple threads competing
+for a lock are deadlocked. One of the threads' transactions is selected
+for termination, and a DbDeadlockException is thrown to that thread.
+<p>See <a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a> for more information.
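+<p>For example, a minimal abort-and-retry sketch (assuming
+&lt;db_cxx.h&gt; has been included, <b>env</b> is an open transactional
+environment, <b>db</b> is an open database handle, and <b>key</b> and
+<b>data</b> are initialized Dbt objects; all names are illustrative
+only):
+<pre>
+for (;;) {
+        DbTxn *txn = NULL;
+        try {
+                env.txn_begin(NULL, &txn, 0);
+                db->put(txn, &key, &data, 0);
+                txn->commit(0);
+                break;                           /* success */
+        } catch (DbDeadlockException &) {
+                if (txn != NULL)
+                        txn->abort();            /* lost the deadlock; retry */
+        }
+}
+</pre>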
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_class.html b/libdb/docs/api_cxx/env_class.html
new file mode 100644
index 0000000..82d4af9
--- /dev/null
+++ b/libdb/docs/api_cxx/env_class.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbEnv {
+public:
+ DbEnv(u_int32_t flags);
+ ~DbEnv();
+<p>
+ DB_ENV *DbEnv::get_DB_ENV();
+ const DB_ENV *DbEnv::get_const_DB_ENV() const;
+ static DbEnv *DbEnv::get_DbEnv(DB_ENV *dbenv);
+ static const DbEnv *DbEnv::get_const_DbEnv(const DB_ENV *dbenv);
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv object is the handle for a Berkeley DB environment -- a
+collection including support for some or all of caching, locking,
+logging and transaction subsystems, as well as databases and log files.
+Methods off the DbEnv handle are used to configure the
+environment as well as to operate on subsystems and databases in the
+environment.
+<p>DbEnv handles are free-threaded if the <a href="../api_cxx/env_open.html#DB_THREAD">DB_THREAD</a> flag
+is specified to the <a href="../api_cxx/env_open.html">DbEnv::open</a> method when the environment is opened.
+The DbEnv handle should not be closed while any other handle
+remains open that is using it as a reference (for example, <a href="../api_cxx/db_class.html">Db</a>
+or <a href="../api_cxx/txn_class.html">DbTxn</a>). Once either the <a href="../api_cxx/env_close.html">DbEnv::close</a> or
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a> methods are called, the handle may not be accessed again,
+regardless of the method's return.
+<p>The constructor creates the DbEnv object. The constructor
+allocates memory internally; calling the <a href="../api_cxx/env_close.html">DbEnv::close</a> or
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a> methods will free that memory.
+<p>The following <b>flags</b> value may be specified:
+<p><dl compact>
+<p><dt><a name="DB_CLIENT">DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the
+application must call the <a href="../api_cxx/env_set_rpc_server.html">DbEnv::set_rpc_server</a> method to
+establish the connection to the server.
+<p><dt><a name="DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a><dd>The Berkeley DB C++ API supports two different error behaviors. By default,
+whenever an error occurs, an exception is thrown that encapsulates the
+error information. This generally allows for cleaner logic for
+transaction processing because a try block can surround a single
+transaction. However, if DB_CXX_NO_EXCEPTIONS is specified,
+exceptions are not thrown; instead, each individual function returns an
+error code.
+</dl>
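+<p>For example, a minimal sketch of the return-code error model
+(assuming &lt;db_cxx.h&gt; has been included; the environment home
+directory is illustrative only):
+<pre>
+DbEnv env(DB_CXX_NO_EXCEPTIONS);         /* errors come back as return codes */
+int ret = env.open("/var/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0);
+if (ret != 0) {
+        /* Handle the error; with the default flags value of 0, the
+         * same failure would instead throw a DbException. */
+}
+</pre>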
+<p>Each DbEnv object has an associated DB_ENV structure,
+which is used by the underlying implementation of Berkeley DB and its
+C-language API. The DbEnv::get_DB_ENV method returns a pointer to this
+struct. Given a const DbEnv object, DbEnv::get_const_DB_ENV
+returns a const pointer to the same struct.
+<p>Given a DB_ENV struct, the DbEnv::get_DbEnv method returns the
+corresponding DbEnv object, if there is one. If the
+DB_ENV object was not associated with a DbEnv (that is,
+it was not returned from a call to DbEnv::get_DB_ENV), then the result
+of DbEnv::get_DbEnv is undefined. Given a const DB_ENV struct,
+DbEnv::get_const_DbEnv returns the associated const DbEnv
+object, if there is one.
+<p>These methods may be useful for Berkeley DB applications including both C
+and C++ language software. It should not be necessary to use these
+calls in a purely C++ application.
+<h1>Class</h1>
+DbEnv
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_close.html b/libdb/docs/api_cxx/env_close.html
new file mode 100644
index 0000000..38b2970
--- /dev/null
+++ b/libdb/docs/api_cxx/env_close.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+DbEnv::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::close method closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DbEnv::close does not imply closing any databases that
+were opened in the environment, and all databases opened in the
+environment should be closed before the environment is closed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> flag,
+calling DbEnv::close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+Processes that want to have all their locks
+released can do so by issuing the appropriate <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> call.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>
+flag, calling DbEnv::close implies calls to <a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>. It does not imply a call to
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_cxx/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag,
+calling DbEnv::close aborts any unresolved transactions.
+Applications should not depend on this behavior for transactions
+involving Berkeley DB databases; all such transactions should be explicitly
+resolved. The problem with depending on this semantic is that aborting
+an unresolved transaction involving database operations requires a
+database handle. Because the database handles should have been closed before
+calling DbEnv::close, it will not be possible to abort the
+transaction, and recovery will have to be run on the Berkeley DB environment
+before further operations are done.
+<p>Where log cursors were created using the <a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a> method, calling
+DbEnv::close does not imply closing those cursors.
+<p>In multithreaded applications, only a single thread may call
+DbEnv::close.
+<p>After DbEnv::close has been called, regardless of its return, the
+Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
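+<p>For example, a minimal shutdown sketch closing databases before the
+environment (assuming &lt;db_cxx.h&gt; has been included; the home
+directory and file name are illustrative only):
+<pre>
+DbEnv env(0);
+env.open("/var/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0);
+
+Db db(&env, 0);
+db.open(NULL, "inventory.db", NULL, DB_BTREE, DB_CREATE, 0);
+/* ... use the database ... */
+
+db.close(0);                             /* close databases first */
+env.close(0);                            /* the handle may not be used again */
+</pre>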
+<h1>Errors</h1>
+<p>The DbEnv::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::close method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_dbremove.html b/libdb/docs/api_cxx/env_dbremove.html
new file mode 100644
index 0000000..885276d
--- /dev/null
+++ b/libdb/docs/api_cxx/env_dbremove.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::dbremove</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::dbremove(DbTxn *txnid,
+ const char *file, const char *database, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::dbremove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_cxx/db_class.html">Db</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DbEnv::dbremove call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DbEnv::dbremove method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
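+<p>For example, a minimal sketch of a transactionally protected removal
+(assuming &lt;db_cxx.h&gt; has been included and <b>env</b> is an open
+transactional environment; the file name is illustrative only):
+<pre>
+int ret = env.dbremove(NULL, "inventory.db", NULL, DB_AUTO_COMMIT);
+/* ret is 0 on success; a missing file is reported as described under
+ * Errors below. */
+</pre>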
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DbEnv::dbremove is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv::dbremove method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the DbEnv::dbremove method will
+fail and either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The DbEnv::dbremove method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::dbremove method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_dbrename.html b/libdb/docs/api_cxx/env_dbrename.html
new file mode 100644
index 0000000..0aa9db0
--- /dev/null
+++ b/libdb/docs/api_cxx/env_dbrename.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::dbrename</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::dbrename(DbTxn *txnid, const char *file,
+ const char *database, const char *newname, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::dbrename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DbEnv::dbrename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>;
+otherwise, NULL.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>Enclose the DbEnv::dbrename call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DbEnv::dbrename method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
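+<p>For example, a minimal sketch of a transactionally protected rename
+(assuming &lt;db_cxx.h&gt; has been included and <b>env</b> is an open
+transactional environment; the file names are illustrative only):
+<pre>
+int ret = env.dbrename(NULL, "inventory.db", NULL, "inventory-old.db",
+    DB_AUTO_COMMIT);
+/* ret is 0 on success; a missing file is reported as described under
+ * Errors below. */
+</pre>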
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DbEnv::dbrename is affected by any database directory specified using the
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv::dbrename method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the DbEnv::dbrename method will
+fail and either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The DbEnv::dbrename method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::dbrename method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_err.html b/libdb/docs/api_cxx/env_err.html
new file mode 100644
index 0000000..d082009
--- /dev/null
+++ b/libdb/docs/api_cxx/env_err.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::err</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+DbEnv::err(int error, const char *fmt, ...);
+<p>
+DbEnv::errx(const char *fmt, ...);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::err, DbEnv::errx, <a href="../api_cxx/db_err.html">Db::err</a> and
+<a href="../api_cxx/db_err.html">Db::errx</a> methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The DbEnv::err method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback method has been set using the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method, any prefix string specified using the
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>An optional printf-style message<dd>The supplied message <b>fmt</b>, if non-NULL, in which the
+ANSI C X3.159-1989 (ANSI C) printf function specifies how subsequent arguments
+are converted for output.
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_cxx/env_strerror.html">DbEnv::strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback method has been set (see <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>
+and <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>), that method is called with two
+arguments: any prefix string specified (see <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> and
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>) and the error message.
+<p>If a C library FILE * has been set (see <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a> and
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>), the error message is written to that output
+stream.
+<p>If a C++ ostream has been set
+(see <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> and <a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a>),
+the error message is written to that stream.
+<p>If none of these output options has been configured, the error message
+is written to stderr, the standard
+error output stream.</blockquote>
+<p>The DbEnv::errx and <a href="../api_cxx/db_err.html">Db::errx</a> methods perform identically to the
+DbEnv::err and <a href="../api_cxx/db_err.html">Db::err</a> methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
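+<p>For example, a minimal sketch of reporting a failed open through the
+environment's configured error channels (assuming &lt;db_cxx.h&gt; has
+been included, <b>env</b> is an open environment, <b>db</b> is a Db
+handle created in that environment, and errors are returned rather than
+thrown; the file name is illustrative only):
+<pre>
+int ret = db.open(NULL, "inventory.db", NULL, DB_BTREE, 0, 0);
+if (ret != 0)
+        env.err(ret, "Db::open: %s", "inventory.db");
+</pre>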
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_list.html b/libdb/docs/api_cxx/env_list.html
new file mode 100644
index 0000000..e29234c
--- /dev/null
+++ b/libdb/docs/api_cxx/env_list.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Environments and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Environments and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Environments and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/env_close.html">DbEnv::close</a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_cxx/env_dbremove.html">DbEnv::dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_cxx/env_dbrename.html">DbEnv::dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_cxx/env_err.html">DbEnv::err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_cxx/env_err.html">DbEnv::errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_cxx/lock_get.html">DbEnv::lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_cxx/lock_id.html">DbEnv::lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_cxx/lock_put.html">DbEnv::lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><a href="../api_cxx/log_archive.html">DbEnv::log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_cxx/log_file.html">DbEnv::log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_cxx/log_flush.html">DbEnv::log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_cxx/log_put.html">DbEnv::log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_cxx/log_stat.html">DbEnv::log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_cxx/memp_register.html">DbEnv::memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_cxx/env_open.html">DbEnv::open</a></td><td>Open an environment</td></tr>
+<tr><td><a href="../api_cxx/env_remove.html">DbEnv::remove</a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_cxx/rep_start.html">DbEnv::rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_cxx/rep_stat.html">DbEnv::rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../api_cxx/env_set_app_dispatch.html">DbEnv::set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><a href="../api_cxx/env_set_encrypt.html">DbEnv::set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_cxx/env_set_feedback.html">DbEnv::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../api_cxx/rep_limit.html">DbEnv::set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_cxx/env_set_rpc_server.html">DbEnv::set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><a href="../api_cxx/env_set_shm_key.html">DbEnv::set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><a href="../api_cxx/env_set_tas_spins.html">DbEnv::set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_cxx/env_set_tmp_dir.html">DbEnv::set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_open.html b/libdb/docs/api_cxx/env_open.html
new file mode 100644
index 0000000..7dd9cf3
--- /dev/null
+++ b/libdb/docs/api_cxx/env_open.html
@@ -0,0 +1,194 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::open</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::open(const char *db_home, u_int32_t flags, int mode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::open method is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DbEnv::open (and filename
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p>Because there are a large number of flags that can be specified, they
+have been grouped together by functionality. The first group of flags
+indicates which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="DB_JOINENV">DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="DB_INIT_CDB">DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+DB_INIT_CDB flag is DB_INIT_MPOOL.
+<p><dt><a name="DB_INIT_LOCK">DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a
+Berkeley DB database, so that they do not interfere with each other. If all
+threads are accessing the database(s) read-only, locking is unnecessary.
+When the DB_INIT_LOCK flag is specified, it is usually necessary
+to run a deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a> for more information.
+<p><dt><a name="DB_INIT_LOG">DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem should be used when
+recovery from application or system failure is necessary. If the log
+region is being created and log files are already present, the log files
+are reviewed; subsequent log writes are appended to the end of the log,
+rather than overwriting current log entries.
+<p><dt><a name="DB_INIT_MPOOL">DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem
+should be used whenever an application is using any Berkeley DB access
+method.
+<p><dt><a name="DB_INIT_TXN">DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem should be used
+when recovery and atomicity of multiple operations are important. The
+DB_INIT_TXN flag implies the DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags govern what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal
+use. If this flag is set, the DB_CREATE flag must also be set
+because the regions will be removed and re-created.
+<p><dt><a name="DB_RECOVER_FATAL">DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for
+normal use. If this flag is set, the DB_CREATE flag must also
+be set because the regions will be removed and re-created.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct
+region initialization information (for example, the correct memory pool
+cache size), the result can be an application running in an environment
+with incorrect cache and other subsystem sizes. For this reason, the
+thread of control performing recovery should specify correct
+configuration information before calling the DbEnv::open method; or it
+should remove the environment after recovery is completed, leaving
+creation of the correctly sized environment to a subsequent call to
+DbEnv::open.
+<p>All Berkeley DB recovery processing must be single-threaded; that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. Because it is not an
+error to specify DB_RECOVER for an environment for which no
+recovery is required, it is reasonable programming practice for the
+thread of control responsible for performing recovery and creating the
+environment to always specify the DB_CREATE and
+DB_RECOVER flags during startup.
+<p>The DbEnv::open function returns successfully if DB_RECOVER
+or DB_RECOVER_FATAL is specified and no log files exist, so it
+is necessary to ensure that all necessary log files are present before
+running recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a>
+and <a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags govern file-naming extensions in the environment:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>Finally, there are a few additional unrelated flags:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="DB_LOCKDOWN">DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory-mapped databases into
+memory.
+<p><dt><a name="DB_PRIVATE">DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multithreaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment because it is likely to cause database
+corruption and unpredictable behavior. For example, if both a server
+application and the Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> are expected to access
+the environment, the DB_PRIVATE flag should not be
+specified.
+<p><dt><a name="DB_SYSTEM_MEM">DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="DB_THREAD">DB_THREAD</a><dd>Cause the <a href="../api_cxx/env_class.html">DbEnv</a> handle returned by DbEnv::open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+Berkeley DB are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, Berkeley DB will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>The DbEnv::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
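+<p>As an illustrative sketch only (the home directory argument and the choice
+of subsystems are placeholders), a transactional application might open its
+environment, running normal recovery at startup, as follows:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+open_env(DbEnv *dbenv, const char *home)
+{
+    u_int32_t flags;
+
+    /* Transactional environment; run normal recovery at startup. */
+    flags = DB_CREATE | DB_RECOVER |
+        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+
+    /* Mode 0: default of readable and writable by owner and group. */
+    return (dbenv->open(home, flags, 0));
+}
+</pre></blockquote>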
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The DB_THREAD flag was specified and fast mutexes are not
+available for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set, but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>If the file or directory does not exist, the DbEnv::open method will
+fail and either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The DbEnv::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::open method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_remove.html b/libdb/docs/api_cxx/env_remove.html
new file mode 100644
index 0000000..e3b2c11
--- /dev/null
+++ b/libdb/docs/api_cxx/env_remove.html
@@ -0,0 +1,116 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::remove</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::remove(const char *db_home, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::remove method destroys a Berkeley DB environment if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DbEnv::remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_cxx/env_open.html">DbEnv::open</a> without
+calling <a href="../api_cxx/env_close.html">DbEnv::close</a> (that is, there are processes currently
+using the environment), DbEnv::remove will fail without further
+action unless the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, in which case
+DbEnv::remove will attempt to remove the environment, regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed, and processes that have already
+joined the region will continue to run in that region without change.
+However, processes attempting to join the environment will either fail
+or create new regions. On other systems in which the <b>unlink</b>(2) system call will fail if any process has an open file descriptor for
+the file (for example, Windows/NT), the region removal will fail.
+<p>Calling DbEnv::remove should not be necessary for most applications
+because the Berkeley DB environment is cleaned up as part of normal database
+recovery procedures. However, applications may want to call
+DbEnv::remove as part of application shut down to free up system
+resources. For example, if the <a href="../api_cxx/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag was specified
+to <a href="../api_cxx/env_open.html">DbEnv::open</a>, it may be useful to call DbEnv::remove in
+order to release system shared memory segments that have been allocated.
+Or, on architectures in which mutexes require allocation of underlying
+system resources, it may be useful to call DbEnv::remove in order
+to release those resources. Alternatively, if recovery is not required
+because no database state is maintained across failures, and no system
+resources need to be released, it is possible to clean up an environment
+by simply removing all the Berkeley DB files in the database environment's
+directories.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>If the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is set, the environment is removed, regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_cxx/env_remove.html#DB_FORCE">DB_FORCE</a> flag is
+specified only when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_USE_ENVIRON">DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>In multithreaded applications, only a single thread may call
+DbEnv::remove.
+<p>A <a href="../api_cxx/env_class.html">DbEnv</a> handle that has already been used to open an environment
+should not be used to call the DbEnv::remove method; a new
+<a href="../api_cxx/env_class.html">DbEnv</a> handle should be created for that purpose.
+<p>After DbEnv::remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv::remove method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
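+<p>As an illustrative sketch only (the home directory is a placeholder), an
+application shutting down might remove its environment using a handle created
+solely for that purpose:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+remove_env(const char *home)
+{
+    DbEnv dbenv(0);
+
+    /* Fails (EBUSY) if other processes still have the environment open. */
+    return (dbenv.remove(home, 0));
+}
+</pre></blockquote>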
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>If the file or directory does not exist, the DbEnv::remove method will
+fail and either return ENOENT or
+throw a FileNotFoundException exception.
+<p>The DbEnv::remove method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::remove method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_alloc.html b/libdb/docs/api_cxx/env_set_alloc.html
new file mode 100644
index 0000000..3434093
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_alloc.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_alloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_alloc</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef void *(*db_malloc_fcn_type)(size_t);
+ typedef void *(*db_realloc_fcn_type)(void *, size_t);
+ typedef void (*db_free_fcn_type)(void *);
+};
+<p>
+int
+DbEnv::set_alloc(db_malloc_fcn_type app_malloc,
+ db_realloc_fcn_type app_realloc,
+ db_free_fcn_type app_free);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the allocation functions used by the <a href="../api_cxx/env_class.html">DbEnv</a> and <a href="../api_cxx/db_class.html">Db</a>
+methods to allocate or free memory owned by the application.
+<p>There are a number of interfaces in Berkeley DB where memory is allocated by
+the library and then given to the application. For example, the
+<a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a> flag, when specified in the <a href="../api_cxx/dbt_class.html">Dbt</a> object,
+will cause the <a href="../api_cxx/db_class.html">Db</a> methods to allocate and reallocate memory
+which then becomes the responsibility of the calling application. (See
+<a href="../api_cxx/dbt_class.html">Dbt</a> for more information.) Other examples are the Berkeley DB
+interfaces which return statistical information to the application:
+<a href="../api_cxx/db_stat.html">Db::stat</a>, <a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>, <a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>, <a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>, and <a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>. There is
+one interface in Berkeley DB where memory is allocated by the application
+and then given to the library: <a href="../api_cxx/db_associate.html">Db::associate</a>.
+<p>On systems in which there may be multiple library versions of the
+standard allocation routines (notably Windows NT), transferring memory
+between the library and the application will fail because the Berkeley DB
+library allocates memory from a different heap than the application uses
+to free it. To avoid this problem, the DbEnv::set_alloc and
+<a href="../api_cxx/db_set_alloc.html">Db::set_alloc</a> methods can be used to pass Berkeley DB references to the
+application's allocation routines.
+<p>It is not an error to specify only one or two of the possible allocation
+function arguments to these interfaces; however, in that case the
+specified interfaces must be compatible with the standard library
+interfaces, as they will be used together. The methods specified
+must match the calling conventions of the ANSI C X3.159-1989 (ANSI C) library routines
+of the same name.
+<p>The DbEnv::set_alloc method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_alloc interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+<p>The DbEnv::set_alloc method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
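+<p>As an illustrative sketch only, an application might pass its own allocation
+routines (here simply the C library's) to the handle before opening the
+environment:
+<p><blockquote><pre>
+#include &lt;stdlib.h&gt;
+#include &lt;db_cxx.h&gt;
+
+int
+configure_alloc(DbEnv *dbenv)
+{
+    /* Must be called before DbEnv::open. */
+    return (dbenv->set_alloc(malloc, realloc, free));
+}
+</pre></blockquote>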
+<h1>Errors</h1>
+<p>The DbEnv::set_alloc method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_alloc method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_alloc method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_app_dispatch.html b/libdb/docs/api_cxx/env_set_app_dispatch.html
new file mode 100644
index 0000000..10d69e5
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_app_dispatch.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_app_dispatch</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_app_dispatch</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_app_dispatch(int (*)(DbEnv *dbenv,
+ Dbt *log_rec, DbLsn *lsn, db_recops op));
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's method to be called during transaction abort
+and recovery. This method must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not; undo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward; redo the operation described by the log
+record.
+<p><dt><a name="DB_TXN_ABORT">DB_TXN_ABORT</a><dd>The log is being read backward during a transaction abort; undo the
+operation described by the log record.
+<p><dt><a name="DB_TXN_APPLY">DB_TXN_APPLY</a><dd>The log is being applied on a replica site; redo the operation
+described by the log record.
+<p><dt><a name="DB_TXN_PRINT">DB_TXN_PRINT</a><dd>The log is being printed for debugging purposes; print the contents of
+this log record in the desired format.
+</dl>
+</dl>
+<p>The DB_TXN_FORWARD_ROLL and DB_TXN_APPLY operations
+frequently imply the same actions, redoing changes that appear in the
+log record, although if a recovery function is to be used on a
+replication client where reads may be taking place concurrently with
+the processing of incoming messages, DB_TXN_APPLY operations
+should also perform appropriate locking. The macro DB_REDO(op) checks
+that the operation is one of DB_TXN_FORWARD_ROLL or
+DB_TXN_APPLY, and should be used in the recovery code to refer
+to the conditions under which operations should be redone. Similarly,
+the macro DB_UNDO(op) checks if the operation is one of
+DB_TXN_BACKWARD_ROLL or DB_TXN_ABORT.
+<p>The DbEnv::set_app_dispatch method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_app_dispatch interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_app_dispatch
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv::set_app_dispatch method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
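+<p>As an illustrative sketch only (the recovery actions themselves are
+application specific and are elided), a dispatch function might switch on the
+<b>op</b> argument and be registered before the environment is opened:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+my_dispatch(DbEnv *dbenv, Dbt *log_rec, DbLsn *lsn, db_recops op)
+{
+    switch (op) {
+    case DB_TXN_FORWARD_ROLL:
+    case DB_TXN_APPLY:
+        /* Redo the change described by log_rec. */
+        break;
+    case DB_TXN_BACKWARD_ROLL:
+    case DB_TXN_ABORT:
+        /* Undo the change described by log_rec. */
+        break;
+    case DB_TXN_PRINT:
+        /* Print the contents of log_rec. */
+        break;
+    default:
+        break;
+    }
+    return (0);
+}
+
+int
+configure_dispatch(DbEnv *dbenv)
+{
+    /* Must be called before DbEnv::open. */
+    return (dbenv->set_app_dispatch(my_dispatch));
+}
+</pre></blockquote>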
+<h1>Errors</h1>
+<p>The DbEnv::set_app_dispatch method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_app_dispatch method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_app_dispatch method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_cachesize.html b/libdb/docs/api_cxx/env_set_cachesize.html
new file mode 100644
index 0000000..8909250
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_cachesize.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>The DbEnv::set_cachesize method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_cachesize interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_cachesize
+will be ignored.
+<p>The DbEnv::set_cachesize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
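+<p>As an illustrative sketch only (the sizes shown are placeholders), an
+application might configure a single contiguous 1GB cache before opening the
+environment:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_cache(DbEnv *dbenv)
+{
+    /* 1 gigabyte plus 0 bytes, allocated as one contiguous region. */
+    return (dbenv->set_cachesize(1, 0, 1));
+}
+</pre></blockquote>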
+<h1>Errors</h1>
+<p>The DbEnv::set_cachesize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called after
+<a href="../api_cxx/env_open.html">DbEnv::open</a>
+was called.
+</dl>
+<p>The DbEnv::set_cachesize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_cachesize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_data_dir.html b/libdb/docs/api_cxx/env_set_data_dir.html
new file mode 100644
index 0000000..1c1cb01
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_data_dir.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_data_dir</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_data_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_cxx/db_open.html">Db::open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can exist only
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv::set_data_dir method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_data_dir interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_data_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv::set_data_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
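+<p>As an illustrative sketch only (the directory names are placeholders), an
+application might search two data directories, with new databases created in
+the first one specified:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_data_dirs(DbEnv *dbenv)
+{
+    int ret;
+
+    /* Calls are additive and must precede DbEnv::open. */
+    if ((ret = dbenv->set_data_dir("db1")) != 0)
+        return (ret);
+    return (dbenv->set_data_dir("db2"));
+}
+</pre></blockquote>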
+<h1>Errors</h1>
+<p>The DbEnv::set_data_dir method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_data_dir method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_data_dir method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_encrypt.html b/libdb/docs/api_cxx/env_set_encrypt.html
new file mode 100644
index 0000000..843cced
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_encrypt.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_encrypt(const char *passwd, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_cxx/env_class.html">DbEnv</a> and <a href="../api_cxx/db_class.html">Db</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_ENCRYPT_AES">DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>The DbEnv::set_encrypt method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_encrypt interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_encrypt
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DbEnv::set_encrypt method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
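+<p>As an illustrative sketch only (the password is a placeholder), an
+application might set an AES password before opening the environment:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+int
+configure_encryption(DbEnv *dbenv)
+{
+    /* Must be called before DbEnv::open. */
+    return (dbenv->set_encrypt("my secret passphrase", DB_ENCRYPT_AES));
+}
+</pre></blockquote>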
+<h1>Errors</h1>
+<p>The DbEnv::set_encrypt method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_cxx/env_open.html">DbEnv::open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The DbEnv::set_encrypt method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_encrypt method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_errcall.html b/libdb/docs/api_cxx/env_set_errcall.html
new file mode 100644
index 0000000..1410245
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_errcall.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errcall(
+ void (*db_errcall_fcn)(const char *errpfx, char *msg));
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The DbEnv::set_errcall and <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a> methods are used to
+enhance the mechanism for reporting error messages to the application.
+In some cases, when an error occurs, Berkeley DB will call
+<b>db_errcall_fcn</b> with additional error information. The function
+must be defined with two arguments; the first will be the prefix string
+(as previously set by <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>);
+the second will be the error message string. It is up to the
+<b>db_errcall_fcn</b> method to display the error message in an
+appropriate manner.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> and
+<a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a> methods to display the additional information via
+an output stream, or the <a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a> or
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> methods to display the additional information via a C
+library FILE *. You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv::set_errcall interface may be called at any time during the life of
+the application.
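+<p>As an illustrative sketch only (the callback and prefix names are
+placeholders), an application might route error messages to its own handler:
+<p><blockquote><pre>
+#include &lt;stdio.h&gt;
+#include &lt;db_cxx.h&gt;
+
+void
+my_errcall(const char *errpfx, char *msg)
+{
+    /* errpfx may be NULL if no prefix was set. */
+    (void)fprintf(stderr, "%s: %s\n", errpfx == NULL ? "db" : errpfx, msg);
+}
+
+void
+configure_errcall(DbEnv *dbenv)
+{
+    dbenv->set_errpfx("my_app");
+    dbenv->set_errcall(my_errcall);
+}
+</pre></blockquote>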
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_errfile.html b/libdb/docs/api_cxx/env_set_errfile.html
new file mode 100644
index 0000000..8b71570
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_errfile.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_errfile</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errfile(FILE *errfile);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+error return value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The <a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> and <a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a> methods are used to
+enhance the mechanism for reporting error messages to the application
+by setting a C library FILE * to be used for displaying additional Berkeley DB
+error messages. In some cases, when an error occurs, Berkeley DB will output
+an additional error message to the specified file reference.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a> method to display
+the additional information via an output stream, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> or <a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and
+a trailing &lt;newline&gt; character.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv::set_errfile interface may be called at any time during the life of
+the application.
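+<p>For example (a minimal sketch; the error file name and environment home
+are illustrative only), an application might direct additional Berkeley DB
+messages to its own log file:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+    FILE *errfp = fopen("myapp.err", "w");    /* Illustrative file name. */
+
+    if (errfp != NULL) {
+        dbenv.set_errpfx("myapp");
+        dbenv.set_errfile(errfp);    /* Additional messages go here. */
+    }
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0));
+}</pre></blockquote>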
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_error_stream.html b/libdb/docs/api_cxx/env_set_error_stream.html
new file mode 100644
index 0000000..d9b0c1c
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_error_stream.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_error_stream</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_error_stream(class ostream*);
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown or an
+<b>errno</b> value is returned by the method. In some cases,
+however, the <b>errno</b> value may be insufficient to completely
+describe the cause of the error, especially during initial application
+debugging.
+<p>The DbEnv::set_error_stream and <a href="../api_cxx/db_set_error_stream.html">Db::set_error_stream</a> methods
+are used to enhance the mechanism for reporting error messages to the
+application by setting the C++ ostream used for displaying additional
+Berkeley DB error messages. In some cases, when an error occurs, Berkeley DB will
+output an additional error message to the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a> method to display
+the additional information via a C library FILE *, or the
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a> method to capture the additional error information in
+a way that does not use either output streams or C library FILE *'s. You
+should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
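+<p>For example (a minimal sketch; whether <b>std::cerr</b> or the
+pre-standard <b>cerr</b> is required depends on how db_cxx.h was configured
+for the installation), an application might send additional Berkeley DB
+messages to the standard error stream:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+#include &lt;iostream&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_errpfx("myapp");
+    dbenv.set_error_stream(&amp;std::cerr);    /* C++ ostream for extra messages. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0));
+}</pre></blockquote>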
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_errpfx.html b/libdb/docs/api_cxx/env_set_errpfx.html
new file mode 100644
index 0000000..ab518e5
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_errpfx.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+void DbEnv::set_errpfx(const char *errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The <a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a> and DbEnv::set_errpfx methods do not copy
+the memory to which the <b>errpfx</b> argument refers; rather, they
+maintain a reference to it. Although this allows applications to modify
+the error message prefix at any time (without repeatedly calling the
+interfaces), it means the memory must be maintained until the handle is
+closed.
+<p>The DbEnv::set_errpfx interface may be called at any time during the life of
+the application.
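+<p>For example (a minimal sketch; the prefix and environment home are
+illustrative only), because the prefix memory is not copied, a string
+literal or other storage that outlives the handle is a natural choice:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    /* The literal has static storage duration, so it remains valid. */
+    dbenv.set_errpfx("myapp");
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_MPOOL, 0));
+}</pre></blockquote>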
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_feedback.html b/libdb/docs/api_cxx/env_set_feedback.html
new file mode 100644
index 0000000..2339c35
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_feedback.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_feedback(
+ void (*db_feedback_fcn)(DbEnv *, int opcode, int pct));
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DbEnv::set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="DB_RECOVER">DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DbEnv::set_feedback interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_feedback method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
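+<p>For example (a minimal sketch; the callback name and environment home are
+illustrative only), an application running recovery might report progress
+as follows:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Illustrative callback: report recovery progress. */
+static void
+my_feedback(DbEnv *env, int opcode, int pct)
+{
+    if (opcode == DB_RECOVER)
+        printf("recovery %d%% complete\n", pct);
+}
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_feedback(my_feedback);
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0));
+}</pre></blockquote>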
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_flags.html b/libdb/docs/api_cxx/env_set_flags.html
new file mode 100644
index 0000000..42c639e
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_flags.html
@@ -0,0 +1,242 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_flags(u_int32_t flags, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>If <b>onoff</b> is zero, the specified flags are cleared; otherwise, they
+are set. The <b>flags</b> value must be set to 0 or by bitwise inclusively
+<b>OR</b>'ing together one or more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_AUTO_COMMIT">DB_AUTO_COMMIT</a><dd>If set, operations for which no explicit transaction handle was
+specified, and which modify databases in the database environment, will
+be automatically enclosed within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+<p>Calling DbEnv::set_flags with the <a href="../api_cxx/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the <a href="../api_cxx/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The <a href="../api_cxx/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_CDB_ALLDB">DB_CDB_ALLDB</a><dd>If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide
+basis rather than on a per-database basis.
+<p>Calling DbEnv::set_flags with the DB_CDB_ALLDB flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_CDB_ALLDB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_CDB_ALLDB flag may be used to configure Berkeley DB only before the
+<a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<a name="4"><!--meow--></a>
+<p><dt><a name="DB_DIRECT_DB">DB_DIRECT_DB</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB database files to avoid double caching.
+<p>Calling DbEnv::set_flags with the DB_DIRECT_DB flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_DIRECT_DB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_DIRECT_DB flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="5"><!--meow--></a>
+<p><dt><a name="DB_DIRECT_LOG">DB_DIRECT_LOG</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB log files to avoid double caching.
+<p>Calling DbEnv::set_flags with the DB_DIRECT_LOG flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_DIRECT_LOG flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_DIRECT_LOG flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="6"><!--meow--></a>
+<p><dt><a name="DB_NOLOCKING">DB_NOLOCKING</a><dd>If set, Berkeley DB will grant all requested mutual exclusion mutexes and
+database locks without regard for their actual availability. This
+functionality should never be used for purposes other than debugging.
+<p>Calling DbEnv::set_flags with the DB_NOLOCKING flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_NOLOCKING flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="7"><!--meow--></a>
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>If set, Berkeley DB will copy read-only database files into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further
+information).
+<p>Calling DbEnv::set_flags with the DB_NOMMAP flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_NOMMAP flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_NOMMAP flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="8"><!--meow--></a>
+<p><dt><a name="DB_NOPANIC">DB_NOPANIC</a><dd>If set, Berkeley DB will ignore any panic state in the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>.) This
+functionality should never be used for purposes other than debugging.
+<p>Calling DbEnv::set_flags with the DB_NOPANIC flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_NOPANIC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<p><dt><a name="DB_OVERWRITE">DB_OVERWRITE</a><dd>Overwrite files stored in encrypted formats before deleting them. Berkeley DB
+overwrites files using alternating 0xff, 0x00 and 0xff byte patterns.
+For file overwriting to be effective, the underlying file must be stored
+on a fixed-block filesystem. Systems with journaling or logging filesystems
+will require operating system support and probably modification of the
+Berkeley DB sources.
+<p>Calling DbEnv::set_flags with the DB_OVERWRITE flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The DB_OVERWRITE flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="9"><!--meow--></a>
+<p><dt><a name="DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a><dd>If set, Berkeley DB will set the panic state for the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>.) This flag may
+not be specified using the environment's <b>DB_CONFIG</b> file. This
+flag may be used to configure Berkeley DB only after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+<p>Calling DbEnv::set_flags with the DB_PANIC_ENVIRONMENT flag affects the
+database environment, including all threads of control accessing the
+database environment.
+<p>The DB_PANIC_ENVIRONMENT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="10"><!--meow--></a>
+<p><dt><a name="DB_REGION_INIT">DB_REGION_INIT</a><dd>In some applications, the expense of page-faulting the underlying shared
+memory regions can affect performance. (For example, if the page-fault
+occurs while holding a lock, other lock requests can convoy, and overall
+throughput may decrease.) If set, Berkeley DB will page-fault shared regions
+into memory when initially creating or joining a Berkeley DB environment. In
+addition, Berkeley DB will write the shared regions when creating an
+environment, forcing the underlying virtual memory and filesystems to
+instantiate both the necessary memory and the necessary disk space.
+This can also avoid out-of-disk space failures later on.
+<p>Calling DbEnv::set_flags with the DB_REGION_INIT flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_REGION_INIT flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_REGION_INIT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="11"><!--meow--></a>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>If set, Berkeley DB will not write or synchronously flush the log on transaction
+commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the application or system fails,
+it is possible some number of the most recently committed transactions
+may be undone during recovery. The number of transactions at risk is
+governed by how many log updates can fit into the log buffer, how often
+the operating system flushes dirty buffers to disk, and how often the
+log is checkpointed.
+<p>Calling DbEnv::set_flags with the DB_TXN_NOSYNC flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_TXN_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_TXN_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="12"><!--meow--></a>
+<p><dt><a name="DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a><dd>If set, Berkeley DB will write, but will not synchronously flush, the log on
+transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the system fails, it is possible
+some number of the most recently committed transactions may be undone
+during recovery. The number of transactions at risk is governed by how
+often the system flushes dirty buffers to disk and how often the log is
+checkpointed.
+<p>Calling DbEnv::set_flags with the DB_TXN_WRITE_NOSYNC flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_TXN_WRITE_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_TXN_WRITE_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="13"><!--meow--></a>
+<p><dt><a name="DB_YIELDCPU">DB_YIELDCPU</a><dd>If set, Berkeley DB will yield the processor immediately after each page or
+mutex acquisition. This functionality should never be used for purposes
+other than stress testing.
+<p>Calling DbEnv::set_flags with the DB_YIELDCPU flag only affects
+the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_cxx/env_class.html">DbEnv</a>
+handles opened in the environment must either set the DB_YIELDCPU flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The DB_YIELDCPU flag may be used to configure Berkeley DB at any time during
+the life of the application.
+</dl>
+<p>The DbEnv::set_flags method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string; for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
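+<p>For example (a minimal sketch; the environment home is illustrative only),
+an application willing to trade durability for throughput might enable
+<a href="../api_cxx/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> on its environment handle:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+    int ret;
+
+    /* Do not synchronously flush the log on transaction commit. */
+    if ((ret = dbenv.set_flags(DB_TXN_NOSYNC, 1)) != 0)
+        return (ret);
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>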
+<h1>Errors</h1>
+<p>The DbEnv::set_flags method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::set_flags method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_flags method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lg_bsize.html b/libdb/docs/api_cxx/env_set_lg_bsize.html
new file mode 100644
index 0000000..10d7ce9
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lg_bsize.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_bsize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_bsize(u_int32_t lg_bsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used. The size of the log file
+(see <a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>) must be at least four times the size of
+the in-memory log buffer.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DbEnv::set_lg_bsize method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lg_bsize interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lg_bsize
+will be ignored.
+<p>The DbEnv::set_lg_bsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
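+<p>For example (a minimal sketch; the sizes and environment home are
+illustrative only), an application expecting large transactions might select
+a 256KB log buffer and a log file large enough to satisfy the four-times
+constraint before opening the environment:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lg_bsize(256 * 1024);        /* 256KB log buffer. */
+    dbenv.set_lg_max(4 * 256 * 1024);      /* At least four times the buffer. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>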
+<h1>Errors</h1>
+<p>The DbEnv::set_lg_bsize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+</dl>
+<p>The DbEnv::set_lg_bsize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lg_bsize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lg_dir.html b/libdb/docs/api_cxx/env_set_lg_dir.html
new file mode 100644
index 0000000..87749c9
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lg_dir.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_dir</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<p>The path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv::set_lg_dir method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_lg_dir interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lg_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv::set_lg_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
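+<p>For example (a minimal sketch; the directory names are illustrative only),
+an application placing its log files on a separate physical device might
+configure the environment as follows:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lg_dir("/logdisk/myapp");    /* Illustrative log directory. */
+    return (dbenv.open("/dbdisk/myapp", DB_CREATE | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>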
+<h1>Errors</h1>
+<p>The DbEnv::set_lg_dir method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lg_dir method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lg_dir method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lg_max.html b/libdb/docs/api_cxx/env_set_lg_max.html
new file mode 100644
index 0000000..ae71acb
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lg_max.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_max</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_max(u_int32_t lg_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. By default,
+or if the <b>lg_max</b> argument is set to 0, a size of 10MB is used.
+Because <a href="../api_cxx/lsn_class.html">DbLsn</a> file offsets are unsigned four-byte values, the
+set value may not be larger than the maximum unsigned four-byte value.
+The size of the log file must be at least four times the size of the
+in-memory log buffer (see <a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>).
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DbEnv::set_lg_max method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lg_max interface may be called at any time during the life of
+the application.
+If no size is specified by the application, the size last specified for
+the database region will be used, or if no database region previously
+existed, the default will be used.
+<p>The DbEnv::set_lg_max method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
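+<p>For example (a minimal sketch; the size and environment home are
+illustrative only), an application that wants smaller, more frequently
+archived log files might lower the maximum from the 10MB default:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lg_max(2 * 1024 * 1024);     /* 2MB log files. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>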
+<h1>Errors</h1>
+<p>The DbEnv::set_lg_max method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+<p>The specified log file size was too large.
+</dl>
+<p>The DbEnv::set_lg_max method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lg_max method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lg_regionmax.html b/libdb/docs/api_cxx/env_set_lg_regionmax.html
new file mode 100644
index 0000000..06be86f
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lg_regionmax.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lg_regionmax</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lg_regionmax</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lg_regionmax(u_int32_t lg_regionmax);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the underlying logging subsystem region, in bytes. By
+default, or if the value is set to 0, the base region size is 60KB.
+The log region is used to store filenames, and so may need to be
+increased in size if a large number of files will be opened and
+registered with the specified Berkeley DB environment's log manager.
+<p>The DbEnv::set_lg_regionmax method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lg_regionmax interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lg_regionmax
+will be ignored.
+<p>The DbEnv::set_lg_regionmax method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's log region size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_regionmax", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
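+<p>For example (a minimal sketch; the size and environment home are
+illustrative only), an application that opens and registers a large number
+of database files might enlarge the logging region before opening the
+environment:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lg_regionmax(1024 * 1024);   /* 1MB for logged file names. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>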
+<h1>Errors</h1>
+<p>The DbEnv::set_lg_regionmax method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lg_regionmax method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lg_regionmax method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lk_conflicts.html b/libdb/docs/api_cxx/env_set_lk_conflicts.html
new file mode 100644
index 0000000..fce96aa
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lk_conflicts.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_conflicts</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_conflicts(u_int8_t *conflicts, int nmodes);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+The <b>conflicts</b> argument
+is an <b>nmodes</b> by <b>nmodes</b> array.
+A non-0 value for the array element indicates that requested_mode and
+held_mode conflict:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>The <i>not-granted</i> mode must be represented by 0.
+<p>If DbEnv::set_lk_conflicts is never called, a standard conflicts
+array is used; see <a href="../ref/lock/stdmode.html">Standard Lock
+Modes</a> for more information.
+<p>The DbEnv::set_lk_conflicts method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lk_conflicts interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lk_conflicts
+will be ignored.
+<p>The DbEnv::set_lk_conflicts method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
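+<p>For example (a minimal sketch; the mode numbering and environment home are
+illustrative only), an application using only the modes <i>not-granted</i>
+(0), read (1), and write (2) might install the following 3-by-3 matrix, in
+which writes conflict with both reads and writes:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+    /* conflicts[requested_mode][held_mode]; row and column 0 are not-granted. */
+    u_int8_t conflicts[3][3] = {
+        /*           held: none  read  write */
+        /* none  */      {   0,    0,    0 },
+        /* read  */      {   0,    0,    1 },
+        /* write */      {   0,    1,    1 }
+    };
+
+    dbenv.set_lk_conflicts((u_int8_t *)conflicts, 3);
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_MPOOL, 0));
+}</pre></blockquote>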
+<h1>Errors</h1>
+<p>The DbEnv::set_lk_conflicts method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<p>The DbEnv::set_lk_conflicts method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lk_conflicts method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lk_detect.html b/libdb/docs/api_cxx/env_set_lk_detect.html
new file mode 100644
index 0000000..11491c8
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lk_detect.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_detect</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_detect(u_int32_t detect);
+</pre></h3>
+<h1>Description</h1>
+<p>Set if the deadlock detector is to be run whenever a lock conflict
+occurs, and specify what lock request(s) should be rejected. As
+transactions acquire locks on behalf of a single locker ID, rejecting
+a lock request associated with a transaction normally requires the
+transaction be aborted. The specified value must be one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to DB_LOCK_RANDOM.
+<dt><a name="DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest number of
+locks.
+<dt><a name="DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest number of
+write locks.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>The DbEnv::set_lk_detect method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lk_detect interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lk_detect
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DbEnv::set_lk_detect method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string; for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
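+<p>For example (a minimal sketch; the environment home is illustrative only),
+an application might enable automatic deadlock detection, rejecting one
+locker's request, chosen at random, whenever a conflict creates a deadlock:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lk_detect(DB_LOCK_RANDOM);   /* Run detection on each conflict. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0));
+}</pre></blockquote>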
+<h1>Errors</h1>
+<p>The DbEnv::set_lk_detect method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lk_detect method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lk_detect method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lk_max_lockers.html b/libdb/docs/api_cxx/env_set_lk_max_lockers.html
new file mode 100644
index 0000000..96a2754
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lk_max_lockers.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_lockers</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_lockers(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 lockers. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv::set_lk_max_lockers method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lk_max_lockers interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lk_max_lockers
+will be ignored.
+<p>The DbEnv::set_lk_max_lockers method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
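+<p>For example (a minimal sketch; the counts and environment home are
+illustrative only), an application expecting several thousand concurrent
+locking entities might size the lock subsystem before opening the
+environment, typically together with <a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a> and
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;
+
+int
+main()
+{
+    DbEnv dbenv(0);
+
+    dbenv.set_lk_max_lockers(5000);        /* Simultaneous locking entities. */
+    dbenv.set_lk_max_locks(10000);         /* Simultaneous locks. */
+    dbenv.set_lk_max_objects(10000);       /* Simultaneously locked objects. */
+    return (dbenv.open("/tmp/dbhome", DB_CREATE | DB_INIT_LOCK |
+        DB_INIT_MPOOL, 0));
+}</pre></blockquote>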
+<h1>Errors</h1>
+<p>The DbEnv::set_lk_max_lockers method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lk_max_lockers method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lk_max_lockers method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lk_max_locks.html b/libdb/docs/api_cxx/env_set_lk_max_locks.html
new file mode 100644
index 0000000..f55c4fc
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lk_max_locks.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_locks</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_locks(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to estimate how much space to
+allocate for various lock-table data structures. The default value is
+1000 locks. For specific information on configuring the size of the lock
+subsystem, see <a href="../ref/lock/max.html">Configuring locking:
+sizing the system</a>.
+<p>The DbEnv::set_lk_max_locks method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lk_max_locks interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lk_max_locks
+will be ignored.
+<p>The DbEnv::set_lk_max_locks method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
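+<p>As an illustrative sketch only (the environment home and the limits shown
+are assumed values, not recommendations), the lock-table limits are normally
+configured on the handle before the environment is opened:
+<p><pre>
+DbEnv env(0);
+// Size the lock table before DbEnv::open; the values are examples only.
+env.set_lk_max_lockers(2000);
+env.set_lk_max_locks(5000);
+env.set_lk_max_objects(5000);
+// "/home/db" is a hypothetical environment home directory.
+env.open("/home/db", DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0);
+</pre>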
+<h1>Errors</h1>
+<p>The DbEnv::set_lk_max_locks method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lk_max_locks method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lk_max_locks method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_lk_max_objects.html b/libdb/docs/api_cxx/env_set_lk_max_objects.html
new file mode 100644
index 0000000..e00a4b7
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_lk_max_objects.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_lk_max_objects</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_lk_max_objects(u_int32_t max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_cxx/env_open.html">DbEnv::open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 objects. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv::set_lk_max_objects method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_lk_max_objects interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_lk_max_objects
+will be ignored.
+<p>The DbEnv::set_lk_max_objects method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p>The DbEnv::set_lk_max_objects method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_lk_max_objects method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_lk_max_objects method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_mp_mmapsize.html b/libdb/docs/api_cxx/env_set_mp_mmapsize.html
new file mode 100644
index 0000000..3970011
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_mp_mmapsize.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_mp_mmapsize</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_mp_mmapsize(size_t mp_mmapsize);
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few
+other criteria) are, by default, mapped into the process address space
+instead of being copied into the local cache. This can result in
+better-than-usual performance because available virtual memory is
+normally much larger than the local cache, and page faults are faster
+than page copying on many systems. However, it can cause resource
+starvation in the presence of limited virtual memory, and it can result
+in immense process sizes in the presence of large databases.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DbEnv::set_mp_mmapsize method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_mp_mmapsize interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_mp_mmapsize method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
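+<p>For example (a sketch; the 16MB threshold is an assumed value), the mapped
+file size can be changed on the handle at any time:
+<p><pre>
+DbEnv env(0);
+// Map read-only files of up to 16MB rather than copying them into the cache.
+env.set_mp_mmapsize(16 * 1024 * 1024);
+</pre>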
+<h1>Errors</h1>
+<p>The DbEnv::set_mp_mmapsize method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_mp_mmapsize method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_mp_mmapsize method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_paniccall.html b/libdb/docs/api_cxx/env_set_paniccall.html
new file mode 100644
index 0000000..99b411b
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_paniccall.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_paniccall</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_paniccall(void (*)(DbEnv *, int));
+</pre></h3>
+<h1>Description</h1>
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). In these cases, when the C++ error model has
+been configured so that the individual Berkeley DB methods return error codes
+(see <a href="../api_cxx/except_class.html">DbException</a> for more information), the value
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned by Berkeley DB methods.
+<p>In these cases, it is also often simpler to shut down the application
+when such errors occur rather than to try to gracefully return up the
+stack. The DbEnv::set_paniccall and <a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a> methods
+are used to specify methods to be called when
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is about to be returned from a Berkeley DB interface.
+When called, the <b>dbenv</b> argument is a pointer to the
+current environment, and the <b>errval</b> argument is the error value
+that would have been returned to the calling method.
+<p>The DbEnv::set_paniccall interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_paniccall method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
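+<p>A minimal sketch of a panic callback follows; the function name and its
+behavior are assumptions, and an application might log or clean up differently:
+<p><pre>
+#include &lt;cstdio&gt;
+#include &lt;cstdlib&gt;
+
+// Hypothetical callback: report the fatal error, then terminate so the
+// application can be restarted and recovery run.
+void panic_handler(DbEnv *dbenv, int errval)
+{
+    fprintf(stderr, "fatal Berkeley DB error: %s\n", DbEnv::strerror(errval));
+    abort();
+}
+
+// Registered on the handle, for example: env.set_paniccall(panic_handler);
+</pre>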
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_rpc_server.html b/libdb/docs/api_cxx/env_set_rpc_server.html
new file mode 100644
index 0000000..baf5056
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_rpc_server.html
@@ -0,0 +1,84 @@
+<!--"@(#)env_set_rpc_server.so 10.1 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_rpc_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_rpc_server</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_rpc_server(CLIENT *client, char *host,
+ long cl_timeout, long sv_timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>Establishes a connection for this <b>dbenv</b> to an RPC server. If
+the <b>client</b> argument is NULL, this call creates a connection to
+the Berkeley DB server on the indicated hostname and sets up a channel for
+communication.
+If the <b>client</b> channel has been provided by the
+application, Berkeley DB will use it as its connection, and the <b>host</b> and
+<b>cl_timeout</b> arguments are ignored.
+<a name="3"><!--meow--></a>
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<a name="4"><!--meow--></a>
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that the
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+DB_NOSERVER_ID, indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DbEnv::set_rpc_server method has been called, subsequent calls
+to Berkeley DB library interfaces may return or throw exceptions encapsulating
+<a name="DB_NOSERVER">DB_NOSERVER</a>, <a name="DB_NOSERVER_ID">DB_NOSERVER_ID</a>, or
+<a name="DB_NOSERVER_HOME">DB_NOSERVER_HOME</a>.
+<p>The DbEnv::set_rpc_server method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_rpc_server interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+<p>The DbEnv::set_rpc_server method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
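+<p>A sketch of a client-side call, assuming <b>env</b> is a <a href="../api_cxx/env_class.html">DbEnv</a> handle
+that has not yet been opened; the host name and timeout values are hypothetical:
+<p><pre>
+// NULL client: let Berkeley DB create the connection to the named server,
+// with a 30-second client timeout and a 120-second server idle timeout.
+env.set_rpc_server(NULL, (char *)"db.example.com", 30, 120, 0);
+</pre>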
+<h1>Errors</h1>
+<p>The DbEnv::set_rpc_server method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::set_rpc_server method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_rpc_server method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_shm_key.html b/libdb/docs/api_cxx/env_set_shm_key.html
new file mode 100644
index 0000000..b2eb7ec
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_shm_key.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_shm_key</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_shm_key(long shm_key);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces; for example, UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented by a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35,
+and the next one will have a segment ID between 36 and 40 or so. A
+Berkeley DB environment always creates a master shared memory region; an
+additional shared memory region for each of the subsystems supported by
+the environment (Locking, Logging, Memory Pool and Transaction); plus
+an additional shared memory region for each additional memory pool cache
+that is supported. Already existing regions with the same segment IDs
+will be removed. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p>The intent behind this interface is two-fold: without it, applications
+have no way to ensure that two Berkeley DB applications don't attempt to use
+the same segment IDs when creating different Berkeley DB environments. In
+addition, by using the same segment IDs each time the environment is
+created, previously created segments will be removed, and the set of
+segments on the system will not grow without bound.
+<p>The DbEnv::set_shm_key method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_shm_key interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_shm_key
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv::set_shm_key method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
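+<p>A sketch (the base segment ID and environment home are assumed values):
+<p><pre>
+DbEnv env(0);
+// Regions created by this environment use segment IDs starting at 35.
+env.set_shm_key(35);
+// DB_SYSTEM_SHM requests that regions be created in system shared memory.
+env.open("/home/db", DB_CREATE | DB_SYSTEM_SHM | DB_INIT_MPOOL, 0);
+</pre>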
+<h1>Errors</h1>
+<p>The DbEnv::set_shm_key method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_shm_key method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_shm_key method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_tas_spins.html b/libdb/docs/api_cxx/env_set_tas_spins.html
new file mode 100644
index 0000000..cecfc68
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_tas_spins.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_tas_spins</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tas_spins(u_int32_t tas_spins);
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The DbEnv::set_tas_spins method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_tas_spins interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_tas_spins method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
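+<p>For example (the spin count shown is an assumed value):
+<p><pre>
+DbEnv env(0);
+// Spin 200 times on a test-and-set mutex before blocking.
+env.set_tas_spins(200);
+</pre>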
+<h1>Errors</h1>
+<p>The DbEnv::set_tas_spins method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::set_tas_spins method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_tas_spins method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_timeout.html b/libdb/docs/api_cxx/env_set_timeout.html
new file mode 100644
index 0000000..1482627
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_timeout.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_timeout(db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_timeout method sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> and
+<a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a> for more information.
+<p>The DbEnv::set_timeout method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_timeout interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_timeout method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
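+<p>A sketch setting both timeouts on a handle (the values, expressed in
+microseconds, are assumed, not recommendations):
+<p><pre>
+DbEnv env(0);
+// Lock requests time out after 1 second, transactions after 5 seconds.
+env.set_timeout(1000000, DB_SET_LOCK_TIMEOUT);
+env.set_timeout(5000000, DB_SET_TXN_TIMEOUT);
+</pre>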
+<h1>Errors</h1>
+<p>The DbEnv::set_timeout method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::set_timeout method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_timeout method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_tmp_dir.html b/libdb/docs/api_cxx/env_set_tmp_dir.html
new file mode 100644
index 0000000..c4c7cb8
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_tmp_dir.html
@@ -0,0 +1,92 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_tmp_dir</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tmp_dir(const char *dir);
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>Set the path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_cxx/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../api_cxx/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags were
+specified.
+<p>Note: the GetTempPath interface is only checked on Win32 platforms.
+<p>The DbEnv::set_tmp_dir method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_tmp_dir interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_tmp_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv::set_tmp_dir method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
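+<p>For example (the directory shown is hypothetical):
+<p><pre>
+DbEnv env(0);
+// Back in-memory databases with temporary files under /var/tmp/db-scratch.
+env.set_tmp_dir("/var/tmp/db-scratch");
+</pre>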
+<h1>Errors</h1>
+<p>The DbEnv::set_tmp_dir method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_tmp_dir method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_tmp_dir method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_tx_max.html b/libdb/docs/api_cxx/env_set_tx_max.html
new file mode 100644
index 0000000..2e90c66
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_tx_max.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_tx_max</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tx_max(u_int32_t tx_max);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DbEnv::set_tx_max method configures a database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_tx_max interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_cxx/env_open.html">DbEnv::open</a> is called, the information specified to DbEnv::set_tx_max
+will be ignored.
+<p>The DbEnv::set_tx_max method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
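+<p>A sketch (the limit and environment home are assumed values):
+<p><pre>
+DbEnv env(0);
+// Allow up to 100 concurrently active transactions.
+env.set_tx_max(100);
+env.open("/home/db",
+    DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+</pre>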
+<h1>Errors</h1>
+<p>The DbEnv::set_tx_max method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_tx_max method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_tx_max method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_tx_timestamp.html b/libdb/docs/api_cxx/env_set_tx_timestamp.html
new file mode 100644
index 0000000..3ab91bc
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_tx_timestamp.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_tx_timestamp</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_tx_timestamp(time_t *timestamp);
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+The <b>timestamp</b> argument should be the number of seconds since 0
+hours, 0 minutes, 0 seconds, January 1, 1970, Coordinated Universal
+Time; that is, the Epoch.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DbEnv::set_tx_timestamp method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_tx_timestamp interface may not be called after the <a href="../api_cxx/env_open.html">DbEnv::open</a>
+interface is called.
+<p>The DbEnv::set_tx_timestamp method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
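+<p>A sketch of recovering to a point in time; the timestamp and environment
+home are hypothetical, and catastrophic recovery is requested here with the
+DB_RECOVER_FATAL open flag (ordinary DB_RECOVER may also be appropriate):
+<p><pre>
+DbEnv env(0);
+time_t when = 1020000000;      // hypothetical point in time, in Epoch seconds
+env.set_tx_timestamp(&when);
+env.open("/home/db",
+    DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+    DB_RECOVER_FATAL, 0);
+</pre>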
+<h1>Errors</h1>
+<p>The DbEnv::set_tx_timestamp method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the log files
+currently present in the environment.
+</dl>
+<p>The DbEnv::set_tx_timestamp method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_tx_timestamp method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_set_verbose.html b/libdb/docs/api_cxx/env_set_verbose.html
new file mode 100644
index 0000000..c631286
--- /dev/null
+++ b/libdb/docs/api_cxx/env_set_verbose.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_verbose</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_verbose(u_int32_t which, int onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_verbose method turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to
+non-zero,
+the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_VERB_CHKPOINT">DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="DB_VERB_DEADLOCK">DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="DB_VERB_RECOVERY">DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="DB_VERB_REPLICATION">DB_VERB_REPLICATION</a><dd>Display additional information when processing replication messages.
+<p><dt><a name="DB_VERB_WAITSFOR">DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DbEnv::set_verbose method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_verbose interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_verbose method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string; for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
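+<p>For example (a sketch):
+<p><pre>
+DbEnv env(0);
+// Report deadlock-detection details; pass 0 as the second argument to turn
+// the messages off again.
+env.set_verbose(DB_VERB_DEADLOCK, 1);
+</pre>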
+<h1>Errors</h1>
+<p>The DbEnv::set_verbose method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::set_verbose method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_verbose method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_strerror.html b/libdb/docs/api_cxx/env_strerror.html
new file mode 100644
index 0000000..dd00a9b
--- /dev/null
+++ b/libdb/docs/api_cxx/env_strerror.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::strerror</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static char *
+DbEnv::strerror(int error);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::strerror method returns an error message string corresponding
+to the error number <b>error</b>. This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>error</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
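+<p>For example:
+<p><pre>
+// DB_NOTFOUND is a Berkeley DB error (a negative value), so the library's
+// own message is returned rather than the system strerror(3) text.
+char *msg = DbEnv::strerror(DB_NOTFOUND);
+</pre>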
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/env_version.html b/libdb/docs/api_cxx/env_version.html
new file mode 100644
index 0000000..19d6312
--- /dev/null
+++ b/libdb/docs/api_cxx/env_version.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::version</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static char *
+DbEnv::version(int *major, int *minor, int *patch);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::version method returns a pointer to a string containing
+Berkeley DB version information. If <b>major</b> is non-NULL, the major
+version of the Berkeley DB release is stored in the memory to which it refers.
+If <b>minor</b> is non-NULL, the minor version of the Berkeley DB release
+is stored in the memory to which it refers. If <b>patch</b> is
+non-NULL, the patch version of the Berkeley DB release is stored in the
+memory to which it refers.
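+<p>For example:
+<p><pre>
+int major, minor, patch;
+char *version = DbEnv::version(&major, &minor, &patch);
+// "version" points to the release string; major, minor, and patch now hold
+// the numeric components.
+</pre>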
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/except_class.html b/libdb/docs/api_cxx/except_class.html
new file mode 100644
index 0000000..af9c3a7
--- /dev/null
+++ b/libdb/docs/api_cxx/except_class.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbException</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbException {
+public:
+ int get_errno() const;
+ virtual const char *what() const;
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbException class and how it is used by
+the various Berkeley DB classes.
+<p>Most methods in the Berkeley DB classes return an int but can also throw an
+exception, which allows for two different error behaviors. By default,
+the Berkeley DB C++ API is configured to throw an exception whenever a serious
+error occurs. This generally allows for cleaner logic for transaction
+processing because a try block can surround a single transaction.
+Alternatively, Berkeley DB can be configured not to throw exceptions, and
+instead have the individual method return an error code, by setting
+the <a href="../api_cxx/env_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a> flag for the <a href="../api_cxx/db_class.html">Db</a> and <a href="../api_cxx/env_class.html">DbEnv</a>
+constructors.
+<p>A DbException object contains an informational string and an errno.
+The errno can be obtained by using DbException::get_errno.
+The informational string can be obtained by using DbException::what.
+<p>We expect in the future that this class will inherit from the standard
+class exception, but certain language implementation bugs currently
+prevent this on some platforms.
+<p>Some methods may return non-zero values without issuing an exception.
+This occurs in situations that are not normally considered an error but
+in which some informational status is returned. For example, <a href="../api_cxx/db_get.html">Db::get</a>
+returns DB_NOTFOUND when a requested key does not appear in the database.
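+<p>The following sketch does not appear in the original Sleepycat page; it assumes
+&lt;db_cxx.h&gt; and &lt;stdio.h&gt; are included and that <b>db</b> is an already-open
+database handle, and shows both behaviors together:
+<p><pre>
+try {
+	Dbt key((void *)"fruit", sizeof("fruit")), data;
+	/* DB_NOTFOUND is an informational return, not an exception. */
+	if (db->get(NULL, &key, &data, 0) == DB_NOTFOUND)
+		printf("key not present\n");
+} catch (DbException &dbe) {
+	/* Serious errors arrive here by default. */
+	fprintf(stderr, "error %d: %s\n", dbe.get_errno(), dbe.what());
+}
+</pre>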
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_class.html b/libdb/docs/api_cxx/lock_class.html
new file mode 100644
index 0000000..eb21991
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_class.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLock</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLock {
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+ ~DbLock();
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The locking interfaces for the Berkeley DB database environment are methods
+of the <a href="../api_cxx/env_class.html">DbEnv</a> handle. The DbLock object is the handle
+for a single lock, and has no methods of its own.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, DbLock
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_detect.html b/libdb/docs/api_cxx/lock_detect.html
new file mode 100644
index 0000000..edf99f6
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_detect.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_detect</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_detect method runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table and marks one of the
+participating lock requesters for rejection in each deadlock it finds.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <b>atype</b> parameter specifies which lock request(s) to reject.
+It must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to DB_LOCK_RANDOM.
+<dt><a name="DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="DB_LOCK_MAXLOCKS">DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="DB_LOCK_MINLOCKS">DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest number of
+locks.
+<dt><a name="DB_LOCK_MINWRITE">DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest number of
+write locks.
+<dt><a name="DB_LOCK_OLDEST">DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="DB_LOCK_RANDOM">DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>If the <b>aborted</b> parameter is non-NULL, the memory location to
+which it refers will be set to the number of lock requests that were
+rejected.
+<p>The DbEnv::lock_detect method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
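+<p>As a hypothetical usage sketch (not part of the original Sleepycat page),
+assuming &lt;stdio.h&gt; is included and <b>env</b> is an open environment handle,
+one detector pass might look like:
+<p><pre>
+int rejected;
+
+/* Run one pass using the environment's configured policy. */
+if (env->lock_detect(0, DB_LOCK_DEFAULT, &rejected) == 0)
+	printf("%d lock request(s) rejected\n", rejected);
+</pre>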
+<h1>Errors</h1>
+<p>The DbEnv::lock_detect method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_detect method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_get.html b/libdb/docs/api_cxx/lock_get.html
new file mode 100644
index 0000000..cd6074c
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_get.html
@@ -0,0 +1,93 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_get(u_int32_t locker, u_int32_t flags,
+ const Dbt *obj, const db_lockmode_t lock_mode, DbLock *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_get method acquires a lock from the lock table, returning
+information about it in
+the <b>lock</b> argument.
+<p>The <b>locker</b> argument specified to DbEnv::lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock,
+return DB_LOCK_NOTGRANTED or throw a
+<a href="../api_cxx/lockng_class.html">DbLockNotGrantedException</a> immediately instead of waiting for
+the lock to become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released. Applications using the locking
+subsystem directly while also doing locking via the Berkeley DB access methods
+must take care not to inadvertently lock objects that happen to be equal
+to the unique file IDs used to lock files. See
+<a href="../ref/lock/am_conv.html">Access method locking conventions</a>
+for more information.
+<p>The <b>lock_mode</b> argument is used as an index into the environment's
+lock conflict matrix. When using the default lock conflict matrix,
+<b>lock_mode</b> must be set to one of the following values:
+<p><dl compact>
+<dt>DB_LOCK_READ<dd>read (shared)
+<dt>DB_LOCK_WRITE<dd>write (exclusive)
+<dt>DB_LOCK_IWRITE<dd>intention to write (shared)
+<dt>DB_LOCK_IREAD<dd>intention to read (shared)
+<dt>DB_LOCK_IWR<dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p>
+Otherwise, the DbEnv::lock_get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
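+<p>A minimal usage sketch, not part of the original Sleepycat page, assuming
+&lt;db_cxx.h&gt; is included and <b>env</b> is an open environment with the locking
+subsystem initialized:
+<p><pre>
+u_int32_t locker;
+DbLock lock;
+Dbt obj((void *)"my-resource", sizeof("my-resource"));
+
+env->lock_id(&locker);                  /* allocate a locker ID */
+if (env->lock_get(locker, 0, &obj, DB_LOCK_WRITE, &lock) == 0) {
+	/* ... the object named by obj is exclusively locked ... */
+	env->lock_put(&lock);           /* release the single lock */
+}
+env->lock_id_free(locker);              /* return the locker ID */
+</pre>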
+<h1>Errors</h1>
+<p>The DbEnv::lock_get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+DbEnv::lock_get method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The DbEnv::lock_get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_get method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_id.html b/libdb/docs/api_cxx/lock_id.html
new file mode 100644
index 0000000..a30a638
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_id.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_id</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_id(u_int32_t *idp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_id method
+copies a locker ID, which is guaranteed to be unique in the specified lock
+table, into the memory location to which <b>idp</b> refers.
+<p>The <a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a> method should be called to return the locker ID to
+the Berkeley DB library when it is no longer needed.
+<p>The DbEnv::lock_id method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
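+<p>For illustration only (not from the original Sleepycat page), assuming
+<b>env</b> is an open environment:
+<p><pre>
+u_int32_t locker;
+
+env->lock_id(&locker);        /* allocate a locker ID */
+/* ... pass locker to DbEnv::lock_get or DbEnv::lock_vec ... */
+env->lock_id_free(locker);    /* return it once no locks remain */
+</pre>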
+<h1>Errors</h1>
+<p>The DbEnv::lock_id method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_id method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_id_free.html b/libdb/docs/api_cxx/lock_id_free.html
new file mode 100644
index 0000000..31898ed
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_id_free.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_id_free(u_int32_t id);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_id_free method frees a locker ID allocated by the
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a> method.
+<p>The DbEnv::lock_id_free method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DbEnv::lock_id_free method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_id_free method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_list.html b/libdb/docs/api_cxx/lock_list.html
new file mode 100644
index 0000000..881265a
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Locking Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Locking Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Locking Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_cxx/lock_get.html">DbEnv::lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_cxx/lock_id.html">DbEnv::lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_cxx/lock_put.html">DbEnv::lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a></td><td>Acquire/release locks</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_put.html b/libdb/docs/api_cxx/lock_put.html
new file mode 100644
index 0000000..c637886
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_put.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_put</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_put(DbLock *lock);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_put method releases <b>lock</b> from the lock table.
+<p>The DbEnv::lock_put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::lock_put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::lock_put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_put method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_stat.html b/libdb/docs/api_cxx/lock_stat.html
new file mode 100644
index 0000000..2c60404
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_stat.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_stat method returns the locking subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv::lock_stat method creates a statistical structure of type
+DB_LOCK_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_LOCK_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_id;<dd>The last allocated locker ID.
+<dt>u_int32_t st_cur_maxid;<dd>The current maximum unused locker ID.
+<dt>u_int32_t st_nmodes;<dd>The number of lock modes.
+<dt>u_int32_t st_maxlocks;<dd>The maximum number of locks possible.
+<dt>u_int32_t st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>u_int32_t st_maxobjects;<dd>The maximum number of lock objects possible.
+<dt>u_int32_t st_nlocks;<dd>The number of current locks.
+<dt>u_int32_t st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>u_int32_t st_nlockers;<dd>The number of current lockers.
+<dt>u_int32_t st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>u_int32_t st_nobjects;<dd>The number of current lock objects.
+<dt>u_int32_t st_maxnobjects;<dd>The maximum number of lock objects at any one time.
+<dt>u_int32_t st_nrequests;<dd>The total number of locks requested.
+<dt>u_int32_t st_nreleases;<dd>The total number of locks released.
+<dt>u_int32_t st_nnowaits;<dd>The total number of lock requests failing because
+<a href="../api_cxx/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> was set.
+<dt>u_int32_t st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>u_int32_t st_ndeadlocks;<dd>The number of deadlocks.
+<dt>u_int32_t st_locktimeout;<dd>Lock timeout value.
+<dt>u_int32_t st_nlocktimeouts;<dd>The number of locks that have timed out.
+<dt>u_int32_t st_txntimeout;<dd>Transaction timeout value.
+<dt>u_int32_t st_ntxntimeouts;<dd>The number of transactions that have timed out. This value is also a
+component of <b>st_ndeadlocks</b>, the total number of deadlocks
+detected.
+<dt>u_int32_t st_regsize;<dd>The size of the lock region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv::lock_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
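+<p>A hypothetical sketch (not part of the original Sleepycat page), assuming
+&lt;stdio.h&gt; and &lt;stdlib.h&gt; are included and <b>env</b> is an open environment:
+<p><pre>
+DB_LOCK_STAT *sp;
+
+if (env->lock_stat(&sp, 0) == 0) {
+	printf("locks now: %lu, most at once: %lu, deadlocks: %lu\n",
+	    (unsigned long)sp->st_nlocks,
+	    (unsigned long)sp->st_maxnlocks,
+	    (unsigned long)sp->st_ndeadlocks);
+	free(sp);     /* one free releases the whole structure */
+}
+</pre>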
+<h1>Errors</h1>
+<p>The DbEnv::lock_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lock_vec.html b/libdb/docs/api_cxx/lock_vec.html
new file mode 100644
index 0000000..a635656
--- /dev/null
+++ b/libdb/docs/api_cxx/lock_vec.html
@@ -0,0 +1,151 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_vec</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_vec(u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[], int nlist, DB_LOCKREQ **elistp);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_vec method atomically obtains and releases one or more locks
+from the lock table. The DbEnv::lock_vec method is intended to support
+acquisition or trading of multiple locks under one lock table semaphore,
+as is needed for lock coupling or in multigranularity locking for lock
+escalation.
+<p>The <b>locker</b> argument specified to DbEnv::lock_vec is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the locks.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with
+an existing lock,
+return DB_LOCK_NOTGRANTED or throw a
+<a href="../api_cxx/lockng_class.html">DbLockNotGrantedException</a> immediately instead of waiting for
+the lock to become available. In this case, if non-NULL,
+<b>elistp</b> identifies the request that was not granted, or, if an
+exception is thrown, the index of the request that was not granted can
+be found by calling DbLockNotGrantedException::get_index.
+</dl>
+<p>The <b>list</b> array provided to DbEnv::lock_vec is typedef'd as
+DB_LOCKREQ. A DB_LOCKREQ structure has at least the following fields.
+In order to ensure compatibility with future releases of Berkeley DB, all
+fields of the DB_LOCKREQ structure that are not explicitly set should
+be initialized to 0 before the first time the structure is used. Do
+this by declaring the structure external or static, or by calling
+<b>memset</b>(3).
+<p><dl compact>
+<p><dt>db_lockop_t <a name="op">op</a>;<dd>The operation to be performed, which must be set to one of the
+following values:
+<p><dl compact>
+<p><dt><a name="DB_LOCK_GET">DB_LOCK_GET</a><dd>Get the lock defined by the values of the <b>mode</b> and <b>obj</b>
+structure fields, for the specified <b>locker</b>. Upon return from
+DbEnv::lock_vec, if the <b>lock</b> field is non-NULL, a reference
+to the acquired lock is stored there. (This reference is invalidated
+by any call to DbEnv::lock_vec or <a href="../api_cxx/lock_put.html">DbEnv::lock_put</a> that releases the
+lock.)
+<p><dt><a name="DB_LOCK_GET_TIMEOUT">DB_LOCK_GET_TIMEOUT</a><dd>Identical to DB_LOCK_GET except that the value in the <b>timeout</b>
+structure field overrides any previously specified timeout value for
+this lock. A value of 0 turns off any previously specified timeout.
+<p><dt><a name="DB_LOCK_PUT">DB_LOCK_PUT</a><dd>The lock to which the <b>lock</b> structure field refers is released.
+The <b>locker</b> argument, and <b>mode</b> and <b>obj</b> fields
+are ignored.
+<p><dt><a name="DB_LOCK_PUT_ALL">DB_LOCK_PUT_ALL</a><dd>All locks held by the specified <b>locker</b> are released. The
+<b>lock</b>, <b>mode</b>, and <b>obj</b> structure fields are
+ignored. Locks acquired in operations performed by the current call to
+DbEnv::lock_vec which appear before the DB_LOCK_PUT_ALL
+operation are released; those acquired in operations appearing after
+the DB_LOCK_PUT_ALL operation are not released.
+<p><dt><a name="DB_LOCK_PUT_OBJ">DB_LOCK_PUT_OBJ</a><dd>All locks held on the object <b>obj</b> are released. The
+<b>locker</b> argument and the <b>lock</b> and <b>mode</b> structure
+fields are ignored. Locks acquired in operations performed by the
+current call to DbEnv::lock_vec that appear before the
+DB_LOCK_PUT_OBJ operation are released; those acquired in
+operations appearing after the DB_LOCK_PUT_OBJ operation are
+not released.
+<p><dt><a name="DB_LOCK_TIMEOUT">DB_LOCK_TIMEOUT</a><dd>Cause the specified <b>locker</b> to timeout immediately. If the
+database environment has not configured automatic deadlock detection,
+the transaction will timeout the next time deadlock detection is
+performed. As transactions acquire locks on behalf of a single locker
+ID, timing out the locker ID associated with a transaction will time
+out the transaction itself.
+</dl>
+<p><dt>DB_LOCK <a name="lock">lock</a>;<dd>A lock reference.
+<p><dt>const db_lockmode_t <a name="mode">mode</a>;<dd>The lock mode, used as an index into the environment's lock conflict matrix.
+When using the default lock conflict matrix, <b>mode</b> must be set to one
+of the following values:
+<p><dl compact>
+<dt><a name="DB_LOCK_READ">DB_LOCK_READ</a><dd>read (shared)
+<dt><a name="DB_LOCK_WRITE">DB_LOCK_WRITE</a><dd>write (exclusive)
+<dt><a name="DB_LOCK_IWRITE">DB_LOCK_IWRITE</a><dd>intention to write (shared)
+<dt><a name="DB_LOCK_IREAD">DB_LOCK_IREAD</a><dd>intention to read (shared)
+<dt><a name="DB_LOCK_IWR">DB_LOCK_IWR</a><dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p><dt>const Dbt <a name="obj">obj</a>;<dd>An untyped byte string that specifies the object to be locked or
+released. Applications using the locking subsystem directly while also
+doing locking via the Berkeley DB access methods must take care not to
+inadvertently lock objects that happen to be equal to the unique file
+IDs used to lock files. See <a href="../ref/lock/am_conv.html">Access
+method locking conventions</a> for more information.
+<p><dt>u_int32_t timeout;<dd>The lock timeout value.
+</dl>
+<p>The <b>nlist</b> argument specifies the number of elements in the
+<b>list</b> array.
+<p>If any of the requested locks cannot be acquired, or any of the locks to
+be released cannot be released, the operations before the failing
+operation are guaranteed to have completed successfully, and
+DbEnv::lock_vec returns a non-zero value. In addition, if <b>elistp</b>
+is not NULL, it is set to point to the DB_LOCKREQ entry that was being
+processed when the error occurred.
+<p>
+Otherwise, the DbEnv::lock_vec method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
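+<p>As a hypothetical sketch (not part of the original Sleepycat page), assuming
+&lt;string.h&gt; is included, <b>env</b> is an open environment, and <b>locker</b> was
+allocated with <a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>, all locks held by that locker might
+be released in one call:
+<p><pre>
+DB_LOCKREQ req;
+DB_LOCKREQ *failed;
+
+memset(&req, 0, sizeof(req));    /* zero unused fields, as described above */
+req.op = DB_LOCK_PUT_ALL;        /* mode and obj are ignored for this op */
+
+if (env->lock_vec(locker, 0, &req, 1, &failed) != 0) {
+	/* failed points at the DB_LOCKREQ being processed when the error occurred */
+}
+</pre>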
+<h1>Errors</h1>
+<p>The DbEnv::lock_vec method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+DbEnv::lock_vec method will fail and
+either return <a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> or
+throw a <a href="../api_cxx/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The DbEnv::lock_vec method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_vec method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lockng_class.html b/libdb/docs/api_cxx/lockng_class.html
new file mode 100644
index 0000000..dbbf246
--- /dev/null
+++ b/libdb/docs/api_cxx/lockng_class.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLockNotGrantedException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLockNotGrantedException</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLockNotGrantedException : public DbException {
+public:
+ db_lockop_t get_op() const;
+ db_lockmode_t get_mode() const;
+ const Dbt* get_obj() const;
+ DbLock *get_lock() const;
+ int get_index() const;
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbLockNotGrantedException class and
+how it is used by the various Db* classes.
+<p>A DbLockNotGrantedException is thrown when a lock, requested
+using the <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> or <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> methods (where the
+<a href="../api_cxx/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> option was specified), or by any <a href="../api_cxx/db_class.html">Db</a>
+operation performed in the context of a transaction begun using the
+<a href="../api_cxx/txn_begin.html#DB_TXN_NOWAIT">DB_TXN_NOWAIT</a> option, is unable to be granted immediately.
+<p>The <b>get_op</b> method returns 0 when <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> was called,
+and returns the <b>op</b> for the failed DB_LOCKREQ when
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> was called.
+<p>The <b>get_mode</b> method returns the <b>mode</b> argument when
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> was called, and returns the <b>mode</b> for the failed
+DB_LOCKREQ when <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> was called.
+<p>The <b>get_obj</b> method returns the <b>obj</b> argument when
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> was called, and returns the <b>obj</b> for the failed
+DB_LOCKREQ when <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> was called.
+The <a href="../api_cxx/dbt_class.html">Dbt</a> pointer may or may not refer to valid memory, depending on
+whether the <a href="../api_cxx/dbt_class.html">Dbt</a> used in the call to the failed <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> or
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> method is still in scope and has not been deleted.
+<p>The <b>get_lock</b> method returns NULL when <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> was
+called, and returns the <b>lock</b> in the failed DB_LOCKREQ
+when <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> was called.
+<p>The <b>get_index</b> method returns -1 when <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> was
+called, and returns the index of the failed DB_LOCKREQ
+when <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> was called.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_archive.html b/libdb/docs/api_cxx/log_archive.html
new file mode 100644
index 0000000..abd0ec9
--- /dev/null
+++ b/libdb/docs/api_cxx/log_archive.html
@@ -0,0 +1,101 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_archive</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_archive(char *(*listp)[], u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_archive method
+creates a NULL-terminated array of log or database filenames, and copies
+a pointer to them into the user-specified memory location <b>listp</b>.
+<p>By default, DbEnv::log_archive returns the names of all of the log
+files that are no longer in use (for example, that are no longer
+involved in active transactions), and that may safely be archived for
+catastrophic recovery and then removed from the system. If there are
+no filenames to return,
+the memory location to which <b>listp</b> refers will be set to NULL.
+<p>Arrays of log filenames are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_ARCH_ABS">DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames, instead of relative
+to the database home directory.
+<p><dt><a name="DB_ARCH_DATA">DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+DbEnv::log_archive will not include them in this list. It is also
+possible that some of the files referred to by the log have since been
+deleted from the system.
+<p><dt><a name="DB_ARCH_LOG">DB_ARCH_LOG</a><dd>Return all the log filenames, regardless of whether or not they are in
+use.
+</dl>
+<p>The DB_ARCH_DATA and DB_ARCH_LOG flags are mutually
+exclusive.
+<p>Log cursor handles (returned by the <a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a> method) may have open
+file descriptors for log files in the database environment. Also, the
+Berkeley DB interfaces to the database environment logging subsystem (for
+example, <a href="../api_cxx/log_put.html">DbEnv::log_put</a> and <a href="../api_cxx/txn_abort.html">DbTxn::abort</a>) may allocate log cursors
+and have open file descriptors for log files as well. On operating
+systems where filesystem related system calls (for example, rename and
+unlink on Windows/NT) can fail if a process has an open file descriptor
+for the affected file, attempting to move or remove the log files listed
+by DbEnv::log_archive may fail. All Berkeley DB internal use of log cursors
+operates on active log files only and, furthermore, is short-lived. An
+application seeing such a failure should therefore be restructured to
+close any open log cursors it may have and otherwise retry the
+operation until it succeeds. (Although the latter is not likely to be
+necessary; it is hard to imagine a reason to move or rename a log file
+in which transactions are being logged or aborted.)
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
+<p>The DbEnv::log_archive method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
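+<p>A hypothetical sketch (not part of the original Sleepycat page), assuming
+&lt;stdio.h&gt; and &lt;stdlib.h&gt; are included and <b>env</b> is an open environment;
+the pointer declaration follows the common usage pattern rather than the
+prototype rendering above:
+<p><pre>
+char **list, **p;
+
+if (env->log_archive(&list, DB_ARCH_ABS) == 0 && list != NULL) {
+	for (p = list; *p != NULL; ++p)
+		printf("archivable: %s\n", *p);
+	free(list);    /* a single free releases the array and the names */
+}
+</pre>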
+<h1>Errors</h1>
+<p>The DbEnv::log_archive method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The DbEnv::log_archive method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_archive method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_compare.html b/libdb/docs/api_cxx/log_compare.html
new file mode 100644
index 0000000..c4117d6
--- /dev/null
+++ b/libdb/docs/api_cxx/log_compare.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_compare</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+static int
+DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_compare method allows the caller to compare two
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
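+<p>For illustration only (not from the original Sleepycat page), assuming two
+<a href="../api_cxx/lsn_class.html">DbLsn</a> values <b>a</b> and <b>b</b> obtained from the logging
+subsystem:
+<p><pre>
+int cmp = DbEnv::log_compare(&a, &b);   /* static method; no handle needed */
+
+if (cmp == 0) {
+	/* a and b name the same log record */
+} else if (cmp == 1) {
+	/* a was written after b */
+} else {
+	/* cmp is -1: a was written before b */
+}
+</pre>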
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_cursor.html b/libdb/docs/api_cxx/log_cursor.html
new file mode 100644
index 0000000..63762b8
--- /dev/null
+++ b/libdb/docs/api_cxx/log_cursor.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_cursor(DbLogc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_cursor method
+creates a log cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv::log_cursor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
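+<p>A hypothetical sketch (not part of the original Sleepycat page), assuming
+<b>env</b> is an open environment with logging initialized:
+<p><pre>
+DbLogc *logc;
+DbLsn lsn;
+Dbt data;
+
+if (env->log_cursor(&logc, 0) == 0) {
+	/* DbLogc::get positions the cursor; DB_FIRST then DB_NEXT walks forward. */
+	for (int ret = logc->get(&lsn, &data, DB_FIRST);
+	    ret == 0; ret = logc->get(&lsn, &data, DB_NEXT)) {
+		/* data.get_data() / data.get_size() describe the record */
+	}
+	logc->close(0);
+}
+</pre>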
+<h1>Errors</h1>
+<p>The DbEnv::log_cursor method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_cursor method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_cursor method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_file.html b/libdb/docs/api_cxx/log_file.html
new file mode 100644
index 0000000..fe965b7
--- /dev/null
+++ b/libdb/docs/api_cxx/log_file.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_file</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_file(const DbLsn *lsn, char *namep, size_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_file method maps
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects
+to filenames,
+copying the name of the file containing the record named by <b>lsn</b>
+into the memory location to which <b>namep</b> refers.
+<p>The <b>len</b> argument is the length of the <b>namep</b> buffer in bytes.
+If <b>namep</b> is too short to hold the filename, DbEnv::log_file will
+return ENOMEM.
+(Log filenames are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+<a href="../api_cxx/lsn_class.html">DbLsn</a> objects
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+<a href="../api_cxx/lsn_class.html">DbLsn</a>
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+<a href="../api_cxx/lsn_class.html">DbLsn</a>
+entries before the earliest one needed for restart.
+<p>The DbEnv::log_file method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
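+<p>For illustration only (not from the original Sleepycat page), assuming
+&lt;stdio.h&gt; is included, <b>env</b> is an open environment, and <b>lsn</b> was
+filled in by a log cursor or <a href="../api_cxx/log_put.html">DbEnv::log_put</a>:
+<p><pre>
+char fname[1024];
+
+if (env->log_file(&lsn, fname, sizeof(fname)) == 0)
+	printf("the record named by lsn is in %s\n", fname);
+</pre>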
+<h1>Errors</h1>
+<p>The DbEnv::log_file method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The supplied buffer was too small to hold the log filename.
+</dl>
+<p>The DbEnv::log_file method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_file method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_flush.html b/libdb/docs/api_cxx/log_flush.html
new file mode 100644
index 0000000..0022093
--- /dev/null
+++ b/libdb/docs/api_cxx/log_flush.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_flush</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_flush(const DbLsn *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_flush method guarantees that all log records whose
+<a href="../api_cxx/lsn_class.html">DbLsn</a> values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is NULL, all records in the
+log are flushed.
+<p>The DbEnv::log_flush method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
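+<p>A one-line sketch (not part of the original Sleepycat page), assuming
+<b>env</b> is an open environment:
+<p><pre>
+env->log_flush(NULL);     /* force every log record to disk */
+</pre>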
+<h1>Errors</h1>
+<p>The DbEnv::log_flush method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_flush method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_flush method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_list.html b/libdb/docs/api_cxx/log_list.html
new file mode 100644
index 0000000..783d324
--- /dev/null
+++ b/libdb/docs/api_cxx/log_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Logging Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Logging Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Logging Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/log_archive.html">DbEnv::log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><a href="../api_cxx/log_file.html">DbEnv::log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_cxx/log_flush.html">DbEnv::log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_cxx/log_put.html">DbEnv::log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_cxx/log_compare.html">DbEnv::log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><a href="../api_cxx/log_stat.html">DbEnv::log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_cxx/logc_close.html">DbLogc::close</a></td><td>Close a log cursor</td></tr>
+<tr><td><a href="../api_cxx/logc_get.html">DbLogc::get</a></td><td>Retrieve a log record</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_put.html b/libdb/docs/api_cxx/log_put.html
new file mode 100644
index 0000000..1d6dc80
--- /dev/null
+++ b/libdb/docs/api_cxx/log_put.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_put</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_put method appends records to the log. The <a href="../api_cxx/lsn_class.html">DbLsn</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FLUSH">DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_cxx/lsn_class.html">DbLsn</a> values less than or equal to the
+one being "put" are on disk before DbEnv::log_put returns.
+</dl>
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_cxx/lsn_class.html">DbLsn</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+<p>The DbEnv::log_put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
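+<p>As an illustration only (assuming an already-opened DbEnv named
+<b>dbenv</b> with the logging subsystem initialized, and a hypothetical helper
+name), appending one opaque application record and forcing it to disk might be
+sketched as:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;cstring&gt;
+
+// Hypothetical sketch: append a record and flush it with DB_FLUSH.
+int put_record(DbEnv *dbenv, const char *msg)
+{
+    Dbt data((void *)msg, (u_int32_t)strlen(msg) + 1);
+    DbLsn lsn;                          // filled in with the record's DbLsn
+    return dbenv->log_put(&lsn, &data, DB_FLUSH);
+}
+</pre></blockquote>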
+<h1>Errors</h1>
+<p>The <a href="../api_cxx/log_flush.html">DbEnv::log_flush</a> method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The DbEnv::log_put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_put method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/log_stat.html b/libdb/docs/api_cxx/log_stat.html
new file mode 100644
index 0000000..5437c48
--- /dev/null
+++ b/libdb/docs/api_cxx/log_stat.html
@@ -0,0 +1,93 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_stat method returns the logging subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv::log_stat method creates a statistical structure of type
+DB_LOG_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_LOG_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>u_int32_t st_version;<dd>The version of the log file type.
+<dt>int st_mode;<dd>The mode of any created log files.
+<dt>u_int32_t st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>u_int32_t st_lg_size;<dd>The current log file size.
+<dt>u_int32_t st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>u_int32_t st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>u_int32_t st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>u_int32_t st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>u_int32_t st_wcount;<dd>The number of times the log has been written to disk.
+<dt>u_int32_t st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>u_int32_t st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>u_int32_t st_cur_file;<dd>The current log file number.
+<dt>u_int32_t st_cur_offset;<dd>The byte offset in the current log file.
+<dt>u_int32_t st_disk_file;<dd>The log file number of the last record known to be on disk.
+<dt>u_int32_t st_disk_offset;<dd>The byte offset of the last record known to be on disk.
+<dt>u_int32_t st_maxcommitperflush;<dd>The maximum number of commits contained in a single log flush.
+<dt>u_int32_t st_mincommitperflush;<dd>The minimum number of commits contained in a single log flush that
+contained a commit.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv::log_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
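+<p>A minimal sketch (assuming an already-opened DbEnv named <b>dbenv</b> and
+the default <b>malloc</b>(3) allocator) that prints the current log file and
+offset and then releases the statistics structure might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;cstdio&gt;
+#include &lt;cstdlib&gt;
+
+// Hypothetical sketch: fetch, print, and free the logging statistics.
+int print_log_stats(DbEnv *dbenv)
+{
+    DB_LOG_STAT *sp = NULL;
+    int ret = dbenv->log_stat(&sp, 0);
+    if (ret == 0) {
+        printf("current log file %lu, offset %lu\n",
+            (unsigned long)sp->st_cur_file, (unsigned long)sp->st_cur_offset);
+        free(sp);                       // the caller frees the whole structure
+    }
+    return ret;
+}
+</pre></blockquote>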
+<h1>Errors</h1>
+<p>The DbEnv::log_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/logc_class.html b/libdb/docs/api_cxx/logc_class.html
new file mode 100644
index 0000000..8c7c84f
--- /dev/null
+++ b/libdb/docs/api_cxx/logc_class.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLogc { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc object is the handle for a cursor into the log files,
+supporting sequential access to the records stored in log files. The
+handle is not free-threaded. Once the <a href="../api_cxx/logc_close.html">DbLogc::close</a> method is called,
+the handle may not be accessed again, regardless of that method's
+return.
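+<p>A minimal sketch (assuming an already-opened DbEnv named <b>dbenv</b>; see
+<a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a> for the method that
+creates the handle) that obtains and then discards a log cursor might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+// Hypothetical sketch: create a log cursor and close it again.
+int cursor_roundtrip(DbEnv *dbenv)
+{
+    DbLogc *logc = NULL;
+    int ret = dbenv->log_cursor(&logc, 0);
+    if (ret == 0)
+        ret = logc->close(0);           // the handle may not be used afterward
+    return ret;
+}
+</pre></blockquote>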
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, DbLogc, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/logc_close.html b/libdb/docs/api_cxx/logc_close.html
new file mode 100644
index 0000000..4edc265
--- /dev/null
+++ b/libdb/docs/api_cxx/logc_close.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc::close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbLogc::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc::close method discards the log cursor. After DbLogc::close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbLogc::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbLogc::close method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DbLogc::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc::close method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/logc_get.html b/libdb/docs/api_cxx/logc_get.html
new file mode 100644
index 0000000..27435ad
--- /dev/null
+++ b/libdb/docs/api_cxx/logc_get.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc::get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc::get method retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_cxx/dbt_class.html">Dbt</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> and <a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_cxx/dbt_class.html">Dbt</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>
+If the log is empty, the DbLogc::get method will return DB_NOTFOUND.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>
+If the log is empty, the DbLogc::get method will return DB_NOTFOUND.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the record
+returned.
+<p>If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbLogc::get will return the first (last) record
+in the log.
+If the last (first) log record has already been returned or the log is
+empty, the DbLogc::get method will return DB_NOTFOUND.
+If the log was opened with the DB_THREAD flag set and DbLogc::get is called
+with the DB_NEXT (DB_PREV) flag set, the DbLogc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+If the log cursor has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, the DbLogc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument.
+If the specified <a href="../api_cxx/lsn_class.html">DbLsn</a> is invalid (for example, it does not
+appear in the log), the DbLogc::get method either returns EINVAL or throws an exception that encapsulates EINVAL.
+</dl>
+<p>
+Otherwise, the DbLogc::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
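+<p>As an illustration only (assuming an already-created log cursor named
+<b>logc</b>; see <a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a>),
+walking every record in the log from oldest to newest might be sketched as:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;cstdio&gt;
+#include &lt;cstdlib&gt;
+
+// Hypothetical sketch: count the records in the log with DB_FIRST/DB_NEXT.
+int count_log_records(DbLogc *logc)
+{
+    DbLsn lsn;
+    Dbt data;
+    data.set_flags(DB_DBT_MALLOC);      // the library allocates each record
+    int ret, count = 0;
+    for (ret = logc->get(&lsn, &data, DB_FIRST); ret == 0;
+        ret = logc->get(&lsn, &data, DB_NEXT)) {
+        count++;
+        free(data.get_data());          // release the DB_DBT_MALLOC buffer
+    }
+    printf("%d log records\n", count);
+    return ret == DB_NOTFOUND ? 0 : ret;  // DB_NOTFOUND marks the end of the log
+}
+</pre></blockquote>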
+<h1>Errors</h1>
+<p>The DbLogc::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DbLogc::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc::get method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, <a href="../api_cxx/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/lsn_class.html b/libdb/docs/api_cxx/lsn_class.html
new file mode 100644
index 0000000..f9c171a
--- /dev/null
+++ b/libdb/docs/api_cxx/lsn_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLsn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLsn</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLsn { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLsn object is a <b>log sequence number</b> which
+specifies a unique location in a log file. It has no methods and
+its data may not be manipulated by an application.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/logc_class.html">DbLogc</a>, DbLsn
+<h1>See Also</h1>
+<a href="../api_cxx/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_class.html b/libdb/docs/api_cxx/memp_class.html
new file mode 100644
index 0000000..00691f6
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_class.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMemoryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMemoryException</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbMemoryException : public DbException {
+public:
+ Dbt *get_dbt() const;
+};
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbMemoryException class and how
+it is used by the various Db* classes.
+<p>A DbMemoryException is thrown when there is insufficient memory
+to complete an operation, and there is the possibility of recovering.
+An example is during a <a href="../api_cxx/db_get.html">Db::get</a> or <a href="../api_cxx/dbc_get.html">Dbc::get</a> operation with
+the <a href="../api_cxx/dbt_class.html">Dbt</a> flags set to <a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>.
+<p>The <b>get_dbt</b> method returns the <a href="../api_cxx/dbt_class.html">Dbt</a> that has insufficient
+memory to complete the operation, causing the DbMemoryException
+to be thrown.
+The <a href="../api_cxx/dbt_class.html">Dbt</a> pointer may or may not refer to valid memory, depending
+on whether the <a href="../api_cxx/dbt_class.html">Dbt</a> used in the call to the failed Berkeley DB method
+is still in scope and has not been deleted.
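+<p>A minimal sketch (assuming an already-opened Db handle named <b>db</b>, an
+existing record under <b>key</b>, and a hypothetical helper name) of catching
+DbMemoryException when a <a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>
+buffer is too small might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;cstdio&gt;
+#include &lt;cerrno&gt;
+
+// Hypothetical sketch: report how much memory the record actually needed.
+int get_with_small_buffer(Db *db, Dbt *key)
+{
+    char buf[32];
+    Dbt data;
+    data.set_data(buf);
+    data.set_ulen(sizeof(buf));
+    data.set_flags(DB_DBT_USERMEM);     // the caller supplies the memory
+    try {
+        return db->get(NULL, key, &data, 0);
+    } catch (DbMemoryException &e) {
+        Dbt *failed = e.get_dbt();      // the Dbt that was too small
+        fprintf(stderr, "buffer too small; %u bytes needed\n",
+            (unsigned)failed->get_size());
+        return ENOMEM;
+    }
+}
+</pre></blockquote>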
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fclose.html b/libdb/docs/api_cxx/memp_fclose.html
new file mode 100644
index 0000000..d6a76fd
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fclose.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::close method closes the source file indicated by the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> object. Calling DbMpoolFile::close does not imply a call
+to <a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>; that is, no pages are written to the source file
+as a result of calling DbMpoolFile::close.
+<p>If the <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> was temporary, any underlying files created
+for this <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> will be removed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>After DbMpoolFile::close has been called, regardless of its return, the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle may not be accessed again.
+<p>The DbMpoolFile::close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::close method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fcreate.html b/libdb/docs/api_cxx/memp_fcreate.html
new file mode 100644
index 0000000..9f86e25
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fcreate.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_fcreate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_fcreate</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_fcreate method creates a <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> structure that
+is the handle for a Berkeley DB shared memory buffer pool file. A pointer to
+this structure is returned in the memory to which <b>dbmfp</b> refers.
+Calling the <a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a> method will discard the returned handle.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv::memp_fcreate method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_fcreate method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_fcreate method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fget.html b/libdb/docs/api_cxx/memp_fget.html
new file mode 100644
index 0000000..330e125
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fget.html
@@ -0,0 +1,92 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::get(db_pgno_t *pgnoaddr, u_int32_t flags, void **pagep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::get method copies a pointer to the page with the page number
+specified by <b>pgnoaddr</b>, from the source file in the <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>,
+into the memory location to which <b>pagep</b> refers. If the page
+does not exist or cannot be retrieved, DbMpoolFile::get will fail.
+<p><b>Page numbers begin at 0; that is, the first page in the file is page
+number 0, not page number 1.</b>
+<p>The returned page is <b>size_t</b> type aligned.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CREATE">DB_MPOOL_CREATE</a><dd>If the specified page does not exist, create it. In this case, the
+<a href="memp_register.html#pgin">pgin</a> method, if specified, is
+called.
+<p><dt><a name="DB_MPOOL_LAST">DB_MPOOL_LAST</a><dd>Return the last page of the source file, and copy its page number into
+the memory location to which <b>pgnoaddr</b> refers.
+<p><dt><a name="DB_MPOOL_NEW">DB_MPOOL_NEW</a><dd>Create a new page in the file, and copy its page number into the memory
+location to which <b>pgnoaddr</b> refers. In this case, the
+<a href="memp_register.html#pgin">pgin</a> method, if specified, is
+<b>not</b> called.
+</dl>
+<p>The DB_MPOOL_CREATE, DB_MPOOL_LAST, and
+DB_MPOOL_NEW flags are mutually exclusive.
+<p>Fully or partially created pages have all their bytes set to a nul byte,
+unless the <a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a> method was called to specify other
+behavior before the file was opened.
+<p>All pages returned by DbMpoolFile::get will be retained (that is,
+<i>pinned</i>) in the pool until a subsequent call to
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>.
+<a name="3"><!--meow--></a>
+<p>The DbMpoolFile::get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success. It returns
+DB_PAGE_NOTFOUND if the requested page does not exist and DB_MPOOL_CREATE was not set.
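+<p>For example, a minimal sketch (assuming an already-opened DbMpoolFile named
+<b>mpf</b>, and error returns rather than exceptions) that pins page 0,
+creating it if it does not yet exist, might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+// Hypothetical sketch: pin the first page of the file, creating it if needed.
+int pin_first_page(DbMpoolFile *mpf, void **pagep)
+{
+    db_pgno_t pgno = 0;                 // page numbers begin at 0
+    return mpf->get(&pgno, DB_MPOOL_CREATE, pagep);
+}
+</pre></blockquote>
+<p>The pointer returned through <b>pagep</b> remains pinned until it is handed
+back to the pool with <a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>.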
+<h1>Errors</h1>
+<p>The DbMpoolFile::get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The page reference count has overflowed. (This should never happen unless
+there's a bug in the application.)
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_MPOOL_NEW flag was set, and the source file was not
+opened for writing.
+<p>More than one of DB_MPOOL_CREATE, DB_MPOOL_LAST, and
+DB_MPOOL_NEW was set.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The cache is full, and no more pages will fit in the pool.
+</dl>
+<p>The DbMpoolFile::get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::get method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fopen.html b/libdb/docs/api_cxx/memp_fopen.html
new file mode 100644
index 0000000..16be491
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fopen.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::open</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::open(const char *file, u_int32_t flags, int mode, size_t pagesize);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::open method opens a file in the shared memory buffer pool.
+The <b>file</b> argument is the name of the file to be opened. If
+<b>file</b> is NULL, a private temporary file is created that
+cannot be shared with any other process (although it may be shared with
+other threads).
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CREATE">DB_CREATE</a><dd>Create any underlying files, as necessary. If the files do not already
+exist and the DB_CREATE flag is not specified, the call will
+fail.
+<a name="3"><!--meow--></a>
+<p><dt><a name="DB_DIRECT">DB_DIRECT</a><dd>If set and supported by the system, turn off system buffering of the
+file to avoid double caching.
+<p><dt><a name="DB_NOMMAP">DB_NOMMAP</a><dd>Always copy this file into the local cache instead of potentially mapping
+it into process memory (see the description of the
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a> method for further information).
+<p><dt><a name="DB_ODDFILESIZE">DB_ODDFILESIZE</a><dd>Attempts to open files which are not a multiple of the page size in
+length will fail, by default. If the DB_ODDFILESIZE flag is
+set, any partial page at the end of the file will be ignored and the
+open will proceed.
+<p><dt><a name="DB_RDONLY">DB_RDONLY</a><dd>Open any underlying files for reading only. Any attempt to write the file
+using the pool functions will fail, regardless of the actual permissions
+of the file.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+the DbMpoolFile::open method are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, the DbMpoolFile::open method will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>The <b>pagesize</b> argument is the size, in bytes, of the unit of transfer
+between the application and the pool, although it is not necessarily the
+unit of transfer between the pool and the source file.
+<p>The DbMpoolFile::open method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
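+<p>As an illustration only (assuming an already-opened DbEnv named
+<b>dbenv</b> and a hypothetical file name), creating a pool file handle with
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a> and opening a
+source file with 1KB pages might be sketched as:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+// Hypothetical sketch: open "table.data" in the shared memory buffer pool.
+int open_pool_file(DbEnv *dbenv, DbMpoolFile **mpfp)
+{
+    int ret = dbenv->memp_fcreate(mpfp, 0);
+    if (ret != 0)
+        return ret;
+    // DB_CREATE creates the file if necessary; mode 0 selects the default.
+    return (*mpfp)->open("table.data", DB_CREATE, 0, 1024);
+}
+</pre></blockquote>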
+<h1>Errors</h1>
+<p>The DbMpoolFile::open method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The file has already been entered into the pool, and the <b>pagesize</b>
+value is not the same as when the file was entered into the pool, or the
+length of the file is not zero or a multiple of the <b>pagesize</b>.
+<p>The DB_RDONLY flag was specified for an in-memory pool.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of open files has been reached.
+</dl>
+<p>The DbMpoolFile::open method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::open method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fput.html b/libdb/docs/api_cxx/memp_fput.html
new file mode 100644
index 0000000..61b0a6b
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fput.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::put</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::put(void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::put method indicates that the page to which <b>pgaddr</b>
+refers can be evicted from the pool. The <b>pgaddr</b> argument must
+be an address previously returned by <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (that is, don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file before
+being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future, and should be
+discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DbMpoolFile::put method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
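+<p>A minimal sketch (assuming a page pointer previously returned by
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> on an open DbMpoolFile
+named <b>mpf</b>) that modifies a page and returns it to the pool might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+#include &lt;cstring&gt;
+
+// Hypothetical sketch: stamp the start of a pinned page and release it dirty.
+int stamp_and_release(DbMpoolFile *mpf, void *pgaddr)
+{
+    memcpy(pgaddr, "stamp", 5);         // modify the pinned page in place
+    // DB_MPOOL_DIRTY ensures the change reaches the source file before eviction.
+    return mpf->put(pgaddr, DB_MPOOL_DIRTY);
+}
+</pre></blockquote>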
+<h1>Errors</h1>
+<p>The DbMpoolFile::put method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>The <a href="../api_cxx/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a> flag was set and the source file was not
+opened for writing.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The <b>pgaddr</b> argument does not refer to a page returned by
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>More than one of the <a href="../api_cxx/memp_fput.html#DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a> and <a href="../api_cxx/memp_fput.html#DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a>
+flags was set.
+</dl>
+<p>The DbMpoolFile::put method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::put method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fset.html b/libdb/docs/api_cxx/memp_fset.html
new file mode 100644
index 0000000..d2cb2ab
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fset.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set(void *pgaddr, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set method sets the flags associated with the page to which
+<b>pgaddr</b> refers without unpinning it from the pool. The
+<b>pgaddr</b> argument must be an address previously returned by
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_MPOOL_CLEAN">DB_MPOOL_CLEAN</a><dd>Clear any previously set modification information (that is, don't bother
+writing the page back to the source file).
+<p><dt><a name="DB_MPOOL_DIRTY">DB_MPOOL_DIRTY</a><dd>The page has been modified and must be written to the source file before
+being evicted from the pool.
+<p><dt><a name="DB_MPOOL_DISCARD">DB_MPOOL_DISCARD</a><dd>The page is unlikely to be useful in the near future, and should be
+discarded before other pages in the pool.
+</dl>
+<p>The DB_MPOOL_CLEAN and DB_MPOOL_DIRTY flags are
+mutually exclusive.
+<p>The DbMpoolFile::set method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
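+<p>For example, a minimal sketch (assuming a page pointer previously returned
+by <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>) that marks a page
+dirty while keeping it pinned might look like:
+<p><blockquote><pre>
+#include &lt;db_cxx.h&gt;
+
+// Hypothetical sketch: record that a pinned page has been modified
+// without returning it to the pool.
+int mark_dirty(DbMpoolFile *mpf, void *pgaddr)
+{
+    return mpf->set(pgaddr, DB_MPOOL_DIRTY);
+}
+</pre></blockquote>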
+<h1>Errors</h1>
+<p>The DbMpoolFile::set method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbMpoolFile::set method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_fsync.html b/libdb/docs/api_cxx/memp_fsync.html
new file mode 100644
index 0000000..14f8cbe
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_fsync.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::sync</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::sync();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::sync method writes all pages associated with the
+<a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>, which were marked as modified using
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a> or <a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>, back to the source file. If
+any of the modified pages are also <i>pinned</i> (that is, this or
+another process currently refers to them), DbMpoolFile::sync will
+ignore them.
+<p>The DbMpoolFile::sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::sync method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_list.html b/libdb/docs/api_cxx/memp_list.html
new file mode 100644
index 0000000..2563ad9
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_list.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Memory Pools and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Memory Pools and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Memory Pools and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_cxx/memp_register.html">DbEnv::memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_register.html b/libdb/docs/api_cxx/memp_register.html
new file mode 100644
index 0000000..86942b4
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_register.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_register</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+extern "C" {
+ typedef int (*pgin_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)(DB_ENV *dbenv,
+ db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+int
+DbEnv::memp_register(int ftype,
+ pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_register method registers page-in and page-out
+functions for files of type <b>ftype</b> in the specified pool.
+<p>If the <b>pgin_fcn</b> function is non-NULL, it is called each time
+a page is read into the memory pool from a file of type <b>ftype</b>, or
+a page is created for a file of type <b>ftype</b> (see the
+DB_MPOOL_CREATE flag for the <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> method).
+<p>If the <b>pgout_fcn</b> function is non-NULL, it is called each time
+a page is written to a file of type <b>ftype</b>.
+<p>Both the <b>pgin_fcn</b> and <b>pgout_fcn</b> functions are called
+with a reference to the current environment, the page number, a pointer
+to the page being read or written, and any argument <b>pgcookie</b>
+that was specified to the <a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a> method. The
+<b>pgin_fcn</b> and <b>pgout_fcn</b> functions should return 0 on
+success, and an applicable non-zero <b>errno</b> value on failure, in
+which case the shared memory pool interface routine (and, by extension,
+any Berkeley DB library function) calling it will also fail, returning that
+<b>errno</b> value.
+<p>The purpose of the DbEnv::memp_register function is to support processing
+when pages are entered into, or flushed from, the pool. A file type must
+be specified to make it possible for unrelated threads or processes that
+are sharing a pool to evict each other's pages from the pool. During
+initialization, applications should call DbEnv::memp_register for each
+type of file requiring input or output processing that will be sharing
+the underlying pool. (No registration is necessary for the standard Berkeley DB
+access method types because <a href="../api_cxx/db_open.html">Db::open</a> registers them separately.)
+<p>If a thread or process does not call DbEnv::memp_register for a file
+type, it is impossible for it to evict pages for any file requiring input
+or output processing from the pool. For this reason,
+DbEnv::memp_register should always be called by each application sharing
+a pool for each type of file included in the pool, regardless of whether
+or not the application itself uses files of that type.
+<p>There are no standard values for <b>ftype</b>, <b>pgin_fcn</b>,
+<b>pgout_fcn</b>, and <b>pgcookie</b>, except that the <b>ftype</b>
+value for a file must be a non-zero positive number less than 128
+(0 and negative numbers are reserved for internal use by the Berkeley DB
+library). For this reason, applications sharing a pool must coordinate
+the values among themselves.
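+<p>As an illustrative sketch only (the file type value, the function
+bodies, and the already-opened <b>dbenv</b> handle are assumptions, not
+part of the Berkeley DB API), an application might register its
+input/output functions during initialization as follows:
+<p><blockquote><pre>
+extern "C" int
+my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+	/* For example, convert the page at pgaddr from on-disk byte order. */
+	return (0);
+}
+extern "C" int
+my_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie)
+{
+	/* For example, convert the page at pgaddr to on-disk byte order. */
+	return (0);
+}
+
+#define	MY_FILE_TYPE	1	/* Application-chosen: positive, less than 128. */
+
+/* During application initialization: */
+int ret;
+if ((ret = dbenv->memp_register(MY_FILE_TYPE, my_pgin, my_pgout)) != 0)
+	dbenv->err(ret, "DbEnv::memp_register");
+</pre></blockquote>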
+<p>The DbEnv::memp_register method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_register method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_register method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_set_clear_len.html b/libdb/docs/api_cxx/memp_set_clear_len.html
new file mode 100644
index 0000000..ae739fe
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_set_clear_len.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_clear_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_clear_len</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_clear_len(u_int32_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_clear_len method sets the number of initial bytes in a
+page that should be set to nul when the page is created as a result of
+the <a href="../api_cxx/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a> or <a href="../api_cxx/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a> flags being
+specified to <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>. If no value is specified, or <b>len</b>
+is 0, the entire page is cleared.
+<p>The DbMpoolFile::set_clear_len method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle.
+<p>The DbMpoolFile::set_clear_len interface may not be called after the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> is called, the information specified to DbMpoolFile::set_clear_len
+must be consistent with the existing file or an error will be
+returned.
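+<p>As an illustrative sketch (assuming a <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle
+<b>mpf</b> that has been created but not yet opened, and an open
+<b>dbenv</b> handle for error reporting), an application whose pages
+only need their first 32 bytes cleared might configure the handle as
+follows:
+<p><blockquote><pre>
+int ret;
+/* Clear only the first 32 bytes of each newly created page. */
+if ((ret = mpf->set_clear_len(32)) != 0)
+	dbenv->err(ret, "DbMpoolFile::set_clear_len");
+</pre></blockquote>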
+<p>The DbMpoolFile::set_clear_len method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_clear_len method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_clear_len method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_set_fileid.html b/libdb/docs/api_cxx/memp_set_fileid.html
new file mode 100644
index 0000000..b9d8aa8
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_set_fileid.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_fileid</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_fileid</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_fileid(u_int8_t *fileid);
+</pre></h3>
+<h1>Description</h1>
+<p>The shared memory buffer pool functions must be able to uniquely
+identify files in order that multiple processes wanting to share a file
+will correctly identify it in the pool. The DbMpoolFile::set_fileid method
+specifies a unique identifier for the file. Unique file identifiers
+must be a DB_FILE_ID_LEN length array of bytes.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to
+be set, and the memory pool functions will use the file's device and
+inode numbers for this purpose. On Windows systems, the memory pool
+functions use the values returned by GetFileInformationByHandle() by
+default -- these values are known to be constant between processes and
+over reboot in the case of NTFS (in which they are the NTFS MFT
+indices).
+<p>On other filesystems (for example, FAT or NFS), these default values
+are not necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, in which the pool contains
+pages from files stored on such filesystems, must specify a unique file
+identifier using the DbMpoolFile::set_fileid method, and each process opening
+the file must provide the same unique identifier.</b>
+<p>This call should not be necessary for most applications. Specifically,
+it is not necessary if the memory pool is not shared between processes
+and is reinstantiated after each system reboot, if the application is
+using the Berkeley DB access methods instead of calling the pool functions
+explicitly, or if the files in the memory pool are stored on filesystems
+in which the default values as described previously are invariant
+between processes and across system reboots.
+<p>The DbMpoolFile::set_fileid method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle.
+<p>The DbMpoolFile::set_fileid interface may not be called after the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>
+interface is called.
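+<p>As an illustrative sketch (the identifier bytes, the <b>mpf</b>
+handle, and the <b>dbenv</b> handle used for error reporting are
+assumptions), an application might supply its own identifier before
+opening the file in the pool:
+<p><blockquote><pre>
+/* Every process sharing the file must pass the same DB_FILE_ID_LEN bytes. */
+u_int8_t fileid[DB_FILE_ID_LEN] = { 0x01, 0x02, 0x03, 0x04 };
+
+int ret;
+if ((ret = mpf->set_fileid(fileid)) != 0)
+	dbenv->err(ret, "DbMpoolFile::set_fileid");
+</pre></blockquote>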
+<p>The DbMpoolFile::set_fileid method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_fileid method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_fileid method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_set_ftype.html b/libdb/docs/api_cxx/memp_set_ftype.html
new file mode 100644
index 0000000..8d3435d
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_set_ftype.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_ftype</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_ftype</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_ftype(int ftype);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_ftype method specifies a file type for the purposes of
+input or output processing of the file's pages as they are read from,
+or written to, the backing filesystem store. The <b>ftype</b> argument
+must be the same as a <b>ftype</b> argument previously specified to
+the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> method. (See the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>
+documentation for more information.)
+<p>The DbMpoolFile::set_ftype method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle.
+<p>The DbMpoolFile::set_ftype interface may not be called after the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> is called, the information specified to DbMpoolFile::set_ftype
+will replace the existing information.
+<p>The DbMpoolFile::set_ftype method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_ftype method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_ftype method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_set_lsn_offset.html b/libdb/docs/api_cxx/memp_set_lsn_offset.html
new file mode 100644
index 0000000..e3159e2
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_set_lsn_offset.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_lsn_offset</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_lsn_offset</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_lsn_offset(int32_t lsn_offset);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_lsn_offset method specifies the zero-based byte offset
+of a log sequence number (<a href="../api_cxx/lsn_class.html">DbLsn</a>) on the file's pages, for the
+purposes of page-flushing as part of transaction checkpoint. (See the
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a> documentation for more information.)
+<p>The DbMpoolFile::set_lsn_offset method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle.
+<p>The DbMpoolFile::set_lsn_offset interface may not be called after the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> is called, the information specified to DbMpoolFile::set_lsn_offset
+must be consistent with the existing file or an error will be
+returned.
+<p>The DbMpoolFile::set_lsn_offset method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_lsn_offset method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_lsn_offset method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_set_pgcookie.html b/libdb/docs/api_cxx/memp_set_pgcookie.html
new file mode 100644
index 0000000..5a5c2cd
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_set_pgcookie.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_pgcookie</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_pgcookie</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_pgcookie(DBT *pgcookie);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_pgcookie method specifies a byte string that is provided
+to the functions registered to do input or output processing of the
+file's pages as they are read from, or written to, the backing filesystem
+store. (See the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> documentation for more
+information.)
+<p>The DbMpoolFile::set_pgcookie method configures a file in the memory pool, not only
+operations performed using the specified <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a> handle.
+<p>The DbMpoolFile::set_pgcookie interface may not be called after the <a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>
+interface is called.
+If the file is already open in the memory pool when
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a> is called, the information specified to DbMpoolFile::set_pgcookie
+will replace the existing information.
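+<p>As an illustrative sketch (the cookie structure and its contents are
+application assumptions; <b>mpf</b> and <b>dbenv</b> are assumed to be
+valid handles), an application might pass per-file information to its
+registered input/output functions as follows:
+<p><blockquote><pre>
+struct my_cookie {			/* Application-defined contents. */
+	int byte_order;
+};
+my_cookie cookie_data = { 1234 };
+
+DBT pgcookie = { 0 };			/* Zero the DBT, then fill it in. */
+pgcookie.data = &amp;cookie_data;
+pgcookie.size = sizeof(cookie_data);
+
+int ret;
+if ((ret = mpf->set_pgcookie(&amp;pgcookie)) != 0)
+	dbenv->err(ret, "DbMpoolFile::set_pgcookie");
+</pre></blockquote>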
+<p>The DbMpoolFile::set_pgcookie method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_pgcookie method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_pgcookie method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_stat.html b/libdb/docs/api_cxx/memp_stat.html
new file mode 100644
index 0000000..fb08d70
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_stat.html
@@ -0,0 +1,131 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_stat(DB_MPOOL_STAT **gsp,
+ DB_MPOOL_FSTAT *(*fsp)[], u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_stat and <a href="../api_cxx/memp_stat.html">DbEnv::memp_fstat</a> methods return the memory pool
+subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv::memp_stat and <a href="../api_cxx/memp_stat.html">DbEnv::memp_fstat</a> methods create statistical
+structures of type DB_MPOOL_STAT and DB_MPOOL_FSTAT, and copy pointers
+to them into user-specified memory locations. The memory pool region
+statistics are stored in the DB_MPOOL_STAT structure and the per-file
+memory pool statistics are stored in the DB_MPOOL_FSTAT structure.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>If <b>gsp</b> is non-NULL, the global statistics for the memory pool
+are copied into the memory location to which it refers. The
+following DB_MPOOL_STAT fields will be filled in:
+<p><dl compact>
+<dt>size_t st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>size_t st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>u_int32_t st_ncache;<dd>Number of caches.
+<dt>u_int32_t st_regsize;<dd>Individual cache size.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information about whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>u_int32_t st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>u_int32_t st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>u_int32_t st_page_trickle;<dd>Dirty pages written using the <a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a> interface.
+<dt>u_int32_t st_pages;<dd>Pages in the cache.
+<dt>u_int32_t st_page_clean;<dd>Clean pages currently in the cache.
+<dt>u_int32_t st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>u_int32_t st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>u_int32_t st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>u_int32_t st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>u_int32_t st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>u_int32_t st_hash_nowait;<dd>The number of times that a thread of control was able to obtain a hash
+bucket lock without waiting.
+<dt>u_int32_t st_hash_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a hash bucket lock.
+<dt>u_int32_t st_hash_max_wait;<dd>The maximum number of times any hash bucket lock was waited for by a
+thread of control.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain a region
+lock without waiting.
+<dt>u_int32_t st_alloc;<dd>Number of page allocations.
+<dt>u_int32_t st_alloc_buckets;<dd>Number of hash buckets checked during allocation.
+<dt>u_int32_t st_alloc_max_buckets;<dd>Maximum number of hash buckets checked during an allocation.
+<dt>u_int32_t st_alloc_pages;<dd>Number of pages checked during allocation.
+<dt>u_int32_t st_alloc_max_pages;<dd>Maximum number of pages checked during an allocation.
+</dl>
+<p>If <b>fsp</b> is non-NULL, a pointer to a NULL-terminated
+variable length array of statistics for individual files in the memory
+pool is copied into the memory location to which it refers.
+If no individual files currently exist in the memory pool, <b>fsp</b>
+will be set to NULL.
+<p>The per-file statistics are stored in structures of type DB_MPOOL_FSTAT.
+The following DB_MPOOL_FSTAT fields will be filled in for each file in
+the pool; that is, each element of the array:
+<p><dl compact>
+<dt>char *file_name;<dd>The name of the file.
+<dt>size_t st_pagesize;<dd>Page size in bytes.
+<dt>u_int32_t st_cache_hit;<dd>Requested pages found in the cache.
+<dt>u_int32_t st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>u_int32_t st_map;<dd>Requested pages mapped into the process' address space.
+<dt>u_int32_t st_page_create;<dd>Pages created in the cache.
+<dt>u_int32_t st_page_in;<dd>Pages read into the cache.
+<dt>u_int32_t st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
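+<p>As an illustrative sketch (assuming an open <b>dbenv</b> handle and
+the usual &lt;stdio.h&gt; and &lt;stdlib.h&gt; declarations), an
+application might retrieve and release the global statistics as
+follows:
+<p><blockquote><pre>
+DB_MPOOL_STAT *gsp;
+int ret;
+
+/* Request only the global statistics; no per-file statistics. */
+if ((ret = dbenv->memp_stat(&amp;gsp, NULL, 0)) != 0)
+	dbenv->err(ret, "DbEnv::memp_stat");
+else {
+	printf("cache hits:   %lu\n", (unsigned long)gsp->st_cache_hit);
+	printf("cache misses: %lu\n", (unsigned long)gsp->st_cache_miss);
+	free(gsp);		/* A single free releases the structure. */
+}
+</pre></blockquote>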
+<p>The DbEnv::memp_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_stat method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::memp_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_sync.html b/libdb/docs/api_cxx/memp_sync.html
new file mode 100644
index 0000000..c36e0a9
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_sync.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_sync</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_sync(DbLsn *lsn);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_sync method ensures that any modified pages in the pool with
+a log sequence number (<a href="../api_cxx/lsn_class.html">DbLsn</a>) less than the <b>lsn</b> argument
+are written to disk. The purpose of the <b>lsn</b> argument is to
+enable a transaction manager to ensure, as part of a checkpoint, that
+all pages modified by a certain time have been written to disk. Pages
+in the pool that cannot be immediately written back to disk (for
+example, pages that are currently in use by another thread of control)
+are waited for and written to disk as soon as it is possible to do so.
+If <b>lsn</b> is NULL, all modified pages in the pool are written
+to disk.
+<p>To support the DbEnv::memp_sync functionality, it is necessary that the
+pool functions know the location of the log sequence number on the page
+for each file type. This location should be specified when the file is
+opened using the <a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a> method. It is not required that
+the log sequence number be aligned on the page in any way.
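+<p>As an illustrative sketch (assuming an open <b>dbenv</b> handle), an
+application that simply wants every modified page in the pool written
+to disk can pass a NULL <b>lsn</b>:
+<p><blockquote><pre>
+int ret;
+/* Flush all modified pages in the pool, regardless of their LSN. */
+if ((ret = dbenv->memp_sync(NULL)) != 0)
+	dbenv->err(ret, "DbEnv::memp_sync");
+</pre></blockquote>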
+<p>The DbEnv::memp_sync method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_sync method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DbEnv::memp_sync function was called without logging having been
+initialized in the environment.
+</dl>
+<p>The DbEnv::memp_sync method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_sync method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/memp_trickle.html b/libdb/docs/api_cxx/memp_trickle.html
new file mode 100644
index 0000000..f882f02
--- /dev/null
+++ b/libdb/docs/api_cxx/memp_trickle.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_trickle</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_trickle(int pct, int *nwrotep);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_trickle method ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+If the <b>nwrotep</b> argument is non-NULL, the number of pages that
+were written to reach the correct percentage is returned in the memory
+location to which it refers.
+<p>The purpose of the DbEnv::memp_trickle function is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
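+<p>As an illustrative sketch (assuming an open <b>dbenv</b> handle and
+the usual &lt;stdio.h&gt; declarations), a background thread might keep
+at least 20 percent of the pool clean as follows:
+<p><blockquote><pre>
+int nwrote, ret;
+if ((ret = dbenv->memp_trickle(20, &amp;nwrote)) != 0)
+	dbenv->err(ret, "DbEnv::memp_trickle");
+else
+	printf("%d dirty pages written\n", nwrote);
+</pre></blockquote>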
+<p>The DbEnv::memp_trickle method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_trickle method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::memp_trickle method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_trickle method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/mempfile_class.html b/libdb/docs/api_cxx/mempfile_class.html
new file mode 100644
index 0000000..5d02c32
--- /dev/null
+++ b/libdb/docs/api_cxx/mempfile_class.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbMpoolFile {
+public:
+ DB_MPOOLFILE *DbMpoolFile::get_DB_MPOOLFILE();
+ const DB_MPOOLFILE *DbMpoolFile::get_const_DB_MPOOLFILE() const;
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The memory pool interfaces for the Berkeley DB database environment are
+methods of the <a href="../api_cxx/env_class.html">DbEnv</a> handle. The <a href="../api_cxx/env_class.html">DbEnv</a> memory pool
+methods and the DbMpoolFile class provide general-purpose,
+page-oriented buffer management of files. Although designed to work
+with the other <a href="../api_cxx/db_class.html">Db</a> classes, they are also useful for more general
+purposes. The memory pools are referred to in this document as simply
+<i>pools</i>.
+<p>Pools may be shared between processes. Pools are usually filled by
+pages from one or more files. Pages in the pool are replaced in LRU
+(least-recently-used) order, with each new page replacing the page that
+has been unused the longest. Pages retrieved from the pool using
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a> are <i>pinned</i> in the pool until they are
+returned to the control of the buffer pool using the <a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>
+method.
+<p>The DbMpoolFile object is the handle for a file in the memory
+pool. The handle is not free-threaded. Once the <a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a> method
+is called, the handle may not be accessed again, regardless of that
+method's return.
+<p>Each DbMpoolFile object has an associated DB_MPOOLFILE
+structure, which is used by the underlying implementation of Berkeley DB and
+its C-language API. The DbMpoolFile::get_DB_MPOOLFILE method returns
+a pointer to this struct. Given a const DbMpoolFile object,
+DbMpoolFile::get_const_DB_MPOOLFILE returns a const pointer to the same
+struct.
+<p>These methods may be useful for Berkeley DB applications including both C
+and C++ language software. It should not be necessary to use these
+calls in a purely C++ application.
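+<p>As an illustrative sketch (the C function <b>legacy_report</b> is a
+hypothetical part of the application, not of Berkeley DB), a mixed
+C/C++ application might hand the underlying C handle to existing C
+code as follows:
+<p><blockquote><pre>
+extern "C" void legacy_report(DB_MPOOLFILE *);	/* Existing C code. */
+
+void
+report(DbMpoolFile *mpf)
+{
+	legacy_report(mpf->get_DB_MPOOLFILE());
+}
+</pre></blockquote>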
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, DbMpoolFile
+<h1>See Also</h1>
+<a href="../api_cxx/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/pindex.src b/libdb/docs/api_cxx/pindex.src
new file mode 100644
index 0000000..17a2620
--- /dev/null
+++ b/libdb/docs/api_cxx/pindex.src
@@ -0,0 +1,416 @@
+__APIREL__/api_cxx/db_associate.html__OCT__2 @Db::associate
+__APIREL__/api_cxx/db_associate.html__OCT__3 @DB_DBT_APPMALLOC
+__APIREL__/api_cxx/db_associate.html__OCT__4 @DB_DONOTINDEX
+__APIREL__/api_cxx/db_associate.html__OCT__DB_CREATE Db::associate@DB_CREATE
+__APIREL__/api_cxx/db_associate.html__OCT__DB_AUTO_COMMIT Db::associate@DB_AUTO_COMMIT
+__APIREL__/api_cxx/db_class.html__OCT__2 @Db
+__APIREL__/api_cxx/db_class.html__OCT__DB_CXX_NO_EXCEPTIONS Db@DB_CXX_NO_EXCEPTIONS
+__APIREL__/api_cxx/db_class.html__OCT__DB_XA_CREATE Db@DB_XA_CREATE
+__APIREL__/api_cxx/db_close.html__OCT__2 @Db::close
+__APIREL__/api_cxx/db_close.html__OCT__DB_NOSYNC Db::close@DB_NOSYNC
+__APIREL__/api_cxx/db_cursor.html__OCT__2 @Db::cursor
+__APIREL__/api_cxx/db_cursor.html__OCT__DB_DIRTY_READ Db::cursor@DB_DIRTY_READ
+__APIREL__/api_cxx/db_cursor.html__OCT__DB_WRITECURSOR Db::cursor@DB_WRITECURSOR
+__APIREL__/api_cxx/db_del.html__OCT__2 @Db::del
+__APIREL__/api_cxx/db_del.html__OCT__DB_AUTO_COMMIT Db::del@DB_AUTO_COMMIT
+__APIREL__/api_cxx/db_err.html__OCT__2 @Db::err
+__APIREL__/api_cxx/db_fd.html__OCT__2 @Db::fd
+__APIREL__/api_cxx/db_get.html__OCT__2 @Db::get
+__APIREL__/api_cxx/db_get.html__OCT__DB_CONSUME Db::get@DB_CONSUME
+__APIREL__/api_cxx/db_get.html__OCT__DB_CONSUME_WAIT Db::get@DB_CONSUME_WAIT
+__APIREL__/api_cxx/db_get.html__OCT__DB_GET_BOTH Db::get@DB_GET_BOTH
+__APIREL__/api_cxx/db_get.html__OCT__DB_SET_RECNO Db::get@DB_SET_RECNO
+__APIREL__/api_cxx/db_get.html__OCT__DB_DIRTY_READ Db::get@DB_DIRTY_READ
+__APIREL__/api_cxx/db_get.html__OCT__DB_MULTIPLE Db::get@DB_MULTIPLE
+__APIREL__/api_cxx/db_get.html__OCT__DB_RMW Db::get@DB_RMW
+__APIREL__/api_cxx/db_get_byteswapped.html__OCT__2 @Db::get_byteswapped
+__APIREL__/api_cxx/db_get_type.html__OCT__2 @Db::get_type
+__APIREL__/api_cxx/db_join.html__OCT__2 @Db::join
+__APIREL__/api_cxx/db_join.html__OCT__DB_JOIN_NOSORT Db::join@DB_JOIN_NOSORT
+__APIREL__/api_cxx/db_join.html__OCT__DB_JOIN_ITEM Db::join@DB_JOIN_ITEM
+__APIREL__/api_cxx/db_join.html__OCT__DB_DIRTY_READ Db::join@DB_DIRTY_READ
+__APIREL__/api_cxx/db_join.html__OCT__DB_RMW Db::join@DB_RMW
+__APIREL__/api_cxx/db_key_range.html__OCT__2 @Db::key_range
+__APIREL__/api_cxx/db_open.html__OCT__2 @Db::open
+__APIREL__/api_cxx/db_open.html__OCT__DB_BTREE Db::open@DB_BTREE
+__APIREL__/api_cxx/db_open.html__OCT__DB_HASH Db::open@DB_HASH
+__APIREL__/api_cxx/db_open.html__OCT__DB_QUEUE Db::open@DB_QUEUE
+__APIREL__/api_cxx/db_open.html__OCT__DB_RECNO Db::open@DB_RECNO
+__APIREL__/api_cxx/db_open.html__OCT__DB_UNKNOWN Db::open@DB_UNKNOWN
+__APIREL__/api_cxx/db_open.html__OCT__DB_AUTO_COMMIT Db::open@DB_AUTO_COMMIT
+__APIREL__/api_cxx/db_open.html__OCT__DB_CREATE Db::open@DB_CREATE
+__APIREL__/api_cxx/db_open.html__OCT__DB_DIRTY_READ Db::open@DB_DIRTY_READ
+__APIREL__/api_cxx/db_open.html__OCT__DB_EXCL Db::open@DB_EXCL
+__APIREL__/api_cxx/db_open.html__OCT__DB_NOMMAP Db::open@DB_NOMMAP
+__APIREL__/api_cxx/db_open.html__OCT__DB_RDONLY Db::open@DB_RDONLY
+__APIREL__/api_cxx/db_open.html__OCT__DB_THREAD Db::open@DB_THREAD
+__APIREL__/api_cxx/db_open.html__OCT__DB_TRUNCATE Db::open@DB_TRUNCATE
+__APIREL__/api_cxx/db_open.html__OCT__DB_OLD_VERSION Db::open@DB_OLD_VERSION
+__APIREL__/api_cxx/db_put.html__OCT__2 @Db::put
+__APIREL__/api_cxx/db_put.html__OCT__DB_APPEND Db::put@DB_APPEND
+__APIREL__/api_cxx/db_put.html__OCT__DB_NODUPDATA Db::put@DB_NODUPDATA
+__APIREL__/api_cxx/db_put.html__OCT__DB_NOOVERWRITE Db::put@DB_NOOVERWRITE
+__APIREL__/api_cxx/db_put.html__OCT__DB_AUTO_COMMIT Db::put@DB_AUTO_COMMIT
+__APIREL__/api_cxx/db_remove.html__OCT__2 @Db::remove
+__APIREL__/api_cxx/db_rename.html__OCT__2 @Db::rename
+__APIREL__/api_cxx/db_set_append_recno.html__OCT__2 @Db::set_append_recno
+__APIREL__/api_cxx/db_set_bt_compare.html__OCT__2 @Db::set_bt_compare
+__APIREL__/api_cxx/db_set_bt_minkey.html__OCT__2 @Db::set_bt_minkey
+__APIREL__/api_cxx/db_set_bt_prefix.html__OCT__2 @Db::set_bt_prefix
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__2 @Db::set_cache_priority
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__DB_PRIORITY_VERY_LOW Db::set_cache_priority@DB_PRIORITY_VERY_LOW
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__DB_PRIORITY_LOW Db::set_cache_priority@DB_PRIORITY_LOW
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__DB_PRIORITY_DEFAULT Db::set_cache_priority@DB_PRIORITY_DEFAULT
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__DB_PRIORITY_HIGH Db::set_cache_priority@DB_PRIORITY_HIGH
+__APIREL__/api_cxx/db_set_cache_priority.html__OCT__DB_PRIORITY_VERY_HIGH Db::set_cache_priority@DB_PRIORITY_VERY_HIGH
+__APIREL__/api_cxx/db_set_cachesize.html__OCT__2 @Db::set_cachesize
+__APIREL__/api_cxx/db_set_dup_compare.html__OCT__2 @Db::set_dup_compare
+__APIREL__/api_cxx/db_set_encrypt.html__OCT__2 @Db::set_encrypt
+__APIREL__/api_cxx/db_set_encrypt.html__OCT__DB_ENCRYPT_AES Db::set_encrypt@DB_ENCRYPT_AES
+__APIREL__/api_cxx/db_set_errcall.html__OCT__2 @Db::set_errcall
+__APIREL__/api_cxx/db_set_errpfx.html__OCT__2 @Db::set_errpfx
+__APIREL__/api_cxx/db_set_feedback.html__OCT__2 @Db::set_feedback
+__APIREL__/api_cxx/db_set_feedback.html__OCT__DB_UPGRADE Db::set_feedback@DB_UPGRADE
+__APIREL__/api_cxx/db_set_feedback.html__OCT__DB_VERIFY Db::set_feedback@DB_VERIFY
+__APIREL__/api_cxx/db_set_flags.html__OCT__2 @Db::set_flags
+__APIREL__/api_cxx/db_set_flags.html__OCT__3 database page @checksum
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_CHKSUM_SHA1 Db::set_flags@DB_CHKSUM_SHA1
+__APIREL__/api_cxx/db_set_flags.html__OCT__4 database @encryption
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_ENCRYPT Db::set_flags@DB_ENCRYPT
+__APIREL__/api_cxx/db_set_flags.html__OCT__5 @duplicate data items
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUP Db::set_flags@DB_DUP
+__APIREL__/api_cxx/db_set_flags.html__OCT__6 sorted @duplicate data items
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUPSORT Db::set_flags@DB_DUPSORT
+__APIREL__/api_cxx/db_set_flags.html__OCT__7 accessing Btree records by @record number
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_RECNUM Db::set_flags@DB_RECNUM
+__APIREL__/api_cxx/db_set_flags.html__OCT__8 turn off @reverse splits in Btree databases
+__APIREL__/api_cxx/db_set_flags.html__OCT__9 turn off reverse @splits in Btree databases
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_REVSPLITOFF Db::set_flags@DB_REVSPLITOFF
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUP Db::set_flags@DB_DUP
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_DUPSORT Db::set_flags@DB_DUPSORT
+__APIREL__/api_cxx/db_set_flags.html__OCT__10 @renumbering records in Recno databases
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_RENUMBER Db::set_flags@DB_RENUMBER
+__APIREL__/api_cxx/db_set_flags.html__OCT__11 pre-loading @text files into Recno databases
+__APIREL__/api_cxx/db_set_flags.html__OCT__DB_SNAPSHOT Db::set_flags@DB_SNAPSHOT
+__APIREL__/api_cxx/db_set_h_ffactor.html__OCT__2 @Db::set_h_ffactor
+__APIREL__/api_cxx/db_set_h_hash.html__OCT__2 @Db::set_h_hash
+__APIREL__/api_cxx/db_set_h_nelem.html__OCT__2 @Db::set_h_nelem
+__APIREL__/api_cxx/db_set_lorder.html__OCT__2 @Db::set_lorder
+__APIREL__/api_cxx/db_set_pagesize.html__OCT__2 @Db::set_pagesize
+__APIREL__/api_cxx/db_set_q_extentsize.html__OCT__2 @Db::set_q_extentsize
+__APIREL__/api_cxx/db_set_re_delim.html__OCT__2 @Db::set_re_delim
+__APIREL__/api_cxx/db_set_re_len.html__OCT__2 @Db::set_re_len
+__APIREL__/api_cxx/db_set_re_pad.html__OCT__2 @Db::set_re_pad
+__APIREL__/api_cxx/db_set_re_source.html__OCT__2 @Db::set_re_source
+__APIREL__/api_cxx/db_stat.html__OCT__2 @Db::stat
+__APIREL__/api_cxx/db_stat.html__OCT__DB_FAST_STAT Db::stat@DB_FAST_STAT
+__APIREL__/api_cxx/db_sync.html__OCT__2 @Db::sync
+__APIREL__/api_cxx/db_truncate.html__OCT__2 @Db::truncate
+__APIREL__/api_cxx/db_truncate.html__OCT__DB_AUTO_COMMIT Db::truncate@DB_AUTO_COMMIT
+__APIREL__/api_cxx/db_upgrade.html__OCT__2 @Db::upgrade
+__APIREL__/api_cxx/db_upgrade.html__OCT__DB_DUPSORT Db::upgrade@DB_DUPSORT
+__APIREL__/api_cxx/db_upgrade.html__OCT__DB_OLD_VERSION Db::upgrade@DB_OLD_VERSION
+__APIREL__/api_cxx/db_verify.html__OCT__2 @Db::verify
+__APIREL__/api_cxx/db_verify.html__OCT__DB_SALVAGE Db::verify@DB_SALVAGE
+__APIREL__/api_cxx/db_verify.html__OCT__DB_AGGRESSIVE Db::verify@DB_AGGRESSIVE
+__APIREL__/api_cxx/db_verify.html__OCT__DB_PRINTABLE Db::verify@DB_PRINTABLE
+__APIREL__/api_cxx/db_verify.html__OCT__DB_NOORDERCHK Db::verify@DB_NOORDERCHK
+__APIREL__/api_cxx/db_verify.html__OCT__DB_ORDERCHKONLY Db::verify@DB_ORDERCHKONLY
+__APIREL__/api_cxx/db_verify.html__OCT__3 @DB_VERIFY_BAD
+__APIREL__/api_cxx/dbt_class.html__OCT__2 @Dbt
+__APIREL__/api_cxx/dbt_class.html__OCT__3 @key/data pairs
+__APIREL__/api_cxx/dbt_class.html__OCT__data Dbt@data
+__APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_MALLOC Dbt@DB_DBT_MALLOC
+__APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_REALLOC Dbt@DB_DBT_REALLOC
+__APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_USERMEM Dbt@DB_DBT_USERMEM
+__APIREL__/api_cxx/dbt_class.html__OCT__DB_DBT_PARTIAL Dbt@DB_DBT_PARTIAL
+__APIREL__/api_cxx/db_set_alloc.html__OCT__2 @Db::set_alloc
+__APIREL__/api_cxx/db_set_errfile.html__OCT__2 @Db::set_errfile
+__APIREL__/api_cxx/db_set_error_stream.html__OCT__2 @Db::set_error_stream
+__APIREL__/api_cxx/db_set_paniccall.html__OCT__2 @Db::set_paniccall
+__APIREL__/api_cxx/dbt_bulk.html__OCT__2 @DBT
+__APIREL__/api_cxx/dbt_bulk.html__OCT__3 @bulk retrieval
+__APIREL__/api_cxx/dbt_bulk.html__OCT__DB_MULTIPLE_INIT DBT@DB_MULTIPLE_INIT
+__APIREL__/api_cxx/dbt_bulk.html__OCT__DB_MULTIPLE_NEXT DBT@DB_MULTIPLE_NEXT
+__APIREL__/api_cxx/dbt_bulk.html__OCT__DB_MULTIPLE_KEY_NEXT DBT@DB_MULTIPLE_KEY_NEXT
+__APIREL__/api_cxx/dbt_bulk.html__OCT__DB_MULTIPLE_RECNO_NEXT DBT@DB_MULTIPLE_RECNO_NEXT
+__APIREL__/api_cxx/dbc_class.html__OCT__2 @Dbc
+__APIREL__/api_cxx/dbc_close.html__OCT__2 @Dbc::close
+__APIREL__/api_cxx/dbc_count.html__OCT__2 @Dbc::count
+__APIREL__/api_cxx/dbc_del.html__OCT__2 @Dbc::del
+__APIREL__/api_cxx/dbc_dup.html__OCT__2 @Dbc::dup
+__APIREL__/api_cxx/dbc_dup.html__OCT__DB_POSITION Dbc::dup@DB_POSITION
+__APIREL__/api_cxx/dbc_get.html__OCT__2 @Dbc::get
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_CURRENT Dbc::get@DB_CURRENT
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_FIRST Dbc::get@DB_FIRST
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_LAST Dbc::get@DB_LAST
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_GET_BOTH Dbc::get@DB_GET_BOTH
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_GET_BOTH_RANGE Dbc::get@DB_GET_BOTH_RANGE
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_GET_RECNO Dbc::get@DB_GET_RECNO
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_JOIN_ITEM Dbc::get@DB_JOIN_ITEM
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_NEXT Dbc::get@DB_NEXT
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_PREV Dbc::get@DB_PREV
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_NEXT_DUP Dbc::get@DB_NEXT_DUP
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_NEXT_NODUP Dbc::get@DB_NEXT_NODUP
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_PREV_NODUP Dbc::get@DB_PREV_NODUP
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_SET Dbc::get@DB_SET
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_SET_RANGE Dbc::get@DB_SET_RANGE
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_SET_RECNO Dbc::get@DB_SET_RECNO
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_DIRTY_READ Dbc::get@DB_DIRTY_READ
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_MULTIPLE Dbc::get@DB_MULTIPLE
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_MULTIPLE_KEY Dbc::get@DB_MULTIPLE_KEY
+__APIREL__/api_cxx/dbc_get.html__OCT__DB_RMW Dbc::get@DB_RMW
+__APIREL__/api_cxx/dbc_put.html__OCT__2 @Dbc::put
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_AFTER Dbc::put@DB_AFTER
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_BEFORE Dbc::put@DB_BEFORE
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_CURRENT Dbc::put@DB_CURRENT
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_KEYFIRST Dbc::put@DB_KEYFIRST
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_KEYLAST Dbc::put@DB_KEYLAST
+__APIREL__/api_cxx/dbc_put.html__OCT__DB_NODUPDATA Dbc::put@DB_NODUPDATA
+__APIREL__/api_cxx/except_class.html__OCT__2 @DbException
+__APIREL__/api_cxx/runrec_class.html__OCT__2 @DbRunRecoveryException
+__APIREL__/api_cxx/env_class.html__OCT__2 @DbEnv
+__APIREL__/api_cxx/env_class.html__OCT__DB_CLIENT DbEnv@DB_CLIENT
+__APIREL__/api_cxx/env_class.html__OCT__DB_CXX_NO_EXCEPTIONS DbEnv@DB_CXX_NO_EXCEPTIONS
+__APIREL__/api_cxx/env_close.html__OCT__2 @DbEnv::close
+__APIREL__/api_cxx/env_dbremove.html__OCT__2 @DbEnv::dbremove
+__APIREL__/api_cxx/env_dbremove.html__OCT__DB_AUTO_COMMIT DbEnv::dbremove@DB_AUTO_COMMIT
+__APIREL__/api_cxx/env_dbrename.html__OCT__2 @DbEnv::dbrename
+__APIREL__/api_cxx/env_dbrename.html__OCT__DB_AUTO_COMMIT DbEnv::dbrename@DB_AUTO_COMMIT
+__APIREL__/api_cxx/env_err.html__OCT__2 @DbEnv::err
+__APIREL__/api_cxx/env_open.html__OCT__2 @DbEnv::open
+__APIREL__/api_cxx/env_open.html__OCT__DB_JOINENV DbEnv::open@DB_JOINENV
+__APIREL__/api_cxx/env_open.html__OCT__DB_INIT_CDB DbEnv::open@DB_INIT_CDB
+__APIREL__/api_cxx/env_open.html__OCT__DB_INIT_LOCK DbEnv::open@DB_INIT_LOCK
+__APIREL__/api_cxx/env_open.html__OCT__DB_INIT_LOG DbEnv::open@DB_INIT_LOG
+__APIREL__/api_cxx/env_open.html__OCT__DB_INIT_MPOOL DbEnv::open@DB_INIT_MPOOL
+__APIREL__/api_cxx/env_open.html__OCT__DB_INIT_TXN DbEnv::open@DB_INIT_TXN
+__APIREL__/api_cxx/env_open.html__OCT__DB_RECOVER DbEnv::open@DB_RECOVER
+__APIREL__/api_cxx/env_open.html__OCT__DB_RECOVER_FATAL DbEnv::open@DB_RECOVER_FATAL
+__APIREL__/api_cxx/env_open.html__OCT__3 use @environment variables in naming
+__APIREL__/api_cxx/env_open.html__OCT__DB_USE_ENVIRON DbEnv::open@DB_USE_ENVIRON
+__APIREL__/api_cxx/env_open.html__OCT__DB_USE_ENVIRON_ROOT DbEnv::open@DB_USE_ENVIRON_ROOT
+__APIREL__/api_cxx/env_open.html__OCT__DB_CREATE DbEnv::open@DB_CREATE
+__APIREL__/api_cxx/env_open.html__OCT__DB_LOCKDOWN DbEnv::open@DB_LOCKDOWN
+__APIREL__/api_cxx/env_open.html__OCT__DB_PRIVATE DbEnv::open@DB_PRIVATE
+__APIREL__/api_cxx/env_open.html__OCT__DB_SYSTEM_MEM DbEnv::open@DB_SYSTEM_MEM
+__APIREL__/api_cxx/env_open.html__OCT__DB_THREAD DbEnv::open@DB_THREAD
+__APIREL__/api_cxx/env_remove.html__OCT__2 @DbEnv::remove
+__APIREL__/api_cxx/env_remove.html__OCT__DB_FORCE DbEnv::remove@DB_FORCE
+__APIREL__/api_cxx/env_remove.html__OCT__3 use @environment variables in naming
+__APIREL__/api_cxx/env_remove.html__OCT__DB_USE_ENVIRON DbEnv::remove@DB_USE_ENVIRON
+__APIREL__/api_cxx/env_remove.html__OCT__DB_USE_ENVIRON_ROOT DbEnv::remove@DB_USE_ENVIRON_ROOT
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__2 @DbEnv::set_app_dispatch
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_BACKWARD_ROLL DbEnv::set_app_dispatch@DB_TXN_BACKWARD_ROLL
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_FORWARD_ROLL DbEnv::set_app_dispatch@DB_TXN_FORWARD_ROLL
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_ABORT DbEnv::set_app_dispatch@DB_TXN_ABORT
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_APPLY DbEnv::set_app_dispatch@DB_TXN_APPLY
+__APIREL__/api_cxx/env_set_app_dispatch.html__OCT__DB_TXN_PRINT DbEnv::set_app_dispatch@DB_TXN_PRINT
+__APIREL__/api_cxx/env_set_cachesize.html__OCT__2 @DbEnv::set_cachesize
+__APIREL__/api_cxx/env_set_data_dir.html__OCT__2 @DbEnv::set_data_dir
+__APIREL__/api_cxx/env_set_encrypt.html__OCT__2 @DbEnv::set_encrypt
+__APIREL__/api_cxx/env_set_encrypt.html__OCT__DB_ENCRYPT_AES DbEnv::set_encrypt@DB_ENCRYPT_AES
+__APIREL__/api_cxx/env_set_errcall.html__OCT__2 @DbEnv::set_errcall
+__APIREL__/api_cxx/env_set_errpfx.html__OCT__2 @DbEnv::set_errpfx
+__APIREL__/api_cxx/env_set_feedback.html__OCT__2 @DbEnv::set_feedback
+__APIREL__/api_cxx/env_set_feedback.html__OCT__DB_RECOVER DbEnv::set_feedback@DB_RECOVER
+__APIREL__/api_cxx/env_set_flags.html__OCT__2 @DbEnv::set_flags
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_AUTO_COMMIT DbEnv::set_flags@DB_AUTO_COMMIT
+__APIREL__/api_cxx/env_set_flags.html__OCT__3 configure @locking for Berkeley DB Concurrent Data Store
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_CDB_ALLDB DbEnv::set_flags@DB_CDB_ALLDB
+__APIREL__/api_cxx/env_set_flags.html__OCT__4 turn off system @buffering for database files
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_DIRECT_DB DbEnv::set_flags@DB_DIRECT_DB
+__APIREL__/api_cxx/env_set_flags.html__OCT__5 turn off system @buffering for log files
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_DIRECT_LOG DbEnv::set_flags@DB_DIRECT_LOG
+__APIREL__/api_cxx/env_set_flags.html__OCT__6 ignore @locking
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_NOLOCKING DbEnv::set_flags@DB_NOLOCKING
+__APIREL__/api_cxx/env_set_flags.html__OCT__7 turn off database file @memory mapping
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_NOMMAP DbEnv::set_flags@DB_NOMMAP
+__APIREL__/api_cxx/env_set_flags.html__OCT__8 ignore database environment @panic
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_NOPANIC DbEnv::set_flags@DB_NOPANIC
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_OVERWRITE DbEnv::set_flags@DB_OVERWRITE
+__APIREL__/api_cxx/env_set_flags.html__OCT__9 turn off access to a database @environment
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_PANIC_ENVIRONMENT DbEnv::set_flags@DB_PANIC_ENVIRONMENT
+__APIREL__/api_cxx/env_set_flags.html__OCT__10 fault database @environment in during open
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_REGION_INIT DbEnv::set_flags@DB_REGION_INIT
+__APIREL__/api_cxx/env_set_flags.html__OCT__11 turn off synchronous @transaction commit
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_TXN_NOSYNC DbEnv::set_flags@DB_TXN_NOSYNC
+__APIREL__/api_cxx/env_set_flags.html__OCT__12 turn off synchronous @transaction commit
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_TXN_WRITE_NOSYNC DbEnv::set_flags@DB_TXN_WRITE_NOSYNC
+__APIREL__/api_cxx/env_set_flags.html__OCT__13 configure for @stress testing
+__APIREL__/api_cxx/env_set_flags.html__OCT__DB_YIELDCPU DbEnv::set_flags@DB_YIELDCPU
+__APIREL__/api_cxx/env_set_lg_bsize.html__OCT__2 @DbEnv::set_lg_bsize
+__APIREL__/api_cxx/env_set_lg_dir.html__OCT__2 @DbEnv::set_lg_dir
+__APIREL__/api_cxx/env_set_lg_max.html__OCT__2 @DbEnv::set_lg_max
+__APIREL__/api_cxx/env_set_lg_regionmax.html__OCT__2 @DbEnv::set_lg_regionmax
+__APIREL__/api_cxx/env_set_lk_conflicts.html__OCT__2 @DbEnv::set_lk_conflicts
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__2 @DbEnv::set_lk_detect
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_DEFAULT DbEnv::set_lk_detect@DB_LOCK_DEFAULT
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_EXPIRE DbEnv::set_lk_detect@DB_LOCK_EXPIRE
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MAXLOCKS DbEnv::set_lk_detect@DB_LOCK_MAXLOCKS
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MINLOCKS DbEnv::set_lk_detect@DB_LOCK_MINLOCKS
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_MINWRITE DbEnv::set_lk_detect@DB_LOCK_MINWRITE
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_OLDEST DbEnv::set_lk_detect@DB_LOCK_OLDEST
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_RANDOM DbEnv::set_lk_detect@DB_LOCK_RANDOM
+__APIREL__/api_cxx/env_set_lk_detect.html__OCT__DB_LOCK_YOUNGEST DbEnv::set_lk_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_cxx/env_set_lk_max_lockers.html__OCT__2 @DbEnv::set_lk_max_lockers
+__APIREL__/api_cxx/env_set_lk_max_locks.html__OCT__2 @DbEnv::set_lk_max_locks
+__APIREL__/api_cxx/env_set_lk_max_objects.html__OCT__2 @DbEnv::set_lk_max_objects
+__APIREL__/api_cxx/env_set_mp_mmapsize.html__OCT__2 @DbEnv::set_mp_mmapsize
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__2 @DbEnv::set_rpc_server
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__3 @DB_NOSERVER
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__4 @DB_NOSERVER_ID
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER DbEnv::set_rpc_server@DB_NOSERVER
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER_ID DbEnv::set_rpc_server@DB_NOSERVER_ID
+__APIREL__/api_cxx/env_set_rpc_server.html__OCT__DB_NOSERVER_HOME DbEnv::set_rpc_server@DB_NOSERVER_HOME
+__APIREL__/api_cxx/env_set_shm_key.html__OCT__2 @DbEnv::set_shm_key
+__APIREL__/api_cxx/env_set_tas_spins.html__OCT__2 @DbEnv::set_tas_spins
+__APIREL__/api_cxx/env_set_timeout.html__OCT__2 @DbEnv::set_timeout
+__APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DbEnv::set_timeout@DB_SET_LOCK_TIMEOUT
+__APIREL__/api_cxx/env_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DbEnv::set_timeout@DB_SET_TXN_TIMEOUT
+__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__2 @DbEnv::set_tmp_dir
+__APIREL__/api_cxx/env_set_tmp_dir.html__OCT__3 @temporary files
+__APIREL__/api_cxx/env_set_tx_max.html__OCT__2 @DbEnv::set_tx_max
+__APIREL__/api_cxx/env_set_tx_timestamp.html__OCT__2 @DbEnv::set_tx_timestamp
+__APIREL__/api_cxx/env_set_verbose.html__OCT__2 @DbEnv::set_verbose
+__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_CHKPOINT DbEnv::set_verbose@DB_VERB_CHKPOINT
+__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_DEADLOCK DbEnv::set_verbose@DB_VERB_DEADLOCK
+__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_RECOVERY DbEnv::set_verbose@DB_VERB_RECOVERY
+__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_REPLICATION DbEnv::set_verbose@DB_VERB_REPLICATION
+__APIREL__/api_cxx/env_set_verbose.html__OCT__DB_VERB_WAITSFOR DbEnv::set_verbose@DB_VERB_WAITSFOR
+__APIREL__/api_cxx/env_strerror.html__OCT__2 @DbEnv::strerror
+__APIREL__/api_cxx/env_version.html__OCT__2 @DbEnv::version
+__APIREL__/api_cxx/env_set_errfile.html__OCT__2 @DbEnv::set_errfile
+__APIREL__/api_cxx/env_set_paniccall.html__OCT__2 @DbEnv::set_paniccall
+__APIREL__/api_cxx/env_set_alloc.html__OCT__2 @DbEnv::set_alloc
+__APIREL__/api_cxx/env_set_error_stream.html__OCT__2 @DbEnv::set_error_stream
+__APIREL__/api_cxx/lock_class.html__OCT__2 @DbLock
+__APIREL__/api_cxx/lock_detect.html__OCT__2 @DbEnv::lock_detect
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_DEFAULT DbEnv::lock_detect@DB_LOCK_DEFAULT
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_EXPIRE DbEnv::lock_detect@DB_LOCK_EXPIRE
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MAXLOCKS DbEnv::lock_detect@DB_LOCK_MAXLOCKS
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MINLOCKS DbEnv::lock_detect@DB_LOCK_MINLOCKS
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_MINWRITE DbEnv::lock_detect@DB_LOCK_MINWRITE
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_OLDEST DbEnv::lock_detect@DB_LOCK_OLDEST
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_RANDOM DbEnv::lock_detect@DB_LOCK_RANDOM
+__APIREL__/api_cxx/lock_detect.html__OCT__DB_LOCK_YOUNGEST DbEnv::lock_detect@DB_LOCK_YOUNGEST
+__APIREL__/api_cxx/lock_get.html__OCT__2 @DbEnv::lock_get
+__APIREL__/api_cxx/lock_get.html__OCT__DB_LOCK_NOWAIT DbEnv::lock_get@DB_LOCK_NOWAIT
+__APIREL__/api_cxx/lock_id.html__OCT__2 @DbEnv::lock_id
+__APIREL__/api_cxx/lock_id_free.html__OCT__2 @DbEnv::lock_id_free
+__APIREL__/api_cxx/lock_put.html__OCT__2 @DbEnv::lock_put
+__APIREL__/api_cxx/lock_stat.html__OCT__2 @DbEnv::lock_stat
+__APIREL__/api_cxx/lock_stat.html__OCT__DB_STAT_CLEAR DbEnv::lock_stat@DB_STAT_CLEAR
+__APIREL__/api_cxx/lock_vec.html__OCT__2 @DbEnv::lock_vec
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_NOWAIT DbEnv::lock_vec@DB_LOCK_NOWAIT
+__APIREL__/api_cxx/lock_vec.html__OCT__op DbEnv::lock_vec@op
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_GET DbEnv::lock_vec@DB_LOCK_GET
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_GET_TIMEOUT DbEnv::lock_vec@DB_LOCK_GET_TIMEOUT
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_PUT DbEnv::lock_vec@DB_LOCK_PUT
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_PUT_ALL DbEnv::lock_vec@DB_LOCK_PUT_ALL
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_PUT_OBJ DbEnv::lock_vec@DB_LOCK_PUT_OBJ
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_TIMEOUT DbEnv::lock_vec@DB_LOCK_TIMEOUT
+__APIREL__/api_cxx/lock_vec.html__OCT__lock DbEnv::lock_vec@lock
+__APIREL__/api_cxx/lock_vec.html__OCT__mode DbEnv::lock_vec@mode
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_READ DbEnv::lock_vec@DB_LOCK_READ
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_WRITE DbEnv::lock_vec@DB_LOCK_WRITE
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IWRITE DbEnv::lock_vec@DB_LOCK_IWRITE
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IREAD DbEnv::lock_vec@DB_LOCK_IREAD
+__APIREL__/api_cxx/lock_vec.html__OCT__DB_LOCK_IWR DbEnv::lock_vec@DB_LOCK_IWR
+__APIREL__/api_cxx/lock_vec.html__OCT__obj DbEnv::lock_vec@obj
+__APIREL__/api_cxx/deadlock_class.html__OCT__2 @DbDeadlockException
+__APIREL__/api_cxx/lockng_class.html__OCT__2 @DbLockNotGrantedException
+__APIREL__/api_cxx/log_archive.html__OCT__2 @DbEnv::log_archive
+__APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_ABS DbEnv::log_archive@DB_ARCH_ABS
+__APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_DATA DbEnv::log_archive@DB_ARCH_DATA
+__APIREL__/api_cxx/log_archive.html__OCT__DB_ARCH_LOG DbEnv::log_archive@DB_ARCH_LOG
+__APIREL__/api_cxx/log_compare.html__OCT__2 @DbEnv::log_compare
+__APIREL__/api_cxx/log_cursor.html__OCT__2 @DbEnv::log_cursor
+__APIREL__/api_cxx/log_file.html__OCT__2 @DbEnv::log_file
+__APIREL__/api_cxx/log_flush.html__OCT__2 @DbEnv::log_flush
+__APIREL__/api_cxx/log_put.html__OCT__2 @DbEnv::log_put
+__APIREL__/api_cxx/log_put.html__OCT__DB_FLUSH DbEnv::log_put@DB_FLUSH
+__APIREL__/api_cxx/log_stat.html__OCT__2 @DbEnv::log_stat
+__APIREL__/api_cxx/log_stat.html__OCT__DB_STAT_CLEAR DbEnv::log_stat@DB_STAT_CLEAR
+__APIREL__/api_cxx/logc_class.html__OCT__2 @DbLogc
+__APIREL__/api_cxx/logc_close.html__OCT__2 @DbLogc::close
+__APIREL__/api_cxx/logc_get.html__OCT__2 @DbLogc::get
+__APIREL__/api_cxx/logc_get.html__OCT__DB_FIRST DbLogc::get@DB_FIRST
+__APIREL__/api_cxx/logc_get.html__OCT__DB_LAST DbLogc::get@DB_LAST
+__APIREL__/api_cxx/logc_get.html__OCT__DB_NEXT DbLogc::get@DB_NEXT
+__APIREL__/api_cxx/logc_get.html__OCT__DB_PREV DbLogc::get@DB_PREV
+__APIREL__/api_cxx/logc_get.html__OCT__DB_CURRENT DbLogc::get@DB_CURRENT
+__APIREL__/api_cxx/logc_get.html__OCT__DB_SET DbLogc::get@DB_SET
+__APIREL__/api_cxx/lsn_class.html__OCT__2 @DbLsn
+__APIREL__/api_cxx/memp_fclose.html__OCT__2 @DbMpoolFile::close
+__APIREL__/api_cxx/memp_fopen.html__OCT__2 @DbMpoolFile::open
+__APIREL__/api_cxx/memp_fopen.html__OCT__DB_CREATE DbMpoolFile::open@DB_CREATE
+__APIREL__/api_cxx/memp_fopen.html__OCT__3 turn off system @buffering
+__APIREL__/api_cxx/memp_fopen.html__OCT__DB_DIRECT DbMpoolFile::open@DB_DIRECT
+__APIREL__/api_cxx/memp_fopen.html__OCT__DB_NOMMAP DbMpoolFile::open@DB_NOMMAP
+__APIREL__/api_cxx/memp_fopen.html__OCT__DB_ODDFILESIZE DbMpoolFile::open@DB_ODDFILESIZE
+__APIREL__/api_cxx/memp_fopen.html__OCT__DB_RDONLY DbMpoolFile::open@DB_RDONLY
+__APIREL__/api_cxx/memp_fsync.html__OCT__2 @DbMpoolFile::sync
+__APIREL__/api_cxx/memp_register.html__OCT__2 @DbEnv::memp_register
+__APIREL__/api_cxx/memp_stat.html__OCT__2 @DbEnv::memp_stat
+__APIREL__/api_cxx/memp_stat.html__OCT__DB_STAT_CLEAR DbEnv::memp_stat@DB_STAT_CLEAR
+__APIREL__/api_cxx/memp_sync.html__OCT__2 @DbEnv::memp_sync
+__APIREL__/api_cxx/memp_trickle.html__OCT__2 @DbEnv::memp_trickle
+__APIREL__/api_cxx/mempfile_class.html__OCT__2 @DbMpoolFile
+__APIREL__/api_cxx/memp_fcreate.html__OCT__2 @DbEnv::memp_fcreate
+__APIREL__/api_cxx/memp_fget.html__OCT__2 @DbMpoolFile::get
+__APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_CREATE DbMpoolFile::get@DB_MPOOL_CREATE
+__APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_LAST DbMpoolFile::get@DB_MPOOL_LAST
+__APIREL__/api_cxx/memp_fget.html__OCT__DB_MPOOL_NEW DbMpoolFile::get@DB_MPOOL_NEW
+__APIREL__/api_cxx/memp_fget.html__OCT__3 @DB_PAGE_NOTFOUND
+__APIREL__/api_cxx/memp_fput.html__OCT__2 @DbMpoolFile::put
+__APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_CLEAN DbMpoolFile::put@DB_MPOOL_CLEAN
+__APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_DIRTY DbMpoolFile::put@DB_MPOOL_DIRTY
+__APIREL__/api_cxx/memp_fput.html__OCT__DB_MPOOL_DISCARD DbMpoolFile::put@DB_MPOOL_DISCARD
+__APIREL__/api_cxx/memp_fset.html__OCT__2 @DbMpoolFile::set
+__APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_CLEAN DbMpoolFile::set@DB_MPOOL_CLEAN
+__APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_DIRTY DbMpoolFile::set@DB_MPOOL_DIRTY
+__APIREL__/api_cxx/memp_fset.html__OCT__DB_MPOOL_DISCARD DbMpoolFile::set@DB_MPOOL_DISCARD
+__APIREL__/api_cxx/memp_set_clear_len.html__OCT__2 @DbMpoolFile::set_clear_len
+__APIREL__/api_cxx/memp_set_fileid.html__OCT__2 @DbMpoolFile::set_fileid
+__APIREL__/api_cxx/memp_set_ftype.html__OCT__2 @DbMpoolFile::set_ftype
+__APIREL__/api_cxx/memp_set_lsn_offset.html__OCT__2 @DbMpoolFile::set_lsn_offset
+__APIREL__/api_cxx/memp_set_pgcookie.html__OCT__2 @DbMpoolFile::set_pgcookie
+__APIREL__/api_cxx/memp_class.html__OCT__2 @DbMemoryException
+__APIREL__/api_cxx/rep_elect.html__OCT__2 @DbEnv::rep_elect
+__APIREL__/api_cxx/rep_elect.html__OCT__3 @DB_REP_UNAVAIL
+__APIREL__/api_cxx/rep_limit.html__OCT__2 @DbEnv::set_rep_limit
+__APIREL__/api_cxx/rep_message.html__OCT__2 @DbEnv::rep_process_message
+__APIREL__/api_cxx/rep_start.html__OCT__2 @DbEnv::rep_start
+__APIREL__/api_cxx/rep_start.html__OCT__DB_REP_CLIENT DbEnv::rep_start@DB_REP_CLIENT
+__APIREL__/api_cxx/rep_start.html__OCT__DB_REP_LOGSONLY DbEnv::rep_start@DB_REP_LOGSONLY
+__APIREL__/api_cxx/rep_start.html__OCT__DB_REP_MASTER DbEnv::rep_start@DB_REP_MASTER
+__APIREL__/api_cxx/rep_stat.html__OCT__2 @DbEnv::rep_stat
+__APIREL__/api_cxx/rep_stat.html__OCT__DB_STAT_CLEAR DbEnv::rep_stat@DB_STAT_CLEAR
+__APIREL__/api_cxx/rep_transport.html__OCT__2 @DbEnv::set_rep_transport
+__APIREL__/api_cxx/rep_transport.html__OCT__3 @DB_EID_BROADCAST
+__APIREL__/api_cxx/rep_transport.html__OCT__DB_REP_PERMANENT DbEnv::set_rep_transport@DB_REP_PERMANENT
+__APIREL__/api_cxx/txn_abort.html__OCT__2 @DbTxn::abort
+__APIREL__/api_cxx/txn_begin.html__OCT__2 @DbEnv::txn_begin
+__APIREL__/api_cxx/txn_begin.html__OCT__DB_DIRTY_READ DbEnv::txn_begin@DB_DIRTY_READ
+__APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_NOSYNC DbEnv::txn_begin@DB_TXN_NOSYNC
+__APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_NOWAIT DbEnv::txn_begin@DB_TXN_NOWAIT
+__APIREL__/api_cxx/txn_begin.html__OCT__DB_TXN_SYNC DbEnv::txn_begin@DB_TXN_SYNC
+__APIREL__/api_cxx/txn_checkpoint.html__OCT__2 @DbEnv::txn_checkpoint
+__APIREL__/api_cxx/txn_checkpoint.html__OCT__DB_FORCE DbEnv::txn_checkpoint@DB_FORCE
+__APIREL__/api_cxx/txn_class.html__OCT__2 @DbTxn
+__APIREL__/api_cxx/txn_commit.html__OCT__2 @DbTxn::commit
+__APIREL__/api_cxx/txn_commit.html__OCT__DB_TXN_NOSYNC DbTxn::commit@DB_TXN_NOSYNC
+__APIREL__/api_cxx/txn_commit.html__OCT__DB_TXN_SYNC DbTxn::commit@DB_TXN_SYNC
+__APIREL__/api_cxx/txn_discard.html__OCT__2 @DbTxn::discard
+__APIREL__/api_cxx/txn_id.html__OCT__2 @DbTxn::id
+__APIREL__/api_cxx/txn_prepare.html__OCT__2 @DbTxn::prepare
+__APIREL__/api_cxx/txn_prepare.html__OCT__3 @DB_XIDDATASIZE
+__APIREL__/api_cxx/txn_recover.html__OCT__2 @DbEnv::txn_recover
+__APIREL__/api_cxx/txn_recover.html__OCT__DB_FIRST DbEnv::txn_recover@DB_FIRST
+__APIREL__/api_cxx/txn_recover.html__OCT__DB_NEXT DbEnv::txn_recover@DB_NEXT
+__APIREL__/api_cxx/txn_set_timeout.html__OCT__2 @DbTxn::set_timeout
+__APIREL__/api_cxx/txn_set_timeout.html__OCT__DB_SET_LOCK_TIMEOUT DbTxn::set_timeout@DB_SET_LOCK_TIMEOUT
+__APIREL__/api_cxx/txn_set_timeout.html__OCT__DB_SET_TXN_TIMEOUT DbTxn::set_timeout@DB_SET_TXN_TIMEOUT
+__APIREL__/api_cxx/txn_stat.html__OCT__2 @DbEnv::txn_stat
+__APIREL__/api_cxx/txn_stat.html__OCT__DB_STAT_CLEAR DbEnv::txn_stat@DB_STAT_CLEAR
diff --git a/libdb/docs/api_cxx/rep_elect.html b/libdb/docs/api_cxx/rep_elect.html
new file mode 100644
index 0000000..8241d4d
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_elect.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::rep_elect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::rep_elect</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::rep_elect(int nsites,
+ int priority, u_int32_t timeout, int *envid);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::rep_elect method holds an election for the master of a replication
+group, returning the new master's ID in the <b>envid</b> parameter.
+<p>The <b>nsites</b> parameter indicates the number of environments that
+the application believes are in the replication group. This number is
+used by Berkeley DB to avoid having two masters active simultaneously, even
+in the case of a network partition. During an election, a new master
+cannot be elected unless more than half of <b>nsites</b> agree on
+the new master. Thus, in the face of a network partition, the side of
+the partition with more than half the environments will elect a new
+master and continue, while the environments communicating with fewer
+than half the other environments will fail to find a new master.
+<p>The <b>priority</b> parameter is the priority of this environment. It
+must be a positive integer, or 0 if this environment is not permitted
+to become a master (see <a href="../ref/rep/pri.html">Replication
+environment priorities</a> for more information).
+<a name="3"><!--meow--></a>
+<p>The <b>timeout</b> parameter specifies a timeout period for an
+election. If the election has not completed after <b>timeout</b>
+microseconds, the thread will return DB_REP_UNAVAIL.
+<p>The DbEnv::rep_elect method either returns successfully, with the new
+master's environment ID in the memory pointed to by the <b>envid</b>
+parameter, or it will return DB_REP_UNAVAIL if the participating
+group members were unable to elect a new master for any reason. In the
+event of a successful return, the new master's ID may be the ID of the
+previous master, or the ID of the current environment. The application
+is responsible for adjusting its usage of the other environments in the
+replication group, including directing all database updates to the newly
+selected master, in accordance with the results of this election.
+<p>The thread of control that calls the DbEnv::rep_elect method must not be the
+thread of control that processes incoming messages; processing the
+incoming messages is necessary to successfully complete an election.
+<p>The DbEnv::rep_elect method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
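+<p>The following sketch is not part of the original Sleepycat page; it is an
+editor-added illustration that assumes an open <a href="../api_cxx/env_class.html">DbEnv</a>
+handle named <b>dbenv</b>, created with DB_CXX_NO_EXCEPTIONS so that errors
+are returned rather than thrown, and already configured for replication:
+<h3><pre>
+int new_master, ret;
+
+/* Hypothetical call: 5 sites in the group, priority 100, 2-second timeout. */
+ret = dbenv-&gt;rep_elect(5, 100, 2000000, &amp;new_master);
+if (ret == DB_REP_UNAVAIL) {
+    /* No master was elected before the timeout expired; retry later. */
+} else if (ret == 0) {
+    /* new_master holds the environment ID of the elected master. */
+}
+</pre></h3>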
+<h1>Errors</h1>
+<p>The DbEnv::rep_elect method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_REP_UNAVAIL<dd>The replication group was unable to elect a master.
+</dl>
+<p>The DbEnv::rep_elect method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::rep_elect method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_limit.html b/libdb/docs/api_cxx/rep_limit.html
new file mode 100644
index 0000000..d14dbb9
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_limit.html
@@ -0,0 +1,57 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_rep_limit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_rep_limit</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_rep_limit(u_int32_t gbytes, u_int32_t bytes);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_rep_limit method imposes a limit on the amount of data that will
+be transmitted from a site during the course of a single call to
+the <a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a> method.
+<p>The <b>gbytes</b> and <b>bytes</b> parameters together represent the
+maximum number of bytes that can be sent during a single call to
+the <a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a> method.
+<p>The DbEnv::set_rep_limit method configures the entire database environment, not only operations
+performed using the specified <a href="../api_cxx/env_class.html">DbEnv</a> handle.
+<p>The DbEnv::set_rep_limit interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_rep_limit method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
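+<p>As an editor-added illustration (not part of the original Sleepycat page),
+assuming an open <a href="../api_cxx/env_class.html">DbEnv</a> handle named
+<b>dbenv</b>, the following sketch caps the data sent in response to a single
+message at 10 megabytes:
+<h3><pre>
+/* 0 gigabytes plus 10MB expressed in bytes. */
+int ret = dbenv-&gt;set_rep_limit(0, 10 * 1024 * 1024);
+</pre></h3>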
+<h1>Errors</h1>
+<p>The DbEnv::set_rep_limit method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_rep_limit method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_list.html b/libdb/docs/api_cxx/rep_list.html
new file mode 100644
index 0000000..950005c
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_list.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Replication and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Replication and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Replication and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_cxx/rep_limit.html">DbEnv::set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_cxx/rep_start.html">DbEnv::rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_cxx/rep_stat.html">DbEnv::rep_stat</a></td><td>Replication statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_message.html b/libdb/docs/api_cxx/rep_message.html
new file mode 100644
index 0000000..f314c4b
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_message.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::rep_process_message</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::rep_process_message</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *envid)
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::rep_process_message method processes an incoming replication
+message sent by a member of the replication group to the local database
+environment.
+<p>The <b>rec</b> and <b>control</b> parameters should reference a copy
+of the parameters specified by Berkeley DB for the <b>rec</b> and
+<b>control</b> parameters on the sending environment.
+<p>The <b>envid</b> parameter should contain the local identifier that
+corresponds to the environment that sent the message to be processed
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>For implementation reasons, all incoming replication messages must be
+processed using the same <a href="../api_cxx/env_class.html">DbEnv</a> handle. It is not required that
+a single thread of control process all messages, only that all threads
+of control processing messages use the same handle.
+<p>
+If a new master has been elected, the DbEnv::rep_process_message method will return DB_REP_NEWMASTER.
+The <b>envid</b> parameter contains the environment ID of the new
+master. If the recipient of this error return has been made master, it
+is the application's responsibility to begin acting as the master
+environment.
+<p>
+If the system received contact information from a new environment, the DbEnv::rep_process_message method will return DB_REP_NEWSITE.
+The <b>rec</b> parameter contains the opaque data specified in the
+<b>cdata</b> parameter to the <a href="../api_cxx/rep_start.html">DbEnv::rep_start</a> method. The application
+should take whatever action is needed to establish a communication
+channel with this new environment.
+<p>
+If the replication group has more than one master, the DbEnv::rep_process_message method either returns DB_REP_DUPMASTER or throws an exception that encapsulates DB_REP_DUPMASTER.
+The application should reconfigure itself as a client by calling the
+<a href="../api_cxx/rep_start.html">DbEnv::rep_start</a> method, and then call for an election by calling
+<a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a>.
+<p>
+If an election is needed, the DbEnv::rep_process_message method either returns DB_REP_HOLDELECTION or throws an exception that encapsulates DB_REP_HOLDELECTION.
+The application should call for an election by
+calling <a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a>.
+<p>
+If the current environment's logs are too far out of date with respect
+to the master to be automatically synchronized, the DbEnv::rep_process_message method either returns DB_REP_OUTDATED or throws an exception that encapsulates DB_REP_OUTDATED. The
+application should copy over a hot backup of the environment, run
+recovery, and restart the client.
+<p>
+Otherwise, the DbEnv::rep_process_message method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
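+<p>The following dispatch routine is an editor-added sketch, not part of the
+original Sleepycat page. It assumes a <a href="../api_cxx/env_class.html">DbEnv</a>
+created with DB_CXX_NO_EXCEPTIONS, and that the application's communication
+layer supplies the <b>control</b> and <b>rec</b> buffers and the sender's
+environment ID:
+<h3><pre>
+/* Hypothetical helper: process one incoming message from site "eid". */
+void handle_rep_message(DbEnv *dbenv, Dbt *control, Dbt *rec, int eid)
+{
+    switch (dbenv-&gt;rep_process_message(control, rec, &amp;eid)) {
+    case 0:
+        break;                  /* Message handled. */
+    case DB_REP_NEWMASTER:
+        /* eid now holds the new master's environment ID. */
+        break;
+    case DB_REP_HOLDELECTION:
+        /* Call DbEnv::rep_elect from another thread of control. */
+        break;
+    case DB_REP_DUPMASTER:
+        /* Reconfigure as a client with DbEnv::rep_start, then elect. */
+        break;
+    case DB_REP_NEWSITE:
+        /* rec holds the new site's cdata; open a channel to it. */
+        break;
+    case DB_REP_OUTDATED:
+        /* Restore a hot backup, run recovery, restart the client. */
+        break;
+    }
+}
+</pre></h3>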
+<h1>Errors</h1>
+<p>The DbEnv::rep_process_message method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::rep_process_message method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_start.html b/libdb/docs/api_cxx/rep_start.html
new file mode 100644
index 0000000..4cdb455
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_start.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::rep_start</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::rep_start</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::rep_start(Dbt *cdata, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::rep_start method configures the database environment as a client
+or master in a group of replicated database environments. Replication
+master environments are the only database environments where replicated
+databases may be modified. Replication client environments are
+read-only as long as they are clients. Replication client environments
+may be upgraded to be replication master environments in the case that
+the current master fails or there is no master present.
+<p>The enclosing database environment must already have been opened by
+calling the <a href="../api_cxx/env_open.html">DbEnv::open</a> method and must already have been configured
+to send replication messages by calling the <a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a> method.
+<p>The <b>cdata</b> parameter is an opaque data item that is sent over
+the communication infrastructure when the client or master comes online
+(see <a href="../ref/rep/newsite.html">Connecting to a new site</a> for
+more information). If no such information is useful, <b>cdata</b>
+should be NULL.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_REP_CLIENT">DB_REP_CLIENT</a><dd>Configure the environment as a replication client.
+<p><dt><a name="DB_REP_LOGSONLY">DB_REP_LOGSONLY</a><dd>Configure the environment as a log files-only client.
+<p><dt><a name="DB_REP_MASTER">DB_REP_MASTER</a><dd>Configure the environment as a replication master.
+</dl>
+<p>The DbEnv::rep_start method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
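+<p>An editor-added sketch (not part of the original Sleepycat page): after
+<a href="../api_cxx/env_open.html">DbEnv::open</a> and
+<a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a> have been
+called on <b>dbenv</b>, join the group as a client with no connection data:
+<h3><pre>
+int ret = dbenv-&gt;rep_start(NULL, DB_REP_CLIENT);
+</pre></h3>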
+<h1>Errors</h1>
+<p>The DbEnv::rep_start method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The environment was not already configured to communicate with a
+replication group by a call to <a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a>.
+<p>The environment was not already opened.
+</dl>
+<p>The DbEnv::rep_start method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::rep_start method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_stat.html b/libdb/docs/api_cxx/rep_stat.html
new file mode 100644
index 0000000..a58fa9f
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_stat.html
@@ -0,0 +1,108 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::rep_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::rep_stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::rep_stat method returns the replication subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv::rep_stat method creates a statistical structure of type
+DB_REP_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_REP_STAT fields will be filled in:
+<p><dl compact>
+<dt>u_int32_t st_stat;<dd>The current replication mode. Set to <a href="../api_cxx/rep_start.html#DB_REP_MASTER">DB_REP_MASTER</a> if the
+environment is a replication master, <a href="../api_cxx/rep_start.html#DB_REP_CLIENT">DB_REP_CLIENT</a> if the
+environment is a replication client, <a href="../api_cxx/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a> if the
+environment is a log-files-only replica, or 0 if replication is not
+configured.
+<dt>DB_LSN st_next_lsn;<dd>In replication environments configured as masters, the next LSN expected.
+In replication environments configured as clients, the next LSN to be used.
+<dt>DB_LSN st_waiting_lsn;<dd>The LSN of the first missed log record being waited for, or 0 if no log
+records are currently missing.
+<dt>u_int32_t st_dupmasters;<dd>The number of duplicate master conditions detected.
+<dt>u_int32_t st_env_id;<dd>The current environment ID.
+<dt>u_int32_t st_env_priority;<dd>The current environment priority.
+<dt>u_int32_t st_gen;<dd>The current generation number.
+<dt>u_int32_t st_log_duplicated;<dd>The number of duplicate log records received.
+<dt>u_int32_t st_log_queued;<dd>The number of log records currently queued.
+<dt>u_int32_t st_log_queued_max;<dd>The maximum number of log records ever queued at once.
+<dt>u_int32_t st_log_queued_total;<dd>The total number of log records queued.
+<dt>u_int32_t st_log_records;<dd>The number of log records received and appended to the log.
+<dt>u_int32_t st_log_requested;<dd>The number of log records missed and requested.
+<dt>u_int32_t st_master;<dd>The current master environment ID.
+<dt>u_int32_t st_master_changes;<dd>The number of times the master has changed.
+<dt>u_int32_t st_msgs_badgen;<dd>The number of messages received with a bad generation number.
+<dt>u_int32_t st_msgs_processed;<dd>The number of messages received and processed.
+<dt>u_int32_t st_msgs_recover;<dd>The number of messages ignored due to pending recovery.
+<dt>u_int32_t st_msgs_send_failures;<dd>The number of failed message sends.
+<dt>u_int32_t st_msgs_sent;<dd>The number of messages sent.
+<dt>u_int32_t st_newsites;<dd>The number of new site messages received.
+<dt>u_int32_t st_outdated;<dd>The number of outdated conditions detected.
+<dt>u_int32_t st_txns_applied;<dd>The number of transactions applied.
+<dt>u_int32_t st_elections;<dd>The number of elections held.
+<dt>u_int32_t st_elections_won;<dd>The number of elections won.
+<dt>u_int32_t st_election_status;<dd>The current election phase (0 if no election is in progress).
+<dt>u_int32_t st_election_cur_winner;<dd>The election winner.
+<dt>u_int32_t st_election_gen;<dd>The election generation number.
+<dt>DB_LSN st_election_lsn;<dd>The maximum LSN of the election winner.
+<dt>u_int32_t st_election_nsites;<dd>The number of sites expected to participate in elections.
+<dt>u_int32_t st_nthrottles;<dd>Transmission limited. This indicates the number of times that data
+transmission was stopped to limit the amount of data sent in response
+to a single call to <a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a>.
+<dt>u_int32_t st_election_priority;<dd>The election priority.
+<dt>u_int32_t st_election_tiebreaker;<dd>The election tiebreaker value.
+<dt>u_int32_t st_election_votes;<dd>The votes received this election round.
+</dl>
+<p>The DbEnv::rep_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
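+<p>An editor-added sketch (not part of the original Sleepycat page), assuming
+an open <b>dbenv</b> handle created with DB_CXX_NO_EXCEPTIONS and no
+application-specific allocator configured with DbEnv::set_alloc:
+<h3><pre>
+DB_REP_STAT *sp;
+
+if (dbenv-&gt;rep_stat(&amp;sp, 0) == 0) {
+    printf("messages processed: %lu\n", (unsigned long)sp-&gt;st_msgs_processed);
+    free(sp);        /* Statistics were allocated with the library malloc. */
+}
+</pre></h3>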
+<h1>Errors</h1>
+<p>The DbEnv::rep_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::rep_stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/rep_transport.html b/libdb/docs/api_cxx/rep_transport.html
new file mode 100644
index 0000000..34ee324
--- /dev/null
+++ b/libdb/docs/api_cxx/rep_transport.html
@@ -0,0 +1,97 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_rep_transport</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_rep_transport</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_rep_transport(int envid,
+ int (*send)(DB_ENV *dbenv,
+ const Dbt *control, const Dbt *rec, int envid, u_int32_t flags));
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_rep_transport method initializes the communication infrastructure
+for a database environment participating in a replicated application.
+<p>The <b>envid</b> parameter is the local environment's ID. It must be
+a positive integer and uniquely identify this Berkeley DB database environment
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>The <b>send</b> parameter is a callback interface used to transmit data
+using the replication application's communication infrastructure. The
+parameters to <b>send</b> are as follows:
+<p><dl compact>
+<p><dt>dbenv<dd>The enclosing database environment.
+<p><dt>control<dd>The control parameter is the first of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>rec<dd>The rec parameter is the second of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>envid<dd>The <b>envid</b> parameter is a positive integer identifier that
+specifies the replication environment to which the message should be
+sent (see <a href="../ref/rep/id.html">Replication environment IDs</a> for
+more information).
+<p><a name="3"><!--meow--></a>
+The special identifier DB_EID_BROADCAST indicates that a message
+should be broadcast to every environment in the replication group. The
+application may use a true broadcast protocol, or may send the message
+in sequence to each machine with which it is in communication.
+<p><dt>flags<dd>
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_REP_PERMANENT">DB_REP_PERMANENT</a><dd>The record being sent is critical for maintaining database integrity
+(for example, the message includes a transaction commit). The
+application should take appropriate action to enforce the reliability
+guarantees it has chosen, such as waiting for acknowledgement from one
+or more clients.
+</dl>
+</dl>
+<p>The <b>send</b> interface must return 0 on success and non-zero on
+failure. If the <b>send</b> interface fails, the message being sent
+is necessary to maintain database integrity, and the local log is not
+configured for synchronous flushing, the local log will be flushed;
+otherwise, any error from the <b>send</b> interface will be ignored.
+<p>It may sometimes be useful to pass application-specific data to the
+<b>send</b> interface; see <a href="../ref/env/faq.html">Environment
+FAQ</a> for a discussion on how to do this.
+<p>The DbEnv::set_rep_transport method configures operations performed using the specified
+<a href="../api_cxx/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv::set_rep_transport interface may be called at any time during the life of
+the application.
+<p>The DbEnv::set_rep_transport method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
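+<p>An editor-added sketch (not part of the original Sleepycat page). The
+function <b>my_queue_message</b> stands in for whatever transport the
+application actually uses and is purely hypothetical:
+<h3><pre>
+/* Callback matching the send prototype above. */
+int my_rep_send(DB_ENV *dbenv, const Dbt *control, const Dbt *rec,
+    int envid, u_int32_t flags)
+{
+    int need_ack = (flags &amp; DB_REP_PERMANENT) != 0;
+
+    /* envid may be DB_EID_BROADCAST, meaning "send to every site". */
+    return (my_queue_message(envid, control, rec, need_ack) == 0 ? 0 : -1);
+}
+
+/* In the environment setup code; this environment's own ID is 1 here. */
+int ret = dbenv-&gt;set_rep_transport(1, my_rep_send);
+</pre></h3>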
+<h1>Errors</h1>
+<p>The DbEnv::set_rep_transport method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_rep_transport method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/runrec_class.html b/libdb/docs/api_cxx/runrec_class.html
new file mode 100644
index 0000000..77094e8
--- /dev/null
+++ b/libdb/docs/api_cxx/runrec_class.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbRunRecoveryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbRunRecoveryException</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbRunRecoveryException : public DbException { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbRunRecoveryException class and
+how it is used by the various Berkeley DB classes.
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). When a fatal error occurs in Berkeley DB, methods
+will throw a DbRunRecoveryException, at which point all
+subsequent database calls will also fail in the same way. When this
+occurs, recovery should be performed.
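+<p>An editor-added sketch (not part of the original Sleepycat page) of
+catching this exception around an arbitrary Berkeley DB call; <b>db</b>,
+<b>txn</b>, <b>key</b>, and <b>data</b> are hypothetical handles set up
+elsewhere:
+<h3><pre>
+try {
+    db-&gt;put(txn, &amp;key, &amp;data, 0);
+} catch (DbRunRecoveryException &amp;) {
+    /* Shut the application down, run recovery (for example by reopening
+     * the environment with DB_RECOVER), and restart. */
+}
+</pre></h3>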
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/txn_abort.html b/libdb/docs/api_cxx/txn_abort.html
new file mode 100644
index 0000000..7c747fa
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_abort.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::abort</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::abort();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::abort method causes an abnormal termination of the transaction.
+The log is played backward, and any necessary recovery operations are
+initiated through the <b>recover</b> function specified to
+<a href="../api_cxx/env_open.html">DbEnv::open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>All cursors opened within the transaction must be closed before the
+transaction is aborted.
+<p>After DbTxn::abort has been called, regardless of its return, the
+<a href="../api_cxx/txn_class.html">DbTxn</a> handle may not be accessed again.
+<p>The DbTxn::abort method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
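+<p>An editor-added sketch (not part of the original Sleepycat page), assuming
+a DbEnv created with DB_CXX_NO_EXCEPTIONS, an associated Db handle <b>db</b>,
+and previously initialized Dbt values <b>key</b> and <b>data</b>:
+<h3><pre>
+DbTxn *txn;
+
+dbenv-&gt;txn_begin(NULL, &amp;txn, 0);
+if (db-&gt;put(txn, &amp;key, &amp;data, 0) != 0)
+    txn-&gt;abort();           /* txn may not be accessed again after this. */
+else
+    txn-&gt;commit(0);
+</pre></h3>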
+<h1>Errors</h1>
+<p>The DbTxn::abort method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::abort method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/txn_begin.html b/libdb/docs/api_cxx/txn_begin.html
new file mode 100644
index 0000000..612b0c8
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_begin.html
@@ -0,0 +1,101 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::txn_begin</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_begin(DbTxn *parent, DbTxn **tid, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_begin method creates a new transaction in the environment
+and copies a pointer to a <a href="../api_cxx/txn_class.html">DbTxn</a> that uniquely identifies it into
+the memory to which <b>tid</b> refers.
+Calling the <a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a> or <a href="../api_cxx/txn_discard.html">DbTxn::discard</a> methods will discard the returned
+handle.
+<p>If the <b>parent</b> argument is non-NULL, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+In the presence of distributed transactions and two-phase commit,
+only the parental transaction, that is a transaction without
+a <b>parent</b> specified, should be passed as an argument to
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="DB_DIRTY_READ">DB_DIRTY_READ</a><dd>All read operations performed by the transaction will read modified but
+not yet committed data. Silently ignored if the <a href="../api_cxx/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a>
+flag was not specified when the underlying database was opened.
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency, and isolation) properties, but not D (durability); that is,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a> interface. Any value specified in this
+interface overrides that setting.
+<p><dt><a name="DB_TXN_NOWAIT">DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction,
+return DB_LOCK_NOTGRANTED or throw a
+<a href="../api_cxx/lockng_class.html">DbLockNotGrantedException</a>
+immediately instead of blocking on the lock.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_cxx/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>
+interface. Any value specified in this interface overrides that
+setting.
+</dl>
+<p><b>Note: A transaction may not span threads; that is, each transaction must
+begin and end in the same thread, and each transaction may be used only
+by a single thread.</b>
+<p><b>Note: Cursors may not span transactions; that is, each cursor must be
+opened and closed within a single transaction.</b>
+<p><b>Note: A parent transaction may not issue any Berkeley DB operations -- except for
+DbEnv::txn_begin, <a href="../api_cxx/txn_abort.html">DbTxn::abort</a> and <a href="../api_cxx/txn_commit.html">DbTxn::commit</a> -- while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
+<p>The DbEnv::txn_begin method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_begin method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The DbEnv::txn_begin method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_begin method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
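For orientation alongside the DbEnv::txn_begin reference above, a minimal C++ sketch of beginning, protecting, and resolving a transaction. It assumes a hypothetical DbEnv pointer `env` opened with the transaction subsystem and a transactional Db handle `db`; the default exception-throwing error model is assumed, and error handling is condensed.

    #include <db_cxx.h>

    // Run one update under a transaction (sketch; `env` and `db` are
    // assumed, already-opened transactional handles).
    void store_one(DbEnv *env, Db *db)
    {
        DbTxn *txn = NULL;
        env->txn_begin(NULL, &txn, 0);        // no parent, default flags
        try {
            Dbt key((void *)"fruit", 5);
            Dbt data((void *)"apple", 5);
            db->put(txn, &key, &data, 0);     // operation protected by txn
        } catch (DbException &) {
            txn->abort();                     // undo; handle is now invalid
            throw;
        }
        txn->commit(0);                       // a failed commit aborts for us
    }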
diff --git a/libdb/docs/api_cxx/txn_checkpoint.html b/libdb/docs/api_cxx/txn_checkpoint.html
new file mode 100644
index 0000000..3bf2613
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_checkpoint.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::txn_checkpoint</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags) const;
+</pre></h3>
+<h1>Description</h1>
+<p>If there has been database environment activity since the last checkpoint,
+the DbEnv::txn_checkpoint method flushes the underlying memory pool, writes a
+checkpoint record to the log, and then flushes the log.
+<p>If <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is done only
+if more than <b>min</b> minutes have passed since the last checkpoint
+or if more than <b>kbyte</b> kilobytes of log data have been written
+since the last checkpoint.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_FORCE">DB_FORCE</a><dd>Force a checkpoint record, even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The DbEnv::txn_checkpoint method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_checkpoint method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::txn_checkpoint method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_checkpoint method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
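A small illustrative sketch of the kbyte/min thresholds and DB_FORCE flag described above, assuming a hypothetical open DbEnv pointer named `env`; return values and exceptions are ignored for brevity.

    // Checkpoint only if at least 512KB of log or one minute of activity
    // has accumulated since the last checkpoint (sketch).
    void maybe_checkpoint(DbEnv *env)
    {
        env->txn_checkpoint(512 /* kbyte */, 1 /* min */, 0);
    }

    // At shutdown, write a checkpoint record even if the environment
    // has been idle since the last one.
    void final_checkpoint(DbEnv *env)
    {
        env->txn_checkpoint(0, 0, DB_FORCE);
    }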
diff --git a/libdb/docs/api_cxx/txn_class.html b/libdb/docs/api_cxx/txn_class.html
new file mode 100644
index 0000000..2552a23
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_class.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbTxn {
+public:
+ DB_TXN *DbTxn::get_DB_TXN();
+ const DB_TXN *DbTxn::get_const_DB_TXN() const;
+ static DbTxn *DbTxn::get_DbTxn(DB_TXN *txn);
+ static const DbTxn *DbTxn::get_const_DbTxn(const DB_TXN *txn);
+ ...
+};
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn object is the handle for a transaction. Methods of
+the DbTxn handle are used to configure, abort and commit the
+transaction. DbTxn handles are provided to <a href="../api_cxx/db_class.html">Db</a> methods
+in order to transactionally protect those database operations.
+<p>DbTxn handles are not free-threaded; transaction handles may
+be used by multiple threads, but only serially; that is, the application
+must serialize access to the DbTxn handle. Once the
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a> or <a href="../api_cxx/txn_commit.html">DbTxn::commit</a> methods are called, the handle may
+not be accessed again, regardless of the method's return. In addition,
+parent transactions may not issue any Berkeley DB operations while they have
+active child transactions (child transactions that have not yet been
+committed or aborted) except for <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>, <a href="../api_cxx/txn_abort.html">DbTxn::abort</a>
+and <a href="../api_cxx/txn_commit.html">DbTxn::commit</a>.
+<p>Each DbTxn object has an associated DB_TXN struct, which
+is used by the underlying implementation of Berkeley DB and its C-language
+API. The DbTxn::get_DB_TXN method returns a pointer to this struct.
+Given a const DbTxn object, DbTxn::get_const_DB_TXN returns a
+const pointer to the same struct.
+<p>Given a DB_TXN struct, the DbTxn::get_DbTxn method returns the
+corresponding DbTxn object, if there is one. If the
+DB_TXN object was not associated with a DbTxn (that is,
+it was not returned from a call to DbTxn::get_DB_TXN), then the result
+of DbTxn::get_DbTxn is undefined. Given a const DB_TXN struct,
+DbTxn::get_const_DbTxn returns the associated const DbTxn
+object, if there is one.
+<p>These methods may be useful for Berkeley DB applications including both C
+and C++ language software. It should not be necessary to use these
+calls in a purely C++ application.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, DbTxn
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
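As an illustration of the C/C++ bridging methods described above, a short sketch that hands a DbTxn to C-language code; `legacy_update` is a hypothetical C routine and `cxx_txn` an assumed handle returned by DbEnv::txn_begin.

    extern "C" int legacy_update(DB_TXN *txn);   // hypothetical C routine

    int call_legacy(DbTxn *cxx_txn)
    {
        DB_TXN *c_txn = cxx_txn->get_DB_TXN();   // underlying C struct
        return legacy_update(c_txn);             // same transaction, C API view
    }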
diff --git a/libdb/docs/api_cxx/txn_commit.html b/libdb/docs/api_cxx/txn_commit.html
new file mode 100644
index 0000000..3354426
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_commit.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::commit</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::commit(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::commit method ends the transaction.
+<p>In the case of nested transactions, if the transaction is a parent
+transaction, committing the parent transaction causes all unresolved
+children of the parent to be committed. In the case of nested
+transactions, if the transaction is a child transaction, its locks are
+not released, but are acquired by its parent. Although the commit of the
+child transaction will succeed, the actual resolution of the child
+transaction is postponed until the parent transaction is committed or
+aborted; that is, if its parent transaction commits, it will be
+committed; and if its parent transaction aborts, it will be aborted.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency, and isolation) properties, but
+not D (durability); that is, database integrity will be maintained, but
+it is possible that this transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a> interface or for a single transaction using the
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+<p><dt><a name="DB_TXN_SYNC">DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation, and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_cxx/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>
+interface. This behavior may also be set for a single transaction using
+the <a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+</dl>
+<p>All cursors opened within the transaction must be closed before the
+transaction is committed.
+<p>After DbTxn::commit has been called, regardless of its return, the
+<a href="../api_cxx/txn_class.html">DbTxn</a> handle may not be accessed again. If DbTxn::commit
+encounters an error, the transaction and all child transactions of the
+transaction are aborted.
+<p>The DbTxn::commit method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::commit method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::commit method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
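To make the nested-commit behavior described above concrete, a brief sketch: the child's locks pass to the parent on commit, and its final fate follows the parent's resolution. `env` and `db` are assumed open transactional handles; error handling is omitted.

    void nested_update(DbEnv *env, Db *db, Dbt *key, Dbt *data)
    {
        DbTxn *parent = NULL, *child = NULL;
        env->txn_begin(NULL, &parent, 0);
        env->txn_begin(parent, &child, 0);   // child of `parent`
        db->put(child, key, data, 0);
        child->commit(0);                    // resolution deferred to parent
        parent->commit(DB_TXN_SYNC);         // flush the log durably
    }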
diff --git a/libdb/docs/api_cxx/txn_discard.html b/libdb/docs/api_cxx/txn_discard.html
new file mode 100644
index 0000000..279d695
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_discard.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::discard</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::discard</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::discard(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::discard method frees up all the per-process resources
+associated with the specified <a href="../api_cxx/txn_class.html">DbTxn</a> handle, neither committing
+nor aborting the transaction. This call may be used only after calls
+to <a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a> when there are multiple global transaction
+managers recovering transactions in a single Berkeley DB environment. Any
+transactions returned by <a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a> that are not handled by
+the current global transaction manager should be discarded using
+DbTxn::discard.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbTxn::discard method returns EINVAL if the transaction handle does
+not refer to a transaction that was recovered into a prepared but not
+yet completed state.
+Otherwise, the DbTxn::discard method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<p>After DbTxn::discard has been called, regardless of its return, the
+<a href="../api_cxx/txn_class.html">DbTxn</a> handle may not be accessed again.
+<h1>Errors</h1>
+<p>The DbTxn::discard method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The transaction handle does not refer to a transaction that was
+recovered into a prepared but not yet completed state.
+</dl>
+<p>The DbTxn::discard method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::discard method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
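A minimal sketch of the hand-off case described above: a transaction returned by DbEnv::txn_recover that belongs to some other global transaction manager is released locally without deciding its fate. `recovered_txn` is an assumed DbTxn handle taken from the recovery list.

    void hand_off(DbTxn *recovered_txn)
    {
        // Free per-process resources only; the owning transaction manager
        // will later commit or abort this prepared transaction.
        recovered_txn->discard(0);
        // The handle may not be used again after this call.
    }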
diff --git a/libdb/docs/api_cxx/txn_id.html b/libdb/docs/api_cxx/txn_id.html
new file mode 100644
index 0000000..50b8b8d
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_id.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::id</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+u_int32_t
+DbTxn::id();
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::id method returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from DbTxn::id as the locker parameter
+to the <a href="../api_cxx/lock_get.html">DbEnv::lock_get</a> or <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> calls.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
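A short sketch of the locker-ID usage noted above: an explicit lock request made on behalf of a transaction passes DbTxn::id() as the locker. `env`, `txn`, and `obj` are assumed, already-initialized handles.

    void lock_object(DbEnv *env, DbTxn *txn, Dbt *obj)
    {
        DbLock lock;
        // The lock is acquired under the transaction's locker ID.
        env->lock_get(txn->id(), 0, obj, DB_LOCK_WRITE, &lock);
    }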
diff --git a/libdb/docs/api_cxx/txn_list.html b/libdb/docs/api_cxx/txn_list.html
new file mode 100644
index 0000000..7a3adf4
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_list.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Transaction Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Transaction Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Transaction Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_cxx/txn_abort.html">DbTxn::abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_cxx/txn_commit.html">DbTxn::commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><a href="../api_cxx/txn_discard.html">DbTxn::discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><a href="../api_cxx/txn_id.html">DbTxn::id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a></td><td>Set transaction timeout</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_cxx/txn_prepare.html b/libdb/docs/api_cxx/txn_prepare.html
new file mode 100644
index 0000000..dd3a594
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_prepare.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::prepare</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::prepare(u_int8_t gid[DB_XIDDATASIZE]);
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The DbTxn::prepare method initiates the beginning of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a DbTxn::prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing the parent
+causes all unresolved children of the parent transaction to be committed.
+Child transactions should never be explicitly prepared.
+Their fate will be resolved along with their parent's during
+global recovery.
+<p>The <b>gid</b> parameter specifies the global transaction ID by which this
+transaction will be known. This global transaction ID will be returned
+in calls to <a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a>, telling the application which global
+transactions must be resolved.
+<p>The DbTxn::prepare method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::prepare method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::prepare method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
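To place DbTxn::prepare in its two-phase-commit sequence, a minimal local-manager sketch; `txn` is an assumed top-level (parent) DbTxn handle and `gid` the 128-byte global ID assigned by the distributed transaction manager.

    void local_prepare(DbTxn *txn, u_int8_t gid[DB_XIDDATASIZE])
    {
        txn->prepare(gid);   // phase one: log the prepare, then report success
        // Phase two is driven by the coordinator's decision:
        //   txn->commit(0) on a global commit, txn->abort() on a global abort.
    }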
diff --git a/libdb/docs/api_cxx/txn_recover.html b/libdb/docs/api_cxx/txn_recover.html
new file mode 100644
index 0000000..5244791
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_recover.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::txn_recover</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_recover(DB_PREPLIST preplist[],
+ long count, long *retp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_recover interface returns a list of prepared but not
+yet resolved transactions. The DbEnv::txn_recover method should only be
+called after the environment has been recovered. Because database
+environment state must be preserved between recovery and the application
+calling DbEnv::txn_recover, applications must either call
+DbEnv::txn_recover using the same environment handle used when recovery
+is done, or the database environment must not be configured using the
+<a href="../api_cxx/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag.
+<p>On return from DbEnv::txn_recover, the <b>preplist</b> argument will
+be filled in with a list of transactions that must be resolved by the
+application (committed, aborted or discarded). The <b>preplist</b>
+argument is a structure of type DB_PREPLIST; the following DB_PREPLIST
+fields will be filled in:
+<p><dl compact>
+<p><dt>DB_TXN *txn;<dd>The transaction handle for the transaction.
+<p><dt>u_int8_t gid[<a href="../api_cxx/txn_prepare.html#DB_XIDDATASIZE">DB_XIDDATASIZE</a>];<dd>The global transaction ID for the transaction. The global transaction
+ID is the one specified when the transaction was prepared. The
+application is responsible for ensuring uniqueness among global
+transaction IDs.
+</dl>
+<p>The application must call <a href="../api_cxx/txn_abort.html">DbTxn::abort</a>, <a href="../api_cxx/txn_commit.html">DbTxn::commit</a> or
+<a href="../api_cxx/txn_discard.html">DbTxn::discard</a> on each returned <a href="../api_cxx/txn_class.html">DbTxn</a> handle before
+starting any new operations.
+<p>The <b>count</b> parameter specifies the number of available entries
+in the passed-in <b>preplist</b> array. The <b>retp</b> parameter
+returns the number of entries in the array that DbEnv::txn_recover has
+filled in.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>Begin returning a list of prepared, but not yet resolved transactions.
+<p><dt><a name="DB_NEXT">DB_NEXT</a><dd>Continue returning a list of prepared, but not yet resolved transactions,
+starting where the last call to DbEnv::txn_recover left off.
+</dl>
+<p>The DbEnv::txn_recover method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_recover method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_recover method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
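An illustrative sketch of the DB_FIRST/DB_NEXT iteration described above, listing prepared-but-unresolved transactions in batches; each returned entry must then be committed, aborted, or discarded by the application. `env` is an assumed open DbEnv pointer, and recovery is assumed to have already run.

    #include <cstdio>

    void list_prepared(DbEnv *env)
    {
        DB_PREPLIST prep[32];
        long n = 0;
        env->txn_recover(prep, 32, &n, DB_FIRST);
        while (n > 0) {
            for (long i = 0; i < n; ++i)
                std::printf("prepared txn, gid[0]=%u\n",
                    (unsigned)prep[i].gid[0]);
            env->txn_recover(prep, 32, &n, DB_NEXT);  // continue where we left off
        }
    }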
diff --git a/libdb/docs/api_cxx/txn_set_timeout.html b/libdb/docs/api_cxx/txn_set_timeout.html
new file mode 100644
index 0000000..7aa8d7a
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_set_timeout.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+u_int32_t
+DbTxn::set_timeout(db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::set_timeout method sets lock or transaction timeout values
+for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a> for more information.
+<p>The DbTxn::set_timeout method configures operations performed on the underlying
+transaction, not only operations performed using the specified
+<a href="../api_cxx/txn_class.html">DbTxn</a> handle.
+<p>The DbTxn::set_timeout interface may be called at any time during the life of
+the application.
+<p>The DbTxn::set_timeout method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::set_timeout method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbTxn::set_timeout method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::set_timeout method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
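A two-line sketch of the per-transaction timeouts described above; values are in microseconds, and `txn` is an assumed DbTxn handle. These settings override the environment-wide defaults for this transaction only.

    void bound_waits(DbTxn *txn)
    {
        txn->set_timeout(250000, DB_SET_LOCK_TIMEOUT);   // 250 ms per lock wait
        txn->set_timeout(5000000, DB_SET_TXN_TIMEOUT);   // 5 s for the transaction
    }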
diff --git a/libdb/docs/api_cxx/txn_stat.html b/libdb/docs/api_cxx/txn_stat.html
new file mode 100644
index 0000000..89517d0
--- /dev/null
+++ b/libdb/docs/api_cxx/txn_stat.html
@@ -0,0 +1,90 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::txn_stat</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::txn_stat method returns the transaction subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="DB_STAT_CLEAR">DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv::txn_stat method creates a statistical structure of type
+DB_TXN_STAT and copies a pointer to it into a user-specified memory
+location.
+<p>Statistical structures are created in allocated memory. If application-specific allocation
+routines have been declared (see <a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a> for more
+information), they are used to allocate the memory; otherwise, the
+library <b>malloc</b>(3) interface is used. The caller is
+responsible for deallocating the memory. To deallocate the memory, free
+the memory reference; references inside the returned memory need not be
+individually freed.
+<p>The following DB_TXN_STAT fields will be filled in:
+<p><dl compact>
+<dt><a href="../api_cxx/lsn_class.html">DbLsn</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt>time_t st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>u_int32_t st_last_txnid;<dd>The last transaction ID allocated.
+<dt>u_int32_t st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>u_int32_t st_nactive;<dd>The number of transactions that are currently active.
+<dt>u_int32_t st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>u_int32_t st_nbegins;<dd>The number of transactions that have begun.
+<dt>u_int32_t st_naborts;<dd>The number of transactions that have aborted.
+<dt>u_int32_t st_ncommits;<dd>The number of transactions that have committed.
+<dt>u_int32_t st_nrestores;<dd>The number of transactions that have been restored.
+<dt>u_int32_t st_regsize;<dd>The size of the region.
+<dt>u_int32_t st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>u_int32_t st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>DB_TXN_ACTIVE *st_txnarray;<dd>A pointer to an array of <b>st_nactive</b> DB_TXN_ACTIVE structures,
+describing the currently active transactions. The following fields of
+the DB_TXN_ACTIVE structure will be filled in:
+<p><dl compact>
+<p><dt>u_int32_t txnid;<dd>The transaction ID of the transaction.
+<dt>u_int32_t parentid;<dd>The transaction ID of the parent transaction (or 0, if no parent).
+<dt><a href="../api_cxx/lsn_class.html">DbLsn</a> lsn;<dd>The current log sequence number when the transaction was begun.
+</dl>
+</dl>
+<p>The DbEnv::txn_stat method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::txn_stat method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::txn_stat method may fail and
+either return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw a
+<a href="../api_cxx/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/env_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
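A brief sketch of retrieving and releasing the statistics structure described above; the memory comes from the library's allocator (malloc by default), so a single free releases it. `env` is an assumed open DbEnv pointer.

    #include <cstdio>
    #include <cstdlib>

    void show_txn_stats(DbEnv *env)
    {
        DB_TXN_STAT *sp = NULL;
        env->txn_stat(&sp, 0);
        std::printf("active %lu, begun %lu, committed %lu, aborted %lu\n",
            (unsigned long)sp->st_nactive, (unsigned long)sp->st_nbegins,
            (unsigned long)sp->st_ncommits, (unsigned long)sp->st_naborts);
        std::free(sp);                        // one free releases everything
    }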
diff --git a/libdb/docs/api_java/c_index.html b/libdb/docs/api_java/c_index.html
new file mode 100644
index 0000000..1c66449
--- /dev/null
+++ b/libdb/docs/api_java/c_index.html
@@ -0,0 +1,153 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Java Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Java Interface</h1>
+<p><table border=1 align=center>
+<tr><th>Section</th><th>Class/Method</th><th>Description</th></tr>
+<tr><td><b>Database Environment</b></td><td><a href="../api_java/env_class.html">DbEnv</a></td><td>Create an environment handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_close.html">DbEnv.close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_dbremove.html">DbEnv.dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_dbrename.html">DbEnv.dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_err.html">DbEnv.err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_err.html">DbEnv.errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_open.html">DbEnv.open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_remove.html">DbEnv.remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_strerror.html">DbEnv.strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_version.html">DbEnv.version</a></td><td>Return version information</td></tr>
+<tr><td><b>Environment Configuration</b></td><td><a href="../api_java/env_set_app_dispatch.html">DbEnv.set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_encrypt.html">DbEnv.set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_feedback.html">DbEnv.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_flags.html">DbEnv.set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_rpc_server.html">DbEnv.set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_shm_key.html">DbEnv.set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tas_spins.html">DbEnv.set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tmp_dir.html">DbEnv.set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><b>Database Operations</b></td><td><a href="../api_java/db_class.html">Db</a></td><td>Create a database handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_associate.html">Db.associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_close.html">Db.close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_del.html">Db.del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_err.html">Db.err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_err.html">Db.errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_fd.html">Db.fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get.html">Db.get</a>, <a href="../api_java/db_get.html">Db.pget</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_type.html">Db.get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_join.html">Db.join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_key_range.html">Db.key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_open.html">Db.open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_put.html">Db.put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_remove.html">Db.remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_rename.html">Db.rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_stat.html">Db.stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_sync.html">Db.sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_truncate.html">Db.truncate</a></td><td>Empty a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_upgrade.html">Db.upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_verify.html">Db.verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><td><b>Database Configuration</b></td><td><a href="../api_java/db_set_cache_priority.html">Db.set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_encrypt.html">Db.set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errcall.html">Db.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_feedback.html">Db.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_flags.html">Db.set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_lorder.html">Db.set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><b>Btree/Recno Configuration</b></td><td><a href="../api_java/db_set_append_recno.html">Db.set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_len.html">Db.set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_source.html">Db.set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><b>Hash Configuration</b></td><td><a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_hash.html">Db.set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><b>Queue Configuration</b></td><td><a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><b>Database Cursor Operations</b></td><td><a href="../api_java/dbc_class.html">Dbc</a></td><td><b>Cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/db_cursor.html">Db.cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_close.html">Dbc.close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_count.html">Dbc.count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_del.html">Dbc.del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_dup.html">Dbc.dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_get.html">Dbc.get</a>, <a href="../api_java/dbc_get.html">Dbc.pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_put.html">Dbc.put</a></td><td>Store by cursor</td></tr>
+<tr><td><b>Key/Data Pairs</b></td><td><a href="../api_java/dbt_class.html">Dbt</a></td><td><br></td></tr>
+<tr><td><b>Bulk Retrieval</b></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleDataIterator</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleKeyDataIterator</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleRecnoDataIterator</a></td><td><br></td></tr>
+<tr><td><b>Lock Subsystem</b></td><td><a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_detect.html">DbEnv.lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_get.html">DbEnv.lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_id.html">DbEnv.lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_put.html">DbEnv.lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_stat.html">DbEnv.lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_vec.html">DbEnv.lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><b>Log Subsystem</b></td><td><a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_archive.html">DbEnv.log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_file.html">DbEnv.log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_flush.html">DbEnv.log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_put.html">DbEnv.log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_stat.html">DbEnv.log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><b>Log Cursor Operations</b></td><td><a href="../api_java/logc_class.html">DbLogc</a></td><td><b>Log cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/log_cursor.html">DbEnv.log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/logc_close.html">DbLogc.close</a></td><td>Close a log cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/logc_get.html">DbLogc.get</a></td><td>Retrieve a log record</td></tr>
+<tr><td><b>Log Sequence Numbers</b></td><td><a href="../api_java/lsn_class.html">DbLsn</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/log_compare.html">DbEnv.log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><b>Memory Pool Subsystem</b></td><td><a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_stat.html">DbEnv.memp_stat</a>, <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><b>Transaction Subsystem</b></td><td><a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_recover.html">DbEnv.txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_stat.html">DbEnv.txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><b>Transactions</b></td><td><a href="../api_java/txn_class.html">DbTxn</a></td><td><b>Transaction class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_begin.html">DbEnv.txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_abort.html">DbTxn.abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_commit.html">DbTxn.commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_discard.html">DbTxn.discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_id.html">DbTxn.id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_prepare.html">DbTxn.prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a></td><td>Set transaction timeout</td></tr>
+<tr><td><b>Replication</b></td><td><a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_elect.html">DbEnv.rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_limit.html">DbEnv.set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_message.html">DbEnv.rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_start.html">DbEnv.rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_stat.html">DbEnv.rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><b>Exceptions</b></td><td><a href="../api_java/except_class.html">DbException</a></td><td><b>Exception Class for Berkeley DB Activity</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/deadlock_class.html">DbDeadlockException</a></td><td><b>Exception Class for deadlocks</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/lockng_class.html">DbLockNotGrantedException</a></td><td><b>Exception Class for lock request failures</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_class.html">DbMemoryException</a></td><td><b>Exception Class for insufficient memory</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/runrec_class.html">DbRunRecoveryException</a></td><td><b>Exception Class for failures requiring recovery</b></td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_associate.html b/libdb/docs/api_java/db_associate.html
new file mode 100644
index 0000000..f5699a1
--- /dev/null
+++ b/libdb/docs/api_java/db_associate.html
@@ -0,0 +1,140 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.associate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.associate</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbSecondaryKeyCreate
+{
+ public int secondary_key_create(DbTxn txnid,
+ Db secondary, Dbt key, Dbt data, Dbt result)
+ throws DbException;
+}
+public class Db
+{
+ ...
+ public void associate(Db secondary,
+ DbSecondaryKeyCreate secondary_key_create, int flags)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.associate method is used to declare one database a
+secondary index for a primary database. After a secondary database has
+been "associated" with a primary database, all updates to the primary
+will be automatically reflected in the secondary and all reads from the
+secondary will return corresponding data from the primary. Note that
+as primary keys must be unique for secondary indices to work, the
+primary database must be configured without support for duplicate data
+items. See <a href="../ref/am/second.html">Secondary indices</a> for
+more information.
+<p>The Db.associate method should be called on the database handle of the
+primary database that is to be indexed.
+The <b>secondary</b> argument should be an open database handle of
+either a newly created and empty database that is to be used to store
+a secondary index, or of a database that was previously associated with
+the same primary and contains a secondary index. Note that it is not
+safe to associate as a secondary database a handle that is in use by
+another thread of control or has open cursors. If the handle was opened
+with the <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag, it is safe to use it in multiple threads
+of control after the Db.associate method has returned. Note also
+that either secondary keys must be unique or the secondary database must
+be configured with support for duplicate data items.
+<p>If the operation is to be transaction-protected (other than by specifying
+the Db.DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, null.
+<p>The <b>callback</b> argument should refer to a callback function that
+creates a secondary key from a given primary key and data pair. When
+called, the first argument will be the secondary <a href="../api_java/db_class.html">Db</a> handle; the
+second and third arguments will be <a href="../api_java/dbt_class.html">Dbt</a>s containing a primary
+key and datum respectively; and the fourth argument will be a zeroed
+Dbt in which the callback function should fill in the <b>data</b> and
+<b>size</b> fields that describe the secondary key.
+<a name="3"><!--meow--></a>
+<a name="4"><!--meow--></a>
+<p>If any key/data pair in the primary yields a null secondary key and
+should be left out of the secondary index, the callback function may
+optionally return Db.DB_DONOTINDEX. Otherwise, the callback
+function should return 0 in case of success or any other integer error
+code in case of failure; the error code will be returned from the Berkeley DB
+interface call that initiated the callback. Note that if the callback
+function returns Db.DB_DONOTINDEX for any key/data pairs in the
+primary database, the secondary index will not contain any reference to
+those key/data pairs, and such operations as cursor iterations and range
+queries will reflect only the corresponding subset of the database. If
+this is not desirable, the application should ensure that the callback
+function is well-defined for all possible values and never returns
+Db.DB_DONOTINDEX.
+<p>The callback argument may be null if and only if both the primary and
+secondary database handles were opened with the <a href="../api_java/db_open.html#DB_RDONLY">Db.DB_RDONLY</a> flag.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_CREATE">Db.DB_CREATE</a><dd>If the secondary database is empty, walk through the primary and create
+an index to it in the empty secondary. This operation is potentially
+very expensive.
+<p>If the secondary database has been opened in an environment configured
+with transactions, each put necessary for its creation will be done in
+the context of a transaction created for the purpose.
+<p>Care should be taken not to use a newly-populated secondary database in
+another thread of control until the Db.associate call has
+returned successfully in the first thread.
+<p>If transactions are not being used, care should be taken not to modify
+a primary database being used to populate a secondary database, in
+another thread of control, until the Db.associate call has
+returned successfully in the first thread. If transactions are being
+used, Berkeley DB will perform appropriate locking and the application need
+not do any special operation ordering.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the Db.associate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The Db.associate method throws an exception that encapsulates a non-zero error value on
+failure.
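+<p>For example, a secondary index keyed on a fragment of the primary data
+might be set up as follows (a sketch only; the record layout and the open
+<b>primary</b> and <b>secondary</b> handles are illustrative assumptions,
+following the interface declared above):
+<blockquote><pre>
+class FirstFourKeyCreate implements DbSecondaryKeyCreate
+{
+    public int secondary_key_create(DbTxn txnid,
+        Db secondary, Dbt key, Dbt data, Dbt result)
+        throws DbException
+    {
+        // Assumed layout: the first four bytes of each primary data
+        // item form the secondary key; shorter records are not indexed.
+        if (data.get_size() &lt; 4)
+            return (Db.DB_DONOTINDEX);
+        byte[] skey = new byte[4];
+        System.arraycopy(data.get_data(), 0, skey, 0, 4);
+        result.set_data(skey);
+        result.set_size(4);
+        return (0);
+    }
+}
+    ...
+    // Populate the empty secondary from the primary's existing records.
+    primary.associate(secondary, new FirstFourKeyCreate(), Db.DB_CREATE);
+</pre></blockquote>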
+<h1>Errors</h1>
+<p>The Db.associate method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The secondary database handle has already been associated with this or
+another database handle.
+<p>The secondary database handle is not open.
+<p>The primary database has been configured to allow duplicates.
+</dl>
+<p>The Db.associate method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.associate method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_class.html b/libdb/docs/api_java/db_class.html
new file mode 100644
index 0000000..15fb5f7
--- /dev/null
+++ b/libdb/docs/api_java/db_class.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Db extends Object
+{
+ Db(DbEnv dbenv, int flags)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>The Db handle is the handle for a Berkeley DB database, which may or
+may not be part of a database environment. Db handles are
+free-threaded if the <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag is specified to the
+<a href="../api_java/db_open.html">Db.open</a> method when the database is opened or if the database
+environment in which the database is opened is free-threaded. The
+handle should not be closed while any other handle that refers to the
+database is in use; for example, database handles must not be closed
+while cursor handles into the database remain open, or transactions that
+include operations on the database have not yet been committed or
+aborted. Once the <a href="../api_java/db_close.html">Db.close</a>, <a href="../api_java/db_remove.html">Db.remove</a>, or
+<a href="../api_java/db_rename.html">Db.rename</a> methods are called, the handle may not be accessed again,
+regardless of the method's return.
+<p>The constructor creates a Db object that is the handle for a
+Berkeley DB database. The constructor allocates memory internally; calling
+the <a href="../api_java/db_close.html">Db.close</a>, <a href="../api_java/db_remove.html">Db.remove</a> or <a href="../api_java/db_rename.html">Db.rename</a> methods will
+free that memory.
+<p>If no <b>dbenv</b> value is specified, the database is standalone; that
+is, it is not part of any Berkeley DB environment.
+<p>If a <b>dbenv</b> value is specified, the database is created within
+the specified Berkeley DB environment. The database access methods
+automatically make calls to the other subsystems in Berkeley DB based on the
+enclosing environment. For example, if the environment has been
+configured to use locking, the access methods will automatically acquire
+the correct locks when reading and writing pages of the database.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_XA_CREATE">Db.DB_XA_CREATE</a><dd>Instead of creating a standalone database, create a database intended to
+be accessed via applications running under an X/Open conformant Transaction
+Manager. The database will be opened in the environment specified by the
+OPENINFO parameter of the GROUPS section of the ubbconfig file. See the
+<a href="../ref/xa/intro.html">XA Resource Manager</a> chapter in the
+Reference Guide for more information.
+</dl>
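+<p>For example, handles might be created as follows (a sketch; the
+<b>dbenv</b> handle is assumed to have been opened elsewhere):
+<blockquote><pre>
+// A database handle inside an existing environment.
+Db db = new Db(dbenv, 0);
+// A standalone database, outside any Berkeley DB environment.
+Db standalone = new Db(null, 0);
+</pre></blockquote>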
+<h1>Class</h1>
+Db
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_close.html b/libdb/docs/api_java/db_close.html
new file mode 100644
index 0000000..fc73486
--- /dev/null
+++ b/libdb/docs/api_java/db_close.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.close method flushes any cached database information to disk,
+closes any open cursors, frees any allocated resources, and closes any
+underlying files. Because key/data pairs are cached in memory, failing
+to sync the file with the Db.close or <a href="../api_java/db_sync.html">Db.sync</a> method may
+result in inconsistent or lost information.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_NOSYNC">Db.DB_NOSYNC</a><dd>Do not flush cached information to disk. The <a href="../api_java/db_close.html#DB_NOSYNC">Db.DB_NOSYNC</a> flag is
+a dangerous option. It should be set only if the application is doing
+logging (with transactions) so that the database is recoverable after
+a system or application crash, or if the database is always generated
+from scratch after any system or application crash.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+Db.close, atomically replace the original database with the
+updated copy.
+</dl>
+<p>When multiple threads are using the <a href="../api_java/db_class.html">Db</a> handle concurrently, only a single
+thread may call the Db.close method.
+<p>The <a href="../api_java/db_class.html">Db</a> handle may not be accessed again after Db.close is
+called, regardless of its return.
+<p>The Db.close method throws an exception that encapsulates a non-zero error value on
+failure.
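+<p>For example (a sketch; the <b>db</b> handle is assumed to have been
+opened elsewhere):
+<blockquote><pre>
+// Flush cached pages, close open cursors, and release the handle;
+// the handle may not be used again after this call.
+db.close(0);
+// An application doing its own logging and recovery might instead call:
+//     db.close(Db.DB_NOSYNC);
+</pre></blockquote>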
+<h1>Errors</h1>
+<p>The Db.close method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.close method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_cursor.html b/libdb/docs/api_java/db_cursor.html
new file mode 100644
index 0000000..17e0720
--- /dev/null
+++ b/libdb/docs/api_java/db_cursor.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.cursor</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc cursor(DbTxn txnid, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.cursor method
+creates a cursor.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>;
+otherwise, null.
+<p>To transaction-protect cursor operations, cursors must be opened and
+closed within the context of a transaction, and the <b>txnid</b>
+parameter specifies the transaction context in which the cursor may be
+used.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>All read operations performed by the cursor may return modified but not
+yet committed data. Silently ignored if the <a href="../api_java/db_open.html#DB_DIRTY_READ">Db.DB_DIRTY_READ</a> flag
+was not specified when the underlying database was opened.
+<p><dt><a name="Db.DB_WRITECURSOR">Db.DB_WRITECURSOR</a><dd>Specify that the cursor will be used to update the database. The
+underlying database environment must have been opened using the
+<a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag.
+</dl>
+<p>The Db.cursor method throws an exception that encapsulates a non-zero error value on
+failure.
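+<p>For example, a non-transactional cursor might be created as follows (a
+sketch; the <b>db</b> handle and the iteration logic are assumptions):
+<blockquote><pre>
+Dbc dbc = db.cursor(null, 0);   // no transaction, no special flags
+// ... position the cursor and read records with Dbc.get ...
+dbc.close();                    // release the cursor when finished
+</pre></blockquote>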
+<h1>Errors</h1>
+<p>The Db.cursor method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.cursor method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.cursor method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_del.html b/libdb/docs/api_java/db_del.html
new file mode 100644
index 0000000..0d46f18
--- /dev/null
+++ b/libdb/docs/api_java/db_del.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.del</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int del(DbTxn txnid, Dbt key, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.del method removes key/data pairs from the database. The
+key/data pair associated with the specified <b>key</b> is discarded from
+the database. In the presence of duplicate key values, all records
+associated with the designated key will be discarded.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_java/db_associate.html">Db.associate</a> method, the Db.del method deletes the
+key/data pair from the primary database and all secondary indices.
+<p>If the operation is to be transaction-protected (other than by specifying
+the Db.DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the Db.del call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>
+If the specified key is not in the database, the Db.del method will return Db.DB_NOTFOUND.
+Otherwise, the Db.del method throws an exception that encapsulates a non-zero error value on
+failure.
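+<p>For example (a sketch; the key bytes and the open <b>db</b> handle are
+illustrative assumptions):
+<blockquote><pre>
+Dbt key = new Dbt("fruit".getBytes());
+int ret = db.del(null, key, 0);          // non-transactional delete
+if (ret == Db.DB_NOTFOUND)
+    System.out.println("no such key in the database");
+</pre></blockquote>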
+<h1>Errors</h1>
+<p>The Db.del method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>Db.DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.del method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.del method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.del method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_err.html b/libdb/docs/api_java/db_err.html
new file mode 100644
index 0000000..9e48cf3
--- /dev/null
+++ b/libdb/docs/api_java/db_err.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.err</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void err(int errcode, String message)
+<p>
+public void errx(String message)
+</pre></h3>
+<h1>Description</h1>
+<p>The <a href="../api_java/env_err.html">DbEnv.err</a>, <a href="../api_java/env_err.html">DbEnv.errx</a>, Db.err and
+Db.errx methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The <a href="../api_java/env_err.html">DbEnv.err</a> method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback method has been set using the
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method, any prefix string specified using the
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>The supplied message string <b>message</b>.<dd>
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_java/env_strerror.html">DbEnv.strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback method has been set (see <a href="../api_java/db_set_errcall.html">Db.set_errcall</a>
+and <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>), that method is called with two
+arguments: any prefix string specified (see <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> and
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>) and the error message.
+<p>If an OutputStream has been set
+(see <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> and <a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a>),
+the error message is written to that stream.
+<p>If none of these output options has been configured, the error message
+is written to System.err, the standard
+error output stream.</blockquote>
+<p>The <a href="../api_java/env_err.html">DbEnv.errx</a> and Db.errx methods perform identically to the
+<a href="../api_java/env_err.html">DbEnv.err</a> and Db.err methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
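+<p>For example (a sketch; the <b>db</b> handle and the <b>key</b> and
+<b>data</b> Dbts are assumptions):
+<blockquote><pre>
+int ret = db.get(null, key, data, 0);
+if (ret != 0)
+    // Prints any configured prefix, the message, and the error string.
+    db.err(ret, "lookup of inventory record failed");
+</pre></blockquote>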
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_fd.html b/libdb/docs/api_java/db_fd.html
new file mode 100644
index 0000000..a001019
--- /dev/null
+++ b/libdb/docs/api_java/db_fd.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.fd</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.fd</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int fd()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.fd method
+returns a file descriptor representative of the underlying database.
+This method does not fit well into the Java framework and may be removed
+in subsequent releases.
+<p>The Db.fd method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Db.fd method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.fd method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_get.html b/libdb/docs/api_java/db_get.html
new file mode 100644
index 0000000..c55c4d8
--- /dev/null
+++ b/libdb/docs/api_java/db_get.html
@@ -0,0 +1,152 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.get</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+public int pget(DbTxn txnid, Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get method retrieves key/data pairs from the database. The
+byte array
+and length of the data associated with the specified <b>key</b> are
+returned in the structure to which <b>data</b> refers.
+<p>In the presence of duplicate key values, Db.get will return the
+first data item for the designated key. Duplicates are sorted by insert
+order, except where this order has been overridden by cursor operations.
+<b>Retrieval of duplicates requires the use of cursor operations.</b>
+See <a href="../api_java/dbc_get.html">Dbc.get</a> for details.
+<p>When called on a database that has been made into a secondary index
+using the <a href="../api_java/db_associate.html">Db.associate</a> method, the Db.get and
+Db.pget methods return the key from the secondary index and the data
+item from the primary database. In addition, the Db.pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the Db.pget interface will always fail and
+return EINVAL.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>;
+otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CONSUME">Db.DB_CONSUME</a><dd>Return the record number and data from the available record closest to
+the head of the queue, and delete the record. The cursor will be
+positioned on the deleted record. The record number will be returned
+in <b>key</b>, as described in <a href="../api_java/dbt_class.html">Dbt</a>. The data will be returned
+in the <b>data</b> parameter. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for Db.DB_CONSUME to be specified.
+<p><dt><a name="Db.DB_CONSUME_WAIT">Db.DB_CONSUME_WAIT</a><dd>The Db.DB_CONSUME_WAIT flag is the same as the Db.DB_CONSUME
+flag, except that if the Queue database is empty, the thread of control
+will wait until there is data in the queue before returning. The
+underlying database must be of type Queue for Db.DB_CONSUME_WAIT
+to be specified.
+<p><dt><a name="Db.DB_GET_BOTH">Db.DB_GET_BOTH</a><dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p>When used with the Db.pget version of this interface
+on a secondary index handle, return the secondary key/primary key/data
+tuple only if both the primary and secondary keys match the arguments.
+It is an error to use the Db.DB_GET_BOTH flag with the Db.get
+version of this interface and a secondary index handle.
+<p><dt><a name="Db.DB_SET_RECNO">Db.DB_SET_RECNO</a><dd>Retrieve the specified numbered key/data pair from a database. Upon
+return, both the <b>key</b> and <b>data</b> items will have been
+filled in.
+<p>The <b>data</b> field of the specified <b>key</b>
+must be a byte array large enough to hold a logical record number (that
+is, an int).
+This record number determines the record to be retrieved.
+<p>For Db.DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the DB_RECNUM flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_java/db_open.html#DB_DIRTY_READ">Db.DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="Db.DB_MULTIPLE">Db.DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If all
+of the data items associated with the key cannot fit into the buffer,
+the size field of the <b>data</b> argument is set to the length needed
+for the specified items, and a <a href="../api_java/memp_class.html">DbMemoryException</a> is thrown. The buffer
+to which the <b>data</b> argument refers should be large relative to
+the page size of the underlying database, aligned for unsigned integer
+access, and be a multiple of 1024 bytes in size.
+<p>The Db.DB_MULTIPLE flag may only be used alone, or with the
+Db.DB_GET_BOTH and Db.DB_SET_RECNO options. The
+Db.DB_MULTIPLE flag may not be used when accessing databases made
+into secondary indices using the <a href="../api_java/db_associate.html">Db.associate</a> method.
+<p>See <a href="../api_java/dbt_bulk_class.html">DbMultipleDataIterator</a> for more information.
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+<p>Because the Db.get interface will not hold locks
+across Berkeley DB interface calls in non-transactional environments, the
+<a href="../api_java/dbc_get.html#DB_RMW">Db.DB_RMW</a> flag to the Db.get call is meaningful only in
+the presence of transactions.
+</dl>
+<p>
+If the specified key is not in the database, the Db.get method will return Db.DB_NOTFOUND.
+If the database is a Queue or Recno database and the specified key
+exists, but was never explicitly created by the application or was
+later deleted, the Db.get method will return Db.DB_KEYEMPTY.
+Otherwise, the Db.get method throws an exception that encapsulates a non-zero error value on
+failure.
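+<p>For example, a simple non-transactional lookup (a sketch; the open
+<b>db</b> handle and the key contents are assumptions):
+<blockquote><pre>
+Dbt key = new Dbt("fruit".getBytes());
+Dbt data = new Dbt();
+data.set_flags(Db.DB_DBT_MALLOC);   // let the library allocate the result
+if (db.get(null, key, data, 0) == 0) {
+    String value = new String(data.get_data(), 0, data.get_size());
+    System.out.println("fruit: " + value);
+}
+</pre></blockquote>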
+<h1>Errors</h1>
+<p>The Db.get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>Db.DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified to the <a href="../api_java/db_open.html">Db.open</a> method and
+none of the <a href="../api_java/dbt_class.html#DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a>, <a href="../api_java/dbt_class.html#DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a> or
+<a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> flags were set in the <a href="../api_java/dbt_class.html">Dbt</a>.
+<p>The Db.pget interface was called with a <a href="../api_java/db_class.html">Db</a> handle that
+does not refer to a secondary index.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Db.get method will fail and
+throw a <a href="../api_java/memp_class.html">DbMemoryException</a> exception.
+<p>The Db.get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.get method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_get_byteswapped.html b/libdb/docs/api_java/db_get_byteswapped.html
new file mode 100644
index 0000000..68ec3ef
--- /dev/null
+++ b/libdb/docs/api_java/db_get_byteswapped.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.get_byteswapped</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public boolean get_byteswapped();
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get_byteswapped method
+returns false
+if the underlying database files were created on an architecture of the
+same byte order as the current one, and
+returns true
+if they were not (that is, big-endian on a little-endian machine, or
+vice versa). This field may be used to determine whether application
+data needs to be adjusted for this architecture or not.
+<p>The Db.get_byteswapped interface may not be called before the <a href="../api_java/db_open.html">Db.open</a>
+interface has been called.
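+<p>For example (a sketch; the <b>db</b> handle is assumed to be open):
+<blockquote><pre>
+if (db.get_byteswapped())
+    System.out.println(
+        "database was created on a machine of the opposite byte order");
+</pre></blockquote>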
+<h1>Errors</h1>
+<p>The Db.get_byteswapped method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.get_byteswapped method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.get_byteswapped method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_get_type.html b/libdb/docs/api_java/db_get_type.html
new file mode 100644
index 0000000..fca2d5d
--- /dev/null
+++ b/libdb/docs/api_java/db_get_type.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.get_type</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get_type();
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.get_type method
+returns the type of the underlying access method (and file format).
+The returned value is one of Db.DB_BTREE, Db.DB_HASH,
+Db.DB_RECNO, or Db.DB_QUEUE. This value may be used to
+determine the type of the database after a return from <a href="../api_java/db_open.html">Db.open</a>
+with the <b>type</b> argument set to Db.DB_UNKNOWN.
+<p>The Db.get_type interface may not be called before the <a href="../api_java/db_open.html">Db.open</a>
+interface has been called.
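+<p>For example (a sketch; the handle is assumed to have been opened with
+a <b>type</b> argument of Db.DB_UNKNOWN):
+<blockquote><pre>
+if (db.get_type() == Db.DB_BTREE)
+    System.out.println("existing database uses the Btree access method");
+</pre></blockquote>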
+<h1>Errors</h1>
+<p>The Db.get_type method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called before <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.get_type method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.get_type method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_join.html b/libdb/docs/api_java/db_join.html
new file mode 100644
index 0000000..422a053
--- /dev/null
+++ b/libdb/docs/api_java/db_join.html
@@ -0,0 +1,122 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.join</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc join(Dbc[] curslist, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.join method creates a specialized cursor for use in performing
+equality or natural joins on secondary indices. For information on how
+to organize your data to use this functionality, see
+<a href="../ref/am/join.html">Equality join</a>.
+<p>The Db.join method is called on the <a href="../api_java/db_class.html">Db</a> handle of the primary
+database, which is keyed by the data values found in entries in the
+<b>curslist</b>.
+<p>The <b>curslist</b> argument contains a null terminated array of cursors.
+Each cursor must have been initialized to refer to the key on which the
+underlying database should be joined. Typically, this initialization is done
+by a <a href="../api_java/dbc_get.html">Dbc.get</a> call with the <a href="../api_java/dbc_get.html#DB_SET">Db.DB_SET</a> flag specified. Once the
+cursors have been passed as part of a <b>curslist</b>, they should not
+be accessed or modified until the newly created join cursor has been closed,
+or else inconsistent results may be returned.
+<p>Joined values are retrieved by doing a sequential iteration over the first
+cursor in the <b>curslist</b> argument, and a nested iteration over each
+secondary cursor in the order they are specified in the <b>curslist</b>
+argument. This requires database traversals to search for the current
+datum in all the cursors after the first. For this reason, the best join
+performance normally results from sorting the cursors from the one that
+refers to the least number of data items to the one that refers to the
+most. By default, Db.join does this sort on behalf of its caller.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOIN_NOSORT">Db.DB_JOIN_NOSORT</a><dd>Do not sort the cursors based on the number of data items to which they
+refer. If the data are structured so that cursors with many data items
+also share many common elements, higher performance will result from
+listing those cursors before cursors with fewer data items; that is, a
+sort order other than the default. The Db.DB_JOIN_NOSORT flag
+permits applications to perform join optimization prior to calling
+Db.join.
+</dl>
+<p>The returned cursor
+supports only the <a href="../api_java/dbc_get.html">Dbc.get</a> and <b>dbc_close</b> cursor
+functions:
+<p><dl compact>
+<p><dt><a href="../api_java/dbc_get.html">Dbc.get</a><dd>Iterates over the values associated with the keys to which each item in
+<b>curslist</b> was initialized. Any data value that appears in all
+items specified by the <b>curslist</b> argument is then used as a key
+into the <b>primary</b>, and the key/data pair found in the
+<b>primary</b> is returned.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a><dd>Do not use the data value found in all the cursors as a lookup key for
+the <b>primary</b>, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_java/db_open.html#DB_DIRTY_READ">Db.DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p><dt><a href="../api_java/dbc_close.html">Dbc.close</a><dd>Close the returned cursor and release all resources. (Closing the cursors
+in <b>curslist</b> is the responsibility of the caller.)
+</dl>
+<p>For the returned join cursor to be used in a transaction-protected manner,
+the cursors listed in <b>curslist</b> must have been created within the
+context of the same transaction.
+<p>The Db.join method throws an exception that encapsulates a non-zero error value on
+failure.
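+<p>For example, a two-way equality join might look as follows (a sketch;
+the <b>primary</b>, <b>color_index</b>, and <b>size_index</b> handles and
+the cursor positioning are illustrative assumptions):
+<blockquote><pre>
+Dbc[] curslist = new Dbc[3];
+curslist[0] = color_index.cursor(null, 0);
+curslist[1] = size_index.cursor(null, 0);
+curslist[2] = null;                       // null-terminate the array
+// ... position curslist[0] and curslist[1] with Dbc.get and Db.DB_SET ...
+Dbc join_curs = primary.join(curslist, 0);
+Dbt key = new Dbt();
+Dbt data = new Dbt();
+while (join_curs.get(key, data, 0) == 0) {
+    // each key/data pair found in the primary is returned here
+}
+join_curs.close();
+</pre></blockquote>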
+<h1>Errors</h1>
+<p>The Db.join method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>Db.DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Cursor functions other than <a href="../api_java/dbc_get.html">Dbc.get</a> or <a href="../api_java/dbc_close.html">Dbc.close</a> were
+called.
+</dl>
+<p>The Db.join method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.join method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_key_range.html b/libdb/docs/api_java/db_key_range.html
new file mode 100644
index 0000000..5fc50be
--- /dev/null
+++ b/libdb/docs/api_java/db_key_range.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.key_range</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.key_range</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void key_range(DbTxn txnid,
+ Dbt key, DbKeyRange key_range, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.key_range method returns an estimate of the proportion of keys
+that are less than, equal to, and greater than the specified key. The
+underlying database must be of type Btree.
+<p>The information is returned in the <b>key_range</b> argument, which
+contains three elements of type double: <b>less</b>, <b>equal</b>,
+and <b>greater</b>. Values are in the range of 0 to 1; for example,
+if the field <b>less</b> is 0.05, 5% of the keys in the database are
+less than the key argument. The value for <b>equal</b> will be zero
+if there is no matching key, and will be non-zero otherwise.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>;
+otherwise, null.
+The Db.key_range method does not retain the locks it acquires for the
+life of the transaction, so estimates may not be repeatable.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The Db.key_range method throws an exception that encapsulates a non-zero error value on
+failure.
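+<p>For example (a sketch; the Btree <b>db</b> handle and the key bytes are
+assumptions):
+<blockquote><pre>
+Dbt key = new Dbt("m".getBytes());
+DbKeyRange range = new DbKeyRange();
+db.key_range(null, key, range, 0);
+System.out.println(range.less + " of the keys sort before the given key");
+</pre></blockquote>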
+<h1>Errors</h1>
+<p>The Db.key_range method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The underlying database was not of type Btree.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.key_range method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.key_range method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.key_range method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_list.html b/libdb/docs/api_java/db_list.html
new file mode 100644
index 0000000..5236ed4
--- /dev/null
+++ b/libdb/docs/api_java/db_list.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Databases and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Databases and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Databases and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/db_associate.html">Db.associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><a href="../api_java/db_close.html">Db.close</a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_java/db_cursor.html">Db.cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_java/db_del.html">Db.del</a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_java/db_err.html">Db.err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_java/db_err.html">Db.errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_java/db_fd.html">Db.fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><a href="../api_java/db_get.html">Db.get</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_java/db_get_type.html">Db.get_type</a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_java/db_join.html">Db.join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_java/db_key_range.html">Db.key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><a href="../api_java/db_open.html">Db.open</a></td><td>Open a database</td></tr>
+<tr><td><a href="../api_java/db_get.html">Db.pget</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_java/db_put.html">Db.put</a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_java/db_remove.html">Db.remove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_java/db_rename.html">Db.rename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_java/db_set_append_recno.html">Db.set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><a href="../api_java/db_set_cache_priority.html">Db.set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><a href="../api_java/db_set_encrypt.html">Db.set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><a href="../api_java/db_set_errcall.html">Db.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_java/db_set_feedback.html">Db.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_java/db_set_flags.html">Db.set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><a href="../api_java/db_set_h_hash.html">Db.set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><a href="../api_java/db_set_lorder.html">Db.set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><a href="../api_java/db_set_re_len.html">Db.set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><a href="../api_java/db_set_re_source.html">Db.set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><a href="../api_java/db_stat.html">Db.stat</a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_java/db_sync.html">Db.sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../api_java/db_truncate.html">Db.truncate</a></td><td>Empty a database</td></tr>
+<tr><td><a href="../api_java/db_upgrade.html">Db.upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><a href="../api_java/db_verify.html">Db.verify</a></td><td>Verify/salvage a database</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_open.html b/libdb/docs/api_java/db_open.html
new file mode 100644
index 0000000..b94a3f6
--- /dev/null
+++ b/libdb/docs/api_java/db_open.html
@@ -0,0 +1,163 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.open</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void open(DbTxn txnid, String file,
+ String database, int type, int flags, int mode)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The currently supported Berkeley DB file formats (or <i>access methods</i>)
+are Btree, Hash, Queue, and Recno. The Btree format is a representation
+of a sorted, balanced tree structure. The Hash format is an extensible,
+dynamic hashing scheme. The Queue format supports fast access to
+fixed-length records accessed sequentially or by logical record number.
+The Recno format supports fixed- or variable-length records, accessed
+sequentially or by logical record number, and optionally backed by a
+flat text file.
+<p>Storage and retrieval for the Berkeley DB access methods are based on key/data
+pairs; see <a href="../api_java/dbt_class.html">Dbt</a> for more information.
+<p>The Db.open interface opens the database represented by the
+<b>file</b> and <b>database</b> arguments for both reading and
+writing. The <b>file</b> argument is used as the name of an underlying
+file that will be used to back the database. The <b>database</b>
+argument is optional, and allows applications to have multiple databases
+in a single file. Although no <b>database</b> argument needs to be
+specified, it is an error to attempt to open a second database in a
+<b>file</b> that was not initially created using a <b>database</b>
+name. Further, the <b>database</b> argument is not supported by the
+Queue format. Finally, when opening multiple databases in the same
+physical file, it is important to consider locking and memory cache
+issues; see <a href="../ref/am/opensub.html">Opening multiple databases
+in a single file</a> for more information.
+<p>In-memory databases never intended to be preserved on disk may be
+created by setting both the <b>file</b> and <b>database</b> arguments
+to null. Note that in-memory databases can only ever be shared by
+sharing the single database handle that created them, in circumstances
+where doing so is safe.
+<p>The <b>type</b> argument is of type int, and must be set to one of <a name="Db.DB_BTREE">Db.DB_BTREE</a>,
+<a name="Db.DB_HASH">Db.DB_HASH</a>, <a name="Db.DB_QUEUE">Db.DB_QUEUE</a>,
+<a name="Db.DB_RECNO">Db.DB_RECNO</a>, or <a name="Db.DB_UNKNOWN">Db.DB_UNKNOWN</a>. If
+<b>type</b> is Db.DB_UNKNOWN, the database must already exist
+and Db.open will automatically determine its type. The
+<a href="../api_java/db_get_type.html">Db.get_type</a> method may be used to determine the underlying type of
+databases opened using Db.DB_UNKNOWN.
+<p>If the operation is to be transaction-protected (other than by specifying
+the Db.DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, null.
+<p>The <b>flags</b> and <b>mode</b> arguments specify how files will be opened
+and/or created if they do not already exist.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the Db.open call within a transaction. If the call succeeds,
+the open operation will be recoverable. If the call fails, no database will
+have been created.
+<p><dt><a name="Db.DB_CREATE">Db.DB_CREATE</a><dd>Create the database. If the database does not already exist and the Db.DB_CREATE
+flag is not specified, the Db.open will fail.
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>Support dirty reads; that is, read operations on the database may request the
+return of modified but not yet committed data.
+<p><dt><a name="Db.DB_EXCL">Db.DB_EXCL</a><dd>Return an error if the database already exists. The Db.DB_EXCL flag is
+only meaningful when specified with the Db.DB_CREATE flag.
+<p><dt><a name="Db.DB_NOMMAP">Db.DB_NOMMAP</a><dd>Do not map this database into process memory (see the description of the
+<a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a> method for further information).
+<p><dt><a name="Db.DB_RDONLY">Db.DB_RDONLY</a><dd>Open the database for reading only. Any attempt to modify items in the database
+will fail, regardless of the actual permissions of any underlying files.
+<p><dt><a name="Db.DB_THREAD">Db.DB_THREAD</a><dd>Cause the <a href="../api_java/db_class.html">Db</a> handle returned by Db.open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+<p>Threading is always assumed in the Java API, so no special flags are
+required, and Berkeley DB functions will always behave as if the
+<a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified.
+<p><dt><a name="Db.DB_TRUNCATE">Db.DB_TRUNCATE</a><dd>Physically truncate the underlying file, discarding all previous
+databases it might have held. Underlying filesystem primitives are used
+to implement this flag. For this reason, it is applicable only to the
+file and cannot be used to discard databases within a file.
+<p>The Db.DB_TRUNCATE flag cannot be transaction-protected, and it is
+an error to specify it in a transaction-protected environment.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+the database open are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, the database open will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>Calling Db.open is a reasonably expensive operation, and maintaining
+a set of open databases will normally be preferable to repeatedly opening
+and closing the database for each new query.
+<p>The Db.open method throws an exception that encapsulates a non-zero error value on
+failure.
+If Db.open fails, the <a href="../api_java/db_close.html">Db.close</a> method should be called to discard the
+<a href="../api_java/db_class.html">Db</a> handle.
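+<p>As an illustrative sketch only (the file name is hypothetical, the
+two-argument Db constructor is assumed, and <b>dbenv</b> is assumed to be an
+already-opened, transactional database environment), a recoverable create
+and open might look like the following:
+<pre>
+Db db = new Db(dbenv, 0);              // dbenv: an open, transactional DbEnv
+db.open(null,                          // no explicit transaction handle
+    "inventory.db",                    // underlying file
+    null,                              // single database in the file
+    Db.DB_BTREE,
+    Db.DB_CREATE | Db.DB_AUTO_COMMIT,  // create if missing, recoverable open
+    0644);                             // mode, modified by the process umask
+</pre>
+<p>If the open throws an exception, <a href="../api_java/db_close.html">Db.close</a>
+should still be called to discard the handle, as described above.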
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db.open is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<p><dl compact>
+<p><dt>TMPDIR<dd>If the <b>file</b> and <b>dbenv</b> arguments to Db.open are
+null, the environment variable <b>TMPDIR</b> may be used as a
+directory in which to create temporary backing files.
+</dl>
+<h1>Errors</h1>
+<p>The Db.open method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt><a name="Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a><dd>The database cannot be opened without being first upgraded.
+<p><dt>EEXIST<dd>DB_CREATE and DB_EXCL were specified and the database exists.
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified
+(for example, an unknown database type, page size, hash function, pad byte,
+or byte order), or a flag value or parameter that is incompatible with the
+specified database.
+<p>
+The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified and fast mutexes are not
+available for this architecture.
+<p>The <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag was specified to Db.open, but was not
+specified to the <a href="../api_java/env_open.html">DbEnv.open</a> call for the environment in which the
+<a href="../api_java/db_class.html">Db</a> handle was created.
+<p>A backing flat text file was specified in combination with either the
+<a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag or a
+database environment that supports transaction processing.
+<p><dt>ENOENT<dd>A nonexistent <b>re_source</b> file was specified.
+</dl>
+<p>The Db.open method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.open method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_put.html b/libdb/docs/api_java/db_put.html
new file mode 100644
index 0000000..8cbe18c
--- /dev/null
+++ b/libdb/docs/api_java/db_put.html
@@ -0,0 +1,108 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.put</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int put(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.put method stores key/data pairs in the database. The default
+behavior of the Db.put function is to enter the new key/data
+pair, replacing any previously existing key if duplicates are disallowed,
+or adding a duplicate data item if duplicates are allowed. If the database
+supports duplicates, the Db.put method adds the new data value at the
+end of the duplicate set. If the database supports sorted duplicates,
+the new data value is inserted at the correct sorted location.
+<p>If the operation is to be transaction-protected (other than by specifying
+the Db.DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_APPEND">Db.DB_APPEND</a><dd>Append the key/data pair to the end of the database. For the
+Db.DB_APPEND flag to be specified, the underlying database must be
+a Queue or Recno database. The record number allocated to the record is
+returned in the specified <b>key</b>.
+<p>There is a minor behavioral difference between the Recno and Queue access
+methods for the Db.DB_APPEND flag. If a transaction enclosing a
+Db.put operation with the Db.DB_APPEND flag aborts, the
+record number may be decremented (and later reallocated by a subsequent
+Db.DB_APPEND operation) by the Recno access method, but will not be
+decremented or reallocated by the Queue access method.
+<p><dt><a name="Db.DB_NODUPDATA">Db.DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, enter the new key/data
+pair only if it does not already appear in the database. If the
+key/data pair already appears in the database, <a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a> is
+returned. The Db.DB_NODUPDATA flag may only be specified if the
+underlying database has been configured to support sorted duplicates.
+<p>The Db.DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_NOOVERWRITE">Db.DB_NOOVERWRITE</a><dd>Enter the new key/data pair only if the key does not already appear in
+the database. If the key already appears in the database,
+<a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a> is returned. Even if the database allows duplicates,
+a call to Db.put with the Db.DB_NOOVERWRITE flag set will
+fail if the key already exists in the database.
+</dl>
+<p>In addition, the following flag may be set by
+bitwise inclusively <b>OR</b>'ing it into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the Db.put call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The Db.put method returns <a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a> if the
+Db.DB_NOOVERWRITE or Db.DB_NODUPDATA flag was specified and the key or
+key/data pair already appears in the database.
+Otherwise, the Db.put method throws an exception that encapsulates a non-zero error value on
+failure.
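+<p>As an illustrative sketch only (assuming an already-open <b>db</b> handle,
+the byte-array Dbt constructor, and hypothetical key and data values), a
+store that refuses to overwrite an existing key might look like this:
+<pre>
+Dbt key = new Dbt("fruit".getBytes());
+Dbt data = new Dbt("apple".getBytes());
+int ret = db.put(null, key, data, Db.DB_NOOVERWRITE);
+if (ret == Db.DB_KEYEXIST)
+    System.out.println("key already present; nothing stored");
+</pre>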
+<h1>Errors</h1>
+<p>The Db.put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A record number of 0 was specified.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to do a partial put.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>A btree exceeded the maximum btree depth (255).
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Db.put method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Db.put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.put method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_remove.html b/libdb/docs/api_java/db_remove.html
new file mode 100644
index 0000000..60fbd61
--- /dev/null
+++ b/libdb/docs/api_java/db_remove.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.remove</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void remove(String file, String database, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.remove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_java/db_class.html">Db</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_java/db_class.html">Db</a> handle may not be accessed again after Db.remove is
+called, regardless of its return.
+<p>The Db.remove method throws an exception that encapsulates a non-zero error value on
+failure.
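+<p>A brief hypothetical sketch (the file name is illustrative); the handle
+must not have been opened before the call, and may not be used afterward:
+<pre>
+Db db = new Db(null, 0);
+db.remove("inventory.db", null, 0);    // removes the file and all databases in it
+</pre>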
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db.remove is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db.remove method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the Db.remove method will
+fail and
+throw a FileNotFoundException exception.
+<p>The Db.remove method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.remove method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_rename.html b/libdb/docs/api_java/db_rename.html
new file mode 100644
index 0000000..092eef9
--- /dev/null
+++ b/libdb/docs/api_java/db_rename.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.rename</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void rename(String file, String database, String newname, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.rename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+Db.rename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <a href="../api_java/db_class.html">Db</a> handle may not be accessed again after Db.rename is
+called, regardless of its return.
+<p>The Db.rename method throws an exception that encapsulates a non-zero error value on
+failure.
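+<p>A brief hypothetical sketch (the file names are illustrative); as with
+<a href="../api_java/db_remove.html">Db.remove</a>, the handle must not have
+been opened before the call, and may not be used afterward:
+<pre>
+Db db = new Db(null, 0);
+db.rename("inventory.db", null, "inventory.bak", 0);
+</pre>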
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db.rename is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db.rename method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the Db.rename method will
+fail and
+throw a FileNotFoundException exception.
+<p>The Db.rename method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.rename method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_append_recno.html b/libdb/docs/api_java/db_set_append_recno.html
new file mode 100644
index 0000000..027be62
--- /dev/null
+++ b/libdb/docs/api_java/db_set_append_recno.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_append_recno</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_append_recno</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbAppendRecno
+{
+	public abstract void db_append_recno(Db db, Dbt data, int recno)
+		throws DbException;
+}
+public class Db
+{
+ public void set_append_recno(DbAppendRecno db_append_recno)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When using the <a href="../api_java/db_put.html#DB_APPEND">Db.DB_APPEND</a> option of the <a href="../api_java/db_put.html">Db.put</a> method,
+it may be useful to modify the stored data based on the generated key.
+If a callback method is specified using the
+Db.set_append_recno method, it will be called after the record number
+has been selected, but before the data has been stored.
+The callback function must throw a <a href="../api_java/except_class.html">DbException</a> object to
+encapsulate the error on failure. That object will be thrown to the
+caller of <a href="../api_java/db_put.html">Db.put</a>.
+<p>The called function must take three arguments: a reference to the
+enclosing database handle; the data <a href="../api_java/dbt_class.html">Dbt</a> to be stored; and the
+selected record number. The called function may then modify the data
+<a href="../api_java/dbt_class.html">Dbt</a>.
+<p>The Db.set_append_recno method configures operations performed using the specified
+<a href="../api_java/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db.set_append_recno interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+<p>The Db.set_append_recno method throws an exception that encapsulates a non-zero error value on
+failure.
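+<p>The following is a hypothetical sketch of a callback that prefixes each
+appended record with the record number allocated for it; the class name and
+the Dbt accessor methods used (get_data, get_size, set_data, set_size) are
+assumptions for illustration:
+<pre>
+import com.sleepycat.db.*;
+
+public class StampRecno implements DbAppendRecno
+{
+    public void db_append_recno(Db db, Dbt data, int recno)
+        throws DbException
+    {
+        byte[] old = data.get_data();
+        int len = data.get_size();
+        byte[] stamped = new byte[len + 4];
+        stamped[0] = (byte)(recno >> 24);      // store recno, big-endian
+        stamped[1] = (byte)(recno >> 16);
+        stamped[2] = (byte)(recno >> 8);
+        stamped[3] = (byte)recno;
+        System.arraycopy(old, 0, stamped, 4, len);
+        data.set_data(stamped);
+        data.set_size(stamped.length);
+    }
+}
+</pre>
+<p>A handle would be configured with a call such as
+<i>db.set_append_recno(new StampRecno())</i> before <a href="../api_java/db_open.html">Db.open</a>.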
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_bt_compare.html b/libdb/docs/api_java/db_set_bt_compare.html
new file mode 100644
index 0000000..23a4ff0
--- /dev/null
+++ b/libdb/docs/api_java/db_set_bt_compare.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_bt_compare</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbBtreeCompare
+{
+ public abstract int bt_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_bt_compare(DbBtreeCompare bt_compare)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree key comparison function. The comparison function is
+called when it is necessary to compare a key specified by the
+application with a key currently stored in the tree. The first argument
+to the comparison function is the <a href="../api_java/dbt_class.html">Dbt</a> representing the
+application supplied key; the second is the current tree's key.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first key argument is considered to be
+respectively less than, equal to, or greater than the second key
+argument. In addition, the comparison function must cause the keys in
+the database to be <i>well-ordered</i>. The comparison function
+must correctly handle any key values used by the application (possibly
+including zero-length keys). In addition, when Btree key prefix
+comparison is being performed (see <a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a> for more
+information), the comparison routine may be passed a prefix of any
+database key. The <b>data</b> and <b>size</b> fields of the
+<a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be used for the purposes of
+this comparison, and no particular alignment of the memory to which
+the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the keys are compared lexically,
+with shorter keys collating before longer keys.
+<p>The Db.set_bt_compare method configures operations performed using the specified
+<a href="../api_java/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db.set_bt_compare interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_bt_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db.set_bt_compare method throws an exception that encapsulates a non-zero error value on
+failure.
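+<p>As a hypothetical sketch (the class name and the Dbt accessor methods are
+assumptions for illustration), a comparator that mirrors the default lexical
+ordering might be written as:
+<pre>
+import com.sleepycat.db.*;
+
+public class LexicalCompare implements DbBtreeCompare
+{
+    public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+    {
+        byte[] a = dbt1.get_data();
+        byte[] b = dbt2.get_data();
+        int alen = dbt1.get_size(), blen = dbt2.get_size();
+        int n = alen &lt; blen ? alen : blen;
+        for (int i = 0; i &lt; n; i++) {
+            int diff = (a[i] & 0xff) - (b[i] & 0xff);
+            if (diff != 0)
+                return diff;            // first differing byte decides
+        }
+        return alen - blen;             // shorter keys collate first
+    }
+}
+</pre>
+<p>The comparator would be registered with
+<i>db.set_bt_compare(new LexicalCompare())</i> before <a href="../api_java/db_open.html">Db.open</a>,
+and the same ordering must be used every time the database is opened.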
+<h1>Errors</h1>
+<p>The Db.set_bt_compare method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_bt_compare method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_bt_compare method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_bt_minkey.html b/libdb/docs/api_java/db_set_bt_minkey.html
new file mode 100644
index 0000000..7098c74
--- /dev/null
+++ b/libdb/docs/api_java/db_set_bt_minkey.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_minkey</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_bt_minkey</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_bt_minkey(int bt_minkey)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the minimum number of key/data pairs intended to be stored on any
+single Btree leaf page.
+<p>This value is used to determine if key or data items will be stored on
+overflow pages instead of Btree leaf pages. For more information on
+the specific algorithm used, see <a href="../ref/am_conf/bt_minkey.html">Minimum keys per page</a>. The <b>bt_minkey</b> value specified must
+be at least 2; if <b>bt_minkey</b> is not explicitly set, a value of
+2 is used.
+<p>The Db.set_bt_minkey method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_bt_minkey interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_bt_minkey will
+be ignored.
+<p>The Db.set_bt_minkey method throws an exception that encapsulates a non-zero error value on
+failure.
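+<p>A brief hypothetical sketch (the file name is illustrative); the call must
+precede <a href="../api_java/db_open.html">Db.open</a>:
+<pre>
+Db db = new Db(null, 0);
+db.set_bt_minkey(4);    // request at least 4 key/data pairs per leaf page
+db.open(null, "large-items.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+</pre>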
+<h1>Errors</h1>
+<p>The Db.set_bt_minkey method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_bt_minkey method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_bt_minkey method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_bt_prefix.html b/libdb/docs/api_java/db_set_bt_prefix.html
new file mode 100644
index 0000000..0ab2ff6
--- /dev/null
+++ b/libdb/docs/api_java/db_set_bt_prefix.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_bt_prefix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_bt_prefix</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbBtreePrefix
+{
+ public abstract int bt_prefix(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_bt_prefix(DbBtreePrefix bt_prefix)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the Btree prefix function. The prefix function must return the
+number of bytes of the second key argument that would be required by
+the Btree key comparison function to determine the second key argument's
+ordering relationship with respect to the first key argument. If the
+two keys are equal, the key length should be returned. The prefix
+function must correctly handle any key values used by the application
+(possibly including zero-length keys). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this determination, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>The prefix function is used to determine the amount by which keys stored
+on the Btree internal pages can be safely truncated without losing their
+uniqueness. See the <a href="../ref/am_conf/bt_prefix.html">Btree
+prefix comparison</a> section of the Berkeley DB Reference Guide for more details
+about how this works. The usefulness of this is data-dependent, but
+can produce significantly reduced tree sizes and search times in some
+data sets.
+<p>If no prefix function or key comparison function is specified by the
+application, a default lexical comparison function is used as the prefix
+function. If no prefix function is specified and a key comparison
+function is specified, no prefix function is used. It is an error to
+specify a prefix function without also specifying a key comparison
+function.
+<p>The Db.set_bt_prefix method configures operations performed using the specified
+<a href="../api_java/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db.set_bt_prefix interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_bt_prefix must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db.set_bt_prefix method throws an exception that encapsulates a non-zero error value on
+failure.
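+<p>The following hypothetical sketch is modeled on the example in the
+<a href="../ref/am_conf/bt_prefix.html">Btree prefix comparison</a> section
+of the Reference Guide; it returns the number of bytes of the second key
+needed to order it against the first under the default lexical comparison
+(a matching key comparison function must also be set):
+<pre>
+import com.sleepycat.db.*;
+
+public class LexicalPrefix implements DbBtreePrefix
+{
+    public int bt_prefix(Db db, Dbt dbt1, Dbt dbt2)
+    {
+        byte[] a = dbt1.get_data();
+        byte[] b = dbt2.get_data();
+        int alen = dbt1.get_size(), blen = dbt2.get_size();
+        int n = alen &lt; blen ? alen : blen;
+        for (int i = 0; i &lt; n; i++)
+            if (a[i] != b[i])
+                return i + 1;           // bytes needed to see the difference
+        // The keys match up to the shorter length; collate longer after shorter.
+        if (alen &lt; blen)
+            return alen + 1;
+        if (blen &lt; alen)
+            return blen + 1;
+        return blen;                    // equal keys: return the key length
+    }
+}
+</pre>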
+<h1>Errors</h1>
+<p>The Db.set_bt_prefix method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_bt_prefix method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_bt_prefix method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_cache_priority.html b/libdb/docs/api_java/db_set_cache_priority.html
new file mode 100644
index 0000000..76ae3ca
--- /dev/null
+++ b/libdb/docs/api_java/db_set_cache_priority.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_cache_priority</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_cache_priority</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_cache_priority(int priority)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the cache priority for pages from the specified database. The
+priority of a page biases the replacement algorithm to be more or less
+likely to discard a page when space is needed in the buffer pool. The
+bias is temporary, and pages will eventually be discarded if they are
+not referenced again. The Db.set_cache_priority interface is
+only advisory, and does not guarantee pages will be treated in a specific
+way.
+<p>The <b>priority</b> argument must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_PRIORITY_VERY_LOW">Db.DB_PRIORITY_VERY_LOW</a><dd>The lowest priority: pages are the most likely to be discarded.
+<dt><a name="Db.DB_PRIORITY_LOW">Db.DB_PRIORITY_LOW</a><dd>The next lowest priority.
+<dt><a name="Db.DB_PRIORITY_DEFAULT">Db.DB_PRIORITY_DEFAULT</a><dd>The default priority.
+<dt><a name="Db.DB_PRIORITY_HIGH">Db.DB_PRIORITY_HIGH</a><dd>The next highest priority.
+<dt><a name="Db.DB_PRIORITY_VERY_HIGH">Db.DB_PRIORITY_VERY_HIGH</a><dd>The highest priority: pages are the least likely to be discarded.
+</dl>
+<p>The Db.set_cache_priority method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_cache_priority interface may be called at any time during the life of
+the application.
+<p>The Db.set_cache_priority method throws an exception that encapsulates a non-zero error value on
+failure.
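+<p>A brief hypothetical sketch (assuming an existing <b>db</b> handle),
+requesting that pages from a frequently used database be kept in the cache
+longer than others:
+<pre>
+db.set_cache_priority(Db.DB_PRIORITY_HIGH);
+</pre>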
+<h1>Errors</h1>
+<p>The Db.set_cache_priority method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_cache_priority method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_cachesize.html b/libdb/docs/api_java/db_set_cachesize.html
new file mode 100644
index 0000000..bda84a2
--- /dev/null
+++ b/libdb/docs/api_java/db_set_cachesize.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_cachesize(int gbytes, int bytes, int ncache)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>Because databases opened within Berkeley DB environments use the cache
+specified to the environment, it is an error to attempt to set a cache
+in a database created within an environment.
+<p>The Db.set_cachesize interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+<p>The Db.set_cachesize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
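+<p>As a hypothetical sketch (usable only for a database that is not part of
+an environment, and only before <a href="../api_java/db_open.html">Db.open</a>),
+the following requests a 1GB cache allocated as two separate regions:
+<pre>
+Db db = new Db(null, 0);
+db.set_cachesize(1, 0, 2);    // 1 gigabyte, 0 additional bytes, 2 cache pieces
+</pre>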
+<h1>Errors</h1>
+<p>The Db.set_cachesize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called in a database environment.
+<p>Called after
+<a href="../api_java/db_open.html">Db.open</a>
+was called.
+</dl>
+<p>The Db.set_cachesize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_cachesize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_dup_compare.html b/libdb/docs/api_java/db_set_dup_compare.html
new file mode 100644
index 0000000..e844a07
--- /dev/null
+++ b/libdb/docs/api_java/db_set_dup_compare.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_dup_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_dup_compare</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbDupCompare
+{
+ public abstract int dup_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+public class Db
+{
+ public void set_dup_compare(DbDupCompare dup_compare)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the duplicate data item comparison function. The comparison function
+is called when it is necessary to compare a data item specified by the
+application with a data item currently stored in the tree. The first
+argument to the comparison function is the <a href="../api_java/dbt_class.html">Dbt</a> representing the
+application's data item; the second is the current tree's data item.
+Calling Db.set_dup_compare implies calling <a href="../api_java/db_set_flags.html">Db.set_flags</a>
+with the <a href="../api_java/db_set_flags.html#DB_DUPSORT">Db.DB_DUPSORT</a> flag.
+<p>The comparison function must return an integer value less than, equal
+to, or greater than zero if the first data item argument is considered
+to be respectively less than, equal to, or greater than the second data
+item argument. In addition, the comparison function must cause the data
+items in the set to be <i>well-ordered</i>. The comparison function
+must correctly handle any data item values used by the application
+(possibly including zero-length data items). The <b>data</b> and
+<b>size</b> fields of the <a href="../api_java/dbt_class.html">Dbt</a> are the only fields that may be
+used for the purposes of this comparison, and no particular alignment
+of the memory to which the <b>data</b> field refers may be assumed.
+<p>If no comparison function is specified, the data items are compared
+lexically, with shorter data items collating before longer data items.
+<p>The Db.set_dup_compare interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_dup_compare must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db.set_dup_compare method throws an exception that encapsulates a non-zero error value on
+failure.
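+<p>As a sketch, a duplicate comparison function that orders data items by
+unsigned byte value might look like the following; the handle <b>db</b> and
+the <b>get_data</b>/<b>get_size</b> accessors used to reach the
+<a href="../api_java/dbt_class.html">Dbt</a> fields are assumptions made for
+illustration:
+<p><blockquote><pre>class LexicalDupCompare implements DbDupCompare
+{
+    public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+    {
+        byte[] a = dbt1.get_data();
+        byte[] b = dbt2.get_data();
+        int len = Math.min(dbt1.get_size(), dbt2.get_size());
+        for (int i = 0; i &lt; len; i++) {
+            int diff = (a[i] &amp; 0xff) - (b[i] &amp; 0xff);
+            if (diff != 0)
+                return diff;
+        }
+        // Shorter items collate before longer ones.
+        return dbt1.get_size() - dbt2.get_size();
+    }
+}
+
+// Register before Db.open; this implies Db.DB_DUPSORT.
+db.set_dup_compare(new LexicalDupCompare());</pre></blockquote>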
+<h1>Errors</h1>
+<p>The Db.set_dup_compare method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.set_dup_compare method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_dup_compare method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_encrypt.html b/libdb/docs/api_java/db_set_encrypt.html
new file mode 100644
index 0000000..8bdabd4
--- /dev/null
+++ b/libdb/docs/api_java/db_set_encrypt.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_encrypt(String passwd, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_java/env_class.html">DbEnv</a> and <a href="../api_java/db_class.html">Db</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_ENCRYPT_AES">Db.DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>Because databases opened within Berkeley DB environments use the password
+specified to the environment, it is an error to attempt to set a
+password in a database created within an environment.
+<p>The Db.set_encrypt interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+<p>The Db.set_encrypt method throws an exception that encapsulates a non-zero error value on
+failure.
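+<p>For example, a standalone database might be configured for AES encryption
+before it is opened; the handle <b>db</b> and the password are illustrative
+assumptions:
+<p><blockquote><pre>// Sketch: db is an unopened Db handle created outside an environment.
+db.set_encrypt("my secret password", Db.DB_ENCRYPT_AES);
+db.set_flags(Db.DB_ENCRYPT);    // see Db.set_flags</pre></blockquote>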
+<h1>Errors</h1>
+<p>The Db.set_encrypt method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_java/db_open.html">Db.open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The Db.set_encrypt method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_encrypt method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_errcall.html b/libdb/docs/api_java/db_set_errcall.html
new file mode 100644
index 0000000..3fde21b
--- /dev/null
+++ b/libdb/docs/api_java/db_set_errcall.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbErrcall
+{
+ public abstract void errcall(String errpfx, String msg);
+}
+public class Db
+{
+ public void set_errcall(DbErrcall errcall);
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> and Db.set_errcall methods are used to
+enhance the mechanism for reporting error messages to the application.
+The <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> and Db.set_errcall methods must be
+called with a single object argument. The object's class must implement
+the DbErrcall interface. In some cases, when an error occurs, Berkeley DB
+will invoke the object's errcall() method with two arguments; the first
+is the prefix string (as previously set by <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> or
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), the second will be an error message string.
+It is up to this method to display the message in an appropriate
+manner.
+<p>Alternatively, you can use the <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> and
+<a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a> methods to display the additional information via
+an output stream. You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_java/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db.set_errcall method affects the entire environment and is equivalent to calling
+the <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method.
+<p>The Db.set_errcall interface may be called at any time during the life of
+the application.
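+<p>As a sketch, an application might route additional Berkeley DB error
+messages to its own reporting code as follows; the handle <b>db</b> is an
+assumption:
+<p><blockquote><pre>class Errcall implements DbErrcall
+{
+    public void errcall(String errpfx, String msg)
+    {
+        // errpfx is the prefix set by set_errpfx, and may be null.
+        if (errpfx != null)
+            System.err.println(errpfx + ": " + msg);
+        else
+            System.err.println(msg);
+    }
+}
+
+// May be called at any time during the life of the application.
+db.set_errcall(new Errcall());</pre></blockquote>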
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_error_stream.html b/libdb/docs/api_java/db_set_error_stream.html
new file mode 100644
index 0000000..7e6868c
--- /dev/null
+++ b/libdb/docs/api_java/db_set_error_stream.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_error_stream</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void Db.set_error_stream(OutputStream s)
+ throws DbException
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> and Db.set_error_stream methods
+are used to enhance the mechanism for reporting error messages to the
+application by setting an OutputStream to be used for displaying additional
+Berkeley DB error messages. In some cases, when an error occurs, Berkeley DB will
+output an additional error message to the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method to capture the
+additional error information in a way that does not use output streams.
+You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For <a href="../api_java/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db.set_error_stream method affects the entire environment and is equivalent to calling
+the <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> method.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_errpfx.html b/libdb/docs/api_java/db_set_errpfx.html
new file mode 100644
index 0000000..995b53f
--- /dev/null
+++ b/libdb/docs/api_java/db_set_errpfx.html
@@ -0,0 +1,45 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_errpfx(String errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>For <a href="../api_java/db_class.html">Db</a> handles opened inside of Berkeley DB environments, calling the
+Db.set_errpfx method affects the entire environment and is equivalent to calling
+the <a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a> method.
+<p>The Db.set_errpfx interface may be called at any time during the life of
+the application.
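+<p>For example, an application might identify itself in Berkeley DB error
+messages as follows; the handle <b>db</b> and the prefix string are
+illustrative assumptions:
+<p><blockquote><pre>db.set_errpfx("my_application");</pre></blockquote>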
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_feedback.html b/libdb/docs/api_java/db_set_feedback.html
new file mode 100644
index 0000000..9504072
--- /dev/null
+++ b/libdb/docs/api_java/db_set_feedback.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbFeedback
+{
+ public abstract void feedback(Db db, int opcode, int pct);
+}
+public class Db
+{
+ public void set_feedback(DbFeedback db_feedback)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The Db.set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing database
+handle; the second a flag value; and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_UPGRADE">Db.DB_UPGRADE</a><dd>The underlying database is being upgraded.
+<p><dt><a name="Db.DB_VERIFY">Db.DB_VERIFY</a><dd>The underlying database is being verified.
+</dl>
+<p>The Db.set_feedback interface may be called at any time during the life of
+the application.
+<p>The Db.set_feedback method throws an exception that encapsulates a non-zero error value on
+failure.
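+<p>As a sketch, a callback reporting upgrade or verification progress might
+look like the following; the handle <b>db</b> is an assumption:
+<p><blockquote><pre>class Feedback implements DbFeedback
+{
+    public void feedback(Db db, int opcode, int pct)
+    {
+        if (opcode == Db.DB_UPGRADE)
+            System.out.println("upgrade: " + pct + "% complete");
+        else if (opcode == Db.DB_VERIFY)
+            System.out.println("verify: " + pct + "% complete");
+    }
+}
+
+// May be called at any time during the life of the application.
+db.set_feedback(new Feedback());</pre></blockquote>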
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_flags.html b/libdb/docs/api_java/db_set_flags.html
new file mode 100644
index 0000000..a673a32
--- /dev/null
+++ b/libdb/docs/api_java/db_set_flags.html
@@ -0,0 +1,223 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_flags(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Calling Db.set_flags is additive; there is no way to clear flags.
+<p>The <b>flags</b> value must be set to 0, or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<h3>General</h3>
+<p>The following flags may be specified for any Berkeley DB access method:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="Db.DB_CHKSUM_SHA1">Db.DB_CHKSUM_SHA1</a><dd>Do checksum verification of pages read into the cache from the backing
+filestore, using the SHA1 Secure Hash Algorithm.
+<p>Calling Db.set_flags with the Db.DB_CHKSUM_SHA1 flag only affects the
+specified <a href="../api_java/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_CHKSUM_SHA1
+flag
+will be ignored.
+If creating additional databases in a file, the checksum behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+<a name="4"><!--meow--></a>
+<p><dt><a name="Db.DB_ENCRYPT">Db.DB_ENCRYPT</a><dd>Encrypt the database using the cryptographic password specified to the
+<a href="../api_java/env_set_encrypt.html">DbEnv.set_encrypt</a> or <a href="../api_java/db_set_encrypt.html">Db.set_encrypt</a> methods.
+<p>Calling Db.set_flags with the Db.DB_ENCRYPT flag only affects the
+specified <a href="../api_java/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_ENCRYPT
+flag
+must be the same as the existing database or an error
+will be returned.
+If creating additional databases in a file, the encryption behavior specified
+must be consistent with the existing databases in the file or an error will
+be returned.
+</dl>
+<h3>Btree</h3>
+<p>The following flags may be specified for the Btree access method:
+<p><dl compact>
+<a name="5"><!--meow--></a>
+<p><dt><a name="Db.DB_DUP">Db.DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both Db.DB_DUP
+and Db.DB_RECNUM.
+<p>Calling Db.set_flags with the Db.DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="6"><!--meow--></a>
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function; if no comparison function is specified
+using <a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a>, a default lexical comparison is used.
+It is an error to specify both Db.DB_DUPSORT and Db.DB_RECNUM.
+<p>Calling Db.set_flags with the Db.DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="7"><!--meow--></a>
+<p><dt><a name="Db.DB_RECNUM">Db.DB_RECNUM</a><dd>Support retrieval from the Btree using record numbers. For more
+information, see the <a href="../api_java/db_get.html#DB_SET_RECNO">Db.DB_SET_RECNO</a> flag to the <a href="../api_java/db_get.html">Db.get</a>
+and <a href="../api_java/dbc_get.html">Dbc.get</a> methods.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the Db.DB_RENUMBER flag in the
+Recno access method information for further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are
+stored. In addition, the entire tree must be locked during both
+insertions and deletions, effectively single-threading the tree for
+those operations. Specifying Db.DB_RECNUM can result in serious
+performance degradation for some applications and data sets.
+<p>It is an error to specify both Db.DB_DUP and Db.DB_RECNUM.
+<p>Calling Db.set_flags with the Db.DB_RECNUM flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_RECNUM
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="8"><!--meow--></a><a name="9"><!--meow--></a>
+<p><dt><a name="Db.DB_REVSPLITOFF">Db.DB_REVSPLITOFF</a><dd>Turn off reverse splitting in the Btree. As pages are emptied in a
+database, the Berkeley DB Btree implementation attempts to coalesce empty pages
+into higher-level pages in order to keep the tree as small as possible
+and minimize tree search time. This can hurt performance in applications
+with cyclical data demands; that is, applications where the database grows
+and shrinks repeatedly. For example, because Berkeley DB does page-level
+locking, the maximum level of concurrency in a database of two pages is far
+smaller than that in a database of 100 pages, so a database that has
+shrunk to a minimal size can cause severe deadlocking when a new cycle of
+data insertion begins.
+<p>Calling Db.set_flags with the Db.DB_REVSPLITOFF flag only affects the
+specified <a href="../api_java/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<h3>Hash</h3>
+<p>The following flags may be specified for the Hash access method:
+<p><dl compact>
+<p><dt><a name="Db.DB_DUP">Db.DB_DUP</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion, unless the ordering is otherwise specified by
+use of a cursor operation. It is an error to specify both Db.DB_DUP
+and Db.DB_RECNUM.
+<p>Calling Db.set_flags with the Db.DB_DUP flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_DUP
+flag
+must be the same as the existing database or an error
+will be returned.
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd>Permit duplicate data items in the tree; that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the duplicate comparison function; if no comparison function is specified
+using <a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a>, a default lexical comparison is used.
+It is an error to specify both Db.DB_DUPSORT and Db.DB_RECNUM.
+<p>Calling Db.set_flags with the Db.DB_DUPSORT flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_DUPSORT
+flag
+must be the same as the existing database or an error
+will be returned.
+</dl>
+<h3>Queue</h3>
+<p>There are no additional flags that may be specified for the Queue access
+method.
+<h3>Recno</h3>
+<p>The following flags may be specified for the Recno access method:
+<p><dl compact>
+<a name="10"><!--meow--></a>
+<p><dt><a name="Db.DB_RENUMBER">Db.DB_RENUMBER</a><dd>Specifying the Db.DB_RENUMBER flag causes the logical record
+numbers to be mutable, and change as records are added to and deleted
+from the database. For example, the deletion of record number 4 causes
+records numbered 5 and greater to be renumbered downward by one. If a
+cursor was positioned to record number 4 before the deletion, it will
+refer to the new record number 4, if any such record exists, after the
+deletion. If a cursor was positioned after record number 4 before the
+deletion, it will be shifted downward one logical record, continuing to
+refer to the same record as it did before.
+<p>Using the <a href="../api_java/db_put.html">Db.put</a> or <a href="../api_java/dbc_put.html">Dbc.put</a> interfaces to create new
+records will cause the creation of multiple records if the record number
+is more than one greater than the largest record currently in the
+database. For example, creating record 28, when record 25 was previously
+the last record in the database, will create records 26 and 27 as well as
+28. Attempts to retrieve records that were created in this manner will
+result in an error return of <a href="../ref/program/errorret.html#DB_KEYEMPTY">Db.DB_KEYEMPTY</a>.
+<p>If a created record is not at the end of the database, all records
+following the new record will be automatically renumbered upward by one.
+For example, the creation of a new record numbered 8 causes records
+numbered 8 and greater to be renumbered upward by one. If a cursor was
+positioned to record number 8 or greater before the insertion, it will be
+shifted upward one logical record, continuing to refer to the same record
+as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+Db.DB_RENUMBER flag specified may be largely meaningless, although
+it is supported.
+<p>Calling Db.set_flags with the Db.DB_RENUMBER flag affects the
+database, including all threads of control accessing the database.
+<p>If the database already exists when <a href="../api_java/db_open.html">Db.open</a> is called, the DB_RENUMBER
+flag
+must be the same as the existing database or an error
+will be returned.
+<a name="11"><!--meow--></a>
+<p><dt><a name="Db.DB_SNAPSHOT">Db.DB_SNAPSHOT</a><dd>This flag specifies that any specified <b>re_source</b> file be read
+in its entirety when <a href="../api_java/db_open.html">Db.open</a> is called. If this flag is not
+specified, the <b>re_source</b> file may be read lazily.
+<p>Calling Db.set_flags with the Db.DB_SNAPSHOT flag only affects the
+specified <a href="../api_java/db_class.html">Db</a> handle (and any other Berkeley DB handles opened within
+the scope of that handle).
+</dl>
+<p>The Db.set_flags interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+<p>The Db.set_flags method throws an exception that encapsulates a non-zero error value on
+failure.
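+<p>For example, a Btree database might be configured for sorted duplicates
+with reverse splitting turned off; the handle <b>db</b> is an assumption:
+<p><blockquote><pre>// Sketch: call before Db.open; Db.set_flags calls are additive.
+db.set_flags(Db.DB_DUPSORT | Db.DB_REVSPLITOFF);</pre></blockquote>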
+<h1>Errors</h1>
+<p>The Db.set_flags method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The <a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a> method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the <a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a> method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_h_ffactor.html b/libdb/docs/api_java/db_set_h_ffactor.html
new file mode 100644
index 0000000..7ce3c41
--- /dev/null
+++ b/libdb/docs/api_java/db_set_h_ffactor.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_ffactor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_h_ffactor</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_h_ffactor(int h_ffactor)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket, determining when the hash table grows or
+shrinks. If you know the average sizes of the keys and data in your
+data set, setting the fill factor can enhance performance. A reasonable
+rule for computing the fill factor is to set it to the following:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>If no value is specified, the fill factor will be selected dynamically as
+pages are filled.
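+<p>For example, with an assumed 4096-byte page size, 16-byte keys, and 64-byte
+data items, the rule above gives (4096 - 32) / (16 + 64 + 8), or roughly 46
+keys per bucket; the handle <b>db</b> is an assumption:
+<p><blockquote><pre>db.set_h_ffactor(46);</pre></blockquote>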
+<p>The Db.set_h_ffactor method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_h_ffactor interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_h_ffactor will
+be ignored.
+<p>The Db.set_h_ffactor method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Db.set_h_ffactor method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_h_ffactor method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_h_ffactor method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_h_hash.html b/libdb/docs/api_java/db_set_h_hash.html
new file mode 100644
index 0000000..d665b4c
--- /dev/null
+++ b/libdb/docs/api_java/db_set_h_hash.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_h_hash</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbHash
+{
+ public abstract int hash(Db db, byte[] data, int len);
+}
+public class Db
+{
+ public void set_h_hash(DbHash h_hash)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set a user-defined hash method; if no hash method is specified, a default
+hash method is used. Because no hash method performs equally well on all
+possible data, the user may find that the built-in hash method performs
+poorly with a particular data set. User-specified hash functions must
+take a pointer to a byte string and a length as arguments, and return a
+value of type
+<b>int</b>.
+The hash function must handle any key values used by the application
+(possibly including zero-length keys).
+<p>If a hash method is specified, <a href="../api_java/db_open.html">Db.open</a> will attempt to determine
+whether the hash method specified is the same as the one with which the
+database was created, and will fail if it detects that it is not.
+<p>The Db.set_h_hash method configures operations performed using the specified
+<a href="../api_java/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db.set_h_hash interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_h_hash must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db.set_h_hash method throws an exception that encapsulates a non-zero error value on
+failure.
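+<p>As a sketch, a simple user-defined hash over the first <b>len</b> bytes of
+the key might look like the following; the handle <b>db</b> and the particular
+hash formula are illustrative assumptions:
+<p><blockquote><pre>class Hash implements DbHash
+{
+    public int hash(Db db, byte[] data, int len)
+    {
+        int h = 5381;
+        for (int i = 0; i &lt; len; i++)
+            h = h * 33 + (data[i] &amp; 0xff);
+        return h;
+    }
+}
+
+// Must be registered before Db.open.
+db.set_h_hash(new Hash());</pre></blockquote>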
+<h1>Errors</h1>
+<p>The Db.set_h_hash method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_h_hash method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_h_hash method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_h_nelem.html b/libdb/docs/api_java/db_set_h_nelem.html
new file mode 100644
index 0000000..fd84995
--- /dev/null
+++ b/libdb/docs/api_java/db_set_h_nelem.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_h_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_h_nelem</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_h_nelem(int h_nelem)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set an estimate of the final size of the hash table.
+<p>In order for the estimate to be used when creating the database,
+the <a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a> method must also be called.
+If the estimate or fill factor is not set or is set too low,
+hash tables will still expand gracefully
+as keys are entered, although a slight performance degradation may be
+noticed.
+<p>The Db.set_h_nelem method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_h_nelem interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_h_nelem will
+be ignored.
+<p>The Db.set_h_nelem method throws an exception that encapsulates a non-zero error value on
+failure.
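+<p>For example, a hash table expected to hold roughly one million keys might
+be configured as follows (the fill factor must also be set for the estimate
+to be used); the handle <b>db</b> and the values are illustrative assumptions:
+<p><blockquote><pre>db.set_h_ffactor(46);
+db.set_h_nelem(1000000);</pre></blockquote>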
+<h1>Errors</h1>
+<p>The Db.set_h_nelem method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_h_nelem method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_h_nelem method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_lorder.html b/libdb/docs/api_java/db_set_lorder.html
new file mode 100644
index 0000000..2c93d4d
--- /dev/null
+++ b/libdb/docs/api_java/db_set_lorder.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_lorder</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_lorder</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lorder(int lorder)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the byte order for integers in the stored database metadata. The
+number should represent the order as an integer; for example, big endian
+order is the value 4,321, and little endian order is the value 1,234.
+If <b>lorder</b> is not explicitly set, the host order of the machine
+where the Berkeley DB library was compiled is used.
+<p><b>The access methods provide no guarantees about the byte ordering of the
+application data stored in the database, and applications are responsible
+for maintaining any necessary ordering.</b>
+<p>The Db.set_lorder method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_lorder interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_lorder will
+be ignored.
+If creating additional databases in a file, the byte order specified must
+be consistent with the existing databases in the file or an error will be
+returned.
+<p>The Db.set_lorder method throws an exception that encapsulates a non-zero error value on
+failure.
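+<p>For example, the stored metadata might be forced to big endian order
+regardless of the host machine; the handle <b>db</b> is an assumption:
+<p><blockquote><pre>db.set_lorder(4321);    // 1234 would select little endian order</pre></blockquote>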
+<h1>Errors</h1>
+<p>The Db.set_lorder method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_lorder method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_lorder method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_pagesize.html b/libdb/docs/api_java/db_set_pagesize.html
new file mode 100644
index 0000000..170ec7b
--- /dev/null
+++ b/libdb/docs/api_java/db_set_pagesize.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_pagesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_pagesize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_pagesize(long pagesize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes, and the maximum page size is 64K
+bytes. If the page size is not explicitly set, one is selected based
+on the underlying filesystem I/O block size. The automatically selected
+size has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p>The Db.set_pagesize method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_pagesize interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_pagesize will
+be ignored.
+If creating additional databases in a file, the page size specified must
+be consistent with the existing databases in the file or an error will
+be returned.
+<p>The Db.set_pagesize method throws an exception that encapsulates a non-zero error value on
+failure.
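+<p>For example, the page size might be set to 4KB before the database is
+created; the handle <b>db</b> is an assumption:
+<p><blockquote><pre>db.set_pagesize(4 * 1024);</pre></blockquote>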
+<h1>Errors</h1>
+<p>The Db.set_pagesize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_pagesize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_pagesize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_q_extentsize.html b/libdb/docs/api_java/db_set_q_extentsize.html
new file mode 100644
index 0000000..07d111a
--- /dev/null
+++ b/libdb/docs/api_java/db_set_q_extentsize.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_q_extentsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_q_extentsize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_q_extentsize(int extentsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the extents used to hold pages in a Queue database,
+specified as a number of pages. Each extent is created as a separate
+physical file. If no extent size is set, the default behavior is to
+create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting a extent size</a>.
+<p>The Db.set_q_extentsize method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_q_extentsize interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_q_extentsize will
+be ignored.
+<p>The Db.set_q_extentsize method throws an exception that encapsulates a non-zero error value on
+failure.
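+<p>For example, a Queue database might be broken into extents of 1,000 pages
+each; the handle <b>db</b> and the value are illustrative assumptions:
+<p><blockquote><pre>db.set_q_extentsize(1000);</pre></blockquote>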
+<h1>Errors</h1>
+<p>The Db.set_q_extentsize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_q_extentsize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_q_extentsize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_re_delim.html b/libdb/docs/api_java/db_set_re_delim.html
new file mode 100644
index 0000000..6a5fd01
--- /dev/null
+++ b/libdb/docs/api_java/db_set_re_delim.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_delim</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_re_delim</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_delim(int re_delim)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable length records if the <b>re_source</b>
+file is specified. If the <b>re_source</b> file is specified and no
+delimiting byte was specified, &lt;newline&gt; characters (that
+is, ASCII 0x0a) are interpreted as end-of-record markers.
+<p>The Db.set_re_delim method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_re_delim interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_re_delim will
+be ignored.
+<p>The Db.set_re_delim method throws an exception that encapsulates a non-zero error value on
+failure.
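+<p>For example, records in the backing source file might be delimited by a
+NUL byte rather than the default &lt;newline&gt;; the handle <b>db</b> is an
+assumption:
+<p><blockquote><pre>db.set_re_delim(0);    // the default delimiter is 0x0a (newline)</pre></blockquote>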
+<h1>Errors</h1>
+<p>The Db.set_re_delim method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_re_delim method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_re_delim method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_re_len.html b/libdb/docs/api_java/db_set_re_len.html
new file mode 100644
index 0000000..d7ec659
--- /dev/null
+++ b/libdb/docs/api_java/db_set_re_len.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_re_len</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_len(int re_len)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>For the Queue access method, specify that the records are of length
+<b>re_len</b>. The record length must be sufficiently smaller than the
+database's page size that at least one record, plus the database page's
+metadata information, can fit on each database page.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte-delimited, and are of length <b>re_len</b>.
+<p>Any records added to the database that are less than <b>re_len</b> bytes
+long are automatically padded (see <a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a> for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>re_len</b> bytes long will cause the call to fail immediately and
+return an error.
+<p>The Db.set_re_len method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_re_len interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_re_len will
+be ignored.
+<p>The Db.set_re_len method throws an exception that encapsulates a non-zero error value on
+failure.
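+<p>A minimal, untested sketch of a fixed-length Queue database follows; the file
+name is illustrative, and the <a href="../api_java/db_class.html">Db</a> constructor and <a href="../api_java/db_open.html">Db.open</a> call are
+assumed from their own pages in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class ReLenExample {
+    public static void main(String[] args) throws Exception {
+        Db db = new Db(null, 0);
+        db.set_re_len(32);      // every record is exactly 32 bytes
+        db.set_re_pad(' ');     // shorter records are space-padded (see Db.set_re_pad)
+        // Illustrative file name; 32-byte records easily satisfy the
+        // page-size constraint described above.
+        db.open(null, "jobs.db", null, Db.DB_QUEUE, Db.DB_CREATE, 0644);
+        db.close(0);
+    }
+}</pre></blockquote>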
+<h1>Errors</h1>
+<p>The Db.set_re_len method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_re_len method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_re_len method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_re_pad.html b/libdb/docs/api_java/db_set_re_pad.html
new file mode 100644
index 0000000..ddb6129
--- /dev/null
+++ b/libdb/docs/api_java/db_set_re_pad.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_pad</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_re_pad</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_pad(int re_pad)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (that
+is, ASCII 0x20) are used for padding.
+<p>The Db.set_re_pad method configures a database, not only operations performed
+using the specified <a href="../api_java/db_class.html">Db</a> handle.
+<p>The Db.set_re_pad interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_re_pad will
+be ignored.
+<p>The Db.set_re_pad method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Db.set_re_pad method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_re_pad method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_re_pad method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_set_re_source.html b/libdb/docs/api_java/db_set_re_source.html
new file mode 100644
index 0000000..797d6e2
--- /dev/null
+++ b/libdb/docs/api_java/db_set_re_source.html
@@ -0,0 +1,103 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.set_re_source</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.set_re_source</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_re_source(String re_source)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the underlying source file for the Recno access method. The purpose
+of the <b>re_source</b> value is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>re_source</b> field is set, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are
+separated, as specified by <a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>. For example,
+standard UNIX byte stream files can be interpreted as a sequence of
+variable length records separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (for example, the <a href="../api_java/db_close.html">Db.close</a> or
+<a href="../api_java/db_sync.html">Db.sync</a> methods are called), the in-memory copy of the database
+will be written back to the <b>re_source</b> file.
+<p>By default, the backing source file is read lazily; that is, records
+are not read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently, and are either inserting or deleting records, the backing
+source file must be read in its entirety before more than a single
+process accesses the database, and only that process should specify the
+backing source file as part of the <a href="../api_java/db_open.html">Db.open</a> call. See the
+<a href="../api_java/db_set_flags.html#DB_SNAPSHOT">Db.DB_SNAPSHOT</a> flag for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>re_source</b>
+cannot be transaction-protected because it involves filesystem
+operations that are not part of the Db transaction methodology.</b> For
+this reason, if a temporary database is used to hold the records, it is
+possible to lose the contents of the <b>re_source</b> file, for
+example, if the system crashes at the right instant. If a file is used
+to hold the database, normal database recovery on that file can be used
+to prevent information loss, although it is still possible that the
+contents of <b>re_source</b> will be lost if the system crashes.
+<p>The <b>re_source</b> file must already exist (but may be zero-length) when
+<a href="../api_java/db_open.html">Db.open</a> is called.
+<p>It is not an error to specify a read-only <b>re_source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <a href="../api_java/db_sync.html">Db.sync</a> or <a href="../api_java/db_close.html">Db.close</a> methods will fail.
+Specify the <a href="../api_java/db_close.html#DB_NOSYNC">Db.DB_NOSYNC</a> flag to the <a href="../api_java/db_close.html">Db.close</a> method to stop it
+from attempting to write the changes to the backing file; instead, they
+will be silently discarded.
+<p>For all of the previous reasons, the <b>re_source</b> field is generally
+used to specify databases that are read-only for Berkeley DB applications,
+and that are either generated on the fly by software tools or modified
+using a different mechanism -- for example, a text editor.
+<p>The Db.set_re_source method configures operations performed using the specified
+<a href="../api_java/db_class.html">Db</a> handle, not all operations performed on the underlying
+database.
+<p>The Db.set_re_source interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+If the database already exists when
+<a href="../api_java/db_open.html">Db.open</a> is called, the information specified to Db.set_re_source must
+be the same as that historically used to create the database or
+corruption can occur.
+<p>The Db.set_re_source method throws an exception that encapsulates a non-zero error value on
+failure.
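+<p>A minimal, untested sketch of a Recno database backed by a flat text file
+follows; the file names are illustrative, and the <a href="../api_java/db_class.html">Db</a> constructor and
+<a href="../api_java/db_open.html">Db.open</a> call are assumed from their own pages in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+import java.io.FileWriter;
+
+public class ReSourceExample {
+    public static void main(String[] args) throws Exception {
+        // The backing file must already exist (it may be empty) at Db.open time;
+        // file names and contents are illustrative.
+        FileWriter fw = new FileWriter("groups.txt");
+        fw.write("wheel\nstaff\nusers\n");
+        fw.close();
+
+        Db db = new Db(null, 0);
+        db.set_re_source("groups.txt");  // newline-delimited text records
+        db.open(null, "groups.db", null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+        // ... read or modify records by record number ...
+        db.close(0);   // writes the in-memory copy back to groups.txt
+    }
+}</pre></blockquote>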
+<h1>Errors</h1>
+<p>The Db.set_re_source method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/db_open.html">Db.open</a> was called.
+</dl>
+<p>The Db.set_re_source method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.set_re_source method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_stat.html b/libdb/docs/api_java/db_stat.html
new file mode 100644
index 0000000..8e7f58b
--- /dev/null
+++ b/libdb/docs/api_java/db_stat.html
@@ -0,0 +1,166 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Object stat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.stat method creates a statistical structure and
+fills it with statistics for the database.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FAST_STAT">Db.DB_FAST_STAT</a><dd>Return only the values which do not require traversal of the database.
+Fields returned when this flag is set are noted with an asterisk (*)
+below.
+<p>Among other things, this flag makes it possible for applications to
+request key and record counts without incurring the performance penalty
+of traversing the entire database. If the underlying database is of
+type Recno, or of type Btree and the database was created with the
+<a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a> flag, the count of keys will be exact. Otherwise,
+the count of keys will be the value saved the last time the database
+was traversed, or 0 if no count of keys has ever been made. If the
+underlying database is of type Recno, the count of data items will be
+exact; otherwise, the count of data items will be the value saved the
+last time the database was traversed, or 0 if no count of data items
+has ever been done.
+</dl>
+<p>If the Db.DB_FAST_STAT flag has not been specified, the
+Db.stat method will access some or all of the pages in the database,
+incurring a severe performance penalty as well as possibly flushing the
+underlying buffer pool.
+<p>In the presence of multiple threads or processes accessing an active
+database, the information returned by Db.stat may be out-of-date.
+<p>If the database was not opened read-only and the Db.DB_FAST_STAT
+flag was not specified, the cached key and record numbers will be
+updated after the statistical information has been gathered.
+<p>The Db.stat method cannot be transaction-protected. For this reason,
+it should be called in a thread of control that has no open cursors or
+active transactions.
+<p>The Db.stat method throws an exception that encapsulates a non-zero error value on
+failure.
+<h3>Hash Statistics</h3>
+<p>In the case of a Hash database,
+the statistics are returned in an instance of DbHashStat. The data
+fields are available from DbHashStat:
+<p><dl compact>
+<p><dt>public int hash_magic*<dd>Magic number that identifies the file as a Hash file.
+<dt>public int hash_version*<dd>The version of the Hash database.
+<dt>public int hash_nkeys*<dd>The number of unique keys in the database. If Db.DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>public int hash_ndata*<dd>The number of key/data pairs in the database. If Db.DB_FAST_STAT
+was specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>public int hash_pagesize*<dd>The underlying Hash database page (and bucket) size, in bytes.
+<dt>public int hash_ffactor*<dd>The desired fill factor (number of items per bucket) specified at
+database-creation time.
+<dt>public int hash_buckets*<dd>The number of hash buckets.
+<dt>public int hash_free<dd>The number of pages on the free list.
+<dt>public int hash_bfree<dd>The number of bytes free on bucket pages.
+<dt>public int hash_bigpages<dd>The number of big key/data pages.
+<dt>public int hash_big_bfree<dd>The number of bytes free on big item pages.
+<dt>public int hash_overflows<dd>The number of overflow pages (overflow pages are pages that contain items
+that did not fit in the main bucket page).
+<dt>public int hash_ovfl_free<dd>The number of bytes free on overflow pages.
+<dt>public int hash_dup<dd>The number of duplicate pages.
+<dt>public int hash_dup_free<dd>The number of bytes free on duplicate pages.
+</dl>
+<h3>Btree and Recno Statistics</h3>
+<p>In the case of a Btree or Recno database,
+the statistics are returned in an instance of DbBtreeStat. The data
+fields are available from DbBtreeStat:
+<p><dl compact>
+<p><dt>public int bt_magic*<dd>Magic number that identifies the file as a Btree database.
+<dt>public int bt_version*<dd>The version of the Btree database.
+<dt>public int bt_nkeys*<dd>For the Btree Access Method, the number of unique keys in the database.
+If Db.DB_FAST_STAT was specified and the database was created with
+the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a> flag, the count will be exact, otherwise, the
+count will be the last saved value unless it has never been calculated,
+in which case it will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database.
+<dt>public int bt_ndata*<dd>For the Btree Access Method, the number of key/data pairs in the
+database. If Db.DB_FAST_STAT was specified the count will be the
+last saved value unless it has never been calculated, in which case it
+will be 0.
+<p>For the Recno Access Method, the exact number of records in the
+database. If the database has been configured to not renumber records
+during deletion, the count of records will only reflect undeleted
+records.
+<dt>public int bt_pagesize*<dd>Underlying database page size, in bytes.
+<dt>public int bt_minkey*<dd>The minimum keys per page.
+<dt>public int bt_re_len*<dd>The length of fixed-length records.
+<dt>public int bt_re_pad*<dd>The padding byte value for fixed-length records.
+<dt>public int bt_levels<dd>Number of levels in the database.
+<dt>public int bt_int_pg<dd>Number of database internal pages.
+<dt>public int bt_leaf_pg<dd>Number of database leaf pages.
+<dt>public int bt_dup_pg<dd>Number of database duplicate pages.
+<dt>public int bt_over_pg<dd>Number of database overflow pages.
+<dt>public int bt_free<dd>Number of pages on the free list.
+<dt>public int bt_int_pgfree<dd>Number of bytes free in database internal pages.
+<dt>public int bt_leaf_pgfree<dd>Number of bytes free in database leaf pages.
+<dt>public int bt_dup_pgfree<dd>Number of bytes free in database duplicate pages.
+<dt>public int bt_over_pgfree<dd>Number of bytes free in database overflow pages.
+</dl>
+<h3>Queue Statistics</h3>
+<p>In the case of a Queue database,
+the statistics are returned in an instance of DbQueueStat. The data
+fields are available from DbQueueStat:
+<p><dl compact>
+<p><dt>public int qs_magic*<dd>Magic number that identifies the file as a Queue file.
+<dt>public int qs_version*<dd>The version of the Queue file type.
+<dt>public int qs_nkeys*<dd>The number of records in the database. If Db.DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>public int qs_ndata*<dd>The number of records in the database. If Db.DB_FAST_STAT was
+specified the count will be the last saved value unless it has never
+been calculated, in which case it will be 0.
+<dt>public int qs_pagesize*<dd>Underlying database page size, in bytes.
+<dt>public int qs_extentsize*<dd>Underlying database extent size, in pages.
+<dt>public int qs_pages<dd>Number of pages in the database.
+<dt>public int qs_re_len*<dd>The length of the records.
+<dt>public int qs_re_pad*<dd>The padding byte value for the records.
+<dt>public int qs_pgfree<dd>Number of bytes free in database pages.
+<dt>public int qs_first_recno*<dd>First undeleted record in the database.
+<dt>public int qs_cur_recno*<dd>Next available record number.
+</dl>
+<p>The Db.stat method throws an exception that encapsulates a non-zero error value on
+failure.
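+<p>A minimal, untested sketch of requesting fast statistics from a Btree database
+follows; the file name is illustrative, and the <a href="../api_java/db_class.html">Db</a> constructor and
+<a href="../api_java/db_open.html">Db.open</a> call are assumed from their own pages in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class StatExample {
+    public static void main(String[] args) throws Exception {
+        Db db = new Db(null, 0);
+        db.open(null, "orders.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+        // Db.DB_FAST_STAT avoids traversing the database; only the starred
+        // fields listed above are meaningful.
+        DbBtreeStat st = (DbBtreeStat)db.stat(Db.DB_FAST_STAT);
+        System.out.println("page size: " + st.bt_pagesize);
+        System.out.println("key count (possibly a saved value): " + st.bt_nkeys);
+
+        db.close(0);
+    }
+}</pre></blockquote>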
+<h1>Errors</h1>
+<p>The Db.stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_sync.html b/libdb/docs/api_java/db_sync.html
new file mode 100644
index 0000000..e060447
--- /dev/null
+++ b/libdb/docs/api_java/db_sync.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.sync</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void sync(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.sync method flushes any cached information to disk.
+<p>If the database is in memory only, the Db.sync method has no effect and
+will always succeed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p><b>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data.</b> Although
+unlikely, it is possible for database corruption to happen if a system
+or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+use transactions and logging with automatic recovery; use logging and
+application-specific recovery; or edit a copy of the database, and once
+all applications using the database have successfully called
+<a href="../api_java/db_close.html">Db.close</a>, atomically replace the original database with the
+updated copy.
+<p>The Db.sync method throws an exception that encapsulates a non-zero error value on
+failure.
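+<p>A minimal, untested sketch of flushing a database after a write follows; the
+file name and record contents are illustrative, and the <a href="../api_java/db_class.html">Db</a> constructor,
+<a href="../api_java/db_open.html">Db.open</a>, and Db.put calls are assumed from their own pages in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class SyncExample {
+    public static void main(String[] args) throws Exception {
+        Db db = new Db(null, 0);
+        db.open(null, "cache.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+        // Store one illustrative key/data pair.
+        byte[] k = "greeting".getBytes();
+        byte[] v = "hello, world".getBytes();
+        Dbt key = new Dbt();
+        key.set_data(k);
+        key.set_size(k.length);
+        Dbt data = new Dbt();
+        data.set_data(v);
+        data.set_size(v.length);
+        db.put(null, key, data, 0);
+
+        db.sync(0);    // flush cached pages now, without closing the handle
+        db.close(0);
+    }
+}</pre></blockquote>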
+<h1>Errors</h1>
+<p>The Db.sync method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The Db.sync method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.sync method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_truncate.html b/libdb/docs/api_java/db_truncate.html
new file mode 100644
index 0000000..4635413
--- /dev/null
+++ b/libdb/docs/api_java/db_truncate.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.truncate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.truncate</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int truncate(DbTxn txnid, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.truncate interface empties the database, discarding all
+records it contains.
+The number of records discarded from the database is returned.
+<p>If the operation is to be transaction-protected (other than by specifying
+the Db.DB_AUTO_COMMIT flag), the <b>txnid</b> parameter is a
+transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>; otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the Db.truncate call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The Db.truncate method throws an exception that encapsulates a non-zero error value on
+failure.
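+<p>A minimal, untested sketch follows; the file name is illustrative, and the
+<a href="../api_java/db_class.html">Db</a> constructor and <a href="../api_java/db_open.html">Db.open</a> call are assumed from their own pages in
+this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class TruncateExample {
+    public static void main(String[] args) throws Exception {
+        Db db = new Db(null, 0);
+        db.open(null, "sessions.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+        // Outside a transactional environment, pass null and no flags; with
+        // transactions, pass a DbTxn handle or the Db.DB_AUTO_COMMIT flag.
+        int discarded = db.truncate(null, 0);
+        System.out.println(discarded + " records discarded");
+
+        db.close(0);
+    }
+}</pre></blockquote>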
+<h1>Errors</h1>
+<p>The Db.truncate method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.truncate method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_upgrade.html b/libdb/docs/api_java/db_upgrade.html
new file mode 100644
index 0000000..38f3d67
--- /dev/null
+++ b/libdb/docs/api_java/db_upgrade.html
@@ -0,0 +1,98 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.upgrade</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void upgrade(String file, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.upgrade method upgrades all of the databases included in the
+file <b>file</b>, if necessary. If no upgrade is necessary,
+Db.upgrade always returns success.
+<p><b>Database upgrades are done in place and are destructive. For example,
+if pages need to be allocated and no disk space is available, the
+database may be left corrupted. Backups should be made before databases
+are upgraded. See <a href="../ref/am/upgrade.html">Upgrading databases</a>
+for more information.</b>
+<p>Unlike all other database operations, Db.upgrade may only be done
+on a system with the same byte-order as the database.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_DUPSORT">Db.DB_DUPSORT</a><dd><b>This flag is only meaningful when upgrading databases from
+releases before the Berkeley DB 3.1 release.</b>
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release,
+the on-disk format of duplicate data items changed. To correctly
+upgrade the format requires applications to specify whether duplicate
+data items in the database are sorted or not. Specifying the
+Db.DB_DUPSORT flag informs Db.upgrade that the duplicates
+are sorted; otherwise they are assumed to be unsorted. Incorrectly
+specifying the value of this flag may lead to database corruption.
+<p>Further, because the Db.upgrade method upgrades a physical file
+(including all the databases it contains), it is not possible to use
+Db.upgrade to upgrade files in which some of the databases it
+includes have sorted duplicate data items, and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, if the databases do not support duplicate
+data items, or if all of the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+Db.upgrade will work correctly as long as the
+Db.DB_DUPSORT flag is correctly specified. Otherwise, the file
+cannot be upgraded using Db.upgrade; it must be upgraded
+manually by dumping and reloading the databases.
+</dl>
+<p>The Db.upgrade method throws an exception that encapsulates a non-zero error value on
+failure.
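+<p>A minimal, untested sketch follows; the file name is illustrative, the
+<a href="../api_java/db_class.html">Db</a> constructor is assumed from its own page, and the handle is assumed to
+be discarded with <a href="../api_java/db_close.html">Db.close</a> afterward:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class UpgradeExample {
+    public static void main(String[] args) throws Exception {
+        // Back up address.db first: upgrades are done in place.
+        Db db = new Db(null, 0);
+        db.upgrade("address.db", 0);   // a no-op if the file is already current
+        db.close(0);
+    }
+}</pre></blockquote>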
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db.upgrade is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db.upgrade method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The database is not in the same byte-order as the system.
+</dl>
+<p><dl compact>
+<p><dt><a name="Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a><dd>The database cannot be upgraded by this version of the Berkeley DB software.
+</dl>
+<p>The Db.upgrade method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.upgrade method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/db_verify.html b/libdb/docs/api_java/db_verify.html
new file mode 100644
index 0000000..449bb08
--- /dev/null
+++ b/libdb/docs/api_java/db_verify.html
@@ -0,0 +1,129 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Db.verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Db.verify</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void verify(String file,
+ String database, java.io.OutputStream outfile, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Db.verify method verifies the integrity of all databases in the
+file specified by the <b>file</b> argument, and optionally outputs the
+databases' key/data pairs to the file stream specified by the
+<b>outfile</b> argument.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_SALVAGE">Db.DB_SALVAGE</a><dd>Write the key/data pairs from all databases in the file to the file stream
+named in
+the <b>outfile</b> argument. The output format is the same as that
+specified for the <a href="../utility/db_dump.html">db_dump</a> utility, and can be used as input for
+the <a href="../utility/db_load.html">db_load</a> utility.
+<p>Because the key/data pairs are output in page order as opposed to the sort
+order used by <a href="../utility/db_dump.html">db_dump</a>, using Db.verify to dump key/data
+pairs normally produces less than optimal loads for Btree databases.
+</dl>
+<p>In addition, the following flags may be set by bitwise inclusively <b>OR</b>'ing them into the
+<b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_AGGRESSIVE">Db.DB_AGGRESSIVE</a><dd>Output <b>all</b> the key/data pairs in the file that can be found.
+By default, Db.verify does not assume corruption. For example,
+if a key/data pair on a page is marked as deleted, it is not then written
+to the output file. When Db.DB_AGGRESSIVE is specified, corruption
+is assumed, and any key/data pair that can be found is written. In this
+case, key/data pairs that are corrupted or have been deleted may appear
+in the output (even if the file being salvaged is in no way corrupt), and
+the output will almost certainly require editing before being loaded into
+a database.
+<p><dt><a name="Db.DB_PRINTABLE">Db.DB_PRINTABLE</a><dd>When using the Db.DB_SALVAGE flag, if characters in either the key
+or data items are printing characters (as defined by <b>isprint</b>(3)), use printing characters to represent them. This flag permits users
+to use standard text editors and tools to modify the contents of
+databases or selectively remove data from salvager output.
+<p>Note: different systems may have different notions about what characters
+are considered <i>printing characters</i>, and databases dumped in
+this manner may be less portable to external systems.
+<p><dt><a name="Db.DB_NOORDERCHK">Db.DB_NOORDERCHK</a><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>The Db.verify method normally verifies that btree keys and duplicate
+items are correctly sorted, and hash keys are correctly hashed. If the
+file being verified contains multiple databases using differing sorting
+or hashing algorithms, some of them must necessarily fail database
+verification because only one sort order or hash function can be
+specified before Db.verify is called. To verify files with
+multiple databases having differing sorting orders or hashing functions,
+first perform verification of the file as a whole by using the
+Db.DB_NOORDERCHK flag, and then individually verify the sort order
+and hashing function for each database in the file using the
+Db.DB_ORDERCHKONLY flag.
+<p><dt><a name="Db.DB_ORDERCHKONLY">Db.DB_ORDERCHKONLY</a><dd>Perform the database checks for btree and duplicate sort order and for
+hashing, skipped by Db.DB_NOORDERCHK.
+<p>When this flag is specified, a <b>database</b> argument should also be
+specified, indicating the database in the physical file which is to be
+checked. This flag is only safe to use on databases that have already
+successfully been verified using Db.verify with the
+Db.DB_NOORDERCHK flag set.
+</dl>
+<p>The database argument must be set to null except when the
+Db.DB_ORDERCHKONLY flag is set.
+<p><b>The Db.verify method does not perform any locking, even in Berkeley DB
+environments that are configured with a locking subsystem. As such, it
+should only be used on files that are not being modified by another
+thread of control.</b>
+<p>The Db.verify interface may not be called after the <a href="../api_java/db_open.html">Db.open</a>
+interface is called.
+<a name="3"><!--meow--></a>
+<p>The Db.verify method throws an exception that encapsulates a non-zero error value on
+failure, and <a href="../ref/program/errorret.html#DB_VERIFY_BAD">Db.DB_VERIFY_BAD</a> if a database is corrupted. When the
+Db.DB_SALVAGE flag is specified, the <a href="../ref/program/errorret.html#DB_VERIFY_BAD">Db.DB_VERIFY_BAD</a> return
+means that some key/data pairs in the file may not have been successfully
+output.
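+<p>A minimal, untested sketch of verification followed by a salvage pass
+follows; the file names are illustrative, and reporting of <a href="../ref/program/errorret.html#DB_VERIFY_BAD">Db.DB_VERIFY_BAD</a>
+through DbException is assumed from the description above:
+<blockquote><pre>import com.sleepycat.db.*;
+import java.io.FileOutputStream;
+
+public class VerifyExample {
+    public static void main(String[] args) throws Exception {
+        // Plain verification: corruption is reported through DbException.
+        try {
+            new Db(null, 0).verify("address.db", null, null, 0);
+            System.out.println("address.db is consistent");
+            return;
+        } catch (DbException e) {
+            System.err.println("verify: " + e);
+        }
+
+        // Salvage pass: dump whatever key/data pairs can still be read.
+        FileOutputStream dump = new FileOutputStream("address.dump");
+        try {
+            new Db(null, 0).verify("address.db", null, dump,
+                                   Db.DB_SALVAGE | Db.DB_PRINTABLE);
+        } finally {
+            dump.close();
+        }
+    }
+}</pre></blockquote>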
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If a <b>dbenv</b> argument to <a href="../api_c/db_create.html">db_create</a> was specified, the
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>Db.verify is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The Db.verify method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Db.verify was called after <a href="../api_java/db_open.html">Db.open</a>.
+</dl>
+<p>The Db.verify method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Db.verify method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_list.html">Databases and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_class.html b/libdb/docs/api_java/dbc_class.html
new file mode 100644
index 0000000..23ca279
--- /dev/null
+++ b/libdb/docs/api_java/dbc_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Dbc extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc object is the handle for a cursor into a Berkeley DB database.
+The handle is not free-threaded: a cursor may be used by only a single
+thread of control. If the cursor is to
+be used to perform operations on behalf of a transaction, the cursor
+must be opened and closed within the context of that single transaction.
+Once <a href="../api_java/dbc_close.html">Dbc.close</a> has been called, the handle may not be accessed
+again, regardless of the method's return.
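+<p>A minimal, untested sketch of a typical cursor lifecycle follows; the file
+name is illustrative, and the <a href="../api_java/db_class.html">Db</a>, <a href="../api_java/db_open.html">Db.open</a>, and <a href="../api_java/db_cursor.html">Db.cursor</a> calls, and
+the Db.DB_NEXT flag described in <a href="../api_java/dbc_get.html">Dbc.get</a>, are assumed from their own pages
+in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class CursorWalk {
+    public static void main(String[] args) throws Exception {
+        Db db = new Db(null, 0);
+        db.open(null, "address.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+        Dbc cursor = db.cursor(null, 0);   // opened outside any transaction
+        try {
+            Dbt key = new Dbt();
+            Dbt data = new Dbt();
+            // Walk every key/data pair in the database.
+            while (cursor.get(key, data, Db.DB_NEXT) != Db.DB_NOTFOUND) {
+                System.out.println(new String(key.get_data(), 0, key.get_size())
+                    + " " + new String(data.get_data(), 0, data.get_size()));
+            }
+        } finally {
+            cursor.close();   // the handle may not be used after this call
+        }
+        db.close(0);
+    }
+}</pre></blockquote>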
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_close.html b/libdb/docs/api_java/dbc_close.html
new file mode 100644
index 0000000..4ba51f8
--- /dev/null
+++ b/libdb/docs/api_java/dbc_close.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.close method discards the cursor.
+<p>It is possible for the Dbc.close method to return
+<a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">Db.DB_LOCK_DEADLOCK</a>, signaling that any enclosing transaction should
+be aborted. If the application is already intending to abort the
+transaction, this error should be ignored, and the application should
+proceed.
+<p>After Dbc.close has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The Dbc.close method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The Dbc.close method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.close method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.close method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_count.html b/libdb/docs/api_java/dbc_count.html
new file mode 100644
index 0000000..af8127e
--- /dev/null
+++ b/libdb/docs/api_java/dbc_count.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.count</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int count(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.count method returns a count of the number of duplicate data
+items for the key to which the
+cursor refers.
+If the underlying database does not support duplicate data items, the
+call will still succeed and a count of 1 will be returned.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>
+If the cursor is not yet initialized, the Dbc.count method throws an exception that encapsulates Db.EINVAL.
+Otherwise, the Dbc.count method throws an exception that encapsulates a non-zero error value on
+failure.
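+<p>A minimal, untested sketch follows; the helper name is hypothetical, and the
+<a href="../api_java/db_cursor.html">Db.cursor</a> call and the Db.DB_SET flag described in <a href="../api_java/dbc_get.html">Dbc.get</a> are assumed
+from their own pages in this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class CountDuplicates {
+    // Hypothetical helper: returns the number of data items stored for
+    // keyString, or 0 if the key is absent.
+    static int countFor(Db db, String keyString) throws DbException {
+        byte[] k = keyString.getBytes();
+        Dbt key = new Dbt();
+        key.set_data(k);
+        key.set_size(k.length);
+
+        Dbc cursor = db.cursor(null, 0);
+        try {
+            if (cursor.get(key, new Dbt(), Db.DB_SET) == Db.DB_NOTFOUND)
+                return 0;
+            return cursor.count(0);
+        } finally {
+            cursor.close();
+        }
+    }
+}</pre></blockquote>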
+<h1>Errors</h1>
+<p>The Dbc.count method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.count method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_del.html b/libdb/docs/api_java/dbc_del.html
new file mode 100644
index 0000000..b3893c9
--- /dev/null
+++ b/libdb/docs/api_java/dbc_del.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.del</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int del(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.del method deletes the key/data pair to which the cursor
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_java/db_associate.html">Db.associate</a> method, the Dbc.del method
+deletes the key/data pair from the primary database and all secondary
+indices.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor functions expecting the cursor to refer to an existing key will
+fail.
+<p>
+If the element has already been deleted, the Dbc.del method will return Db.DB_KEYEMPTY.
+If the cursor is not yet initialized, the Dbc.del method throws an exception that encapsulates Db.EINVAL.
+Otherwise, the Dbc.del method throws an exception that encapsulates a non-zero error value on
+failure.
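+<p>A minimal, untested sketch that removes every record with an empty data item
+follows; the helper name is hypothetical, and the <a href="../api_java/db_cursor.html">Db.cursor</a> call and the
+Db.DB_NEXT flag described in <a href="../api_java/dbc_get.html">Dbc.get</a> are assumed from their own pages in
+this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class PruneEmptyRecords {
+    // Hypothetical helper: removes every record whose data item is zero bytes long.
+    static int prune(Db db) throws DbException {
+        int removed = 0;
+        Dbc cursor = db.cursor(null, 0);
+        try {
+            Dbt key = new Dbt();
+            Dbt data = new Dbt();
+            while (cursor.get(key, data, Db.DB_NEXT) != Db.DB_NOTFOUND) {
+                if (data.get_size() == 0) {
+                    cursor.del(0);   // the cursor keeps its position afterward
+                    removed++;
+                }
+            }
+        } finally {
+            cursor.close();
+        }
+        return removed;
+    }
+}</pre></blockquote>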
+<h1>Errors</h1>
+<p>The Dbc.del method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>Db.DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag was
+specified to <a href="../api_java/env_open.html">DbEnv.open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.del method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.del method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.del method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_dup.html b/libdb/docs/api_java/dbc_dup.html
new file mode 100644
index 0000000..ab05280
--- /dev/null
+++ b/libdb/docs/api_java/dbc_dup.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.dup</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public Dbc dup(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.dup method creates a new cursor that uses the same transaction
+and locker ID as the original cursor. This is useful when an application
+is using locking and requires two or more cursors in the same thread of
+control.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_POSITION">Db.DB_POSITION</a><dd>The newly created cursor is initialized to refer to the same position
+in the database as the original cursor and hold the same locks. If the
+Db.DB_POSITION flag is not specified, then the created cursor is
+uninitialized and will behave like a cursor newly created using
+<a href="../api_java/db_cursor.html">Db.cursor</a>.
+</dl>
+<p>When using the Berkeley DB Concurrent Data Store product, there can be only one active write cursor
+at a time. For this reason, attempting to duplicate a cursor for which
+the <a href="../api_java/db_cursor.html#DB_WRITECURSOR">Db.DB_WRITECURSOR</a> flag was specified during creation will return
+an error.
+<p>
+If the cursor is not yet initialized, the Dbc.dup method throws an exception that encapsulates Db.EINVAL.
+Otherwise, the Dbc.dup method throws an exception that encapsulates a non-zero error value on
+failure.
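+<p>A minimal, untested sketch follows; it scans forward with a duplicate cursor
+so the caller's position is undisturbed. The helper name is hypothetical, and
+the Db.DB_NEXT flag described in <a href="../api_java/dbc_get.html">Dbc.get</a> is assumed from its own page in
+this release:
+<blockquote><pre>import com.sleepycat.db.*;
+
+public class PeekAhead {
+    // Hypothetical helper: counts the records that follow the caller's current
+    // position without moving the caller's cursor.
+    static int recordsAfter(Dbc cursor) throws DbException {
+        Dbc scan = cursor.dup(Db.DB_POSITION);   // same position, same locks
+        try {
+            int n = 0;
+            Dbt key = new Dbt();
+            Dbt data = new Dbt();
+            while (scan.get(key, data, Db.DB_NEXT) != Db.DB_NOTFOUND)
+                n++;
+            return n;
+        } finally {
+            scan.close();   // the caller's cursor is still where it was
+        }
+    }
+}</pre></blockquote>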
+<h1>Errors</h1>
+<p>The Dbc.dup method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor being duplicated was created using the
+<a href="../api_java/db_cursor.html#DB_WRITECURSOR">Db.DB_WRITECURSOR</a> flag in the Berkeley DB Concurrent Data Store product.
+</dl>
+<p>The Dbc.dup method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.dup method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_get.html b/libdb/docs/api_java/dbc_get.html
new file mode 100644
index 0000000..d1e43f4
--- /dev/null
+++ b/libdb/docs/api_java/dbc_get.html
@@ -0,0 +1,229 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.get</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(Dbt key, Dbt data, int flags)
+ throws DbException;
+public int pget(Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.get method retrieves key/data pairs from the database. The
+byte array and length of the key
+are returned in the object to which <b>key</b> refers (except for the
+case of the Db.DB_SET flag, in which the <b>key</b> object is
+unchanged), and the byte array
+and length of the data are returned in the object to which <b>data</b>
+refers.
+<p>When called on a cursor opened on a database that has been made into a
+secondary index using the <a href="../api_java/db_associate.html">Db.associate</a> method, the Dbc.get
+and Dbc.pget methods return the key from the secondary index and the
+data item from the primary database. In addition, the Dbc.pget method
+returns the key from the primary database. In databases that are not
+secondary indices, the Dbc.pget interface will always fail and
+return EINVAL.
+<p>Modifications to the database during a sequential scan will be reflected
+in the scan; that is, records inserted behind a cursor will not be
+returned while records inserted in front of a cursor will be returned.
+<p>In Queue and Recno databases, missing entries (that is, entries that
+were never explicitly created or that were created and then deleted)
+will be skipped during a sequential scan.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Return the key/data pair to which the cursor refers.
+<p>
+If the cursor key/data pair was deleted, the Dbc.get method will return Db.DB_KEYEMPTY.
+If the cursor is not yet initialized, the Dbc.get method throws an exception that encapsulates Db.EINVAL.
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a>, <a name="Db.DB_LAST">Db.DB_LAST</a><dd>The cursor is set to refer to the first (last) key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the first (last) data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_FIRST (Db.DB_LAST) flags will ignore any keys that exist
+but were never explicitly created by the application, or were created and
+later deleted.
+<p>
+If the database is empty, the Dbc.get method will return Db.DB_NOTFOUND.
+<p><dt><a name="Db.DB_GET_BOTH">Db.DB_GET_BOTH</a><dd>The Db.DB_GET_BOTH flag is identical to the Db.DB_SET flag,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p>When used with the Dbc.pget version of this interface on a
+secondary index handle, both the secondary and primary keys must be
+matched by the secondary and primary key item in the database. It is
+an error to use the Db.DB_GET_BOTH flag with the Dbc.get
+version of this interface and a cursor that has been opened on a
+secondary index handle.
+<p><dt><a name="Db.DB_GET_BOTH_RANGE">Db.DB_GET_BOTH_RANGE</a><dd>The Db.DB_GET_BOTH_RANGE flag is identical to the Db.DB_GET_BOTH
+flag, except that, in the case of any database supporting sorted
+duplicate sets, the returned key/data pair is the smallest data item
+greater than or equal to the specified data item (as determined by the
+comparison function), permitting partial matches and range searches in
+duplicate data sets.
+<p><dt><a name="Db.DB_GET_RECNO">Db.DB_GET_RECNO</a><dd>Return the record number associated with the cursor. The record number
+will be returned in <b>data</b>, as described in <a href="../api_java/dbt_class.html">Dbt</a>. The
+<b>key</b> parameter is ignored.
+<p>For Db.DB_GET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a>
+flag.
+<p><dt><a name="Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a><dd>Do not use the data value found in all of the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For Db.DB_JOIN_ITEM to be specified, the underlying cursor must have
+been returned from the <a href="../api_java/db_join.html">Db.join</a> method.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a>, <a name="Db.DB_PREV">Db.DB_PREV</a><dd>If the cursor is not yet initialized, Db.DB_NEXT (Db.DB_PREV)
+is identical to Db.DB_FIRST (Db.DB_LAST). Otherwise, the cursor
+is moved to the next (previous) key/data pair of the database, and that
+pair is returned. In the presence of duplicate key values, the value of
+the key may not change.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_NEXT (Db.DB_PREV) flag will skip any keys that exist
+but were never explicitly created by the application, or those that were
+created and later deleted.
+<p>
+If the cursor is already on the last (first) record in the database, the Dbc.get method will return Db.DB_NOTFOUND.
+<p><dt><a name="Db.DB_NEXT_DUP">Db.DB_NEXT_DUP</a><dd>If the next key/data pair of the database is a duplicate data record for
+the current key/data pair, the cursor is moved to the next key/data pair
+of the database, and that pair is returned.
+If the next key/data pair of the database is not a duplicate data record
+for the current key/data pair, the Dbc.get method will return Db.DB_NOTFOUND.
+If the cursor is not yet initialized, the Dbc.get method throws an exception that encapsulates Db.EINVAL.
+<p><dt><a name="Db.DB_NEXT_NODUP">Db.DB_NEXT_NODUP</a>, <a name="Db.DB_PREV_NODUP">Db.DB_PREV_NODUP</a><dd>If the cursor is not yet initialized, Db.DB_NEXT_NODUP
+(Db.DB_PREV_NODUP) is identical to Db.DB_FIRST
+(Db.DB_LAST). Otherwise, the cursor is moved to the next (previous)
+non-duplicate key of the database, and that key/data pair is returned.
+<p>If the database is a Queue or Recno database, Dbc.get using the
+Db.DB_NEXT_NODUP (Db.DB_PREV_NODUP) flags will ignore any keys
+that exist but were never explicitly created by the application, or those
+that were created and later deleted.
+<p>
+If no non-duplicate key/data pairs occur after (before) the cursor
+position in the database, the Dbc.get method will return Db.DB_NOTFOUND.
+<p><dt><a name="Db.DB_SET">Db.DB_SET</a><dd>Move the cursor to the specified key/data pair of the database, and
+return the datum associated with the given key.
+<p>In the presence of duplicate key values, Dbc.get will return the
+first data item for the given key.
+If no matching keys are found, the Dbc.get method will return Db.DB_NOTFOUND.
+If the database is a Queue or Recno database, and the specified key exists,
+but was never explicitly created by the application or was later deleted, the Dbc.get method will return Db.DB_KEYEMPTY.
+<p><dt><a name="Db.DB_SET_RANGE">Db.DB_SET_RANGE</a><dd>The Db.DB_SET_RANGE flag is identical to the Db.DB_SET flag,
+except that in the case of the Btree access method, the key is returned
+as well as the data item and the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison method), permitting partial key matches and range
+searches.
+<p><dt><a name="Db.DB_SET_RECNO">Db.DB_SET_RECNO</a><dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The <b>data</b> field of the
+specified <b>key</b>
+must be a byte array containing a record number, as described in
+<a href="../api_java/dbt_class.html">Dbt</a>. This determines the record to be retrieved.
+<p>For Db.DB_SET_RECNO to be specified, the underlying database must be
+of type Btree, and it must have been created with the <a href="../api_java/db_set_flags.html#DB_RECNUM">Db.DB_RECNUM</a>
+flag.
+</dl>
+<p>In addition, the following flags may be set by
+bitwise inclusively <b>OR</b>'ing them into the <b>flags</b> parameter:
+<p><dl compact>
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>Read modified but not yet committed data. Silently ignored if the
+<a href="../api_java/db_open.html#DB_DIRTY_READ">Db.DB_DIRTY_READ</a> flag was not specified when the underlying
+database was opened.
+<p><dt><a name="Db.DB_MULTIPLE">Db.DB_MULTIPLE</a><dd>Return multiple data items. The buffer to which the <b>data</b>
+argument refers is filled with the specified key's data items. If the
+first data item associated with the key cannot fit into the buffer, the
+size field of the <b>data</b> argument is set to the length needed to
+retrieve it, and a <a href="../api_java/memp_class.html">DbMemoryException</a> is thrown. Subsequent calls with both the
+Db.DB_NEXT_DUP and Db.DB_MULTIPLE flags specified will return
+additional data items associated with the current key or
+<a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a> if there is no additional data items to return.
+<p>If Db.DB_MULTIPLE is specified for the Queue and Recno access
+methods, the buffer will be filled with as many data records as
+possible. The record number of the first record will be returned in
+the <b>key</b> argument. The record number of each subsequent returned
+record must be calculated from this value.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The Db.DB_MULTIPLE flag may only be used with the
+Db.DB_CURRENT, Db.DB_FIRST, Db.DB_GET_BOTH,
+Db.DB_NEXT, Db.DB_NEXT_DUP, Db.DB_NEXT_NODUP,
+Db.DB_SET, Db.DB_SET_RANGE, and Db.DB_SET_RECNO
+options.
+<p>The Db.DB_MULTIPLE flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_java/db_associate.html">Db.associate</a> method.
+<p>See <a href="../api_java/dbt_bulk_class.html">DbMultipleDataIterator</a> for more information.
+<p><dt><a name="Db.DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a><dd>Return multiple key and data pairs. The buffer to which the
+<b>data</b> argument refers is filled with key and data pairs. If the
+first key and data pair cannot fit into the buffer, the size field of
+the <b>data</b> argument is set to the length needed to retrieve them,
+and a <a href="../api_java/memp_class.html">DbMemoryException</a> is thrown.
+<p>The buffer to which the <b>data</b> argument refers should be large
+relative to the page size of the underlying database, aligned for
+unsigned integer access, and be a multiple of 1024 bytes in size.
+<p>The Db.DB_MULTIPLE_KEY flag may only be used with the
+Db.DB_CURRENT, Db.DB_FIRST, Db.DB_GET_BOTH,
+Db.DB_NEXT, Db.DB_NEXT_NODUP, Db.DB_SET,
+Db.DB_SET_RANGE, and Db.DB_SET_RECNO options. The
+Db.DB_MULTIPLE_KEY flag may not be used when accessing databases
+made into secondary indices using the <a href="../api_java/db_associate.html">Db.associate</a> method.
+<p>See <a href="../api_java/dbt_bulk_class.html">DbMultipleKeyDataIterator</a> for more information.
+<p><dt><a name="Db.DB_RMW">Db.DB_RMW</a><dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag can eliminate deadlock during a read-modify-write
+cycle by acquiring the write lock during the read part of the cycle so
+that another thread of control acquiring a read lock for the same item,
+in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p>
+Otherwise, the Dbc.get method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>If Dbc.get fails for any reason, the state of the cursor will be
+unchanged.
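+<p>For example, the following sketch walks all key/data pairs in a database
+using the Db.DB_NEXT flag. The <b>db</b> handle is assumed to have been
+opened elsewhere, and treating the returned byte arrays as strings is an
+illustrative assumption only:
+<p><blockquote><pre>
+Dbc dbc = db.cursor(null, 0);
+try {
+    Dbt key = new Dbt();
+    Dbt data = new Dbt();
+    // Db.DB_NOTFOUND is returned when the scan reaches the end.
+    while (dbc.get(key, data, Db.DB_NEXT) == 0) {
+        String k = new String(key.get_data(), 0, key.get_size());
+        String d = new String(data.get_data(), 0, data.get_size());
+        System.out.println(k + " : " + d);
+    }
+} finally {
+    dbc.close();
+}
+</pre></blockquote>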
+<h1>Errors</h1>
+<p>The Dbc.get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>Db.DB_SECONDARY_BAD<dd>A secondary index references a nonexistent primary key.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cursor was not currently initialized.
+<p>The Dbc.pget interface was called with a cursor that does not
+refer to a secondary index.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>If the requested item could not be returned due to insufficient memory,
+the Dbc.get method will fail and
+throw a <a href="../api_java/memp_class.html">DbMemoryException</a> exception.
+<p>The Dbc.get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.get method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_list.html b/libdb/docs/api_java/dbc_list.html
new file mode 100644
index 0000000..63e210e
--- /dev/null
+++ b/libdb/docs/api_java/dbc_list.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Cursors and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Cursors and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Cursors and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/db_cursor.html">Db.cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../api_java/dbc_close.html">Dbc.close</a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_java/dbc_count.html">Dbc.count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><a href="../api_java/dbc_del.html">Dbc.del</a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_java/dbc_dup.html">Dbc.dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_java/dbc_get.html">Dbc.get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_java/dbc_get.html">Dbc.pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_java/dbc_put.html">Dbc.put</a></td><td>Store by cursor</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbc_put.html b/libdb/docs/api_java/dbc_put.html
new file mode 100644
index 0000000..a79e23c
--- /dev/null
+++ b/libdb/docs/api_java/dbc_put.html
@@ -0,0 +1,154 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbc.put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbc.put</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void put(Dbt key, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The Dbc.put method stores key/data pairs into the database.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_AFTER">Db.DB_AFTER</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately after the current cursor position.
+It is an error to specify Db.DB_AFTER if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+Db.DB_AFTER if the underlying Recno database was not created with
+the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag. If the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure to which the <b>key</b> argument refers. The initial
+value of the <b>key</b> parameter is ignored. See <a href="../api_java/db_open.html">Db.open</a>
+for more information.
+<p>The Db.DB_AFTER flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, the Dbc.put method will return EINVAL.
+<p><dt><a name="Db.DB_BEFORE">Db.DB_BEFORE</a><dd>In the case of the Btree and Hash access methods, insert the data
+element as a duplicate element of the key to which the cursor refers.
+The new element appears immediately before the current cursor position.
+It is an error to specify Db.DB_BEFORE if the underlying Btree or
+Hash database does not support duplicate data items. The <b>key</b>
+parameter is ignored.
+<p>In the case of the Recno access method, it is an error to specify
+Db.DB_BEFORE if the underlying Recno database was not created with
+the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag. If the <a href="../api_java/db_set_flags.html#DB_RENUMBER">Db.DB_RENUMBER</a> flag was
+specified, a new key is created, the current record and all records
+after it are automatically renumbered, and the key of the new record is
+returned in the structure to which the <b>key</b> argument refers.
+The initial value of the <b>key</b> parameter is ignored. See
+<a href="../api_java/db_open.html">Db.open</a> for more information.
+<p>The Db.DB_BEFORE flag may not be specified to the Queue access method.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree or Recno, the operation will
+succeed.
+<p>If the cursor is not yet initialized or if a duplicate sort function
+has been specified, Dbc.put will return EINVAL.
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Overwrite the data of the key/data pair to which the cursor refers with
+the specified data item. The <b>key</b> parameter is ignored.
+<p>If a duplicate sort function has been specified and the data item of the
+referenced key/data pair does not compare equally to the <b>data</b>
+parameter, Dbc.put will return EINVAL.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, Dbc.put will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+If the underlying access method is Btree, Queue, or Recno, the operation
+will succeed.
+<p>If the cursor is not yet initialized, Dbc.put will return EINVAL.
+<p><dt><a name="Db.DB_KEYFIRST">Db.DB_KEYFIRST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database and no duplicate sort function
+has been specified, the inserted data item is added as the first of the
+data items for that key.
+<p>The Db.DB_KEYFIRST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_KEYLAST">Db.DB_KEYLAST</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the underlying database supports duplicate data items, and if the
+key already exists in the database and a duplicate sort function has
+been specified, the inserted data item is added in its sorted location.
+If the key already exists in the database, and no duplicate sort
+function has been specified, the inserted data item is added as the last
+of the data items for that key.
+<p>The Db.DB_KEYLAST flag may not be specified to the Queue or Recno
+access methods.
+<p><dt><a name="Db.DB_NODUPDATA">Db.DB_NODUPDATA</a><dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database, unless it already exists in the database.
+If the key/data pair already appears in the database, <a href="../api_java/dbc_put.html#DB_KEYEXIST">Db.DB_KEYEXIST</a>
+is returned. The Db.DB_NODUPDATA flag may only be specified if
+the underlying database has been configured to support sorted duplicate
+data items.
+<p>The Db.DB_NODUPDATA flag may not be specified to the Queue or Recno
+access methods.
+</dl>
+<p>
+Otherwise, the Dbc.put method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>If Dbc.put fails for any reason, the state of the cursor will be
+unchanged. If Dbc.put succeeds and an item is inserted into the
+database, the cursor is always positioned to refer to the newly inserted
+item.
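+<p>For example, the following sketch inserts a new pair with Db.DB_KEYFIRST
+and then overwrites its data with Db.DB_CURRENT. It assumes a Btree
+database handle <b>db</b> with no duplicate sort function configured; the
+key and data values are illustrative only:
+<p><blockquote><pre>
+Dbc dbc = db.cursor(null, 0);
+Dbt key = new Dbt("color".getBytes());
+dbc.put(key, new Dbt("red".getBytes()), Db.DB_KEYFIRST);
+// The cursor now refers to the newly inserted item, so it can be
+// overwritten in place; the key parameter is ignored for DB_CURRENT.
+dbc.put(key, new Dbt("blue".getBytes()), Db.DB_CURRENT);
+dbc.close();
+</pre></blockquote>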
+<h1>Errors</h1>
+<p>The Dbc.put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EACCES<dd>An attempt was made to modify a read-only database.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The Db.DB_BEFORE or Db.DB_AFTER flags were specified, and the
+underlying access method is Queue.
+<p>An attempt was made to add a record to a fixed-length database that was too
+large to fit.
+<p>An attempt was made to add a record to a secondary index.
+</dl>
+<p><dl compact>
+<p><dt>EPERM <dd>Write attempted on read-only cursor when the <a href="../api_java/env_open.html#DB_INIT_CDB">Db.DB_INIT_CDB</a> flag was
+specified to <a href="../api_java/env_open.html">DbEnv.open</a>.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+Dbc.put method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The Dbc.put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the Dbc.put method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbc_class.html">Dbc</a>
+<h1>See Also</h1>
+<a href="../api_java/dbc_list.html">Database Cursors and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbt_bulk_class.html b/libdb/docs/api_java/dbt_bulk_class.html
new file mode 100644
index 0000000..96d5e80
--- /dev/null
+++ b/libdb/docs/api_java/dbt_bulk_class.html
@@ -0,0 +1,93 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMultipleDataIterator</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMultipleDataIterator</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbMultipleDataIterator
+{
+ public DbMultipleDataIterator(Dbt data);
+<p>
+ public boolean next(Dbt data);
+}
+<p>
+public class DbMultipleKeyDataIterator
+{
+ public DbMultipleKeyDataIterator(Dbt data);
+<p>
+ public boolean next(Dbt key, Dbt data);
+}
+<p>
+public class DbMultipleRecnoDataIterator
+{
+ public DbMultipleRecnoDataIterator(Dbt data);
+<p>
+ public boolean next(Dbt key, Dbt data);
+}
+</pre></h3>
+<h1>Description</h1>
+<p>If either of the <a href="../api_java/dbc_get.html#DB_MULTIPLE">Db.DB_MULTIPLE</a> or <a href="../api_java/dbc_get.html#DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a> flags
+were specified to the <a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> method, the data
+<a href="../api_java/dbt_class.html">Dbt</a> returned by those interfaces will refer to a buffer that
+is filled with data. Access to that data is through the following
+classes.
+<p>All instances of the bulk retrieval classes may be used only once,
+and only to traverse the bulk retrieval buffer in the forward
+direction. However, they are nondestructive, so multiple iterators can be
+instantiated and used on the same returned data <a href="../api_java/dbt_class.html">Dbt</a>.
+<p><dl compact>
+<p><dt>DbMultipleDataIterator<dd>This class is used to iterate through data returned using the
+<a href="../api_java/dbc_get.html#DB_MULTIPLE">Db.DB_MULTIPLE</a> flag from a database belonging to any access method.
+The constructor takes the data <a href="../api_java/dbt_class.html">Dbt</a> returned by the call to
+<a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> that used the <a href="../api_java/dbc_get.html#DB_MULTIPLE">Db.DB_MULTIPLE</a>
+flag. The next() method takes a <a href="../api_java/dbt_class.html">Dbt</a> that will be filled in with
+a reference to a buffer, a size, and an offset that together yield the
+next data item in the original bulk retrieval buffer. The next() method
+returns false if no more data are available, and true otherwise.
+<p><dt>DbMultipleKeyDataIterator<dd>This class is used to iterate through data returned using the
+<a href="../api_java/dbc_get.html#DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a> flag from a database belonging to the Btree or
+Hash access methods. The constructor takes the data <a href="../api_java/dbt_class.html">Dbt</a>
+returned by the call to <a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> that used the
+<a href="../api_java/dbc_get.html#DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a> flag. The next() method takes two <a href="../api_java/dbt_class.html">Dbt</a>s,
+one for a key and one for a data item, that will each be filled in with
+a reference to a buffer, a size, and an offset that together yield the
+next key or data item in the original bulk retrieval buffer. The next()
+method returns false if no more data are available, and true
+otherwise.
+<p><dt>DbMultipleRecnoDataIterator<dd>This class is used to iterate through data returned using the
+<a href="../api_java/dbc_get.html#DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a> flag from a database belonging to the Recno or
+Queue access methods. The constructor takes the data <a href="../api_java/dbt_class.html">Dbt</a>
+returned by the call to <a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> that used the
+<a href="../api_java/dbc_get.html#DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a> flag. The next() method takes two <a href="../api_java/dbt_class.html">Dbt</a>s,
+one for a key and one for a data item, that will each be filled in with
+a reference to a buffer, a size, and an offset that together yield the
+next key or data item in the original bulk retrieval buffer. The record
+number contained in the key item should be accessed using the
+<a href="../api_java/dbt_class.html">Dbt.get_recno_key_data</a> method. The next() method returns false if no
+more data are available, and true otherwise.
+</dl>
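+<p>For example, the following sketch retrieves all duplicate data items for
+a single key in one call using Db.DB_MULTIPLE and then walks the returned
+buffer with DbMultipleDataIterator. The <b>db</b> handle, key value, and
+buffer size are illustrative assumptions:
+<p><blockquote><pre>
+Dbt key = new Dbt("somekey".getBytes());
+Dbt data = new Dbt(new byte[1024 * 1024]);   // multiple of 1024 bytes
+data.set_ulen(1024 * 1024);
+data.set_flags(Db.DB_DBT_USERMEM);
+Dbc dbc = db.cursor(null, 0);
+if (dbc.get(key, data, Db.DB_SET | Db.DB_MULTIPLE) == 0) {
+    DbMultipleDataIterator it = new DbMultipleDataIterator(data);
+    Dbt item = new Dbt();
+    while (it.next(item))
+        System.out.println(new String(item.get_data(),
+            item.get_offset(), item.get_size()));
+}
+dbc.close();
+</pre></blockquote>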
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/dbt_class.html b/libdb/docs/api_java/dbt_class.html
new file mode 100644
index 0000000..d2320fa
--- /dev/null
+++ b/libdb/docs/api_java/dbt_class.html
@@ -0,0 +1,238 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Dbt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>Dbt</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class Dbt extends Object
+{
+ public Dbt(byte[] data);
+ public Dbt(byte[] data, int off, int len);
+<p>
+ public void set_data(byte[] data);
+ public byte[] get_data();
+<p>
+ public Object get_object() throws java.io.IOException, java.lang.ClassNotFoundException;
+ public void set_object(Object serialobj) throws java.io.IOException;
+<p>
+ public void set_recno_key_data(int recno);
+ public int get_recno_key_data();
+<p>
+ public void set_offset(int off);
+ public int get_offset();
+<p>
+ public int get_size();
+ public void set_size(int size);
+<p>
+ public int get_ulen();
+ public void set_ulen(int ulen);
+<p>
+ public int get_dlen();
+ public void set_dlen(int dlen);
+<p>
+ public int get_doff();
+ public void set_doff(int doff);
+<p>
+ public int get_flags();
+ public void set_flags(int flags);
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the Dbt class,
+used to encode keys and data items in a database.
+<a name="3"><!--meow--></a>
+<h3>Key/Data Pairs</h3>
+<p>Storage and retrieval for the <a href="../api_java/db_class.html">Db</a> access methods are based on
+key/data pairs. Both key and data items are represented by Dbt
+objects. Key and data byte strings may refer to strings of zero length
+up to strings of essentially unlimited length. See
+<a href="../ref/am_misc/dbsizes.html">Database limits</a> for more
+information.
+<p>The Dbt class provides simple access to an underlying data
+structure, whose elements can be examined or changed using the
+<b>set_</b> or <b>get_</b> methods. The remainder of the manual
+page sometimes refers to these accesses using the underlying name; for
+example, <b>ulen</b> rather than Dbt.get_ulen and
+Dbt.set_ulen. Dbt can be subclassed, providing a way
+to associate with it additional data or references to other
+structures.
+<p>The constructors set all elements of the underlying structure to zero.
+The constructor with one argument has the effect of setting all elements
+to zero except for the <b>data</b> and <b>size</b> elements. The
+constructor with three arguments has the effect of setting all elements
+to zero except for the <b>data</b>, <b>size</b> and <b>offset</b>
+elements.
+<p>In the case where the <b>flags</b> structure element is set to 0, when
+being provided a key or data item by the application, the Berkeley DB package
+expects the <b>data</b> object to be set to a byte array of
+<b>size</b> bytes. When returning a key/data item to the application,
+the Berkeley DB package will store into the <b>data</b> object a byte array
+of <b>size</b> bytes. During a get operation, if none of the
+Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC or Db.DB_DBT_USERMEM
+flags are specified, the operation occurs as if Db.DB_DBT_MALLOC
+was used.
+<p>Access to Dbt objects is not re-entrant. In particular, if
+multiple threads simultaneously access the same Dbt object using
+<a href="../api_java/db_class.html">Db</a> API calls, the results are undefined, and may result in a
+crash. One easy way to avoid problems is to use Dbt objects
+that are
+created as local variables and not shared among threads.
+<p>The elements of the structure underlying the Dbt class are defined as follows:
+<p><dl compact>
+<p><dt>byte[] <a name="data">data</a>;<dd>A byte array containing the data.
+This element is accessed using Dbt.get_data and
+Dbt.set_data, and may be initialized using one
+of the constructors.
+Note that the array data is not copied immediately, but only when the
+Dbt is used.
+<p>The Java API also provides helper methods Dbt.get_object and
+Dbt.set_object to encode and decode objects using the Java
+serialization API. These methods use <i>ObjectInputStream</i> and
+<i>ObjectOutputStream</i> internally to manipulate an array of bytes
+representing an object (and any connected objects). All of the rules of Java
+Serialization apply. In particular, the object(s) must implement either the
+<i>Serializable</i> or <i>Externalizable</i> interface. Note that the
+serialized encoding trades efficiency for convenience.
+<p><dt>int recno_key_data;<dd>The data representing a key used with a Recno database. Recno database
+records are ordered by integer keys starting at 1. When the
+Dbt.set_recno_key_data method is called, the data, size and offset
+fields in the Dbt are implicitly set to hold a byte array representation
+of the integer key.
+<p><dt>int offset;<dd>The number of bytes offset into the <b>data</b> array to determine the
+portion of the array actually used. This element is accessed using
+Dbt.get_offset and Dbt.set_offset. Although Java
+normally maintains proper alignment of byte arrays, the set_offset
+method can be used to specify unaligned addresses. Unaligned address
+accesses that are not supported by the underlying hardware may be
+reported as an exception, or may stop the running Java program.
+<p><dt>int size;<dd>The length of <b>data</b>, in bytes.
+This element is accessed using Dbt.get_size and
+Dbt.set_size, and may be initialized
+implicitly to the length of the data array with the constructor having
+one argument.
+<p><dt>int ulen;<dd>The size of the user's buffer (referred to by <b>data</b>), in bytes.
+This location is not written by the <a href="../api_java/db_class.html">Db</a> methods.
+<p>Note that applications can determine the length of a record by setting
+the <b>ulen</b> to 0 and checking the return value found in <b>size</b>.
+See the Db.DB_DBT_USERMEM flag for more information.
+<p>This element is accessed using
+Dbt.get_ulen and Dbt.set_ulen.
+<p><dt>int dlen;<dd>The length of the partial record being read or written by the application,
+in bytes.
+See the Db.DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt.get_dlen, and Dbt.set_dlen.
+<p><dt>int doff;<dd>The offset of the partial record being read or written by the application,
+in bytes.
+See the Db.DB_DBT_PARTIAL flag for more information.
+This element is accessed using
+Dbt.get_doff and Dbt.set_doff.
+<p><dt>int flags;<dd>This element is accessed using Dbt.get_flags and
+Dbt.set_flags.
+<p>The <b>flags</b> value must be set to the bitwise inclusive <b>OR</b> of one or more of
+the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a><dd>When this flag is set, Berkeley DB will allocate memory for the returned key
+or data item
+and return a byte array containing the data in the <b>data</b> field of
+the key or data Dbt object.
+<p>If Db.DB_DBT_MALLOC is specified, Berkeley DB allocates a properly sized
+byte array to contain the data. This can be convenient if you know little
+about the nature of the data, specifically the size of data in the
+database. However, if your application makes repeated calls to retrieve
+keys or data, you may notice increased garbage collection due to this
+allocation. If you know the maximum size of data you are retrieving, you
+might decrease the memory burden and speed your application by allocating
+your own byte array and using Db.DB_DBT_USERMEM. Even if you don't
+know the maximum size, you can use this option and reallocate your array
+whenever your retrieval API call
+throws a <a href="../api_java/memp_class.html">DbMemoryException</a>.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC, and Db.DB_DBT_USERMEM.
+<p><dt><a name="Db.DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a><dd>When this flag is set Berkeley DB
+will return the data in the <b>data</b> field of the key or data
+Dbt object, reusing the existing byte array if it is large
+enough, or allocating a new one of the appropriate size.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC, and Db.DB_DBT_USERMEM.
+<p><dt><a name="Db.DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a><dd>The <b>data</b> field of the key or data object must refer to memory
+that is at least <b>ulen</b> bytes in length. If the length of the
+requested item is less than or equal to that number of bytes, the item
+is copied into the memory referred to by the <b>data</b> field.
+Otherwise, the <b>size</b> field is set to the length needed for the
+requested item, and the error ENOMEM is returned.
+<p>If Db.DB_DBT_USERMEM is specified, the data field of the Dbt
+must be set to an appropriately sized byte array.
+<p>It is an error to specify more than one of Db.DB_DBT_MALLOC,
+Db.DB_DBT_REALLOC, and Db.DB_DBT_USERMEM.
+</dl>
+<p>If Db.DB_DBT_MALLOC or Db.DB_DBT_REALLOC is specified, Berkeley DB
+allocates a properly sized byte array to contain the data. This can be
+convenient if you know little about the nature of the data, specifically
+the size of data in the database. However, if your application makes
+repeated calls to retrieve keys or data, you may notice increased garbage
+collection due to this allocation. If you know the maximum size of data
+you are retrieving, you might decrease the memory burden and speed your
+application by allocating your own byte array and using
+Db.DB_DBT_USERMEM. Even if you don't know the maximum size, you can
+use this option and reallocate your array whenever your retrieval API call
+throws a <a href="../api_java/memp_class.html">DbMemoryException</a>.
+<p><dl compact>
+<p><dt><a name="Db.DB_DBT_PARTIAL">Db.DB_DBT_PARTIAL</a><dd>Do partial retrieval or storage of an item. If the calling application
+is doing a get, the <b>dlen</b> bytes starting <b>doff</b> bytes from
+the beginning of the retrieved data record are returned as if they
+comprised the entire record. If any or all of the specified bytes do
+not exist in the record, the get is successful, and any existing bytes
+are returned.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial retrieval was done using a Dbt having a <b>dlen</b>
+field of 20 and a <b>doff</b> field of 85, the get call would succeed,
+the <b>data</b> field would refer to the last 15 bytes of the record,
+and the <b>size</b> field would be set to 15.
+<p>If the calling application is doing a put, the <b>dlen</b> bytes starting
+<b>doff</b> bytes from the beginning of the specified key's data record
+are replaced by the data specified by the <b>data</b> and <b>size</b>
+objects.
+If <b>dlen</b> is smaller than <b>size</b>, the record will grow; if
+<b>dlen</b> is larger than <b>size</b>, the record will shrink.
+If the specified bytes do not exist, the record will be extended using nul
+bytes as necessary, and the put call will succeed.
+<p>It is an error to attempt a partial put using the <a href="../api_java/db_put.html">Db.put</a>
+method in a database that supports duplicate records.
+Partial puts in databases supporting duplicate records must be done
+using a <a href="../api_java/dbc_class.html">Dbc</a> method.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+<b>size</b> values in Queue or Recno databases with fixed-length records.
+<p>For example, if the data portion of a retrieved record was 100 bytes,
+and a partial put was done using a Dbt having a <b>dlen</b>
+field of 20, a <b>doff</b> field of 85, and a <b>size</b> field of 30,
+the resulting record would be 115 bytes in length, where the last 30
+bytes would be those specified by the put call.
+</dl>
+</dl>
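+<p>For example, the following sketch retrieves a single item into a buffer
+allocated by the application using the Db.DB_DBT_USERMEM flag. The
+<b>db</b> handle, key value, and buffer size are illustrative assumptions:
+<p><blockquote><pre>
+Dbt key = new Dbt("fruit".getBytes());
+Dbt data = new Dbt(new byte[1024]);
+data.set_ulen(1024);                   // size of the supplied buffer
+data.set_flags(Db.DB_DBT_USERMEM);
+if (db.get(null, key, data, 0) == 0)
+    System.out.println(new String(data.get_data(), 0, data.get_size()));
+</pre></blockquote>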
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/deadlock_class.html b/libdb/docs/api_java/deadlock_class.html
new file mode 100644
index 0000000..99b2324
--- /dev/null
+++ b/libdb/docs/api_java/deadlock_class.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbDeadlockException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbDeadlockException</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbDeadlockException extends DbException { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbDeadlockException class and
+how it is used by the various Db* classes.
+<p>A DbDeadlockException is thrown when multiple threads competing
+for a lock are deadlocked. One of the threads' transactions is selected
+for termination, and a DbDeadlockException is thrown to that thread.
+<p>See <a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a> for more information.
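+<p>For example, a typical response is to abort the losing transaction and
+retry it. This sketch assumes an open environment handle <b>dbenv</b>;
+the work performed inside the transaction is elided:
+<p><blockquote><pre>
+boolean done = false;
+while (!done) {
+    DbTxn txn = dbenv.txn_begin(null, 0);
+    try {
+        // ... Db and Dbc operations performed under txn ...
+        txn.commit(0);
+        done = true;
+    } catch (DbDeadlockException dde) {
+        txn.abort();   // release locks held by the loser, then retry
+    }
+}
+</pre></blockquote>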
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_class.html b/libdb/docs/api_java/env_class.html
new file mode 100644
index 0000000..1e0f7e2
--- /dev/null
+++ b/libdb/docs/api_java/env_class.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbEnv extends Object
+{
+ public DbEnv(int flags) throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv object is the handle for a Berkeley DB environment -- a
+collection including support for some or all of caching, locking,
+logging and transaction subsystems, as well as databases and log files.
+Methods off the DbEnv handle are used to configure the
+environment as well as to operate on subsystems and databases in the
+environment.
+<p>DbEnv handles are free-threaded if the <a href="../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag
+is specified to the <a href="../api_java/env_open.html">DbEnv.open</a> method when the environment is opened.
+The DbEnv handle should not be closed while any other handle
+remains open that is using it as a reference (for example, <a href="../api_java/db_class.html">Db</a>
+or <a href="../api_java/txn_class.html">DbTxn</a>). Once either the <a href="../api_java/env_close.html">DbEnv.close</a> or
+<a href="../api_java/env_remove.html">DbEnv.remove</a> methods are called, the handle may not be accessed again,
+regardless of the method's return.
+<p>The constructor creates the DbEnv object. The constructor
+allocates memory internally; calling the <a href="../api_java/env_close.html">DbEnv.close</a> or
+<a href="../api_java/env_remove.html">DbEnv.remove</a> methods will free that memory.
+<p>The following <b>flags</b> value may be specified:
+<p><dl compact>
+<p><dt><a name="Db.DB_CLIENT">Db.DB_CLIENT</a><dd>Create a client environment to connect to a server.
+<p>The Db.DB_CLIENT flag indicates to the system that this environment
+is remote on a server. The use of this flag causes the environment
+methods to use functions that call a server instead of local functions.
+Prior to making any environment or database method calls, the
+application must call the <a href="../api_java/env_set_rpc_server.html">DbEnv.set_rpc_server</a> method to
+establish the connection to the server.
+</dl>
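+<p>For example, the following sketch creates and opens a local (non-client)
+environment with transaction support. The home directory and flag
+combination are illustrative only:
+<p><blockquote><pre>
+DbEnv dbenv = new DbEnv(0);
+dbenv.open("/var/dbhome",
+    Db.DB_CREATE | Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+    Db.DB_INIT_LOG | Db.DB_INIT_TXN, 0);
+// ... open databases and perform operations ...
+dbenv.close(0);
+</pre></blockquote>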
+<h1>Class</h1>
+DbEnv
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_close.html b/libdb/docs/api_java/env_close.html
new file mode 100644
index 0000000..4d25820
--- /dev/null
+++ b/libdb/docs/api_java/env_close.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.close method closes the Berkeley DB environment, freeing any
+allocated resources and closing any underlying subsystems.
+<p>Calling DbEnv.close does not imply closing any databases that
+were opened in the environment, and all databases opened in the
+environment should be closed before the environment is closed.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_LOCK">Db.DB_INIT_LOCK</a> flag,
+calling DbEnv.close does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a>
+flag, calling DbEnv.close implies calls to <a href="../api_java/memp_fclose.html">DbMpoolFile.close</a> for
+any remaining open files in the memory pool that were returned to this
+process by calls to <a href="../api_java/memp_fopen.html">DbMpoolFile.open</a>. It does not imply a call to
+<a href="../api_java/memp_fsync.html">DbMpoolFile.sync</a> for those files.
+<p>Where the environment was initialized with the <a href="../api_java/env_open.html#DB_INIT_TXN">Db.DB_INIT_TXN</a> flag,
+calling DbEnv.close aborts any unresolved transactions.
+Applications should not depend on this behavior for transactions
+involving Berkeley DB databases; all such transactions should be explicitly
+resolved. The problem with depending on this semantic is that aborting
+an unresolved transaction involving database operations requires a
+database handle. Because the database handles should have been closed before
+calling DbEnv.close, it will not be possible to abort the
+transaction, and recovery will have to be run on the Berkeley DB environment
+before further operations are done.
+<p>Where log cursors were created using the <a href="../api_java/log_cursor.html">DbEnv.log_cursor</a> method, calling
+DbEnv.close does not imply closing those cursors.
+<p>In multithreaded applications, only a single thread may call
+DbEnv.close.
+<p>After DbEnv.close has been called, regardless of its return, the
+Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv.close method throws an exception that encapsulates a non-zero error value on
+failure.
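+<p>For example, an orderly shutdown closes cursors and databases before the
+environment itself. The handle names are illustrative only:
+<p><blockquote><pre>
+dbc.close();      // close any open cursors first
+db.close(0);      // then close each database handle
+dbenv.close(0);   // finally close the environment
+</pre></blockquote>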
+<h1>Errors</h1>
+<p>The DbEnv.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.close method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_dbremove.html b/libdb/docs/api_java/env_dbremove.html
new file mode 100644
index 0000000..1911723
--- /dev/null
+++ b/libdb/docs/api_java/env_dbremove.html
@@ -0,0 +1,85 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.dbremove</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void dbremove(DbTxn txnid, String file, String database, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.dbremove method removes the database specified by the
+<b>file</b> and <b>database</b> arguments. If no <b>database</b> is
+specified, the underlying file represented by <b>file</b> is removed,
+incidentally removing all databases that it contained.
+<p>Applications should never remove databases with open <a href="../api_java/db_class.html">Db</a> handles,
+or in the case of removing a file, when any database in the file has an
+open handle. For example, some architectures do not permit the removal
+of files with open system handles. On these architectures, attempts to
+remove databases currently in use by any thread of control in the system
+will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>;
+otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the DbEnv.dbremove call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DbEnv.dbremove method throws an exception that encapsulates a non-zero error value on
+failure.
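+<p>For example, the following sketch removes a single database from a file
+under the protection of an internally managed transaction. The file and
+database names are illustrative only:
+<p><blockquote><pre>
+dbenv.dbremove(null, "inventory.db", "parts", Db.DB_AUTO_COMMIT);
+</pre></blockquote>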
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DbEnv.dbremove is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv.dbremove method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.dbremove method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.dbremove method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.dbremove method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_dbrename.html b/libdb/docs/api_java/env_dbrename.html
new file mode 100644
index 0000000..e96e4b9
--- /dev/null
+++ b/libdb/docs/api_java/env_dbrename.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.dbrename</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void dbrename(DbTxn txnid,
+ String file, String database, String newname, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.dbrename method renames the database specified by the
+<b>file</b> and <b>database</b> arguments to <b>newname</b>. If no
+<b>database</b> is specified, the underlying file represented by
+<b>file</b> is renamed, incidentally renaming all databases that it
+contained.
+<p>Applications should not rename databases that are currently in use. If
+an underlying file is being renamed and logging is currently enabled in
+the database environment, no database in the file may be open when the
+DbEnv.dbrename method is called. In particular, some architectures do
+not permit renaming files with open handles. On these architectures,
+attempts to rename databases that are currently in use by any thread of
+control in the system will fail.
+<p>If the operation is to be transaction-protected, the <b>txnid</b>
+parameter is a transaction handle returned from <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>;
+otherwise, null.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>Enclose the DbEnv.dbrename call within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+</dl>
+<p>The DbEnv.dbrename method throws an exception that encapsulates a non-zero error value on
+failure.
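+<p>For example, an application might rename a database file as follows
+(an illustrative sketch only; the <b>dbenv</b> handle and the file
+names are placeholders, and the enclosing method must handle or declare
+the DbException and FileNotFoundException exceptions):
+<p><pre>
+// Rename the file "access.db" to "access-old.db", wrapping the
+// operation in its own transaction via the Db.DB_AUTO_COMMIT flag.
+dbenv.dbrename(null, "access.db", null, "access-old.db", Db.DB_AUTO_COMMIT);
+</pre>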
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The
+environment variable <b>DB_HOME</b> may be used as the path of the
+database environment home.
+<p>DbEnv.dbrename is affected by any database directory specified using the
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a> method, or by setting the "set_data_dir" string
+in the environment's <b>DB_CONFIG</b> file.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv.dbrename method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>A database in the file is currently open.
+<p>Called before <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.dbrename method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.dbrename method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.dbrename method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_err.html b/libdb/docs/api_java/env_err.html
new file mode 100644
index 0000000..56da4af
--- /dev/null
+++ b/libdb/docs/api_java/env_err.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.err</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.err</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void err(int errcode, String message)
+<p>
+public void errx(String message)
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.err, DbEnv.errx, <a href="../api_java/db_err.html">Db.err</a> and
+<a href="../api_java/db_err.html">Db.errx</a> methods provide error-messaging functionality for
+applications written using the Berkeley DB library.
+<p>The DbEnv.err method constructs an error message consisting of the
+following elements:
+<p><blockquote><p><dl compact>
+<p><dt>An optional prefix string<dd>If no error callback method has been set using the
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method, any prefix string specified using the
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a> method, followed by two separating characters: a colon
+and a &lt;space&gt; character.
+<p><dt>The supplied message string <b>message</b>.<dd>
+<p><dt>A separator<dd>Two separating characters: a colon and a &lt;space&gt; character.
+<p><dt>A standard error string<dd>The standard system or Berkeley DB library error string associated with the
+<b>error</b> value, as returned by the <a href="../api_java/env_strerror.html">DbEnv.strerror</a> method.
+</dl>
+</blockquote>
+<p>This constructed error message is then handled as follows:
+<p><blockquote>
+<p>If an error callback method has been set (see <a href="../api_java/db_set_errcall.html">Db.set_errcall</a>
+and <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>), that method is called with two
+arguments: any prefix string specified (see <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> and
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>) and the error message.
+<p>If an OutputStream has been set
+(see <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> and <a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a>),
+the error message is written to that stream.
+<p>If none of these output options has been configured, the error message
+is written to System.err, the standard
+error output stream.</blockquote>
+<p>The DbEnv.errx and <a href="../api_java/db_err.html">Db.errx</a> methods perform identically to the
+DbEnv.err and <a href="../api_java/db_err.html">Db.err</a> methods, except that they do not append
+the final separator characters and standard error string to the error
+message.
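+<p>For example, after a Berkeley DB call reports an error value, an
+application might log it as follows (an illustrative sketch only; the
+<b>dbenv</b> handle and the <b>ret</b> error value are placeholders):
+<p><pre>
+// Report an error value together with its standard error string.
+dbenv.err(ret, "unable to checkpoint the transaction subsystem");
+// Report a message for which no error value is available.
+dbenv.errx("configuration file ignored: malformed NAME VALUE entry");
+</pre>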
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_list.html b/libdb/docs/api_java/env_list.html
new file mode 100644
index 0000000..e21085e
--- /dev/null
+++ b/libdb/docs/api_java/env_list.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Database Environments and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Database Environments and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Environments and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/env_close.html">DbEnv.close</a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_java/env_dbremove.html">DbEnv.dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_java/env_dbrename.html">DbEnv.dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_java/env_err.html">DbEnv.err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../api_java/env_err.html">DbEnv.errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../api_java/lock_detect.html">DbEnv.lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_java/lock_get.html">DbEnv.lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_java/lock_id.html">DbEnv.lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_java/lock_put.html">DbEnv.lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_java/lock_stat.html">DbEnv.lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_java/lock_vec.html">DbEnv.lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><a href="../api_java/log_archive.html">DbEnv.log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_java/log_file.html">DbEnv.log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_java/log_flush.html">DbEnv.log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_java/log_put.html">DbEnv.log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_java/log_stat.html">DbEnv.log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_java/memp_stat.html">DbEnv.memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../api_java/env_open.html">DbEnv.open</a></td><td>Open an environment</td></tr>
+<tr><td><a href="../api_java/env_remove.html">DbEnv.remove</a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_java/rep_elect.html">DbEnv.rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_java/rep_message.html">DbEnv.rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_java/rep_start.html">DbEnv.rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_java/rep_stat.html">DbEnv.rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><a href="../api_java/env_set_app_dispatch.html">DbEnv.set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><a href="../api_java/env_set_encrypt.html">DbEnv.set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../api_java/env_set_feedback.html">DbEnv.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../api_java/env_set_flags.html">DbEnv.set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_java/rep_limit.html">DbEnv.set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_java/env_set_rpc_server.html">DbEnv.set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><a href="../api_java/env_set_shm_key.html">DbEnv.set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><a href="../api_java/env_set_tas_spins.html">DbEnv.set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_java/env_set_tmp_dir.html">DbEnv.set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><a href="../api_java/txn_begin.html">DbEnv.txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_java/txn_recover.html">DbEnv.txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_java/txn_stat.html">DbEnv.txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_open.html b/libdb/docs/api_java/env_open.html
new file mode 100644
index 0000000..e39a0df
--- /dev/null
+++ b/libdb/docs/api_java/env_open.html
@@ -0,0 +1,196 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.open</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void open(String db_home, int flags, int mode)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.open method is the interface for opening the Berkeley DB
+environment. It provides a structure for creating a consistent
+environment for processes using one or more of the features of Berkeley DB.
+<p>The <b>db_home</b> argument to DbEnv.open (and filename
+resolution in general) is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>The <b>flags</b> argument specifies the subsystems that are initialized
+and how the application's environment affects Berkeley DB file naming, among
+other things.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p>Because there are a large number of flags that can be specified, they
+have been grouped together by functionality. The first group of flags
+indicates which of the Berkeley DB subsystems should be initialized:
+<p><dl compact>
+<p><dt><a name="Db.DB_JOINENV">Db.DB_JOINENV</a><dd>Join an existing environment. This option allows applications to
+join an existing environment without knowing which Berkeley DB subsystems
+the environment supports.
+<p><dt><a name="Db.DB_INIT_CDB">Db.DB_INIT_CDB</a><dd>Initialize locking for the <a href="../ref/cam/intro.html">Berkeley DB Concurrent Data Store</a>
+product. In this mode, Berkeley DB provides multiple reader/single writer
+access. The only other subsystem that should be specified with the
+Db.DB_INIT_CDB flag is Db.DB_INIT_MPOOL.
+<p><dt><a name="Db.DB_INIT_LOCK">Db.DB_INIT_LOCK</a><dd>Initialize the locking subsystem. This subsystem should be used when
+multiple processes or threads are going to be reading and writing a
+Berkeley DB database, so that they do not interfere with each other. If all
+threads are accessing the database(s) read-only, locking is unnecessary.
+When the Db.DB_INIT_LOCK flag is specified, it is usually necessary
+to run a deadlock detector, as well. See <a href="../utility/db_deadlock.html">db_deadlock</a> and
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a> for more information.
+<p><dt><a name="Db.DB_INIT_LOG">Db.DB_INIT_LOG</a><dd>Initialize the logging subsystem. This subsystem should be used when
+recovery from application or system failure is necessary. If the log
+region is being created and log files are already present, the log files
+are reviewed; subsequent log writes are appended to the end of the log,
+rather than overwriting current log entries.
+<p><dt><a name="Db.DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a><dd>Initialize the shared memory buffer pool subsystem. This subsystem
+should be used whenever an application is using any Berkeley DB access
+method.
+<p><dt><a name="Db.DB_INIT_TXN">Db.DB_INIT_TXN</a><dd>Initialize the transaction subsystem. This subsystem should be used
+when recovery and atomicity of multiple operations are important. The
+Db.DB_INIT_TXN flag implies the Db.DB_INIT_LOG flag.
+</dl>
+<p>The second group of flags govern what recovery, if any, is performed when
+the environment is initialized:
+<p><dl compact>
+<p><dt><a name="Db.DB_RECOVER">Db.DB_RECOVER</a><dd>Run normal recovery on this environment before opening it for normal
+use. If this flag is set, the Db.DB_CREATE flag must also be set
+because the regions will be removed and re-created.
+<p><dt><a name="Db.DB_RECOVER_FATAL">Db.DB_RECOVER_FATAL</a><dd>Run catastrophic recovery on this environment before opening it for
+normal use. If this flag is set, the Db.DB_CREATE flag must also
+be set because the regions will be removed and re-created.
+</dl>
+<p>A standard part of the recovery process is to remove the existing Berkeley DB
+environment and create a new one in which to perform recovery. If the
+thread of control performing recovery does not specify the correct
+region initialization information (for example, the correct memory pool
+cache size), the result can be an application running in an environment
+with incorrect cache and other subsystem sizes. For this reason, the
+thread of control performing recovery should specify correct
+configuration information before calling the DbEnv.open method; or it
+should remove the environment after recovery is completed, leaving
+creation of the correctly sized environment to a subsequent call to
+DbEnv.open.
+<p>All Berkeley DB recovery processing must be single-threaded; that is, only a
+single thread of control may perform recovery or access a Berkeley DB
+environment while recovery is being performed. Because it is not an
+error to specify Db.DB_RECOVER for an environment for which no
+recovery is required, it is reasonable programming practice for the
+thread of control responsible for performing recovery and creating the
+environment to always specify the Db.DB_CREATE and
+Db.DB_RECOVER flags during startup.
+<p>The DbEnv.open function returns successfully if Db.DB_RECOVER
+or Db.DB_RECOVER_FATAL is specified and no log files exist, so it
+is necessary to ensure that all necessary log files are present before
+running recovery. For further information, consult <a href="../utility/db_archive.html">db_archive</a>
+and <a href="../utility/db_recover.html">db_recover</a>.
+<p>The third group of flags govern file-naming extensions in the environment:
+<p><dl compact>
+<a name="3"><!--meow--></a>
+<p><dt><a name="Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the Db.DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+Db.DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>Finally, there are a few additional unrelated flags:
+<p><dl compact>
+<p><dt><a name="Db.DB_CREATE">Db.DB_CREATE</a><dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt><a name="Db.DB_LOCKDOWN">Db.DB_LOCKDOWN</a><dd>Lock shared Berkeley DB environment files and memory-mapped databases into
+memory.
+<p><dt><a name="Db.DB_PRIVATE">Db.DB_PRIVATE</a><dd>Specify that the environment will only be accessed by a single process
+(although that process may be multithreaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment because it is likely to cause database
+corruption and unpredictable behavior. For example, if both a server
+application and the Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> are expected to access
+the environment, the Db.DB_PRIVATE flag should not be
+specified.
+<p><dt><a name="Db.DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a><dd>Allocate memory from system shared memory instead of from memory backed
+by the filesystem. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p><dt><a name="Db.DB_THREAD">Db.DB_THREAD</a><dd>Cause the <a href="../api_java/env_class.html">DbEnv</a> handle returned by DbEnv.open to be
+<i>free-threaded</i>; that is, usable by multiple threads within a
+single address space.
+<p>Threading is always assumed in the Java API, so no special flags are
+required and Berkeley DB functions will always behave as if the Db.DB_THREAD
+flag was specified.
+</dl>
+<p>On UNIX systems or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by
+Berkeley DB are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and modified by the process' umask value at the time of creation
+(see <b>umask</b>(2)). If <b>mode</b> is 0, Berkeley DB will use a default
+mode of readable and writable by both owner and group. On Windows
+systems, the mode argument is ignored. The group ownership of created
+files is based on the system and directory defaults, and is not further
+specified by Berkeley DB.
+<p>The DbEnv.open method throws an exception that encapsulates a non-zero error value on
+failure.
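+<p>For example, a transactional application might create and open its
+environment as follows (an illustrative sketch only; the home directory
+"/home/dbenv" is a placeholder, and the enclosing method must handle or
+declare the DbException and FileNotFoundException exceptions):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Initialize the lock, log, memory pool, and transaction subsystems,
+// creating any missing files and running normal recovery at startup.
+int flags = Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+    Db.DB_INIT_MPOOL | Db.DB_INIT_TXN | Db.DB_RECOVER;
+dbenv.open("/home/dbenv", flags, 0);
+</pre>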
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>The environment variable <b>DB_HOME</b> may be used as the path of
+the database home, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+</dl>
+<h1>Errors</h1>
+<p>The DbEnv.open method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EAGAIN<dd>The shared memory region was locked and (repeatedly) unavailable.
+</dl>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>
+The Db.DB_THREAD flag was specified and fast mutexes are not
+available for this architecture.
+<p>The DB_HOME or TMPDIR environment variables were set, but empty.
+<p>An incorrectly formatted <b>NAME VALUE</b> entry or line was found.
+</dl>
+<p><dl compact>
+<p><dt>ENOSPC<dd>HP-UX only: a previously created Berkeley DB environment for this process still
+exists.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.open method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.open method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.open method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_remove.html b/libdb/docs/api_java/env_remove.html
new file mode 100644
index 0000000..8e9c4c2
--- /dev/null
+++ b/libdb/docs/api_java/env_remove.html
@@ -0,0 +1,115 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.remove</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+<p>
+public void remove(String db_home, int flags)
+ throws DbException, FileNotFoundException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.remove method destroys a Berkeley DB environment if it is not
+currently in use. The environment regions, including any backing files,
+are removed. Any log or database files and the environment directory are
+not removed.
+<p>The <b>db_home</b> argument to DbEnv.remove is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p>If there are processes that have called <a href="../api_java/env_open.html">DbEnv.open</a> without
+calling <a href="../api_java/env_close.html">DbEnv.close</a> (that is, there are processes currently
+using the environment), DbEnv.remove will fail without further
+action unless the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is set, in which case
+DbEnv.remove will attempt to remove the environment, regardless
+of any processes still using it.
+<p>The result of attempting to forcibly destroy the environment when it is
+in use is unspecified. Processes using an environment often maintain open
+file descriptors for shared regions within it. On UNIX systems, the
+environment removal will usually succeed, and processes that have already
+joined the region will continue to run in that region without change.
+However, processes attempting to join the environment will either fail
+or create new regions. On other systems in which the <b>unlink</b>(2) system call will fail if any process has an open file descriptor for
+the file (for example, Windows/NT), the region removal will fail.
+<p>Calling DbEnv.remove should not be necessary for most applications
+because the Berkeley DB environment is cleaned up as part of normal database
+recovery procedures. However, applications may want to call
+DbEnv.remove as part of application shut down to free up system
+resources. For example, if the <a href="../api_java/env_open.html#DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a> flag was specified
+to <a href="../api_java/env_open.html">DbEnv.open</a>, it may be useful to call DbEnv.remove in
+order to release system shared memory segments that have been allocated.
+Or, on architectures in which mutexes require allocation of underlying
+system resources, it may be useful to call DbEnv.remove in order
+to release those resources. Alternatively, if recovery is not required
+because no database state is maintained across failures, and no system
+resources need to be released, it is possible to clean up an environment
+by simply removing all the Berkeley DB files in the database environment's
+directories.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FORCE">Db.DB_FORCE</a><dd>If the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is set, the environment is removed, regardless
+of any processes that may still be using it, and no locks are acquired
+during this process. (Generally, the <a href="../api_java/env_remove.html#DB_FORCE">Db.DB_FORCE</a> flag is
+specified only when applications were unable to shut down cleanly, and there
+is a risk that an application may have died holding a Berkeley DB lock.)
+<a name="3"><!--meow--></a>
+<p><dt><a name="Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, environment information will be
+used in file naming for all users only if the Db.DB_USE_ENVIRON
+flag is set.
+<p><dt><a name="Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a><dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see <a href="../ref/env/naming.html">Berkeley DB File Naming</a>. Because permitting users to specify which files
+are used can create security problems, if the
+Db.DB_USE_ENVIRON_ROOT flag is set, environment information will
+be used for file naming only for users with appropriate permissions (for
+example, users with a user-ID of 0 on UNIX systems).
+</dl>
+<p>In multithreaded applications, only a single thread may call
+DbEnv.remove.
+<p>A <a href="../api_java/env_class.html">DbEnv</a> handle that has already been used to open an environment
+should not be used to call the DbEnv.remove method; a new
+<a href="../api_java/env_class.html">DbEnv</a> handle should be created for that purpose.
+<p>After DbEnv.remove has been called, regardless of its return,
+the Berkeley DB environment handle may not be accessed again.
+<p>The DbEnv.remove method throws an exception that encapsulates a non-zero error value on
+failure.
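+<p>For example, at application shutdown a process might discard the
+environment as follows (an illustrative sketch only; the home directory
+is a placeholder, and the enclosing method must handle or declare the
+DbException and FileNotFoundException exceptions):
+<p><pre>
+// Use a fresh handle, not one that has already opened the environment.
+DbEnv removeEnv = new DbEnv(0);
+removeEnv.remove("/home/dbenv", 0);
+</pre>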
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EBUSY<dd>The shared memory region was in use and the force flag was not set.
+</dl>
+<p>If the file or directory does not exist, the DbEnv.remove method will
+fail and
+throw a FileNotFoundException exception.
+<p>The DbEnv.remove method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.remove method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_app_dispatch.html b/libdb/docs/api_java/env_set_app_dispatch.html
new file mode 100644
index 0000000..3c89652
--- /dev/null
+++ b/libdb/docs/api_java/env_set_app_dispatch.html
@@ -0,0 +1,103 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_app_dispatch</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_app_dispatch</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbTxnRecover
+{
+ public abstract int
+ tx_recover(DbEnv dbenv, Dbt log_rec, DbLsn lsn, int op);
+}
+public class DbEnv
+{
+ public void set_app_dispatch(DbTxnRecover tx_recover)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Set the application's method to be called during transaction abort
+and recovery. This method must return 0 on success and either
+<b>errno</b> or a value outside of the Berkeley DB error name space on
+failure. It takes four arguments:
+<p><dl compact>
+<p><dt>dbenv <dd>A Berkeley DB environment.
+<p><dt>log_rec<dd>A log record.
+<p><dt>lsn<dd>A log sequence number.
+<p><dt>op<dd>One of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_TXN_BACKWARD_ROLL">Db.DB_TXN_BACKWARD_ROLL</a><dd>The log is being read backward to determine which transactions have been
+committed and to abort those operations that were not; undo the operation
+described by the log record.
+<p><dt><a name="Db.DB_TXN_FORWARD_ROLL">Db.DB_TXN_FORWARD_ROLL</a><dd>The log is being played forward; redo the operation described by the log
+record.
+<p><dt><a name="Db.DB_TXN_ABORT">Db.DB_TXN_ABORT</a><dd>The log is being read backward during a transaction abort; undo the
+operation described by the log record.
+<p><dt><a name="Db.DB_TXN_APPLY">Db.DB_TXN_APPLY</a><dd>The log is being applied on a replica site; redo the operation
+described by the log record.
+<p><dt><a name="Db.DB_TXN_PRINT">Db.DB_TXN_PRINT</a><dd>The log is being printed for debugging purposes; print the contents of
+this log record in the desired format.
+</dl>
+</dl>
+<p>The Db.DB_TXN_FORWARD_ROLL and Db.DB_TXN_APPLY operations
+frequently imply the same actions, redoing changes that appear in the
+log record, although if a recovery function is to be used on a
+replication client where reads may be taking place concurrently with
+the processing of incoming messages, Db.DB_TXN_APPLY operations
+should also perform appropriate locking. The macro DB_REDO(op) checks
+that the operation is one of Db.DB_TXN_FORWARD_ROLL or
+Db.DB_TXN_APPLY, and should be used in the recovery code to refer
+to the conditions under which operations should be redone. Similarly,
+the macro DB_UNDO(op) checks if the operation is one of
+Db.DB_TXN_BACKWARD_ROLL or Db.DB_TXN_ABORT.
+<p>The DbEnv.set_app_dispatch method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_app_dispatch interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_app_dispatch
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv.set_app_dispatch method throws an exception that encapsulates a non-zero error value on
+failure.
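+<p>For example, a minimal application-specific dispatch method might be
+structured as follows (an illustrative sketch only; the record parsing
+and the redo/undo logic are application defined and omitted here):
+<p><pre>
+class AppRecover implements DbTxnRecover
+{
+    public int tx_recover(DbEnv dbenv, Dbt log_rec, DbLsn lsn, int op)
+    {
+        if (op == Db.DB_TXN_PRINT) {
+            // Print the record for debugging and report success.
+            return (0);
+        }
+        // Redo the change for forward-roll and apply operations,
+        // undo it for backward-roll and abort; details omitted.
+        return (0);
+    }
+}
+...
+dbenv.set_app_dispatch(new AppRecover());
+</pre>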
+<h1>Errors</h1>
+<p>The DbEnv.set_app_dispatch method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_app_dispatch method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_app_dispatch method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_cachesize.html b/libdb/docs/api_java/env_set_cachesize.html
new file mode 100644
index 0000000..0c90fa1
--- /dev/null
+++ b/libdb/docs/api_java/env_set_cachesize.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_cachesize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_cachesize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_cachesize(int gbytes, int bytes, int ncache)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the shared memory buffer pool -- that is, the cache --
+to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache should be
+the size of the normal working data set of the application, with some
+small amount of additional memory for unusual situations. (Note: the
+working set is not the same as the number of pages accessed
+simultaneously, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified. The current maximum size of a single cache is 4GB.
+For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures. For
+example, some releases of Solaris limit the amount of memory that may
+be allocated contiguously by a process. If <b>ncache</b> is 0 or 1,
+the cache will be allocated contiguously in memory. If it is greater
+than 1, the cache will be broken up into <b>ncache</b> equally sized,
+separate pieces of memory.
+<p>The DbEnv.set_cachesize method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_cachesize interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_cachesize
+will be ignored.
+<p>The DbEnv.set_cachesize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's cache size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_cachesize", one or more whitespace characters,
+and the three arguments specified to this interface, separated by whitespace
+characters, for example, "set_cachesize 1 500 2". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
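+<p>For example, an application expecting a working set of roughly 64MB
+might configure its cache before opening the environment as follows (an
+illustrative sketch only; the sizes and home directory are placeholders,
+and the enclosing method must handle the declared exceptions):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// 0 gigabytes plus 64MB, allocated as a single contiguous cache.
+dbenv.set_cachesize(0, 64 * 1024 * 1024, 1);
+dbenv.open("/home/dbenv", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+</pre>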
+<h1>Errors</h1>
+<p>The DbEnv.set_cachesize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The specified cache size was impossibly small.
+<p>Called after
+<a href="../api_java/env_open.html">DbEnv.open</a>
+was called.
+</dl>
+<p>The DbEnv.set_cachesize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_cachesize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_data_dir.html b/libdb/docs/api_java/env_set_data_dir.html
new file mode 100644
index 0000000..c43b018
--- /dev/null
+++ b/libdb/docs/api_java/env_set_data_dir.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_data_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_data_dir</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_data_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the path of a directory to be used as the location of the access
+method database files. Paths specified to the <a href="../api_java/db_open.html">Db.open</a> function
+will be searched relative to this path. Paths set using this interface
+are additive, and specifying more than one will result in each specified
+directory being searched for database files. If any directories are
+specified, created database files will always be created in the first path
+specified.
+<p>If no database directories are specified, database files can exist only
+in the environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv.set_data_dir method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_data_dir interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_data_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv.set_data_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's data directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_data_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
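+<p>For example, an application keeping its database files on a separate
+device from its log files might configure the handle as follows (an
+illustrative sketch only; the directory name is a placeholder):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Databases are created in, and searched for under, the "data1"
+// directory (resolved as described in Berkeley DB File Naming).
+dbenv.set_data_dir("data1");
+</pre>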
+<h1>Errors</h1>
+<p>The DbEnv.set_data_dir method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_data_dir method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_data_dir method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_encrypt.html b/libdb/docs/api_java/env_set_encrypt.html
new file mode 100644
index 0000000..0265a83
--- /dev/null
+++ b/libdb/docs/api_java/env_set_encrypt.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_encrypt</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_encrypt</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_encrypt(String passwd, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the password used by the <a href="../api_java/env_class.html">DbEnv</a> and <a href="../api_java/db_class.html">Db</a> methods to
+perform encryption and decryption.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_ENCRYPT_AES">Db.DB_ENCRYPT_AES</a><dd>Use the Rijndael/AES (also known as the Advanced Encryption Standard
+and Federal Information Processing Standard (FIPS) 197) algorithm for
+encryption or decryption.
+</dl>
+<p>The DbEnv.set_encrypt method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_encrypt interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_encrypt
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DbEnv.set_encrypt method throws an exception that encapsulates a non-zero error value on
+failure.
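+<p>For example, an application might enable AES encryption before
+opening the environment as follows (an illustrative sketch only; the
+password is a placeholder):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Subsequent encryption and decryption in this environment will use
+// this password with the AES algorithm.
+dbenv.set_encrypt("my-secret-password", Db.DB_ENCRYPT_AES);
+</pre>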
+<h1>Errors</h1>
+<p>The DbEnv.set_encrypt method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after
+<a href="../api_java/env_open.html">DbEnv.open</a>
+was called.
+</dl>
+<p><dl compact>
+<p><dt>EOPNOTSUPP<dd>Cryptography is not available in this Berkeley DB release.
+</dl>
+<p>The DbEnv.set_encrypt method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_encrypt method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_errcall.html b/libdb/docs/api_java/env_set_errcall.html
new file mode 100644
index 0000000..935b27e
--- /dev/null
+++ b/libdb/docs/api_java/env_set_errcall.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_errcall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_errcall</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbErrcall
+{
+ public abstract void errcall(String errpfx, String msg);
+}
+public class DbEnv
+{
+ public void set_errcall(DbErrcall errcall);
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The DbEnv.set_errcall and <a href="../api_java/db_set_errcall.html">Db.set_errcall</a> methods are used to
+enhance the mechanism for reporting error messages to the application.
+The DbEnv.set_errcall and <a href="../api_java/db_set_errcall.html">Db.set_errcall</a> methods must be
+called with a single object argument. The object's class must implement
+the DbErrcall interface. In some cases, when an error occurs, Berkeley DB
+will invoke the object's errcall() method with two arguments; the first
+is the prefix string (as previously set by <a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a> or
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), the second will be an error message string.
+It is up to this method to display the message in an appropriate
+manner.
+<p>Alternatively, you can use the <a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a> and
+<a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a> methods to display the additional information via
+an output stream. You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>The DbEnv.set_errcall interface may be called at any time during the life of
+the application.
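+<p>For example, an application might route Berkeley DB error messages to
+its own logging facility as follows (an illustrative sketch only; the
+logging call shown is a placeholder):
+<p><pre>
+class AppErrcall implements DbErrcall
+{
+    public void errcall(String errpfx, String msg)
+    {
+        // Forward the prefix (possibly null) and the message to the
+        // application's own logging facility.
+        System.err.println((errpfx != null ? errpfx + ": " : "") + msg);
+    }
+}
+...
+dbenv.set_errcall(new AppErrcall());
+</pre>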
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_error_stream.html b/libdb/docs/api_java/env_set_error_stream.html
new file mode 100644
index 0000000..2177bda
--- /dev/null
+++ b/libdb/docs/api_java/env_set_error_stream.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_error_stream</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_error_stream</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void DbEnv.set_error_stream(OutputStream s)
+ throws DbException
+</pre></h3>
+<h1>Description</h1>
+<p>When an error occurs in the Berkeley DB library, an exception is thrown. In
+some cases, however, the <b>errno</b> value may be insufficient to
+completely describe the cause of the error, especially during initial
+application debugging.
+<p>The DbEnv.set_error_stream and <a href="../api_java/db_set_error_stream.html">Db.set_error_stream</a> methods
+are used to enhance the mechanism for reporting error messages to the
+application by setting an OutputStream to be used for displaying additional
+Berkeley DB error messages. In some cases, when an error occurs, Berkeley DB will
+output an additional error message to the specified stream.
+<p>The error message will consist of the prefix string and a colon
+("<b>:</b>") (if a prefix string was previously specified using
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>), an error string, and a trailing
+&lt;newline&gt; character.
+<p>Alternatively, you can use the <a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a> method to capture the
+additional error information in a way that does not use output streams.
+You should not mix these approaches.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
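+<p>For example, an application debugging an environment might simply direct
+the additional messages to its standard error stream (an illustrative
+sketch; the <b>dbenv</b> handle is assumed to have been created elsewhere):
+<p><blockquote><pre>// assumes: import com.sleepycat.db.*;
+// System.err is a java.io.OutputStream, so it may be passed directly.
+dbenv.set_error_stream(System.err);</pre></blockquote>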
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_errpfx.html b/libdb/docs/api_java/env_set_errpfx.html
new file mode 100644
index 0000000..ff4d81f
--- /dev/null
+++ b/libdb/docs/api_java/env_set_errpfx.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_errpfx</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_errpfx</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_errpfx(String errpfx);
+</pre></h3>
+<h1>Description</h1>
+<p>Set the prefix string that appears before error messages issued by Berkeley DB.
+<p>The DbEnv.set_errpfx interface may be called at any time during the life of
+the application.
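+<p>A common convention is to use the program name as the prefix, so that
+messages from several processes sharing an environment can be told apart
+(an illustrative sketch; the <b>dbenv</b> handle is assumed):
+<p><blockquote><pre>dbenv.set_errpfx("my_application");
+dbenv.set_error_stream(System.err);
+// Subsequent error messages will then begin with "my_application:".</pre></blockquote>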
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_feedback.html b/libdb/docs/api_java/env_set_feedback.html
new file mode 100644
index 0000000..6b64b3e
--- /dev/null
+++ b/libdb/docs/api_java/env_set_feedback.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_feedback</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbEnvFeedback
+{
+ public abstract void feedback(DbEnv dbenv, int opcode, int pct);
+}
+public class DbEnv
+{
+ public void set_feedback(DbEnvFeedback db_feedback)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>Some operations performed by the Berkeley DB library can take non-trivial
+amounts of time. The DbEnv.set_feedback method can be used by
+applications to monitor progress within these operations.
+<p>When an operation is likely to take a long time, Berkeley DB will call the
+specified callback method. This method must be declared with
+three arguments: the first will be a reference to the enclosing
+environment, the second a flag value, and the third the percent of the
+operation that has been completed, specified as an integer value between
+0 and 100. It is up to the callback method to display this
+information in an appropriate manner.
+<p>The <b>opcode</b> argument may take on any of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_RECOVER">Db.DB_RECOVER</a><dd>The environment is being recovered.
+</dl>
+<p>The DbEnv.set_feedback interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_feedback method throws an exception that encapsulates a non-zero error value on
+failure.
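+<p>For example, an application running recovery might report progress as
+follows (an illustrative sketch; the <b>dbenv</b> handle and DbException
+handling are assumed):
+<p><blockquote><pre>// assumes: import com.sleepycat.db.*;
+dbenv.set_feedback(new DbEnvFeedback() {
+        public void feedback(DbEnv dbenv, int opcode, int pct) {
+                if (opcode == Db.DB_RECOVER)
+                        System.out.println("recovery " + pct + "% complete");
+        }
+});</pre></blockquote>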
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_flags.html b/libdb/docs/api_java/env_set_flags.html
new file mode 100644
index 0000000..0a69fa8
--- /dev/null
+++ b/libdb/docs/api_java/env_set_flags.html
@@ -0,0 +1,241 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_flags</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_flags(int flags, boolean onoff)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>If <b>onoff</b> is false, the specified flags are cleared; otherwise they are set.
+The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a><dd>If set, operations for which no explicit transaction handle was
+specified, and which modify databases in the database environment, will
+be automatically enclosed within a transaction. If the call succeeds,
+changes made by the operation will be recoverable. If the call fails,
+the operation will have made no changes.
+<p>Calling DbEnv.set_flags with the <a href="../api_java/env_set_flags.html#DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a> flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the <a href="../api_java/env_set_flags.html#DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a> flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The <a href="../api_java/env_set_flags.html#DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a> flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="3"><!--meow--></a>
+<p><dt><a name="Db.DB_CDB_ALLDB">Db.DB_CDB_ALLDB</a><dd>If set, Berkeley DB Concurrent Data Store applications will perform locking on an environment-wide
+basis rather than on a per-database basis.
+<p>Calling DbEnv.set_flags with the Db.DB_CDB_ALLDB flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_CDB_ALLDB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_CDB_ALLDB flag may be used to configure Berkeley DB only before the
+<a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<a name="4"><!--meow--></a>
+<p><dt><a name="Db.DB_DIRECT_DB">Db.DB_DIRECT_DB</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB database files to avoid double caching.
+<p>Calling DbEnv.set_flags with the Db.DB_DIRECT_DB flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_DIRECT_DB flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_DIRECT_DB flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="5"><!--meow--></a>
+<p><dt><a name="Db.DB_DIRECT_LOG">Db.DB_DIRECT_LOG</a><dd>If set and supported by the system, Berkeley DB will turn off system buffering
+of Berkeley DB log files to avoid double caching.
+<p>Calling DbEnv.set_flags with the Db.DB_DIRECT_LOG flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_DIRECT_LOG flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_DIRECT_LOG flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="6"><!--meow--></a>
+<p><dt><a name="Db.DB_NOLOCKING">Db.DB_NOLOCKING</a><dd>If set, Berkeley DB will grant all requested mutual exclusion mutexes and
+database locks without regard for their actual availability. This
+functionality should never be used for purposes other than debugging.
+<p>Calling DbEnv.set_flags with the Db.DB_NOLOCKING flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The Db.DB_NOLOCKING flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="7"><!--meow--></a>
+<p><dt><a name="Db.DB_NOMMAP">Db.DB_NOMMAP</a><dd>If set, Berkeley DB will copy read-only database files into the local cache
+instead of potentially mapping them into process memory (see the
+description of the <a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a> method for further
+information).
+<p>Calling DbEnv.set_flags with the Db.DB_NOMMAP flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_NOMMAP flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_NOMMAP flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="8"><!--meow--></a>
+<p><dt><a name="Db.DB_NOPANIC">Db.DB_NOPANIC</a><dd>If set, Berkeley DB will ignore any panic state in the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">Db.DB_RUNRECOVERY</a>.) This
+functionality should never be used for purposes other than debugging.
+<p>Calling DbEnv.set_flags with the Db.DB_NOPANIC flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The Db.DB_NOPANIC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<p><dt><a name="Db.DB_OVERWRITE">Db.DB_OVERWRITE</a><dd>Overwrite files stored in encrypted formats before deleting them. Berkeley DB
+overwrites files using alternating 0xff, 0x00 and 0xff byte patterns.
+For file overwriting to be effective, the underlying file must be stored
+on a fixed-block filesystem. Systems with journaling or logging filesystems
+will require operating system support and probably modification of the
+Berkeley DB sources.
+<p>Calling DbEnv.set_flags with the Db.DB_OVERWRITE flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+<p>The Db.DB_OVERWRITE flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="9"><!--meow--></a>
+<p><dt><a name="Db.DB_PANIC_ENVIRONMENT">Db.DB_PANIC_ENVIRONMENT</a><dd>If set, Berkeley DB will set the panic state for the database environment.
+(Database environments in a panic state normally refuse all attempts to
+call Berkeley DB functions, returning <a href="../ref/program/errorret.html#DB_RUNRECOVERY">Db.DB_RUNRECOVERY</a>.) This flag may
+not be specified using the environment's <b>DB_CONFIG</b> file. This
+flag may be used to configure Berkeley DB only after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+<p>Calling DbEnv.set_flags with the Db.DB_PANIC_ENVIRONMENT flag affects the
+database environment, including all threads of control accessing the
+database environment.
+<p>The Db.DB_PANIC_ENVIRONMENT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="10"><!--meow--></a>
+<p><dt><a name="Db.DB_REGION_INIT">Db.DB_REGION_INIT</a><dd>In some applications, the expense of page-faulting the underlying shared
+memory regions can affect performance. (For example, if the page-fault
+occurs while holding a lock, other lock requests can convoy, and overall
+throughput may decrease.) If set, Berkeley DB will page-fault shared regions
+into memory when initially creating or joining a Berkeley DB environment. In
+addition, Berkeley DB will write the shared regions when creating an
+environment, forcing the underlying virtual memory and filesystems to
+instantiate both the necessary memory and the necessary disk space.
+This can also avoid out-of-disk space failures later on.
+<p>Calling DbEnv.set_flags with the Db.DB_REGION_INIT flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_REGION_INIT flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_REGION_INIT flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="11"><!--meow--></a>
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>If set, Berkeley DB will not write or synchronously flush the log on transaction
+commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the application or system fails,
+it is possible some number of the most recently committed transactions
+may be undone during recovery. The number of transactions at risk is
+governed by how many log updates can fit into the log buffer, how often
+the operating system flushes dirty buffers to disk, and how often the
+log is checkpointed.
+<p>Calling DbEnv.set_flags with the Db.DB_TXN_NOSYNC flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_TXN_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_TXN_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="12"><!--meow--></a>
+<p><dt><a name="Db.DB_TXN_WRITE_NOSYNC">Db.DB_TXN_WRITE_NOSYNC</a><dd>If set, Berkeley DB will write, but will not synchronously flush, the log on
+transaction commit or prepare.
+This means that transactions exhibit the ACI (atomicity, consistency,
+and isolation) properties, but not D (durability); that is, database
+integrity will be maintained, but if the system fails, it is possible
+some number of the most recently committed transactions may be undone
+during recovery. The number of transactions at risk is governed by how
+often the system flushes dirty buffers to disk and how often the log is
+checkpointed.
+<p>Calling DbEnv.set_flags with the Db.DB_TXN_WRITE_NOSYNC flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_TXN_WRITE_NOSYNC flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_TXN_WRITE_NOSYNC flag may be used to configure Berkeley DB at any time during
+the life of the application.
+<a name="13"><!--meow--></a>
+<p><dt><a name="Db.DB_YIELDCPU">Db.DB_YIELDCPU</a><dd>If set, Berkeley DB will yield the processor immediately after each page or
+mutex acquisition. This functionality should never be used for purposes
+other than stress testing.
+<p>Calling DbEnv.set_flags with the Db.DB_YIELDCPU flag only affects
+the specified <a href="../api_java/env_class.html">DbEnv</a> handle (and any other Berkeley DB handles opened
+within the scope of that handle).
+For consistent behavior across the environment, all <a href="../api_java/env_class.html">DbEnv</a>
+handles opened in the environment must either set the Db.DB_YIELDCPU flag
+or the flag should be specified in the <b>DB_CONFIG</b> configuration
+file.
+<p>The Db.DB_YIELDCPU flag may be used to configure Berkeley DB at any time during
+the life of the application.
+</dl>
+<p>The DbEnv.set_flags method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's flag values may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_flags", one or more whitespace characters,
+and the interface flag argument as a string; for example, "set_flags
+DB_TXN_NOSYNC". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
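+<p>For example, an application willing to risk losing the most recently
+committed transactions in exchange for higher throughput might set
+Db.DB_TXN_NOSYNC at run time (an illustrative sketch; the <b>dbenv</b>
+handle and DbException handling are assumed):
+<p><blockquote><pre>// Turn the flag on; passing false would clear it again.
+dbenv.set_flags(Db.DB_TXN_NOSYNC, true);</pre></blockquote>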
+<h1>Errors</h1>
+<p>The DbEnv.set_flags method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.set_flags method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_flags method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lg_bsize.html b/libdb/docs/api_java/env_set_lg_bsize.html
new file mode 100644
index 0000000..14ab03c
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lg_bsize.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_bsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_bsize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_bsize(int lg_bsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the in-memory log buffer, in bytes. By default, or if
+the value is set to 0, a size of 32K is used. The size of the log file
+(see <a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>) must be at least four times the size of
+the in-memory log buffer.
+<p>Log information is stored in-memory until the storage space fills up
+or transaction commit forces the information to be flushed to stable
+storage. In the presence of long-running transactions or transactions
+producing large amounts of data, larger buffer sizes can increase
+throughput.
+<p>The DbEnv.set_lg_bsize method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lg_bsize interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lg_bsize
+will be ignored.
+<p>The DbEnv.set_lg_bsize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's log buffer size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_bsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
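+<p>For example, an application generating large transactions might select a
+1MB log buffer before opening the environment, remembering that the log
+file size must then be at least 4MB (an illustrative sketch; the figures
+are arbitrary and DbException handling is assumed):
+<p><blockquote><pre>DbEnv dbenv = new DbEnv(0);
+dbenv.set_lg_bsize(1024 * 1024);     // 1MB in-memory log buffer
+dbenv.set_lg_max(4 * 1024 * 1024);   // log files at least four times the buffer
+// ... dbenv.open(...) follows ...</pre></blockquote>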
+<h1>Errors</h1>
+<p>The DbEnv.set_lg_bsize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+</dl>
+<p>The DbEnv.set_lg_bsize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lg_bsize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lg_dir.html b/libdb/docs/api_java/env_set_lg_dir.html
new file mode 100644
index 0000000..c2ac268
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lg_dir.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_dir</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The path of a directory to be used as the location of logging files.
+Log files created by the Log Manager subsystem will be created in this
+directory.
+<p>If no logging directory is specified, log files are created in the
+environment home directory. See <a href="../ref/env/naming.html">Berkeley DB File Naming</a> for more information.
+<p>For the greatest degree of recoverability from system or application
+failure, database files and log files should be located on separate
+physical devices.
+<p>The DbEnv.set_lg_dir method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_lg_dir interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lg_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv.set_lg_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's logging directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
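+<p>For example, to place log files on a device separate from the database
+files (an illustrative sketch; the directory name is arbitrary and
+DbException handling is assumed):
+<p><blockquote><pre>DbEnv dbenv = new DbEnv(0);
+// Keep the log on its own physical device for better recoverability.
+dbenv.set_lg_dir("/logdisk/myapp/logs");
+// ... dbenv.open(...) follows ...</pre></blockquote>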
+<h1>Errors</h1>
+<p>The DbEnv.set_lg_dir method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lg_dir method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lg_dir method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lg_max.html b/libdb/docs/api_java/env_set_lg_max.html
new file mode 100644
index 0000000..8e55570
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lg_max.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_max</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_max(int lg_max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum size of a single file in the log, in bytes. By default,
+or if the <b>lg_max</b> argument is set to 0, a size of 10MB is used.
+Because <a href="../api_java/lsn_class.html">DbLsn</a> file offsets are unsigned four-byte values, the
+set value may not be larger than the maximum unsigned four-byte value.
+The size of the log file must be at least four times the size of the
+in-memory log buffer (see <a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>).
+<p>See <a href="../ref/log/limits.html">Log File Limits</a>
+for more information.
+<p>The DbEnv.set_lg_max method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lg_max interface may be called at any time during the life of
+the application.
+If no size is specified by the application, the size last specified for
+the database region will be used, or if no database region previously
+existed, the default will be used.
+<p>The DbEnv.set_lg_max method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's log file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_max", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
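+<p>For example, to use 20MB log files rather than the 10MB default (an
+illustrative sketch; the <b>dbenv</b> handle and DbException handling are
+assumed):
+<p><blockquote><pre>// 20MB log files; must remain at least four times the log buffer size.
+dbenv.set_lg_max(20 * 1024 * 1024);</pre></blockquote>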
+<h1>Errors</h1>
+<p>The DbEnv.set_lg_max method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+<p>The size of the log file is less than four times the size of the in-memory
+log buffer.
+<p>The specified log file size was too large.
+</dl>
+<p>The DbEnv.set_lg_max method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lg_max method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lg_regionmax.html b/libdb/docs/api_java/env_set_lg_regionmax.html
new file mode 100644
index 0000000..9c6d67f
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lg_regionmax.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lg_regionmax</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lg_regionmax</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lg_regionmax(int lg_regionmax)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the size of the underlying logging subsystem region, in bytes. By
+default, or if the value is set to 0, the base region size is 60KB.
+The log region is used to store filenames, and so may need to be
+increased in size if a large number of files will be opened and
+registered with the specified Berkeley DB environment's log manager.
+<p>The DbEnv.set_lg_regionmax method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lg_regionmax interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lg_regionmax
+will be ignored.
+<p>The DbEnv.set_lg_regionmax method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's log region size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lg_regionmax", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
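+<p>For example, an application that opens and registers a very large number
+of database files might increase the region from the 60KB base size before
+opening the environment (an illustrative sketch; the size is arbitrary and
+DbException handling is assumed):
+<p><blockquote><pre>dbenv.set_lg_regionmax(256 * 1024);   // 256KB logging region</pre></blockquote>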
+<h1>Errors</h1>
+<p>The DbEnv.set_lg_regionmax method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lg_regionmax method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lg_regionmax method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lk_conflicts.html b/libdb/docs/api_java/env_set_lk_conflicts.html
new file mode 100644
index 0000000..0f3a507
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lk_conflicts.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_conflicts</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_conflicts</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_conflicts(byte[][] conflicts)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the locking conflicts matrix.
+A non-0 value for the array element indicates that requested_mode and
+held_mode conflict:
+<p><blockquote><pre>conflicts[requested_mode][held_mode]</pre></blockquote>
+<p>The <i>not-granted</i> mode must be represented by 0.
+<p>If DbEnv.set_lk_conflicts is never called, a standard conflicts
+array is used; see <a href="../ref/lock/stdmode.html">Standard Lock
+Modes</a> for more information.
+<p>The DbEnv.set_lk_conflicts method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lk_conflicts interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lk_conflicts
+will be ignored.
+<p>The DbEnv.set_lk_conflicts method throws an exception that encapsulates a non-zero error value on
+failure.
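+<p>For example, a minimal matrix with three modes (not-granted = 0, read = 1,
+write = 2), in which read locks conflict only with write locks and write
+locks conflict with both, could be specified as follows (an illustrative
+sketch; most applications use the standard conflicts array):
+<p><blockquote><pre>byte conflicts[][] = {
+        /*                N  R  W */
+        /* Not-granted */ {0, 0, 0},
+        /* Read        */ {0, 0, 1},
+        /* Write       */ {0, 1, 1}
+};
+dbenv.set_lk_conflicts(conflicts);</pre></blockquote>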
+<h1>Errors</h1>
+<p>The DbEnv.set_lk_conflicts method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>No memory was available to copy the conflicts array.
+</dl>
+<p>The DbEnv.set_lk_conflicts method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lk_conflicts method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lk_detect.html b/libdb/docs/api_java/env_set_lk_detect.html
new file mode 100644
index 0000000..3c49190
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lk_detect.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_detect</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_detect(int detect)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set if the deadlock detector is to be run whenever a lock conflict
+occurs, and specify what lock request(s) should be rejected. As
+transactions acquire locks on behalf of a single locker ID, rejecting
+a lock request associated with a transaction normally requires the
+transaction be aborted. The specified value must be one of the
+following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_DEFAULT">Db.DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to Db.DB_LOCK_RANDOM.
+<dt><a name="Db.DB_LOCK_EXPIRE">Db.DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="Db.DB_LOCK_MAXLOCKS">Db.DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="Db.DB_LOCK_MINLOCKS">Db.DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest number of
+locks.
+<dt><a name="Db.DB_LOCK_MINWRITE">Db.DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest number of
+write locks.
+<dt><a name="Db.DB_LOCK_OLDEST">Db.DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="Db.DB_LOCK_RANDOM">Db.DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="Db.DB_LOCK_YOUNGEST">Db.DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>The DbEnv.set_lk_detect method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lk_detect interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lk_detect
+must be consistent with the existing environment or an error will be
+returned.
+<p>The DbEnv.set_lk_detect method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's deadlock detector configuration may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_detect", one or more whitespace characters,
+and the interface <b>detect</b> argument as a string; for example,
+"set_lk_detect DB_LOCK_OLDEST". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
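+<p>For example, to run the deadlock detector on every lock conflict and
+reject the lock request of the oldest locker (an illustrative sketch; the
+environment must not yet be opened and DbException handling is assumed):
+<p><blockquote><pre>dbenv.set_lk_detect(Db.DB_LOCK_OLDEST);</pre></blockquote>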
+<h1>Errors</h1>
+<p>The DbEnv.set_lk_detect method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lk_detect method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lk_detect method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lk_max_lockers.html b/libdb/docs/api_java/env_set_lk_max_lockers.html
new file mode 100644
index 0000000..747061f
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lk_max_lockers.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_lockers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_lockers</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_lockers(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneous locking entities supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 lockers. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv.set_lk_max_lockers method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lk_max_lockers interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lk_max_lockers
+will be ignored.
+<p>The DbEnv.set_lk_max_lockers method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of lockers may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_lockers", one or more whitespace characters,
+and the number of lockers. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
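+<p>For example, an application expecting several thousand concurrent locking
+entities might size the lock table before opening the environment,
+typically adjusting lockers, locks and lock objects together (an
+illustrative sketch; the figures are arbitrary and DbException handling is
+assumed):
+<p><blockquote><pre>DbEnv dbenv = new DbEnv(0);
+dbenv.set_lk_max_lockers(5000);
+dbenv.set_lk_max_locks(10000);
+dbenv.set_lk_max_objects(10000);
+// ... dbenv.open(...) follows ...</pre></blockquote>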
+<h1>Errors</h1>
+<p>The DbEnv.set_lk_max_lockers method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lk_max_lockers method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lk_max_lockers method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lk_max_locks.html b/libdb/docs/api_java/env_set_lk_max_locks.html
new file mode 100644
index 0000000..41dd5a2
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lk_max_locks.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_locks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_locks</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_locks(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of locks supported by the Berkeley DB lock subsystem.
+This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to estimate how much space to
+allocate for various lock-table data structures. The default value is
+1000 locks. For specific information on configuring the size of the lock
+subsystem, see <a href="../ref/lock/max.html">Configuring locking:
+sizing the system</a>.
+<p>The DbEnv.set_lk_max_locks method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lk_max_locks interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lk_max_locks
+will be ignored.
+<p>The DbEnv.set_lk_max_locks method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of locks may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_locks", one or more whitespace characters,
+and the number of locks. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<h1>Errors</h1>
+<p>The DbEnv.set_lk_max_locks method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lk_max_locks method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lk_max_locks method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_lk_max_objects.html b/libdb/docs/api_java/env_set_lk_max_objects.html
new file mode 100644
index 0000000..3a3d816
--- /dev/null
+++ b/libdb/docs/api_java/env_set_lk_max_objects.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_lk_max_objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_lk_max_objects</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_lk_max_objects(int max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of simultaneously locked objects supported by
+the Berkeley DB lock subsystem. This value is used by <a href="../api_java/env_open.html">DbEnv.open</a> to
+estimate how much space to allocate for various lock-table data
+structures. The default value is 1000 objects. For specific
+information on configuring the size of the lock subsystem, see
+<a href="../ref/lock/max.html">Configuring locking: sizing the
+system</a>.
+<p>The DbEnv.set_lk_max_objects method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_lk_max_objects interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_lk_max_objects
+will be ignored.
+<p>The DbEnv.set_lk_max_objects method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of objects may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lk_max_objects", one or more whitespace characters,
+and the number of objects. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
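+<p>For illustration only, a sketch of sizing both lock-table limits together
+(the values and environment home are hypothetical; exception handling is
+omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Size the lock table before the environment is opened.
+dbenv.set_lk_max_locks(5000);
+dbenv.set_lk_max_objects(5000);
+dbenv.open("/var/dbenv",
+    Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+</pre>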
+<h1>Errors</h1>
+<p>The DbEnv.set_lk_max_objects method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_lk_max_objects method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_lk_max_objects method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_mp_mmapsize.html b/libdb/docs/api_java/env_set_mp_mmapsize.html
new file mode 100644
index 0000000..95db04d
--- /dev/null
+++ b/libdb/docs/api_java/env_set_mp_mmapsize.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_mp_mmapsize</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_mp_mmapsize</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_mp_mmapsize(long mmapsize)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Files that are opened read-only in the pool (and that satisfy a few
+other criteria) are, by default, mapped into the process address space
+instead of being copied into the local cache. This can result in
+better-than-usual performance because available virtual memory is
+normally much larger than the local cache, and page faults are faster
+than page copying on many systems. However, it can cause resource
+starvation in the presence of limited virtual memory, and it can result
+in immense process sizes in the presence of large databases.
+<p>Set the maximum file size, in bytes, for a file to be mapped into the
+process address space. If no value is specified, it defaults to 10MB.
+<p>The DbEnv.set_mp_mmapsize method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_mp_mmapsize interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_mp_mmapsize method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum mapped file size may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_mp_mmapsize", one or more whitespace characters,
+and the size in bytes. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
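+<p>For illustration only, a sketch of lowering the mapped-file threshold to
+4MB (the value is hypothetical; exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Files larger than 4MB will be copied through the cache rather than
+// mapped into the process address space.
+dbenv.set_mp_mmapsize(4 * 1024 * 1024);
+</pre>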
+<h1>Errors</h1>
+<p>The DbEnv.set_mp_mmapsize method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_mp_mmapsize method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_mp_mmapsize method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_rpc_server.html b/libdb/docs/api_java/env_set_rpc_server.html
new file mode 100644
index 0000000..ca07cf3
--- /dev/null
+++ b/libdb/docs/api_java/env_set_rpc_server.html
@@ -0,0 +1,82 @@
+<!--"@(#)env_set_rpc_server.so 10.1 (Sleepycat) 8/25/99"-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_rpc_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_rpc_server</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_rpc_server(DbClient client,
+ String host, long cl_timeout, long sv_timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Establishes a connection for this <b>dbenv</b> to an RPC server. If
+the <b>client</b> argument is NULL, this call creates a connection to
+the Berkeley DB server on the indicated hostname and sets up a channel for
+communication.
+The <b>client</b> argument is reserved for future use.
+If it is not NULL, an exception is thrown.
+<a name="3"><!--meow--></a>
+<p>The <b>cl_timeout</b> argument specifies the number of seconds the client
+should wait for results to come back from the server. Once the timeout
+has expired on any communication with the server, Db.DB_NOSERVER will
+be returned. If this value is zero, a default timeout is used.
+<a name="4"><!--meow--></a>
+<p>The <b>sv_timeout</b> argument specifies the number of seconds the server
+should allow a client connection to remain idle before assuming that the
+client is gone. Once that timeout has been reached, the server releases
+all resources associated with that client connection. Subsequent attempts
+by that client to communicate with the server result in
+Db.DB_NOSERVER_ID, indicating that an invalid identifier has been
+given to the server. This value can be considered a hint to the server.
+The server may alter this value based on its own policies or allowed
+values. If this value is zero, a default timeout is used.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>When the DbEnv.set_rpc_server method has been called, subsequent calls
+to Berkeley DB library interfaces may return or throw exceptions encapsulating
+<a name="Db.DB_NOSERVER">Db.DB_NOSERVER</a>, <a name="Db.DB_NOSERVER_ID">Db.DB_NOSERVER_ID</a>, or
+<a name="Db.DB_NOSERVER_HOME">Db.DB_NOSERVER_HOME</a>.
+<p>The DbEnv.set_rpc_server method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_rpc_server interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+<p>The DbEnv.set_rpc_server method throws an exception that encapsulates a non-zero error value on
+failure.
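+<p>For illustration only, a sketch of connecting a handle to a server on a
+hypothetical host "dbhost.example.com" with default timeouts (exception
+handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+// The client argument is reserved and must be null; zero timeouts
+// select the defaults.
+dbenv.set_rpc_server(null, "dbhost.example.com", 0, 0, 0);
+dbenv.open("/var/dbenv", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+</pre>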
+<h1>Errors</h1>
+<p>The DbEnv.set_rpc_server method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.set_rpc_server method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_rpc_server method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_shm_key.html b/libdb/docs/api_java/env_set_shm_key.html
new file mode 100644
index 0000000..2945480
--- /dev/null
+++ b/libdb/docs/api_java/env_set_shm_key.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_shm_key</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_shm_key</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_shm_key(long shm_key)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on VxWorks or systems supporting X/Open-style
+shared memory interfaces; for example, UNIX systems supporting
+<b>shmget</b>(2) and related System V IPC interfaces.
+<p>This base segment ID will be used when Berkeley DB shared memory regions are
+first created. It will be incremented a small integer value each time
+a new shared memory region is created; that is, if the base ID is 35,
+the first shared memory region created will have a segment ID of 35,
+and the next one will have a segment ID between 36 and 40 or so. A
+Berkeley DB environment always creates a master shared memory region; an
+additional shared memory region for each of the subsystems supported by
+the environment (Locking, Logging, Memory Pool and Transaction); plus
+an additional shared memory region for each additional memory pool cache
+that is supported. Already existing regions with the same segment IDs
+will be removed. See <a href="../ref/env/region.html">Shared Memory
+Regions</a> for more information.
+<p>The intent behind this interface is twofold: first, without it,
+applications have no way to ensure that two Berkeley DB applications don't
+attempt to use the same segment IDs when creating different Berkeley DB
+environments.  Second, by using the same segment IDs each time the
+environment is created, previously created segments will be removed, and
+the set of segments on the system will not grow without bound.
+<p>The DbEnv.set_shm_key method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_shm_key interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_shm_key
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv.set_shm_key method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's base segment ID may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_shm_key", one or more whitespace characters,
+and the ID. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
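+<p>For illustration only, a sketch of creating a system-memory environment
+with a hypothetical base segment ID of 35 (exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Use the same base ID every time this environment is created so that
+// stale segments are reclaimed.
+dbenv.set_shm_key(35);
+dbenv.open("/var/dbenv",
+    Db.DB_CREATE | Db.DB_SYSTEM_MEM | Db.DB_INIT_MPOOL, 0);
+</pre>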
+<h1>Errors</h1>
+<p>The DbEnv.set_shm_key method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_shm_key method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_shm_key method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_tas_spins.html b/libdb/docs/api_java/env_set_tas_spins.html
new file mode 100644
index 0000000..709cf88
--- /dev/null
+++ b/libdb/docs/api_java/env_set_tas_spins.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tas_spins</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_tas_spins</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tas_spins(int tas_spins)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Specify that test-and-set mutexes should spin <b>tas_spins</b> times
+without blocking. The value defaults to 1 on uniprocessor systems and
+to 50 times the number of processors on multiprocessor systems.
+<p>The DbEnv.set_tas_spins method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_tas_spins interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_tas_spins method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's test-and-set spin count may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tas_spins", one or more whitespace characters,
+and the number of spins. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
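+<p>For illustration only, a sketch of raising the spin count on a
+hypothetical 4-processor machine (exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Spin 200 times (50 per processor) before blocking on a mutex.
+dbenv.set_tas_spins(200);
+</pre>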
+<h1>Errors</h1>
+<p>The DbEnv.set_tas_spins method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.set_tas_spins method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_tas_spins method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_timeout.html b/libdb/docs/api_java/env_set_timeout.html
new file mode 100644
index 0000000..a7f447e
--- /dev/null
+++ b/libdb/docs/api_java/env_set_timeout.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_timeout(long timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_timeout method sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+Db.DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+Db.DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> and
+<a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a> for more information.
+<p>The DbEnv.set_timeout method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_timeout interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_timeout method throws an exception that encapsulates a non-zero error value on
+failure.
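+<p>For illustration only, a sketch of setting a one-second lock timeout and
+a five-second transaction timeout (the values are hypothetical; timeouts
+are expressed in microseconds; exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+dbenv.set_timeout(1000000L, Db.DB_SET_LOCK_TIMEOUT);
+dbenv.set_timeout(5000000L, Db.DB_SET_TXN_TIMEOUT);
+</pre>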
+<h1>Errors</h1>
+<p>The DbEnv.set_timeout method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.set_timeout method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_timeout method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_tmp_dir.html b/libdb/docs/api_java/env_set_tmp_dir.html
new file mode 100644
index 0000000..2e71ad2
--- /dev/null
+++ b/libdb/docs/api_java/env_set_tmp_dir.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tmp_dir</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_tmp_dir</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tmp_dir(String dir)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>Set the path of a directory to be used as the location of temporary files.
+The files created to back in-memory access method databases will be
+created relative to this path. These temporary files can be quite large,
+depending on the size of the database.
+<p>If no directories are specified, the following alternatives are checked
+in the specified order. The first existing directory path is used for
+all temporary files.
+<p><ol>
+<p><li>The value of the environment variable <b>TMPDIR</b>.
+<li>The value of the environment variable <b>TEMP</b>.
+<li>The value of the environment variable <b>TMP</b>.
+<li>The value of the environment variable <b>TempFolder</b>.
+<li>The value returned by the GetTempPath interface.
+<li>The directory <b>/var/tmp</b>.
+<li>The directory <b>/usr/tmp</b>.
+<li>The directory <b>/temp</b>.
+<li>The directory <b>/tmp</b>.
+<li>The directory <b>C:/temp</b>.
+<li>The directory <b>C:/tmp</b>.
+</ol>
+<p>Note: environment variables are only checked if one of the
+<a href="../api_java/env_open.html#DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a> or <a href="../api_java/env_open.html#DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a> flags were
+specified.
+<p>Note: the GetTempPath interface is only checked on Win/32 platforms.
+<p>The DbEnv.set_tmp_dir method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_tmp_dir interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_tmp_dir
+must be consistent with the existing environment or corruption can
+occur.
+<p>The DbEnv.set_tmp_dir method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's temporary file directory may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tmp_dir", one or more whitespace characters,
+and the directory name. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
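+<p>For illustration only, a sketch of directing temporary files to a
+hypothetical scratch directory (exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Must be called before DbEnv.open.
+dbenv.set_tmp_dir("/scratch/dbtmp");
+dbenv.open("/var/dbenv", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+</pre>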
+<h1>Errors</h1>
+<p>The DbEnv.set_tmp_dir method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_tmp_dir method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_tmp_dir method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_tx_max.html b/libdb/docs/api_java/env_set_tx_max.html
new file mode 100644
index 0000000..010f523
--- /dev/null
+++ b/libdb/docs/api_java/env_set_tx_max.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tx_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_tx_max</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tx_max(int tx_max)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Set the maximum number of active transactions that are supported by the
+environment. This value bounds the size of backing shared memory regions.
+Note that child transactions must be counted as active until their
+ultimate parent commits or aborts.
+<p>When there are more than the specified number of concurrent transactions,
+calls to <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> will fail (until some active transactions
+complete). If no value is specified, a default value of 20 is used.
+<p>The DbEnv.set_tx_max method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_tx_max interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+If the database environment already exists when
+<a href="../api_java/env_open.html">DbEnv.open</a> is called, the information specified to DbEnv.set_tx_max
+will be ignored.
+<p>The DbEnv.set_tx_max method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's maximum number of active transactions may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_tx_max", one or more whitespace characters,
+and the number of transactions. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
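+<p>For illustration only, a sketch of allowing up to 100 concurrent
+transactions (the value is hypothetical; exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+// Sized before open; child transactions count as active until their
+// ultimate parent resolves.
+dbenv.set_tx_max(100);
+dbenv.open("/var/dbenv",
+    Db.DB_CREATE | Db.DB_INIT_TXN | Db.DB_INIT_LOCK |
+    Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+</pre>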
+<h1>Errors</h1>
+<p>The DbEnv.set_tx_max method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_tx_max method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_tx_max method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_tx_timestamp.html b/libdb/docs/api_java/env_set_tx_timestamp.html
new file mode 100644
index 0000000..81696e4
--- /dev/null
+++ b/libdb/docs/api_java/env_set_tx_timestamp.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_tx_timestamp</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_tx_timestamp</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_tx_timestamp(java.util.Date timestamp)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>Recover to the time specified by <b>timestamp</b> rather than to the most
+current possible date.
+Note that only the seconds (not the milliseconds) of the <b>timestamp</b>
+are used.
+<p>Once a database environment has been upgraded to a new version of Berkeley DB
+involving a log format change (see <a href="../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>), it is no longer possible to recover
+to a specific time before that upgrade.
+<p>The DbEnv.set_tx_timestamp method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_tx_timestamp interface may not be called after the <a href="../api_java/env_open.html">DbEnv.open</a>
+interface is called.
+<p>The DbEnv.set_tx_timestamp method throws an exception that encapsulates a non-zero error value on
+failure.
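+<p>For illustration only, a sketch of recovering to a fixed point in time
+(the date and environment home are hypothetical; exception handling is
+omitted):
+<p><pre>
+java.util.Date when =
+    new java.util.GregorianCalendar(2002, 0, 15, 12, 0, 0).getTime();
+DbEnv dbenv = new DbEnv(0);
+// Must be called before DbEnv.open with recovery requested.
+dbenv.set_tx_timestamp(when);
+dbenv.open("/var/dbenv",
+    Db.DB_CREATE | Db.DB_RECOVER | Db.DB_INIT_TXN | Db.DB_INIT_LOCK |
+    Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+</pre>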
+<h1>Errors</h1>
+<p>The DbEnv.set_tx_timestamp method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>It is not possible to recover to the specified time using the log files
+currently present in the environment.
+</dl>
+<p>The DbEnv.set_tx_timestamp method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_tx_timestamp method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_set_verbose.html b/libdb/docs/api_java/env_set_verbose.html
new file mode 100644
index 0000000..aba57fd
--- /dev/null
+++ b/libdb/docs/api_java/env_set_verbose.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_verbose</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_verbose</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_verbose(int which, boolean onoff);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_verbose method turns additional informational and
+debugging messages in the Berkeley DB message output on and off. If
+<b>onoff</b> is set to true, the additional messages are output.
+<p>The <b>which</b> parameter must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_VERB_CHKPOINT">Db.DB_VERB_CHKPOINT</a><dd>Display checkpoint location information when searching the log for
+checkpoints.
+<p><dt><a name="Db.DB_VERB_DEADLOCK">Db.DB_VERB_DEADLOCK</a><dd>Display additional information when doing deadlock detection.
+<p><dt><a name="Db.DB_VERB_RECOVERY">Db.DB_VERB_RECOVERY</a><dd>Display additional information when performing recovery.
+<p><dt><a name="Db.DB_VERB_REPLICATION">Db.DB_VERB_REPLICATION</a><dd>Display additional information when processing replication messages.
+<p><dt><a name="Db.DB_VERB_WAITSFOR">Db.DB_VERB_WAITSFOR</a><dd>Display the waits-for table when doing deadlock detection.
+</dl>
+<p>The DbEnv.set_verbose method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_verbose interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_verbose method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>The database environment's verbosity may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_verbose", one or more whitespace characters,
+and the interface <b>which</b> argument as a string; for example,
+"set_verbose DB_VERB_CHKPOINT". Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
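+<p>For illustration only, a sketch of enabling and later disabling deadlock
+diagnostics on a handle (exception handling is omitted):
+<p><pre>
+DbEnv dbenv = new DbEnv(0);
+dbenv.set_verbose(Db.DB_VERB_DEADLOCK, true);
+// ... run the deadlock-prone workload ...
+dbenv.set_verbose(Db.DB_VERB_DEADLOCK, false);
+</pre>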
+<h1>Errors</h1>
+<p>The DbEnv.set_verbose method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.set_verbose method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_verbose method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_strerror.html b/libdb/docs/api_java/env_strerror.html
new file mode 100644
index 0000000..839c950
--- /dev/null
+++ b/libdb/docs/api_java/env_strerror.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.strerror</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.strerror</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static String strerror(int errcode);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.strerror method returns an error message string corresponding
+to the error number <b>errcode</b>.  This interface is a superset of the
+ANSI C X3.159-1989 (ANSI C) <b>strerror</b>(3) interface. If the error number
+<b>errcode</b> is greater than or equal to 0, then the string returned by
+the system interface <b>strerror</b>(3) is returned. If the error
+number is less than 0, an error string appropriate to the corresponding
+Berkeley DB library error is returned. See
+<a href="../ref/program/errorret.html">Error returns to applications</a>
+for more information.
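+<p>For illustration only, a sketch of mapping error numbers to message text
+(the values shown are arbitrary):
+<p><pre>
+// A positive value is treated as a system errno, a negative value as a
+// Berkeley DB error return.
+System.out.println(DbEnv.strerror(2));            // e.g. "No such file or directory"
+System.out.println(DbEnv.strerror(Db.DB_NOTFOUND));
+</pre>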
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/env_version.html b/libdb/docs/api_java/env_version.html
new file mode 100644
index 0000000..3c80653
--- /dev/null
+++ b/libdb/docs/api_java/env_version.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.get_version_major</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.get_version_major</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static int get_version_major();
+public static int get_version_minor();
+public static int get_version_patch();
+public static String get_version_string();
+</pre></h3>
+<h1>Description</h1>
+<p>These methods return version information about the underlying Berkeley DB
+software. Berkeley DB is released with a major, minor, and patch number,
+which is returned by DbEnv.get_version_major,
+DbEnv.get_version_minor, and DbEnv.get_version_patch.
+A verbose version of this information, suitable for display, is returned
+by DbEnv.get_version_string.
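+<p>For illustration only, a sketch of logging the library version at startup:
+<p><pre>
+// No DbEnv handle is needed; these methods are static.
+System.out.println("Using " + DbEnv.get_version_string());
+System.out.println("Major release: " + DbEnv.get_version_major());
+</pre>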
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_list.html">Database Environments and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/except_class.html b/libdb/docs/api_java/except_class.html
new file mode 100644
index 0000000..8aeb028
--- /dev/null
+++ b/libdb/docs/api_java/except_class.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbException</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbException extends Exception {
+ public int get_errno();
+ public DbException(String s);
+ public DbException(String s, int errno);
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbException class and how it is used by
+the various Berkeley DB classes.
+<p>Most methods in the Berkeley DB classes throw an exception when an error occurs.
+A DbException object contains an informational string and an errno. The
+errno can be obtained using DbException.get_errno. Since DbException
+inherits from java.lang.Exception, the string portion is available using
+toString().
+<p>Some methods may return non-zero values without issuing an exception.
+This occurs in situations that are not normally considered an error, but
+when some informational status is returned. For example, <a href="../api_java/db_get.html">Db.get</a>
+returns Db.DB_NOTFOUND when a requested key does not appear in the database.
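+<p>For illustration only, a sketch of the usual handling pattern (the
+environment home "/var/dbenv" is hypothetical):
+<p><pre>
+try {
+    DbEnv dbenv = new DbEnv(0);
+    dbenv.open("/var/dbenv", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+} catch (DbException dbe) {
+    // The errno distinguishes system errors from Berkeley DB returns.
+    System.err.println("error " + dbe.get_errno() + ": " + dbe);
+} catch (java.io.FileNotFoundException fnfe) {
+    System.err.println("environment home not found: " + fnfe);
+}
+</pre>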
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/java_pindex.html b/libdb/docs/api_java/java_pindex.html
new file mode 100644
index 0000000..1cb5313
--- /dev/null
+++ b/libdb/docs/api_java/java_pindex.html
@@ -0,0 +1,671 @@
+<html>
+<head>
+<title>Berkeley DB: Java Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Java Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right>configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#4">1.85</a> API compatibility</td></tr>
+<tr><td align=right>building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#6">1.85</a> databases</td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.3/intro.html#2">3.3</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.0/intro.html#2">4.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.1/intro.html#2">4.1</a></td></tr>
+<tr><td align=right>selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#2">access</a> method FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/tune.html#2">access</a> method tuning</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right>data </td><td><a href="../ref/am_misc/align.html#2">alignment</a></td></tr>
+<tr><td align=right>programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/archival.html#4">backup</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#4">buffer</a> pool subsystem</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_java/env_set_flags.html#4">buffering</a> for database files</td></tr>
+<tr><td align=right>turn off system </td><td><a href="../api_java/env_set_flags.html#5">buffering</a> for log files</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#3">building</a> for QNX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/introae.html#2">building</a> for VxWorks AE</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#5">C++</a> API</td></tr>
+<tr><td align=right>flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right>introduction to the memory </td><td><a href="../ref/mp/intro.html#3">cache</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right>utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right>database page </td><td><a href="../api_java/db_set_flags.html#3">checksum</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/faq.html#3">compaction</a></td></tr>
+<tr><td align=right>specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right>changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right></td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right>database environment </td><td><a href="../ref/env/db_config.html#3">configuration</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right>salvaging </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right>closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right>deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right>duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right>retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right>storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_java/dbt_class.html#data">data</a></td></tr>
+<tr><td align=right>utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right>utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_class.html#2">Db</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/region.html#2">__db.001</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_associate.html#2">Db.associate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_class.html#2">Dbc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_close.html#2">Dbc.close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_count.html#2">Dbc.count</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_del.html#2">Dbc.del</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_dup.html#2">Dbc.dup</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_get.html#2">Dbc.get</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_close.html#2">Db.close</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/db_config.html#2">DB_CONFIG</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbc_put.html#2">Dbc.put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_cursor.html#2">Db.cursor</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_AFTER">Db.DB_AFTER</a></td></tr>
+<tr><td align=right>Db.verify </td><td><a href="../api_java/db_verify.html#Db.DB_AGGRESSIVE">Db.DB_AGGRESSIVE</a></td></tr>
+<tr><td align=right>Db.put </td><td><a href="../api_java/db_put.html#Db.DB_APPEND">Db.DB_APPEND</a></td></tr>
+<tr><td align=right>DbEnv.log_archive </td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_ABS">Db.DB_ARCH_ABS</a></td></tr>
+<tr><td align=right>DbEnv.log_archive </td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_DATA">Db.DB_ARCH_DATA</a></td></tr>
+<tr><td align=right>DbEnv.log_archive </td><td><a href="../api_java/log_archive.html#Db.DB_ARCH_LOG">Db.DB_ARCH_LOG</a></td></tr>
+<tr><td align=right>Db.associate </td><td><a href="../api_java/db_associate.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db.del </td><td><a href="../api_java/db_del.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db.put </td><td><a href="../api_java/db_put.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Db.truncate </td><td><a href="../api_java/db_truncate.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv.dbremove </td><td><a href="../api_java/env_dbremove.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv.dbrename </td><td><a href="../api_java/env_dbrename.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_AUTO_COMMIT">Db.DB_AUTO_COMMIT</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_BEFORE">Db.DB_BEFORE</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_BTREE">Db.DB_BTREE</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_CDB_ALLDB">Db.DB_CDB_ALLDB</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_CHKSUM_SHA1">Db.DB_CHKSUM_SHA1</a></td></tr>
+<tr><td align=right>DbEnv </td><td><a href="../api_java/env_class.html#Db.DB_CLIENT">Db.DB_CLIENT</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_CONSUME">Db.DB_CONSUME</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_CONSUME_WAIT">Db.DB_CONSUME_WAIT</a></td></tr>
+<tr><td align=right>Db.associate </td><td><a href="../api_java/db_associate.html#Db.DB_CREATE">Db.DB_CREATE</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_CREATE">Db.DB_CREATE</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_CREATE">Db.DB_CREATE</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_CURRENT">Db.DB_CURRENT</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_PARTIAL">Db.DB_DBT_PARTIAL</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a></td></tr>
+<tr><td align=right>Dbt </td><td><a href="../api_java/dbt_class.html#Db.DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_DIRECT_DB">Db.DB_DIRECT_DB</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_DIRECT_LOG">Db.DB_DIRECT_LOG</a></td></tr>
+<tr><td align=right>Db.cursor </td><td><a href="../api_java/db_cursor.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db.join </td><td><a href="../api_java/db_join.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin </td><td><a href="../api_java/txn_begin.html#Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_DUP">Db.DB_DUP</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_DUPSORT">Db.DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db.upgrade </td><td><a href="../api_java/db_upgrade.html#Db.DB_DUPSORT">Db.DB_DUPSORT</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_ENCRYPT">Db.DB_ENCRYPT</a></td></tr>
+<tr><td align=right>Db.set_encrypt </td><td><a href="../api_java/db_set_encrypt.html#Db.DB_ENCRYPT_AES">Db.DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right>DbEnv.set_encrypt </td><td><a href="../api_java/env_set_encrypt.html#Db.DB_ENCRYPT_AES">Db.DB_ENCRYPT_AES</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_EXCL">Db.DB_EXCL</a></td></tr>
+<tr><td align=right>Db.stat </td><td><a href="../api_java/db_stat.html#Db.DB_FAST_STAT">Db.DB_FAST_STAT</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_FIRST">Db.DB_FIRST</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_FIRST">Db.DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv.txn_recover </td><td><a href="../api_java/txn_recover.html#Db.DB_FIRST">Db.DB_FIRST</a></td></tr>
+<tr><td align=right>DbEnv.log_put </td><td><a href="../api_java/log_put.html#Db.DB_FLUSH">Db.DB_FLUSH</a></td></tr>
+<tr><td align=right>DbEnv.remove </td><td><a href="../api_java/env_remove.html#Db.DB_FORCE">Db.DB_FORCE</a></td></tr>
+<tr><td align=right>DbEnv.txn_checkpoint </td><td><a href="../api_java/txn_checkpoint.html#Db.DB_FORCE">Db.DB_FORCE</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_GET_BOTH">Db.DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_GET_BOTH">Db.DB_GET_BOTH</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_GET_BOTH_RANGE">Db.DB_GET_BOTH_RANGE</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_GET_RECNO">Db.DB_GET_RECNO</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_HASH">Db.DB_HASH</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_INIT_CDB">Db.DB_INIT_CDB</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_INIT_LOCK">Db.DB_INIT_LOCK</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_INIT_LOG">Db.DB_INIT_LOG</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_INIT_MPOOL">Db.DB_INIT_MPOOL</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_INIT_TXN">Db.DB_INIT_TXN</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_JOINENV">Db.DB_JOINENV</a></td></tr>
+<tr><td align=right>Db.join </td><td><a href="../api_java/db_join.html#Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_JOIN_ITEM">Db.DB_JOIN_ITEM</a></td></tr>
+<tr><td align=right>Db.join </td><td><a href="../api_java/db_join.html#Db.DB_JOIN_NOSORT">Db.DB_JOIN_NOSORT</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_KEYFIRST">Db.DB_KEYFIRST</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_KEYLAST">Db.DB_KEYLAST</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_LAST">Db.DB_LAST</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_LAST">Db.DB_LAST</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_DEFAULT">Db.DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_DEFAULT">Db.DB_LOCK_DEFAULT</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_LOCKDOWN">Db.DB_LOCKDOWN</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_EXPIRE">Db.DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_EXPIRE">Db.DB_LOCK_EXPIRE</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_GET">Db.DB_LOCK_GET</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_GET_TIMEOUT">Db.DB_LOCK_GET_TIMEOUT</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_IREAD">Db.DB_LOCK_IREAD</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_IWR">Db.DB_LOCK_IWR</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_IWRITE">Db.DB_LOCK_IWRITE</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_MAXLOCKS">Db.DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_MAXLOCKS">Db.DB_LOCK_MAXLOCKS</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_MINLOCKS">Db.DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_MINLOCKS">Db.DB_LOCK_MINLOCKS</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_MINWRITE">Db.DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_MINWRITE">Db.DB_LOCK_MINWRITE</a></td></tr>
+<tr><td align=right>DbEnv.lock_get </td><td><a href="../api_java/lock_get.html#Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_OLDEST">Db.DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_OLDEST">Db.DB_LOCK_OLDEST</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_PUT">Db.DB_LOCK_PUT</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_PUT_ALL">Db.DB_LOCK_PUT_ALL</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_PUT_OBJ">Db.DB_LOCK_PUT_OBJ</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_RANDOM">Db.DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_RANDOM">Db.DB_LOCK_RANDOM</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_READ">Db.DB_LOCK_READ</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_TIMEOUT">Db.DB_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#Db.DB_LOCK_WRITE">Db.DB_LOCK_WRITE</a></td></tr>
+<tr><td align=right>DbEnv.set_lk_detect </td><td><a href="../api_java/env_set_lk_detect.html#Db.DB_LOCK_YOUNGEST">Db.DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right>DbEnv.lock_detect </td><td><a href="../api_java/lock_detect.html#Db.DB_LOCK_YOUNGEST">Db.DB_LOCK_YOUNGEST</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_MULTIPLE">Db.DB_MULTIPLE</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_MULTIPLE">Db.DB_MULTIPLE</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_MULTIPLE_KEY">Db.DB_MULTIPLE_KEY</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT">Db.DB_NEXT</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_NEXT">Db.DB_NEXT</a></td></tr>
+<tr><td align=right>DbEnv.txn_recover </td><td><a href="../api_java/txn_recover.html#Db.DB_NEXT">Db.DB_NEXT</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT_DUP">Db.DB_NEXT_DUP</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_NEXT_NODUP">Db.DB_NEXT_NODUP</a></td></tr>
+<tr><td align=right>Db.put </td><td><a href="../api_java/db_put.html#Db.DB_NODUPDATA">Db.DB_NODUPDATA</a></td></tr>
+<tr><td align=right>Dbc.put </td><td><a href="../api_java/dbc_put.html#Db.DB_NODUPDATA">Db.DB_NODUPDATA</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_NOLOCKING">Db.DB_NOLOCKING</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_NOMMAP">Db.DB_NOMMAP</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_NOMMAP">Db.DB_NOMMAP</a></td></tr>
+<tr><td align=right>Db.verify </td><td><a href="../api_java/db_verify.html#Db.DB_NOORDERCHK">Db.DB_NOORDERCHK</a></td></tr>
+<tr><td align=right>Db.put </td><td><a href="../api_java/db_put.html#Db.DB_NOOVERWRITE">Db.DB_NOOVERWRITE</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_NOPANIC">Db.DB_NOPANIC</a></td></tr>
+<tr><td align=right>DbEnv.set_rpc_server </td><td><a href="../api_java/env_set_rpc_server.html#Db.DB_NOSERVER">Db.DB_NOSERVER</a></td></tr>
+<tr><td align=right>DbEnv.set_rpc_server </td><td><a href="../api_java/env_set_rpc_server.html#Db.DB_NOSERVER_HOME">Db.DB_NOSERVER_HOME</a></td></tr>
+<tr><td align=right>DbEnv.set_rpc_server </td><td><a href="../api_java/env_set_rpc_server.html#Db.DB_NOSERVER_ID">Db.DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>Db.close </td><td><a href="../api_java/db_close.html#Db.DB_NOSYNC">Db.DB_NOSYNC</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db.upgrade </td><td><a href="../api_java/db_upgrade.html#Db.DB_OLD_VERSION">Db.DB_OLD_VERSION</a></td></tr>
+<tr><td align=right>Db.verify </td><td><a href="../api_java/db_verify.html#Db.DB_ORDERCHKONLY">Db.DB_ORDERCHKONLY</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_OVERWRITE">Db.DB_OVERWRITE</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_PANIC_ENVIRONMENT">Db.DB_PANIC_ENVIRONMENT</a></td></tr>
+<tr><td align=right>Dbc.dup </td><td><a href="../api_java/dbc_dup.html#Db.DB_POSITION">Db.DB_POSITION</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_PREV">Db.DB_PREV</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_PREV">Db.DB_PREV</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_PREV_NODUP">Db.DB_PREV_NODUP</a></td></tr>
+<tr><td align=right>Db.verify </td><td><a href="../api_java/db_verify.html#Db.DB_PRINTABLE">Db.DB_PRINTABLE</a></td></tr>
+<tr><td align=right>Db.set_cache_priority </td><td><a href="../api_java/db_set_cache_priority.html#Db.DB_PRIORITY_DEFAULT">Db.DB_PRIORITY_DEFAULT</a></td></tr>
+<tr><td align=right>Db.set_cache_priority </td><td><a href="../api_java/db_set_cache_priority.html#Db.DB_PRIORITY_HIGH">Db.DB_PRIORITY_HIGH</a></td></tr>
+<tr><td align=right>Db.set_cache_priority </td><td><a href="../api_java/db_set_cache_priority.html#Db.DB_PRIORITY_LOW">Db.DB_PRIORITY_LOW</a></td></tr>
+<tr><td align=right>Db.set_cache_priority </td><td><a href="../api_java/db_set_cache_priority.html#Db.DB_PRIORITY_VERY_HIGH">Db.DB_PRIORITY_VERY_HIGH</a></td></tr>
+<tr><td align=right>Db.set_cache_priority </td><td><a href="../api_java/db_set_cache_priority.html#Db.DB_PRIORITY_VERY_LOW">Db.DB_PRIORITY_VERY_LOW</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_PRIVATE">Db.DB_PRIVATE</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_QUEUE">Db.DB_QUEUE</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_RDONLY">Db.DB_RDONLY</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_RECNO">Db.DB_RECNO</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_RECNUM">Db.DB_RECNUM</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_RECOVER">Db.DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv.set_feedback </td><td><a href="../api_java/env_set_feedback.html#Db.DB_RECOVER">Db.DB_RECOVER</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_RECOVER_FATAL">Db.DB_RECOVER_FATAL</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_REGION_INIT">Db.DB_REGION_INIT</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_RENUMBER">Db.DB_RENUMBER</a></td></tr>
+<tr><td align=right>DbEnv.rep_start </td><td><a href="../api_java/rep_start.html#Db.DB_REP_CLIENT">Db.DB_REP_CLIENT</a></td></tr>
+<tr><td align=right>DbEnv.rep_start </td><td><a href="../api_java/rep_start.html#Db.DB_REP_LOGSONLY">Db.DB_REP_LOGSONLY</a></td></tr>
+<tr><td align=right>DbEnv.rep_start </td><td><a href="../api_java/rep_start.html#Db.DB_REP_MASTER">Db.DB_REP_MASTER</a></td></tr>
+<tr><td align=right>DbEnv.set_rep_transport </td><td><a href="../api_java/rep_transport.html#Db.DB_REP_PERMANENT">Db.DB_REP_PERMANENT</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_REVSPLITOFF">Db.DB_REVSPLITOFF</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Db.join </td><td><a href="../api_java/db_join.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_RMW">Db.DB_RMW</a></td></tr>
+<tr><td align=right>Db.verify </td><td><a href="../api_java/db_verify.html#Db.DB_SALVAGE">Db.DB_SALVAGE</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_SET">Db.DB_SET</a></td></tr>
+<tr><td align=right>DbLogc.get </td><td><a href="../api_java/logc_get.html#Db.DB_SET">Db.DB_SET</a></td></tr>
+<tr><td align=right>DbEnv.set_timeout </td><td><a href="../api_java/env_set_timeout.html#Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>DbTxn.set_timeout </td><td><a href="../api_java/txn_set_timeout.html#Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_SET_RANGE">Db.DB_SET_RANGE</a></td></tr>
+<tr><td align=right>Db.get </td><td><a href="../api_java/db_get.html#Db.DB_SET_RECNO">Db.DB_SET_RECNO</a></td></tr>
+<tr><td align=right>Dbc.get </td><td><a href="../api_java/dbc_get.html#Db.DB_SET_RECNO">Db.DB_SET_RECNO</a></td></tr>
+<tr><td align=right>DbEnv.set_timeout </td><td><a href="../api_java/env_set_timeout.html#Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>DbTxn.set_timeout </td><td><a href="../api_java/txn_set_timeout.html#Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a></td></tr>
+<tr><td align=right>Db.set_flags </td><td><a href="../api_java/db_set_flags.html#Db.DB_SNAPSHOT">Db.DB_SNAPSHOT</a></td></tr>
+<tr><td align=right>DbEnv.lock_stat </td><td><a href="../api_java/lock_stat.html#Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv.log_stat </td><td><a href="../api_java/log_stat.html#Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv.memp_stat </td><td><a href="../api_java/memp_stat.html#Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv.rep_stat </td><td><a href="../api_java/rep_stat.html#Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv.txn_stat </td><td><a href="../api_java/txn_stat.html#Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_SYSTEM_MEM">Db.DB_SYSTEM_MEM</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_associate.html#3">DB_DBT_APPMALLOC</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_THREAD">Db.DB_THREAD</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_THREAD">Db.DB_THREAD</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_TRUNCATE">Db.DB_TRUNCATE</a></td></tr>
+<tr><td align=right>DbEnv.set_app_dispatch </td><td><a href="../api_java/env_set_app_dispatch.html#Db.DB_TXN_ABORT">Db.DB_TXN_ABORT</a></td></tr>
+<tr><td align=right>DbEnv.set_app_dispatch </td><td><a href="../api_java/env_set_app_dispatch.html#Db.DB_TXN_APPLY">Db.DB_TXN_APPLY</a></td></tr>
+<tr><td align=right>DbEnv.set_app_dispatch </td><td><a href="../api_java/env_set_app_dispatch.html#Db.DB_TXN_BACKWARD_ROLL">Db.DB_TXN_BACKWARD_ROLL</a></td></tr>
+<tr><td align=right>DbEnv.set_app_dispatch </td><td><a href="../api_java/env_set_app_dispatch.html#Db.DB_TXN_FORWARD_ROLL">Db.DB_TXN_FORWARD_ROLL</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin </td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbTxn.commit </td><td><a href="../api_java/txn_commit.html#Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin </td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_NOWAIT">Db.DB_TXN_NOWAIT</a></td></tr>
+<tr><td align=right>DbEnv.set_app_dispatch </td><td><a href="../api_java/env_set_app_dispatch.html#Db.DB_TXN_PRINT">Db.DB_TXN_PRINT</a></td></tr>
+<tr><td align=right>DbEnv.txn_begin </td><td><a href="../api_java/txn_begin.html#Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbTxn.commit </td><td><a href="../api_java/txn_commit.html#Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_TXN_WRITE_NOSYNC">Db.DB_TXN_WRITE_NOSYNC</a></td></tr>
+<tr><td align=right>Db.open </td><td><a href="../api_java/db_open.html#Db.DB_UNKNOWN">Db.DB_UNKNOWN</a></td></tr>
+<tr><td align=right>Db.set_feedback </td><td><a href="../api_java/db_set_feedback.html#Db.DB_UPGRADE">Db.DB_UPGRADE</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv.remove </td><td><a href="../api_java/env_remove.html#Db.DB_USE_ENVIRON">Db.DB_USE_ENVIRON</a></td></tr>
+<tr><td align=right>DbEnv.open </td><td><a href="../api_java/env_open.html#Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv.remove </td><td><a href="../api_java/env_remove.html#Db.DB_USE_ENVIRON_ROOT">Db.DB_USE_ENVIRON_ROOT</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose </td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_CHKPOINT">Db.DB_VERB_CHKPOINT</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose </td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_DEADLOCK">Db.DB_VERB_DEADLOCK</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose </td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_RECOVERY">Db.DB_VERB_RECOVERY</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose </td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_REPLICATION">Db.DB_VERB_REPLICATION</a></td></tr>
+<tr><td align=right>DbEnv.set_verbose </td><td><a href="../api_java/env_set_verbose.html#Db.DB_VERB_WAITSFOR">Db.DB_VERB_WAITSFOR</a></td></tr>
+<tr><td align=right>Db.set_feedback </td><td><a href="../api_java/db_set_feedback.html#Db.DB_VERIFY">Db.DB_VERIFY</a></td></tr>
+<tr><td align=right>Db.cursor </td><td><a href="../api_java/db_cursor.html#Db.DB_WRITECURSOR">Db.DB_WRITECURSOR</a></td></tr>
+<tr><td align=right>Db </td><td><a href="../api_java/db_class.html#Db.DB_XA_CREATE">Db.DB_XA_CREATE</a></td></tr>
+<tr><td align=right>DbEnv.set_flags </td><td><a href="../api_java/env_set_flags.html#Db.DB_YIELDCPU">Db.DB_YIELDCPU</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/deadlock_class.html#2">DbDeadlockException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_del.html#2">Db.del</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_associate.html#4">DB_DONOTINDEX</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_transport.html#3">DB_EID_BROADCAST</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_class.html#2">DbEnv</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_close.html#2">DbEnv.close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_dbremove.html#2">DbEnv.dbremove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_dbrename.html#2">DbEnv.dbrename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_err.html#2">DbEnv.err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_version.html#2">DbEnv.get_version_major</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_detect.html#2">DbEnv.lock_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_get.html#2">DbEnv.lock_get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_id.html#2">DbEnv.lock_id</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_id_free.html#2">DbEnv.lock_id_free</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_put.html#2">DbEnv.lock_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_stat.html#2">DbEnv.lock_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_vec.html#2">DbEnv.lock_vec</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_archive.html#2">DbEnv.log_archive</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_compare.html#2">DbEnv.log_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_cursor.html#2">DbEnv.log_cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_file.html#2">DbEnv.log_file</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_flush.html#2">DbEnv.log_flush</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_put.html#2">DbEnv.log_put</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/log_stat.html#2">DbEnv.log_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_register.html#2">DbEnv.memp_register</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_stat.html#2">DbEnv.memp_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_sync.html#2">DbEnv.memp_sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_trickle.html#2">DbEnv.memp_trickle</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_open.html#2">DbEnv.open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_remove.html#2">DbEnv.remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_elect.html#2">DbEnv.rep_elect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_message.html#2">DbEnv.rep_process_message</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_start.html#2">DbEnv.rep_start</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_stat.html#2">DbEnv.rep_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_app_dispatch.html#2">DbEnv.set_app_dispatch</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_cachesize.html#2">DbEnv.set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_data_dir.html#2">DbEnv.set_data_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_encrypt.html#2">DbEnv.set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_errcall.html#2">DbEnv.set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_error_stream.html#2">DbEnv.set_error_stream</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_errpfx.html#2">DbEnv.set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_feedback.html#2">DbEnv.set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_flags.html#2">DbEnv.set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lg_bsize.html#2">DbEnv.set_lg_bsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lg_dir.html#2">DbEnv.set_lg_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lg_max.html#2">DbEnv.set_lg_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lg_regionmax.html#2">DbEnv.set_lg_regionmax</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lk_conflicts.html#2">DbEnv.set_lk_conflicts</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lk_detect.html#2">DbEnv.set_lk_detect</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lk_max_lockers.html#2">DbEnv.set_lk_max_lockers</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lk_max_locks.html#2">DbEnv.set_lk_max_locks</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_lk_max_objects.html#2">DbEnv.set_lk_max_objects</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_mp_mmapsize.html#2">DbEnv.set_mp_mmapsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_limit.html#2">DbEnv.set_rep_limit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_transport.html#2">DbEnv.set_rep_transport</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_rpc_server.html#2">DbEnv.set_rpc_server</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_shm_key.html#2">DbEnv.set_shm_key</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_tas_spins.html#2">DbEnv.set_tas_spins</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_timeout.html#2">DbEnv.set_timeout</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_tmp_dir.html#2">DbEnv.set_tmp_dir</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_tx_max.html#2">DbEnv.set_tx_max</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_tx_timestamp.html#2">DbEnv.set_tx_timestamp</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_verbose.html#2">DbEnv.set_verbose</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_strerror.html#2">DbEnv.strerror</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_begin.html#2">DbEnv.txn_begin</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_checkpoint.html#2">DbEnv.txn_checkpoint</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_recover.html#2">DbEnv.txn_recover</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_stat.html#2">DbEnv.txn_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_err.html#2">Db.err</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/except_class.html#2">DbException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_fd.html#2">Db.fd</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_get.html#2">Db.get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_get_byteswapped.html#2">Db.get_byteswapped</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_get_type.html#2">Db.get_type</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_join.html#2">Db.join</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEXIST">DB_KEYEXIST</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_key_range.html#2">Db.key_range</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lock_class.html#2">DbLock</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#4">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lockng_class.html#2">DbLockNotGrantedException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/logc_class.html#2">DbLogc</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/logc_close.html#2">DbLogc.close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/logc_get.html#2">DbLogc.get</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/lsn_class.html#2">DbLsn</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_class.html#2">DbMemoryException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/mempfile_class.html#2">DbMpoolFile</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_fclose.html#2">DbMpoolFile.close</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_fopen.html#2">DbMpoolFile.open</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/memp_fsync.html#2">DbMpoolFile.sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbt_bulk_class.html#2">DbMultipleDataIterator</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_rpc_server.html#3">DB_NOSERVER</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_rpc_server.html#4">DB_NOSERVER_ID</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_open.html#2">Db.open</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_put.html#2">Db.put</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_remove.html#2">Db.remove</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_rename.html#2">Db.rename</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/rep_elect.html#3">DB_REP_UNAVAIL</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/runrec_class.html#2">DbRunRecoveryException</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_append_recno.html#2">Db.set_append_recno</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_bt_compare.html#2">Db.set_bt_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_bt_minkey.html#2">Db.set_bt_minkey</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_bt_prefix.html#2">Db.set_bt_prefix</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_cache_priority.html#2">Db.set_cache_priority</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_cachesize.html#2">Db.set_cachesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_dup_compare.html#2">Db.set_dup_compare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_encrypt.html#2">Db.set_encrypt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_errcall.html#2">Db.set_errcall</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_error_stream.html#2">Db.set_error_stream</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_errpfx.html#2">Db.set_errpfx</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_feedback.html#2">Db.set_feedback</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_flags.html#2">Db.set_flags</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_h_ffactor.html#2">Db.set_h_ffactor</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_h_hash.html#2">Db.set_h_hash</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_h_nelem.html#2">Db.set_h_nelem</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_lorder.html#2">Db.set_lorder</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_pagesize.html#2">Db.set_pagesize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_q_extentsize.html#2">Db.set_q_extentsize</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_re_delim.html#2">Db.set_re_delim</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_re_len.html#2">Db.set_re_len</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_re_pad.html#2">Db.set_re_pad</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_re_source.html#2">Db.set_re_source</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_stat.html#2">Db.stat</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_sync.html#2">Db.sync</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbt_class.html#2">Dbt</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_truncate.html#2">Db.truncate</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_class.html#2">DbTxn</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_abort.html#2">DbTxn.abort</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_commit.html#2">DbTxn.commit</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_discard.html#2">DbTxn.discard</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_id.html#2">DbTxn.id</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_prepare.html#2">DbTxn.prepare</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_set_timeout.html#2">DbTxn.set_timeout</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_upgrade.html#2">Db.upgrade</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_verify.html#2">Db.verify</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_verify.html#3">DB_VERIFY_BAD</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/txn_prepare.html#3">DB_XIDDATASIZE</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right>utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/debug/intro.html#2">debugging</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#4">degrees</a> of isolation</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#4">dirty</a> reads</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-shared">--disable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-static">--disable-static</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#2">Distributed</a> Transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#5">double</a> buffering</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_flags.html#5">duplicate</a> data items</td></tr>
+<tr><td align=right>sorted </td><td><a href="../api_java/db_set_flags.html#6">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/embedix.html#2">Embedix</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#3">emptying</a> a database</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right>database </td><td><a href="../api_java/db_set_flags.html#4">encryption</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/encrypt.html#2">encryption</a></td></tr>
+<tr><td align=right>turn off access to a database </td><td><a href="../api_java/env_set_flags.html#9">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/faq.html#2">environment</a> FAQ</td></tr>
+<tr><td align=right>fault database </td><td><a href="../api_java/env_set_flags.html#10">environment</a> in during open</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right>use </td><td><a href="../api_java/env_open.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>use </td><td><a href="../api_java/env_remove.html#3">environment</a> variables in naming</td></tr>
+<tr><td align=right>introduction to database </td><td><a href="../ref/env/intro.html#2">environments</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#2">equality</a> join</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/error.html#2">error</a> handling</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right>selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/hotfail.html#2">failover</a></td></tr>
+<tr><td align=right>Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>configuring without large </td><td><a href="../ref/build_unix/conf.html#8">file</a> support</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>returning pages to the </td><td><a href="../ref/am_misc/faq.html#4">filesystem</a></td></tr>
+<tr><td align=right>recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right>remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right>page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right>secondary </td><td><a href="../ref/am/second.html#3">indices</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right>degrees of </td><td><a href="../ref/am_misc/stability.html#5">isolation</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#7">Java</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right>equality </td><td><a href="../ref/am/join.html#3">join</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/dbt_class.html#3">key/data</a> pairs</td></tr>
+<tr><td align=right>retrieved </td><td><a href="../ref/am_misc/perm.html#3">key/data</a> permanence</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right>changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#lock">lock</a></td></tr>
+<tr><td align=right>standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right>ignore </td><td><a href="../api_java/env_set_flags.html#6">locking</a></td></tr>
+<tr><td align=right>page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right>two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right>Berkeley DB Transactional Data Store </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>configure </td><td><a href="../api_java/env_set_flags.html#3">locking</a> for Berkeley DB Concurrent Data Store</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/page.html#3">locking</a> granularity</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/lock/intro.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right>sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right>utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/log/intro.html#2">logging</a> subsystem</td></tr>
+<tr><td align=right>retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#3">logical</a> record number</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/macosx.html#2">Mac</a> OS X</td></tr>
+<tr><td align=right>turn off database file </td><td><a href="../api_java/env_set_flags.html#7">memory</a> mapping</td></tr>
+<tr><td align=right></td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#2">memory</a> pool subsystem</td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#mode">mode</a></td></tr>
+<tr><td align=right>Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right>file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#4">natural</a> join</td></tr>
+<tr><td align=right>retrieving Btree records by logical record </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#obj">obj</a></td></tr>
+<tr><td align=right>DbEnv.lock_vec </td><td><a href="../api_java/lock_vec.html#op">op</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right>ignore database environment </td><td><a href="../api_java/env_set_flags.html#8">panic</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right></td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right>retrieved key/data </td><td><a href="../ref/am_misc/perm.html#2">permanence</a></td></tr>
+<tr><td align=right>task/thread </td><td><a href="../ref/program/faq.html#2">priority</a></td></tr>
+<tr><td align=right>Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#5">QNX</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right>dirty </td><td><a href="../ref/transapp/read.html#3">reads</a></td></tr>
+<tr><td align=right>accessing Btree records by </td><td><a href="../api_java/db_set_flags.html#7">record</a> number</td></tr>
+<tr><td align=right>logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right>managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right>logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right></td><td><a href="../api_java/db_set_flags.html#10">renumbering</a> records in Recno databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#2">repeatable</a> read</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rep/intro.html#2">replication</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/xa_intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/get_bulk.html#2">retrieving</a> records in bulk</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right>turn off </td><td><a href="../api_java/db_set_flags.html#8">reverse</a> splits in Btree databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right>configuring a </td><td><a href="../ref/build_unix/conf.html#9">RPC</a> client/server</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rpc/intro.html#2">rpc</a> client/server</td></tr>
+<tr><td align=right>utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/faq.html#2">RPC</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/rpm.html#2">RPM</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right>Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/second.html#2">secondary</a> indices</td></tr>
+<tr><td align=right></td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#10">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right>turn off reverse </td><td><a href="../api_java/db_set_flags.html#9">splits</a> in Btree databases</td></tr>
+<tr><td align=right>cursor </td><td><a href="../ref/am_misc/stability.html#3">stability</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#11">static</a> libraries</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right>utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right>configure for </td><td><a href="../api_java/env_set_flags.html#13">stress</a> testing</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right>loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right>using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../api_java/env_set_tmp_dir.html#3">temporary</a> files</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right>pre-loading </td><td><a href="../api_java/db_set_flags.html#11">text</a> files into Recno databases</td></tr>
+<tr><td align=right>loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right>dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right>building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right>lock </td><td><a href="../ref/lock/timeout.html#2">timeouts</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/lock/timeout.html#3">timeouts</a></td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_java/env_set_flags.html#11">transaction</a> commit</td></tr>
+<tr><td align=right>turn off synchronous </td><td><a href="../api_java/env_set_flags.html#12">transaction</a> commit</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/faq.html#2">transaction</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/build.html#2">Transaction</a> Manager</td></tr>
+<tr><td align=right>administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/txn/intro.html#2">transaction</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/tune.html#2">transaction</a> tuning</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right>nested </td><td><a href="../ref/transapp/nested.html#2">transactions</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#2">truncating</a> a database</td></tr>
+<tr><td align=right>access method </td><td><a href="../ref/am_misc/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/transapp/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>simple </td><td><a href="../ref/simple_tut/intro.html#2">tutorial</a></td></tr>
+<tr><td align=right>configuring Berkeley DB with the </td><td><a href="../ref/xa/xa_config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#4">UNIX</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right>configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right>running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-embedix=DIR">--with-embedix=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-mutex=MUTEX">--with-mutex=MUTEX</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-rpm=DIR">--with-rpm=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename=NAME</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/faq.html#3">XA</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/xa_intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_class.html b/libdb/docs/api_java/lock_class.html
new file mode 100644
index 0000000..db3f203
--- /dev/null
+++ b/libdb/docs/api_java/lock_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLock</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLock extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The locking interfaces for the Berkeley DB database environment are methods
+of the <a href="../api_java/env_class.html">DbEnv</a> handle. The DbLock object is the handle
+for a single lock, and has no methods of its own.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, DbLock
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_detect.html b/libdb/docs/api_java/lock_detect.html
new file mode 100644
index 0000000..1e305e3
--- /dev/null
+++ b/libdb/docs/api_java/lock_detect.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_detect</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int lock_detect(int flags, int atype)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_detect method runs one iteration of the deadlock detector.
+The deadlock detector traverses the lock table and marks one of the
+participating lock requesters for rejection in each deadlock it finds.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The <b>atype</b> parameter specifies which lock request(s) to reject.
+It must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_DEFAULT">Db.DB_LOCK_DEFAULT</a><dd>Use whatever lock policy was specified when the database environment
+was created. If no lock policy has yet been specified, set the lock
+policy to Db.DB_LOCK_RANDOM.
+<dt><a name="Db.DB_LOCK_EXPIRE">Db.DB_LOCK_EXPIRE</a><dd>Reject lock requests which have timed out. No other deadlock detection
+is performed.
+<dt><a name="Db.DB_LOCK_MAXLOCKS">Db.DB_LOCK_MAXLOCKS</a><dd>Reject the lock request for the locker ID with the greatest number of
+locks.
+<dt><a name="Db.DB_LOCK_MINLOCKS">Db.DB_LOCK_MINLOCKS</a><dd>Reject the lock request for the locker ID with the fewest number of
+locks.
+<dt><a name="Db.DB_LOCK_MINWRITE">Db.DB_LOCK_MINWRITE</a><dd>Reject the lock request for the locker ID with the fewest number of
+write locks.
+<dt><a name="Db.DB_LOCK_OLDEST">Db.DB_LOCK_OLDEST</a><dd>Reject the lock request for the oldest locker ID.
+<dt><a name="Db.DB_LOCK_RANDOM">Db.DB_LOCK_RANDOM</a><dd>Reject the lock request for a random locker ID.
+<dt><a name="Db.DB_LOCK_YOUNGEST">Db.DB_LOCK_YOUNGEST</a><dd>Reject the lock request for the youngest locker ID.
+</dl>
+<p>The DbEnv.lock_detect method returns the number of lock requests that were
+rejected.
+<p>The DbEnv.lock_detect method throws an exception that encapsulates a non-zero error value on
+failure.
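+<p>An illustrative sketch (not part of the original Sleepycat page): run a
+single pass of the deadlock detector, rejecting a random locker in each
+deadlock found. The DbEnv handle <b>env</b> is an assumption of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK (and usually DB_INIT_TXN).
+static void detectDeadlocks(DbEnv env) throws DbException {
+    // flags must be 0; the atype argument selects the rejection policy.
+    int rejected = env.lock_detect(0, Db.DB_LOCK_RANDOM);
+    System.out.println("lock requests rejected: " + rejected);
+}
+</pre>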
+<h1>Errors</h1>
+<p>The DbEnv.lock_detect method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_detect method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_get.html b/libdb/docs/api_java/lock_get.html
new file mode 100644
index 0000000..0f7281a
--- /dev/null
+++ b/libdb/docs/api_java/lock_get.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_get</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLock lock_get(int locker, int flags, Dbt obj, int lock_mode)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_get method acquires a lock from the lock table, returning
+information about it in
+a DbLock object.
+<p>The <b>locker</b> argument specified to DbEnv.lock_get is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the lock.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with an
+existing lock,
+throw a <a href="../api_java/lockng_class.html">DbLockNotGrantedException</a> immediately instead of waiting
+for the lock to become available.
+</dl>
+<p>The <b>obj</b> argument is an untyped byte string that specifies the
+object to be locked or released. Applications using the locking
+subsystem directly while also doing locking via the Berkeley DB access methods
+must take care not to inadvertently lock objects that happen to be equal
+to the unique file IDs used to lock files. See
+<a href="../ref/lock/am_conv.html">Access method locking conventions</a>
+for more information.
+<p>The <b>lock_mode</b> argument is used as an index into the environment's
+lock conflict matrix. When using the default lock conflict matrix,
+<b>lock_mode</b> must be set to one of the following values:
+<p><dl compact>
+<dt>Db.DB_LOCK_READ<dd>read (shared)
+<dt>Db.DB_LOCK_WRITE<dd>write (exclusive)
+<dt>Db.DB_LOCK_IWRITE<dd>intention to write (shared)
+<dt>Db.DB_LOCK_IREAD<dd>intention to read (shared)
+<dt>Db.DB_LOCK_IWR<dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p>Upon success, the DbEnv.lock_get method returns a DbLock handle for the
+acquired lock.
+Otherwise, the DbEnv.lock_get method throws an exception that encapsulates a non-zero error value on
+failure.
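+<p>An illustrative sketch (not part of the original Sleepycat page): acquire
+a write lock on an application-named object and later release it. The DbEnv
+handle <b>env</b>, the locker ID (obtained earlier from DbEnv.lock_id), and
+the object name are assumptions of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK; locker from env.lock_id().
+static void writeLockExample(DbEnv env, int locker) throws DbException {
+    byte[] name = "account-42".getBytes();   // application-chosen object name
+    Dbt obj = new Dbt();
+    obj.set_data(name);
+    obj.set_size(name.length);
+    DbLock lock = env.lock_get(locker, 0, obj, Db.DB_LOCK_WRITE);
+    // ... perform the work protected by the lock ...
+    env.lock_put(lock);                      // release the lock
+}
+</pre>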
+<h1>Errors</h1>
+<p>The DbEnv.lock_get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+DbEnv.lock_get method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The DbEnv.lock_get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_get method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_id.html b/libdb/docs/api_java/lock_id.html
new file mode 100644
index 0000000..a358488
--- /dev/null
+++ b/libdb/docs/api_java/lock_id.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_id</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int lock_id()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_id method
+returns a locker ID, which is guaranteed to be unique in the specified lock
+table.
+<p>The <a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a> method should be called to return the locker ID to
+the Berkeley DB library when it is no longer needed.
+<p>The DbEnv.lock_id method throws an exception that encapsulates a non-zero error value on
+failure.
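+<p>An illustrative sketch (not part of the original Sleepycat page): the usual
+lifecycle of a locker ID when using the locking interfaces directly. The DbEnv
+handle <b>env</b> is an assumption of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK.
+int locker = env.lock_id();                  // allocate a locker ID
+try {
+    // ... DbEnv.lock_get/DbEnv.lock_vec calls on behalf of this locker ...
+} finally {
+    env.lock_id_free(locker);                // return it when no longer needed
+}
+</pre>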
+<h1>Errors</h1>
+<p>The DbEnv.lock_id method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_id method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_id_free.html b/libdb/docs/api_java/lock_id_free.html
new file mode 100644
index 0000000..6f64df2
--- /dev/null
+++ b/libdb/docs/api_java/lock_id_free.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void lock_id_free(int id)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_id_free method frees a locker ID allocated by the
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a> method.
+<p>The DbEnv.lock_id_free method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DbEnv.lock_id_free method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_id_free method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_list.html b/libdb/docs/api_java/lock_list.html
new file mode 100644
index 0000000..60d96c7
--- /dev/null
+++ b/libdb/docs/api_java/lock_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Locking Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Locking Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Locking Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../api_java/lock_detect.html">DbEnv.lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../api_java/lock_get.html">DbEnv.lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../api_java/lock_id.html">DbEnv.lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../api_java/lock_put.html">DbEnv.lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../api_java/lock_stat.html">DbEnv.lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../api_java/lock_vec.html">DbEnv.lock_vec</a></td><td>Acquire/release locks</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_put.html b/libdb/docs/api_java/lock_put.html
new file mode 100644
index 0000000..3c9148d
--- /dev/null
+++ b/libdb/docs/api_java/lock_put.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_put</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void lock_put(DbLock lock)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_put method releases <b>lock</b> from the lock table.
+<p>The DbEnv.lock_put method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The DbEnv.lock_put method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.lock_put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_put method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_stat.html b/libdb/docs/api_java/lock_stat.html
new file mode 100644
index 0000000..b1bba66
--- /dev/null
+++ b/libdb/docs/api_java/lock_stat.html
@@ -0,0 +1,86 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLockStat lock_stat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_stat method returns the locking subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv.lock_stat method creates a DbLockStat object encapsulating the
+lock region statistics. The following data fields are available from
+the DbLockStat object:
+<p><dl compact>
+<dt>public int st_id;<dd>The last allocated locker ID.
+<dt>public int st_cur_maxid;<dd>The current maximum unused locker ID.
+<dt>public int st_nmodes;<dd>The number of lock modes.
+<dt>public int st_maxlocks;<dd>The maximum number of locks possible.
+<dt>public int st_maxlockers;<dd>The maximum number of lockers possible.
+<dt>public int st_maxobjects;<dd>The maximum number of lock objects possible.
+<dt>public int st_nlocks;<dd>The number of current locks.
+<dt>public int st_maxnlocks;<dd>The maximum number of locks at any one time.
+<dt>public int st_nlockers;<dd>The number of current lockers.
+<dt>public int st_maxnlockers;<dd>The maximum number of lockers at any one time.
+<dt>public int st_nobjects;<dd>The number of current lock objects.
+<dt>public int st_maxnobjects;<dd>The maximum number of lock objects at any one time.
+<dt>public int st_nrequests;<dd>The total number of locks requested.
+<dt>public int st_nreleases;<dd>The total number of locks released.
+<dt>public int st_nnowaits;<dd>The total number of lock requests failing because
+<a href="../api_java/lock_vec.html#DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a> was set.
+<dt>public int st_nconflicts;<dd>The total number of locks not immediately available due to conflicts.
+<dt>public int st_ndeadlocks;<dd>The number of deadlocks.
+<dt>public int st_locktimeout;<dd>Lock timeout value.
+<dt>public int st_nlocktimeouts;<dd>The number of locks that have timed out.
+<dt>public int st_txntimeout;<dd>Transaction timeout value.
+<dt>public int st_ntxntimeouts;<dd>The number of transactions that have timed out. This value is also a
+component of <b>st_ndeadlocks</b>, the total number of deadlocks
+detected.
+<dt>public int st_regsize;<dd>The size of the lock region.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv.lock_stat method throws an exception that encapsulates a non-zero error value on
+failure.
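+<p>An illustrative sketch (not part of the original Sleepycat page): print a
+few of the statistics described above. The DbEnv handle <b>env</b> is an
+assumption of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK.
+DbLockStat stat = env.lock_stat(0);          // 0: do not reset the counters
+System.out.println("current locks:   " + stat.st_nlocks);
+System.out.println("current lockers: " + stat.st_nlockers);
+System.out.println("deadlocks:       " + stat.st_ndeadlocks);
+</pre>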
+<h1>Errors</h1>
+<p>The DbEnv.lock_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lock_vec.html b/libdb/docs/api_java/lock_vec.html
new file mode 100644
index 0000000..76691dd
--- /dev/null
+++ b/libdb/docs/api_java/lock_vec.html
@@ -0,0 +1,141 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_vec</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_vec</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void lock_vec(int locker, int flags, DbLockRequest[] list,
+ int offset, int count)
+ throws DbException
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_vec method atomically obtains and releases one or more locks
+from the lock table. The DbEnv.lock_vec method is intended to support
+acquisition or trading of multiple locks under one lock table semaphore,
+as is needed for lock coupling or in multigranularity locking for lock
+escalation.
+<p>The <b>locker</b> argument specified to DbEnv.lock_vec is an unsigned
+32-bit integer quantity. It represents the entity requesting or releasing
+the locks.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a><dd>If a lock cannot be granted because the requested lock conflicts with
+an existing lock,
+throw a <a href="../api_java/lockng_class.html">DbLockNotGrantedException</a> immediately instead of waiting
+for the lock to become available. In this case, the index of the request
+that was not granted can be found by calling
+DbLockNotGrantedException.get_index.
+</dl>
+<p>The <b>list</b> array provided to DbEnv.lock_vec is a set of
+DbLockRequest objects. Only <b>count</b> elements of <b>list</b>
+starting at <b>offset</b> are considered by DbEnv.lock_vec.
+A DbLockRequest object has at least the following fields.
+For each field there is a corresponding get_ and set_ method.
+<p><dl compact>
+<p><dt>int <a name="op">op</a>;<dd>The operation to be performed, which must be set to one of the
+following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_LOCK_GET">Db.DB_LOCK_GET</a><dd>Get the lock defined by the values of the <b>mode</b> and <b>obj</b>
+structure fields, for the specified <b>locker</b>. Upon return from
+DbEnv.lock_vec, if the <b>lock</b> field is non-null, a reference
+to the acquired lock is stored there. (This reference is invalidated
+by any call to DbEnv.lock_vec or <a href="../api_java/lock_put.html">DbEnv.lock_put</a> that releases the
+lock.)
+<p><dt><a name="Db.DB_LOCK_GET_TIMEOUT">Db.DB_LOCK_GET_TIMEOUT</a><dd>Identical to Db.DB_LOCK_GET except that the value in the <b>timeout</b>
+structure field overrides any previously specified timeout value for
+this lock. A value of 0 turns off any previously specified timeout.
+<p><dt><a name="Db.DB_LOCK_PUT">Db.DB_LOCK_PUT</a><dd>The lock to which the <b>lock</b> structure field refers is released.
+The <b>locker</b> argument, and <b>mode</b> and <b>obj</b> fields
+are ignored.
+<p><dt><a name="Db.DB_LOCK_PUT_ALL">Db.DB_LOCK_PUT_ALL</a><dd>All locks held by the specified <b>locker</b> are released. The
+<b>lock</b>, <b>mode</b>, and <b>obj</b> structure fields are
+ignored. Locks acquired in operations performed by the current call to
+DbEnv.lock_vec which appear before the Db.DB_LOCK_PUT_ALL
+operation are released; those acquired in operations appearing after
+the Db.DB_LOCK_PUT_ALL operation are not released.
+<p><dt><a name="Db.DB_LOCK_PUT_OBJ">Db.DB_LOCK_PUT_OBJ</a><dd>All locks held on the object <b>obj</b> are released. The
+<b>locker</b> argument and the <b>lock</b> and <b>mode</b> structure
+fields are ignored. Locks acquired in operations performed by the
+current call to DbEnv.lock_vec that appear before the
+Db.DB_LOCK_PUT_OBJ operation are released; those acquired in
+operations appearing after the Db.DB_LOCK_PUT_OBJ operation are
+not released.
+<p><dt><a name="Db.DB_LOCK_TIMEOUT">Db.DB_LOCK_TIMEOUT</a><dd>Cause the specified <b>locker</b> to timeout immediately. If the
+database environment has not configured automatic deadlock detection,
+the transaction will timeout the next time deadlock detection is
+performed. As transactions acquire locks on behalf of a single locker
+ID, timing out the locker ID associated with a transaction will time
+out the transaction itself.
+</dl>
+<p><dt>DbLock <a name="lock">lock</a>;<dd>A lock reference.
+<p><dt>int <a name="mode">mode</a>;<dd>The lock mode, used as an index into the environment's lock conflict matrix.
+When using the default lock conflict matrix, <b>mode</b> must be set to one
+of the following values:
+<p><dl compact>
+<dt><a name="Db.DB_LOCK_READ">Db.DB_LOCK_READ</a><dd>read (shared)
+<dt><a name="Db.DB_LOCK_WRITE">Db.DB_LOCK_WRITE</a><dd>write (exclusive)
+<dt><a name="Db.DB_LOCK_IWRITE">Db.DB_LOCK_IWRITE</a><dd>intention to write (shared)
+<dt><a name="Db.DB_LOCK_IREAD">Db.DB_LOCK_IREAD</a><dd>intention to read (shared)
+<dt><a name="Db.DB_LOCK_IWR">Db.DB_LOCK_IWR</a><dd>intention to read and write (shared)
+</dl>
+<p>See <a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a> and <a href="../ref/lock/stdmode.html">Standard Lock Modes</a> for more information on the lock conflict matrix.
+<p><dt>Dbt <a name="obj">obj</a>;<dd>An untyped byte string that specifies the object to be locked or
+released. Applications using the locking subsystem directly while also
+doing locking via the Berkeley DB access methods must take care not to
+inadvertently lock objects that happen to be equal to the unique file
+IDs used to lock files. See <a href="../ref/lock/am_conv.html">Access
+method locking conventions</a> for more information.
+<p><dt>int timeout;<dd>The lock timeout value.
+</dl>
+<p>If any of the requested locks cannot be acquired, or any of the locks to
+be released cannot be released, the operations before the failing
+operation are guaranteed to have completed successfully, and
+DbEnv.lock_vec throws an exception.
+<p>
+Otherwise, the DbEnv.lock_vec method throws an exception that encapsulates a non-zero error value on
+failure.
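+<p>An illustrative sketch (not part of the original Sleepycat page): atomically
+acquire a read lock on one object while releasing a previously held lock. The
+DbEnv handle <b>env</b>, the locker ID, and the held lock are assumptions of
+this example, as is the four-argument DbLockRequest constructor shown; the
+page itself documents only the per-field get_ and set_ accessors.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK; locker from env.lock_id();
+// oldLock is a DbLock this locker already holds.
+static void tradeLocks(DbEnv env, int locker, Dbt obj, DbLock oldLock)
+    throws DbException {
+    DbLockRequest[] list = new DbLockRequest[2];
+    list[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, obj, null);
+    list[1] = new DbLockRequest(Db.DB_LOCK_PUT, 0, null, oldLock);
+    env.lock_vec(locker, 0, list, 0, list.length);
+    DbLock newLock = list[0].get_lock();     // reference to the acquired lock
+}
+</pre>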
+<h1>Errors</h1>
+<p>The DbEnv.lock_vec method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of locks has been reached.
+</dl>
+<p>If the operation was selected to resolve a deadlock, the
+DbEnv.lock_vec method will fail and
+throw a <a href="../api_java/deadlock_class.html">DbDeadlockException</a> exception.
+<p>The DbEnv.lock_vec method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_vec method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/lock_list.html">Locking Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lockng_class.html b/libdb/docs/api_java/lockng_class.html
new file mode 100644
index 0000000..d1f8a6b
--- /dev/null
+++ b/libdb/docs/api_java/lockng_class.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLockNotGrantedException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLockNotGrantedException</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLockNotGrantedException extends DbException {
+ public int get_op();
+ public int get_mode();
+ public Dbt get_obj();
+ public DbLock get_lock();
+ public int get_index();
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbLockNotGrantedException class and
+how it is used by the various Db* classes.
+<p>A DbLockNotGrantedException is thrown when a lock, requested
+using the <a href="../api_java/lock_get.html">DbEnv.lock_get</a> or <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> methods (where the
+<a href="../api_java/lock_vec.html#DB_LOCK_NOWAIT">Db.DB_LOCK_NOWAIT</a> option was specified), or by any <a href="../api_java/db_class.html">Db</a>
+operation performed in the context of a transaction begun using the
+<a href="../api_java/txn_begin.html#DB_TXN_NOWAIT">Db.DB_TXN_NOWAIT</a> option, is unable to be granted immediately.
+<p>The <b>get_op</b> method returns 0 when <a href="../api_java/lock_get.html">DbEnv.lock_get</a> was called,
+and returns the <b>op</b> for the failed DbLockRequest when
+<a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> was called.
+<p>The <b>get_mode</b> method returns the <b>mode</b> argument when
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a> was called, and returns the <b>mode</b> for the failed
+DbLockRequest when <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> was called.
+<p>The <b>get_obj</b> method returns the <b>obj</b> argument when
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a> was called, and returns the <b>obj</b> for the failed
+DbLockRequest when <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> was called.
+<p>The <b>get_lock</b> method returns null when <a href="../api_java/lock_get.html">DbEnv.lock_get</a> was
+called, and returns the <b>lock</b> in the failed DbLockRequest
+when <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> was called.
+<p>The <b>get_index</b> method returns -1 when <a href="../api_java/lock_get.html">DbEnv.lock_get</a> was
+called, and returns the index of the failed DbLockRequest
+when <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> was called.
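+<p>An illustrative sketch (not part of the original Sleepycat page): request a
+lock without waiting and inspect the exception if it cannot be granted. The
+DbEnv handle <b>env</b>, the locker ID, and the Dbt <b>obj</b> are assumptions
+of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOCK; locker from env.lock_id().
+try {
+    DbLock lock = env.lock_get(locker, Db.DB_LOCK_NOWAIT, obj, Db.DB_LOCK_WRITE);
+    // ... got the lock ...
+} catch (DbLockNotGrantedException e) {
+    // For DbEnv.lock_get, get_index() returns -1 and get_mode() the requested mode.
+    System.err.println("lock not granted, mode " + e.get_mode());
+}
+</pre>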
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_archive.html b/libdb/docs/api_java/log_archive.html
new file mode 100644
index 0000000..635a2a8
--- /dev/null
+++ b/libdb/docs/api_java/log_archive.html
@@ -0,0 +1,92 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_archive</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public String[] log_archive(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_archive method
+returns an array of log or database filenames.
+<p>By default, DbEnv.log_archive returns the names of all of the log
+files that are no longer in use (for example, that are no longer
+involved in active transactions), and that may safely be archived for
+catastrophic recovery and then removed from the system. If there are
+no filenames to return,
+DbEnv.log_archive returns null.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_ARCH_ABS">Db.DB_ARCH_ABS</a><dd>All pathnames are returned as absolute pathnames, instead of relative
+to the database home directory.
+<p><dt><a name="Db.DB_ARCH_DATA">Db.DB_ARCH_DATA</a><dd>Return the database files that need to be archived in order to recover
+the database from catastrophic failure. If any of the database files
+have not been accessed during the lifetime of the current log files,
+DbEnv.log_archive will not include them in this list. It is also
+possible that some of the files referred to by the log have since been
+deleted from the system.
+<p><dt><a name="Db.DB_ARCH_LOG">Db.DB_ARCH_LOG</a><dd>Return all the log filenames, regardless of whether or not they are in
+use.
+</dl>
+<p>The Db.DB_ARCH_DATA and Db.DB_ARCH_LOG flags are mutually
+exclusive.
+<p>Log cursor handles (returned by the <a href="../api_java/log_cursor.html">DbEnv.log_cursor</a> method) may have open
+file descriptors for log files in the database environment. Also, the
+Berkeley DB interfaces to the database environment logging subsystem (for
+example, <a href="../api_java/log_put.html">DbEnv.log_put</a> and <a href="../api_java/txn_abort.html">DbTxn.abort</a>) may allocate log cursors
+and have open file descriptors for log files as well. On operating
+systems where filesystem related system calls (for example, rename and
+unlink on Windows/NT) can fail if a process has an open file descriptor
+for the affected file, attempting to move or remove the log files listed
+by DbEnv.log_archive may fail. All Berkeley DB internal use of log cursors
+operates on active log files only and furthermore, is short-lived in
+nature. So, an application seeing such a failure should be restructured
+to close any open log cursors it may have, and otherwise to retry the
+operation until it succeeds. (Although the latter is not likely to be
+necessary; it is hard to imagine a reason to move or rename a log file
+in which transactions are being logged or aborted.)
+<p>See the <a href="../utility/db_archive.html">db_archive</a> manual page for more information on database
+archival procedures.
+<p>The DbEnv.log_archive method throws an exception that encapsulates a non-zero error value on
+failure.
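+<p>An illustrative sketch (not part of the original Sleepycat page): list the
+log files that are no longer needed and may be archived or removed. The DbEnv
+handle <b>env</b> is an assumption of this example.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOG (and usually DB_INIT_TXN).
+String[] unused = env.log_archive(Db.DB_ARCH_ABS); // absolute pathnames
+if (unused == null)
+    System.out.println("no log files may be archived yet");
+else
+    for (int i = 0; i &lt; unused.length; i++)
+        System.out.println("archivable: " + unused[i]);
+</pre>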
+<h1>Errors</h1>
+<p>The DbEnv.log_archive method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The log was corrupted.
+</dl>
+<p>The DbEnv.log_archive method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_archive method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_compare.html b/libdb/docs/api_java/log_compare.html
new file mode 100644
index 0000000..bf02cdf
--- /dev/null
+++ b/libdb/docs/api_java/log_compare.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_compare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_compare</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static int log_compare(DbLsn lsn0, DbLsn lsn1);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_compare method allows the caller to compare two
+<a href="../api_java/lsn_class.html">DbLsn</a> objects,
+returning 0 if they are equal, 1 if <b>lsn0</b> is greater than
+<b>lsn1</b>, and -1 if <b>lsn0</b> is less than <b>lsn1</b>.
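+<p>An illustrative sketch (not part of the original Sleepycat page): decide
+whether a recorded checkpoint LSN precedes another LSN. Both DbLsn objects
+are assumptions of this example, obtained earlier (for example, from
+DbEnv.log_put or a DbLogc cursor).
+<p><pre>
+if (DbEnv.log_compare(checkpointLsn, currentLsn) &lt; 0)
+    System.out.println("checkpoint record precedes the current record");
+</pre>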
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_cursor.html b/libdb/docs/api_java/log_cursor.html
new file mode 100644
index 0000000..8f79d68
--- /dev/null
+++ b/libdb/docs/api_java/log_cursor.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLogc log_cursor(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_cursor method
+creates a log cursor.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv.log_cursor method throws an exception that encapsulates a non-zero error value on
+failure.
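+<p>An illustrative sketch (not part of the original Sleepycat page): create a
+log cursor and close it when finished. The DbEnv handle <b>env</b> is an
+assumption of this example, as is the DbLogc.close call, which is documented
+on the DbLogc page rather than here.
+<p><pre>
+// Assumes: DbEnv env opened with DB_INIT_LOG.
+DbLogc logc = env.log_cursor(0);
+try {
+    // ... read log records through the cursor ...
+} finally {
+    logc.close(0);   // assumed DbLogc method; releases the cursor's resources
+}
+</pre>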
+<h1>Errors</h1>
+<p>The DbEnv.log_cursor method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_cursor method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_cursor method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_file.html b/libdb/docs/api_java/log_file.html
new file mode 100644
index 0000000..95c658e
--- /dev/null
+++ b/libdb/docs/api_java/log_file.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_file</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public String log_file(DbLsn lsn)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_file method maps
+<a href="../api_java/lsn_class.html">DbLsn</a> objects
+to filenames,
+returning the name of the file containing the record named by <b>lsn</b>.
+The filename is returned as a String; no caller-supplied buffer is
+required.
+(Log filenames are normally quite short, on the order of 10 characters.)
+<p>This mapping of
+<a href="../api_java/lsn_class.html">DbLsn</a> objects
+to files is needed for database administration. For example, a
+transaction manager typically records the earliest
+<a href="../api_java/lsn_class.html">DbLsn</a>
+needed for restart, and the database administrator may want to archive
+log files to tape when they contain only
+<a href="../api_java/lsn_class.html">DbLsn</a>
+entries before the earliest one needed for restart.
+<p>The DbEnv.log_file method throws an exception that encapsulates a non-zero error value on
+failure.
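+<p>A minimal sketch, assuming <b>dbenv</b> is an open DbEnv handle and
+<b>lsn</b> was obtained from an earlier DbLogc.get call:
+<p><pre>
+// Find which log file holds the record at the given log sequence number.
+String name = dbenv.log_file(lsn);
+System.out.println("record stored in " + name);
+</pre>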
+<h1>Errors</h1>
+<p>The DbEnv.log_file method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The supplied buffer was too small to hold the log filename.
+</dl>
+<p>The DbEnv.log_file method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_file method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_flush.html b/libdb/docs/api_java/log_flush.html
new file mode 100644
index 0000000..77f646b
--- /dev/null
+++ b/libdb/docs/api_java/log_flush.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_flush</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_flush</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_flush(DbLsn lsn)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_flush method guarantees that all log records whose
+<a href="../api_java/lsn_class.html">DbLsn</a> values
+are less than or equal to the <b>lsn</b> argument have been
+written to disk. If <b>lsn</b> is null, all records in the
+log are flushed.
+<p>The DbEnv.log_flush method throws an exception that encapsulates a non-zero error value on
+failure.
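+<p>A minimal sketch, assuming <b>dbenv</b> is an open DbEnv handle and
+<b>lsn</b> was returned by an earlier DbEnv.log_put call:
+<p><pre>
+// Force all records up to and including lsn to stable storage.
+dbenv.log_flush(lsn);
+// Passing null flushes every record in the log.
+dbenv.log_flush(null);
+</pre>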
+<h1>Errors</h1>
+<p>The DbEnv.log_flush method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_flush method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_flush method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_list.html b/libdb/docs/api_java/log_list.html
new file mode 100644
index 0000000..38a0008
--- /dev/null
+++ b/libdb/docs/api_java/log_list.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Logging Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Logging Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Logging Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/log_archive.html">DbEnv.log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../api_java/log_cursor.html">DbEnv.log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><a href="../api_java/log_file.html">DbEnv.log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../api_java/log_flush.html">DbEnv.log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../api_java/log_put.html">DbEnv.log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../api_java/log_compare.html">DbEnv.log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><a href="../api_java/log_stat.html">DbEnv.log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../api_java/logc_close.html">DbLogc.close</a></td><td>Close a log cursor</td></tr>
+<tr><td><a href="../api_java/logc_get.html">DbLogc.get</a></td><td>Retrieve a log record</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_put.html b/libdb/docs/api_java/log_put.html
new file mode 100644
index 0000000..f6ffdb9
--- /dev/null
+++ b/libdb/docs/api_java/log_put.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_put</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void log_put(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_put method appends records to the log. The <a href="../api_java/lsn_class.html">DbLsn</a> of
+the put record is returned in the <b>lsn</b> argument. The <b>flags</b>
+argument may be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FLUSH">Db.DB_FLUSH</a><dd>The log is forced to disk after this record is written, guaranteeing
+that all records with <a href="../api_java/lsn_class.html">DbLsn</a> values less than or equal to the
+one being "put" are on disk before DbEnv.log_put returns.
+</dl>
+<p>The caller is responsible for providing any necessary structure to
+<b>data</b>. (For example, in a write-ahead logging protocol, the
+application must understand what part of <b>data</b> is an operation
+code, what part is redo information, and what part is undo information.
+In addition, most transaction managers will store in <b>data</b> the
+<a href="../api_java/lsn_class.html">DbLsn</a> of the previous log record for the same transaction, to
+support chaining back through the transaction's log records during
+undo.)
+<p>The DbEnv.log_put method throws an exception that encapsulates a non-zero error value on
+failure.
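+<p>A minimal sketch, assuming <b>dbenv</b> is an open DbEnv handle; the
+record payload and the use of DbLsn's no-argument constructor are
+illustrative assumptions:
+<p><pre>
+// Append an application-formatted record and force it to disk.
+byte[] rec = "example redo/undo payload".getBytes();
+Dbt data = new Dbt(rec);
+data.set_size(rec.length);
+DbLsn lsn = new DbLsn();            // filled in with the record's LSN
+dbenv.log_put(lsn, data, Db.DB_FLUSH);
+</pre>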
+<h1>Errors</h1>
+<p>The <a href="../api_java/log_flush.html">DbEnv.log_flush</a> method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The record to be logged is larger than the maximum log record.
+</dl>
+<p>The DbEnv.log_put method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_put method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/log_stat.html b/libdb/docs/api_java/log_stat.html
new file mode 100644
index 0000000..a37789b
--- /dev/null
+++ b/libdb/docs/api_java/log_stat.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLogStat log_stat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_stat method returns the logging subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv.log_stat method creates a DbLogStat object encapsulating the log
+region statistics. The following data fields are available from the
+DbLogStat object:
+<p><dl compact>
+<dt>public int st_magic;<dd>The magic number that identifies a file as a log file.
+<dt>public int st_version;<dd>The version of the log file type.
+<dt>public int st_mode;<dd>The mode of any created log files.
+<dt>public int st_lg_bsize;<dd>The in-memory log record cache size.
+<dt>public int st_lg_size;<dd>The current log file size.
+<dt>public int st_w_mbytes;<dd>The number of megabytes written to this log.
+<dt>public int st_w_bytes;<dd>The number of bytes over and above <b>st_w_mbytes</b> written to this log.
+<dt>public int st_wc_mbytes;<dd>The number of megabytes written to this log since the last checkpoint.
+<dt>public int st_wc_bytes;<dd>The number of bytes over and above <b>st_wc_mbytes</b> written to this log
+since the last checkpoint.
+<dt>public int st_wcount;<dd>The number of times the log has been written to disk.
+<dt>public int st_wcount_fill;<dd>The number of times the log has been written to disk because the
+in-memory log record cache filled up.
+<dt>public int st_scount;<dd>The number of times the log has been flushed to disk.
+<dt>public int st_cur_file;<dd>The current log file number.
+<dt>public int st_cur_offset;<dd>The byte offset in the current log file.
+<dt>public int st_disk_file;<dd>The log file number of the last record known to be on disk.
+<dt>public int st_disk_offset;<dd>The byte offset of the last record known to be on disk.
+<dt>public int st_maxcommitperflush;<dd>The maximum number of commits contained in a single log flush.
+<dt>public int st_mincommitperflush;<dd>The minimum number of commits contained in a single log flush that
+contained a commit.
+<dt>public int st_regsize;<dd>The size of the region.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+</dl>
+<p>The DbEnv.log_stat method throws an exception that encapsulates a non-zero error value on
+failure.
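+<p>A minimal sketch, assuming <b>dbenv</b> is an open DbEnv handle; the
+fields printed are those listed above:
+<p><pre>
+// Report the current write position recorded by the logging subsystem.
+DbLogStat stat = dbenv.log_stat(0);
+System.out.println("current log file:   " + stat.st_cur_file);
+System.out.println("current log offset: " + stat.st_cur_offset);
+System.out.println("log buffer size:    " + stat.st_lg_bsize);
+</pre>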
+<h1>Errors</h1>
+<p>The DbEnv.log_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/logc_class.html b/libdb/docs/api_java/logc_class.html
new file mode 100644
index 0000000..2129cca
--- /dev/null
+++ b/libdb/docs/api_java/logc_class.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLogc extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc object is the handle for a cursor into the log files,
+supporting sequential access to the records stored in log files. The
+handle is not free-threaded. Once the <a href="../api_java/logc_close.html">DbLogc.close</a> method is called,
+the handle may not be accessed again, regardless of that method's
+return.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, DbLogc, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/logc_close.html b/libdb/docs/api_java/logc_close.html
new file mode 100644
index 0000000..b661fd7
--- /dev/null
+++ b/libdb/docs/api_java/logc_close.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc.close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc.close method discards the log cursor. After DbLogc.close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbLogc.close method throws an exception that encapsulates a non-zero error value on
+failure.
+<h1>Errors</h1>
+<p>The DbLogc.close method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DbLogc.close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc.close method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/logc_get.html b/libdb/docs/api_java/logc_get.html
new file mode 100644
index 0000000..24f963c
--- /dev/null
+++ b/libdb/docs/api_java/logc_get.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc.get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc.get</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc.get method retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_java/dbt_class.html">Dbt</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_java/dbt_class.html#DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a>,
+<a href="../api_java/dbt_class.html#DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a> and <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_java/dbt_class.html">Dbt</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>
+If the log is empty, the DbLogc.get method will return Db.DB_NOTFOUND.
+<p><dt><a name="Db.DB_LAST">Db.DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>
+If the log is empty, the DbLogc.get method will return Db.DB_NOTFOUND.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a>, <a name="Db.DB_PREV">Db.DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the record
+returned.
+<p>If the cursor has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbLogc.get will return the first (last) record
+in the log.
+If the last (first) log record has already been returned or the log is
+empty, the DbLogc.get method will return Db.DB_NOTFOUND.
+If the log was opened with the DB_THREAD flag set, calls to
+DbLogc.get with the DB_NEXT (DB_PREV) flag set cause the DbLogc.get method to throw an exception that encapsulates Db.EINVAL.
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+If the log cursor has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, the DbLogc.get method throws an exception that encapsulates Db.EINVAL.
+<p><dt><a name="Db.DB_SET">Db.DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument.
+If the specified <a href="../api_java/lsn_class.html">DbLsn</a> is invalid (for example, it does not
+appear in the log), the DbLogc.get method throws an exception that encapsulates Db.EINVAL.
+</dl>
+<p>
+Otherwise, the DbLogc.get method throws an exception that encapsulates a non-zero error value on
+failure.
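+<p>A minimal sketch of forward iteration, assuming <b>dbenv</b> is an
+open DbEnv handle (error handling omitted):
+<p><pre>
+// Walk the log from the first record to the last.
+DbLogc logc = dbenv.log_cursor(0);
+DbLsn lsn = new DbLsn();
+Dbt data = new Dbt();
+data.set_flags(Db.DB_DBT_MALLOC);      // library allocates record memory
+while (logc.get(lsn, data, Db.DB_NEXT) == 0)
+    System.out.println("record of " + data.get_size() + " bytes");
+logc.close(0);
+</pre>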
+<h1>Errors</h1>
+<p>The DbLogc.get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DbLogc.get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc.get method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, <a href="../api_java/lsn_class.html">DbLsn</a>
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/lsn_class.html b/libdb/docs/api_java/lsn_class.html
new file mode 100644
index 0000000..158a27a
--- /dev/null
+++ b/libdb/docs/api_java/lsn_class.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbLsn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLsn</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLsn extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLsn object is a <b>log sequence number</b> which
+specifies a unique location in a log file. It has no methods and
+its data may not be manipulated by an application.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/logc_class.html">DbLogc</a>, DbLsn
+<h1>See Also</h1>
+<a href="../api_java/log_list.html">Logging Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_class.html b/libdb/docs/api_java/memp_class.html
new file mode 100644
index 0000000..caa5d2a
--- /dev/null
+++ b/libdb/docs/api_java/memp_class.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMemoryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMemoryException</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbMemoryException extends DbException {
+ public Dbt get_dbt();
+}
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbMemoryException class and how
+it is used by the various Db* classes.
+<p>A DbMemoryException is thrown when there is insufficient memory
+to complete an operation, and there is the possibility of recovering.
+An example is during a <a href="../api_java/db_get.html">Db.get</a> or <a href="../api_java/dbc_get.html">Dbc.get</a> operation with
+the <a href="../api_java/dbt_class.html">Dbt</a> flags set to <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a>.
+<p>In a Java Virtual Machine, there are usually separate heaps for memory
+allocated by native code and for objects allocated in Java code. If the
+Java heap is exhausted, the JVM will throw an OutOfMemoryError, so you
+may see that exception rather than DbMemoryException.
+<p>The <b>get_dbt</b> method returns the <a href="../api_java/dbt_class.html">Dbt</a> whose memory was
+insufficient to complete the operation and that caused the DbMemoryException
+to be thrown.
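+<p>A minimal sketch, assuming <b>db</b> is an open Db handle and that
+the key and the 32-byte initial buffer are illustrative choices:
+<p><pre>
+// Retrieve into caller-supplied memory; learn the needed size if it was too small.
+Dbt key = new Dbt("some-key".getBytes());
+Dbt data = new Dbt(new byte[32]);
+data.set_ulen(32);
+data.set_flags(Db.DB_DBT_USERMEM);
+try {
+    db.get(null, key, data, 0);
+} catch (DbMemoryException e) {
+    Dbt d = e.get_dbt();             // the Dbt that was too small
+    System.out.println("record needs " + d.get_size() + " bytes");
+}
+</pre>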
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_fclose.html b/libdb/docs/api_java/memp_fclose.html
new file mode 100644
index 0000000..803ba8f
--- /dev/null
+++ b/libdb/docs/api_java/memp_fclose.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile.close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile.close method is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_fopen.html b/libdb/docs/api_java/memp_fopen.html
new file mode 100644
index 0000000..5b54690
--- /dev/null
+++ b/libdb/docs/api_java/memp_fopen.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile.open</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile.open method is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_fsync.html b/libdb/docs/api_java/memp_fsync.html
new file mode 100644
index 0000000..71d6f6d
--- /dev/null
+++ b/libdb/docs/api_java/memp_fsync.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile.sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile.sync</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile.sync method is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_list.html b/libdb/docs/api_java/memp_list.html
new file mode 100644
index 0000000..7ec6926
--- /dev/null
+++ b/libdb/docs/api_java/memp_list.html
@@ -0,0 +1,24 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Memory Pools and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Memory Pools and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Memory Pools and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../api_java/memp_stat.html">DbEnv.memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_register.html b/libdb/docs/api_java/memp_register.html
new file mode 100644
index 0000000..116f107
--- /dev/null
+++ b/libdb/docs/api_java/memp_register.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.memp_register</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_register method is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_stat.html b/libdb/docs/api_java/memp_stat.html
new file mode 100644
index 0000000..3f41de1
--- /dev/null
+++ b/libdb/docs/api_java/memp_stat.html
@@ -0,0 +1,120 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.memp_stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbMpoolStat memp_stat(int flags)
+ throws DbException;
+<p>
+public DbMpoolFStat[] memp_fstat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_stat and <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a> methods return the memory pool
+subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv.memp_stat and <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a> methods create the DbMpoolStat
+and DbMpoolFStat objects encapsulating the memory pool region
+statistics. The memory pool region statistics are stored in a
+DbMpoolStat object and the per-file memory pool statistics are stored
+in DbMpoolFStat objects. The following data fields are available from
+the DbMpoolStat object:
+<p><dl compact>
+<dt>public long st_gbytes;<dd>Gigabytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>public long st_bytes;<dd>Bytes of cache (total cache size is st_gbytes + st_bytes).
+<dt>public int st_ncache;<dd>Number of caches.
+<dt>public int st_regsize;<dd>Individual cache size.
+<dt>public int st_map;<dd>Requested pages mapped into the process' address space (there is no
+available information about whether or not this request caused disk I/O,
+although examining the application page fault rate may be helpful).
+<dt>public int st_cache_hit;<dd>Requested pages found in the cache.
+<dt>public int st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>public int st_page_create;<dd>Pages created in the cache.
+<dt>public int st_page_in;<dd>Pages read into the cache.
+<dt>public int st_page_out;<dd>Pages written from the cache to the backing file.
+<dt>public int st_ro_evict;<dd>Clean pages forced from the cache.
+<dt>public int st_rw_evict;<dd>Dirty pages forced from the cache.
+<dt>public int st_page_trickle;<dd>Dirty pages written using the <a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a> interface.
+<dt>public int st_pages;<dd>Pages in the cache.
+<dt>public int st_page_clean;<dd>Clean pages currently in the cache.
+<dt>public int st_page_dirty;<dd>Dirty pages currently in the cache.
+<dt>public int st_hash_buckets;<dd>Number of hash buckets in buffer hash table.
+<dt>public int st_hash_searches;<dd>Total number of buffer hash table lookups.
+<dt>public int st_hash_longest;<dd>The longest chain ever encountered in buffer hash table lookups.
+<dt>public int st_hash_examined;<dd>Total number of hash elements traversed during hash table lookups.
+<dt>public int st_hash_nowait;<dd>The number of times that a thread of control was able to obtain a hash
+bucket lock without waiting.
+<dt>public int st_hash_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a hash bucket lock.
+<dt>public int st_hash_max_wait;<dd>The maximum number of times any hash bucket lock was waited for by a
+thread of control.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining a region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain a region
+lock without waiting.
+<dt>public int st_alloc;<dd>Number of page allocations.
+<dt>public int st_alloc_buckets;<dd>Number of hash buckets checked during allocation.
+<dt>public int st_alloc_max_buckets;<dd>Maximum number of hash buckets checked during an allocation.
+<dt>public int st_alloc_pages;<dd>Number of pages checked during allocation.
+<dt>public int st_alloc_max_pages;<dd>Maximum number of pages checked during an allocation.
+</dl>
+<p>The <a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a> method creates an array of DbMpoolFStat objects
+containing statistics for individual files in the pool. Each
+DbMpoolFStat object contains statistics for an individual DbMpoolFile.
+The following data fields are available for each DbMpoolFStat
+object:
+<p><dl compact>
+<dt>public String file_name;<dd>The name of the file.
+<dt>public long st_pagesize;<dd>Page size in bytes.
+<dt>public int st_cache_hit;<dd>Requested pages found in the cache.
+<dt>public int st_cache_miss;<dd>Requested pages not found in the cache.
+<dt>public int st_map;<dd>Requested pages mapped into the process' address space.
+<dt>public int st_page_create;<dd>Pages created in the cache.
+<dt>public int st_page_in;<dd>Pages read into the cache.
+<dt>public int st_page_out;<dd>Pages written from the cache to the backing file.
+</dl>
+<p>The DbEnv.memp_stat method throws an exception that encapsulates a non-zero error value on
+failure.
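+<p>A minimal sketch computing a cache hit ratio from the fields listed
+above, assuming <b>dbenv</b> is an open DbEnv handle:
+<p><pre>
+// Summarize memory pool effectiveness, overall and per file.
+DbMpoolStat mp = dbenv.memp_stat(0);
+int total = mp.st_cache_hit + mp.st_cache_miss;
+if (total > 0)
+    System.out.println("hit ratio: " + (100.0 * mp.st_cache_hit / total) + "%");
+DbMpoolFStat[] files = dbenv.memp_fstat(0);
+for (int i = 0; i &lt; files.length; i++)
+    System.out.println(files[i].file_name + ": " + files[i].st_cache_hit + " hits");
+</pre>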
+<h1>Errors</h1>
+<p>The DbEnv.memp_stat method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.memp_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.memp_stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_sync.html b/libdb/docs/api_java/memp_sync.html
new file mode 100644
index 0000000..9bf72d3
--- /dev/null
+++ b/libdb/docs/api_java/memp_sync.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.memp_sync</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_sync method is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/memp_trickle.html b/libdb/docs/api_java/memp_trickle.html
new file mode 100644
index 0000000..7af3145
--- /dev/null
+++ b/libdb/docs/api_java/memp_trickle.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.memp_trickle</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.memp_trickle</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int memp_trickle(int pct)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.memp_trickle method ensures that at least <b>pct</b> percent of
+the pages in the shared memory pool are clean by writing dirty pages to
+their backing files.
+The number of pages that were written to reach the correct percentage is
+returned.
+<p>The purpose of the DbEnv.memp_trickle method is to enable a memory
+pool manager to ensure that a page is always available for reading in new
+information without having to wait for a write.
+<p>The DbEnv.memp_trickle method throws an exception that encapsulates a non-zero error value on
+failure.
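+<p>A minimal sketch, assuming <b>dbenv</b> is an open DbEnv handle; the
+20 percent target is an arbitrary illustrative value:
+<p><pre>
+// Keep at least 20% of the cached pages clean so a free page is
+// usually available without waiting on a write.
+int written = dbenv.memp_trickle(20);
+System.out.println(written + " dirty pages written");
+</pre>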
+<h1>Errors</h1>
+<p>The DbEnv.memp_trickle method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.memp_trickle method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.memp_trickle method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_java/memp_list.html">Memory Pools and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/mempfile_class.html b/libdb/docs/api_java/mempfile_class.html
new file mode 100644
index 0000000..609a5e5
--- /dev/null
+++ b/libdb/docs/api_java/mempfile_class.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile class is not included in the Berkeley DB
+Java API.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/pindex.src b/libdb/docs/api_java/pindex.src
new file mode 100644
index 0000000..a97c837
--- /dev/null
+++ b/libdb/docs/api_java/pindex.src
@@ -0,0 +1,378 @@
+__APIREL__/api_java/db_associate.html__OCT__2 @Db.associate
+__APIREL__/api_java/db_associate.html__OCT__3 @DB_DBT_APPMALLOC
+__APIREL__/api_java/db_associate.html__OCT__4 @DB_DONOTINDEX
+__APIREL__/api_java/db_associate.html__OCT__Db.DB_CREATE Db.associate@Db.DB_CREATE
+__APIREL__/api_java/db_associate.html__OCT__Db.DB_AUTO_COMMIT Db.associate@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/db_class.html__OCT__2 @Db
+__APIREL__/api_java/db_class.html__OCT__Db.DB_XA_CREATE Db@Db.DB_XA_CREATE
+__APIREL__/api_java/db_close.html__OCT__2 @Db.close
+__APIREL__/api_java/db_close.html__OCT__Db.DB_NOSYNC Db.close@Db.DB_NOSYNC
+__APIREL__/api_java/db_cursor.html__OCT__2 @Db.cursor
+__APIREL__/api_java/db_cursor.html__OCT__Db.DB_DIRTY_READ Db.cursor@Db.DB_DIRTY_READ
+__APIREL__/api_java/db_cursor.html__OCT__Db.DB_WRITECURSOR Db.cursor@Db.DB_WRITECURSOR
+__APIREL__/api_java/db_del.html__OCT__2 @Db.del
+__APIREL__/api_java/db_del.html__OCT__Db.DB_AUTO_COMMIT Db.del@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/db_err.html__OCT__2 @Db.err
+__APIREL__/api_java/db_fd.html__OCT__2 @Db.fd
+__APIREL__/api_java/db_get.html__OCT__2 @Db.get
+__APIREL__/api_java/db_get.html__OCT__Db.DB_CONSUME Db.get@Db.DB_CONSUME
+__APIREL__/api_java/db_get.html__OCT__Db.DB_CONSUME_WAIT Db.get@Db.DB_CONSUME_WAIT
+__APIREL__/api_java/db_get.html__OCT__Db.DB_GET_BOTH Db.get@Db.DB_GET_BOTH
+__APIREL__/api_java/db_get.html__OCT__Db.DB_SET_RECNO Db.get@Db.DB_SET_RECNO
+__APIREL__/api_java/db_get.html__OCT__Db.DB_DIRTY_READ Db.get@Db.DB_DIRTY_READ
+__APIREL__/api_java/db_get.html__OCT__Db.DB_MULTIPLE Db.get@Db.DB_MULTIPLE
+__APIREL__/api_java/db_get.html__OCT__Db.DB_RMW Db.get@Db.DB_RMW
+__APIREL__/api_java/db_get_byteswapped.html__OCT__2 @Db.get_byteswapped
+__APIREL__/api_java/db_get_type.html__OCT__2 @Db.get_type
+__APIREL__/api_java/db_join.html__OCT__2 @Db.join
+__APIREL__/api_java/db_join.html__OCT__Db.DB_JOIN_NOSORT Db.join@Db.DB_JOIN_NOSORT
+__APIREL__/api_java/db_join.html__OCT__Db.DB_JOIN_ITEM Db.join@Db.DB_JOIN_ITEM
+__APIREL__/api_java/db_join.html__OCT__Db.DB_DIRTY_READ Db.join@Db.DB_DIRTY_READ
+__APIREL__/api_java/db_join.html__OCT__Db.DB_RMW Db.join@Db.DB_RMW
+__APIREL__/api_java/db_key_range.html__OCT__2 @Db.key_range
+__APIREL__/api_java/db_open.html__OCT__2 @Db.open
+__APIREL__/api_java/db_open.html__OCT__Db.DB_BTREE Db.open@Db.DB_BTREE
+__APIREL__/api_java/db_open.html__OCT__Db.DB_HASH Db.open@Db.DB_HASH
+__APIREL__/api_java/db_open.html__OCT__Db.DB_QUEUE Db.open@Db.DB_QUEUE
+__APIREL__/api_java/db_open.html__OCT__Db.DB_RECNO Db.open@Db.DB_RECNO
+__APIREL__/api_java/db_open.html__OCT__Db.DB_UNKNOWN Db.open@Db.DB_UNKNOWN
+__APIREL__/api_java/db_open.html__OCT__Db.DB_AUTO_COMMIT Db.open@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/db_open.html__OCT__Db.DB_CREATE Db.open@Db.DB_CREATE
+__APIREL__/api_java/db_open.html__OCT__Db.DB_DIRTY_READ Db.open@Db.DB_DIRTY_READ
+__APIREL__/api_java/db_open.html__OCT__Db.DB_EXCL Db.open@Db.DB_EXCL
+__APIREL__/api_java/db_open.html__OCT__Db.DB_NOMMAP Db.open@Db.DB_NOMMAP
+__APIREL__/api_java/db_open.html__OCT__Db.DB_RDONLY Db.open@Db.DB_RDONLY
+__APIREL__/api_java/db_open.html__OCT__Db.DB_THREAD Db.open@Db.DB_THREAD
+__APIREL__/api_java/db_open.html__OCT__Db.DB_TRUNCATE Db.open@Db.DB_TRUNCATE
+__APIREL__/api_java/db_open.html__OCT__Db.DB_OLD_VERSION Db.open@Db.DB_OLD_VERSION
+__APIREL__/api_java/db_put.html__OCT__2 @Db.put
+__APIREL__/api_java/db_put.html__OCT__Db.DB_APPEND Db.put@Db.DB_APPEND
+__APIREL__/api_java/db_put.html__OCT__Db.DB_NODUPDATA Db.put@Db.DB_NODUPDATA
+__APIREL__/api_java/db_put.html__OCT__Db.DB_NOOVERWRITE Db.put@Db.DB_NOOVERWRITE
+__APIREL__/api_java/db_put.html__OCT__Db.DB_AUTO_COMMIT Db.put@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/db_remove.html__OCT__2 @Db.remove
+__APIREL__/api_java/db_rename.html__OCT__2 @Db.rename
+__APIREL__/api_java/db_set_append_recno.html__OCT__2 @Db.set_append_recno
+__APIREL__/api_java/db_set_bt_compare.html__OCT__2 @Db.set_bt_compare
+__APIREL__/api_java/db_set_bt_minkey.html__OCT__2 @Db.set_bt_minkey
+__APIREL__/api_java/db_set_bt_prefix.html__OCT__2 @Db.set_bt_prefix
+__APIREL__/api_java/db_set_cache_priority.html__OCT__2 @Db.set_cache_priority
+__APIREL__/api_java/db_set_cache_priority.html__OCT__Db.DB_PRIORITY_VERY_LOW Db.set_cache_priority@Db.DB_PRIORITY_VERY_LOW
+__APIREL__/api_java/db_set_cache_priority.html__OCT__Db.DB_PRIORITY_LOW Db.set_cache_priority@Db.DB_PRIORITY_LOW
+__APIREL__/api_java/db_set_cache_priority.html__OCT__Db.DB_PRIORITY_DEFAULT Db.set_cache_priority@Db.DB_PRIORITY_DEFAULT
+__APIREL__/api_java/db_set_cache_priority.html__OCT__Db.DB_PRIORITY_HIGH Db.set_cache_priority@Db.DB_PRIORITY_HIGH
+__APIREL__/api_java/db_set_cache_priority.html__OCT__Db.DB_PRIORITY_VERY_HIGH Db.set_cache_priority@Db.DB_PRIORITY_VERY_HIGH
+__APIREL__/api_java/db_set_cachesize.html__OCT__2 @Db.set_cachesize
+__APIREL__/api_java/db_set_dup_compare.html__OCT__2 @Db.set_dup_compare
+__APIREL__/api_java/db_set_encrypt.html__OCT__2 @Db.set_encrypt
+__APIREL__/api_java/db_set_encrypt.html__OCT__Db.DB_ENCRYPT_AES Db.set_encrypt@Db.DB_ENCRYPT_AES
+__APIREL__/api_java/db_set_errcall.html__OCT__2 @Db.set_errcall
+__APIREL__/api_java/db_set_errpfx.html__OCT__2 @Db.set_errpfx
+__APIREL__/api_java/db_set_feedback.html__OCT__2 @Db.set_feedback
+__APIREL__/api_java/db_set_feedback.html__OCT__Db.DB_UPGRADE Db.set_feedback@Db.DB_UPGRADE
+__APIREL__/api_java/db_set_feedback.html__OCT__Db.DB_VERIFY Db.set_feedback@Db.DB_VERIFY
+__APIREL__/api_java/db_set_flags.html__OCT__2 @Db.set_flags
+__APIREL__/api_java/db_set_flags.html__OCT__3 database page @checksum
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_CHKSUM_SHA1 Db.set_flags@Db.DB_CHKSUM_SHA1
+__APIREL__/api_java/db_set_flags.html__OCT__4 database @encryption
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_ENCRYPT Db.set_flags@Db.DB_ENCRYPT
+__APIREL__/api_java/db_set_flags.html__OCT__5 @duplicate data items
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_DUP Db.set_flags@Db.DB_DUP
+__APIREL__/api_java/db_set_flags.html__OCT__6 sorted @duplicate data items
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_DUPSORT Db.set_flags@Db.DB_DUPSORT
+__APIREL__/api_java/db_set_flags.html__OCT__7 accessing Btree records by @record number
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_RECNUM Db.set_flags@Db.DB_RECNUM
+__APIREL__/api_java/db_set_flags.html__OCT__8 turn off @reverse splits in Btree databases
+__APIREL__/api_java/db_set_flags.html__OCT__9 turn off reverse @splits in Btree databases
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_REVSPLITOFF Db.set_flags@Db.DB_REVSPLITOFF
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_DUP Db.set_flags@Db.DB_DUP
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_DUPSORT Db.set_flags@Db.DB_DUPSORT
+__APIREL__/api_java/db_set_flags.html__OCT__10 @renumbering records in Recno databases
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_RENUMBER Db.set_flags@Db.DB_RENUMBER
+__APIREL__/api_java/db_set_flags.html__OCT__11 pre-loading @text files into Recno databases
+__APIREL__/api_java/db_set_flags.html__OCT__Db.DB_SNAPSHOT Db.set_flags@Db.DB_SNAPSHOT
+__APIREL__/api_java/db_set_h_ffactor.html__OCT__2 @Db.set_h_ffactor
+__APIREL__/api_java/db_set_h_hash.html__OCT__2 @Db.set_h_hash
+__APIREL__/api_java/db_set_h_nelem.html__OCT__2 @Db.set_h_nelem
+__APIREL__/api_java/db_set_lorder.html__OCT__2 @Db.set_lorder
+__APIREL__/api_java/db_set_pagesize.html__OCT__2 @Db.set_pagesize
+__APIREL__/api_java/db_set_q_extentsize.html__OCT__2 @Db.set_q_extentsize
+__APIREL__/api_java/db_set_re_delim.html__OCT__2 @Db.set_re_delim
+__APIREL__/api_java/db_set_re_len.html__OCT__2 @Db.set_re_len
+__APIREL__/api_java/db_set_re_pad.html__OCT__2 @Db.set_re_pad
+__APIREL__/api_java/db_set_re_source.html__OCT__2 @Db.set_re_source
+__APIREL__/api_java/db_stat.html__OCT__2 @Db.stat
+__APIREL__/api_java/db_stat.html__OCT__Db.DB_FAST_STAT Db.stat@Db.DB_FAST_STAT
+__APIREL__/api_java/db_sync.html__OCT__2 @Db.sync
+__APIREL__/api_java/db_truncate.html__OCT__2 @Db.truncate
+__APIREL__/api_java/db_truncate.html__OCT__Db.DB_AUTO_COMMIT Db.truncate@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/db_upgrade.html__OCT__2 @Db.upgrade
+__APIREL__/api_java/db_upgrade.html__OCT__Db.DB_DUPSORT Db.upgrade@Db.DB_DUPSORT
+__APIREL__/api_java/db_upgrade.html__OCT__Db.DB_OLD_VERSION Db.upgrade@Db.DB_OLD_VERSION
+__APIREL__/api_java/db_verify.html__OCT__2 @Db.verify
+__APIREL__/api_java/db_verify.html__OCT__Db.DB_SALVAGE Db.verify@Db.DB_SALVAGE
+__APIREL__/api_java/db_verify.html__OCT__Db.DB_AGGRESSIVE Db.verify@Db.DB_AGGRESSIVE
+__APIREL__/api_java/db_verify.html__OCT__Db.DB_PRINTABLE Db.verify@Db.DB_PRINTABLE
+__APIREL__/api_java/db_verify.html__OCT__Db.DB_NOORDERCHK Db.verify@Db.DB_NOORDERCHK
+__APIREL__/api_java/db_verify.html__OCT__Db.DB_ORDERCHKONLY Db.verify@Db.DB_ORDERCHKONLY
+__APIREL__/api_java/db_verify.html__OCT__3 @DB_VERIFY_BAD
+__APIREL__/api_java/dbt_class.html__OCT__2 @Dbt
+__APIREL__/api_java/dbt_class.html__OCT__3 @key/data pairs
+__APIREL__/api_java/dbt_class.html__OCT__data Dbt@data
+__APIREL__/api_java/dbt_class.html__OCT__Db.DB_DBT_MALLOC Dbt@Db.DB_DBT_MALLOC
+__APIREL__/api_java/dbt_class.html__OCT__Db.DB_DBT_REALLOC Dbt@Db.DB_DBT_REALLOC
+__APIREL__/api_java/dbt_class.html__OCT__Db.DB_DBT_USERMEM Dbt@Db.DB_DBT_USERMEM
+__APIREL__/api_java/dbt_class.html__OCT__Db.DB_DBT_PARTIAL Dbt@Db.DB_DBT_PARTIAL
+__APIREL__/api_java/db_set_error_stream.html__OCT__2 @Db.set_error_stream
+__APIREL__/api_java/dbt_bulk_class.html__OCT__2 @DbMultipleDataIterator
+__APIREL__/api_java/dbc_class.html__OCT__2 @Dbc
+__APIREL__/api_java/dbc_close.html__OCT__2 @Dbc.close
+__APIREL__/api_java/dbc_count.html__OCT__2 @Dbc.count
+__APIREL__/api_java/dbc_del.html__OCT__2 @Dbc.del
+__APIREL__/api_java/dbc_dup.html__OCT__2 @Dbc.dup
+__APIREL__/api_java/dbc_dup.html__OCT__Db.DB_POSITION Dbc.dup@Db.DB_POSITION
+__APIREL__/api_java/dbc_get.html__OCT__2 @Dbc.get
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_CURRENT Dbc.get@Db.DB_CURRENT
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_FIRST Dbc.get@Db.DB_FIRST
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_LAST Dbc.get@Db.DB_LAST
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_GET_BOTH Dbc.get@Db.DB_GET_BOTH
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_GET_BOTH_RANGE Dbc.get@Db.DB_GET_BOTH_RANGE
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_GET_RECNO Dbc.get@Db.DB_GET_RECNO
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_JOIN_ITEM Dbc.get@Db.DB_JOIN_ITEM
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_NEXT Dbc.get@Db.DB_NEXT
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_PREV Dbc.get@Db.DB_PREV
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_NEXT_DUP Dbc.get@Db.DB_NEXT_DUP
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_NEXT_NODUP Dbc.get@Db.DB_NEXT_NODUP
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_PREV_NODUP Dbc.get@Db.DB_PREV_NODUP
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_SET Dbc.get@Db.DB_SET
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_SET_RANGE Dbc.get@Db.DB_SET_RANGE
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_SET_RECNO Dbc.get@Db.DB_SET_RECNO
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_DIRTY_READ Dbc.get@Db.DB_DIRTY_READ
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_MULTIPLE Dbc.get@Db.DB_MULTIPLE
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_MULTIPLE_KEY Dbc.get@Db.DB_MULTIPLE_KEY
+__APIREL__/api_java/dbc_get.html__OCT__Db.DB_RMW Dbc.get@Db.DB_RMW
+__APIREL__/api_java/dbc_put.html__OCT__2 @Dbc.put
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_AFTER Dbc.put@Db.DB_AFTER
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_BEFORE Dbc.put@Db.DB_BEFORE
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_CURRENT Dbc.put@Db.DB_CURRENT
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_KEYFIRST Dbc.put@Db.DB_KEYFIRST
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_KEYLAST Dbc.put@Db.DB_KEYLAST
+__APIREL__/api_java/dbc_put.html__OCT__Db.DB_NODUPDATA Dbc.put@Db.DB_NODUPDATA
+__APIREL__/api_java/except_class.html__OCT__2 @DbException
+__APIREL__/api_java/runrec_class.html__OCT__2 @DbRunRecoveryException
+__APIREL__/api_java/env_class.html__OCT__2 @DbEnv
+__APIREL__/api_java/env_class.html__OCT__Db.DB_CLIENT DbEnv@Db.DB_CLIENT
+__APIREL__/api_java/env_close.html__OCT__2 @DbEnv.close
+__APIREL__/api_java/env_dbremove.html__OCT__2 @DbEnv.dbremove
+__APIREL__/api_java/env_dbremove.html__OCT__Db.DB_AUTO_COMMIT DbEnv.dbremove@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/env_dbrename.html__OCT__2 @DbEnv.dbrename
+__APIREL__/api_java/env_dbrename.html__OCT__Db.DB_AUTO_COMMIT DbEnv.dbrename@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/env_err.html__OCT__2 @DbEnv.err
+__APIREL__/api_java/env_open.html__OCT__2 @DbEnv.open
+__APIREL__/api_java/env_open.html__OCT__Db.DB_JOINENV DbEnv.open@Db.DB_JOINENV
+__APIREL__/api_java/env_open.html__OCT__Db.DB_INIT_CDB DbEnv.open@Db.DB_INIT_CDB
+__APIREL__/api_java/env_open.html__OCT__Db.DB_INIT_LOCK DbEnv.open@Db.DB_INIT_LOCK
+__APIREL__/api_java/env_open.html__OCT__Db.DB_INIT_LOG DbEnv.open@Db.DB_INIT_LOG
+__APIREL__/api_java/env_open.html__OCT__Db.DB_INIT_MPOOL DbEnv.open@Db.DB_INIT_MPOOL
+__APIREL__/api_java/env_open.html__OCT__Db.DB_INIT_TXN DbEnv.open@Db.DB_INIT_TXN
+__APIREL__/api_java/env_open.html__OCT__Db.DB_RECOVER DbEnv.open@Db.DB_RECOVER
+__APIREL__/api_java/env_open.html__OCT__Db.DB_RECOVER_FATAL DbEnv.open@Db.DB_RECOVER_FATAL
+__APIREL__/api_java/env_open.html__OCT__3 use @environment variables in naming
+__APIREL__/api_java/env_open.html__OCT__Db.DB_USE_ENVIRON DbEnv.open@Db.DB_USE_ENVIRON
+__APIREL__/api_java/env_open.html__OCT__Db.DB_USE_ENVIRON_ROOT DbEnv.open@Db.DB_USE_ENVIRON_ROOT
+__APIREL__/api_java/env_open.html__OCT__Db.DB_CREATE DbEnv.open@Db.DB_CREATE
+__APIREL__/api_java/env_open.html__OCT__Db.DB_LOCKDOWN DbEnv.open@Db.DB_LOCKDOWN
+__APIREL__/api_java/env_open.html__OCT__Db.DB_PRIVATE DbEnv.open@Db.DB_PRIVATE
+__APIREL__/api_java/env_open.html__OCT__Db.DB_SYSTEM_MEM DbEnv.open@Db.DB_SYSTEM_MEM
+__APIREL__/api_java/env_open.html__OCT__Db.DB_THREAD DbEnv.open@Db.DB_THREAD
+__APIREL__/api_java/env_remove.html__OCT__2 @DbEnv.remove
+__APIREL__/api_java/env_remove.html__OCT__Db.DB_FORCE DbEnv.remove@Db.DB_FORCE
+__APIREL__/api_java/env_remove.html__OCT__3 use @environment variables in naming
+__APIREL__/api_java/env_remove.html__OCT__Db.DB_USE_ENVIRON DbEnv.remove@Db.DB_USE_ENVIRON
+__APIREL__/api_java/env_remove.html__OCT__Db.DB_USE_ENVIRON_ROOT DbEnv.remove@Db.DB_USE_ENVIRON_ROOT
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__2 @DbEnv.set_app_dispatch
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__Db.DB_TXN_BACKWARD_ROLL DbEnv.set_app_dispatch@Db.DB_TXN_BACKWARD_ROLL
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__Db.DB_TXN_FORWARD_ROLL DbEnv.set_app_dispatch@Db.DB_TXN_FORWARD_ROLL
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__Db.DB_TXN_ABORT DbEnv.set_app_dispatch@Db.DB_TXN_ABORT
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__Db.DB_TXN_APPLY DbEnv.set_app_dispatch@Db.DB_TXN_APPLY
+__APIREL__/api_java/env_set_app_dispatch.html__OCT__Db.DB_TXN_PRINT DbEnv.set_app_dispatch@Db.DB_TXN_PRINT
+__APIREL__/api_java/env_set_cachesize.html__OCT__2 @DbEnv.set_cachesize
+__APIREL__/api_java/env_set_data_dir.html__OCT__2 @DbEnv.set_data_dir
+__APIREL__/api_java/env_set_encrypt.html__OCT__2 @DbEnv.set_encrypt
+__APIREL__/api_java/env_set_encrypt.html__OCT__Db.DB_ENCRYPT_AES DbEnv.set_encrypt@Db.DB_ENCRYPT_AES
+__APIREL__/api_java/env_set_errcall.html__OCT__2 @DbEnv.set_errcall
+__APIREL__/api_java/env_set_errpfx.html__OCT__2 @DbEnv.set_errpfx
+__APIREL__/api_java/env_set_feedback.html__OCT__2 @DbEnv.set_feedback
+__APIREL__/api_java/env_set_feedback.html__OCT__Db.DB_RECOVER DbEnv.set_feedback@Db.DB_RECOVER
+__APIREL__/api_java/env_set_flags.html__OCT__2 @DbEnv.set_flags
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_AUTO_COMMIT DbEnv.set_flags@Db.DB_AUTO_COMMIT
+__APIREL__/api_java/env_set_flags.html__OCT__3 configure @locking for Berkeley DB Concurrent Data Store
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_CDB_ALLDB DbEnv.set_flags@Db.DB_CDB_ALLDB
+__APIREL__/api_java/env_set_flags.html__OCT__4 turn off system @buffering for database files
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_DIRECT_DB DbEnv.set_flags@Db.DB_DIRECT_DB
+__APIREL__/api_java/env_set_flags.html__OCT__5 turn off system @buffering for log files
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_DIRECT_LOG DbEnv.set_flags@Db.DB_DIRECT_LOG
+__APIREL__/api_java/env_set_flags.html__OCT__6 ignore @locking
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_NOLOCKING DbEnv.set_flags@Db.DB_NOLOCKING
+__APIREL__/api_java/env_set_flags.html__OCT__7 turn off database file @memory mapping
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_NOMMAP DbEnv.set_flags@Db.DB_NOMMAP
+__APIREL__/api_java/env_set_flags.html__OCT__8 ignore database environment @panic
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_NOPANIC DbEnv.set_flags@Db.DB_NOPANIC
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_OVERWRITE DbEnv.set_flags@Db.DB_OVERWRITE
+__APIREL__/api_java/env_set_flags.html__OCT__9 turn off access to a database @environment
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_PANIC_ENVIRONMENT DbEnv.set_flags@Db.DB_PANIC_ENVIRONMENT
+__APIREL__/api_java/env_set_flags.html__OCT__10 fault database @environment in during open
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_REGION_INIT DbEnv.set_flags@Db.DB_REGION_INIT
+__APIREL__/api_java/env_set_flags.html__OCT__11 turn off synchronous @transaction commit
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_TXN_NOSYNC DbEnv.set_flags@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/env_set_flags.html__OCT__12 turn off synchronous @transaction commit
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_TXN_WRITE_NOSYNC DbEnv.set_flags@Db.DB_TXN_WRITE_NOSYNC
+__APIREL__/api_java/env_set_flags.html__OCT__13 configure for @stress testing
+__APIREL__/api_java/env_set_flags.html__OCT__Db.DB_YIELDCPU DbEnv.set_flags@Db.DB_YIELDCPU
+__APIREL__/api_java/env_set_lg_bsize.html__OCT__2 @DbEnv.set_lg_bsize
+__APIREL__/api_java/env_set_lg_dir.html__OCT__2 @DbEnv.set_lg_dir
+__APIREL__/api_java/env_set_lg_max.html__OCT__2 @DbEnv.set_lg_max
+__APIREL__/api_java/env_set_lg_regionmax.html__OCT__2 @DbEnv.set_lg_regionmax
+__APIREL__/api_java/env_set_lk_conflicts.html__OCT__2 @DbEnv.set_lk_conflicts
+__APIREL__/api_java/env_set_lk_detect.html__OCT__2 @DbEnv.set_lk_detect
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_DEFAULT DbEnv.set_lk_detect@Db.DB_LOCK_DEFAULT
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_EXPIRE DbEnv.set_lk_detect@Db.DB_LOCK_EXPIRE
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_MAXLOCKS DbEnv.set_lk_detect@Db.DB_LOCK_MAXLOCKS
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_MINLOCKS DbEnv.set_lk_detect@Db.DB_LOCK_MINLOCKS
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_MINWRITE DbEnv.set_lk_detect@Db.DB_LOCK_MINWRITE
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_OLDEST DbEnv.set_lk_detect@Db.DB_LOCK_OLDEST
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_RANDOM DbEnv.set_lk_detect@Db.DB_LOCK_RANDOM
+__APIREL__/api_java/env_set_lk_detect.html__OCT__Db.DB_LOCK_YOUNGEST DbEnv.set_lk_detect@Db.DB_LOCK_YOUNGEST
+__APIREL__/api_java/env_set_lk_max_lockers.html__OCT__2 @DbEnv.set_lk_max_lockers
+__APIREL__/api_java/env_set_lk_max_locks.html__OCT__2 @DbEnv.set_lk_max_locks
+__APIREL__/api_java/env_set_lk_max_objects.html__OCT__2 @DbEnv.set_lk_max_objects
+__APIREL__/api_java/env_set_mp_mmapsize.html__OCT__2 @DbEnv.set_mp_mmapsize
+__APIREL__/api_java/env_set_rpc_server.html__OCT__2 @DbEnv.set_rpc_server
+__APIREL__/api_java/env_set_rpc_server.html__OCT__3 @DB_NOSERVER
+__APIREL__/api_java/env_set_rpc_server.html__OCT__4 @DB_NOSERVER_ID
+__APIREL__/api_java/env_set_rpc_server.html__OCT__Db.DB_NOSERVER DbEnv.set_rpc_server@Db.DB_NOSERVER
+__APIREL__/api_java/env_set_rpc_server.html__OCT__Db.DB_NOSERVER_ID DbEnv.set_rpc_server@Db.DB_NOSERVER_ID
+__APIREL__/api_java/env_set_rpc_server.html__OCT__Db.DB_NOSERVER_HOME DbEnv.set_rpc_server@Db.DB_NOSERVER_HOME
+__APIREL__/api_java/env_set_shm_key.html__OCT__2 @DbEnv.set_shm_key
+__APIREL__/api_java/env_set_tas_spins.html__OCT__2 @DbEnv.set_tas_spins
+__APIREL__/api_java/env_set_timeout.html__OCT__2 @DbEnv.set_timeout
+__APIREL__/api_java/env_set_timeout.html__OCT__Db.DB_SET_LOCK_TIMEOUT DbEnv.set_timeout@Db.DB_SET_LOCK_TIMEOUT
+__APIREL__/api_java/env_set_timeout.html__OCT__Db.DB_SET_TXN_TIMEOUT DbEnv.set_timeout@Db.DB_SET_TXN_TIMEOUT
+__APIREL__/api_java/env_set_tmp_dir.html__OCT__2 @DbEnv.set_tmp_dir
+__APIREL__/api_java/env_set_tmp_dir.html__OCT__3 @temporary files
+__APIREL__/api_java/env_set_tx_max.html__OCT__2 @DbEnv.set_tx_max
+__APIREL__/api_java/env_set_tx_timestamp.html__OCT__2 @DbEnv.set_tx_timestamp
+__APIREL__/api_java/env_set_verbose.html__OCT__2 @DbEnv.set_verbose
+__APIREL__/api_java/env_set_verbose.html__OCT__Db.DB_VERB_CHKPOINT DbEnv.set_verbose@Db.DB_VERB_CHKPOINT
+__APIREL__/api_java/env_set_verbose.html__OCT__Db.DB_VERB_DEADLOCK DbEnv.set_verbose@Db.DB_VERB_DEADLOCK
+__APIREL__/api_java/env_set_verbose.html__OCT__Db.DB_VERB_RECOVERY DbEnv.set_verbose@Db.DB_VERB_RECOVERY
+__APIREL__/api_java/env_set_verbose.html__OCT__Db.DB_VERB_REPLICATION DbEnv.set_verbose@Db.DB_VERB_REPLICATION
+__APIREL__/api_java/env_set_verbose.html__OCT__Db.DB_VERB_WAITSFOR DbEnv.set_verbose@Db.DB_VERB_WAITSFOR
+__APIREL__/api_java/env_strerror.html__OCT__2 @DbEnv.strerror
+__APIREL__/api_java/env_version.html__OCT__2 @DbEnv.get_version_major
+__APIREL__/api_java/env_set_error_stream.html__OCT__2 @DbEnv.set_error_stream
+__APIREL__/api_java/lock_class.html__OCT__2 @DbLock
+__APIREL__/api_java/lock_detect.html__OCT__2 @DbEnv.lock_detect
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_DEFAULT DbEnv.lock_detect@Db.DB_LOCK_DEFAULT
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_EXPIRE DbEnv.lock_detect@Db.DB_LOCK_EXPIRE
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_MAXLOCKS DbEnv.lock_detect@Db.DB_LOCK_MAXLOCKS
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_MINLOCKS DbEnv.lock_detect@Db.DB_LOCK_MINLOCKS
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_MINWRITE DbEnv.lock_detect@Db.DB_LOCK_MINWRITE
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_OLDEST DbEnv.lock_detect@Db.DB_LOCK_OLDEST
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_RANDOM DbEnv.lock_detect@Db.DB_LOCK_RANDOM
+__APIREL__/api_java/lock_detect.html__OCT__Db.DB_LOCK_YOUNGEST DbEnv.lock_detect@Db.DB_LOCK_YOUNGEST
+__APIREL__/api_java/lock_get.html__OCT__2 @DbEnv.lock_get
+__APIREL__/api_java/lock_get.html__OCT__Db.DB_LOCK_NOWAIT DbEnv.lock_get@Db.DB_LOCK_NOWAIT
+__APIREL__/api_java/lock_id.html__OCT__2 @DbEnv.lock_id
+__APIREL__/api_java/lock_id_free.html__OCT__2 @DbEnv.lock_id_free
+__APIREL__/api_java/lock_put.html__OCT__2 @DbEnv.lock_put
+__APIREL__/api_java/lock_stat.html__OCT__2 @DbEnv.lock_stat
+__APIREL__/api_java/lock_stat.html__OCT__Db.DB_STAT_CLEAR DbEnv.lock_stat@Db.DB_STAT_CLEAR
+__APIREL__/api_java/lock_vec.html__OCT__2 @DbEnv.lock_vec
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_NOWAIT DbEnv.lock_vec@Db.DB_LOCK_NOWAIT
+__APIREL__/api_java/lock_vec.html__OCT__op DbEnv.lock_vec@op
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_GET DbEnv.lock_vec@Db.DB_LOCK_GET
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_GET_TIMEOUT DbEnv.lock_vec@Db.DB_LOCK_GET_TIMEOUT
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_PUT DbEnv.lock_vec@Db.DB_LOCK_PUT
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_PUT_ALL DbEnv.lock_vec@Db.DB_LOCK_PUT_ALL
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_PUT_OBJ DbEnv.lock_vec@Db.DB_LOCK_PUT_OBJ
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_TIMEOUT DbEnv.lock_vec@Db.DB_LOCK_TIMEOUT
+__APIREL__/api_java/lock_vec.html__OCT__lock DbEnv.lock_vec@lock
+__APIREL__/api_java/lock_vec.html__OCT__mode DbEnv.lock_vec@mode
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_READ DbEnv.lock_vec@Db.DB_LOCK_READ
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_WRITE DbEnv.lock_vec@Db.DB_LOCK_WRITE
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_IWRITE DbEnv.lock_vec@Db.DB_LOCK_IWRITE
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_IREAD DbEnv.lock_vec@Db.DB_LOCK_IREAD
+__APIREL__/api_java/lock_vec.html__OCT__Db.DB_LOCK_IWR DbEnv.lock_vec@Db.DB_LOCK_IWR
+__APIREL__/api_java/lock_vec.html__OCT__obj DbEnv.lock_vec@obj
+__APIREL__/api_java/deadlock_class.html__OCT__2 @DbDeadlockException
+__APIREL__/api_java/lockng_class.html__OCT__2 @DbLockNotGrantedException
+__APIREL__/api_java/log_archive.html__OCT__2 @DbEnv.log_archive
+__APIREL__/api_java/log_archive.html__OCT__Db.DB_ARCH_ABS DbEnv.log_archive@Db.DB_ARCH_ABS
+__APIREL__/api_java/log_archive.html__OCT__Db.DB_ARCH_DATA DbEnv.log_archive@Db.DB_ARCH_DATA
+__APIREL__/api_java/log_archive.html__OCT__Db.DB_ARCH_LOG DbEnv.log_archive@Db.DB_ARCH_LOG
+__APIREL__/api_java/log_compare.html__OCT__2 @DbEnv.log_compare
+__APIREL__/api_java/log_cursor.html__OCT__2 @DbEnv.log_cursor
+__APIREL__/api_java/log_file.html__OCT__2 @DbEnv.log_file
+__APIREL__/api_java/log_flush.html__OCT__2 @DbEnv.log_flush
+__APIREL__/api_java/log_put.html__OCT__2 @DbEnv.log_put
+__APIREL__/api_java/log_put.html__OCT__Db.DB_FLUSH DbEnv.log_put@Db.DB_FLUSH
+__APIREL__/api_java/log_stat.html__OCT__2 @DbEnv.log_stat
+__APIREL__/api_java/log_stat.html__OCT__Db.DB_STAT_CLEAR DbEnv.log_stat@Db.DB_STAT_CLEAR
+__APIREL__/api_java/logc_class.html__OCT__2 @DbLogc
+__APIREL__/api_java/logc_close.html__OCT__2 @DbLogc.close
+__APIREL__/api_java/logc_get.html__OCT__2 @DbLogc.get
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_FIRST DbLogc.get@Db.DB_FIRST
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_LAST DbLogc.get@Db.DB_LAST
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_NEXT DbLogc.get@Db.DB_NEXT
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_PREV DbLogc.get@Db.DB_PREV
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_CURRENT DbLogc.get@Db.DB_CURRENT
+__APIREL__/api_java/logc_get.html__OCT__Db.DB_SET DbLogc.get@Db.DB_SET
+__APIREL__/api_java/lsn_class.html__OCT__2 @DbLsn
+__APIREL__/api_java/memp_fclose.html__OCT__2 @DbMpoolFile.close
+__APIREL__/api_java/memp_fopen.html__OCT__2 @DbMpoolFile.open
+__APIREL__/api_java/memp_fsync.html__OCT__2 @DbMpoolFile.sync
+__APIREL__/api_java/memp_register.html__OCT__2 @DbEnv.memp_register
+__APIREL__/api_java/memp_stat.html__OCT__2 @DbEnv.memp_stat
+__APIREL__/api_java/memp_stat.html__OCT__Db.DB_STAT_CLEAR DbEnv.memp_stat@Db.DB_STAT_CLEAR
+__APIREL__/api_java/memp_sync.html__OCT__2 @DbEnv.memp_sync
+__APIREL__/api_java/memp_trickle.html__OCT__2 @DbEnv.memp_trickle
+__APIREL__/api_java/mempfile_class.html__OCT__2 @DbMpoolFile
+__APIREL__/api_java/memp_class.html__OCT__2 @DbMemoryException
+__APIREL__/api_java/rep_elect.html__OCT__2 @DbEnv.rep_elect
+__APIREL__/api_java/rep_elect.html__OCT__3 @DB_REP_UNAVAIL
+__APIREL__/api_java/rep_limit.html__OCT__2 @DbEnv.set_rep_limit
+__APIREL__/api_java/rep_message.html__OCT__2 @DbEnv.rep_process_message
+__APIREL__/api_java/rep_start.html__OCT__2 @DbEnv.rep_start
+__APIREL__/api_java/rep_start.html__OCT__Db.DB_REP_CLIENT DbEnv.rep_start@Db.DB_REP_CLIENT
+__APIREL__/api_java/rep_start.html__OCT__Db.DB_REP_LOGSONLY DbEnv.rep_start@Db.DB_REP_LOGSONLY
+__APIREL__/api_java/rep_start.html__OCT__Db.DB_REP_MASTER DbEnv.rep_start@Db.DB_REP_MASTER
+__APIREL__/api_java/rep_stat.html__OCT__2 @DbEnv.rep_stat
+__APIREL__/api_java/rep_stat.html__OCT__Db.DB_STAT_CLEAR DbEnv.rep_stat@Db.DB_STAT_CLEAR
+__APIREL__/api_java/rep_transport.html__OCT__2 @DbEnv.set_rep_transport
+__APIREL__/api_java/rep_transport.html__OCT__3 @DB_EID_BROADCAST
+__APIREL__/api_java/rep_transport.html__OCT__Db.DB_REP_PERMANENT DbEnv.set_rep_transport@Db.DB_REP_PERMANENT
+__APIREL__/api_java/txn_abort.html__OCT__2 @DbTxn.abort
+__APIREL__/api_java/txn_begin.html__OCT__2 @DbEnv.txn_begin
+__APIREL__/api_java/txn_begin.html__OCT__Db.DB_DIRTY_READ DbEnv.txn_begin@Db.DB_DIRTY_READ
+__APIREL__/api_java/txn_begin.html__OCT__Db.DB_TXN_NOSYNC DbEnv.txn_begin@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/txn_begin.html__OCT__Db.DB_TXN_NOWAIT DbEnv.txn_begin@Db.DB_TXN_NOWAIT
+__APIREL__/api_java/txn_begin.html__OCT__Db.DB_TXN_SYNC DbEnv.txn_begin@Db.DB_TXN_SYNC
+__APIREL__/api_java/txn_checkpoint.html__OCT__2 @DbEnv.txn_checkpoint
+__APIREL__/api_java/txn_checkpoint.html__OCT__Db.DB_FORCE DbEnv.txn_checkpoint@Db.DB_FORCE
+__APIREL__/api_java/txn_class.html__OCT__2 @DbTxn
+__APIREL__/api_java/txn_commit.html__OCT__2 @DbTxn.commit
+__APIREL__/api_java/txn_commit.html__OCT__Db.DB_TXN_NOSYNC DbTxn.commit@Db.DB_TXN_NOSYNC
+__APIREL__/api_java/txn_commit.html__OCT__Db.DB_TXN_SYNC DbTxn.commit@Db.DB_TXN_SYNC
+__APIREL__/api_java/txn_discard.html__OCT__2 @DbTxn.discard
+__APIREL__/api_java/txn_id.html__OCT__2 @DbTxn.id
+__APIREL__/api_java/txn_prepare.html__OCT__2 @DbTxn.prepare
+__APIREL__/api_java/txn_prepare.html__OCT__3 @DB_XIDDATASIZE
+__APIREL__/api_java/txn_recover.html__OCT__2 @DbEnv.txn_recover
+__APIREL__/api_java/txn_recover.html__OCT__Db.DB_FIRST DbEnv.txn_recover@Db.DB_FIRST
+__APIREL__/api_java/txn_recover.html__OCT__Db.DB_NEXT DbEnv.txn_recover@Db.DB_NEXT
+__APIREL__/api_java/txn_set_timeout.html__OCT__2 @DbTxn.set_timeout
+__APIREL__/api_java/txn_set_timeout.html__OCT__Db.DB_SET_LOCK_TIMEOUT DbTxn.set_timeout@Db.DB_SET_LOCK_TIMEOUT
+__APIREL__/api_java/txn_set_timeout.html__OCT__Db.DB_SET_TXN_TIMEOUT DbTxn.set_timeout@Db.DB_SET_TXN_TIMEOUT
+__APIREL__/api_java/txn_stat.html__OCT__2 @DbEnv.txn_stat
+__APIREL__/api_java/txn_stat.html__OCT__Db.DB_STAT_CLEAR DbEnv.txn_stat@Db.DB_STAT_CLEAR
diff --git a/libdb/docs/api_java/rep_elect.html b/libdb/docs/api_java/rep_elect.html
new file mode 100644
index 0000000..636ad55
--- /dev/null
+++ b/libdb/docs/api_java/rep_elect.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.rep_elect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.rep_elect</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int rep_elect(int nsites, int pri, int timeout)
+    throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.rep_elect method holds an election for the master of a replication
+group, returning the new master's environment ID.
+<p>The <b>nsites</b> parameter indicates the number of environments that
+the application believes are in the replication group. This number is
+used by Berkeley DB to avoid having two masters active simultaneously, even
+in the case of a network partition. During an election, a new master
+cannot be elected unless more than half of <b>nsites</b> agree on
+the new master. Thus, in the face of a network partition, the side of
+the partition with more than half the environments will elect a new
+master and continue, while the environments communicating with fewer
+than half the other environments will fail to find a new master.
+<p>The <b>pri</b> parameter is the priority of this environment. It
+must be a positive integer, or 0 if this environment is not permitted
+to become a master (see <a href="../ref/rep/pri.html">Replication
+environment priorities</a> for more information).
+<a name="3"><!--meow--></a>
+<p>The <b>timeout</b> parameter specifies a timeout period for the
+election. If the election has not completed after <b>timeout</b>
+microseconds, the call returns Db.DB_REP_UNAVAIL.
+<p>The DbEnv.rep_elect method either returns successfully, with the new
+master's environment ID as its return value, or it returns
+Db.DB_REP_UNAVAIL if the participating
+group members were unable to elect a new master for any reason. In the
+event of a successful return, the new master's ID may be the ID of the
+previous master, or the ID of the current environment. The application
+is responsible for adjusting its usage of the other environments in the
+replication group, including directing all database updates to the newly
+selected master, in accordance with the results of this election.
+<p>The thread of control that calls the DbEnv.rep_elect method must not be the
+thread of control that processes incoming messages; processing the
+incoming messages is necessary to successfully complete an election.
+<p>The DbEnv.rep_elect method throws an exception that encapsulates a non-zero error value on
+failure.
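+<p>As an illustration only, the following minimal sketch (assuming an
+already-opened <a href="../api_java/env_class.html">DbEnv</a> handle named <b>dbenv</b> that has been
+configured for replication, and an illustrative <b>myEnvId</b> variable
+holding this environment's own ID) calls an election and reacts to the
+result:
+<p><pre>
+int nsites = 5;         // environments believed to be in the group
+int pri = 100;          // this environment's election priority
+int timeout = 2000000;  // two seconds, expressed in microseconds
+try {
+    // On success the new master's environment ID is returned.
+    int newMaster = dbenv.rep_elect(nsites, pri, timeout);
+    if (newMaster == myEnvId) {
+        // This environment won the election; reconfigure it as master.
+        dbenv.rep_start(null, Db.DB_REP_MASTER);
+    }
+} catch (DbException e) {
+    // An exception encapsulating Db.DB_REP_UNAVAIL means no master
+    // could be elected before the timeout expired.
+}
+</pre>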
+<h1>Errors</h1>
+<p>The DbEnv.rep_elect method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>DB_REP_UNAVAIL<dd>The replication group was unable to elect a master.
+</dl>
+<p>The DbEnv.rep_elect method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.rep_elect method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_limit.html b/libdb/docs/api_java/rep_limit.html
new file mode 100644
index 0000000..003b97a
--- /dev/null
+++ b/libdb/docs/api_java/rep_limit.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_rep_limit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_rep_limit</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbEnv
+{
+ public int set_rep_limit(int gbytes, int bytes)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_rep_limit method imposes a limit on the amount of data that will
+be transmitted from a site during the course of a single call to the
+<a href="../api_java/rep_message.html">DbEnv.rep_process_message</a> method.
+<p>The <b>gbytes</b> and <b>bytes</b> parameters together represent the
+maximum number of bytes that can be sent during a single call to the
+<a href="../api_java/rep_message.html">DbEnv.rep_process_message</a> method.
+<p>The DbEnv.set_rep_limit method configures a database environment, not only operations
+performed using the specified <a href="../api_java/env_class.html">DbEnv</a> handle.
+<p>The DbEnv.set_rep_limit interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_rep_limit method throws an exception that encapsulates a non-zero error value on
+failure.
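+<p>As an illustrative sketch only (assuming an already-opened
+<a href="../api_java/env_class.html">DbEnv</a> handle named <b>dbenv</b>), the following call caps the data
+this environment transmits in response to any single incoming message at
+roughly ten megabytes:
+<p><pre>
+// gbytes and bytes are combined: 0 gigabytes plus 10 * 1024 * 1024 bytes.
+dbenv.set_rep_limit(0, 10 * 1024 * 1024);
+</pre>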
+<h1>Errors</h1>
+<p>The DbEnv.set_rep_limit method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_rep_limit method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_list.html b/libdb/docs/api_java/rep_list.html
new file mode 100644
index 0000000..ced06ad
--- /dev/null
+++ b/libdb/docs/api_java/rep_list.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Replication and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Replication and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Replication and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../api_java/rep_elect.html">DbEnv.rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../api_java/rep_limit.html">DbEnv.set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../api_java/rep_message.html">DbEnv.rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../api_java/rep_start.html">DbEnv.rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../api_java/rep_stat.html">DbEnv.rep_stat</a></td><td>Replication statistics</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_message.html b/libdb/docs/api_java/rep_message.html
new file mode 100644
index 0000000..f7c6800
--- /dev/null
+++ b/libdb/docs/api_java/rep_message.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.rep_process_message</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.rep_process_message</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public static class RepProcessMessage {
+ public int envid;
+}
+public int rep_process_message(
+ Dbt control, Dbt rec, DbEnv.RepProcessMessage envid)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.rep_process_message method processes an incoming replication
+message sent by a member of the replication group to the local database
+environment.
+<p>The <b>rec</b> and <b>control</b> parameters should reference a copy
+of the parameters specified by Berkeley DB for the <b>rec</b> and
+<b>control</b> parameters on the sending environment.
+<p>The <b>envid</b> parameter should contain the local identifier that
+corresponds to the environment that sent the message to be processed
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>For implementation reasons, all incoming replication messages must be
+processed using the same <a href="../api_java/env_class.html">DbEnv</a> handle. It is not required that
+a single thread of control process all messages, only that all threads
+of control processing messages use the same handle.
+<p>
+If a new master has been elected, the DbEnv.rep_process_message method will return Db.DB_REP_NEWMASTER.
+The <b>envid</b> parameter contains the environment ID of the new
+master. If the recipient of this error return has been made master, it
+is the application's responsibility to begin acting as the master
+environment.
+<p>
+If the system received contact information from a new environment, the DbEnv.rep_process_message method will return Db.DB_REP_NEWSITE.
+The <b>rec</b> parameter contains the opaque data specified in the
+<b>cdata</b> parameter to the <a href="../api_java/rep_start.html">DbEnv.rep_start</a>. The application
+should take whatever action is needed to establish a communication
+channel with this new environment.
+<p>
+If the replication group has more than one master, the DbEnv.rep_process_message method throws an exception that encapsulates Db.DB_REP_DUPMASTER.
+The application should reconfigure itself as a client by calling the
+<a href="../api_java/rep_start.html">DbEnv.rep_start</a> method, and then call for an election by calling
+<a href="../api_java/rep_elect.html">DbEnv.rep_elect</a>.
+<p>
+If an election is needed, the DbEnv.rep_process_message method throws an exception that encapsulates Db.DB_REP_HOLDELECTION.
+The application should call for an election by
+calling <a href="../api_java/rep_elect.html">DbEnv.rep_elect</a>.
+<p>
+If the current environment's logs are too far out of date with respect
+to the master to be automatically synchronized, the DbEnv.rep_process_message method throws an exception that encapsulates Db.DB_REP_OUTDATED. The
+application should copy over a hot backup of the environment, run
+recovery, and restart the client.
+<p>
+Otherwise, the DbEnv.rep_process_message method throws an exception that encapsulates a non-zero error value on
+failure.
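+<p>As an illustration only, the following sketch processes a message
+arriving from the application's own communication layer; the
+<b>receiveControl</b>, <b>receiveRec</b>, and <b>senderId</b> values are
+hypothetical placeholders for whatever that layer delivers:
+<p><pre>
+DbEnv.RepProcessMessage result = new DbEnv.RepProcessMessage();
+result.envid = senderId;   // local ID of the environment that sent this
+try {
+    int ret = dbenv.rep_process_message(receiveControl, receiveRec, result);
+    if (ret == Db.DB_REP_NEWMASTER) {
+        // result.envid now identifies the newly elected master.
+    } else if (ret == Db.DB_REP_NEWSITE) {
+        // receiveRec holds the new site's cdata; establish a channel to it.
+    }
+} catch (DbException e) {
+    // Db.DB_REP_DUPMASTER, Db.DB_REP_HOLDELECTION, and Db.DB_REP_OUTDATED
+    // are reported here; see the description above for the required action.
+}
+</pre>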
+<h1>Errors</h1>
+<p>The DbEnv.rep_process_message method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.rep_process_message method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_start.html b/libdb/docs/api_java/rep_start.html
new file mode 100644
index 0000000..0307a8d
--- /dev/null
+++ b/libdb/docs/api_java/rep_start.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.rep_start</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.rep_start</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void rep_start(Dbt cdata, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.rep_start method configures the database environment as a client
+or master in a group of replicated database environments. Replication
+master environments are the only database environments where replicated
+databases may be modified. Replication client environments are
+read-only as long as they are clients. Replication client environments
+may be upgraded to be replication master environments in the case that
+the current master fails or there is no master present.
+<p>The enclosing database environment must already have been opened by
+calling the <a href="../api_java/env_open.html">DbEnv.open</a> method and must already have been configured
+to send replication messages by calling the <a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a> method.
+<p>The <b>cdata</b> parameter is an opaque data item that is sent over
+the communication infrastructure when the client or master comes online
+(see <a href="../ref/rep/newsite.html">Connecting to a new site</a> for
+more information). If no such information is useful, <b>cdata</b>
+should be null.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_REP_CLIENT">Db.DB_REP_CLIENT</a><dd>Configure the environment as a replication client.
+<p><dt><a name="Db.DB_REP_LOGSONLY">Db.DB_REP_LOGSONLY</a><dd>Configure the environment as a log files-only client.
+<p><dt><a name="Db.DB_REP_MASTER">Db.DB_REP_MASTER</a><dd>Configure the environment as a replication master.
+</dl>
+<p>The DbEnv.rep_start method throws an exception that encapsulates a non-zero error value on
+failure.
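+<p>As an illustration only (assuming a <a href="../api_java/env_class.html">DbEnv</a> handle named <b>dbenv</b>
+whose replication transport has already been configured), a site typically
+starts as a client and is reconfigured as master only after winning an
+election:
+<p><pre>
+// Optional connection information broadcast to the other sites; it may
+// be null if there is nothing useful to send.  The address shown here is
+// purely illustrative.
+Dbt localInfo = new Dbt("replica1:7000".getBytes());
+dbenv.rep_start(localInfo, Db.DB_REP_CLIENT);
+
+// Later, if this environment wins an election:
+dbenv.rep_start(null, Db.DB_REP_MASTER);
+</pre>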
+<h1>Errors</h1>
+<p>The DbEnv.rep_start method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The environment was not already configured to communicate with a
+replication group by a call to <a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a>.
+<p>The environment was not already opened.
+</dl>
+<p>The DbEnv.rep_start method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.rep_start method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_stat.html b/libdb/docs/api_java/rep_stat.html
new file mode 100644
index 0000000..02a9a3a
--- /dev/null
+++ b/libdb/docs/api_java/rep_stat.html
@@ -0,0 +1,99 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.rep_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.rep_stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbRepStat rep_stat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.rep_stat method returns the replication subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv.rep_stat method creates a DbRepStat object encapsulating the
+replication subsystem statistics. The following data fields are
+available from the DbRepStat object:
+<p><dl compact>
+<dt>public int st_stat;<dd>The current replication mode. Set to <a href="../api_java/rep_start.html#DB_REP_MASTER">Db.DB_REP_MASTER</a> if the
+environment is a replication master, <a href="../api_java/rep_start.html#DB_REP_CLIENT">Db.DB_REP_CLIENT</a> if the
+environment is a replication client, <a href="../api_java/rep_start.html#DB_REP_LOGSONLY">Db.DB_REP_LOGSONLY</a> if the
+environment is a log-files-only replica, or 0 if replication is not
+configured.
+<dt>public DbLsn st_next_lsn;<dd>In replication environments configured as masters, the next LSN expected.
+In replication environments configured as clients, the next LSN to be used.
+<dt>public DbLsn st_waiting_lsn;<dd>The LSN of the first missed log record being waited for, or 0 if no log
+records are currently missing.
+<dt>public int st_dupmasters;<dd>The number of duplicate master conditions detected.
+<dt>public int st_env_id;<dd>The current environment ID.
+<dt>public int st_env_priority;<dd>The current environment priority.
+<dt>public int st_gen;<dd>The current generation number.
+<dt>public int st_log_duplicated;<dd>The number of duplicate log records received.
+<dt>public int st_log_queued;<dd>The number of log records currently queued.
+<dt>public int st_log_queued_max;<dd>The maximum number of log records ever queued at once.
+<dt>public int st_log_queued_total;<dd>The total number of log records queued.
+<dt>public int st_log_records;<dd>The number of log records received and appended to the log.
+<dt>public int st_log_requested;<dd>The number of log records missed and requested.
+<dt>public int st_master;<dd>The current master environment ID.
+<dt>public int st_master_changes;<dd>The number of times the master has changed.
+<dt>public int st_msgs_badgen;<dd>The number of messages received with a bad generation number.
+<dt>public int st_msgs_processed;<dd>The number of messages received and processed.
+<dt>public int st_msgs_recover;<dd>The number of messages ignored due to pending recovery.
+<dt>public int st_msgs_send_failures;<dd>The number of failed message sends.
+<dt>public int st_msgs_sent;<dd>The number of messages sent.
+<dt>public int st_newsites;<dd>The number of new site messages received.
+<dt>public int st_outdated;<dd>The number of outdated conditions detected.
+<dt>public int st_txns_applied;<dd>The number of transactions applied.
+<dt>public int st_elections;<dd>The number of elections held.
+<dt>public int st_elections_won;<dd>The number of elections won.
+<dt>public int st_election_status;<dd>The current election phase (0 if no election is in progress).
+<dt>public int st_election_cur_winner;<dd>The election winner.
+<dt>public int st_election_gen;<dd>The election generation number.
+<dt>public DbLsn st_election_lsn;<dd>The maximum LSN of the election winner.
+<dt>public int st_election_nsites;<dd>The number of sites expected to participate in elections.
+<dt>public int st_nthrottles;<dd>Transmission limited. This indicates the number of times that data
+transmission was stopped to limit the amount of data sent in response
+to a single call to <a href="../api_java/rep_message.html">DbEnv.rep_process_message</a>.
+<dt>public int st_election_priority;<dd>The election priority.
+<dt>public int st_election_tiebreaker;<dd>The election tiebreaker value.
+<dt>public int st_election_votes;<dd>The votes received this election round.
+</dl>
+<p>The DbEnv.rep_stat method throws an exception that encapsulates a non-zero error value on
+failure.
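+<p>As an illustration only, a monitoring thread might periodically print a
+few of these counters (assuming an open <a href="../api_java/env_class.html">DbEnv</a> handle named <b>dbenv</b>):
+<p><pre>
+DbRepStat stat = dbenv.rep_stat(0);
+System.out.println("generation:     " + stat.st_gen);
+System.out.println("log records:    " + stat.st_log_records);
+System.out.println("elections held: " + stat.st_elections);
+System.out.println("elections won:  " + stat.st_elections_won);
+// Pass Db.DB_STAT_CLEAR instead of 0 to reset the counters after reading.
+</pre>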
+<h1>Errors</h1>
+<p>The DbEnv.rep_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.rep_stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/rep_transport.html b/libdb/docs/api_java/rep_transport.html
new file mode 100644
index 0000000..f6bbd28
--- /dev/null
+++ b/libdb/docs/api_java/rep_transport.html
@@ -0,0 +1,103 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_rep_transport</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_rep_transport</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public interface DbEnvSend
+{
+ public abstract int db_send(DbEnv dbenv,
+     Dbt control, Dbt rec, int envid, int flags);
+}
+public class DbEnv
+{
+ public int set_rep_transport(DbEnvSend db_send)
+ throws DbException;
+ ...
+}
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_rep_transport method initializes the communication infrastructure
+for a database environment participating in a replicated application.
+<p>The <b>envid</b> parameter is the local environment's ID. It must be
+a positive integer and uniquely identify this Berkeley DB database environment
+(see <a href="../ref/rep/id.html">Replication environment IDs</a> for more
+information).
+<p>The <b>send</b> parameter is a callback interface used to transmit data
+using the replication application's communication infrastructure. The
+parameters to <b>send</b> are as follows:
+<p><dl compact>
+<p><dt>dbenv<dd>The enclosing database environment.
+<p><dt>control<dd>The control parameter is the first of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>rec<dd>The rec parameter is the second of the two data elements to be
+transmitted by the <b>send</b> interface.
+<p><dt>envid<dd>The <b>envid</b> parameter is a positive integer identifier that
+specifies the replication environment to which the message should be
+sent (see <a href="../ref/rep/id.html">Replication environment IDs</a> for
+more information).
+<p><a name="3"><!--meow--></a>
+The special identifier Db.DB_EID_BROADCAST indicates that a message
+should be broadcast to every environment in the replication group. The
+application may use a true broadcast protocol, or may send the message
+in sequence to each machine with which it is in communication.
+<p><dt>flags<dd>
+<p>The <b>flags</b> value must be set to 0 or to the bitwise inclusive <b>OR</b> of one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_REP_PERMANENT">Db.DB_REP_PERMANENT</a><dd>The record being sent is critical for maintaining database integrity
+(for example, the message includes a transaction commit). The
+application should take appropriate action to enforce the reliability
+guarantees it has chosen, such as waiting for acknowledgement from one
+or more clients.
+</dl>
+</dl>
+<p>The <b>send</b> interface must return 0 on success and non-zero on
+failure. If the <b>send</b> interface fails, the message being sent
+is necessary to maintain database integrity, and the local log is not
+configured for synchronous flushing, the local log will be flushed;
+otherwise, any error from the <b>send</b> interface will be ignored.
+<p>It may sometimes be useful to pass application-specific data to the
+<b>send</b> interface; see <a href="../ref/env/faq.html">Environment
+FAQ</a> for a discussion on how to do this.
+<p>The DbEnv.set_rep_transport method configures operations performed using the specified
+<a href="../api_java/env_class.html">DbEnv</a> handle, not all operations performed on the underlying
+database environment.
+<p>The DbEnv.set_rep_transport interface may be called at any time during the life of
+the application.
+<p>The DbEnv.set_rep_transport method throws an exception that encapsulates a non-zero error value on
+failure.
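+<p>As an illustration only, a transport callback following the DbEnvSend
+interface shown above might hand each message to an application-supplied
+messaging layer; the <b>broadcast</b> and <b>sendToSite</b> helpers are
+hypothetical placeholders for that layer:
+<p><pre>
+DbEnvSend transport = new DbEnvSend() {
+    public int db_send(DbEnv dbenv, Dbt control, Dbt rec,
+                       int envid, int flags) {
+        try {
+            if (envid == Db.DB_EID_BROADCAST)
+                broadcast(control, rec);        // send to every site
+            else
+                sendToSite(envid, control, rec);
+            // For Db.DB_REP_PERMANENT messages the application may also
+            // wait for acknowledgement before returning.
+            return 0;                           // success
+        } catch (Exception e) {
+            return 1;                           // non-zero reports failure
+        }
+    }
+};
+dbenv.set_rep_transport(transport);
+</pre>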
+<h1>Errors</h1>
+<p>The DbEnv.set_rep_transport method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_rep_transport method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/rep_list.html">Replication and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/runrec_class.html b/libdb/docs/api_java/runrec_class.html
new file mode 100644
index 0000000..ee2d827
--- /dev/null
+++ b/libdb/docs/api_java/runrec_class.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbRunRecoveryException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbRunRecoveryException</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbRunRecoveryException extends DbException { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the DbRunRecoveryException class and
+how it is used by the various Berkeley DB classes.
+<p>Errors can occur in the Berkeley DB library where the only solution is to shut
+down the application and run recovery (for example, if Berkeley DB is unable
+to allocate heap memory). When a fatal error occurs in Berkeley DB, methods
+will throw a DbRunRecoveryException, at which point all
+subsequent database calls will also fail in the same way. When this
+occurs, recovery should be performed.
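+<p>As an illustration only (assuming a Db handle <b>db</b>, an open
+transaction <b>txn</b>, and <a href="../api_java/dbt_class.html">Dbt</a> objects <b>key</b> and <b>data</b>;
+the <b>shutdownAndRunRecovery</b> helper is a hypothetical placeholder for
+the application's recovery procedure), this exception is usually caught
+separately from other errors:
+<p><pre>
+try {
+    db.put(txn, key, data, 0);
+} catch (DbRunRecoveryException e) {
+    // Fatal: no further Berkeley DB calls will succeed until the
+    // application shuts down and runs recovery on the environment.
+    shutdownAndRunRecovery();
+} catch (DbException e) {
+    // Ordinary, recoverable error handling goes here.
+}
+</pre>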
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_abort.html b/libdb/docs/api_java/txn_abort.html
new file mode 100644
index 0000000..2fdabb5
--- /dev/null
+++ b/libdb/docs/api_java/txn_abort.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.abort</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void abort()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.abort method causes an abnormal termination of the transaction.
+The log is played backward, and any necessary recovery operations are
+initiated through the <b>recover</b> function specified to
+<a href="../api_java/env_open.html">DbEnv.open</a>. After the log processing is completed, all locks
+held by the transaction are released. As is the case for
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>, applications that require strict two-phase locking
+should not explicitly release any locks.
+<p>In the case of nested transactions, aborting a parent transaction causes
+all children (unresolved or not) of the parent transaction to be aborted.
+<p>All cursors opened within the transaction must be closed before the
+transaction is aborted.
+<p>After DbTxn.abort has been called, regardless of its return, the
+<a href="../api_java/txn_class.html">DbTxn</a> handle may not be accessed again.
+<p>The DbTxn.abort method throws an exception that encapsulates a non-zero error value on
+failure.
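+<p>A minimal sketch, assuming an already-opened transactional environment
+handle named <b>dbenv</b>; the work performed inside the transaction is left
+as a comment because it is application-specific:
+<p><pre>
+void exampleAbort(DbEnv dbenv) throws DbException {
+    DbTxn txn = dbenv.txn_begin(null, 0);
+    boolean ok = false;
+    try {
+        // ... application work protected by txn goes here ...
+        ok = true;
+    } finally {
+        if (!ok)
+            txn.abort();    // undo everything done under txn
+        else
+            txn.commit(0);
+        // txn may not be accessed again after abort or commit.
+    }
+}
+</pre>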
+<h1>Errors</h1>
+<p>The DbTxn.abort method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.abort method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_begin.html b/libdb/docs/api_java/txn_begin.html
new file mode 100644
index 0000000..fda6c43
--- /dev/null
+++ b/libdb/docs/api_java/txn_begin.html
@@ -0,0 +1,98 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.txn_begin</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbTxn txn_begin(DbTxn parent, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_begin method creates a new transaction in the environment
+and returns a <a href="../api_java/txn_class.html">DbTxn</a> that uniquely identifies it.
+Calling the <a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a> or <a href="../api_java/txn_discard.html">DbTxn.discard</a> methods will discard the returned
+handle.
+<p>If the <b>parent</b> argument is non-null, the new transaction will
+be a nested transaction, with the transaction indicated by
+<b>parent</b> as its parent. Transactions may be
+nested to any level.
+In the presence of distributed transactions and two-phase commit,
+only the parental transaction (that is, a transaction without
+a <b>parent</b> specified) should be passed as an argument to
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>.
+<p>The <b>flags</b> value must be set to 0 or by bitwise inclusively <b>OR</b>'ing together one or
+more of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_DIRTY_READ">Db.DB_DIRTY_READ</a><dd>All read operations performed by the transaction will read modified but
+not yet committed data. Silently ignored if the <a href="../api_java/db_open.html#DB_DIRTY_READ">Db.DB_DIRTY_READ</a>
+flag was not specified when the underlying database was opened.
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency, and isolation) properties, but not D (durability); that is,
+database integrity will be maintained but it is possible that this
+transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a> interface. Any value specified in this
+interface overrides that setting.
+<p><dt><a name="Db.DB_TXN_NOWAIT">Db.DB_TXN_NOWAIT</a><dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction,
+throw a <a href="../api_java/lockng_class.html">DbLockNotGrantedException</a>
+immediately instead of blocking on the lock.
+<p><dt><a name="Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a><dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_java/env_set_flags.html#DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>
+interface. Any value specified in this interface overrides that
+setting.
+</dl>
+<p><b>Note: A transaction may not span threads; that is, each transaction must
+begin and end in the same thread, and each transaction may be used only
+by a single thread.</b>
+<p><b>Note: Cursors may not span transactions; that is, each cursor must be
+opened and closed within a single transaction.</b>
+<p><b>Note: A parent transaction may not issue any Berkeley DB operations -- except for
+DbEnv.txn_begin, <a href="../api_java/txn_abort.html">DbTxn.abort</a> and <a href="../api_java/txn_commit.html">DbTxn.commit</a> -- while it has
+active child transactions (child transactions that have not yet been
+committed or aborted).</b>
+<p>The DbEnv.txn_begin method throws an exception that encapsulates a non-zero error value on
+failure.
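+<p>A minimal sketch (assuming an already-opened transactional environment
+handle named <b>dbenv</b>) of beginning a parent transaction and a nested
+child transaction:
+<p><pre>
+void exampleNestedBegin(DbEnv dbenv) throws DbException {
+    // Parent transaction; the log is not flushed synchronously at commit.
+    DbTxn parent = dbenv.txn_begin(null, Db.DB_TXN_NOSYNC);
+
+    // Child transaction: its resolution is final only when the parent
+    // transaction resolves.
+    DbTxn child = dbenv.txn_begin(parent, 0);
+    // ... operations protected by child go here ...
+    child.commit(0);
+
+    // ... operations protected by parent go here ...
+    parent.commit(0);
+}
+</pre>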
+<h1>Errors</h1>
+<p>The DbEnv.txn_begin method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>ENOMEM<dd>The maximum number of concurrent transactions has been reached.
+</dl>
+<p>The DbEnv.txn_begin method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_begin method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_checkpoint.html b/libdb/docs/api_java/txn_checkpoint.html
new file mode 100644
index 0000000..b19fc18
--- /dev/null
+++ b/libdb/docs/api_java/txn_checkpoint.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.txn_checkpoint</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void txn_checkpoint(int kbyte, int min, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>If there has been database environment activity since the last checkpoint,
+the DbEnv.txn_checkpoint method flushes the underlying memory pool, writes a
+checkpoint record to the log, and then flushes the log.
+<p>If <b>kbyte</b> or <b>min</b> is non-zero, the checkpoint is done only
+if more than <b>min</b> minutes have passed since the last checkpoint
+or if more than <b>kbyte</b> kilobytes of log data have been written
+since the last checkpoint.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_FORCE">Db.DB_FORCE</a><dd>Force a checkpoint record, even if there has been no activity since the
+last checkpoint.
+</dl>
+<p>The DbEnv.txn_checkpoint method throws an exception that encapsulates a non-zero error value on
+failure.
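+<p>A minimal sketch (assuming an already-opened transactional environment
+handle named <b>dbenv</b>; the thresholds are illustrative) of a periodic
+checkpoint call:
+<p><pre>
+void exampleCheckpoint(DbEnv dbenv) throws DbException {
+    // Checkpoint only if 64KB of log has been written or 5 minutes have
+    // passed since the last checkpoint.
+    dbenv.txn_checkpoint(64, 5, 0);
+
+    // Unconditionally force a checkpoint record.
+    dbenv.txn_checkpoint(0, 0, Db.DB_FORCE);
+}
+</pre>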
+<h1>Errors</h1>
+<p>The DbEnv.txn_checkpoint method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.txn_checkpoint method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_checkpoint method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_class.html b/libdb/docs/api_java/txn_class.html
new file mode 100644
index 0000000..a52b8bf
--- /dev/null
+++ b/libdb/docs/api_java/txn_class.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbTxn extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn object is the handle for a transaction. Methods off
+the DbTxn handle are used to configure, abort and commit the
+transaction. DbTxn handles are provided to <a href="../api_java/db_class.html">Db</a> methods
+in order to transactionally protect those database operations.
+<p>DbTxn handles are not free-threaded; transaction handles may
+be used by multiple threads, but only serially; that is, the application
+must serialize access to the DbTxn handle. Once the
+<a href="../api_java/txn_abort.html">DbTxn.abort</a> or <a href="../api_java/txn_commit.html">DbTxn.commit</a> methods are called, the handle may
+not be accessed again, regardless of the method's return. In addition,
+parent transactions may not issue any Berkeley DB operations while they have
+active child transactions (child transactions that have not yet been
+committed or aborted) except for <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>, <a href="../api_java/txn_abort.html">DbTxn.abort</a>
+and <a href="../api_java/txn_commit.html">DbTxn.commit</a>.
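+<p>For example (a sketch only; <b>txn</b> is assumed to be a DbTxn handle
+shared between threads), the application must provide its own
+serialization around every use of the handle:
+<p><pre>
+void exampleSerializedUse(DbTxn txn) throws DbException {
+    // DbTxn handles are not free-threaded: guard every use of the shared
+    // handle with the same application-level lock.
+    synchronized (txn) {
+        // ... operations using txn go here ...
+    }
+}
+</pre>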
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, DbTxn
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_commit.html b/libdb/docs/api_java/txn_commit.html
new file mode 100644
index 0000000..09b0a52
--- /dev/null
+++ b/libdb/docs/api_java/txn_commit.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.commit</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void commit(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.commit method ends the transaction.
+<p>In the case of nested transactions, if the transaction is a parent
+transaction, committing the parent transaction causes all unresolved
+children of the parent to be committed. In the case of nested
+transactions, if the transaction is a child transaction, its locks are
+not released, but are acquired by its parent. Although the commit of the
+child transaction will succeed, the actual resolution of the child
+transaction is postponed until the parent transaction is committed or
+aborted; that is, if its parent transaction commits, it will be
+committed; and if its parent transaction aborts, it will be aborted.
+<p>The <b>flags</b> value must be set to 0 or
+one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a><dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency, and isolation) properties, but
+not D (durability); that is, database integrity will be maintained, but
+it is possible that this transaction may be undone during recovery.
+<p>This behavior may be set for a Berkeley DB environment using the
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a> interface or for a single transaction using the
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+<p><dt><a name="Db.DB_TXN_SYNC">Db.DB_TXN_SYNC</a><dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation, and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<a href="../api_java/env_set_flags.html#DB_TXN_NOSYNC">Db.DB_TXN_NOSYNC</a> flag was specified to the <a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>
+interface. This behavior may also be set for a single transaction using
+the <a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> interface. Any value specified in this interface
+overrides both of those settings.
+</dl>
+<p>All cursors opened within the transaction must be closed before the
+transaction is committed.
+<p>After DbTxn.commit has been called, regardless of its return, the
+<a href="../api_java/txn_class.html">DbTxn</a> handle may not be accessed again. If DbTxn.commit
+encounters an error, the transaction and all child transactions of the
+transaction are aborted.
+<p>The DbTxn.commit method throws an exception that encapsulates a non-zero error value on
+failure.
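+<p>A minimal sketch (assuming an already-opened transactional environment
+handle named <b>dbenv</b>) of committing with an explicit durability
+choice:
+<p><pre>
+void exampleDurableCommit(DbEnv dbenv) throws DbException {
+    DbTxn txn = dbenv.txn_begin(null, 0);
+    try {
+        // ... application work protected by txn goes here ...
+    } catch (DbException dbe) {
+        txn.abort();    // undo the work; txn may not be used again
+        throw dbe;
+    }
+    // Force a synchronous log flush regardless of the DB_TXN_NOSYNC
+    // settings of the environment or of DbEnv.txn_begin. If the commit
+    // itself fails, the transaction is aborted internally.
+    txn.commit(Db.DB_TXN_SYNC);
+}
+</pre>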
+<h1>Errors</h1>
+<p>The DbTxn.commit method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.commit method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_discard.html b/libdb/docs/api_java/txn_discard.html
new file mode 100644
index 0000000..9ba8b53
--- /dev/null
+++ b/libdb/docs/api_java/txn_discard.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.discard</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.discard</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void discard(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.discard method frees up all the per-process resources
+associated with the specified <a href="../api_java/txn_class.html">DbTxn</a> handle, neither committing
+nor aborting the transaction. This call may be used only after calls
+to <a href="../api_java/txn_recover.html">DbEnv.txn_recover</a> when there are multiple global transaction
+managers recovering transactions in a single Berkeley DB environment. Any
+transactions returned by <a href="../api_java/txn_recover.html">DbEnv.txn_recover</a> that are not handled by
+the current global transaction manager should be discarded using
+DbTxn.discard.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbTxn.discard method throws an exception encapsulating EINVAL if the transaction handle does
+not refer to a transaction that was recovered into a prepared but not
+yet completed state.
+Otherwise, the DbTxn.discard method throws an exception that encapsulates a non-zero error value on
+failure.
+<p>After DbTxn.discard has been called, regardless of its return, the
+<a href="../api_java/txn_class.html">DbTxn</a> handle may not be accessed again.
+<h1>Errors</h1>
+<p>The DbTxn.discard method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The transaction handle does not refer to a transaction that was
+recovered into a prepared but not yet completed state.
+</dl>
+<p>The DbTxn.discard method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.discard method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_id.html b/libdb/docs/api_java/txn_id.html
new file mode 100644
index 0000000..e97efed
--- /dev/null
+++ b/libdb/docs/api_java/txn_id.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.id</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.id</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int id()
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.id method returns the unique transaction id associated with the
+specified transaction. Locking calls made on behalf of this transaction
+should use the value returned from DbTxn.id as the locker parameter
+to the <a href="../api_java/lock_get.html">DbEnv.lock_get</a> or <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> calls.
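+<p>A minimal sketch, assuming <b>txn</b> is an active transaction handle:
+<p><pre>
+void exampleTxnId(DbTxn txn) throws DbException {
+    // The returned value is the locker ID to use for any explicit
+    // DbEnv.lock_get or DbEnv.lock_vec calls made on this transaction's
+    // behalf.
+    int locker = txn.id();
+    System.out.println("transaction id: " + locker);
+}
+</pre>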
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_list.html b/libdb/docs/api_java/txn_list.html
new file mode 100644
index 0000000..aa8edcf
--- /dev/null
+++ b/libdb/docs/api_java/txn_list.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Transaction Subsystem and Related Methods</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB: Transaction Subsystem and Related Methods</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Transaction Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../api_java/txn_recover.html">DbEnv.txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../api_java/txn_stat.html">DbEnv.txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><a href="../api_java/txn_begin.html">DbEnv.txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_java/txn_abort.html">DbTxn.abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_java/txn_commit.html">DbTxn.commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><a href="../api_java/txn_discard.html">DbTxn.discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><a href="../api_java/txn_id.html">DbTxn.id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><a href="../api_java/txn_prepare.html">DbTxn.prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a></td><td>Set transaction timeout</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_prepare.html b/libdb/docs/api_java/txn_prepare.html
new file mode 100644
index 0000000..629d3b2
--- /dev/null
+++ b/libdb/docs/api_java/txn_prepare.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.prepare</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void prepare(byte[] gid)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The DbTxn.prepare method initiates the beginning of a two-phase commit.
+<p>In a distributed transaction environment, Berkeley DB can be used as a local
+transaction manager. In this case, the distributed transaction manager
+must send <i>prepare</i> messages to each local manager. The local
+manager must then issue a DbTxn.prepare and await its successful
+return before responding to the distributed transaction manager. Only
+after the distributed transaction manager receives successful responses
+from all of its <i>prepare</i> messages should it issue any
+<i>commit</i> messages.
+<p>In the case of nested transactions, preparing the parent
+causes all unresolved children of the parent transaction to be committed.
+Child transactions should never be explicitly prepared.
+Their fate will be resolved along with their parent's during
+global recovery.
+<p>The <b>gid</b> parameter specifies the global transaction ID by which this
+transaction will be known. This global transaction ID will be returned
+in calls to <a href="../api_java/txn_recover.html">DbEnv.txn_recover</a>, telling the application which global
+transactions must be resolved.
+The <b>gid</b> parameter must be sized at least Db.DB_XIDDATASIZE
+(currently 128) bytes; only the first Db.DB_XIDDATASIZE bytes
+are used.
+<p>The DbTxn.prepare method throws an exception that encapsulates a non-zero error value on
+failure.
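+<p>A minimal sketch (assuming an already-opened transactional environment
+handle named <b>dbenv</b>; the global transaction ID contents shown are
+purely illustrative and would normally come from the distributed
+transaction manager):
+<p><pre>
+void examplePrepare(DbEnv dbenv) throws DbException {
+    DbTxn txn = dbenv.txn_begin(null, 0);
+    // ... application work protected by txn goes here ...
+
+    // Build a DB_XIDDATASIZE-byte (currently 128-byte) global
+    // transaction ID.
+    byte[] gid = new byte[Db.DB_XIDDATASIZE];
+    byte[] src = "example-global-id".getBytes();
+    System.arraycopy(src, 0, gid, 0, src.length);
+
+    txn.prepare(gid);
+    // Await the distributed transaction manager's decision, then call
+    // txn.commit(0) or txn.abort() accordingly.
+}
+</pre>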
+<h1>Errors</h1>
+<p>The DbTxn.prepare method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.prepare method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_recover.html b/libdb/docs/api_java/txn_recover.html
new file mode 100644
index 0000000..309a320
--- /dev/null
+++ b/libdb/docs/api_java/txn_recover.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.txn_recover</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbPreplist[] txn_recover(int count, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_recover interface returns a list of prepared but not
+yet resolved transactions. The DbEnv.txn_recover method should only be
+called after the environment has been recovered. Because database
+environment state must be preserved between recovery and the application
+calling DbEnv.txn_recover, applications must either call
+DbEnv.txn_recover using the same environment handle used when recovery
+is done, or the database environment must not be configured using the
+<a href="../api_java/env_open.html#DB_PRIVATE">Db.DB_PRIVATE</a> flag.
+<p>The DbEnv.txn_recover method returns a list of transactions that must be
+resolved by the application (committed, aborted or discarded). The
+return value is an array of objects of type DbPreplist; the following
+DbPreplist fields will be filled in:
+<p><dl compact>
+<p><dt>public DbTxn txn;<dd>The transaction handle for the transaction.
+<p><dt>public byte[] gid;<dd>The global transaction ID for the transaction. The global transaction
+ID is the one specified when the transaction was prepared. The
+application is responsible for ensuring uniqueness among global
+transaction IDs.
+</dl>
+<p>The application must call <a href="../api_java/txn_abort.html">DbTxn.abort</a>, <a href="../api_java/txn_commit.html">DbTxn.commit</a> or
+<a href="../api_java/txn_discard.html">DbTxn.discard</a> on each returned <a href="../api_java/txn_class.html">DbTxn</a> handle before
+starting any new operations.
+<p>The <b>count</b> parameter specifies the maximum size of the
+array that should be returned.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a><dd>Begin returning a list of prepared, but not yet resolved transactions.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a><dd>Continue returning a list of prepared, but not yet resolved transactions,
+starting where the last call to DbEnv.txn_recover left off.
+</dl>
+<p>The DbEnv.txn_recover method throws an exception that encapsulates a non-zero error value on
+failure.
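+<p>A sketch of enumerating the list after environment recovery, assuming an
+already-recovered environment handle named <b>dbenv</b>; the batch size of
+16 is arbitrary, and aborting every transaction is shown purely for
+illustration:
+<p><pre>
+void exampleTxnRecover(DbEnv dbenv) throws DbException {
+    // Page through the prepared-but-unresolved transactions.
+    int flag = Db.DB_FIRST;
+    for (;;) {
+        DbPreplist[] batch = dbenv.txn_recover(16, flag);
+        if (batch == null || batch.length == 0)
+            break;
+        for (int i = 0; i != batch.length; ++i) {
+            // The application must resolve each entry (commit, abort, or
+            // discard) before starting new operations; a real transaction
+            // manager decides per its own records, keyed by batch[i].gid.
+            batch[i].txn.abort();
+        }
+        flag = Db.DB_NEXT;
+    }
+}
+</pre>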
+<h1>Errors</h1>
+<p>The DbEnv.txn_recover method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_recover method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_set_timeout.html b/libdb/docs/api_java/txn_set_timeout.html
new file mode 100644
index 0000000..483e99c
--- /dev/null
+++ b/libdb/docs/api_java/txn_set_timeout.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_timeout(long timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.set_timeout method sets timeout values for locks or
+transactions for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+Db.DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+Db.DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a> for more information.
+<p>The DbTxn.set_timeout method configures operations performed on the underlying
+transaction, not only operations performed using the specified
+<a href="../api_java/txn_class.html">DbTxn</a> handle.
+<p>The DbTxn.set_timeout interface may be called at any time during the life of
+the application.
+<p>The DbTxn.set_timeout method throws an exception that encapsulates a non-zero error value on
+failure.
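+<p>A minimal sketch, assuming <b>txn</b> is an active transaction handle;
+the timeout values are illustrative:
+<p><pre>
+void exampleTimeouts(DbTxn txn) throws DbException {
+    // Timeouts are expressed in microseconds.
+    txn.set_timeout(500000L, Db.DB_SET_LOCK_TIMEOUT);   // 0.5s per lock
+    txn.set_timeout(5000000L, Db.DB_SET_TXN_TIMEOUT);   // 5s for the txn
+}
+</pre>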
+<h1>Errors</h1>
+<p>The DbTxn.set_timeout method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbTxn.set_timeout method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.set_timeout method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_java/txn_stat.html b/libdb/docs/api_java/txn_stat.html
new file mode 100644
index 0000000..0d6c789
--- /dev/null
+++ b/libdb/docs/api_java/txn_stat.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.txn_stat</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbTxnStat txn_stat(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.txn_stat method returns the transaction subsystem statistics.
+<p>The <b>flags</b> value must be set to 0 or
+the following value:
+<p><dl compact>
+<p><dt><a name="Db.DB_STAT_CLEAR">Db.DB_STAT_CLEAR</a><dd>Reset statistics after returning their values.
+</dl>
+<p>The DbEnv.txn_stat method creates a DbTxnStat object encapsulating the
+transaction region statistics. The following data fields are available
+from the DbTxnStat object:
+<p><dl compact>
+<dt>public <a href="../api_java/lsn_class.html">DbLsn</a> st_last_ckp;<dd>The LSN of the last checkpoint.
+<dt>public long st_time_ckp;<dd>The time the last completed checkpoint finished (as the number of seconds
+since the Epoch, returned by the IEEE/ANSI Std 1003.1 (POSIX) <b>time</b> interface).
+<dt>public int st_last_txnid;<dd>The last transaction ID allocated.
+<dt>public int st_maxtxns;<dd>The maximum number of active transactions possible.
+<dt>public int st_nactive;<dd>The number of transactions that are currently active.
+<dt>public int st_maxnactive;<dd>The maximum number of active transactions at any one time.
+<dt>public int st_nbegins;<dd>The number of transactions that have begun.
+<dt>public int st_naborts;<dd>The number of transactions that have aborted.
+<dt>public int st_ncommits;<dd>The number of transactions that have committed.
+<dt>public int st_nrestores;<dd>The number of transactions that have been restored.
+<dt>public int st_regsize;<dd>The size of the region.
+<dt>public int st_region_wait;<dd>The number of times that a thread of control was forced to wait before
+obtaining the region lock.
+<dt>public int st_region_nowait;<dd>The number of times that a thread of control was able to obtain
+the region lock without waiting.
+<dt>public Active st_txnarray[];<dd>The array of active transactions. Each element of the array is an object
+of type DbTxnStat.Active, a top level inner class, that has the following
+fields:
+<p><dl compact>
+<p><dt>public int txnid;<dd>The transaction ID of the transaction.
+<dt>public int parentid;<dd>The transaction ID of the parent transaction (or 0, if no parent).
+<dt>public <a href="../api_java/lsn_class.html">DbLsn</a> lsn;<dd>The current log sequence number when the transaction was begun.
+</dl>
+</dl>
+<p>The DbEnv.txn_stat method throws an exception that encapsulates a non-zero error value on
+failure.
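+<p>A minimal sketch (assuming an already-opened transactional environment
+handle named <b>dbenv</b>) of reporting a few of these fields:
+<p><pre>
+void exampleTxnStat(DbEnv dbenv) throws DbException {
+    DbTxnStat st = dbenv.txn_stat(0);
+    System.out.println("active transactions:    " + st.st_nactive);
+    System.out.println("committed transactions: " + st.st_ncommits);
+    System.out.println("aborted transactions:   " + st.st_naborts);
+    for (int i = 0; i != st.st_txnarray.length; ++i)
+        System.out.println("active txn id: " + st.st_txnarray[i].txnid);
+}
+</pre>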
+<h1>Errors</h1>
+<p>The DbEnv.txn_stat method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.txn_stat method may fail and
+throw a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>,
+in which case all subsequent Berkeley DB calls will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/env_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/txn_list.html">Transaction Subsystem and Related Methods</a>
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_close.html b/libdb/docs/api_tcl/db_close.html
new file mode 100644
index 0000000..2cb542f
--- /dev/null
+++ b/libdb/docs/api_tcl/db_close.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>close</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db close
+ [-nosync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>close</b> command flushes any cached database information to
+disk, closes any open cursors, frees any allocated resources, and closes
+any underlying files. Because key/data pairs are cached in memory, failing
+to sync the file with the <i>db</i> <b>close</b> or <i>db</i> <b>sync</b> command may
+result in inconsistent or lost information.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not flush cached information to disk.
+<p>The -nosync flag is a dangerous option. It should only be set if the
+application is doing logging (with transactions) so that the database is
+recoverable after a system or application crash, or if the database is
+always generated from scratch after any system or application crash.
+<p>It is important to understand that flushing cached information to disk
+only minimizes the window of opportunity for corrupted data. Although
+unlikely, it is possible for database corruption to happen if a system or
+application crash occurs while writing data to the database. To ensure
+that database corruption never occurs, applications must either use
+transactions and logging with automatic recovery, use logging and
+application-specific recovery, or edit a copy of the database; and after
+all applications using the database have successfully called
+<i>db</i> <b>close</b>, atomically replace the original database with the
+updated copy.
+</dl>
+<p>After <i>db</i> <b>close</b> has been called, regardless of its return, the DB
+handle may not be accessed again.
+<p>The <i>db</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_count.html b/libdb/docs/api_tcl/db_count.html
new file mode 100644
index 0000000..fa4a16c
--- /dev/null
+++ b/libdb/docs/api_tcl/db_count.html
@@ -0,0 +1,38 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>count</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db count key
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>count</b> command returns a count of the number of duplicate
+data items for the key given. If the key does not exist, a value of 0
+is returned. If there are no duplicates, or if the database does not
+support duplicates, but a key/data pair exists, a value of 1 is
+returned. If an error occurs, a Berkeley DB error message is returned or a
+Tcl error is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_cursor.html b/libdb/docs/api_tcl/db_cursor.html
new file mode 100644
index 0000000..a6ff7fb
--- /dev/null
+++ b/libdb/docs/api_tcl/db_cursor.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>cursor</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db cursor
+ [-txn txnid]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>cursor</b> command creates a database cursor. The returned
+cursor handle is bound to a Tcl command of the form <b>dbN.cX</b>,
+where X is an integer starting at 0 (for example, db0.c0 and db0.c1).
+It is through this Tcl command that the script accesses the cursor
+methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected,
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_del.html b/libdb/docs/api_tcl/db_del.html
new file mode 100644
index 0000000..7dccd9d
--- /dev/null
+++ b/libdb/docs/api_tcl/db_del.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>del</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db del
+ [-auto_commit]
+ [-glob]
+ [-txn txnid]
+ key
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>del</b> command removes key/data pairs from the database.
+<p>In the presence of duplicate key values, all records associated with the
+designated key will be discarded.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-glob<dd>The specified key is a wildcard pattern, and all keys matching that
+pattern are discarded from the database. The pattern is a simple
+wildcard, any characters after the wildcard character are ignored.
+This option only works on databases using the Btree access method.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>db</i> <b>del</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_get.html b/libdb/docs/api_tcl/db_get.html
new file mode 100644
index 0000000..9bdec1c
--- /dev/null
+++ b/libdb/docs/api_tcl/db_get.html
@@ -0,0 +1,102 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>get</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get
+ [-consume]
+ [-consume_wait]
+ [-glob]
+ [-partial {doff dlen}]
+ [-recno]
+ [-rmw]
+ [-txn txnid]
+ key
+db get
+ -get_both
+ [-partial {doff dlen}]
+ [-rmw]
+ [-txn txnid]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get</b> command returns key/data pairs from the database.
+<p>In the presence of duplicate key values, <i>db</i> <b>get</b> will return all
+duplicate items. Duplicates are sorted by insert order except where this
+order has been overridden by cursor operations.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-consume<dd>Return the record number and data from the available record closest to
+the head of the queue, and delete the record. The cursor will be
+positioned on the deleted record. A record is available if it is not
+deleted and is not currently locked. The underlying database must be
+of type Queue for <b>-consume</b> to be specified.
+<p><dt>-consume_wait<dd>The same as the <b>-consume</b> flag except that if the Queue database
+is empty, the thread of control will wait until there is data in the
+queue before returning. The underlying database must be of type Queue
+for <b>-consume_wait</b> to be specified.
+<p><dt>-get_both key data<dd>Retrieve the key/data pair only if both the key and data match the
+arguments.
+<p><dt>-glob<dd>Return all keys matching the given key, where the key is a simple
+wildcard pattern. When this option is used, the key argument is treated
+as a pattern matching a set of keys. Any characters after the wildcard
+character are ignored. For example, in a database of last names, the
+command "db0 get Jones" will return all occurrences of "Jones" in the
+database, and the command "db0 get -glob Jo*" will return both "Jones"
+and "Johnson" from the database. The command "db0 get -glob *" will
+return all of the key/data pairs in the database.
+This option only works on databases using the Btree access method.
+<p><dt>-partial {doff dlen}<dd>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning
+of the retrieved data record are returned as if they comprised the
+entire record. If any or all of the specified bytes do not exist in the
+record, the command is successful and any existing bytes are returned.
+<p><dt>-recno<dd>Retrieve the specified numbered key/data pair from a database. For
+<b>-recno</b> to be specified, the given key must be a record
+number, and the underlying database must be of type Recno or Queue, or
+of type Btree that was created with the <b>-recnum</b> option.
+<p><dt>-rmw<dd>Acquire write locks instead of read locks when doing the retrieval.
+Setting this flag may decrease the likelihood of deadlock during a
+read-modify-write cycle by immediately acquiring the write lock during
+the read part of the cycle so that another thread of control acquiring a
+read lock for the same item, in its own read-modify-write cycle, will not
+result in deadlock.
+<p>Because the <i>db</i> <b>get</b> command will not hold locks across Berkeley DB
+interface calls in nontransactional environments, the <b>-rmw</b>
+argument to the <i>db</i> <b>get</b> call is only meaningful in the presence
+of transactions.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected,
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>If the underlying database is a Queue or Recno database, the given key
+will be interpreted by Tcl as an integer. For all other database types,
+the key is interpreted by Tcl as a byte array, unless indicated by a
+given option.
+<p>A list of key/data pairs is returned. In the normal error case of a
+nonexistent key, an empty list is returned. In all other error cases, a
+Tcl error is thrown.
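+<p>The following sketch illustrates both forms of the command; the handle
+db0, the keys, and the data values are hypothetical examples, not part of
+the interface:
+<p><pre>
+# db0 is a handle previously returned by berkdb open (hypothetical).
+foreach pair [db0 get Jones] {
+    puts "key: [lindex $pair 0] data: [lindex $pair 1]"
+}
+# Retrieve only if both the key and the data match.
+set exact [db0 get -get_both Jones "555-1212"]
+</pre>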
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_get_join.html b/libdb/docs/api_tcl/db_get_join.html
new file mode 100644
index 0000000..17a52d7
--- /dev/null
+++ b/libdb/docs/api_tcl/db_get_join.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db get_join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>get_join</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get_join
+ [-txn txnid]
+ {<i>db</i> key}
+ {<i>db</i> key}
+ ...
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get_join</b> command performs the cursor operations required to
+join the specified keys and returns a list of joined {key data} pairs.
+See <a href="../ref/am/join.html">Equality join</a> for more information on
+the underlying requirements for joining.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected,
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
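+<p>A minimal usage sketch follows; the primary and secondary database
+handles and the key values are hypothetical:
+<p><pre>
+# Return records from db_primary whose secondary keys are "red" in
+# db_color and "apple" in db_fruit (all names hypothetical).
+set matches [db_primary get_join {db_color red} {db_fruit apple}]
+foreach pair $matches {
+    puts "joined key: [lindex $pair 0] data: [lindex $pair 1]"
+}
+</pre>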
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_get_type.html b/libdb/docs/api_tcl/db_get_type.html
new file mode 100644
index 0000000..1a6943d
--- /dev/null
+++ b/libdb/docs/api_tcl/db_get_type.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>get_type</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db get_type
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>get_type</b> command returns the underlying database type,
+returning one of "btree", "hash", "queue" or "recno".
+<p>In the case of error, a Tcl error is thrown.
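+<p>For example, with a previously opened handle (db0 is hypothetical):
+<p><pre>
+# Report the access method of an already-open database.
+set type [db0 get_type]
+puts "underlying access method: $type"
+</pre>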
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_is_byteswapped.html b/libdb/docs/api_tcl/db_is_byteswapped.html
new file mode 100644
index 0000000..894237b
--- /dev/null
+++ b/libdb/docs/api_tcl/db_is_byteswapped.html
@@ -0,0 +1,38 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db is_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>is_byteswapped</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db is_byteswapped
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>is_byteswapped</b> command returns 0 if the underlying database
+files were created on an architecture of the same byte order as the
+current one, and 1 if they were not (that is, big-endian on a little-endian
+machine, or vice versa). This value may be used to determine if application
+data needs to be adjusted for this architecture or not.
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_join.html b/libdb/docs/api_tcl/db_join.html
new file mode 100644
index 0000000..4b43c24
--- /dev/null
+++ b/libdb/docs/api_tcl/db_join.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>join</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db join
+ <i>db.cX</i>
+ <i>db.cY</i>
+ <i>db.cZ</i>
+ ...
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>join</b> command joins the specified cursors and returns a
+cursor handle that can be used to iterate through the joined {key data}
+pairs. The returned cursor handle is bound to a Tcl command of the form
+<b>dbN.cX</b>, where X is an integer starting at 0 (for example,
+db0.c0 and db0.c1). It is through this Tcl command that the script
+accesses the cursor methods.
+<p>The returned join cursor has limited cursor functionality, and only the
+<i>dbc</i> <b>get</b> and <i>dbc</i> <b>close</b> commands will succeed.
+<p>See <a href="../ref/am/join.html">Equality join</a> for more information on
+the underlying requirements for joining.
+<p>In a transaction-protected environment, all the cursors listed must have
+been created within the same transaction.
+<p>In the case of error, a Tcl error is thrown.
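+<p>A minimal sketch of iterating a join cursor follows; the database and
+cursor handles are hypothetical, and the setup described in
+Equality join is assumed:
+<p><pre>
+# db_color.c0 and db_fruit.c0 are cursors already positioned on the
+# secondary keys of interest (hypothetical names).
+set jc [db_primary join db_color.c0 db_fruit.c0]
+# Each get returns the next match; an empty list marks the end.
+while { [llength [set pair [$jc get]]] > 0 } {
+    puts "matched: $pair"
+}
+$jc close
+</pre>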
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_open.html b/libdb/docs/api_tcl/db_open.html
new file mode 100644
index 0000000..a32b9cb
--- /dev/null
+++ b/libdb/docs/api_tcl/db_open.html
@@ -0,0 +1,301 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb open</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb open
+ [-auto_commit]
+ [-btree | -hash | -recno | -queue | -unknown]
+ [-cachesize {gbytes bytes ncache}]
+ [-create]
+ [-delim delim]
+ [-dup]
+ [-dupsort]
+ [-env env]
+ [-errfile filename]
+ [-excl]
+ [-extent size]
+ [-ffactor density]
+ [-len len]
+ [-mode mode]
+ [-nelem size]
+ [-pad pad]
+ [-pagesize pagesize]
+ [-rdonly]
+ [-recnum]
+ [-renumber]
+ [-snapshot]
+ [-source file]
+ [-truncate]
+ [-txn txnid]
+ [--]
+ [file [database]]
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>berkdb open</b> command opens and optionally creates a database.
+The returned database handle is bound to a Tcl command of the form
+<b>dbN</b>, where N is an integer starting at 0 (for example, db0 and
+db1). It is through this Tcl command that the script accesses the
+database methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-btree<dd>Open/create a database of type Btree. The Btree format
+is a representation of a sorted, balanced tree structure.
+<p><dt>-hash<dd>Open/create a database of type Hash. The Hash format is
+an extensible, dynamic hashing scheme.
+<p><dt>-queue<dd>Open/create a database of type Queue. The Queue format
+supports fast access to fixed-length records accessed sequentially or by
+logical record number.
+<p><dt>-recno<dd>Open/create a database of type Recno. The Recno format
+supports fixed- or variable-length records, accessed sequentially or by
+logical record number, and optionally retrieved from a flat text file.
+<p><dt>-unknown<dd>The database is of an unknown type, and must already exist.
+<p><dt>-cachesize {gbytes bytes ncache}<dd>Set the size of the database's shared memory buffer pool (that is, the
+cache), to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache
+should be the size of the normal working data set of the application,
+with some small amount of additional memory for unusual situations.
+(Note: The working set is not the same as the number of simultaneously
+referenced pages, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures; for example,
+some releases of Solaris limit the amount of memory that may be
+allocated contiguously by a process. If <b>ncache</b> is 0 or 1, the
+cache will be allocated contiguously in memory. If it is greater than
+1, the cache will be broken up into <b>ncache</b> equally sized
+separate pieces of memory.
+<p>For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p>Because databases opened within Berkeley DB environments use the cache
+specified to the environment, it is an error to attempt to set a cache
+in a database created within an environment.
+<p><dt>-create<dd>Create any underlying files, as necessary. If the files do not already
+exist and the <b>-create</b> argument is not specified, the call will
+fail.
+<p><dt>-delim delim<dd>Set the delimiting byte used to mark the end of a record in the backing
+source file for the Recno access method.
+<p>This byte is used for variable length records if the <b>-source</b>
+argument file is specified. If the <b>-source</b> argument file is
+specified and no delimiting byte was specified, &lt;newline&gt;
+characters (that is, ASCII 0x0a) are interpreted as end-of-record
+markers.
+<p><dt>-dup<dd>Permit duplicate data items in the tree, that is, insertion when the
+key of the key/data pair being inserted already exists in the tree will
+be successful. The ordering of duplicates in the tree is determined by
+the order of insertion unless the ordering is otherwise specified by
+use of a cursor or a duplicate comparison function.
+<p>It is an error to specify both <b>-dup</b> and <b>-recnum</b>.
+<p><dt>-dupsort<dd>Sort duplicates within a set of data items. A default lexical
+comparison will be used. Specifying that duplicates are to be sorted
+changes the behavior of the <i>db</i> <b>put</b> operation as well as the
+<i>dbc</i> <b>put</b> operation when the <b>-keyfirst</b>, <b>-keylast</b>
+and <b>-current</b> options are specified.
+<p><dt>-env env<dd>If no <b>-env</b> argument is given, the database is standalone; that
+is, it is not part of any Berkeley DB environment.
+<p>If a <b>-env</b> argument is given, the database is created within the
+specified Berkeley DB environment. The database access methods automatically
+make calls to the other subsystems in Berkeley DB, based on the enclosing
+environment. For example, if the environment has been configured to use
+locking, the access methods will automatically acquire the correct locks
+when reading and writing pages of the database.
+<p><dt>-errfile filename<dd>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however, the
+errno value may be insufficient to completely describe the cause of the
+error, especially during initial application debugging.
+<p>The <b>-errfile</b> argument is used to enhance the mechanism for
+reporting error messages to the application by specifying a file to be
+used for displaying additional Berkeley DB error messages. In some cases, when
+an error occurs, Berkeley DB will output an additional error message to the
+specified file reference.
+<p>The error message will consist of a Tcl command name and a colon (":"),
+an error string, and a trailing &lt;newline&gt; character. If
+the database was opened in an environment, the Tcl command name will be
+the environment name (for example, env0), otherwise it will be the
+database command name (for example, db0).
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p>For database handles opened inside of Berkeley DB environments, specifying the
+<b>-errfile</b> argument affects the entire environment and is equivalent
+to specifying the same argument to the <b>berkdb env</b> command.
+<p><dt>-excl<dd>Return an error if the database already exists.
+<p><dt>-extent size<dd>Set the size of the extents of the Queue database; the size is specified
+as the number of pages in an extent. Each extent is created as a
+separate physical file. If no extent size is set, the default behavior
+is to create only a single underlying database file.
+<p>For information on tuning the extent size, see
+<a href="../ref/am_conf/extentsize.html">Selecting an extent size</a>.
+<p><dt>-ffactor density<dd>Set the desired density within the hash table.
+<p>The density is an approximation of the number of keys allowed to
+accumulate in any one bucket.
+<p><dt>-len len<dd>For the Queue access method, specify that the records are of length
+<b>len</b>.
+<p>For the Recno access method, specify that the records are fixed-length,
+not byte-delimited, and are of length <b>len</b>.
+<p>Any records added to the database that are less than <b>len</b> bytes
+long are automatically padded (see the <b>-pad</b> argument for more
+information).
+<p>Any attempt to insert records into the database that are greater than
+<b>len</b> bytes long will cause the call to fail immediately and return
+an error.
+<p><dt>-mode mode<dd>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by the access methods
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p><dt>-nelem size<dd>Set an estimate of the final size of the hash table.
+<p>If not set or set too low, hash tables will still expand gracefully as
+keys are entered, although a slight performance degradation may be
+noticed.
+<p><dt>-pad pad<dd>Set the padding character for short, fixed-length records for the Queue
+and Recno access methods.
+<p>If no pad character is specified, &lt;space&gt; characters (that is,
+ASCII 0x20) are used for padding.
+<p><dt>-pagesize pagesize<dd>Set the size of the pages used to hold items in the database, in bytes.
+The minimum page size is 512 bytes, and the maximum page size is 64K
+bytes. If the page size is not explicitly set, one is selected based
+on the underlying filesystem I/O block size. The automatically selected
+size has a lower limit of 512 bytes and an upper limit of 16K bytes.
+<p>For information on tuning the Berkeley DB page size, see
+<a href="../ref/am_conf/pagesize.html">Selecting a page size</a>.
+<p><dt>-rdonly<dd>Open the database for reading only. Any attempt to modify items in the
+database will fail, regardless of the actual permissions of any
+underlying files.
+<p><dt>-recnum<dd>Support retrieval from the Btree using record numbers.
+<p>Logical record numbers in Btree databases are mutable in the face of
+record insertion or deletion. See the <b>-renumber</b> argument for
+further discussion.
+<p>Maintaining record counts within a Btree introduces a serious point of
+contention, namely the page locations where the record counts are stored. In
+addition, the entire tree must be locked during both insertions and
+deletions, effectively single-threading the tree for those operations.
+Specifying <b>-recnum</b> can result in serious performance degradation
+for some applications and data sets.
+<p>It is an error to specify both <b>-dup</b> and <b>-recnum</b>.
+<p><dt>-renumber<dd>Specifying the <b>-renumber</b> argument causes the logical record
+numbers to be mutable, and change as records are added to and deleted from
+the database. For example, the deletion of record number 4 causes records
+numbered 5 and greater to be renumbered downward by one. If a cursor was
+positioned to record number 4 before the deletion, it will refer to the
+new record number 4, if any such record exists, after the deletion. If a
+cursor was positioned after record number 4 before the deletion, it will
+be shifted downward one logical record, continuing to refer to the same
+record as it did before.
+<p>Using the <i>db</i> <b>put</b> or <i>dbc</i> <b>put</b> interfaces to create new records will
+cause the creation of multiple records if the record number is more than one
+greater than the largest record currently in the database. For example,
+creating record 28 when record 25 was previously the last record in the
+database, will create records 26 and 27 as well as 28.
+<p>If a created record is not at the end of the database, all records following
+the new record will be automatically renumbered upward by one. For example,
+the creation of a new record numbered 8 causes records numbered 8 and
+greater to be renumbered upward by one. If a cursor was positioned to record
+number 8 or greater before the insertion, it will be shifted upward one
+logical record, continuing to refer to the same record as it did before.
+<p>For these reasons, concurrent access to a Recno database with the
+<b>-renumber</b> flag specified may be largely meaningless, although it
+is supported.
+<p><dt>-snapshot<dd>This argument specifies that any specified <b>-source</b> file be read
+in its entirety when the database is opened. If this argument is not
+specified, the <b>-source</b> file may be read lazily.
+<p><dt>-source file<dd>Set the underlying source file for the Recno access method. The purpose
+of the <b>-source</b> file is to provide fast access and modification
+to databases that are normally stored as flat text files.
+<p>If the <b>-source</b> argument is given, it specifies an underlying flat
+text database file that is read to initialize a transient record number
+index. In the case of variable length records, the records are separated
+as specified by <b>-delim</b>. For example, standard UNIX byte stream
+files can be interpreted as a sequence of variable length records
+separated by &lt;newline&gt; characters.
+<p>In addition, when cached data would normally be written back to the
+underlying database file (for example, when the <i>db</i> <b>close</b> or
+<i>db</i> <b>sync</b> commands are called), the in-memory copy of the database
+will be written back to the <b>-source</b> file.
+<p>By default, the backing source file is read lazily, that is, records
+are not read from the file until they are requested by the application.
+<b>If multiple processes (not threads) are accessing a Recno database
+concurrently and either inserting or deleting records, the backing source
+file must be read in its entirety before more than a single process
+accesses the database, and only that process should specify the backing
+source argument as part of the <b>berkdb open</b> call. See the <b>-snapshot</b>
+argument for more information.</b>
+<p><b>Reading and writing the backing source file specified by <b>-source</b>
+cannot be transaction protected because it involves filesystem
+operations that are not part of the Berkeley DB transaction methodology.</b>
+For this reason, if a temporary database is used to hold the records,
+it is possible to lose the contents of the <b>-source</b> file, for
+example, if the system crashes at the right instant. If a file is used
+to hold the database, that is, a filename was specified as the
+<b>file</b> argument to <b>berkdb open</b>, normal database recovery on
+that file can be used to prevent information loss, although it is still
+possible that the contents of <b>-source</b> will be lost if the system
+crashes.
+<p>The <b>-source</b> file must already exist (but may be zero-length) when
+<b>berkdb open</b> is called.
+<p>It is not an error to specify a read-only <b>-source</b> file when
+creating a database, nor is it an error to modify the resulting database.
+However, any attempt to write the changes to the backing source file using
+either the <i>db</i> <b>close</b> or <i>db</i> <b>sync</b> commands will fail, of course.
+Specifying the <b>-nosync</b> argument to the <i>db</i> <b>close</b> command will
+stop it from attempting to write the changes to the backing file; instead,
+they will be silently discarded.
+<p>For all of the previous reasons, the <b>-source</b> file is generally
+used to specify databases that are read-only for Berkeley DB applications,
+and that are either generated on the fly by software tools, or modified
+using a different mechanism such as a text editor.
+<p><dt>-truncate<dd>Physically truncate the underlying file, discarding all previous databases
+it might have held. Underlying filesystem primitives are used to
+implement this flag. For this reason, it is only applicable to the
+physical file and cannot be used to discard databases within a file.
+<p>The <b>-truncate</b> argument cannot be transaction-protected, and it is
+an error to specify it in a transaction-protected environment.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+<p><dt>--<dd>Mark the end of the command arguments.
+<p><dt>file<dd>The name of a single physical file on disk that will be used to back the
+database.
+<p><dt>database<dd>The <b>database</b> argument allows applications to have multiple
+databases inside of a single physical file. This is useful when the
+databases are both numerous and reasonably small, in order to avoid
+creating a large number of underlying files. It is an error to attempt
+to open a second database file that was not initially created using a
+<b>database</b> name.
+</dl>
+<p>The <b>berkdb open</b> command returns a database handle on success.
+<p>In the case of error, a Tcl error is thrown.
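+<p>The following is a brief usage sketch; the file name, cache size, and
+key/data values are illustrative only:
+<p><pre>
+# Create (if needed) and open a standalone Btree database with sorted
+# duplicates; the handle comes back as db0, db1, and so on.
+set db [berkdb open -create -btree -dup -dupsort \
+    -cachesize {0 262144 1} -mode 0644 -- names.db]
+$db put Jones "555-1212"
+$db close
+</pre>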
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_put.html b/libdb/docs/api_tcl/db_put.html
new file mode 100644
index 0000000..b295046
--- /dev/null
+++ b/libdb/docs/api_tcl/db_put.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>put</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db put
+ -append
+ [-auto_commit]
+ [-partial {doff dlen}]
+ [-txn txnid]
+ data
+db put
+ [-auto_commit]
+ [-nooverwrite]
+ [-partial {doff dlen}]
+ [-txn txnid]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>put</b> command stores the specified key/data pair into the
+database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-append<dd>Append the data item to the end of the database. For the <b>-append</b>
+option to be specified, the underlying database must be a Queue or Recno
+database. The record number allocated to the record is returned on
+success.
+<p><dt>-nooverwrite<dd>Enter the new key/data pair only if the key does not already appear in
+the database.
+<p><dt>-partial {doff dlen}<dd>
+<p>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning
+of the specified key's data record are replaced by the data specified
+by the data and size structure elements. If <b>dlen</b> is smaller
+than the length of the supplied data, the record will grow; if
+<b>dlen</b> is larger than the length of the supplied data, the record
+will shrink. If the specified bytes do not exist, the record will be
+extended using nul bytes as necessary, and the <i>db</i> <b>put</b> call will succeed.
+<p>It is an error to attempt a partial put using the <i>db</i> <b>put</b> command in a database
+that supports duplicate records. Partial puts in databases supporting
+duplicate records must be done using a <i>dbc</i> <b>put</b> command.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+supplied data length values in Queue or Recno databases with fixed-length
+records.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>db</i> <b>put</b> command returns either 0 or a record number for success
+(the record number is returned if the <b>-append</b> option was specified).
+If an error occurs, a Berkeley DB error message is returned or a Tcl error is
+thrown.
+<p>If the underlying database is a Queue or Recno database, then the given
+key will be interpreted by Tcl as an integer. For all other database
+types, the key is interpreted by Tcl as a byte array.
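+<p>The following sketch shows both forms; db0 (a Btree or Hash database)
+and q0 (a Queue database) are hypothetical handles:
+<p><pre>
+# Simple store; an existing key's data is overwritten.
+db0 put fruit apple
+# Store only if the key is not already present.
+db0 put -nooverwrite fruit pear
+# Queue/Recno only: append a record and capture the allocated record number.
+set recno [q0 put -append "new queue element"]
+</pre>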
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_remove.html b/libdb/docs/api_tcl/db_remove.html
new file mode 100644
index 0000000..82ce79b
--- /dev/null
+++ b/libdb/docs/api_tcl/db_remove.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb dbremove</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb dbremove
+ [-env env]
+ [--]
+ file
+ [database]
+</pre></h3>
+<h1>Description</h1>
+<p>Remove the Berkeley DB database specified by the database name <b>file</b> and
+[database] name arguments. If no <b>database</b> is specified,
+the physical file represented by <b>file</b> is removed, incidentally
+removing all databases that it contained.
+<p>No reference count of database use is maintained by Berkeley DB. Applications
+should not remove databases that are currently in use.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-env env<dd>If a <b>-env</b> argument is given, the database in the specified Berkeley DB
+environment is removed.
+<p><dt>--<dd>Mark the end of the command arguments.
+</dl>
+<p>The <b>berkdb dbremove</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
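+<p>For example (the file, database, and environment names are hypothetical):
+<p><pre>
+# Remove a standalone physical file and every database it contains.
+berkdb dbremove names.db
+# Remove a single named database inside a file, within an environment.
+berkdb dbremove -env env0 company.db employees
+</pre>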
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_rename.html b/libdb/docs/api_tcl/db_rename.html
new file mode 100644
index 0000000..d6a4bf9
--- /dev/null
+++ b/libdb/docs/api_tcl/db_rename.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb dbrename</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb dbrename
+ [-env env]
+ [--]
+ file
+ [database
+ newname]
+</pre></h3>
+<h1>Description</h1>
+<p>Renames the Berkeley DB database specified by the database name <b>file</b> and
+[database] name arguments to the new name given.
+If no <b>database</b> is specified,
+the physical file represented by <b>file</b> is renamed.
+<p>No reference count of database use is maintained by Berkeley DB. Applications
+should not rename databases that are currently in use.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-env env<dd>If a <b>-env</b> argument is given, the database in the specified Berkeley DB
+environment is renamed.
+<p><dt>--<dd>Mark the end of the command arguments.
+</dl>
+<p>The <b>berkdb dbrename</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_stat.html b/libdb/docs/api_tcl/db_stat.html
new file mode 100644
index 0000000..a7f84a0
--- /dev/null
+++ b/libdb/docs/api_tcl/db_stat.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>stat</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db stat
+ [-faststat]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>stat</b> command returns a list of name/value pairs comprising
+the statistics of the database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-faststat<dd>Return only that information which does not require a traversal
+of the database.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
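+<p>For example, a sketch that prints the returned statistics (db0 is a
+hypothetical handle):
+<p><pre>
+# Each element of the returned list is a {name value} pair.
+foreach pair [db0 stat -faststat] {
+    puts "[lindex $pair 0]: [lindex $pair 1]"
+}
+</pre>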
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_sync.html b/libdb/docs/api_tcl/db_sync.html
new file mode 100644
index 0000000..ed60e3b
--- /dev/null
+++ b/libdb/docs/api_tcl/db_sync.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>sync</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db sync
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>db</i> <b>sync</b> command flushes any cached database
+information to disk.
+<p>See <i>db</i> <b>close</b> for a discussion of Berkeley DB and cached data.
+<p>The <i>db</i> <b>sync</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/db_truncate.html b/libdb/docs/api_tcl/db_truncate.html
new file mode 100644
index 0000000..35e21d6
--- /dev/null
+++ b/libdb/docs/api_tcl/db_truncate.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db truncate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>db</i> <b>truncate</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db truncate
+ [-auto_commit]
+ [-txn txnid]
+</pre></h3>
+<h1>Description</h1>
+<p>Empties the database, discarding all records it contains.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>db</i> <b>truncate</b> command returns the number of records discarded
+from the database on success.
+<p>In the case of error, a Tcl error is thrown.
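+<p>For example (db0 is a hypothetical handle):
+<p><pre>
+# Discard every record and report how many were removed.
+set discarded [db0 truncate]
+puts "discarded $discarded records"
+</pre>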
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/dbc_close.html b/libdb/docs/api_tcl/dbc_close.html
new file mode 100644
index 0000000..2604405
--- /dev/null
+++ b/libdb/docs/api_tcl/dbc_close.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbc close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>close</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc close
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>close</b> command discards the cursor.
+<p>After <i>dbc</i> <b>close</b> has been called, regardless of its return, the
+cursor handle may not be used again.
+<p>The <i>dbc</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/dbc_del.html b/libdb/docs/api_tcl/dbc_del.html
new file mode 100644
index 0000000..0ef1347
--- /dev/null
+++ b/libdb/docs/api_tcl/dbc_del.html
@@ -0,0 +1,39 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbc del</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>del</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc del
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>del</b> command deletes the key/data pair to which the cursor
+currently refers.
+<p>The cursor position is unchanged after a delete, and subsequent calls to
+cursor commands expecting the cursor to refer to an existing key will
+fail.
+<p>The <i>dbc</i> <b>del</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/dbc_dup.html b/libdb/docs/api_tcl/dbc_dup.html
new file mode 100644
index 0000000..3cd86e9
--- /dev/null
+++ b/libdb/docs/api_tcl/dbc_dup.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbc dup</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>dup</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc dup
+ [-position]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>dup</b> command duplicates the cursor, creating a new cursor
+that uses the same transaction and locker ID as the original cursor. This
+is useful when an application is using locking and requires two or more
+cursors in the same thread of control.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-position<dd>The newly created cursor is initialized to refer to the same position
+in the database as the original cursor and holds the same locks. If the
+<b>-position</b> flag is not specified, the created cursor is
+uninitialized and will behave like a cursor newly created using the
+<i>db</i> <b>cursor</b> command.
+</dl>
+<p>The <i>dbc</i> <b>dup</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/dbc_get.html b/libdb/docs/api_tcl/dbc_get.html
new file mode 100644
index 0000000..f236bd3
--- /dev/null
+++ b/libdb/docs/api_tcl/dbc_get.html
@@ -0,0 +1,168 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbc get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>get</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc get
+ [-current]
+ [-first]
+ [-get_recno]
+ [-join_item]
+ [-last]
+ [-next]
+ [-nextdup]
+ [-nextnodup]
+ [-partial {offset length}]
+ [-prev]
+ [-prevnodup]
+ [-rmw]
+dbc get
+ [-partial {offset length}]
+ [-rmw]
+ [-set]
+ [-set_range]
+ [-set_recno]
+ key
+dbc get
+ -get_both
+ [-partial {offset length}]
+ [-rmw]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>get</b> command returns a list of {key value} pairs, except in
+the case of the <b>-get_recno</b> and <b>-join_item</b> options. In
+the case of the <b>-get_recno</b> option, <i>dbc</i> <b>get</b> returns a list
+of the record number. In the case of the <b>-join_item</b> option,
+<i>dbc</i> <b>get</b> returns a list containing the joined key.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-current<dd>Return the key/data pair to which the cursor currently refers.
+<p>If the cursor key/data pair was deleted, <i>dbc</i> <b>get</b> will return an
+empty list.
+<p><dt>-first<dd>The cursor is set to refer to the first key/data pair of the database, and
+that pair is returned. In the presence of duplicate key values, the first
+data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-first</b> option will skip any keys that exist but were never
+explicitly created by the application, or were created and later deleted.
+<p>If the database is empty, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-last<dd>The cursor is set to refer to the last key/data pair of the database, and
+that pair is returned. In the presence of duplicate key values, the last
+data item in the set of duplicates is returned.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-last</b> option will skip any keys that exist but were never
+explicitly created by the application, or were created and later deleted.
+<p>If the database is empty, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-next<dd>If the cursor is not yet initialized, the <b>-next</b> option is
+identical to <b>-first</b>.
+<p>Otherwise, the cursor is moved to the next key/data pair of the database,
+and that pair is returned. In the presence of duplicate key values, the
+value of the key may not change.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-next</b> option will skip any keys that exist but were never
+explicitly created by the application, or were created and later deleted.
+<p>If the cursor is already on the last record in the database, <i>dbc</i> <b>get</b>
+will return an empty list.
+<p><dt>-nextdup<dd>If the next key/data pair of the database is a duplicate record for the
+current key/data pair, the cursor is moved to the next key/data pair of the
+database, and that pair is returned. Otherwise, <i>dbc</i> <b>get</b> will return
+an empty list.
+<p><dt>-nextnodup<dd>If the cursor is not yet initialized, the <b>-nextnodup</b> option is
+identical to <b>-first</b>.
+<p>Otherwise, the cursor is moved to the next non-duplicate
+key/data pair of the database, and that pair is returned.
+<p>If no non-duplicate key/data pairs occur after the cursor
+position in the database, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-prev<dd>If the cursor is not yet initialized, <b>-prev</b> is identical to
+<b>-last</b>.
+<p>Otherwise, the cursor is moved to the previous key/data pair of the
+database, and that pair is returned. In the presence of duplicate key
+values, the value of the key may not change.
+<p>If the database is a Queue or Recno database, <i>dbc</i> <b>get</b> using the
+<b>-prev</b> flag will skip any keys that exist but were never explicitly
+created by the application, or were created and later deleted.
+<p>If the cursor is already on the first record in the database,
+<i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-prevnodup<dd>If the cursor is not yet initialized, the <b>-prevnodup</b> option is
+identical to <b>-last</b>.
+<p>Otherwise, the cursor is moved to the previous non-duplicate
+key/data pair of the database, and that pair is returned.
+<p>If no non-duplicate key/data pairs occur before the cursor
+position in the database, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-set<dd>Move the cursor to the specified key/data pair of the database, and return
+the datum associated with the given key.
+<p>In the presence of duplicate key values, <i>dbc</i> <b>get</b> will return the
+first data item for the given key.
+<p>If the database is a Queue or Recno database and the requested key exists,
+but was never explicitly created by the application or was later deleted,
+<i>dbc</i> <b>get</b> will return an empty list.
+<p>If no matching keys are found, <i>dbc</i> <b>get</b> will return an empty list.
+<p><dt>-set_range<dd>The <b>-set_range</b> option is identical to the <b>-set</b> option,
+except that the key is returned as well as the data item, and, in the case
+of the Btree access method, the returned key/data pair is the smallest
+key greater than or equal to the specified key (as determined by the
+comparison function), permitting partial key matches and range searches.
+<p><dt>-get_both<dd>The <b>-get_both</b> option is identical to the <b>-set</b> option,
+except that both the key and the data arguments must be matched by the
+key and data item in the database.
+<p>For <b>-get_both</b> to be specified, the underlying database must be of
+type Btree or Hash.
+<p><dt>-set_recno<dd>Move the cursor to the specific numbered record of the database, and
+return the associated key/data pair. The key
+must be a record number.
+<p>For the <b>-set_recno</b> option to be specified, the underlying database
+must be of type Btree, and it must have been created with the <b>-recnum</b>
+option.
+<p><dt>-get_recno<dd>Return a list of the record number associated with the current cursor
+position. No key argument should be specified.
+<p>For <b>-get_recno</b> to be specified, the underlying database must be
+of type Btree, and it must have been created with the <b>-recnum</b>
+option.
+<p><dt>-join_item<dd>Do not use the data value found in all the cursors as a lookup key for
+the primary database, but simply return it in the key parameter instead.
+The data parameter is left unchanged.
+<p>For <b>-join_item</b> to be specified, the cursor must have been created
+by the <i>db</i> <b>join</b> command.
+<p><dt>-partial {offset length}<dd>The <b>length</b> bytes starting <b>offset</b> bytes from the beginning
+of the retrieved data record are returned as if they comprised the
+entire record. If any or all of the specified bytes do not exist in
+the record, the command is successful and any existing bytes are
+returned.
+<p><dt>-rmw<dd>Acquire write locks instead of read locks when doing the retrieval. Setting
+this flag may decrease the likelihood of deadlock during a read-modify-write
+cycle by immediately acquiring the write lock during the read part of the
+cycle so that another thread of control acquiring a read lock for the same
+item, in its own read-modify-write cycle, will not result in deadlock.
+</dl>
+<p>If a key is specified, and if the underlying database is a Queue or
+Recno database, the given key will be interpreted by Tcl as an integer.
+For all other database types, the key is interpreted by Tcl as a byte
+array, unless indicated by a given option.
+<p>In the normal error case of attempting to retrieve a key that does not
+exist, an empty list is returned.
+<p>In the case of error, a Tcl error is thrown.
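+<p>The following sketch walks an entire database with a cursor; db0 is a
+hypothetical handle, and the cursor handle comes from the <i>db</i>
+<b>cursor</b> command:
+<p><pre>
+# Iterate from the first to the last key/data pair.
+set dbc [db0 cursor]
+set pair [$dbc get -first]
+while { [llength $pair] > 0 } {
+    set kd [lindex $pair 0]
+    puts "key: [lindex $kd 0] data: [lindex $kd 1]"
+    set pair [$dbc get -next]
+}
+$dbc close
+</pre>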
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/dbc_put.html b/libdb/docs/api_tcl/dbc_put.html
new file mode 100644
index 0000000..9ebb3a5
--- /dev/null
+++ b/libdb/docs/api_tcl/dbc_put.html
@@ -0,0 +1,135 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: dbc put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>dbc</i> <b>put</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>dbc put
+ [-after]
+ [-before]
+ [-current]
+ [-partial {doff dlen}]
+ data
+dbc put
+ [-keyfirst]
+ [-keylast]
+ [-partial {doff dlen}]
+ key data
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>dbc</i> <b>put</b> command stores the specified key/data pair into the
+database.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-after<dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key to which the cursor refers. The new
+element appears immediately after the current cursor position. It is an
+error to specify <b>-after</b> if the underlying Btree or Hash database
+was not created with the <b>-dup</b> option. No key argument should be
+specified.
+<p>In the case of the Recno access method, it is an error to specify the
+<b>-after</b> option if the underlying Recno database was not created
+with the <b>-renumber</b> option. If the <b>-renumber</b> option was
+specified, a new key is created, all records after the inserted item
+are automatically renumbered, and the key of the new record is returned
+in the structure to which the key argument refers. The initial value of
+the key parameter is ignored. See <b>berkdb open</b> for more information.
+<p>In the case of the Queue access method, it is always an error to specify
+<b>-after</b>.
+<p>If the current cursor record has already been deleted, and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree or Recno, the operation will succeed.
+<p><dt>-before<dd>In the case of the Btree and Hash access methods, insert the data element
+as a duplicate element of the key to which the cursor refers. The new
+element appears immediately before the current cursor position. It is an
+error to specify <b>-before</b> if the underlying Btree or Hash database
+was not created with the <b>-dup</b> option. No key argument should be
+specified.
+<p>In the case of the Recno access method, it is an error to specify
+<b>-before</b> if the underlying Recno database was not created with the
+<b>-renumber</b> option. If the <b>-renumber</b> option was specified, a
+new key is created, the current record and all records after it are
+automatically renumbered, and the key of the new record is returned in
+the structure to which the key argument refers. The initial value of the
+key parameter is ignored. See <b>berkdb open</b> for more information.
+<p>In the case of the Queue access method, it is always an error to specify
+<b>-before</b>.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree or Recno, the operation will succeed.
+<p><dt>-current<dd>Overwrite the data of the key/data pair to which the cursor refers with
+the specified data item. No key argument should be specified.
+<p>If the <b>-dupsort</b> option was specified to <b>berkdb open</b> and the
+data item of the key/data pair to which the cursor refers does not
+compare equally to the data parameter, <i>dbc</i> <b>put</b> will throw a Tcl
+error.
+<p>If the current cursor record has already been deleted and the underlying
+access method is Hash, <i>dbc</i> <b>put</b> will throw a Tcl error. If the
+underlying access method is Btree, Queue, or Recno, the operation will
+succeed.
+<p><dt>-keyfirst<dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the key already exists in the database, and the <b>-dupsort</b> option
+was specified to <b>berkdb open</b>, the inserted data item is added in its
+sorted location. If the key already exists in the database, and the
+<b>-dupsort</b> option was not specified, the inserted data item is added
+as the first of the data items for that key.
+<p>The <b>-keyfirst</b> option may not be specified to the Queue or Recno
+access methods.
+<p><dt>-keylast<dd>In the case of the Btree and Hash access methods, insert the specified
+key/data pair into the database.
+<p>If the key already exists in the database, and the <b>-dupsort</b> option
+was specified to <b>berkdb open</b>, the inserted data item is added in its
+sorted location. If the key already exists in the database, and the
+<b>-dupsort</b> option was not specified, the inserted data item is added
+as the last of the data items for that key.
+<p>The <b>-keylast</b> option may not be specified to the Queue or Recno
+access methods.
+<p><dt>-partial {doff dlen}<dd>
+<p>The <b>dlen</b> bytes starting <b>doff</b> bytes from the beginning
+of the specified key's data record are replaced by the data specified
+by the data and size structure elements. If <b>dlen</b> is smaller
+than the length of the supplied data, the record will grow; if
+<b>dlen</b> is larger than the length of the supplied data, the record
+will shrink. If the specified bytes do not exist, the record will be
+extended using nul bytes as necessary, and the <i>dbc</i> <b>put</b> call will succeed.
+<p>It is an error to attempt a partial put using the <i>db</i> <b>put</b> command in a database
+that supports duplicate records. Partial puts in databases supporting
+duplicate records must be done using the <i>dbc</i> <b>put</b> command.
+<p>It is an error to attempt a partial put with differing <b>dlen</b> and
+supplied data length values in Queue or Recno databases with fixed-length
+records.
+</dl>
+<p>If a key is specified, and
+if the underlying database is a Queue or Recno database, the given key
+will be interpreted by Tcl as an integer. For all other database types,
+the key is interpreted by Tcl as a byte array.
+<p>If <i>dbc</i> <b>put</b> fails for any reason, the state of the cursor will be
+unchanged. If <i>dbc</i> <b>put</b> succeeds and an item is inserted into the
+database, the cursor is always positioned to refer to the newly inserted
+item.
+<p>The <i>dbc</i> <b>put</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
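+<p>As an illustrative sketch (the handle and value names are examples only,
+assuming a database handle <b>$db</b> returned by <b>berkdb open</b> for a
+Btree database), a cursor store might look like:
+<p><pre>set dbc [$db cursor]
+# insert a key/data pair; the cursor now refers to the new item
+$dbc put -keyfirst "fruit" "apple"
+# overwrite the data of the item to which the cursor refers
+$dbc put -current "apricot"
+$dbc close</pre>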
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/env_close.html b/libdb/docs/api_tcl/env_close.html
new file mode 100644
index 0000000..b1fdfbc
--- /dev/null
+++ b/libdb/docs/api_tcl/env_close.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: env close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>env</i> <b>close</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env close
+</pre></h3>
+<h1>Description</h1>
+<p>Close the Berkeley DB environment, freeing any allocated resources and closing
+any underlying subsystems.
+<p>This does not imply closing any databases that were opened in the
+environment.
+<p>Where the environment was initialized with the <b>-txn</b> option,
+calling <i>env</i> <b>close</b> does not release any locks still held by the
+closing process, providing functionality for long-lived locks.
+<p>After <i>env</i> <b>close</b> has been called the <b>env</b> handle may not be
+accessed again.
+<p>The <i>env</i> <b>close</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
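+<p>As an illustrative sketch (the handle names are examples only), database
+handles opened in the environment should be closed before the environment
+handle itself:
+<p><pre># close databases first, then the environment
+$db close
+$env close</pre>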
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/env_dbremove.html b/libdb/docs/api_tcl/env_dbremove.html
new file mode 100644
index 0000000..586b614
--- /dev/null
+++ b/libdb/docs/api_tcl/env_dbremove.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: env dbremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>env</i> <b>dbremove</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env dbremove
+ [-auto_commit]
+ [-txn txnid]
+ [--]
+ file
+</pre></h3>
+<h1>Description</h1>
+<p>Remove the Berkeley DB database <b>file</b>.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>env</i> <b>dbremove</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
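+<p>As an illustrative sketch (the environment handle and file name are
+examples only), removing a database under transactional protection might
+look like:
+<p><pre>$env dbremove -auto_commit mydatabase.db</pre>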
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/env_dbrename.html b/libdb/docs/api_tcl/env_dbrename.html
new file mode 100644
index 0000000..fa48406
--- /dev/null
+++ b/libdb/docs/api_tcl/env_dbrename.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: env dbrename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>env</i> <b>dbrename</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env dbrename
+ [-auto_commit]
+ [-txn txnid]
+ [--]
+ file
+ newname
+</pre></h3>
+<h1>Description</h1>
+<p>Rename the Berkeley DB database <b>file</b> to <b>newname</b>.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-auto_commit<dd>Enclose the call within a transaction. If the call succeeds, changes
+made by the operation will be recoverable. If the call fails, the
+operation will have made no changes.
+<p><dt>-txn txnid<dd>If the operation is to be
+transaction-protected (other than by specifying the -auto_commit flag),
+the <b>txnid</b> parameter is a transaction handle returned from
+<i>env</i> <b>txn</b>.
+</dl>
+<p>The <i>env</i> <b>dbrename</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
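+<p>As an illustrative sketch (the environment handle and file names are
+examples only):
+<p><pre>$env dbrename -auto_commit mydatabase.db newname.db</pre>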
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/env_open.html b/libdb/docs/api_tcl/env_open.html
new file mode 100644
index 0000000..9c6dc32
--- /dev/null
+++ b/libdb/docs/api_tcl/env_open.html
@@ -0,0 +1,169 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb env</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb env</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb env
+ [-cachesize {gbytes bytes ncache}]
+ [-create]
+ [-data_dir dirname]
+ [-errfile filename]
+ [-home directory]
+ [-log_dir dirname]
+ [-mode mode]
+ [-private]
+ [-recover]
+ [-recover_fatal]
+ [-shm_key shmid]
+ [-system_mem]
+ [-tmp_dir dirname]
+ [-txn [nosync]]
+ [-txn_max max]
+ [-use_environ]
+ [-use_environ_root]
+</pre></h3>
+<h1>Description</h1>
+<p>The <b>berkdb env</b> command opens and optionally creates a database
+environment. The returned environment handle is bound to a Tcl command
+of the form <b>envN</b>, where N is an integer starting at 0 (for
+example, env0 and env1). It is through this Tcl command that the script
+accesses the environment methods. The command automatically initializes
+the Shared Memory Buffer Pool subsystem. This subsystem is used
+whenever the application is using any Berkeley DB access method.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-cachesize {gbytes bytes ncache}<dd>Set the size of the database's shared memory buffer pool (that is, the
+cache), to <b>gbytes</b> gigabytes plus <b>bytes</b>. The cache
+should be the size of the normal working data set of the application,
+with some small amount of additional memory for unusual situations.
+(Note: The working set is not the same as the number of simultaneously
+referenced pages, and should be quite a bit larger!)
+<p>The default cache size is 256KB, and may not be specified as less than
+20KB. Any cache size less than 500MB is automatically increased by 25%
+to account for buffer pool overhead; cache sizes larger than 500MB are
+used as specified.
+<p>It is possible to specify caches to Berkeley DB that are large enough so that
+they cannot be allocated contiguously on some architectures; for example,
+some releases of Solaris limit the amount of memory that may be
+allocated contiguously by a process. If <b>ncache</b> is 0 or 1, the
+cache will be allocated contiguously in memory. If it is greater than
+1, the cache will be broken up into <b>ncache</b> equally sized
+separate pieces of memory.
+<p>For information on tuning the Berkeley DB cache size, see
+<a href="../ref/am_conf/cachesize.html">Selecting a cache size</a>.
+<p><dt>-create<dd>Cause Berkeley DB subsystems to create any underlying files, as necessary.
+<p><dt>-data_dir dirname<dd>Specify the environment's data directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-errfile filename<dd>
+<p>When an error occurs in the Berkeley DB library, a Berkeley DB error or an error
+return value is returned by the function. In some cases, however, the
+errno value may be insufficient to completely describe the cause of the
+error, especially during initial application debugging.
+<p>The <b>-errfile</b> argument is used to enhance the mechanism for
+reporting error messages to the application by specifying a file to be
+used for displaying additional Berkeley DB error messages. In some cases, when
+an error occurs, Berkeley DB will output an additional error message to the
+specified file reference.
+<p>The error message will consist of the environment command name (for example, env0) and a colon
+(":"), an error string, and a trailing &lt;newline&gt;
+character.
+<p>This error-logging enhancement does not slow performance or significantly
+increase application size, and may be run during normal operation as well
+as during application debugging.
+<p><dt>-home directory<dd>The <b>-home</b> argument is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-log_dir dirname<dd>Specify the environment's logging file directory as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-mode mode<dd>
+<p>On UNIX systems, or in IEEE/ANSI Std 1003.1 (POSIX) environments, all files created by Berkeley DB
+are created with mode <b>mode</b> (as described in <b>chmod</b>(2)) and
+modified by the process' umask value at the time of creation (see
+<b>umask</b>(2)). The group ownership of created files is based on
+the system and directory defaults, and is not further specified by Berkeley DB.
+If <b>mode</b> is 0, files are created readable and writable by both
+owner and group. On Windows systems, the mode argument is ignored.
+<p><dt>-private<dd>Specify that the environment will only be accessed by a single process
+(although that process may be multithreaded). This flag has two effects
+on the Berkeley DB environment. First, all underlying data structures are
+allocated from per-process memory instead of from shared memory that is
+potentially accessible to more than a single process. Second, mutexes
+are only configured to work between threads.
+<p>This flag should not be specified if more than a single process is
+accessing the environment, as it is likely to cause database corruption
+and unpredictable behavior. For example, if both a server application
+and the Berkeley DB utility <a href="../utility/db_stat.html">db_stat</a> will access the environment, the
+<b>-private</b> option should not be specified.
+<p><dt>-recover<dd>Run normal recovery on this environment before opening it for normal use.
+If this flag is set, the <b>-create</b> option must also be set because
+the regions will be removed and re-created.
+<p><dt>-recover_fatal<dd>Run catastrophic recovery on this environment before opening it for
+normal use. If this flag is set, the <b>-create</b> option must also be
+set since the regions will be removed and re-created.
+<p><dt>-shm_key shmid<dd>Specify a base segment ID for Berkeley DB environment shared memory regions
+created in system memory on systems supporting X/Open-style shared
+memory interfaces, for example, UNIX systems supporting shmget(2) and
+related System V IPC interfaces. See <a href="../ref/env/region.html">Shared Memory Regions</a> for more information.
+<p><dt>-system_mem<dd>Allocate memory from system shared memory instead of memory backed by the
+filesystem. See <a href="../ref/env/region.html">Shared Memory Regions</a>
+for more information.
+<p><dt>-tmp_dir dirname<dd>Specify the environment's tmp directory, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-txn [nosync]<dd>Initialize the Transaction subsystem. This subsystem is used when
+recovery and atomicity of multiple operations are important.
+The <b>-txn</b> option implies the initialization of the logging
+and locking subsystems as well.
+<p>If the optional <b>nosync</b> argument is specified, the log will not be
+synchronously flushed on transaction commit or prepare. This means that
+transactions exhibit the ACI (atomicity, consistency, and isolation)
+properties, but not D (durability); that is, database integrity will be
+maintained, but it is possible that some number of the most recently
+committed transactions may be undone during recovery instead of being
+redone.
+<p>The number of transactions that are potentially at risk is governed by
+how often the log is checkpointed (see <a href="../utility/db_checkpoint.html">db_checkpoint</a> for more
+information) and how many log updates can fit on a single log page.
+<p><dt>-txn_max max<dd>Set the maximum number of simultaneous transactions that are supported
+by the environment, which bounds the size of backing files. When there
+are more than the specified number of concurrent transactions, calls to
+<i>env</i> <b>txn</b> will fail (until some active transactions complete).
+<p><dt>-use_environ<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+Because permitting users to specify which files are used can create
+security problems, environment information will be used in file naming
+for all users only if the <b>-use_environ</b> flag is set.
+<p><dt>-use_environ_root<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+As permitting users to specify which files are used can create security
+problems, if the <b>-use_environ_root</b> flag is set, environment
+information will be used for file naming only for users with appropriate
+permissions (for example, users with a user-ID of 0 on IEEE/ANSI Std 1003.1 (POSIX)
+systems).
+</dl>
+<p>The <b>berkdb env</b> command returns an environment handle on success.
+<p>In the case of error, a Tcl error is thrown.
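+<p>As an illustrative sketch (the home directory and cache size are example
+values only), creating a transactional environment with a 1MB cache might
+look like:
+<p><pre>set env [berkdb env -create -home /tmp/dbhome \
+	-cachesize {0 1048576 1} -txn]
+# ... open databases and perform operations in the environment ...
+$env close</pre>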
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/env_remove.html b/libdb/docs/api_tcl/env_remove.html
new file mode 100644
index 0000000..1b1b951
--- /dev/null
+++ b/libdb/docs/api_tcl/env_remove.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb envremove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb envremove</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb envremove
+ [-data_dir directory]
+ [-force]
+ [-home directory]
+ [-log_dir directory]
+ [-tmp_dir directory]
+ [-use_environ]
+ [-use_environ_root]
+</pre></h3>
+<h1>Description</h1>
+<p>Remove a Berkeley DB environment.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-data_dir dirname<dd>Specify the environment's data directory, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-force<dd>If there are processes that have called <b>berkdb env</b> without calling
+<i>env</i> <b>close</b> (that is, there are processes currently using the
+environment), <b>berkdb envremove</b> will fail without further action, unless
+the <b>-force</b> flag is set, in which case <b>berkdb envremove</b> will
+attempt to remove the environment regardless of any processes still
+using it.
+<p><dt>-home directory<dd>The <b>-home</b> argument is described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-log_dir dirname<dd>Specify the environment's log directory, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-tmp_dir dirname<dd>Specify the environment's tmp directory, as described in
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+<p><dt>-use_environ<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+Because permitting users to specify which files are used can create
+security problems, environment information will be used in file naming
+for all users only if the <b>-use_environ</b> flag is set.
+<p><dt>-use_environ_root<dd>The Berkeley DB process' environment may be permitted to specify information
+to be used when naming files; see
+<a href="../ref/env/naming.html">Berkeley DB File Naming</a>.
+As permitting users to specify which files are used can create security
+problems, if the <b>-use_environ_root</b> flag is set, environment
+information will be used for file naming only for users with appropriate
+permissions (for example, users with a user-ID of 0 on IEEE/ANSI Std 1003.1 (POSIX)
+systems).
+</dl>
+<p>The <b>berkdb envremove</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
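+<p>As an illustrative sketch (the home directory is an example value only),
+assuming no process still has the environment open:
+<p><pre>berkdb envremove -home /tmp/dbhome</pre>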
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/pindex.src b/libdb/docs/api_tcl/pindex.src
new file mode 100644
index 0000000..9d933d7
--- /dev/null
+++ b/libdb/docs/api_tcl/pindex.src
@@ -0,0 +1,30 @@
+__APIREL__/api_tcl/db_close.html__OCT__2 @db close
+__APIREL__/api_tcl/db_count.html__OCT__2 @db count
+__APIREL__/api_tcl/db_cursor.html__OCT__2 @db cursor
+__APIREL__/api_tcl/db_del.html__OCT__2 @db del
+__APIREL__/api_tcl/db_get.html__OCT__2 @db get
+__APIREL__/api_tcl/db_get_join.html__OCT__2 @db get_join
+__APIREL__/api_tcl/db_get_type.html__OCT__2 @db get_type
+__APIREL__/api_tcl/db_is_byteswapped.html__OCT__2 @db is_byteswapped
+__APIREL__/api_tcl/db_join.html__OCT__2 @db join
+__APIREL__/api_tcl/db_open.html__OCT__2 @berkdb open
+__APIREL__/api_tcl/db_put.html__OCT__2 @db put
+__APIREL__/api_tcl/db_remove.html__OCT__2 @berkdb dbremove
+__APIREL__/api_tcl/db_rename.html__OCT__2 @berkdb dbrename
+__APIREL__/api_tcl/db_stat.html__OCT__2 @db stat
+__APIREL__/api_tcl/db_sync.html__OCT__2 @db sync
+__APIREL__/api_tcl/db_truncate.html__OCT__2 @db truncate
+__APIREL__/api_tcl/dbc_close.html__OCT__2 @dbc close
+__APIREL__/api_tcl/dbc_del.html__OCT__2 @dbc del
+__APIREL__/api_tcl/dbc_dup.html__OCT__2 @dbc dup
+__APIREL__/api_tcl/dbc_get.html__OCT__2 @dbc get
+__APIREL__/api_tcl/dbc_put.html__OCT__2 @dbc put
+__APIREL__/api_tcl/env_close.html__OCT__2 @env close
+__APIREL__/api_tcl/env_dbremove.html__OCT__2 @env dbremove
+__APIREL__/api_tcl/env_dbrename.html__OCT__2 @env dbrename
+__APIREL__/api_tcl/env_open.html__OCT__2 @berkdb env
+__APIREL__/api_tcl/env_remove.html__OCT__2 @berkdb envremove
+__APIREL__/api_tcl/txn.html__OCT__2 @env txn
+__APIREL__/api_tcl/txn_abort.html__OCT__2 @txn abort
+__APIREL__/api_tcl/txn_commit.html__OCT__2 @txn commit
+__APIREL__/api_tcl/version.html__OCT__2 @berkdb version
diff --git a/libdb/docs/api_tcl/tcl_index.html b/libdb/docs/api_tcl/tcl_index.html
new file mode 100644
index 0000000..d70bfd5
--- /dev/null
+++ b/libdb/docs/api_tcl/tcl_index.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Tcl Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Tcl Interface</h1>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Tcl Command</th><th>Description</th></tr>
+<tr><td><a href="../api_tcl/env_open.html"><b>berkdb env</b></a></td><td>Create an environment handle</td></tr>
+<tr><td><a href="../api_tcl/env_remove.html"><b>berkdb envremove</b></a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../api_tcl/env_close.html"><i>env</i> <b>close</b></a></td><td>Close an environment</td></tr>
+<tr><td><a href="../api_tcl/env_dbremove.html"><i>env</i> <b>dbremove</b></a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_tcl/env_dbrename.html"><i>env</i> <b>dbrename</b></a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/txn.html"><i>env</i> <b>txn</b></a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../api_tcl/txn_abort.html"><i>txn</i> <b>abort</b></a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../api_tcl/txn_commit.html"><i>txn</i> <b>commit</b></a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/db_open.html"><b>berkdb open</b></a></td><td>Create a database handle</td></tr>
+<tr><td><a href="../api_tcl/db_remove.html"><b>berkdb dbremove</b></a></td><td>Remove a database</td></tr>
+<tr><td><a href="../api_tcl/db_rename.html"><b>berkdb dbrename</b></a></td><td>Rename a database</td></tr>
+<tr><td><a href="../api_tcl/db_close.html"><i>db</i> <b>close</b></a></td><td>Close a database</td></tr>
+<tr><td><a href="../api_tcl/db_count.html"><i>db</i> <b>count</b></a></td><td>Return a count of a key's data items</td></tr>
+<tr><td><a href="../api_tcl/db_cursor.html"><i>db</i> <b>cursor</b></a></td><td>Open a cursor into a database</td></tr>
+<tr><td><a href="../api_tcl/db_del.html"><i>db</i> <b>del</b></a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../api_tcl/db_get.html"><i>db</i> <b>get</b></a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../api_tcl/db_get_join.html"><i>db</i> <b>get_join</b></a></td><td>Get items from a database join</td></tr>
+<tr><td><a href="../api_tcl/db_get_type.html"><i>db</i> <b>get_type</b></a></td><td>Return the database type</td></tr>
+<tr><td><a href="../api_tcl/db_is_byteswapped.html"><i>db</i> <b>is_byteswapped</b></a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../api_tcl/db_join.html"><i>db</i> <b>join</b></a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../api_tcl/db_put.html"><i>db</i> <b>put</b></a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../api_tcl/db_stat.html"><i>db</i> <b>stat</b></a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../api_tcl/db_sync.html"><i>db</i> <b>sync</b></a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../api_tcl/db_truncate.html"><i>db</i> <b>truncate</b></a></td><td>Truncate a database</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/dbc_close.html"><i>dbc</i> <b>close</b></a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_del.html"><i>dbc</i> <b>del</b></a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_dup.html"><i>dbc</i> <b>dup</b></a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_get.html"><i>dbc</i> <b>get</b></a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../api_tcl/dbc_put.html"><i>dbc</i> <b>put</b></a></td><td>Store by cursor</td></tr>
+<tr><td><br></td><td><br></td></tr>
+<tr><td><a href="../api_tcl/version.html"><b>berkdb version</b></a></td><td>Return version information</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/tcl_pindex.html b/libdb/docs/api_tcl/tcl_pindex.html
new file mode 100644
index 0000000..59bdb9e
--- /dev/null
+++ b/libdb/docs/api_tcl/tcl_pindex.html
@@ -0,0 +1,325 @@
+<html>
+<head>
+<title>Berkeley DB: Tcl Interface Index</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Tcl Interface Index</h1>
+<center>
+<table cellspacing=0 cellpadding=0>
+<tr><td align=right>configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#4">1.85</a> API compatibility</td></tr>
+<tr><td align=right>building a utility to dump Berkeley DB </td><td><a href="../ref/build_unix/conf.html#6">1.85</a> databases</td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.2.0/intro.html#2">2.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.0/intro.html#2">3.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.1/intro.html#2">3.1</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.2/intro.html#2">3.2</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.3.3/intro.html#2">3.3</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.0/intro.html#2">4.0</a></td></tr>
+<tr><td align=right>Upgrading to release </td><td><a href="../ref/upgrade.4.1/intro.html#2">4.1</a></td></tr>
+<tr><td align=right>selecting an </td><td><a href="../ref/am_conf/select.html#2">access</a> method</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#2">access</a> method FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/tune.html#2">access</a> method tuning</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/am_conf/intro.html#2">access</a> methods</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/aix.html#2">AIX</a></td></tr>
+<tr><td align=right>data </td><td><a href="../ref/am_misc/align.html#2">alignment</a></td></tr>
+<tr><td align=right>programmatic </td><td><a href="../ref/arch/apis.html#2">APIs</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_archive.html#3">archive</a> log files</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/archival.html#4">backup</a></td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_remove.html#2">berkdb</a> dbremove</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_rename.html#2">berkdb</a> dbrename</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/env_open.html#2">berkdb</a> env</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/env_remove.html#2">berkdb</a> envremove</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_open.html#2">berkdb</a> open</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/version.html#2">berkdb</a> version</td></tr>
+<tr><td align=right></td><td><a href="../utility/berkeley_db_svc.html#2">berkeley_db_svc</a></td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#4">buffer</a> pool subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#3">building</a> for QNX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/intro.html#2">building</a> for UNIX</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/notes.html#2">building</a> for UNIX FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/intro.html#2">building</a> for VxWorks</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/introae.html#2">building</a> for VxWorks AE</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/faq.html#2">building</a> for VxWorks FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/intro.html#2">building</a> for Win32</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/faq.html#2">building</a> for Windows FAQ</td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/byteorder.html#2">byte</a> order</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#5">C++</a> API</td></tr>
+<tr><td align=right>flushing the database </td><td><a href="../ref/am/sync.html#2">cache</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/cachesize.html#2">cache</a> size</td></tr>
+<tr><td align=right>introduction to the memory </td><td><a href="../ref/mp/intro.html#3">cache</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/archival.html#3">catastrophic</a> recovery</td></tr>
+<tr><td align=right>Patches, Updates and </td><td><a href="http://www.sleepycat.com/update/index.html">Change</a> logs</td></tr>
+<tr><td align=right>utility to take </td><td><a href="../utility/db_checkpoint.html#3">checkpoints</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curclose.html#2">closing</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/close.html#2">closing</a> a database</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/faq.html#3">compaction</a></td></tr>
+<tr><td align=right>specifying a Btree </td><td><a href="../ref/am_conf/bt_compare.html#2">comparison</a> function</td></tr>
+<tr><td align=right>changing </td><td><a href="../ref/build_unix/flags.html#2">compile</a> or load options</td></tr>
+<tr><td align=right></td><td><a href="../ref/cam/intro.html#2">Concurrent</a> Data Store</td></tr>
+<tr><td align=right>database environment </td><td><a href="../ref/env/db_config.html#3">configuration</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/conf.html#2">configuring</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right>salvaging </td><td><a href="../ref/am/verify.html#4">corrupted</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/count.html#2">counting</a> data items for a key</td></tr>
+<tr><td align=right>closing a </td><td><a href="../ref/am/curclose.html#3">cursor</a></td></tr>
+<tr><td align=right>deleting records with a </td><td><a href="../ref/am/curdel.html#3">cursor</a></td></tr>
+<tr><td align=right>duplicating a </td><td><a href="../ref/am/curdup.html#3">cursor</a></td></tr>
+<tr><td align=right>retrieving records with a </td><td><a href="../ref/am/curget.html#3">cursor</a></td></tr>
+<tr><td align=right>storing records with a </td><td><a href="../ref/am/curput.html#3">cursor</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#2">cursor</a> stability</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/cursor.html#2">cursors</a></td></tr>
+<tr><td align=right>utility to upgrade </td><td><a href="../utility/db_upgrade.html#3">database</a> files</td></tr>
+<tr><td align=right>utility to verify </td><td><a href="../utility/db_verify.html#3">database</a> files</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_close.html#2">db</a> close</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/dbc_close.html#2">db</a> close</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_count.html#2">db</a> count</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_cursor.html#2">db</a> cursor</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_del.html#2">db</a> del</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/dbc_del.html#2">db</a> del</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/dbc_dup.html#2">db</a> dup</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_get.html#2">db</a> get</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/dbc_get.html#2">db</a> get</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_get_join.html#2">db</a> get_join</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_get_type.html#2">db</a> get_type</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_is_byteswapped.html#2">db</a> is_byteswapped</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_join.html#2">db</a> join</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_put.html#2">db</a> put</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_stat.html#2">db</a> stat</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_sync.html#2">db</a> sync</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/db_truncate.html#2">db</a> truncate</td></tr>
+<tr><td align=right></td><td><a href="../ref/env/region.html#2">__db.001</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_archive.html#2">db_archive</a></td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/dbc_put.html#2">dbc</a> put</td></tr>
+<tr><td align=right></td><td><a href="../utility/db_checkpoint.html#2">db_checkpoint</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/db_config.html#2">DB_CONFIG</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_deadlock.html#2">db_deadlock</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_dump.html#2">db_dump</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#DB_HOME">DB_HOME</a></td></tr>
+<tr><td align=right>File naming </td><td><a href="../ref/env/naming.html#db_home">db_home</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_KEYEXIST">DB_KEYEXIST</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_load.html#2">db_load</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#4">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_printlog.html#2">db_printlog</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_recover.html#2">db_recover</a></td></tr>
+<tr><td align=right>Error returns to applications </td><td><a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_stat.html#2">db_stat</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_upgrade.html#2">db_upgrade</a></td></tr>
+<tr><td align=right></td><td><a href="../utility/db_verify.html#2">db_verify</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/dead.html#2">deadlocks</a></td></tr>
+<tr><td align=right>utility to detect </td><td><a href="../utility/db_deadlock.html#3">deadlocks</a></td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/debug/intro.html#2">debugging</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/debug/common.html#2">debugging</a> applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/stability.html#4">degrees</a> of isolation</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/delete.html#2">deleting</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdel.html#2">deleting</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#4">dirty</a> reads</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-shared">--disable-shared</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--disable-static">--disable-static</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/diskspace.html#2">disk</a> space requirements</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#2">Distributed</a> Transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/faq.html#5">double</a> buffering</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_dump.html#3">dump</a> databases as text files</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/dup.html#2">duplicate</a> data items</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curdup.html#2">duplicating</a> a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/embedix.html#2">Embedix</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#3">emptying</a> a database</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-compat185">--enable-compat185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-cxx">--enable-cxx</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug">--enable-debug</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_rop">--enable-debug_rop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-debug_wop">--enable-debug_wop</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-dump185">--enable-dump185</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-java">--enable-java</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-rpc">--enable-rpc</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-tcl">--enable-tcl</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-test">--enable-test</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/env/encrypt.html#2">encryption</a></td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/env_close.html#2">env</a> close</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/env_dbremove.html#2">env</a> dbremove</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/env_dbrename.html#2">env</a> dbrename</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/txn.html#2">env</a> txn</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/create.html#2">environment</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/env/faq.html#2">environment</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/environ.html#2">environment</a> variables</td></tr>
+<tr><td align=right>introduction to database </td><td><a href="../ref/env/intro.html#2">environments</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#2">equality</a> join</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/error.html#2">error</a> handling</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#3">error</a> name space</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/errorret.html#2">error</a> returns</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#2">/etc/magic</a></td></tr>
+<tr><td align=right>selecting a Queue </td><td><a href="../ref/am_conf/extentsize.html#2">extent</a> size</td></tr>
+<tr><td align=right>hot </td><td><a href="../ref/transapp/hotfail.html#2">failover</a></td></tr>
+<tr><td align=right>Java </td><td><a href="../ref/java/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>Tcl </td><td><a href="../ref/tcl/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/faq.html#2">FAQ</a></td></tr>
+<tr><td align=right>configuring without large </td><td><a href="../ref/build_unix/conf.html#8">file</a> support</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/file.html#3">file</a> utility</td></tr>
+<tr><td align=right>returning pages to the </td><td><a href="../ref/am_misc/faq.html#4">filesystem</a></td></tr>
+<tr><td align=right>recovery and </td><td><a href="../ref/transapp/filesys.html#2">filesystem</a> operations</td></tr>
+<tr><td align=right>remote </td><td><a href="../ref/env/remote.html#2">filesystems</a></td></tr>
+<tr><td align=right>page </td><td><a href="../ref/am_conf/h_ffactor.html#2">fill</a> factor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/freebsd.html#2">FreeBSD</a></td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/program/scope.html#3">free-threaded</a> handles</td></tr>
+<tr><td align=right>specifying a database </td><td><a href="../ref/am_conf/h_hash.html#2">hash</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/h_nelem.html#2">hash</a> table size</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/hpux.html#2">HP-UX</a></td></tr>
+<tr><td align=right>secondary </td><td><a href="../ref/am/second.html#3">indices</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/install.html#2">installing</a> Berkeley DB for UNIX systems</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/compatible.html#2">interface</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/irix.html#2">IRIX</a></td></tr>
+<tr><td align=right>degrees of </td><td><a href="../ref/am_misc/stability.html#5">isolation</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#7">Java</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/compat.html#2">Java</a> compatibility</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/conf.html#2">Java</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/java/faq.html#3">Java</a> FAQ</td></tr>
+<tr><td align=right>equality </td><td><a href="../ref/am/join.html#3">join</a></td></tr>
+<tr><td align=right>retrieved </td><td><a href="../ref/am_misc/perm.html#3">key/data</a> permanence</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am_misc/dbsizes.html#2">limits</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/linux.html#2">Linux</a></td></tr>
+<tr><td align=right>changing compile or </td><td><a href="../ref/build_unix/flags.html#3">load</a> options</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_load.html#3">load</a> text files into databases</td></tr>
+<tr><td align=right>standard </td><td><a href="../ref/lock/stdmode.html#2">lock</a> modes</td></tr>
+<tr><td align=right>page-level </td><td><a href="../ref/lock/page.html#2">locking</a></td></tr>
+<tr><td align=right>two-phase </td><td><a href="../ref/lock/twopl.html#2">locking</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/nondb.html#2">locking</a> and non-Berkeley DB applications</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/config.html#2">locking</a> configuration</td></tr>
+<tr><td align=right>Berkeley DB Transactional Data Store </td><td><a href="../ref/lock/am_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right>Berkeley DB Concurrent Data Store </td><td><a href="../ref/lock/cam_conv.html#2">locking</a> conventions</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/page.html#3">locking</a> granularity</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/lock/intro.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right>sizing the </td><td><a href="../ref/lock/max.html#2">locking</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/lock/notxn.html#2">locking</a> without transactions</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/limits.html#2">log</a> file limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/logfile.html#2">log</a> file removal</td></tr>
+<tr><td align=right>utility to display </td><td><a href="../utility/db_printlog.html#3">log</a> files as text</td></tr>
+<tr><td align=right></td><td><a href="../ref/log/config.html#2">logging</a> configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/log/intro.html#2">logging</a> subsystem</td></tr>
+<tr><td align=right>retrieving Btree records by </td><td><a href="../ref/am_conf/bt_recnum.html#3">logical</a> record @number</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/macosx.html#2">Mac</a> OS X</td></tr>
+<tr><td align=right></td><td><a href="../ref/mp/config.html#2">memory</a> pool configuration</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/mp/intro.html#2">memory</a> pool subsystem</td></tr>
+<tr><td align=right>Berkeley DB library </td><td><a href="../ref/program/namespace.html#2">name</a> spaces</td></tr>
+<tr><td align=right>file </td><td><a href="../ref/env/naming.html#2">naming</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/join.html#4">natural</a> join</td></tr>
+<tr><td align=right>retrieving Btree records by logical record </td><td><a href="../ref/am_conf/bt_recnum.html#2">number</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/open.html#2">opening</a> a database</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/osf1.html#2">OSF/1</a></td></tr>
+<tr><td align=right>selecting a </td><td><a href="../ref/am_conf/pagesize.html#2">page</a> size</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/partial.html#2">partial</a> record storage and retrieval</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/update/index.html">Patches,</a> Updates and Change logs</td></tr>
+<tr><td align=right></td><td><a href="../ref/perl/intro.html#2">Perl</a></td></tr>
+<tr><td align=right>retrieved key/data </td><td><a href="../ref/am_misc/perm.html#2">permanence</a></td></tr>
+<tr><td align=right>task/thread </td><td><a href="../ref/program/faq.html#2">priority</a></td></tr>
+<tr><td align=right>Sleepycat Software's Berkeley DB </td><td><a href="../ref/intro/products.html#2">products</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#5">QNX</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/qnx.html#2">QNX</a></td></tr>
+<tr><td align=right>dirty </td><td><a href="../ref/transapp/read.html#3">reads</a></td></tr>
+<tr><td align=right>logical </td><td><a href="../ref/am_conf/logrec.html#2">record</a> numbers</td></tr>
+<tr><td align=right>managing </td><td><a href="../ref/am_conf/recno.html#2">record-based</a> databases</td></tr>
+<tr><td align=right>logically renumbering </td><td><a href="../ref/am_conf/renumber.html#2">records</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_recover.html#3">recover</a> database environments</td></tr>
+<tr><td align=right>Berkeley DB </td><td><a href="../ref/transapp/reclimit.html#2">recoverability</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/read.html#2">repeatable</a> read</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rep/intro.html#2">replication</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right>XA </td><td><a href="../ref/xa/xa_intro.html#3">Resource</a> Manager</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/get.html#2">retrieving</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_misc/get_bulk.html#2">retrieving</a> records in bulk</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curget.html#2">retrieving</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/client.html#2">RPC</a> client</td></tr>
+<tr><td align=right>configuring a </td><td><a href="../ref/build_unix/conf.html#9">RPC</a> client/server</td></tr>
+<tr><td align=right>introduction to </td><td><a href="../ref/rpc/intro.html#2">rpc</a> client/server</td></tr>
+<tr><td align=right>utility to support </td><td><a href="../utility/berkeley_db_svc.html#3">RPC</a> client/server</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/faq.html#2">RPC</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/rpc/server.html#2">RPC</a> server</td></tr>
+<tr><td align=right></td><td><a href="../ref/install/rpm.html#2">RPM</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#3">salvage</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sco.html#2">SCO</a></td></tr>
+<tr><td align=right>Berkeley DB handle </td><td><a href="../ref/program/scope.html#2">scope</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/second.html#2">secondary</a> indices</td></tr>
+<tr><td align=right></td><td><a href="../ref/env/security.html#2">security</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/sendmail/intro.html#2">Sendmail</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#10">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/shlib.html#2">shared</a> libraries</td></tr>
+<tr><td align=right></td><td><a href="../ref/program/appsignals.html#2">signal</a> handling</td></tr>
+<tr><td align=right></td><td><a href="http://www.sleepycat.com/">Sleepycat</a> Software</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/solaris.html#2">Solaris</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/distrib/layout.html#2">source</a> code layout</td></tr>
+<tr><td align=right>cursor </td><td><a href="../ref/am_misc/stability.html#3">stability</a></td></tr>
+<tr><td align=right>disabling </td><td><a href="../ref/build_unix/conf.html#11">static</a> libraries</td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/stat.html#2">statistics</a></td></tr>
+<tr><td align=right>utility to display database and environment </td><td><a href="../utility/db_stat.html#3">statistics</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/put.html#2">storing</a> records</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/curput.html#2">storing</a> records with a cursor</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/sunos.html#2">SunOS</a></td></tr>
+<tr><td align=right>loading Berkeley DB with </td><td><a href="../ref/tcl/intro.html#2">Tcl</a></td></tr>
+<tr><td align=right>using Berkeley DB with </td><td><a href="../ref/tcl/using.html#2">Tcl</a></td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#12">Tcl</a> API</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/program.html#2">Tcl</a> API programming notes</td></tr>
+<tr><td align=right></td><td><a href="../ref/tcl/faq.html#3">Tcl</a> FAQ</td></tr>
+<tr><td align=right>configuring the </td><td><a href="../ref/build_unix/conf.html#13">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/test/run.html#2">test</a> suite</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_unix/test.html#2">test</a> suite under UNIX</td></tr>
+<tr><td align=right>running the </td><td><a href="../ref/build_win/test.html#2">test</a> suite under Windows</td></tr>
+<tr><td align=right></td><td><a href="../ref/am_conf/re_source.html#2">text</a> backing files</td></tr>
+<tr><td align=right>loading </td><td><a href="../ref/dumpload/text.html#2">text</a> into databases</td></tr>
+<tr><td align=right>dumping/loading </td><td><a href="../ref/dumpload/utility.html#2">text</a> to/from databases</td></tr>
+<tr><td align=right>building </td><td><a href="../ref/program/mt.html#2">threaded</a> applications</td></tr>
+<tr><td align=right>lock </td><td><a href="../ref/lock/timeout.html#2">timeouts</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/lock/timeout.html#3">timeouts</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/config.html#2">transaction</a> configuration</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/faq.html#2">transaction</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/txn/limits.html#2">transaction</a> limits</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/build.html#2">Transaction</a> Manager</td></tr>
+<tr><td align=right>administering </td><td><a href="../ref/transapp/admin.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>archival in </td><td><a href="../ref/transapp/archival.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>checkpoints in </td><td><a href="../ref/transapp/checkpoint.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>deadlock detection in </td><td><a href="../ref/transapp/deadlock.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>recovery in </td><td><a href="../ref/transapp/recovery.html#2">transaction</a> protected applications</td></tr>
+<tr><td align=right>introduction to the </td><td><a href="../ref/txn/intro.html#2">transaction</a> subsystem</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/throughput.html#2">transaction</a> throughput</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/tune.html#2">transaction</a> tuning</td></tr>
+<tr><td align=right></td><td><a href="../ref/transapp/intro.html#2">Transactional</a> Data Store</td></tr>
+<tr><td align=right>nested </td><td><a href="../ref/transapp/nested.html#2">transactions</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/am/truncate.html#2">truncating</a> a database</td></tr>
+<tr><td align=right>access method </td><td><a href="../ref/am_misc/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>transaction </td><td><a href="../ref/transapp/tune.html#3">tuning</a></td></tr>
+<tr><td align=right>simple </td><td><a href="../ref/simple_tut/intro.html#2">tutorial</a></td></tr>
+<tr><td align=right>configuring Berkeley DB with the </td><td><a href="../ref/xa/xa_config.html#2">Tuxedo</a> System</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/txn_abort.html#2">txn</a> abort</td></tr>
+<tr><td align=right></td><td><a href="../api_tcl/txn_commit.html#2">txn</a> commit</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_unix/ultrix.html#2">Ultrix</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/intro.html#4">UNIX</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_unix/notes.html#3">UNIX</a> FAQ</td></tr>
+<tr><td align=right>configuring Berkeley DB for </td><td><a href="../ref/build_unix/conf.html#3">UNIX</a> systems</td></tr>
+<tr><td align=right>Patches, </td><td><a href="http://www.sleepycat.com/update/index.html">Updates</a> and Change logs</td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_upgrade.html#4">upgrade</a> database files</td></tr>
+<tr><td align=right></td><td><a href="../ref/am/upgrade.html#2">upgrading</a> databases</td></tr>
+<tr><td align=right></td><td><a href="../ref/arch/utilities.html#2">utilities</a></td></tr>
+<tr><td align=right>database </td><td><a href="../ref/am/verify.html#2">verification</a></td></tr>
+<tr><td align=right>utility to </td><td><a href="../utility/db_verify.html#4">verify</a> database files</td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_vxworks/faq.html#3">VxWorks</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_vxworks/notes.html#2">VxWorks</a> notes</td></tr>
+<tr><td align=right>running the test suite under </td><td><a href="../ref/build_win/test.html#3">Windows</a></td></tr>
+<tr><td align=right>building for </td><td><a href="../ref/build_win/faq.html#3">Windows</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/build_win/notes.html#2">Windows</a> notes</td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-embedix=DIR">--with-embedix=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-mutex=MUTEX">--with-mutex=MUTEX</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-rpm=DIR">--with-rpm=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-tcl=DIR">--with-tcl=DIR</a></td></tr>
+<tr><td align=right>Configuring Berkeley DB </td><td><a href="../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename=NAME</a></td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/faq.html#3">XA</a> FAQ</td></tr>
+<tr><td align=right></td><td><a href="../ref/xa/xa_intro.html#2">XA</a> Resource Manager</td></tr>
+</table>
+</center>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/txn.html b/libdb/docs/api_tcl/txn.html
new file mode 100644
index 0000000..717163b
--- /dev/null
+++ b/libdb/docs/api_tcl/txn.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: env txn</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>env</i> <b>txn</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>env txn
+ [-nosync]
+ [-nowait]
+ [-parent txnid]
+ [-sync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>env</i> <b>txn</b> command begins a transaction. The returned transaction
+handle is bound to a Tcl command of the form <b>env.txnX</b>, where
+X is an integer starting at 0 (for example, env0.txn0 and env0.txn1).
+It is through this Tcl command that the script accesses the transaction
+methods.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not synchronously flush the log when this transaction commits or
+prepares. This means the transaction will exhibit the ACI (atomicity,
+consistency, and isolation) properties, but not D (durability); that
+is, database integrity will be maintained, but it is possible that this
+transaction may be undone during recovery instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of
+the <b>berkdb env</b> interface.
+<p><dt>-nowait<dd>If a lock is unavailable for any Berkeley DB operation performed in the context
+of this transaction, throw a Tcl error immediately instead of blocking on
+the lock.
+<p><dt>-parent txnid<dd>Create the new transaction as a nested transaction, with the specified
+transaction indicated as its parent. Transactions may be nested to any
+level.
+<p><dt>-sync<dd>Synchronously flush the log when this transaction commits or prepares.
+This means the transaction will exhibit all of the ACID (atomicity,
+consistency, isolation, and durability) properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<b>-nosync</b> option was specified to the <b>berkdb env</b> interface.
+</dl>
+<p>The <i>env</i> <b>txn</b> command returns a transaction handle on success.
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/txn_abort.html b/libdb/docs/api_tcl/txn_abort.html
new file mode 100644
index 0000000..41b6392
--- /dev/null
+++ b/libdb/docs/api_tcl/txn_abort.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: txn abort</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>txn</i> <b>abort</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>txn abort
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>txn</i> <b>abort</b> command causes an abnormal termination of the
+transaction.
+<p>The log is played backward, and any necessary recovery operations are
+performed. After recovery is completed, all locks held by the
+transaction are acquired by the parent transaction in the case of a
+nested transaction, or released in the case of a non-nested transaction.
+As is the case for <i>txn</i> <b>commit</b>, applications that require strict
+two-phase locking should not explicitly release any locks.
+<p>In the case of nested transactions, aborting the parent transaction
+causes all children of that transaction to be aborted.
+<p>After <i>txn</i> <b>abort</b> has been called, regardless of its return, the
+<b>txn</b> handle may not be accessed again.
+<p>The <i>txn</i> <b>abort</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/txn_commit.html b/libdb/docs/api_tcl/txn_commit.html
new file mode 100644
index 0000000..d4220c7
--- /dev/null
+++ b/libdb/docs/api_tcl/txn_commit.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: txn commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><i>txn</i> <b>commit</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>txn commit
+ [-nosync]
+ [-sync]
+</pre></h3>
+<h1>Description</h1>
+<p>The <i>txn</i> <b>commit</b> command ends the transaction.
+<p>In the case of nested transactions, if the transaction is a parent
+transaction with unresolved (neither committed nor aborted) child
+transactions, the child transactions are aborted and the commit of the
+parent will succeed.
+<p>In the case of nested transactions, if the transaction is a child
+transaction, its locks are not released, but are acquired by its parent.
+Although the commit of the child transaction will succeed, the actual
+resolution of the child transaction is postponed until the parent
+transaction is committed or aborted; that is, if its parent transaction
+commits, it will be committed, and if its parent transaction aborts, it
+will be aborted.
+<p>If the <b>-nosync</b> option is not specified, a commit log record is
+written and flushed to disk, as are all previously written log records.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-nosync<dd>Do not synchronously flush the log. This means the transaction will
+exhibit the ACI (atomicity, consistency, and isolation) properties, but
+not D (durability); that is, database integrity will be maintained, but
+it is possible that this transaction may be undone during recovery
+instead of being redone.
+<p>This behavior may be set for an entire Berkeley DB environment as part of
+the <b>berkdb env</b> interface.
+<p><dt>-sync<dd>Synchronously flush the log. This means the transaction will exhibit
+all of the ACID (atomicity, consistency, isolation and durability)
+properties.
+<p>This behavior is the default for Berkeley DB environments unless the
+<b>-nosync</b> option was specified to the <b>berkdb env</b> or
+<i>env</i> <b>txn</b> interfaces.
+</dl>
+<p>After <i>txn</i> <b>commit</b> has been called, regardless of its return, the
+<b>txn</b> handle may not be accessed again. If <i>txn</i> <b>commit</b>
+encounters an error, this transaction and all child transactions of this
+transaction are aborted.
+<p>The <i>txn</i> <b>commit</b> command returns 0 on success, and in the case of error, a Tcl error
+is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/api_tcl/version.html b/libdb/docs/api_tcl/version.html
new file mode 100644
index 0000000..6cf3827
--- /dev/null
+++ b/libdb/docs/api_tcl/version.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkdb version</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1><b>berkdb version</b></h1>
+</td>
+<td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkdb version
+ [-string]
+</pre></h3>
+<h1>Description</h1>
+<p>Return a list of the form {major minor patch} for the major, minor and
+patch levels of the underlying Berkeley DB release.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt>-string<dd>Return a string with formatted Berkeley DB version information.
+</dl>
+<p>In the case of error, a Tcl error is thrown.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_tcl/tcl_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/images/api.gif b/libdb/docs/images/api.gif
new file mode 100644
index 0000000..dafd577
Binary files /dev/null and b/libdb/docs/images/api.gif differ
diff --git a/libdb/docs/images/next.gif b/libdb/docs/images/next.gif
new file mode 100644
index 0000000..667ee06
Binary files /dev/null and b/libdb/docs/images/next.gif differ
diff --git a/libdb/docs/images/prev.gif b/libdb/docs/images/prev.gif
new file mode 100644
index 0000000..11dfc52
Binary files /dev/null and b/libdb/docs/images/prev.gif differ
diff --git a/libdb/docs/images/ps.gif b/libdb/docs/images/ps.gif
new file mode 100644
index 0000000..0f565bc
Binary files /dev/null and b/libdb/docs/images/ps.gif differ
diff --git a/libdb/docs/images/ref.gif b/libdb/docs/images/ref.gif
new file mode 100644
index 0000000..75be9c1
Binary files /dev/null and b/libdb/docs/images/ref.gif differ
diff --git a/libdb/docs/images/sleepycat.gif b/libdb/docs/images/sleepycat.gif
new file mode 100644
index 0000000..76a24cb
Binary files /dev/null and b/libdb/docs/images/sleepycat.gif differ
diff --git a/libdb/docs/index.html b/libdb/docs/index.html
new file mode 100644
index 0000000..bab3620
--- /dev/null
+++ b/libdb/docs/index.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<html>
+<head>
+<title>Berkeley DB (Version: 4.1.25)</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+
+<p align=center>
+<img src="images/sleepycat.gif" alt="Sleepycat Software Inc.">
+</p>
+
+<center>
+<h1><b>Berkeley DB</b></h1>
+</center>
+
+<p>
+<table align=center cellpadding=4 border=2>
+<tr>
+ <th align=center width="50%">Interface Documentation</th>
+ <th align=center width="50%">Building Berkeley DB</th>
+</tr><tr valign=top>
+ <td>
+ <a href="api_c/c_index.html">C API</a><br>
+ <a href="api_c/c_pindex.html">C API Topic Index</a><br>
+ <p>
+ <a href="api_cxx/c_index.html">C++ API</a><br>
+ <a href="api_cxx/cxx_pindex.html">C++ API Topic Index</a><br>
+ <p>
+ <a href="api_java/c_index.html">Java API</a><br>
+ <a href="api_java/java_pindex.html">Java API Topic Index</a><br>
+ <p>
+ <a href="api_tcl/tcl_index.html">Tcl API</a><br>
+ </td><td>
+ <a href="ref/build_unix/intro.html">Building for UNIX/POSIX systems</a><p>
+ <a href="ref/build_vxworks/intro.html">Building for VxWorks</a><p>
+ <a href="ref/build_win/intro.html">Building for Win32</a><br>
+ <p>
+ <a href="ref/upgrade.4.1/toc.html">Upgrading Applications to the 4.1 release</a><br>
+ </td>
+
+</tr><tr valign=top>
+ <th align=center>Additional Documentation</th>
+ <th align=center>Company and Product Information</th>
+</tr><tr valign=top>
+ <td>
+ <a href="utility/index.html">Supporting Utilities</a><br>
+ <p>
+ <a href="reftoc.html">Programmer's Tutorial and Reference Guide</a><br>
+ </td><td>
+ <a href="sleepycat/contact.html">Contacting Sleepycat Software</a><br>
+ <a href="http://www.sleepycat.com">Sleepycat Software Home Page</a><br>
+ <a href="ref/intro/products.html">Sleepycat Software Product List</a><br>
+ <a href="http://www.sleepycat.com/update/index.html">Release Patches and Change Logs</a><br>
+ <a href="sleepycat/license.html">License</a>,&nbsp;&nbsp;<a href="ref/env/encrypt.html">Cryptography</a>,&nbsp;&nbsp;<a href="sleepycat/legal.html">Legal Notices</a><br>
+ </td>
+</tr>
+
+</table>
+
+<p>
+<center><b>
+Version 4.1.25, December 19, 2002<br>
+Copyright 1997-2002 Sleepycat Software, Inc. All Rights Reserved
+</b></center>
+
+</body>
+</html>
diff --git a/libdb/docs/ref/am/close.html b/libdb/docs/ref/am/close.html
new file mode 100644
index 0000000..0554abe
--- /dev/null
+++ b/libdb/docs/ref/am/close.html
@@ -0,0 +1,44 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/second.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database close</h1>
+<p>The <a href="../../api_c/db_close.html">DB-&gt;close</a> method is the standard interface for closing the database.
+By default, <a href="../../api_c/db_close.html">DB-&gt;close</a> also flushes all modified records from the
+database cache to disk.
+<p>There is one flag that you can set to customize <a href="../../api_c/db_close.html">DB-&gt;close</a>:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a><dd>Do not flush cached information to disk.
+</dl>
+<b>It is important to understand that flushing cached information
+to disk only minimizes the window of opportunity for corrupted data; it
+does not eliminate the possibility.</b>
+<p>While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must do one of the following:
+<p><ul type=disc>
+<li>Use transactions and logging with automatic recovery.
+<li>Use logging and application-specific recovery.
+<li>Edit a copy of the database, and, once all applications
+using the database have successfully called <a href="../../api_c/db_close.html">DB-&gt;close</a>, use
+system operations (for example, the POSIX rename system call) to
+atomically replace the original database with the updated copy.
+</ul>
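+<p>For example, the following is a brief sketch of closing a database in
+either mode; the handle name <b>dbp</b> and the error-reporting style are
+illustrative assumptions borrowed from the other examples in this guide:
+<p><blockquote><pre>int
+close_database(dbp, discard)
+ DB *dbp;
+ int discard;
+{
+ int ret;
+<p>
+ /* By default the cache is flushed; DB_NOSYNC skips the flush. */
+ if ((ret = dbp-&gt;close(dbp, discard ? DB_NOSYNC : 0)) != 0) {
+ fprintf(stderr, "DB-&gt;close: %s\n", db_strerror(ret));
+ return (1);
+ }
+ return (0);
+}</pre></blockquote>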
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/second.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/count.html b/libdb/docs/ref/am/count.html
new file mode 100644
index 0000000..91d5cf7
--- /dev/null
+++ b/libdb/docs/ref/am/count.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Data item count</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curclose.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Data item count</h1>
+<p>Once a cursor has been initialized to refer to a particular key in the
+database, it can be used to determine the number of data items that are
+stored for that key. The <a href="../../api_c/dbc_count.html">DBcursor-&gt;c_count</a> method returns
+this number of data items. The returned value is always one, unless
+the database supports duplicate data items, in which case it may be any
+number of items.
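+<p>For example, the following is a brief sketch that counts the data items
+stored for a single key; it assumes an open database handle <b>dbp</b>, an
+open cursor <b>dbcp</b>, and an invented key value:
+<p><blockquote><pre> DBT key, data;
+ db_recno_t count;
+ int ret;
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "apple";
+ key.size = strlen(key.data) + 1;
+<p>
+ /* Position the cursor on the key, then count its data items. */
+ if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_SET)) != 0 ||
+ (ret = dbcp-&gt;c_count(dbcp, &count, 0)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_count");
+ else
+ printf("%lu data items for this key\n", (unsigned long)count);</pre></blockquote>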
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curclose.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/curclose.html b/libdb/docs/ref/am/curclose.html
new file mode 100644
index 0000000..ff7cd2f
--- /dev/null
+++ b/libdb/docs/ref/am/curclose.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Cursor close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/count.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/align.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Cursor close</h1>
+<p>The <a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a> method is the standard interface for closing a cursor,
+after which the cursor may no longer be used. Although cursors are
+implicitly closed when the database they point to are closed, it is good
+programming practice to explicitly close cursors. In addition, in
+transactional systems, cursors may not exist outside of a transaction and
+so must be explicitly closed.
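+<p>For example, the following is a brief sketch assuming a cursor
+<b>dbcp</b> that was opened within the transaction <b>txn</b>; the cursor
+must be closed before the transaction is resolved:
+<p><blockquote><pre> int ret;
+<p>
+ /* Close the cursor first, then commit the transaction that owns it. */
+ if ((ret = dbcp-&gt;c_close(dbcp)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_close");
+ if ((ret = txn-&gt;commit(txn, 0)) != 0)
+ dbp-&gt;err(dbp, ret, "DB_TXN-&gt;commit");</pre></blockquote>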
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/count.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/align.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/curdel.html b/libdb/docs/ref/am/curdel.html
new file mode 100644
index 0000000..102e42d
--- /dev/null
+++ b/libdb/docs/ref/am/curdel.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deleting records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/curput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deleting records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a> method is the standard interface for deleting records from
+the database using a cursor. The <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a> method deletes the record
+to which the cursor currently refers. In all cases, the cursor position
+is unchanged after a delete.
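+<p>For example, the following is a brief sketch that removes a single
+record through a cursor, leaving any other duplicates for the key in
+place; the handle names and key value are illustrative assumptions:
+<p><blockquote><pre> DBT key, data;
+ int ret;
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "apple";
+ key.size = strlen(key.data) + 1;
+<p>
+ /* Move the cursor to the record, then delete only that record. */
+ if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_SET)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_get");
+ else if ((ret = dbcp-&gt;c_del(dbcp, 0)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_del");</pre></blockquote>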
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/curput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/curdup.html b/libdb/docs/ref/am/curdup.html
new file mode 100644
index 0000000..e88790d
--- /dev/null
+++ b/libdb/docs/ref/am/curdup.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Duplicating a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/curdel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Duplicating a cursor</h1>
+<p>Once a cursor has been initialized (for example, by a call to
+<a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>), it can be thought of as identifying a particular
+location in a database. The <a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a> method permits an application to
+create a new cursor that has the same locking and transactional
+information as the cursor from which it is copied, and which optionally
+refers to the same position in the database.
+<p>In order to maintain a cursor position when an application is using
+locking, locks are maintained on behalf of the cursor until the cursor is
+closed. In cases when an application is using locking without
+transactions, cursor duplication is often required to avoid
+self-deadlocks. For further details, refer to
+<a href="../../ref/lock/am_conv.html">Access method locking conventions</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/curdel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/curget.html b/libdb/docs/ref/am/curget.html
new file mode 100644
index 0000000..374d747
--- /dev/null
+++ b/libdb/docs/ref/am/curget.html
@@ -0,0 +1,135 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method is the standard interface for retrieving records from
+the database with a cursor. The <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method takes a flag which
+controls how the cursor is positioned within the database and returns the
+key/data item associated with that positioning. Similar to
+<a href="../../api_c/db_get.html">DB-&gt;get</a>, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> may also take a supplied key and retrieve
+the data associated with that key from the database. There are several
+flags that you can set to customize retrieval.
+<h3>Cursor position flags</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a>, <a href="../../api_c/dbc_get.html#DB_LAST">DB_LAST</a><dd>Return the first (last) record in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a>, <a href="../../api_c/dbc_get.html#DB_PREV">DB_PREV</a><dd>Return the next (previous) record in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a><dd>Return the next record in the database, if it is a duplicate data item
+for the current key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a>, <a href="../../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a><dd>Return the next (previous) record in the database that is not a
+duplicate data item for the current key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a><dd>Return the record from the database to which the cursor currently refers.
+</dl>
+<h3>Retrieving specific key/data pairs</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_get.html#DB_SET">DB_SET</a><dd>Return the record from the database that matches the supplied key. In
+the case of duplicates the first duplicate is returned and the cursor
+is positioned at the beginning of the duplicate list. The user can then
+traverse the duplicate entries for the key.
+<p><dt><a href="../../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a><dd>Return the smallest record in the database greater than or equal to the
+supplied key. This functionality permits partial key matches and range
+searches in the Btree access method.
+<p><dt><a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a><dd>Return the record from the database that matches both the supplied key
+and data items. This is particularly useful when there are large
+numbers of duplicate records for a key, as it allows the cursor to
+easily be positioned at the correct place for traversal of some part of
+a large set of duplicate records.
+<p><dt><a href="../../api_c/db_get.html#DB_GET_BOTH_RANGE">DB_GET_BOTH_RANGE</a><dd>Return the smallest record in the database greater than or equal to the
+supplied key and data items.
+</dl>
+<h3>Retrieving based on record numbers</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it is
+possible to search it by logical record number, retrieve a specific
+record based on a record number argument.
+<p><dt><a href="../../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it is
+possible to search it by logical record number, return the record number
+for the record to which the cursor refers.
+</dl>
+<h3>Special-purpose flags</h3>
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_CONSUME">DB_CONSUME</a><dd>Read-and-delete: the first record (the head) of the queue is returned and
+deleted. The underlying database must be a Queue.
+<p><dt><a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a><dd>Read-modify-write: acquire write locks instead of read locks during
+retrieval. This can enhance performance in threaded applications by
+reducing the chance of deadlock.
+</dl>
+<p>In all cases, the cursor is repositioned by a <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> operation
+to point to the newly-returned key/data pair in the database.
+<p>The following is a code example showing a cursor walking through a
+database and displaying the records it contains to the standard
+output:
+<p><blockquote><pre>int
+display(database)
+ char *database;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int close_db, close_dbc, ret;
+<p>
+ close_db = close_dbc = 0;
+<p>
+ /* Create the database handle. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ /* The handle exists and must be closed, even if the open below fails. */
+ close_db = 1;
+<p>
+ /* Turn on additional error output. */
+ dbp-&gt;set_errfile(dbp, stderr);
+ dbp-&gt;set_errpfx(dbp, progname);
+<p>
+ /* Open the database. */
+ if ((ret =
+ dbp-&gt;open(dbp, NULL, database, NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s: DB-&gt;open", database);
+ goto err;
+ }
+<p>
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ goto err;
+ }
+ close_dbc = 1;
+<p>
+ /* Initialize the key/data return pair. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+<p>
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;get");
+ goto err;
+ }
+<p>
+err: if (close_dbc && (ret = dbcp-&gt;c_close(dbcp)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;close");
+ if (close_db && (ret = dbp-&gt;close(dbp, 0)) != 0)
+ fprintf(stderr,
+ "%s: DB-&gt;close: %s\n", progname, db_strerror(ret));
+ return (0);
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/curput.html b/libdb/docs/ref/am/curput.html
new file mode 100644
index 0000000..d96a3e5
--- /dev/null
+++ b/libdb/docs/ref/am/curput.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Storing records with a cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/curget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Storing records with a cursor</h1>
+<p>The <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> method is the standard interface for storing records into
+the database with a cursor. In general, <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> takes a key and
+inserts the associated data into the database, at a location controlled
+by a specified flag.
+<p>There are several flags that you can set to customize storage:
+<p><dl compact>
+<p><dt><a href="../../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a><dd>Create a new record, immediately after the record to which the cursor
+refers.
+<p><dt><a href="../../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a><dd>Create a new record, immediately before the record to which the cursor
+refers.
+<p><dt><a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a><dd>Replace the data part of the record to which the cursor refers.
+<p><dt><a href="../../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a><dd>Create a new record as the first of the duplicate records for the
+supplied key.
+<p><dt><a href="../../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a><dd>Create a new record, as the last of the duplicate records for the supplied
+key.
+</dl>
+<p>In all cases, the cursor is repositioned by a <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> operation
+to point to the newly inserted key/data pair in the database.
+<p>The following is a code example showing a cursor storing two data items
+in a database that supports duplicate data items:
+<p><blockquote><pre>int
+store(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+<p>
+ /*
+ * The DB handle for a Btree database supporting duplicate data
+ * items is the argument; acquire a cursor for the database.
+ */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ /* No cursor was created, so there is nothing to close at err. */
+ return (1);
+ }
+<p>
+ /* Initialize the key. */
+ memset(&key, 0, sizeof(key));
+ key.data = "new key";
+ key.size = strlen(key.data) + 1;
+<p>
+ /* Initialize the data to be the first of two duplicate records. */
+ memset(&data, 0, sizeof(data));
+ data.data = "new key's data: entry #1";
+ data.size = strlen(data.data) + 1;
+<p>
+ /* Store the first of the two duplicate records. */
+ if ((ret = dbcp-&gt;c_put(dbcp, &key, &data, DB_KEYFIRST)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_put");
+<p>
+ /* Initialize the data to be the second of two duplicate records. */
+ data.data = "new key's data: entry #2";
+ data.size = strlen(data.data) + 1;
+<p>
+ /*
+ * Store the second of the two duplicate records. No duplicate
+ * record sort function has been specified, so we explicitly
+ * store the record as the last of the duplicate set.
+ */
+ if ((ret = dbcp-&gt;c_put(dbcp, &key, &data, DB_KEYLAST)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_put");
+<p>
+err: if ((ret = dbcp-&gt;c_close(dbcp)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;close");
+<p>
+ return (0);
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/curget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curdel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/cursor.html b/libdb/docs/ref/am/cursor.html
new file mode 100644
index 0000000..89b3035
--- /dev/null
+++ b/libdb/docs/ref/am/cursor.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Cursor operations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/second.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Cursor operations</h1>
+<p>A database cursor refers to a single key/data pair in the database. It
+supports traversal of the database and is the only way to access
+individual duplicate data items. Cursors are used for operating on
+collections of records, for iterating over a database, and for saving
+handles to individual records, so that they can be modified after they
+have been read.
+<p>The <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> method is the standard interface for opening a cursor
+into a database. Upon return the cursor is uninitialized -- positioning
+occurs as part of the first cursor operation.
+<p>Once a database cursor has been opened, records may be retrieved (<a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>),
+stored (<a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a>), and deleted (<a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a>).
+<p>Additional operations supported by the cursor handle include duplication
+(<a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a>), equality join (<a href="../../api_c/db_join.html">DB-&gt;join</a>), and a count of
+duplicate data items (<a href="../../api_c/dbc_count.html">DBcursor-&gt;c_count</a>). Cursors are eventually closed
+using <a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a>.
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Cursors and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a></td><td>Close a cursor</td></tr>
+<tr><td><a href="../../api_c/dbc_count.html">DBcursor-&gt;c_count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a></td><td>Delete by cursor</td></tr>
+<tr><td><a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../../api_c/dbc_get.html">DBcursor-&gt;c_pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a></td><td>Store by cursor</td></tr>
+</table>
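+<p>For example, the following is a brief sketch of the usual cursor life
+cycle, assuming an open database handle <b>dbp</b>:
+<p><blockquote><pre> DBC *dbcp;
+ DBT key, data;
+ int ret;
+<p>
+ /* The cursor is uninitialized until the first operation positions it. */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0)
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ else {
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_FIRST)) != 0)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_get");
+ (void)dbcp-&gt;c_close(dbcp);
+ }</pre></blockquote>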
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/second.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/curget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/delete.html b/libdb/docs/ref/am/delete.html
new file mode 100644
index 0000000..80061cf
--- /dev/null
+++ b/libdb/docs/ref/am/delete.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deleting records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deleting records</h1>
+<p>The <a href="../../api_c/db_del.html">DB-&gt;del</a> method is the standard interface for deleting records from
+the database. In general, <a href="../../api_c/db_del.html">DB-&gt;del</a> takes a key and deletes the
+data item associated with it from the database.
+<p>If the database has been configured to support duplicate records, the
+<a href="../../api_c/db_del.html">DB-&gt;del</a> method will remove all of the duplicate records. To remove
+individual duplicate records, you must use a Berkeley DB cursor interface.
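+<p>For example, the following is a brief sketch of deleting a key outside
+of any transaction; the function name and key string are illustrative
+assumptions:
+<p><blockquote><pre>int
+delete_record(dbp, name)
+ DB *dbp;
+ char *name;
+{
+ DBT key;
+ int ret;
+<p>
+ memset(&key, 0, sizeof(key));
+ key.data = name;
+ key.size = strlen(name) + 1;
+<p>
+ /* This removes every duplicate stored under the key. */
+ switch (ret = dbp-&gt;del(dbp, NULL, &key, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ return (0);
+ default:
+ dbp-&gt;err(dbp, ret, "DB-&gt;del");
+ return (1);
+ }
+}</pre></blockquote>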
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/get.html b/libdb/docs/ref/am/get.html
new file mode 100644
index 0000000..4be6b6d
--- /dev/null
+++ b/libdb/docs/ref/am/get.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/opensub.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving records</h1>
+<p>The <a href="../../api_c/db_get.html">DB-&gt;get</a> method is the standard interface for retrieving records from
+the database. In general, <a href="../../api_c/db_get.html">DB-&gt;get</a> takes a key and returns the
+associated data from the database.
+<p>There are a few flags that you can set to customize retrieval:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a><dd>Search for a matching key and data item, that is, only return success
+if both the key and the data items match those stored in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a><dd>Read-modify-write: acquire write locks instead of read locks during
+retrieval. This can enhance performance in threaded applications by
+reducing the chance of deadlock.
+<p><dt><a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a><dd>If the underlying database is a Btree, and was configured so that it
+is possible to search it by logical record number, retrieve a specific
+record.
+</dl>
+<p>If the database has been configured to support duplicate records,
+<a href="../../api_c/db_get.html">DB-&gt;get</a> will always return the first data item in the duplicate
+set.
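+<p>A minimal retrieval sketch, assuming an open handle <b>dbp</b>, a
+transaction handle <b>txn</b>, a <b>handle_error</b> routine and a
+hypothetical key, might look as follows; <a href="../../api_c/db_get.html#DB_GET_BOTH">DB_GET_BOTH</a>
+could be passed in place of 0 to require an exact key/data match:
+<p><blockquote><pre>DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+key.data = "apple";
+key.size = sizeof("apple") - 1;
+<p>
+if ((ret = dbp-&gt;get(dbp, txn, &key, &data, 0)) == 0)
+        printf("found: %.*s\n", (int)data.size, (char *)data.data);
+else if (ret != DB_NOTFOUND)
+        handle_error(ret);</pre></blockquote>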
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/opensub.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/join.html b/libdb/docs/ref/am/join.html
new file mode 100644
index 0000000..fb3022b
--- /dev/null
+++ b/libdb/docs/ref/am/join.html
@@ -0,0 +1,191 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Equality Join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/curdup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/count.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Equality Join</h1>
+<p>Berkeley DB supports "equality" (also known as "natural"), joins on secondary
+indices. An equality join is a method of retrieving data from a primary
+database using criteria stored in a set of secondary indices. It
+requires the data be organized as a primary database which contains the
+primary key and primary data field, and a set of secondary indices.
+Each of the secondary indices is indexed by a different secondary key,
+and, for each key in a secondary index, there is a set of duplicate data
+items that match the primary keys in the primary database.
+<p>For example, let's assume the need for an application that will return
+the names of stores in which one can buy fruit of a given color. We
+would first construct a primary database that lists types of fruit as
+the key item, and the store where you can buy them as the data item:
+<p><blockquote><p><table border=1>
+<tr><th>Primary key:</th><th>Primary data:</th></tr>
+<tr> <td align=left>apple</td> <td align=left>Convenience Store</td> </tr>
+<tr> <td align=left>blueberry</td> <td align=left>Farmer's Market</td> </tr>
+<tr> <td align=left>peach</td> <td align=left>Shopway</td> </tr>
+<tr> <td align=left>pear</td> <td align=left>Farmer's Market</td> </tr>
+<tr> <td align=left>raspberry</td> <td align=left>Shopway</td> </tr>
+<tr> <td align=left>strawberry</td> <td align=left>Farmer's Market</td> </tr>
+</table></blockquote>
+<p>We would then create a secondary index with the key <b>color</b>, and,
+as the data items, the names of fruits of different colors.
+<p><blockquote><p><table border=1>
+<tr><th>Secondary key:</th><th>Secondary data:</th></tr>
+<tr> <td align=left>blue</td> <td align=left>blueberry</td> </tr>
+<tr> <td align=left>red</td> <td align=left>apple</td> </tr>
+<tr> <td align=left>red</td> <td align=left>raspberry</td> </tr>
+<tr> <td align=left>red</td> <td align=left>strawberry</td> </tr>
+<tr> <td align=left>yellow</td> <td align=left>peach</td> </tr>
+<tr> <td align=left>yellow</td> <td align=left>pear</td> </tr>
+</table></blockquote>
+<p>This secondary index would allow an application to look up a color, and
+then use the data items to look up the stores where the colored fruit
+could be purchased. For example, by first looking up <b>blue</b>,
+the data item <b>blueberry</b> could be used as the lookup key in the
+primary database, returning <b>Farmer's Market</b>.
+<p>Your data must be organized in the following manner in order to use the
+<a href="../../api_c/db_join.html">DB-&gt;join</a> method:
+<p><ol>
+<p><li>The actual data should be stored in the database represented by the
+<a href="../../api_c/db_class.html">DB</a> object used to invoke this function. Generally, this
+<a href="../../api_c/db_class.html">DB</a> object is called the <i>primary</i>.
+<p><li>Secondary indices should be stored in separate databases, whose keys
+are the values of the secondary indices and whose data items are the
+primary keys corresponding to the records having the designated
+secondary key value. It is acceptable (and expected) that there may be
+duplicate entries in the secondary indices.
+<p>These duplicate entries should be sorted for performance reasons, although
+it is not required. For more information see the <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag
+to the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method.
+</ol>
+<p>The <a href="../../api_c/db_join.html">DB-&gt;join</a> method reviews a list of secondary keys and,
+when it finds a data item that appears as a data item for all of the
+secondary keys, uses that data item as a lookup into the primary
+database and returns the associated data item.
+<p>If there were another secondary index that had as its key the
+<b>cost</b> of the fruit, a similar lookup could be done on stores
+where inexpensive fruit could be purchased:
+<p><blockquote><p><table border=1>
+<tr><th>Secondary key:</th><th>Secondary data:</th></tr>
+<tr> <td align=left>expensive</td> <td align=left>blueberry</td> </tr>
+<tr> <td align=left>expensive</td> <td align=left>peach</td> </tr>
+<tr> <td align=left>expensive</td> <td align=left>pear</td> </tr>
+<tr> <td align=left>expensive</td> <td align=left>strawberry</td> </tr>
+<tr> <td align=left>inexpensive</td> <td align=left>apple</td> </tr>
+<tr> <td align=left>inexpensive</td> <td align=left>pear</td> </tr>
+<tr> <td align=left>inexpensive</td> <td align=left>raspberry</td> </tr>
+</table></blockquote>
+<p>The <a href="../../api_c/db_join.html">DB-&gt;join</a> method provides equality join functionality. While not
+strictly cursor functionality, in that it is not a method off a cursor
+handle, it is more closely related to the cursor operations than to the
+standard <a href="../../api_c/db_class.html">DB</a> operations.
+<p>It is also possible to do lookups based on multiple criteria in a single
+operation. For example, it is possible to look up fruits that are both
+red and expensive in a single operation. If the same fruit appeared as
+a data item in both the color and expense indices, then that fruit name
+would be used as the key for retrieval from the primary index, and would
+then return the store where expensive, red fruit could be purchased.
+<h3>Example</h3>
+<p>Consider the following three databases:
+<p><dl compact>
+<p><dt>personnel<dd><p><ul type=disc>
+<li>key = SSN
+<li>data = record containing name, address, phone number, job title
+</ul>
+<p><dt>lastname<dd><p><ul type=disc>
+<li>key = lastname
+<li>data = SSN
+</ul>
+<p><dt>jobs<dd><p><ul type=disc>
+<li>key = job title
+<li>data = SSN
+</ul>
+</dl>
+<p>Consider the following query:
+<p><blockquote><pre>Return the personnel records of all people named smith with the job
+title manager.</pre></blockquote>
+<p>This query finds all the records in the primary database (personnel)
+for which the criteria <b>lastname=smith and job title=manager</b> are
+true.
+<p>Assume that all databases have been properly opened and have the
+handles: pers_db, name_db, job_db. We also assume that we have an
+active transaction to which the handle txn refers.
+<p><blockquote><pre>DBC *name_curs, *job_curs, *join_curs;
+DBC *carray[3];
+DBT key, data;
+int ret, tret;
+<p>
+name_curs = NULL;
+job_curs = NULL;
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+<p>
+if ((ret =
+ name_db-&gt;cursor(name_db, txn, &name_curs, 0)) != 0)
+ goto err;
+key.data = "smith";
+key.size = sizeof("smith");
+if ((ret =
+ name_curs-&gt;c_get(name_curs, &key, &data, DB_SET)) != 0)
+ goto err;
+<p>
+if ((ret = job_db-&gt;cursor(job_db, txn, &job_curs, 0)) != 0)
+ goto err;
+key.data = "manager";
+key.size = sizeof("manager");
+if ((ret =
+ job_curs-&gt;c_get(job_curs, &key, &data, DB_SET)) != 0)
+ goto err;
+<p>
+carray[0] = name_curs;
+carray[1] = job_curs;
+carray[2] = NULL;
+<p>
+if ((ret =
+ pers_db-&gt;join(pers_db, carray, &join_curs, 0)) != 0)
+ goto err;
+while ((ret =
+ join_curs-&gt;c_get(join_curs, &key, &data, 0)) == 0) {
+ /* Process record returned in key/data. */
+}
+<p>
+/*
+ * If we exited the loop because we ran out of records,
+ * then it has completed successfully.
+ */
+if (ret == DB_NOTFOUND)
+ ret = 0;
+<p>
+err:
+if (join_curs != NULL &&
+ (tret = join_curs-&gt;c_close(join_curs)) != 0 && ret == 0)
+ ret = tret;
+if (name_curs != NULL &&
+ (tret = name_curs-&gt;c_close(name_curs)) != 0 && ret == 0)
+ ret = tret;
+if (job_curs != NULL &&
+ (tret = job_curs-&gt;c_close(job_curs)) != 0 && ret == 0)
+ ret = tret;
+<p>
+return (ret);
+</pre></blockquote>
+<p>The name cursor is positioned at the beginning of the duplicate list
+for <b>smith</b> and the job cursor is placed at the beginning of
+the duplicate list for <b>manager</b>. The join cursor is returned
+from the join method. This code then loops over the join cursor, retrieving
+the matching personnel records until no more remain.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/curdup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/count.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/open.html b/libdb/docs/ref/am/open.html
new file mode 100644
index 0000000..1509add
--- /dev/null
+++ b/libdb/docs/ref/am/open.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/ops.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/opensub.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database open</h1>
+<p>The <a href="../../api_c/db_open.html">DB-&gt;open</a> method is the standard interface for opening a database,
+and takes five arguments (the flags argument is described separately below):
+<p><dl compact>
+<p><dt>file<dd>The name of the file to be opened.
+<p><dt>database<dd>An optional database name.
+<p><dt>type<dd>The type of database to open. This value will be one of the four access
+methods Berkeley DB supports: DB_BTREE, DB_HASH, DB_QUEUE or DB_RECNO, or the
+special value DB_UNKNOWN, which allows you to open an existing file
+without knowing its type.
+<p><dt>mode<dd>The permissions to give to any created file.
+</dl>
+<p>There are a few flags that you can set to customize open:
+<p><dl compact>
+<p><dt><a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a><dd>Create the underlying database and any necessary physical files.
+<p><dt><a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a><dd>Do not map this database into process memory.
+<p><dt><a href="../../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a><dd>Treat the data base as read-only.
+<p><dt><a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a><dd>The returned handle is free-threaded, that is, it can be used
+simultaneously by multiple threads within the process.
+<p><dt><a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a><dd>Physically truncate the underlying database file, discarding all
+databases it contained. Underlying filesystem primitives are used to
+implement this flag. For this reason it is only applicable to the
+physical file and cannot be used to discard individual databases from
+within physical files.
+<p><dt><a href="../../api_c/db_set_feedback.html#DB_UPGRADE">DB_UPGRADE</a><dd>Upgrade the database format as necessary.
+</dl>
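+<p>A minimal sketch of creating a handle and opening (or creating) a Btree
+database follows, assuming a database environment handle <b>dbenv</b> (which
+may be NULL for a stand-alone database), a hypothetical file name and a
+<b>handle_error</b> routine:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+        handle_error(ret);
+if ((ret = dbp-&gt;open(dbp, NULL,
+    "inventory.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+        handle_error(ret);</pre></blockquote>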
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/ops.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/opensub.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/opensub.html b/libdb/docs/ref/am/opensub.html
new file mode 100644
index 0000000..f725307
--- /dev/null
+++ b/libdb/docs/ref/am/opensub.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening multiple databases in a single file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening multiple databases in a single file</h1>
+<p>Applications may create multiple databases within a single physical
+file. This is useful when the databases are both numerous and
+reasonably small, in order to avoid creating a large number of
+underlying files, or when it is desirable to include secondary index
+databases in the same file as the primary index database. Multiple
+databases are an administrative convenience and using them is unlikely
+to effect database performance. To open or create a file that will
+include more than a single database, specify a database name when
+calling the <a href="../../api_c/db_open.html">DB-&gt;open</a> method.
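+<p>For example, assuming an environment handle <b>dbenv</b> in which locking
+and a shared memory pool have been configured, a <b>handle_error</b> routine
+and hypothetical names, two databases might be created in the single
+physical file "school.db" as follows:
+<p><blockquote><pre>DB *pdbp, *sdbp;
+int ret;
+<p>
+if ((ret = db_create(&pdbp, dbenv, 0)) != 0)
+        handle_error(ret);
+if ((ret = pdbp-&gt;open(pdbp, NULL,
+    "school.db", "personnel", DB_BTREE, DB_CREATE, 0600)) != 0)
+        handle_error(ret);
+<p>
+if ((ret = db_create(&sdbp, dbenv, 0)) != 0)
+        handle_error(ret);
+if ((ret = sdbp-&gt;open(sdbp, NULL,
+    "school.db", "lastname", DB_BTREE, DB_CREATE, 0600)) != 0)
+        handle_error(ret);</pre></blockquote>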
+<p>Physical files do not need to be comprised of a single type of database,
+and databases in a file may be of any mixture of types, except for Queue
+databases. Queue databases must be created one per file and cannot
+share a file with any other database type. There is no limit on the
+number of databases that may be created in a single file other than the
+standard Berkeley DB file size and disk space limitations.
+<p>It is an error to attempt to open a second database in a file that was
+not initially created using a database name, that is, the file must
+initially be specified as capable of containing multiple databases for a
+second database to be created in it.
+<p>It is not an error to open a file that contains multiple databases without
+specifying a database name; however, the database type should be specified
+as DB_UNKNOWN and the database must be opened read-only. The handle that
+is returned from such a call is a handle on a database whose key values
+are the names of the databases stored in the database file and whose data
+values are opaque objects. No keys or data values may be modified or
+stored using this database handle.
+<p>Storing multiple databases in a single file is identical to storing each
+database in a separate file with the exception of some configuration
+information and the likely need for locking and a shared underlying
+memory pool.
+<p>There are four types of configuration information that must be specified
+consistently for all databases in a file, rather than differing on a
+per-database basis: byte order, checksum behavior, encryption behavior,
+and page size. When creating additional databases in a file,
+any of these configuration values specified must be consistent with the
+existing databases in the file or an error will be returned.
+<p>
+An additional difference is how locking and the underlying memory pool
+services must be configured. As an example, consider two databases
+instantiated in two different physical files. If access to each
+separate database is single-threaded, there is no reason to perform any
+locking of any kind, and the two databases may be read and written
+simultaneously. Further, there would be no requirement to create a
+shared database environment in which to open the databases.
+<p>Because multiple databases in a file exist in a single physical file,
+opening two databases in the same file requires locking be enabled
+(unless access to the databases is known to be single-threaded, that
+is, only one of the databases is ever read or written at a time).
+As the locks for the two databases can only conflict during page
+allocation, this additional locking is unlikely to affect performance.
+<p>Also, because multiple databases in a file exist in a single physical
+file, opening two databases in the same file requires the databases
+share an underlying memory pool so that per-physical-file information
+common between the two databases is updated correctly.
+<p>In summary, applications opening multiple databases in a single file
+will almost certainly need to create a shared database environment.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/ops.html b/libdb/docs/ref/am/ops.html
new file mode 100644
index 0000000..1a209ce
--- /dev/null
+++ b/libdb/docs/ref/am/ops.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Access method operations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Access method operations</h1>
+<p>Once a database handle has been created using <a href="../../api_c/db_create.html">db_create</a>, there
+are several standard access method operations. Each of these operations
+is performed using a method referred to by the returned handle.
+Generally, the database will be opened using <a href="../../api_c/db_open.html">DB-&gt;open</a>. If the
+database is from an old release of Berkeley DB, it may need to be upgraded to
+the current release before it is opened using <a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>.
+<p>Once a database has been opened, records may be retrieved (<a href="../../api_c/db_get.html">DB-&gt;get</a>),
+stored (<a href="../../api_c/db_put.html">DB-&gt;put</a>), and deleted (<a href="../../api_c/db_del.html">DB-&gt;del</a>).
+<p>Additional operations supported by the database handle include
+statistics (<a href="../../api_c/db_stat.html">DB-&gt;stat</a>), truncation (<a href="../../api_c/db_truncate.html">DB-&gt;truncate</a>),
+version upgrade (<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>), verification and salvage
+(<a href="../../api_c/db_verify.html">DB-&gt;verify</a>), flushing to a backing file (<a href="../../api_c/db_sync.html">DB-&gt;sync</a>),
+and association of secondary indices (<a href="../../api_c/db_associate.html">DB-&gt;associate</a>). Database
+handles are eventually closed using <a href="../../api_c/db_close.html">DB-&gt;close</a>.
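+<p>A sketch of that overall life cycle, assuming a hypothetical file name and
+a <b>handle_error</b> routine, might be:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        handle_error(ret);
+if ((ret = dbp-&gt;open(dbp, NULL,
+    "example.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+        handle_error(ret);
+<p>
+/* ... DB-&gt;get, DB-&gt;put and DB-&gt;del calls go here ... */
+<p>
+if ((ret = dbp-&gt;close(dbp, 0)) != 0)
+        handle_error(ret);</pre></blockquote>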
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Databases and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/db_associate.html">DB-&gt;associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><a href="../../api_c/db_close.html">DB-&gt;close</a></td><td>Close a database</td></tr>
+<tr><td><a href="../../api_c/db_cursor.html">DB-&gt;cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><a href="../../api_c/db_del.html">DB-&gt;del</a></td><td>Delete items from a database</td></tr>
+<tr><td><a href="../../api_c/db_err.html">DB-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../../api_c/db_err.html">DB-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../../api_c/db_fd.html">DB-&gt;fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><a href="../../api_c/db_get.html">DB-&gt;get</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><a href="../../api_c/db_get_type.html">DB-&gt;get_type</a></td><td>Return the database type</td></tr>
+<tr><td><a href="../../api_c/db_join.html">DB-&gt;join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><a href="../../api_c/db_key_range.html">DB-&gt;key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><a href="../../api_c/db_open.html">DB-&gt;open</a></td><td>Open a database</td></tr>
+<tr><td><a href="../../api_c/db_get.html">DB-&gt;pget</a></td><td>Get items from a database</td></tr>
+<tr><td><a href="../../api_c/db_put.html">DB-&gt;put</a></td><td>Store items into a database</td></tr>
+<tr><td><a href="../../api_c/db_remove.html">DB-&gt;remove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../../api_c/db_rename.html">DB-&gt;rename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../../api_c/db_set_append_recno.html">DB-&gt;set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><a href="../../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><a href="../../api_c/db_set_cache_priority.html">DB-&gt;set_cache_priority</a></td><td>Set the database cache priority</td></tr>
+<tr><td><a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><a href="../../api_c/db_set_encrypt.html">DB-&gt;set_encrypt</a></td><td>Set the database cryptographic key</td></tr>
+<tr><td><a href="../../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../../api_cxx/db_set_error_stream.html">Db::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../../api_c/db_set_feedback.html">DB-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><a href="../../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><a href="../../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><a href="../../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><a href="../../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><a href="../../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><a href="../../api_c/db_stat.html">DB-&gt;stat</a></td><td>Return database statistics</td></tr>
+<tr><td><a href="../../api_c/db_sync.html">DB-&gt;sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><a href="../../api_c/db_truncate.html">DB-&gt;truncate</a></td><td>Empty a database</td></tr>
+<tr><td><a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><a href="../../api_c/db_verify.html">DB-&gt;verify</a></td><td>Verify/salvage a database</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/put.html b/libdb/docs/ref/am/put.html
new file mode 100644
index 0000000..689a62d
--- /dev/null
+++ b/libdb/docs/ref/am/put.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Storing records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/delete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Storing records</h1>
+<p>The <a href="../../api_c/db_put.html">DB-&gt;put</a> method is the standard interface for storing records into
+the database. In general, <a href="../../api_c/db_put.html">DB-&gt;put</a> takes a key and stores the
+associated data into the database.
+<p>There are a few flags that you can set to customize storage:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a><dd>Simply append the data to the end of the database, treating the database
+much like a simple log. This flag is only valid for the Queue and Recno
+access methods.
+<p><dt><a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a><dd>Only store the data item if the key does not already appear in the database.
+</dl>
+<p>If the database has been configured to support duplicate records, the
+<a href="../../api_c/db_put.html">DB-&gt;put</a> method will add the new data value at the end of the duplicate
+set. If the database supports sorted duplicates, the new data value is
+inserted at the correct sorted location.
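+<p>A minimal storage sketch, assuming an open handle <b>dbp</b>, a transaction
+handle <b>txn</b>, a <b>handle_error</b> routine and hypothetical values;
+passing <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a> causes the call to return
+DB_KEYEXIST rather than overwrite an existing key:
+<p><blockquote><pre>DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+key.data = "apple";
+key.size = sizeof("apple") - 1;
+data.data = "Convenience Store";
+data.size = sizeof("Convenience Store") - 1;
+<p>
+ret = dbp-&gt;put(dbp, txn, &key, &data, DB_NOOVERWRITE);
+if (ret != 0 && ret != DB_KEYEXIST)
+        handle_error(ret);</pre></blockquote>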
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/delete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/second.html b/libdb/docs/ref/am/second.html
new file mode 100644
index 0000000..82edaf2
--- /dev/null
+++ b/libdb/docs/ref/am/second.html
@@ -0,0 +1,218 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Secondary indices</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Secondary indices</h1>
+<p>A secondary index, put simply, is a way to efficiently access records
+in a database (the primary) by means of some piece of information other
+than the usual (primary) key. In Berkeley DB, this index is simply another
+database whose keys are these pieces of information (the secondary
+keys), and whose data are the primary keys. Secondary indices can be
+(and often are) created manually by the application; there is no
+disadvantage, other than complexity, to doing so. However, when the
+secondary key can be mechanically derived from the primary key and datum
+that it points to, as is frequently the case, Berkeley DB can automatically
+and transparently manage secondary indices.
+<p>As an example of how secondary indices might be used, consider a
+database containing a list of students at a college, each of whom has
+a unique student ID number. A typical database would use the student
+ID number as the key; however, one might also reasonably want to be
+able to look up students by last name. To do this, one would construct
+a secondary index in which the secondary key was this last name.
+<p>In SQL, this would be done by executing something like the following:
+<p><blockquote><pre>CREATE TABLE students(student_id CHAR(4) NOT NULL,
+ lastname CHAR(15), firstname CHAR(15), PRIMARY KEY(student_id));
+CREATE INDEX lname ON students(lastname);</pre></blockquote>
+<p>In Berkeley DB, this would work as follows:
+<pre><p><blockquote>struct student_record {
+ char student_id[4];
+ char last_name[15];
+ char first_name[15];
+};
+<p>
+void
+second()
+{
+ DB *dbp, *sdbp;
+ int ret;
+ <p>
+ /* Open/create primary */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ handle_error(ret);
+ if ((ret = dbp-&gt;open(dbp, NULL,
+ "students.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ handle_error(ret);
+ <p>
+ /*
+ * Open/create secondary. Note that it supports duplicate data
+ * items, since last names might not be unique.
+ */
+ if ((ret = db_create(&sdbp, dbenv, 0)) != 0)
+ handle_error(ret);
+ if ((ret = sdbp-&gt;set_flags(sdbp, DB_DUP | DB_DUPSORT)) != 0)
+ handle_error(ret);
+ if ((ret = sdbp-&gt;open(sdbp, NULL,
+ "lastname.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ handle_error(ret);
+ <p>
+ /* Associate the secondary with the primary. */
+ if ((ret = dbp-&gt;associate(dbp, NULL, sdbp, getname, 0)) != 0)
+ handle_error(ret);
+}
+<p>
+/*
+ * getname -- extracts a secondary key (the last name) from a primary
+ * key/data pair
+ */
+int
+getname(dbp, pkey, pdata, skey)
+ DB *dbp;
+ const DBT *pkey, *pdata;
+ DBT *skey;
+{
+ /*
+ * Since the secondary key is a simple structure member of the
+ * record, we don't have to do anything fancy to return it. If
+ * we have composite keys that need to be constructed from the
+ * record, rather than simply pointing into it, then the user's
+ * function might need to allocate space and copy data. In
+ * this case, the DB_DBT_APPMALLOC flag should be set in the
+ * secondary key DBT.
+ */
+ memset(skey, 0, sizeof(DBT));
+ skey-&gt;data = ((struct student_record *)pdata-&gt;data)-&gt;last_name;
+ skey-&gt;size = sizeof((struct student_record *)pdata-&gt;data)-&gt;last_name;
+ return (0);
+}</blockquote></pre>
+<p>From the application's perspective, putting things into the database
+works exactly as it does without a secondary index; one can simply
+insert records into the primary database. In SQL one would do the
+following:
+<p><blockquote><pre>INSERT INTO students
+ VALUES ("WC42", "Churchill ", "Winston ");</pre></blockquote>
+<p>and in Berkeley DB, one does:
+<p><blockquote><pre>struct student_record s;
+DBT data, key;
+<p>
+memset(&key, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+memset(&s, 0, sizeof(struct student_record));
+key.data = "WC42";
+key.size = 4;
+memcpy(&s.student_id, "WC42", sizeof(s.student_id));
+memcpy(&s.last_name, "Churchill ", sizeof(s.last_name));
+memcpy(&s.first_name, "Winston ", sizeof(s.first_name));
+data.data = &s;
+data.size = sizeof(s);
+if ((ret = dbp-&gt;put(dbp, txn, &key, &data, 0)) != 0)
+ handle_error(ret);</pre></blockquote>
+<p>Internally, a record with secondary key "Churchill" is inserted into
+the secondary database (in addition to the insertion of "WC42" into the
+primary, of course).
+<p>Deletes are similar. The SQL clause:
+<p><blockquote><pre>DELETE FROM students WHERE (student_id = "WC42");</pre></blockquote>
+<p>looks like:
+<p><blockquote><pre>DBT key;
+<p>
+memset(&key, 0, sizeof(DBT));
+key.data = "WC42";
+key.size = 4;
+if ((ret = dbp-&gt;del(dbp, txn, &key, 0)) != 0)
+ handle_error(ret);</pre></blockquote>
+<p>Deletes can also be performed on the secondary index directly; a delete
+done this way will delete the "real" record in the primary as well. If
+the secondary supports duplicates and there are duplicate occurrences of
+the secondary key, then all records with that secondary key are removed
+from both the secondary index and the primary database. In
+SQL:
+<p><blockquote><pre>DELETE FROM lname WHERE (lastname = "Churchill ");</pre></blockquote>
+<p>In Berkeley DB:
+<p><blockquote><pre>DBT skey;
+<p>
+memset(&skey, 0, sizeof(DBT));
+skey.data = "Churchill ";
+skey.size = 15;
+if ((ret = sdbp-&gt;del(sdbp, txn, &skey, 0)) != 0)
+ handle_error(ret);</pre></blockquote>
+<p>Gets on a secondary automatically return the primary datum. If
+<a href="../../api_c/db_get.html">DB-&gt;pget</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_pget</a> is used in lieu of <a href="../../api_c/db_get.html">DB-&gt;get</a>
+or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>, the primary key is returned as well. Thus, the
+equivalent of:
+<p><blockquote><pre>SELECT * from lname WHERE (lastname = "Churchill ");</pre></blockquote>
+<p>would be:
+<p><blockquote><pre>DBT data, pkey, skey;
+<p>
+memset(&skey, 0, sizeof(DBT));
+memset(&pkey, 0, sizeof(DBT));
+memset(&data, 0, sizeof(DBT));
+skey.data = "Churchill ";
+skey.size = 15;
+if ((ret = sdbp-&gt;pget(sdbp, txn, &skey, &pkey, &data, 0)) != 0)
+ handle_error(ret);
+/*
+ * Now pkey contains "WC42" and data contains Winston's record.
+ */</pre></blockquote>
+<p>To create a secondary index to a Berkeley DB database, open the database that
+is to become a secondary index normally, then pass it as the "secondary"
+argument to the <a href="../../api_c/db_associate.html">DB-&gt;associate</a> interface for some primary database.
+<p>After a <a href="../../api_c/db_associate.html">DB-&gt;associate</a> call is made, the secondary indices become
+alternate interfaces to the primary database. All updates to the
+primary will be automatically reflected in each secondary index that
+has been associated with it. All get operations using the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> interfaces on the secondary index
+return the primary datum associated with the specified (or otherwise
+current, in the case of cursor operations) secondary key. The
+<a href="../../api_c/db_get.html">DB-&gt;pget</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_pget</a> interfaces also become usable;
+these behave just like <a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>, but return
+the primary key in addition to the primary datum, for those applications
+that need it as well.
+<p>Cursor get operations on a secondary index perform as expected; although
+the data returned will by default be those of the primary database, a
+position in the secondary index is maintained normally, and records will
+appear in the order determined by the secondary key and the comparison
+function or other structure of the secondary database.
+<p>Delete operations on a secondary index delete the item from the primary
+database and all relevant secondaries, including the current one.
+<p>Put operations of any kind are forbidden on secondary indices, as there
+is no way to specify a primary key for a newly put item. Instead, the
+application should use the <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> or <a href="../../api_c/db_put.html">DB-&gt;put</a> methods
+on the primary database.
+<p>Any number of secondary indices may be associated with a given primary
+database, up to limitations on available memory and the number of open
+file descriptors.
+<p>Note that although Berkeley DB guarantees that updates made using any
+<a href="../../api_c/db_class.html">DB</a> handle with an associated secondary will be reflected in the
+that secondary, associating each primary handle with all the appropriate
+secondaries is the responsibility of the application and is not enforced
+by Berkeley DB. It is generally unsafe, but not forbidden by Berkeley DB, to modify
+a database that has secondary indices without having those indices open
+and associated. Similarly, it is generally unsafe, but not forbidden,
+to modify a secondary index directly. Applications that violate these
+rules face the possibility of outdated or incorrect results if the
+secondary indices are later used.
+<p>If a secondary index becomes outdated for any reason, it should be
+discarded using the <a href="../../api_c/db_remove.html">DB-&gt;remove</a> method and a new one created
+using the <a href="../../api_c/db_associate.html">DB-&gt;associate</a> method. If a secondary index is no
+longer needed, all of its handles should be closed using the
+<a href="../../api_c/db_close.html">DB-&gt;close</a> method, and then the database should be removed using
+a new database handle and the <a href="../../api_c/db_remove.html">DB-&gt;remove</a> method.
+<p>Closing a primary database handle automatically disassociates all
+secondary database handles associated with it.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/stat.html b/libdb/docs/ref/am/stat.html
new file mode 100644
index 0000000..0ce8789
--- /dev/null
+++ b/libdb/docs/ref/am/stat.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database statistics</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/delete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/truncate.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database statistics</h1>
+<p>The <a href="../../api_c/db_stat.html">DB-&gt;stat</a> method is the standard interface for obtaining database
+statistics. Generally, <a href="../../api_c/db_stat.html">DB-&gt;stat</a> returns a set of statistics
+about the underlying database, for example, the number of key/data pairs
+in the database, how the database was originally configured, and so
+on.
+<p>There is one flag you can set to customize the returned statistics:
+<p><dl compact>
+<p><dt><a href="../../api_c/db_stat.html#DB_FAST_STAT">DB_FAST_STAT</a><dd>Return only information that can be acquired without traversing the
+entire database.
+</dl>
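+<p>A sketch for a Btree database, assuming an open handle <b>dbp</b> and a
+<b>handle_error</b> routine; the statistics structure is allocated by the
+library and is released by the caller:
+<p><blockquote><pre>DB_BTREE_STAT *sp;
+int ret;
+<p>
+if ((ret = dbp-&gt;stat(dbp, &sp, DB_FAST_STAT)) != 0)
+        handle_error(ret);
+printf("keys: %lu, data items: %lu\n",
+    (u_long)sp-&gt;bt_nkeys, (u_long)sp-&gt;bt_ndata);
+free(sp);</pre></blockquote>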
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/delete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/truncate.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/sync.html b/libdb/docs/ref/am/sync.html
new file mode 100644
index 0000000..c65efc6
--- /dev/null
+++ b/libdb/docs/ref/am/sync.html
@@ -0,0 +1,39 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Flushing the database cache</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/verify.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Flushing the database cache</h1>
+<p>The <a href="../../api_c/db_sync.html">DB-&gt;sync</a> method is the standard interface for flushing all modified
+records from the database cache to disk.
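+<p>A call is typically as simple as the following sketch, assuming an open
+handle <b>dbp</b> and a <b>handle_error</b> routine:
+<p><blockquote><pre>int ret;
+<p>
+if ((ret = dbp-&gt;sync(dbp, 0)) != 0)
+        handle_error(ret);</pre></blockquote>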
+<p><b>It is important to understand that flushing cached information
+to disk only minimizes the window of opportunity for corrupted data, it
+does not eliminate the possibility.</b>
+<p>While unlikely, it is possible for database corruption to happen if a
+system or application crash occurs while writing data to the database. To
+ensure that database corruption never occurs, applications must either:
+<p><ul type=disc>
+<li>Use transactions and logging with automatic recovery.
+<li>Use logging and application-specific recovery.
+<li>Edit a copy of the database, and, once all applications
+using the database have successfully called <a href="../../api_c/db_close.html">DB-&gt;close</a>, use
+system operations (for example, the POSIX rename system call) to
+atomically replace the original database with the updated copy.
+</ul>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/verify.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/truncate.html b/libdb/docs/ref/am/truncate.html
new file mode 100644
index 0000000..401562f
--- /dev/null
+++ b/libdb/docs/ref/am/truncate.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database truncation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/upgrade.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database truncation</h1>
+<p>The <a href="../../api_c/db_truncate.html">DB-&gt;truncate</a> method is the standard interface for emptying a
+database of all records.
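+<p>A minimal sketch, assuming an open handle <b>dbp</b>, a transaction handle
+<b>txn</b> and a <b>handle_error</b> routine; the number of discarded records
+is returned through the count argument:
+<p><blockquote><pre>u_int32_t count;
+int ret;
+<p>
+if ((ret = dbp-&gt;truncate(dbp, txn, &count, 0)) != 0)
+        handle_error(ret);
+printf("discarded %lu records\n", (u_long)count);</pre></blockquote>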
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/upgrade.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/upgrade.html b/libdb/docs/ref/am/upgrade.html
new file mode 100644
index 0000000..3d0d8e8
--- /dev/null
+++ b/libdb/docs/ref/am/upgrade.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/truncate.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/verify.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database upgrade</h1>
+<p>When upgrading to a new release of Berkeley DB, it may be necessary to upgrade
+the on-disk format of already-created database files. <b>Berkeley DB
+database upgrades are done in place, and so are potentially
+destructive.</b> This means that if the system crashes during the upgrade
+procedure, or if the upgrade procedure runs out of disk space, the
+databases may be left in an inconsistent and unrecoverable state. To
+guard against failure, the procedures outlined in
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>
+should be carefully followed. If you are not performing catastrophic
+archival as part of your application upgrade process, you should at
+least copy your database to archival media, verify that your archival
+media is error-free and readable, and that copies of your backups are
+stored offsite!
+<p>The actual database upgrade is done using the <a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+method, or by dumping the database using the old version of the Berkeley DB
+software and reloading it using the current version.
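+<p>A sketch of the in-place upgrade, assuming a freshly created handle that
+has not yet been opened, a hypothetical file name and a <b>handle_error</b>
+routine:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        handle_error(ret);
+if ((ret = dbp-&gt;upgrade(dbp, "students.db", 0)) != 0)
+        handle_error(ret);
+(void)dbp-&gt;close(dbp, 0);</pre></blockquote>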
+<p>After an upgrade, Berkeley DB applications must be recompiled to use the new
+Berkeley DB library before they can access an upgraded database.
+<b>There is no guarantee that applications compiled against
+previous releases of Berkeley DB will work correctly with an upgraded database
+format. Nor is there any guarantee that applications compiled against
+newer releases of Berkeley DB will work correctly with the previous database
+format.</b> We do guarantee that any archived database may be upgraded
+using a current Berkeley DB software release and the <a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a>
+method, and there is no need to step-wise upgrade the database using
+intermediate releases of Berkeley DB. Sites should consider archiving
+appropriate copies of their application or application sources if they
+may need to access archived databases without first upgrading them.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/truncate.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/verify.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am/verify.html b/libdb/docs/ref/am/verify.html
new file mode 100644
index 0000000..ede6efd
--- /dev/null
+++ b/libdb/docs/ref/am/verify.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database verification and salvage</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/upgrade.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database verification and salvage</h1>
+<p>The <a href="../../api_c/db_verify.html">DB-&gt;verify</a> method is the standard interface for verifying
+that a file, and any databases it may contain, are uncorrupted. In
+addition, the method may optionally be called with a file stream
+argument to which all key/data pairs found in the database are output.
+There are two modes for finding key/data pairs to be output:
+<p><ol>
+<p><li>If the <a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> flag is specified, the key/data pairs in the
+database are output. When run in this mode, the database is assumed to
+be largely uncorrupted. For example, the <a href="../../api_c/db_verify.html">DB-&gt;verify</a> method will
+search for pages that are no longer linked into the database, and will
+output key/data pairs from such pages. However, key/data items that
+have been marked as deleted in the database will not be output, as the
+page structures are generally trusted in this mode.
+<p><li>If both the <a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> and <a href="../../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a> flags are
+specified, all possible key/data pairs are output. When run in this mode,
+the database is assumed to be seriously corrupted. For example, key/data
+pairs that have been deleted will re-appear in the output. In addition,
+because pages may have been subsequently reused and modified during
+normal database operations after the key/data pairs were deleted, it is
+not uncommon for apparently corrupted key/data pairs to be output in this
+mode, even when there is no corruption in the underlying database. The
+output will almost always have to be edited by hand or other means before
+the data is ready for reload into another database. We recommend that
+<a href="../../api_c/db_verify.html#DB_SALVAGE">DB_SALVAGE</a> be tried first, and <a href="../../api_c/db_verify.html#DB_AGGRESSIVE">DB_AGGRESSIVE</a> only tried
+if the output from that first attempt is obviously missing data items or
+the data is sufficiently valuable that human review of the output is
+preferable to any kind of data loss.
+</ol>
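+<p>A sketch of a salvage pass that writes recovered key/data pairs to a file,
+assuming a hypothetical file name and a <b>handle_error</b> routine; the
+<a href="../../api_c/db_class.html">DB</a> handle may not be used again after
+<a href="../../api_c/db_verify.html">DB-&gt;verify</a> returns, regardless of its result:
+<p><blockquote><pre>DB *dbp;
+FILE *fp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+        handle_error(ret);
+if ((fp = fopen("salvage.out", "w")) == NULL)
+        handle_error(errno);
+if ((ret = dbp-&gt;verify(dbp, "students.db", NULL, fp, DB_SALVAGE)) != 0)
+        handle_error(ret);
+(void)fclose(fp);</pre></blockquote>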
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/upgrade.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/bt_compare.html b/libdb/docs/ref/am_conf/bt_compare.html
new file mode 100644
index 0000000..6f485e8
--- /dev/null
+++ b/libdb/docs/ref/am_conf/bt_compare.html
@@ -0,0 +1,94 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Btree comparison</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/malloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Btree comparison</h1>
+<p>The Btree data structure is a sorted, balanced tree structure storing
+associated key/data pairs. By default, the sort order is lexicographical,
+with shorter keys collating before longer keys. The user can specify the
+sort order for the Btree by using the <a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a> method.
+<p>Sort routines are passed pointers to keys as arguments. The keys are
+represented as <a href="../../api_c/dbt_class.html">DBT</a> structures. The routine must return an integer
+less than, equal to, or greater than zero if the first argument is
+considered to be respectively less than, equal to, or greater than the
+second argument. The only fields that the routines may examine in the
+<a href="../../api_c/dbt_class.html">DBT</a> structures are the <b>data</b> and <b>size</b> fields.
+<p>An example routine that might be used to sort integer keys in the database
+is as follows:
+<p><blockquote><pre>int
+compare_int(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ int ai, bi;
+<p>
+ /*
+ * Returns:
+ * &lt; 0 if a &lt; b
+ * = 0 if a = b
+ * &gt; 0 if a &gt; b
+ */
+ memcpy(&ai, a-&gt;data, sizeof(int));
+ memcpy(&bi, b-&gt;data, sizeof(int));
+ return (ai - bi);
+}</pre></blockquote>
+<p>Note that the data must first be copied into memory that is appropriately
+aligned, as Berkeley DB does not guarantee any kind of alignment of the
+underlying data, including for comparison routines. When writing
+comparison routines, remember that databases created on machines of
+different architectures may have different integer byte orders, for which
+your code may need to compensate.
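+<p>As a sketch of how such a routine is put into effect (error handling
+abbreviated, and the function name is illustrative), the comparison
+function must be registered before the database is opened:
+<p><blockquote><pre>int
+configure_compare(dbpp)
+	DB **dbpp;
+{
+	DB *dbp;
+	int ret;
+<p>
+	if ((ret = db_create(&dbp, NULL, 0)) != 0)
+		return (ret);
+	/* Register the integer comparison routine shown above. */
+	if ((ret = dbp-&gt;set_bt_compare(dbp, compare_int)) != 0) {
+		(void)dbp-&gt;close(dbp, 0);
+		return (ret);
+	}
+	*dbpp = dbp;	/* The handle is now ready for DB-&gt;open. */
+	return (0);
+}</pre></blockquote>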
+<p>An example routine that might be used to sort keys based on the first
+five bytes of the key (ignoring any subsequent bytes) is as follows:
+<p><blockquote><pre>int
+compare_dbt(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ int len;
+ u_char *p1, *p2;
+<p>
+ /*
+ * Returns:
+ * &lt; 0 if a &lt; b
+ * = 0 if a = b
+ * &gt; 0 if a &gt; b
+ */
+ for (p1 = a-&gt;data, p2 = b-&gt;data, len = 5; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return (0);
+}</pre></blockquote>
+<p>All comparison functions must cause the keys in the database to be
+well-ordered. The most important implication of being well-ordered is
+that the key relations must be transitive, that is, if key A is less
+than key B, and key B is less than key C, then the comparison routine
+must also return that key A is less than key C. In addition, comparisons
+will only be able to return 0 when comparing full length keys; partial
+key comparisons must always return a result less than or greater than 0.
+<p>It is reasonable for a comparison function to not examine an entire key
+in some applications, which implies that partial keys may be specified
+to the Berkeley DB interfaces. When partial keys are specified to Berkeley DB,
+interfaces which retrieve data items based on a user-specified key (for
+example, <a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> with the <a href="../../api_c/dbc_get.html#DB_SET">DB_SET</a>
+flag) will not modify the user-specified key by returning the actual
+key stored in the database. The actual key can be retrieved by calling
+the <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method with the <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a> flag.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/malloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/bt_minkey.html b/libdb/docs/ref/am_conf/bt_minkey.html
new file mode 100644
index 0000000..f52c8c1
--- /dev/null
+++ b/libdb/docs/ref/am_conf/bt_minkey.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Minimum keys per page</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Minimum keys per page</h1>
+<p>The number of keys stored on each page affects the size of a Btree and
+how it is maintained. Therefore, it also affects the retrieval and search
+performance of the tree. For each Btree, Berkeley DB computes a maximum key
+and data size. This size is a function of the page size and the fact that
+at least two key/data pairs must fit on any Btree page. Whenever key or
+data items exceed the calculated size, they are stored on overflow pages
+instead of in the standard Btree leaf pages.
+<p>Applications may use the <a href="../../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a> method to change the minimum
+number of keys that must fit on a Btree page from two to another value.
+Altering this value in turn alters the on-page maximum size, and can be
+used to force key and data items which would normally be stored in the
+Btree leaf pages onto overflow pages.
+<p>Some data sets can benefit from this tuning. For example, consider an
+application using large page sizes, with a data set almost entirely
+consisting of small key and data items, but with a few large items. By
+setting the minimum number of keys that must fit on a page, the
+application can force the outsized items to be stored on overflow pages.
+That in turn can potentially keep the tree more compact, that is, with
+fewer internal levels to traverse during searches.
+<p>The following calculation is similar to the one performed by the Btree
+implementation. (The <b>minimum_keys</b> value is multiplied by 2
+because each key/data pair requires 2 slots on a Btree page.)
+<p><blockquote><pre>maximum_size = page_size / (minimum_keys * 2)</pre></blockquote>
+<p>Using this calculation, if the page size is 8KB and the default
+<b>minimum_keys</b> value of 2 is used, then any key or data items
+larger than 2KB will be forced to an overflow page. If an application
+were to specify a <b>minimum_keys</b> value of 100, then any key or data
+items larger than roughly 40 bytes would be forced to overflow pages.
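+<p>As a sketch, forcing at least 100 keys onto every page is a single
+configuration call made on the database handle before it is opened
+(error handling is left to the caller):
+<p><blockquote><pre>int
+set_minkey(dbp)
+	DB *dbp;
+{
+	/* Require that at least 100 keys fit on every Btree page. */
+	return (dbp-&gt;set_bt_minkey(dbp, 100));
+}</pre></blockquote>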
+<p>It is important to remember that accesses to overflow pages do not perform
+as well as accesses to the standard Btree leaf pages, and so setting the
+value incorrectly can result in overusing overflow pages and decreasing
+the application's overall performance.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/bt_prefix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/bt_prefix.html b/libdb/docs/ref/am_conf/bt_prefix.html
new file mode 100644
index 0000000..537f3ee
--- /dev/null
+++ b/libdb/docs/ref/am_conf/bt_prefix.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Btree prefix comparison</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Btree prefix comparison</h1>
+<p>The Berkeley DB Btree implementation maximizes the number of keys that can be
+stored on an internal page by storing only as many bytes of each key as
+are necessary to distinguish it from adjacent keys. The prefix
+comparison routine determines this minimum number of bytes (that is,
+the length of the unique prefix) that must be stored. A prefix
+comparison function for the Btree can be specified by calling
+<a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>.
+<p>The prefix comparison routine must be compatible with the overall
+comparison function of the Btree, since what distinguishes any two keys
+depends entirely on the function used to compare them. This means that
+if a prefix comparison routine is specified by the application, a
+compatible overall comparison routine must also have been specified.
+<p>Prefix comparison routines are passed pointers to keys as arguments. The
+keys are represented as <a href="../../api_c/dbt_class.html">DBT</a> structures. The prefix comparison
+function must return the number of bytes of the second key argument that
+are necessary to determine if it is greater than the first key argument.
+If the keys are equal, the length of the second key should be returned.
+The only fields that the routines may examine in the <a href="../../api_c/dbt_class.html">DBT</a>
+structures are the <b>data</b> and <b>size</b> fields.
+<p>An example prefix comparison routine follows:
+<p><blockquote><pre>u_int32_t
+compare_prefix(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t cnt, len;
+ u_int8_t *p1, *p2;
+<p>
+ cnt = 1;
+ len = a-&gt;size &gt; b-&gt;size ? b-&gt;size : a-&gt;size;
+ for (p1 =
+ a-&gt;data, p2 = b-&gt;data; len--; ++p1, ++p2, ++cnt)
+ if (*p1 != *p2)
+ return (cnt);
+ /*
+ * They match up to the smaller of the two sizes.
+ * Collate the longer after the shorter.
+ */
+ if (a-&gt;size &lt; b-&gt;size)
+ return (a-&gt;size + 1);
+ if (b-&gt;size &lt; a-&gt;size)
+ return (b-&gt;size + 1);
+ return (b-&gt;size);
+}</pre></blockquote>
+<p>The usefulness of this functionality is data-dependent, but with some
+data sets it can produce significantly reduced tree sizes and faster search times.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/bt_recnum.html b/libdb/docs/ref/am_conf/bt_recnum.html
new file mode 100644
index 0000000..d9bbedd
--- /dev/null
+++ b/libdb/docs/ref/am_conf/bt_recnum.html
@@ -0,0 +1,106 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving Btree records by logical record number</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving Btree records by logical record number</h1>
+<p>The Btree access method optionally supports retrieval by logical record
+numbers. To configure a Btree to support record numbers, call the
+<a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method with the <a href="../../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag.
+<p>Configuring a Btree for record numbers should not be done lightly.
+While often useful, it may significantly reduce the speed at which
+items can be stored into the database, and can severely impact
+application throughput. Generally, it should be avoided in trees with
+a need for high write concurrency.
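+<p>A minimal sketch of the configuration call, which must be made before
+the database is opened, follows:
+<p><blockquote><pre>int
+enable_recnum(dbp)
+	DB *dbp;
+{
+	/* Support retrieval of Btree records by logical record number. */
+	return (dbp-&gt;set_flags(dbp, DB_RECNUM));
+}</pre></blockquote>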
+<p>To retrieve by record number, use the <a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag to the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods. The following is an example of
+a routine that displays the data item for a Btree database created with
+the <a href="../../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> option.
+<p><blockquote><pre>int
+rec_display(dbp, recno)
+ DB *dbp;
+ db_recno_t recno;
+{
+ DBT key, data;
+ int ret;
+<p>
+ memset(&key, 0, sizeof(key));
+ key.data = &recno;
+ key.size = sizeof(recno);
+ memset(&data, 0, sizeof(data));
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, DB_SET_RECNO)) != 0)
+ return (ret);
+ printf("data for %lu: %.*s\n",
+ (u_long)recno, (int)data.size, (char *)data.data);
+ return (0);
+}</pre></blockquote>
+<p>To determine a key's record number, use the <a href="../../api_c/dbc_get.html#DB_GET_RECNO">DB_GET_RECNO</a> flag
+to the <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method. The following is an example of a routine that
+displays the record number associated with a specific key.
+<p><blockquote><pre>int
+recno_display(dbp, keyvalue)
+ DB *dbp;
+ char *keyvalue;
+{
+ DBC *dbcp;
+ DBT key, data;
+ db_recno_t recno;
+ int ret, t_ret;
+<p>
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ goto err;
+ }
+<p>
+ /* Position the cursor. */
+ memset(&key, 0, sizeof(key));
+ key.data = keyvalue;
+ key.size = strlen(keyvalue);
+ memset(&data, 0, sizeof(data));
+ if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_SET)) != 0) {
+ dbp-&gt;err(dbp, ret, "DBC-&gt;c_get(DB_SET): %s", keyvalue);
+ goto err;
+ }
+<p>
+ /*
+ * Request the record number, and store it into appropriately
+ * sized and aligned local memory.
+ */
+ memset(&data, 0, sizeof(data));
+ data.data = &recno;
+ data.ulen = sizeof(recno);
+ data.flags = DB_DBT_USERMEM;
+ if ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_GET_RECNO)) != 0) {
+ dbp-&gt;err(dbp, ret, "DBC-&gt;c_get(DB_GET_RECNO)");
+ goto err;
+ }
+<p>
+	printf("record number for requested key was %lu\n", (u_long)recno);
+<p>
+err: /* Close the cursor. */
+ if ((t_ret = dbcp-&gt;c_close(dbcp)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ dbp-&gt;err(dbp, ret, "DBC-&gt;close");
+ }
+ return (ret);
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/bt_minkey.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/byteorder.html b/libdb/docs/ref/am_conf/byteorder.html
new file mode 100644
index 0000000..b7dd1f5
--- /dev/null
+++ b/libdb/docs/ref/am_conf/byteorder.html
@@ -0,0 +1,38 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a byte order</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/cachesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a byte order</h1>
+<p>Database files created by Berkeley DB can be created in either little- or
+big-endian formats. The byte order used for the underlying database
+is specified by calling the <a href="../../api_c/db_set_lorder.html">DB-&gt;set_lorder</a> method. If no order
+is selected, the native format of the machine on which the database is
+created will be used.
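+<p>For example (as a sketch, with error handling left to the caller), a
+database may be forced to big-endian format regardless of the machine
+on which it is created:
+<p><blockquote><pre>int
+set_big_endian(dbp)
+	DB *dbp;
+{
+	/* 4321 selects big-endian; 1234 selects little-endian. */
+	return (dbp-&gt;set_lorder(dbp, 4321));
+}</pre></blockquote>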
+<p>Berkeley DB databases are architecture independent, and any format database can
+be used on a machine with a different native format. In this case,
+each page that is read into or written from the cache must be converted
+to or from the host format, and databases with non-native formats will
+incur a performance penalty for the run-time conversion.
+<p><b>It is important to note that the Berkeley DB access methods do no data
+conversion for application-specified data. Key/data pairs written on a
+little-endian format architecture will be returned to the application
+exactly as they were written when retrieved on a big-endian format
+architecture.</b>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/cachesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/cachesize.html b/libdb/docs/ref/am_conf/cachesize.html
new file mode 100644
index 0000000..8264f9d
--- /dev/null
+++ b/libdb/docs/ref/am_conf/cachesize.html
@@ -0,0 +1,86 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a cache size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/pagesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a cache size</h1>
+<p>The size of the cache used for the underlying database can be specified
+by calling the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> method.
+Choosing a cache size is, unfortunately, an art. Your cache must be at
+least large enough for your working set plus some overlap for unexpected
+situations.
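+<p>As a sketch (the size is purely illustrative, and must really be chosen
+from the working-set considerations that follow), a 64MB cache in a
+single region might be configured as follows:
+<p><blockquote><pre>int
+set_cache(dbp)
+	DB *dbp;
+{
+	/* 0GB + 64MB, in one underlying cache region. */
+	return (dbp-&gt;set_cachesize(dbp, 0, 64 * 1024 * 1024, 1));
+}</pre></blockquote>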
+<p>When using the Btree access method, you must have a cache big enough for
+the minimum working set for a single access. This will include a root
+page, one or more internal pages (depending on the depth of your tree),
+and a leaf page. If your cache is any smaller than that, each new page
+will force out the least-recently-used page, and Berkeley DB will have to
+re-read the root page of the tree on each database request.
+<p>If your keys are of moderate size (a few tens of bytes) and your pages
+are on the order of 4K to 8K, most Btree applications will be only
+three levels. For example, using 20-byte keys with 20 bytes of data
+associated with each key, an 8KB page can hold roughly 400 keys (or 200
+key/data pairs), so a fully populated three-level Btree will hold 32
+million key/data pairs, and a tree with only a 50% page-fill factor will
+still hold 16 million key/data pairs. We rarely expect trees to exceed
+five levels, although Berkeley DB will support trees up to 255 levels.
+<p>The rule-of-thumb is that cache is good, and more cache is better.
+Generally, applications benefit from increasing the cache size up to a
+point, at which the performance will stop improving as the cache size
+increases. When this point is reached, one of two things has happened:
+either the cache is large enough that the application almost never
+has to retrieve information from disk, or your application is doing
+truly random accesses, and therefore increasing the size of the cache doesn't
+significantly increase the odds of finding the next requested information
+in the cache. The latter is fairly rare -- almost all applications show
+some form of locality of reference.
+<p>That said, it is important not to increase your cache size beyond the
+capabilities of your system, as that will result in reduced performance.
+Under many operating systems, tying down enough virtual memory will cause
+your memory and potentially your program to be swapped. This is
+especially likely on systems without unified OS buffer caches and virtual
+memory spaces, as the buffer cache was allocated at boot time and so
+cannot be adjusted based on application requests for large amounts of
+virtual memory.
+<p>For example, even if accesses are truly random within a Btree, your
+access pattern will favor internal pages over leaf pages, so your cache
+should be large enough to hold all internal pages. In the steady state,
+this requires at most one I/O per operation to retrieve the appropriate
+leaf page.
+<p>You can use the <a href="../../utility/db_stat.html">db_stat</a> utility to monitor the effectiveness of
+your cache. The following output is excerpted from the output of that
+utility's <b>-m</b> option:
+<p><blockquote><pre>prompt: db_stat -m
+131072 Cache size (128K).
+4273 Requested pages found in the cache (97%).
+134 Requested pages not found in the cache.
+18 Pages created in the cache.
+116 Pages read into the cache.
+93 Pages written from the cache to the backing file.
+5 Clean pages forced from the cache.
+13 Dirty pages forced from the cache.
+0 Dirty buffers written by trickle-sync thread.
+130 Current clean buffer count.
+4 Current dirty buffer count.
+</pre></blockquote>
+<p>The statistics for this cache say that there have been 4,273 requests of
+the cache, and only 116 of those requests required an I/O from disk. This
+means that the cache is working well, yielding a 97% cache hit rate. The
+<a href="../../utility/db_stat.html">db_stat</a> utility will present these statistics both for the cache
+as a whole and for each file within the cache separately.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/pagesize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/byteorder.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/dup.html b/libdb/docs/ref/am_conf/dup.html
new file mode 100644
index 0000000..d349c4d
--- /dev/null
+++ b/libdb/docs/ref/am_conf/dup.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Duplicate data items</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/malloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Duplicate data items</h1>
+<p>The Btree and Hash access methods support the creation of multiple data
+items for a single key item. By default, multiple data items are not
+permitted, and each database store operation will overwrite any previous
+data item for that key. To configure Berkeley DB for duplicate data items,
+call the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method with the <a href="../../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag. Only one
+copy of the key will be stored for each set of duplicate data items.
+If the Btree access method comparison routine returns that two keys
+compare equally, it is undefined which of the two keys will be stored
+and returned from future database operations.
+<p>By default, Berkeley DB stores duplicates in the order in which they were added,
+that is, each new duplicate data item will be stored after any already
+existing data items. This default behavior can be overridden by using
+the <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> method and one of the <a href="../../api_c/dbc_put.html#DB_AFTER">DB_AFTER</a>, <a href="../../api_c/dbc_put.html#DB_BEFORE">DB_BEFORE</a>,
+<a href="../../api_c/dbc_put.html#DB_KEYFIRST">DB_KEYFIRST</a> or <a href="../../api_c/dbc_put.html#DB_KEYLAST">DB_KEYLAST</a> flags. Alternatively, Berkeley DB
+may be configured to sort duplicate data items.
+<p>When stepping through the database sequentially, duplicate data items will
+be returned individually, as a key/data pair, where the key item only
+changes after the last duplicate data item has been returned. For this
+reason, duplicate data items cannot be accessed using the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> method, as it always returns the first of the duplicate data
+items. Duplicate data items should be retrieved using the Berkeley DB cursor
+interface, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>.
+<p>There is an interface flag that permits applications to request the
+following data item only if it <b>is</b> a duplicate data item of the
+current entry; see <a href="../../api_c/dbc_get.html#DB_NEXT_DUP">DB_NEXT_DUP</a> for more information. There is an
+interface flag that permits applications to request the following data
+item only if it <b>is not</b> a duplicate data item of the current
+entry; see <a href="../../api_c/dbc_get.html#DB_NEXT_NODUP">DB_NEXT_NODUP</a> and <a href="../../api_c/dbc_get.html#DB_PREV_NODUP">DB_PREV_NODUP</a> for more
+information.
+<p>It is also possible to maintain duplicate records in sorted order. Sorting
+duplicates will significantly increase performance when searching them
+and performing equality joins, common operations when using secondary
+indices. To configure Berkeley DB to sort duplicate data items, the application
+must call the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method with the <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag (in
+addition to the <a href="../../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag). In addition, a custom sorting
+function may be specified using the <a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> method. If the
+<a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag is given, but no comparison routine is specified,
+then Berkeley DB defaults to the same lexicographical sorting used for Btree
+keys, with shorter items collating before longer items.
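+<p>A sketch of the configuration calls for sorted duplicates follows
+(error handling abbreviated; the function name is illustrative):
+<p><blockquote><pre>int
+enable_sorted_dups(dbp)
+	DB *dbp;
+{
+	int ret;
+<p>
+	/* Permit duplicates, and keep them in sorted order. */
+	if ((ret = dbp-&gt;set_flags(dbp, DB_DUP | DB_DUPSORT)) != 0)
+		return (ret);
+	/*
+	 * An application-specific ordering could be installed here with
+	 * DB-&gt;set_dup_compare; otherwise the default lexicographical
+	 * comparison is used.
+	 */
+	return (0);
+}</pre></blockquote>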
+<p>If the duplicate data items are unsorted, applications may store identical
+duplicate data items, or, for those that just like the way it sounds,
+<i>duplicate duplicates</i>.
+<p><b>In this release it is an error to attempt to store identical
+duplicate data items when duplicates are being stored in a sorted order.</b>
+This restriction is expected to be lifted in a future release. There is
+an interface flag that permits applications to disallow storing duplicate
+data items when the database has been configured for sorted duplicates;
+see <a href="../../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a> for more information. Applications not wanting
+to permit duplicate duplicates in databases configured for sorted
+duplicates should begin using the <a href="../../api_c/db_put.html#DB_NODUPDATA">DB_NODUPDATA</a> flag immediately.
+<p>For further information on how searching and insertion behaves in the
+presence of duplicates (sorted or not), see the <a href="../../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../../api_c/db_put.html">DB-&gt;put</a>, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> and <a href="../../api_c/dbc_put.html">DBcursor-&gt;c_put</a> documentation.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/byteorder.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/malloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/extentsize.html b/libdb/docs/ref/am_conf/extentsize.html
new file mode 100644
index 0000000..78bdbf5
--- /dev/null
+++ b/libdb/docs/ref/am_conf/extentsize.html
@@ -0,0 +1,44 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a Queue extent size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/recno.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/re_source.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a Queue extent size</h1>
+<p>In Queue databases, records are allocated sequentially and directly
+mapped to an offset within the file storage for the database. As
+records are deleted from the Queue, pages will become empty and will
+not be reused in normal queue operations. To facilitate the reclamation
+of disk space, a Queue may be partitioned into extents. Each extent is
+kept in a separate physical file.
+<p>Extent files are automatically created as needed and marked for deletion
+when the head of the queue moves off the extent. The extent will not
+be deleted until all processes close the extent. In addition, Berkeley DB
+caches a small number of extents that have been recently used; this may
+delay when an extent will be deleted. The number of extents left open
+depends on queue activity.
+<p>The extent size specifies the number of pages that make up each extent.
+By default, if no extent size is specified, the Queue resides in a
+single file and disk space is not reclaimed. In choosing an extent size
+there is a tradeoff between the amount of disk space used and the
+overhead of creating and deleting files. If the extent size is too
+small, the system will pay a performance penalty, creating and deleting
+files frequently. In addition, if the active part of the queue spans
+many files, all those files will need to be open at the same time,
+consuming system and process file resources.
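+<p>As a sketch (the extent size of 128 pages is illustrative only), the
+extent size is configured on the database handle, with the
+<a href="../../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a> method, before the Queue is opened:
+<p><blockquote><pre>int
+set_extent(dbp)
+	DB *dbp;
+{
+	/* Break the Queue into extent files of 128 pages each. */
+	return (dbp-&gt;set_q_extentsize(dbp, 128));
+}</pre></blockquote>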
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/recno.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/re_source.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/h_ffactor.html b/libdb/docs/ref/am_conf/h_ffactor.html
new file mode 100644
index 0000000..4acbd8c
--- /dev/null
+++ b/libdb/docs/ref/am_conf/h_ffactor.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Page fill factor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_hash.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Page fill factor</h1>
+<p>The density, or page fill factor, is an approximation of the number of
+keys allowed to accumulate in any one bucket, determining when the hash
+table grows or shrinks. If you know the average sizes of the keys and
+data in your data set, setting the fill factor can enhance performance.
+A reasonable rule for computing the fill factor is:
+<p><blockquote><pre>(pagesize - 32) / (average_key_size + average_data_size + 8)</pre></blockquote>
+<p>The desired density within the hash table can be specified by calling
+the <a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a> method. If no density is specified, one will
+be selected dynamically as pages are filled.
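+<p>For example, with 8KB pages, 20-byte keys and 100-byte data items, the
+rule gives (8192 - 32) / (20 + 100 + 8), or roughly 63 entries per
+bucket. A sketch of setting that value (the sizes are illustrative)
+follows:
+<p><blockquote><pre>int
+set_fill_factor(dbp)
+	DB *dbp;
+{
+	/* (8192 - 32) / (20 + 100 + 8) is roughly 63 entries per bucket. */
+	return (dbp-&gt;set_h_ffactor(dbp, 63));
+}</pre></blockquote>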
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/bt_recnum.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_hash.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/h_hash.html b/libdb/docs/ref/am_conf/h_hash.html
new file mode 100644
index 0000000..d783923
--- /dev/null
+++ b/libdb/docs/ref/am_conf/h_hash.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Specifying a database hash</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Specifying a database hash</h1>
+<p>The database hash determines in which bucket a particular key will reside.
+The goal of hashing keys is to distribute keys equally across the database
+pages; therefore, it is important that the hash function work well with
+the specified keys so that the resulting bucket usage is relatively
+uniform. A hash function that does not work well can effectively reduce
+the hash table to a sequential list.
+<p>No hash performs equally well on all possible data sets. It is possible
+that applications may find that the default hash function performs poorly
+with a particular set of keys. The distribution resulting from the hash
+function can be checked using the <a href="../../utility/db_stat.html">db_stat</a> utility. By comparing the
+number of hash buckets and the number of keys, one can decide if the entries
+are hashing in a well-distributed manner.
+<p>The hash function for the hash table can be specified by calling the
+<a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a> method. If no hash function is specified, a default
+function will be used. Any application-specified hash function must
+take a reference to a <a href="../../api_c/db_class.html">DB</a> object, a pointer to a byte string, and
+its length as arguments, and return an unsigned, 32-bit hash value.
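+<p>As a sketch, a trivial application-specific hash function (shown only
+to illustrate the expected calling convention; it is not recommended as
+a general-purpose hash) might be written and registered as follows:
+<p><blockquote><pre>u_int32_t
+my_hash(dbp, bytes, length)
+	DB *dbp;
+	const void *bytes;
+	u_int32_t length;
+{
+	const u_int8_t *p;
+	u_int32_t hash;
+<p>
+	/* A simple multiplicative hash over the key bytes. */
+	for (hash = 0, p = bytes; length-- &gt; 0; ++p)
+		hash = hash * 31 + *p;
+	return (hash);
+}
+<p>
+int
+register_hash(dbp)
+	DB *dbp;
+{
+	/* Must be called before the database is opened. */
+	return (dbp-&gt;set_h_hash(dbp, my_hash));
+}</pre></blockquote>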
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/h_ffactor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/h_nelem.html b/libdb/docs/ref/am_conf/h_nelem.html
new file mode 100644
index 0000000..a95bb44
--- /dev/null
+++ b/libdb/docs/ref/am_conf/h_nelem.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Hash table size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/h_hash.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/recno.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Hash table size</h1>
+<p>When setting up the hash database, knowing the expected number of elements
+that will be stored in the hash table is useful. This value can be used
+by the Hash access method implementation to more accurately construct the
+necessary number of buckets that the database will eventually require.
+<p>The anticipated number of elements in the hash table can be specified by
+calling the <a href="../../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a> method. If not specified, or set too low,
+hash tables will expand gracefully as keys are entered, although a slight
+performance degradation may be noticed. In order for the estimated number
+of elements to be a useful value to Berkeley DB, the <a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a> method
+must also be called to set the page fill factor.
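+<p>As a sketch (the figures are illustrative), an application expecting
+roughly one million elements might configure the handle as follows:
+<p><blockquote><pre>int
+size_hash(dbp)
+	DB *dbp;
+{
+	int ret;
+<p>
+	/* Expect roughly one million elements... */
+	if ((ret = dbp-&gt;set_h_nelem(dbp, 1000000)) != 0)
+		return (ret);
+	/* ...at an estimated 63 entries per bucket. */
+	return (dbp-&gt;set_h_ffactor(dbp, 63));
+}</pre></blockquote>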
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/h_hash.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/recno.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/intro.html b/libdb/docs/ref/am_conf/intro.html
new file mode 100644
index 0000000..4636b77
--- /dev/null
+++ b/libdb/docs/ref/am_conf/intro.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What are the available access methods?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/select.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What are the available access methods?</h1>
+<p>Berkeley DB currently offers four access methods: Btree, Hash, Queue and Recno.
+<h3>Btree</h3>
+<p>The Btree access method is an implementation of a sorted, balanced tree
+structure. Searches, insertions, and deletions in the tree all take O(log
+base_b N) time, where base_b is the average number of keys per page, and
+N is the total number of keys stored. Often, inserting ordered data into
+Btree implementations results in pages that are only half-full. Berkeley DB
+makes ordered (or inverse ordered) insertion the best case, resulting in
+nearly full-page space utilization.
+<h3>Hash</h3>
+<p>The Hash access method data structure is an implementation of Extended
+Linear Hashing, as described in "Linear Hashing: A New Tool for File and
+Table Addressing", Witold Litwin, <i>Proceedings of the 6th
+International Conference on Very Large Databases (VLDB)</i>, 1980.
+<h3>Queue</h3>
+<p>The Queue access method stores fixed-length records with logical record
+numbers as keys. It is designed for fast inserts at the tail and has a
+special cursor consume operation that deletes and returns a record from
+the head of the queue. The Queue access method uses record level locking.
+<h3>Recno</h3>
+<p>The Recno access method stores both fixed and variable-length records with
+logical record numbers as keys, optionally backed by a flat text (byte
+stream) file.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/select.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/logrec.html b/libdb/docs/ref/am_conf/logrec.html
new file mode 100644
index 0000000..a19e961
--- /dev/null
+++ b/libdb/docs/ref/am_conf/logrec.html
@@ -0,0 +1,123 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Logical record numbers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/select.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/pagesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Logical record numbers</h1>
+<p>The Berkeley DB Btree, Queue and Recno access methods can operate on logical
+record numbers. Record numbers are 1-based, not 0-based; that is, the
+first record in a database is record number 1.
+<p>In all cases for the Queue and Recno access methods, and when calling
+the Btree access method using the <a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods
+with the <a href="../../api_c/db_get.html#DB_SET_RECNO">DB_SET_RECNO</a> flag specified, the <b>data</b> field of
+the key <a href="../../api_c/dbt_class.html">DBT</a> must be a pointer to a memory location of type
+<b>db_recno_t</b>, as typedef'd in the standard Berkeley DB include file.
+The <b>size</b> field of the key <a href="../../api_c/dbt_class.html">DBT</a> should be the size of that
+type (for example, "sizeof(db_recno_t)" in the C programming language).
+The <b>db_recno_t</b> type is a 32-bit unsigned type, which limits the
+number of logical records in a Queue or Recno database, and the maximum
+logical record which may be directly retrieved from a Btree database,
+to 4,294,967,295.
+<p>Record numbers in Queue databases wrap around. When the tail of the
+queue reaches the maximum record number, the next record appended will
+be given record number 1. If the head of the queue ever catches up to
+the tail of the queue, the Berkeley DB interface will return the system error
+EFBIG.
+<p>Record numbers in Recno databases can be configured to run in either
+mutable or fixed mode: mutable, where logical record numbers change as
+records are deleted or inserted, and fixed, where record numbers never
+change regardless of the database operation. Record numbers in Queue
+databases are always fixed, and never change regardless of the database
+operation. Record numbers in Btree databases are always mutable, and
+as records are deleted or inserted, the logical record number for other
+records in the database can change. See
+<a href="../../ref/am_conf/renumber.html">Logically renumbering records</a>
+for more information.
+<p>Configuring Btree databases to support record numbers can severely limit
+the throughput of applications with multiple concurrent threads writing
+the database, because locations used to store record counts often become
+hot spots that many different threads all need to update. In the case
+of a Btree supporting duplicate data items, the logical record number
+refers to a key and all of its data items, as duplicate data items are
+not individually numbered.
+<p>The following is an example function that reads records from standard
+input and stores them into a Recno database. The function then uses a
+cursor to step through the database and display the stored records.
+<p><blockquote><pre>int
+recno_build(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ db_recno_t recno;
+ u_int32_t len;
+ int ret;
+ char buf[1024];
+<p>
+ /* Insert records into the database. */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (recno = 1;; ++recno) {
+ printf("record #%lu&gt; ", (u_long)recno);
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if ((len = strlen(buf)) &lt;= 1)
+ continue;
+<p>
+ key.data = &recno;
+ key.size = sizeof(recno);
+ data.data = buf;
+ data.size = len - 1;
+<p>
+ switch (ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ break;
+ default:
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ break;
+ }
+ }
+ printf("\n");
+<p>
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ return (1);
+ }
+<p>
+ /* Re-initialize the key/data pair. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+<p>
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp-&gt;c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%lu : %.*s\n",
+ *(u_long *)key.data, (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;get");
+<p>
+ /* Close the cursor. */
+ if ((ret = dbcp-&gt;c_close(dbcp)) != 0) {
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;close");
+ return (1);
+ }
+ return (0);
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/select.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/pagesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/malloc.html b/libdb/docs/ref/am_conf/malloc.html
new file mode 100644
index 0000000..c6b817a
--- /dev/null
+++ b/libdb/docs/ref/am_conf/malloc.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Non-local memory allocation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Non-local memory allocation</h1>
+<p>Berkeley DB allocates memory for returning key/data pairs and statistical
+information, and that memory then becomes the responsibility of the application.
+There are also interfaces where an application will allocate memory
+that then becomes the responsibility of Berkeley DB.
+<p>On systems in which there may be multiple library versions of the
+standard allocation routines (notably Windows NT), transferring memory
+between the library and the application will fail because the Berkeley DB
+library allocates memory from a different heap than the application
+uses to free it, or vice versa. To avoid this problem, the
+<a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> and <a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a> methods can be used to
+give Berkeley DB references to the application's allocation routines.
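+<p>As a sketch, an application might hand its own allocator to the library
+when creating the environment; here the C library routines are passed,
+which is the useful arrangement when the library would otherwise
+allocate from a different heap (error handling abbreviated):
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+<p>
+int
+create_env(dbenvp)
+	DB_ENV **dbenvp;
+{
+	DB_ENV *dbenv;
+	int ret;
+<p>
+	if ((ret = db_env_create(&dbenv, 0)) != 0)
+		return (ret);
+	/* Have Berkeley DB allocate and free through the application's heap. */
+	if ((ret = dbenv-&gt;set_alloc(dbenv, malloc, realloc, free)) != 0) {
+		(void)dbenv-&gt;close(dbenv, 0);
+		return (ret);
+	}
+	*dbenvp = dbenv;
+	return (0);
+}</pre></blockquote>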
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/bt_compare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/pagesize.html b/libdb/docs/ref/am_conf/pagesize.html
new file mode 100644
index 0000000..5990860
--- /dev/null
+++ b/libdb/docs/ref/am_conf/pagesize.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting a page size</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/logrec.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/cachesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting a page size</h1>
+<p>The size of the pages used in the underlying database can be specified by
+calling the <a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a> method. The page size must be a power of two,
+at least 512 bytes and at most 64K bytes. If
+no page size is specified by the application, a page size is selected
+based on the underlying filesystem I/O block size. (A page size selected
+in this way has a lower limit of 512 bytes and an upper limit of 16K
+bytes.)
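+<p>As a sketch (the value is illustrative, and is subject to the
+considerations below), the page size is set on the handle before the
+database is created:
+<p><blockquote><pre>int
+set_page_size(dbp)
+	DB *dbp;
+{
+	/* Use 8KB pages; the value must be a power of two. */
+	return (dbp-&gt;set_pagesize(dbp, 8 * 1024));
+}</pre></blockquote>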
+<p>There are several issues to consider when selecting a page size: overflow
+record sizes, locking, I/O efficiency, and recoverability.
+<p>First, the page size implicitly sets the size of an overflow record.
+Overflow records are key or data items that are too large to fit on a
+normal database page because of their size, and are therefore stored in
+overflow pages. Overflow pages are pages that exist outside of the normal
+database structure. For this reason, there is often a significant
+performance penalty associated with retrieving or modifying overflow
+records. Selecting a page size that is too small, and which forces the
+creation of large numbers of overflow pages, can seriously impact the
+performance of an application.
+<p>Second, in the Btree, Hash and Recno access methods, the finest-grained
+lock that Berkeley DB acquires is for a page. (The Queue access method
+generally acquires record-level locks rather than page-level locks.)
+Selecting a page size that is too large, and which causes threads or
+processes to wait because other threads of control are accessing or
+modifying records on the same page, can impact the performance of your
+application.
+<p>Third, the page size specifies the granularity of I/O from the database
+to the operating system. Berkeley DB will give a page-sized unit of bytes to
+the operating system to be scheduled for reading/writing from/to the
+disk. For many operating systems, there is an internal <b>block
+size</b> which is used as the granularity of I/O from the operating system
+to the disk. Generally, it will be more efficient for Berkeley DB to write
+filesystem-sized blocks to the operating system and for the operating
+system to write those same blocks to the disk.
+<p>Selecting a database page size smaller than the filesystem block size
+may cause the operating system to coalesce or otherwise manipulate Berkeley DB
+pages and can impact the performance of your application. When the page
+size is smaller than the filesystem block size and a page written by
+Berkeley DB is not found in the operating system's cache, the operating system
+may be forced to read a block from the disk, copy the page into the
+block it read, and then write out the block to disk, rather than simply
+writing the page to disk. Additionally, as the operating system is
+reading more data into its buffer cache than is strictly necessary to
+satisfy each Berkeley DB request for a page, the operating system buffer cache
+may be wasting memory.
+<p>Alternatively, selecting a page size larger than the filesystem block
+size may cause the operating system to read more data than necessary.
+On some systems, reading filesystem blocks sequentially may cause the
+operating system to begin performing read-ahead. If requesting a single
+database page implies reading enough filesystem blocks to satisfy the
+operating system's criteria for read-ahead, the operating system may do
+more I/O than is required.
+<p>Fourth, when using the Berkeley DB Transactional Data Store product, the page size may affect the errors
+from which your database can recover. See
+<a href="../../ref/transapp/reclimit.html">Berkeley DB Recoverability</a> for more
+information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/logrec.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/cachesize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/re_source.html b/libdb/docs/ref/am_conf/re_source.html
new file mode 100644
index 0000000..d28f9fa
--- /dev/null
+++ b/libdb/docs/ref/am_conf/re_source.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Flat-text backing files</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/extentsize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Flat-text backing files</h1>
+<p>It is possible to back any Recno database (either fixed or variable
+length) with a flat-text source file. This provides fast read (and
+potentially write) access to databases that are normally created and
+stored as flat-text files. The backing source file may be specified by
+calling the <a href="../../api_c/db_set_re_source.html">DB-&gt;set_re_source</a> method.
+<p>The backing source file will be read to initialize the database. In the
+case of variable length records, the records are assumed to be separated
+as described for the <a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a> method interface. For example,
+standard UNIX byte stream files can be interpreted as a sequence of
+variable length records separated by ASCII newline characters. This is
+the default.
+<p>When cached data would normally be written back to the underlying
+database file (for example, when the <a href="../../api_c/db_close.html">DB-&gt;close</a> or
+<a href="../../api_c/db_sync.html">DB-&gt;sync</a> methods are called), the in-memory copy of the database will
+be written back to the backing source file.
+<p>The backing source file must already exist (but may be zero-length) when
+<a href="../../api_c/db_open.html">DB-&gt;open</a> is called. By default, the backing source file is read
+lazily, that is, records are not read from the backing source file until
+they are requested by the application. If multiple processes (not
+threads) are accessing a Recno database concurrently and either
+inserting or deleting records, the backing source file must be read in
+its entirety before more than a single process accesses the database,
+and only that process should specify the backing source file as part of
+the <a href="../../api_c/db_open.html">DB-&gt;open</a> call. This can be accomplished by calling the
+<a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method with the <a href="../../api_c/db_set_flags.html#DB_SNAPSHOT">DB_SNAPSHOT</a> flag.
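+<p>As a sketch only (the filenames are placeholders and error handling is
+abbreviated), a Recno database might be configured with a backing source
+file that is read in its entirety at open time:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+    return (ret);
+<p>
+/* Name the backing source file and read it completely at DB-&gt;open time. */
+if ((ret = dbp-&gt;set_re_source(dbp, "records.txt")) != 0 ||
+    (ret = dbp-&gt;set_flags(dbp, DB_SNAPSHOT)) != 0) {
+    dbp-&gt;err(dbp, ret, "configuring the backing source file");
+    (void)dbp-&gt;close(dbp, 0);
+    return (ret);
+}
+<p>
+if ((ret = dbp-&gt;open(dbp,
+    NULL, "records.db", NULL, DB_RECNO, DB_CREATE, 0664)) != 0)
+    return (ret);</pre></blockquote>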
+<p>Reading and writing the backing source file cannot be transactionally
+protected because it involves filesystem operations that are not part of
+the Berkeley DB transaction methodology. For this reason, if a temporary
+database is used to hold the records (a NULL was specified as the file
+argument to <a href="../../api_c/db_open.html">DB-&gt;open</a>), <b>it is possible to lose the
+contents of the backing source file if the system crashes at the right
+instant</b>. If a permanent file is used to hold the database (a filename
+was specified as the file argument to <a href="../../api_c/db_open.html">DB-&gt;open</a>), normal database
+recovery on that file can be used to prevent information loss. It is
+still possible that the contents of the backing source file itself will
+be corrupted or lost if the system crashes.
+<p>For all of the above reasons, the backing source file is generally used
+to specify databases that are read-only for Berkeley DB applications, and that
+are either generated on the fly by software tools, or modified using a
+different mechanism such as a text editor.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/extentsize.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/recno.html b/libdb/docs/ref/am_conf/recno.html
new file mode 100644
index 0000000..b29f9e7
--- /dev/null
+++ b/libdb/docs/ref/am_conf/recno.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Managing record-based databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/extentsize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Managing record-based databases</h1>
+<p>When using fixed- or variable-length record-based databases, particularly
+with flat-text backing files, there are several items that the user can
+control. The Recno access method can be used to store either variable-
+or fixed-length data items. By default, the Recno access method stores
+variable-length data items. The Queue access method can only store
+fixed-length data items.
+<h3>Record Delimiters</h3>
+<p>When using the Recno access method to store variable-length records,
+records read from any backing source file are separated by a specific
+byte value which marks the end of one record and the beginning of the
+next. This delimiting value is ignored except when reading records from
+a backing source file, that is, records may be stored into the database
+that include the delimiter byte. However, if such records are written
+out to the backing source file and the backing source file is
+subsequently read into a database, the records will be split where
+delimiting bytes were found.
+<p>For example, UNIX text files can usually be interpreted as a sequence of
+variable-length records separated by ASCII newline characters. This byte
+value (ASCII 0x0a) is the default delimiter. Applications may specify a
+different delimiting byte using the <a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a> interface.
+If no backing source file is being used, there is no reason to set the
+delimiting byte value.
+<h3>Record Length</h3>
+<p>When using the Recno or Queue access methods to store fixed-length
+records, the record length must be specified. Since the Queue access
+method always uses fixed-length records, the user must always set the
+record length prior to creating the database. Setting the record length
+is what causes the Recno access method to store fixed-length, not
+variable-length, records.
+<p>The length of the records is specified by calling the
+<a href="../../api_c/db_set_re_len.html">DB-&gt;set_re_len</a> method. The default length of the records is 0 bytes.
+Any record read from a backing source file or otherwise stored in the
+database that is shorter than the declared length will automatically be
+padded as described for the <a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> method. Any record stored
+that is longer than the declared length results in an error. For
+further information on backing source files, see
+<a href="../../ref/am_conf/re_source.html">Flat-text backing files</a>.
+<h3>Record Padding Byte Value</h3>
+<p>When storing fixed-length records in a Queue or Recno database, a pad
+character may be specified by calling the <a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a> method. Any
+record read from the backing source file or otherwise stored in the
+database that is shorter than the expected length will automatically be
+padded with this byte value. If fixed-length records are specified but
+no pad value is specified, a space character (0x20 in the ASCII
+character set) will be used. For further information on backing source
+files, see <a href="../../ref/am_conf/re_source.html">Flat-text backing
+files</a>.
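+<p>Putting these settings together, a minimal sketch (the 64-byte length and
+nul pad byte are arbitrary examples, and error handling is abbreviated) of
+configuring a Recno database for fixed-length records follows:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+    return (ret);
+<p>
+/* Fixed-length records: set the length and pad byte before DB-&gt;open. */
+if ((ret = dbp-&gt;set_re_len(dbp, 64)) != 0 ||
+    (ret = dbp-&gt;set_re_pad(dbp, '\0')) != 0) {
+    dbp-&gt;err(dbp, ret, "configuring fixed-length records");
+    (void)dbp-&gt;close(dbp, 0);
+    return (ret);
+}</pre></blockquote>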
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/h_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/extentsize.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/renumber.html b/libdb/docs/ref/am_conf/renumber.html
new file mode 100644
index 0000000..6a039c0
--- /dev/null
+++ b/libdb/docs/ref/am_conf/renumber.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Logically renumbering records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/re_source.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/ops.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Logically renumbering records</h1>
+<p>Records stored in the Queue and Recno access methods are accessed by
+logical record number. In all cases in Btree databases, and optionally
+in Recno databases (see the <a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method and the
+<a href="../../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag for more information), record numbers are
+mutable. This means that the record numbers may change as records are
+added to and deleted from the database. The deletion of record number
+4 causes any records numbered 5 and higher to be renumbered downward by
+1; the addition of a new record after record number 4 causes any
+records numbered 5 and higher to be renumbered upward by 1. In all
+cases in Queue databases, and by default in Recno databases, record
+numbers are not mutable, and the addition or deletion of records to the
+database will not cause already-existing record numbers to change. For
+this reason, new records cannot be inserted between already-existing
+records in databases with immutable record numbers.
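+<p>For example, given a <a href="../../api_c/db_class.html">DB</a> handle that has been created but not yet
+opened (a sketch only, with error handling abbreviated), mutable record
+numbers are requested as follows:
+<p><blockquote><pre>int ret;
+<p>
+/* Renumber records as records are inserted and deleted. */
+if ((ret = dbp-&gt;set_flags(dbp, DB_RENUMBER)) != 0) {
+    dbp-&gt;err(dbp, ret, "DB-&gt;set_flags: DB_RENUMBER");
+    return (1);
+}</pre></blockquote>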
+<p>Cursors pointing into a Btree database or a Recno database with mutable
+record numbers maintain a reference to a specific record, rather than
+a record number, that is, the record they reference does not change as
+other records are added or deleted. For example, if a database contains
+three records with the record numbers 1, 2, and 3, and the data items
+"A", "B", and "C", respectively, the deletion of record number 2 ("B")
+will cause the record "C" to be renumbered downward to record number 2.
+A cursor positioned at record number 3 ("C") will be adjusted and
+continue to point to "C" after the deletion. Similarly, a cursor
+previously referring to the now deleted record number 2 will be
+positioned between the new record numbers 1 and 2, and an insertion
+using that cursor will appear between those records. In this manner,
+records can be added to and deleted from a database without disrupting
+sequential traversal of the database by a cursor.
+<p>Only cursors created using a single <a href="../../api_c/db_class.html">DB</a> handle can adjust each
+other's position in this way, however. If multiple <a href="../../api_c/db_class.html">DB</a> handles
+have a renumbering Recno database open simultaneously (as when multiple
+processes share a single database environment), a record referred to by
+one cursor could change underfoot if a cursor created using another
+<a href="../../api_c/db_class.html">DB</a> handle inserts or deletes records into the database. For
+this reason, applications using Recno databases with mutable record
+numbers will usually make all accesses to the database using a single
+<a href="../../api_c/db_class.html">DB</a> handle and cursors created from that handle, or will
+otherwise single-thread access to the database, for example, by using
+the Berkeley DB Concurrent Data Store product.
+<p>In any Queue or Recno database, creating new records will cause the
+creation of multiple records if the record number being created is more
+than one greater than the largest record currently in the database. For
+example, creating record number 28, when record 25 was previously the
+last record in the database, will implicitly create records 26 and 27
+as well as 28. All first, last, next and previous cursor operations
+will automatically skip over these implicitly created records. So, if
+record number 5 is the only record the application has created,
+implicitly creating records 1 through 4, the <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> interface
+with the <a href="../../api_c/dbc_get.html#DB_FIRST">DB_FIRST</a> flag will return record number 5, not record
+number 1. Attempts to explicitly retrieve implicitly created records
+by their record number will result in a special error return,
+<a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>.
+<p>In any Berkeley DB database, attempting to retrieve a deleted record, using
+a cursor positioned on the record, results in a special error return,
+<a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>. In addition, when using Queue databases or Recno
+databases with immutable record numbers, attempting to retrieve a deleted
+record by its record number will also result in the <a href="../../ref/program/errorret.html#DB_KEYEMPTY">DB_KEYEMPTY</a>
+return.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/re_source.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am/ops.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_conf/select.html b/libdb/docs/ref/am_conf/select.html
new file mode 100644
index 0000000..789eb5c
--- /dev/null
+++ b/libdb/docs/ref/am_conf/select.html
@@ -0,0 +1,118 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Selecting an access method</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_conf/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/logrec.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Selecting an access method</h1>
+<p>The Berkeley DB access method implementation unavoidably interacts with each
+application's data set, locking requirements and data access patterns.
+For this reason, one access method may result in dramatically better
+performance for an application than another one. Applications whose data
+could be stored using more than one access method may want to benchmark
+their performance using the different candidates.
+<p>One of the strengths of Berkeley DB is that it provides multiple access methods
+with nearly identical interfaces. This
+means that it is simple to modify an application to use a different access
+method. Applications can easily benchmark the different Berkeley DB access
+methods against each other for their particular data set and access pattern.
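+<p>Concretely (a sketch only; the filename is a placeholder), switching
+between access methods is often just a matter of changing the type argument
+passed to <a href="../../api_c/db_open.html">DB-&gt;open</a>:
+<p><blockquote><pre>/* Open the database as a Btree ... */
+ret = dbp-&gt;open(dbp, NULL, "data.db", NULL, DB_BTREE, DB_CREATE, 0664);
+<p>
+/* ... or, with no other source changes, as a Hash database. */
+ret = dbp-&gt;open(dbp, NULL, "data.db", NULL, DB_HASH, DB_CREATE, 0664);</pre></blockquote>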
+<p>Most applications choose between using the Btree or Hash access methods
+or between using the Queue and Recno access methods, because each of the
+two pairs offers similar functionality.
+<h3>Hash or Btree?</h3>
+<p>The Hash and Btree access methods should be used when logical record
+numbers are not the primary key used for data access. (If logical record
+numbers are a secondary key used for data access, the Btree access method
+is a possible choice, as it supports simultaneous access by a key and a
+record number.)
+<p>Keys in Btrees are stored in sorted order and the relationship between
+them is defined by that sort order. For this reason, the Btree access
+method should be used when there is any locality of reference among keys.
+Locality of reference means that accessing one particular key in the
+Btree implies that the application is more likely to access keys near to
+the key being accessed, where "near" is defined by the sort order. For
+example, if keys are timestamps, and it is likely that a request for an
+8AM timestamp will be followed by a request for a 9AM timestamp, the
+Btree access method is generally the right choice. Or, for example, if
+the keys are names, and the application will want to review all entries
+with the same last name, the Btree access method is again a good choice.
+<p>There is little difference in performance between the Hash and Btree
+access methods on small data sets, where all, or most of, the data set
+fits into the cache. However, when a data set is large enough that
+significant numbers of data pages no longer fit into the cache, then
+the Btree locality of reference described previously becomes important
+for performance reasons. For example, there is no locality of reference
+for the Hash access method, and so key "AAAAA" is as likely to be stored
+on the same database page with key "ZZZZZ" as with key "AAAAB". In the
+Btree access method, because items are sorted, key "AAAAA" is far more
+likely to be near key "AAAAB" than key "ZZZZZ". So, if the application
+exhibits locality of reference in its data requests, then the Btree page
+read into the cache to satisfy a request for key "AAAAA" is much more
+likely to be useful to satisfy subsequent requests from the application
+than the Hash page read into the cache to satisfy the same request.
+This means that for applications with locality of reference, the cache
+is generally much more effective for the Btree access method than the
+Hash access method, and the Btree access method will make many fewer
+I/O calls.
+<p>However, when a data set becomes even larger, the Hash access method can
+outperform the Btree access method. The reason for this is that Btrees
+contain more metadata pages than Hash databases. The data set can grow
+so large that metadata pages begin to dominate the cache for the Btree
+access method. If this happens, the Btree can be forced to do an I/O
+for each data request because the probability that any particular data
+page is already in the cache becomes quite small. Because the Hash access
+method has fewer metadata pages, its cache stays "hotter" longer in the
+presence of large data sets. In addition, once the data set is so large
+that both the Btree and Hash access methods are almost certainly doing
+an I/O for each random data request, the fact that Hash does not have to
+walk several internal pages as part of a key search becomes a performance
+advantage for the Hash access method as well.
+<p>Application data access patterns strongly affect all of these behaviors;
+for example, accessing the data by walking a cursor through the database
+will greatly mitigate the large data set behavior described above because
+each I/O into the cache will satisfy a fairly large number of subsequent
+data requests.
+<p>In the absence of information on application data and data access
+patterns, for small data sets either the Btree or Hash access methods
+will suffice. For data sets larger than the cache, we normally recommend
+using the Btree access method. If you have truly large data, then the
+Hash access method may be a better choice. The <a href="../../utility/db_stat.html">db_stat</a> utility
+is a useful tool for monitoring how well your cache is performing.
+<h3>Queue or Recno?</h3>
+<p>The Queue or Recno access methods should be used when logical record
+numbers are the primary key used for data access. The advantage of the
+Queue access method is that it performs record level locking and for this
+reason supports significantly higher levels of concurrency than the Recno
+access method. The advantage of the Recno access method is that it
+supports a number of additional features beyond those supported by the
+Queue access method, such as variable-length records and support for
+backing flat-text files.
+<p>Logical record numbers can be mutable or fixed: mutable, where logical
+record numbers can change as records are deleted or inserted, and fixed,
+where record numbers never change regardless of the database operation.
+It is possible to store and retrieve records based on logical record
+numbers in the Btree access method. However, those record numbers are
+always mutable, and as records are deleted or inserted, the logical record
+number for other records in the database will change. The Queue access
+method always runs in fixed mode, and logical record numbers never change
+regardless of the database operation. The Recno access method can be
+configured to run in either mutable or fixed mode.
+<p>In addition, the Recno access method provides support for databases whose
+permanent storage is a flat text file, with the database used as a fast,
+temporary storage area while the data is being read or modified.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_conf/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/logrec.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/align.html b/libdb/docs/ref/am_misc/align.html
new file mode 100644
index 0000000..8c9e503
--- /dev/null
+++ b/libdb/docs/ref/am_misc/align.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Data alignment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am/curclose.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/get_bulk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Data alignment</h1>
+<p>The Berkeley DB access methods provide no guarantees about byte alignment for
+returned key/data pairs, or for the key/data pairs passed to callback functions
+that take <a href="../../api_c/dbt_class.html">DBT</a> references as arguments; applications are responsible for
+arranging any necessary alignment. The <a href="../../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> and <a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags may be used to
+store returned items in memory of arbitrary alignment.
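+<p>For illustration (the structure and key are placeholders), one common
+approach is to have the library allocate the returned item with malloc,
+which returns suitably aligned memory, so the result can safely be cast to
+an application structure:
+<p><blockquote><pre>struct my_record {    /* Hypothetical application record layout. */
+    u_int32_t id;
+    double balance;
+};
+DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "somekey";
+key.size = 7;
+<p>
+/* Have Berkeley DB allocate suitably aligned memory for the returned item. */
+data.flags = DB_DBT_MALLOC;
+<p>
+if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0) {
+    struct my_record *rp = (struct my_record *)data.data;
+    /* ... use rp ... */
+    free(data.data);    /* The application must free the returned memory. */
+}</pre></blockquote>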
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am/curclose.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/get_bulk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/dbsizes.html b/libdb/docs/ref/am_misc/dbsizes.html
new file mode 100644
index 0000000..3705eb5
--- /dev/null
+++ b/libdb/docs/ref/am_misc/dbsizes.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/stability.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/diskspace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database limits</h1>
+<p>The largest database file that Berkeley DB can handle depends on the page size
+selected by the application. Berkeley DB stores database file page numbers as
+unsigned 32-bit numbers and database file page sizes as unsigned 16-bit
+numbers. Using the maximum database page size of 65536, this results in
+a maximum database file size of 2<sup>48</sup> (256 terabytes). The
+minimum database page size is 512 bytes, which results in a minimum
+maximum database size of 2<sup>41</sup> (2 terabytes).
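+<p>Restated as a calculation, with a maximum of 2<sup>32</sup> pages per file:
+<p><blockquote><pre>maximum-file-size = page-size * maximum-number-of-pages
+<p>
+256TB = 65536 * 4294967296
+  2TB =   512 * 4294967296</pre></blockquote>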
+<p>The largest database file Berkeley DB can support is potentially further limited
+if the host system does not have filesystem support for files larger than
+2<sup>32</sup> bytes, including the ability to seek to absolute offsets within
+those files.
+<p>The largest key or data item that Berkeley DB can support is largely limited
+by available memory. Specifically, while key and data byte strings may
+be of essentially unlimited length, any one of them must fit into
+available memory so that it can be returned to the application. As some
+of the Berkeley DB interfaces return both key and data items to the application,
+those interfaces will require that any key/data pair fit simultaneously
+into memory. Further, as the access methods may need to compare key and
+data items with other key and data items, it may be a requirement that
+any two key or two data items fit into available memory. Finally, when
+writing applications supporting transactions, it may be necessary to have
+an additional copy of any data item in memory for logging purposes.
+<p>The maximum Btree depth is 255.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/stability.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/diskspace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/diskspace.html b/libdb/docs/ref/am_misc/diskspace.html
new file mode 100644
index 0000000..0c7866f
--- /dev/null
+++ b/libdb/docs/ref/am_misc/diskspace.html
@@ -0,0 +1,149 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Disk space requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/dbsizes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/tune.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Disk space requirements</h1>
+<p>It is possible to estimate the total database size based on the size of
+the data. The following calculations are an estimate of how many bytes
+you will need to hold a set of data and then how many pages it will take
+to actually store it on disk.
+<p>Space freed by deleting key/data pairs from a Btree or Hash database is
+never returned to the filesystem, although it is reused where possible.
+This means that the Btree and Hash databases are grow-only. If enough
+keys are deleted from a database that shrinking the underlying file is
+desirable, you should create a new database and copy the records from
+the old one into it.
+<p>These are rough estimates at best. For example, they do not take into
+account overflow records, filesystem metadata information, large sets
+of duplicate data items (where the key is only stored once), or
+real-life situations where the sizes of key and data items are wildly
+variable, and the page-fill factor changes over time.
+<h3>Btree</h3>
+<p>The formulas for the Btree access method are as follows:
+<p><blockquote><pre>useful-bytes-per-page = (page-size - page-overhead) * page-fill-factor
+<p>
+bytes-of-data = n-records *
+ (bytes-per-entry + page-overhead-for-two-entries)
+<p>
+n-pages-of-data = bytes-of-data / useful-bytes-per-page
+<p>
+total-bytes-on-disk = n-pages-of-data * page-size
+</pre></blockquote>
+<p>The <b>useful-bytes-per-page</b> is a measure of the bytes on each page
+that will actually hold the application data. It is computed as the total
+number of bytes on the page that are available to hold application data,
+corrected by the percentage of the page that is likely to contain data.
+The reason for this correction is that the percentage of a page that
+contains application data can vary from close to 50% after a page split
+to almost 100% if the entries in the database were inserted in sorted
+order. Obviously, the <b>page-fill-factor</b> can drastically alter
+the amount of disk space required to hold any particular data set. The
+page-fill factor of any existing database can be displayed using the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The page-overhead for Btree databases is 26 bytes. As an example, using
+an 8K page size, with an 85% page-fill factor, there are 6941 bytes of
+useful space on each page:
+<p><blockquote><pre>6941 = (8192 - 26) * .85</pre></blockquote>
+<p>The total <b>bytes-of-data</b> is an easy calculation: It is the
+number of key or data items plus the overhead required to store each
+item on a page. The overhead to store a key or data item on a Btree
+page is 5 bytes. So, it would take 1560000000 bytes, or roughly 1.45GB
+of total data to store 60,000,000 key/data pairs, assuming each key or
+data item was 8 bytes long:
+<p><blockquote><pre>1560000000 = 60000000 * ((8 + 5) * 2)</pre></blockquote>
+<p>The total pages of data, <b>n-pages-of-data</b>, is the
+<b>bytes-of-data</b> divided by the <b>useful-bytes-per-page</b>. In
+the example, there are 224751 pages of data.
+<p><blockquote><pre>224751 = 1560000000 / 6941</pre></blockquote>
+<p>The total bytes of disk space for the database is <b>n-pages-of-data</b>
+multiplied by the <b>page-size</b>. In the example, the result is
+1841160192 bytes, or roughly 1.71GB.
+<p><blockquote><pre>1841160192 = 224751 * 8192</pre></blockquote>
+<h3>Hash</h3>
+<p>The formulas for the Hash access method are as follows:
+<p><blockquote><pre>useful-bytes-per-page = (page-size - page-overhead)
+<p>
+bytes-of-data = n-records *
+ (bytes-per-entry + page-overhead-for-two-entries)
+<p>
+n-pages-of-data = bytes-of-data / useful-bytes-per-page
+<p>
+total-bytes-on-disk = n-pages-of-data * page-size
+</pre></blockquote>
+<p>The <b>useful-bytes-per-page</b> is a measure of the bytes on each page
+that will actually hold the application data. It is computed as the total
+number of bytes on the page that are available to hold application data.
+If the application has explicitly set a page-fill factor, pages will
+not necessarily be kept full. For databases with a preset fill factor,
+see the calculation below. The page-overhead for Hash databases is 26
+bytes and the page-overhead-for-two-entries is 6 bytes.
+<p>As an example, using an 8K page size, there are 8166 bytes of useful space
+on each page:
+<p><blockquote><pre>8166 = (8192 - 26)</pre></blockquote>
+<p>The total <b>bytes-of-data</b> is an easy calculation: it is the number
+of key/data pairs plus the overhead required to store each pair on a page.
+In this case that's 6 bytes per pair. So, assuming 60,000,000 key/data
+pairs, each of which is 8 bytes long, there are 1320000000 bytes, or
+roughly 1.23GB of total data:
+<p><blockquote><pre>1320000000 = 60000000 * (16 + 6)</pre></blockquote>
+<p>The total pages of data, <b>n-pages-of-data</b>, is the
+<b>bytes-of-data</b> divided by the <b>useful-bytes-per-page</b>. In
+this example, there are 161646 pages of data.
+<p><blockquote><pre>161646 = 1320000000 / 8166</pre></blockquote>
+<p>The total bytes of disk space for the database is <b>n-pages-of-data</b>
+multiplied by the <b>page-size</b>. In the example, the result is
+1324204032 bytes, or roughly 1.23GB.
+<p><blockquote><pre>1324204032 = 161646 * 8192</pre></blockquote>
+<p>Now, let's assume that the application specified a fill factor explicitly.
+The fill factor indicates the target number of items to place on a single
+page (a fill factor might reduce the utilization of each page, but it can
+be useful in avoiding splits and preventing buckets from becoming too
+large). Using our estimates above, each item is 22 bytes (16 + 6), and
+there are 8166 useful bytes on a page (8192 - 26). That means that, on
+average, you can fit 371 pairs per page.
+<p><blockquote><pre>371 = 8166 / 22</pre></blockquote>
+<p>However, let's assume that the application designer knows that although
+most items are 8 bytes, they can sometimes be as large as 10, and it's
+very important to avoid overflowing buckets and splitting. Then, the
+application might specify a fill factor of 314.
+<p><blockquote><pre>314 = 8166 / 26</pre></blockquote>
+<p>With a fill factor of 314, the formula for computing database size
+is
+<p><blockquote><pre>n-pages-of-data = npairs / pairs-per-page</pre></blockquote>
+<p>or 191082.
+<p><blockquote><pre>191082 = 60000000 / 314</pre></blockquote>
+<p>At 191082 pages, the total database size would be 1565343744, or 1.46GB.
+<p><blockquote><pre>1565343744 = 191082 * 8192</pre></blockquote>
+<p>There are a few additional caveats with respect to Hash databases. First,
+this discussion assumes that the hash function does a good job of evenly
+distributing keys among hash buckets. If the function does not do this,
+you may find your table growing significantly larger than you expected.
+Second, in order to provide support for Hash databases coexisting with
+other databases in a single file, pages within a Hash database are
+allocated in power-of-two chunks. That means that a Hash database with 65
+buckets will take up as much space as a Hash database with 128 buckets;
+each time the Hash database grows beyond its current power-of-two number
+of buckets, it allocates space for the next power-of-two buckets. This
+space may be sparsely allocated in the file system, but the files will
+appear to be their full size. Finally, because of this need for
+contiguous allocation, overflow pages and duplicate pages can be allocated
+only at specific points in the file, and this too can lead to sparse hash
+tables.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/dbsizes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/tune.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/error.html b/libdb/docs/ref/am_misc/error.html
new file mode 100644
index 0000000..b16b046
--- /dev/null
+++ b/libdb/docs/ref/am_misc/error.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error support</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/perm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/stability.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error support</h1>
+<p>Berkeley DB offers programmatic support for displaying error return values.
+<p>The <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return, similar to the ANSI C
+strerror interface, but is able to handle both system error returns and
+Berkeley DB specific return values.
+<p>For example:
+<p><blockquote><pre>int ret;
+if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) != 0) {
+ fprintf(stderr, "put failed: %s\n", db_strerror(ret));
+ return (1);
+}</pre></blockquote>
+<p>There are also two additional error interfaces, <a href="../../api_c/db_err.html">DB-&gt;err</a> and
+<a href="../../api_c/db_err.html">DB-&gt;errx</a>. These interfaces work like the ANSI C X3.159-1989 (ANSI C) printf
+interface, taking a printf-style format string and argument list, and
+writing a message constructed from the format string and arguments.
+<p>The <a href="../../api_c/db_err.html">DB-&gt;err</a> method appends the standard error string to the constructed
+message; the <a href="../../api_c/db_err.html">DB-&gt;errx</a> method does not. These interfaces provide simpler
+ways of displaying Berkeley DB error messages. For example, if your application
+tracks session IDs in a variable called session_id, it can include that
+information in its error messages.
+<p>Error messages can additionally be configured to always include a prefix
+(for example, the program name) using the <a href="../../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a> interface, as in
+the following example:
+<p><blockquote><pre>#define DATABASE "access.db"
+<p>
+int ret;
+<p>
+(void)dbp-&gt;set_errpfx(dbp, program_name);
+<p>
+if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ dbp-&gt;errx(dbp,
+ "contact your system administrator: session ID was %d",
+ session_id);
+ return (1);
+}</pre></blockquote>
+<p>For example, if the program were called my_app and the open call returned
+an EACCES system error, the error messages shown would appear as follows:
+<p><blockquote><pre>my_app: access.db: Permission denied.
+my_app: contact your system administrator: session ID was 14</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/perm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/stability.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/faq.html b/libdb/docs/ref/am_misc/faq.html
new file mode 100644
index 0000000..d0261c9
--- /dev/null
+++ b/libdb/docs/ref/am_misc/faq.html
@@ -0,0 +1,130 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Access method FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/tune.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/bigpic.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Access method FAQ</h1>
+<p><ol>
+<p><li><b>Is a Berkeley DB database the same as a "table"?</b>
+<p>Yes; "tables" are databases, "rows" are key/data pairs, and "columns"
+are application-encapsulated fields within a data item (to which Berkeley DB
+does not directly provide access).
+<p><li><b>I'm getting an error return in my application, but I can't
+figure out what the library is complaining about.</b>
+<p>See <a href="../../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>, <a href="../../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a> and
+<a href="../../api_c/db_set_errfile.html">DB-&gt;set_errfile</a> for ways to get additional information about
+error returns from Berkeley DB.
+<p><li><b>Are Berkeley DB databases portable between architectures?</b>
+<p>Yes. See <a href="../../ref/am_conf/byteorder.html">Selecting a byte
+order</a> for more information.
+<p><li><b>I'm seeing database corruption when creating multiple databases
+in a single physical file.</b>
+<p>This problem is usually the result of <a href="../../api_c/db_class.html">DB</a> handles not sharing an
+underlying database environment. See <a href="../../ref/am/opensub.html">Opening multiple databases in a single file</a> for more information.
+<a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<p><li><b>Is there any way to compact databases, or return unused
+database pages to the filesystem?</b>
+<p>When Berkeley DB database pages are emptied, they are made available for other
+uses, that is, new pages will not be allocated from the underlying
+filesystem as long as there are unused pages available. However, the
+pages cannot be returned to the filesystem without dumping the database,
+removing the physical file, and reloading the database. The one
+exception to this rule is Queue access method extent files. Queue
+extent files are removed when they are emptied, and their pages returned
+to the underlying filesystem.
+<p><li><b>I'm using integers as keys for a Btree database, and even
+though the key/data pairs are entered in sorted order, the page-fill
+factor is low.</b>
+<p>This is usually the result of using integer keys on little-endian
+architectures such as the x86. Berkeley DB sorts keys as byte strings, and
+little-endian integers don't sort well when viewed as byte strings.
+For example, take the numbers 254 through 257. Their byte patterns on
+a little-endian system are:
+<p><blockquote><pre>254 fe 0 0 0
+255 ff 0 0 0
+256 0 1 0 0
+257 1 1 0 0</pre></blockquote>
+<p>If you treat them as strings, then they sort badly:
+<p><blockquote><pre>256
+257
+254
+255</pre></blockquote>
+<p>On a big-endian system, their byte patterns are:
+<p><blockquote><pre>254 0 0 0 fe
+255 0 0 0 ff
+256 0 0 1 0
+257 0 0 1 1</pre></blockquote>
+<p>and so, if you treat them as strings, they sort nicely. This means that if
+you use steadily increasing integers as keys on a big-endian system,
+Berkeley DB behaves well and you get compact trees, but on a little-endian
+system, Berkeley DB produces much less compact trees. To avoid this problem,
+you may want to convert the keys to flat text or big-endian
+representations, or provide your own Btree comparison function.
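+<p>As a sketch only (assuming keys are stored as native, host-order unsigned
+32-bit integers), such a comparison function might look as follows:
+<p><blockquote><pre>int
+compare_uint32(DB *dbp, const DBT *a, const DBT *b)
+{
+    u_int32_t ai, bi;
+<p>
+    /* Copy the keys to avoid alignment problems, then compare numerically. */
+    memcpy(&ai, a-&gt;data, sizeof(u_int32_t));
+    memcpy(&bi, b-&gt;data, sizeof(u_int32_t));
+    return (ai &lt; bi ? -1 : (ai &gt; bi ? 1 : 0));
+}
+<p>
+/* Register the comparison function before the database is opened. */
+if ((ret = dbp-&gt;set_bt_compare(dbp, compare_uint32)) != 0)
+    return (ret);</pre></blockquote>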
+<a name="5"><!--meow--></a>
+<p><li><b>Is there any way to avoid double buffering in the Berkeley DB system?</b>
+<p>While you cannot avoid double buffering entirely, there are a few things
+you can do to address this issue:
+<p>First, the Berkeley DB cache size can be explicitly set. Rather than allocate
+additional space in the Berkeley DB cache to cover unexpectedly heavy load or
+large table sizes, the presence of double buffering may argue for sizing the
+cache to function well under normal conditions, and then depending on the file
+buffer cache to cover abnormal conditions. Obviously, this is a
+trade-off, as Berkeley DB may not then perform as well as usual under abnormal
+conditions.
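+<p>For example (a sketch only; the 64MB figure is an arbitrary placeholder),
+the cache size is set on the environment handle before the environment is
+opened:
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_env_create(&dbenv, 0)) != 0)
+    return (ret);
+<p>
+/* A 64MB cache in a single contiguous region. */
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1)) != 0) {
+    dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_cachesize");
+    return (1);
+}</pre></blockquote>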
+<p>Second, depending on the underlying operating system you're using, you
+may be able to alter the amount of physical memory devoted to the file
+buffer cache. Running as the system super-user makes a difference for
+some UNIX or UNIX-like operating systems as well.
+<p>Third, changing the size of the Berkeley DB environment regions can change
+the amount of space the operating system makes available for the file
+buffer cache, and it's often worth considering exactly how the operating
+system is dividing up its available memory. Further, moving the Berkeley DB
+database environment regions from filesystem backed memory into system
+memory (or heap memory), can often make additional system memory
+available for the file buffer cache, especially on systems without a
+unified buffer cache and VM system.
+<p>Finally, for operating systems that allow buffering to be turned off,
+specifying the <a href="../../api_c/env_set_flags.html#DB_DIRECT_DB">DB_DIRECT_DB</a> and <a href="../../api_c/env_set_flags.html#DB_DIRECT_LOG">DB_DIRECT_LOG</a> flags
+will attempt to do so.
+<p><li><b>I'm seeing database corruption when I run out of disk space.</b>
+<p>Berkeley DB can continue to run when out-of-disk-space errors occur, but
+it requires the application to be transaction protected. Applications
+which do not enclose update operations in transactions cannot recover
+from out-of-disk-space errors, and the result of running out of disk
+space may be database corruption.
+<p><li><b>How can I associate application information with a <a href="../../api_c/db_class.html">DB</a>
+or <a href="../../api_c/env_class.html">DB_ENV</a> handle?</b>
+<p>In the C API, the <a href="../../api_c/db_class.html">DB</a> and <a href="../../api_c/env_class.html">DB_ENV</a> structures each contain
+an "app_private" field intended to be used to reference
+application-specific information. See the <a href="../../api_c/db_create.html">db_create</a> and
+<a href="../../api_c/env_create.html">db_env_create</a> documentation for more information.
+<p>In the C++ or Java APIs, the easiest way to associate
+application-specific data with a handle is to subclass the <a href="../../api_cxx/db_class.html">Db</a>
+or <a href="../../api_cxx/env_class.html">DbEnv</a>, for example subclassing <a href="../../api_cxx/db_class.html">Db</a> to get MyDb.
+Objects of type MyDb will still have the Berkeley DB API methods available on
+them, and you can put any extra data or methods you want into the MyDb
+class. If you are using "callback" APIs that take <a href="../../api_cxx/db_class.html">Db</a> or
+<a href="../../api_cxx/env_class.html">DbEnv</a> arguments (for example, <a href="../../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>)
+these will always be called with the <a href="../../api_cxx/db_class.html">Db</a> or <a href="../../api_cxx/env_class.html">DbEnv</a>
+objects you create. So if you always use MyDb objects, you will be able
+to take the first argument to the callback function and cast it to a
+MyDb (in C++, cast it to (MyDb*)). That will allow you to access your
+data members or methods.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/tune.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/bigpic.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/get_bulk.html b/libdb/docs/ref/am_misc/get_bulk.html
new file mode 100644
index 0000000..99d77c8
--- /dev/null
+++ b/libdb/docs/ref/am_misc/get_bulk.html
@@ -0,0 +1,136 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving records in bulk</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/align.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/partial.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving records in bulk</h1>
+<p>When retrieving large numbers of records from the database, the number
+of method calls can often dominate performance. Berkeley DB offers bulk get
+interfaces which can significantly increase performance for some
+applications. To retrieve records in bulk, an application buffer must
+be specified to the <a href="../../api_c/db_get.html">DB-&gt;get</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods. This is done
+in the C API by setting the <b>data</b> and <b>ulen</b> fields of the
+<b>data</b> <a href="../../api_c/dbt_class.html">DBT</a> to reference an application buffer, and the
+<b>flags</b> field of that structure to <a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>. In
+the Berkeley DB C++ and Java APIs, the actions are similar, although there
+are API-specific methods to set the <a href="../../api_c/dbt_class.html">DBT</a> values. Then, the
+<a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> or <a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flags are specified to
+the <a href="../../api_c/db_get.html">DB-&gt;get</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> methods, which cause multiple records
+to be returned in the specified buffer.
+<p>The difference between <a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> and <a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a>
+is as follows: <a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> returns multiple data items for a
+single key. For example, the <a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> flag would be used to
+retrieve all of the duplicate data items for a single key in a single
+call. The <a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag is used to retrieve multiple
+key/data pairs, where each returned key may or may not have duplicate
+data items.
+<p>Once the <a href="../../api_c/db_get.html">DB-&gt;get</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method has returned, the
+application will walk through the buffer handling the returned records.
+This is implemented for the C and C++ APIs using four macros:
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a>, <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a>,
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a>, and <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a>. For
+the Java API, this is implemented as three iterator classes:
+<a href="../../api_java/dbt_bulk_class.html">DbMultipleDataIterator</a>,
+<a href="../../api_java/dbt_bulk_class.html">DbMultipleKeyDataIterator</a>, and
+<a href="../../api_java/dbt_bulk_class.html">DbMultipleRecnoDataIterator</a>.
+<p>The <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a> macro is always called first. It
+initializes a local application variable and the <b>data</b>
+<a href="../../api_c/dbt_class.html">DBT</a> for stepping through the set of returned records. Then,
+the application calls one of the remaining three macros:
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a>, <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a>, and
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a>.
+<p>If the <a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> flag was specified to the <a href="../../api_c/db_get.html">DB-&gt;get</a> or
+<a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method, the application will always call the
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a> macro. If the <a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag
+was specified to the <a href="../../api_c/db_get.html">DB-&gt;get</a> or <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method and the
+underlying database is a Btree or Hash database, the application will
+always call the <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a> macro. If the
+<a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a> flag was specified to the <a href="../../api_c/db_get.html">DB-&gt;get</a> or
+<a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a> method, and, the underlying database is a Queue or Recno
+database, the application will always call the
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a> macro. The <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_NEXT">DB_MULTIPLE_NEXT</a>,
+<a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_KEY_NEXT">DB_MULTIPLE_KEY_NEXT</a>, and <a href="../../api_c/dbt_bulk.html#DB_MULTIPLE_RECNO_NEXT">DB_MULTIPLE_RECNO_NEXT</a> macros
+are called repeatedly, until the end of the returned records is reached.
+The end of the returned records is detected by the application's local
+pointer variable being set to NULL.
+<p>The following is an example of a routine that displays the contents of
+a Btree database using the bulk return interfaces.
+<p><blockquote><pre>int
+rec_display(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ size_t retklen, retdlen;
+ char *retkey, *retdata;
+ int ret, t_ret;
+ void *p;
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+<p>
+ /* Review the database in 5MB chunks. */
+#define BUFFER_LENGTH (5 * 1024 * 1024)
+ if ((data.data = malloc(BUFFER_LENGTH)) == NULL)
+ return (errno);
+ data.ulen = BUFFER_LENGTH;
+ data.flags = DB_DBT_USERMEM;
+<p>
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp-&gt;cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp-&gt;err(dbp, ret, "DB-&gt;cursor");
+ free(data.data);
+ return (ret);
+ }
+<p>
+ for (;;) {
+ /*
+ * Acquire the next set of key/data pairs. This code does
+ * not handle single key/data pairs that won't fit in a
+ * BUFFER_LENGTH size buffer, instead returning ENOMEM to
+ * our caller.
+ */
+ if ((ret = dbcp-&gt;c_get(dbcp,
+ &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) != 0) {
+ if (ret != DB_NOTFOUND)
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;c_get");
+ break;
+ }
+<p>
+ for (DB_MULTIPLE_INIT(p, &data);;) {
+ DB_MULTIPLE_KEY_NEXT(p,
+ &data, retkey, retklen, retdata, retdlen);
+ if (p == NULL)
+ break;
+ printf("key: %.*s, data: %.*s\n",
+ (int)retklen, retkey, (int)retdlen, retdata);
+ }
+ }
+<p>
+ if ((t_ret = dbcp-&gt;c_close(dbcp)) != 0) {
+ dbp-&gt;err(dbp, ret, "DBcursor-&gt;close");
+ if (ret == 0)
+ ret = t_ret;
+ }
+<p>
+ free(data.data);
+<p>
+ return (ret);
+}</pre></blockquote>
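+<p>The routine above uses <a href="../../api_c/dbc_get.html#DB_MULTIPLE_KEY">DB_MULTIPLE_KEY</a>. The following is a minimal
+sketch of the <a href="../../api_c/dbc_get.html#DB_MULTIPLE">DB_MULTIPLE</a> case, retrieving all of the duplicate data
+items stored under a single key with one <a href="../../api_c/db_get.html">DB-&gt;get</a> call; the sketch
+assumes an open database handle and a key that has duplicate data items.
+<p><blockquote><pre>int
+dup_display(dbp, keystr)
+    DB *dbp;
+    char *keystr;
+{
+    DBT key, data;
+    size_t retdlen;
+    char *retdata;
+    int ret;
+    void *p;
+<p>
+    memset(&key, 0, sizeof(key));
+    memset(&data, 0, sizeof(data));
+    key.data = keystr;
+    key.size = strlen(keystr);
+<p>
+    /* Buffer for the returned duplicate data items. */
+#define DUP_BUFFER_LENGTH (1024 * 1024)
+    if ((data.data = malloc(DUP_BUFFER_LENGTH)) == NULL)
+        return (errno);
+    data.ulen = DUP_BUFFER_LENGTH;
+    data.flags = DB_DBT_USERMEM;
+<p>
+    /* Retrieve all duplicates for the key in a single call. */
+    if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, DB_MULTIPLE)) == 0)
+        for (DB_MULTIPLE_INIT(p, &data);;) {
+            DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+            if (p == NULL)
+                break;
+            printf("data: %.*s\n", (int)retdlen, retdata);
+        }
+<p>
+    free(data.data);
+    return (ret);
+}</pre></blockquote>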
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/align.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/partial.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/partial.html b/libdb/docs/ref/am_misc/partial.html
new file mode 100644
index 0000000..9d1b3a4
--- /dev/null
+++ b/libdb/docs/ref/am_misc/partial.html
@@ -0,0 +1,134 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Partial record storage and retrieval</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/get_bulk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/struct.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Partial record storage and retrieval</h1>
+<p>It is possible to both store and retrieve parts of data items in all
+Berkeley DB access methods. This is done by setting the
+<a href="../../api_c/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag in the <a href="../../api_c/dbt_class.html">DBT</a> structure passed to the
+Berkeley DB interface.
+<p>The <a href="../../api_c/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag is based on the values of two fields
+of the <a href="../../api_c/dbt_class.html">DBT</a> structure: <b>dlen</b> and <b>doff</b>. The value
+of <b>dlen</b> is the number of bytes of the record in which the
+application is interested. The value of <b>doff</b> is the offset from
+the beginning of the data item where those bytes start.
+<p>For example, if the data item were <b>ABCDEFGHIJKL</b>, a <b>doff</b>
+value of 3 would indicate that the bytes of interest started at
+<b>D</b>, and a <b>dlen</b> value of 4 would indicate that the bytes
+of interest were <b>DEFG</b>.
+<p>When retrieving a data item from a database, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the record are
+returned, as if they comprised the entire record. If any or all of the
+specified bytes do not exist in the record, the retrieval is still
+successful and any existing bytes are returned.
+<p>When storing a data item into the database, the <b>dlen</b> bytes
+starting <b>doff</b> bytes from the beginning of the specified key's
+data record are replaced by the data specified by the <b>data</b> and
+<b>size</b> fields. If <b>dlen</b> is smaller than <b>size</b>, the
+record will grow, and if <b>dlen</b> is larger than <b>size</b>, the
+record will shrink. If the specified bytes do not exist, the record will
+be extended using nul bytes as necessary, and the store call will still
+succeed.
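+<p>In the C API, partial reads and writes are requested by filling in the
+<b>dlen</b>, <b>doff</b> and <b>flags</b> fields of the <b>data</b>
+<a href="../../api_c/dbt_class.html">DBT</a>. The following is a minimal sketch, assuming an open database
+handle <b>dbp</b> and an existing record stored under the key used below.
+<p><blockquote><pre>DBT key, data;
+char buf[4];
+int ret;
+<p>
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "mykey";
+key.size = 5;
+<p>
+/* Retrieve 4 bytes starting at offset 3 of the stored record. */
+data.data = buf;
+data.ulen = sizeof(buf);
+data.dlen = 4;
+data.doff = 3;
+data.flags = DB_DBT_PARTIAL | DB_DBT_USERMEM;
+ret = dbp-&gt;get(dbp, NULL, &key, &data, 0);
+<p>
+/* Overwrite those same 4 bytes of the record with "wxyz". */
+data.data = "wxyz";
+data.size = 4;
+data.dlen = 4;
+data.doff = 3;
+data.flags = DB_DBT_PARTIAL;
+if (ret == 0)
+    ret = dbp-&gt;put(dbp, NULL, &key, &data, 0);</pre></blockquote>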
+<p>The following are various examples of the put case for the
+<a href="../../api_c/dbt_class.html#DB_DBT_PARTIAL">DB_DBT_PARTIAL</a> flag. In all examples, the initial data item is 20
+bytes in length:
+<p><b>ABCDEFGHIJ0123456789</b>
+<p><ol>
+<p><li><p><blockquote><pre>size = 20
+doff = 0
+dlen = 20
+data = abcdefghijabcdefghij
+<p>
+Result: The 20 bytes at offset 0 are replaced by the 20 bytes of data;
+that is, the entire record is replaced.
+<p>
+ABCDEFGHIJ0123456789 -&gt; abcdefghijabcdefghij
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 20
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 20 are replaced by the 10 bytes of data;
+that is, the record is extended by 10 bytes.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJ0123456789abcdefghij
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 10
+dlen = 5
+data = abcdefghij
+<p>
+Result: The 5 bytes at offset 10 are replaced by the 10 bytes of data.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJabcdefghij56789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 10
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 10 are replaced by the 10 bytes of data;
+that is, 10 bytes are inserted into the record.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJabcdefghij0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 2
+dlen = 15
+data = abcdefghij
+<p>
+Result: The 15 bytes at offset 2 are replaced by the 10 bytes of data.
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABabcdefghij789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 0
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 0 are replaced by the 10 bytes of data;
+that is, the 10 bytes are inserted at the beginning of the record.
+<p>
+ABCDEFGHIJ0123456789 -&gt; abcdefghijABCDEFGHIJ0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 0
+doff = 0
+dlen = 10
+data = ""
+<p>
+Result: The 10 bytes at offset 0 are replaced by the 0 bytes of data;
+that is, the first 10 bytes of the record are discarded.
+<p>
+ABCDEFGHIJ0123456789 -&gt; 0123456789
+</pre></blockquote>
+<p><li><p><blockquote><pre>size = 10
+doff = 25
+dlen = 0
+data = abcdefghij
+<p>
+Result: The 0 bytes at offset 25 are replaced by the 10 bytes of data;
+that is, 10 bytes are inserted into the record past the end of the
+current data (\0 represents a nul byte).
+<p>
+ABCDEFGHIJ0123456789 -&gt; ABCDEFGHIJ0123456789\0\0\0\0\0abcdefghij
+</pre></blockquote>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/get_bulk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/struct.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/perm.html b/libdb/docs/ref/am_misc/perm.html
new file mode 100644
index 0000000..fddb376
--- /dev/null
+++ b/libdb/docs/ref/am_misc/perm.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieved key/data permanence for C/C++</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/struct.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieved key/data permanence for C/C++</h1>
+<p>When using the non-cursor Berkeley DB calls to retrieve key/data items under
+the C/C++ APIs (for example, <a href="../../api_c/db_get.html">DB-&gt;get</a>), the memory to which the
+pointer stored into the <a href="../../api_c/dbt_class.html">DBT</a> refers is only valid until the next
+call to Berkeley DB using the <a href="../../api_c/db_class.html">DB</a> handle. (This includes <b>any</b>
+use of the returned <a href="../../api_c/db_class.html">DB</a> handle, including by another thread of
+control within the process. For this reason, when multiple threads are
+using the returned <a href="../../api_c/db_class.html">DB</a> handle concurrently, one of the
+<a href="../../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or <a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>
+flags must be specified with any non-cursor <a href="../../api_c/dbt_class.html">DBT</a> used for key or
+data retrieval.)
+<p>When using the cursor Berkeley DB calls to retrieve key/data items under the
+C/C++ APIs (for example, <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>), the memory to which the
+pointer stored into the <a href="../../api_c/dbt_class.html">DBT</a> refers is only valid until the next
+call to Berkeley DB using the <a href="../../api_c/dbc_class.html">DBC</a> handle returned by <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>.
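+<p>The following is a minimal sketch of a non-cursor retrieval that is safe
+for use by multiple threads of control, assuming an open database handle
+<b>dbp</b>; with <a href="../../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, the library allocates the returned
+memory and the application is responsible for freeing it.
+<p><blockquote><pre>DBT key, data;
+int ret;
+<p>
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "mykey";
+key.size = 5;
+<p>
+/* Ask Berkeley DB to allocate memory for the returned data item. */
+data.flags = DB_DBT_MALLOC;
+if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0) {
+    printf("data: %.*s\n", (int)data.size, (char *)data.data);
+    /* The application owns, and must free, the returned memory. */
+    free(data.data);
+}</pre></blockquote>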
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/struct.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/stability.html b/libdb/docs/ref/am_misc/stability.html
new file mode 100644
index 0000000..8d9587d
--- /dev/null
+++ b/libdb/docs/ref/am_misc/stability.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Cursor stability</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/dbsizes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Cursor stability</h1>
+<p>In the absence of locking, no guarantees are made about the stability
+of cursors in different threads of control. However, the Btree, Queue
+and Recno access methods guarantee that cursor operations, interspersed
+with any other operation in the same thread of control will always
+return keys in order and will return each non-deleted key/data pair
+exactly once. Because the Hash access method uses a dynamic hashing
+algorithm, it cannot guarantee any form of stability in the presence of
+inserts and deletes unless transactional locking is performed.
+<p>If locking was specified when the Berkeley DB environment was opened, but
+transactions are not in effect, the access methods provide repeatable
+reads with respect to the cursor. That is, a <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a> call
+on the cursor is guaranteed to return the same record as was returned
+on the last call to the cursor.
+<a name="4"><!--meow--></a><a name="5"><!--meow--></a>
+<p>In the presence of transactions, the Btree, Hash and Recno access
+methods provide degree 3 isolation (serializable transactions). The
+Queue access method provides degree 3 isolation with the exception that
+it permits phantom records to appear between calls. That is, deleted
+records are not locked, therefore another transaction may replace a
+deleted record between two calls to retrieve it. The record would not
+appear in the first call but would be seen by the second call. For
+readers not enclosed in transactions, all access method calls provide
+degree 2 isolation, that is, reads are not repeatable. Finally, Berkeley DB
+provides degree 1 isolation when the <a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag is
+specified; that is, reads may see data modified in transactions which
+have not yet committed.
+<p>For all access methods, a cursor scan of the database performed within
+the context of a transaction is guaranteed to return each key/data pair
+once and only once, except in the following case. If, while performing
+a cursor scan using the Hash access method, the transaction performing
+the scan inserts a new pair into the database, it is possible that
+duplicate key/data pairs will be returned.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/dbsizes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/struct.html b/libdb/docs/ref/am_misc/struct.html
new file mode 100644
index 0000000..6ea0582
--- /dev/null
+++ b/libdb/docs/ref/am_misc/struct.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Storing C/C++ structures/objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/partial.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/perm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Storing C/C++ structures/objects</h1>
+<p>Berkeley DB can store any kind of data, that is, it is entirely 8-bit clean.
+How you use this depends, to some extent, on the application language
+you are using. In the C/C++ languages, there are a couple of different
+ways to store structures and objects.
+<p>First, you can do some form of run-length encoding and copy your
+structure into another piece of memory before storing it:
+<p><blockquote><pre>struct {
+ char *data1;
+ u_int32_t data2;
+ ...
+} info;
+size_t len;
+u_int8_t *p, data_buffer[1024];
+<p>
+p = &data_buffer[0];
+len = strlen(info.data1);
+memcpy(p, &len, sizeof(len));
+p += sizeof(len);
+memcpy(p, info.data1, len);
+p += len;
+memcpy(p, &info.data2, sizeof(info.data2));
+p += sizeof(info.data2);
+...</pre></blockquote>
+<p>and so on, until all the fields of the structure have been loaded into
+the byte array. If you want more examples, see the Berkeley DB logging
+routines (for example, btree/btree_auto.c:__bam_split_log()). This
+technique is generally known as "marshalling". If you use this
+technique, you must then un-marshall the data when you read it back:
+<p><blockquote><pre>struct {
+ char *data1;
+ u_int32_t data2;
+ ...
+} info;
+size_t len;
+u_int8_t *p;
+<p>
+p = &data_buffer[0];
+memcpy(&len, p, sizeof(len));
+p += sizeof(len);
+info.data1 = malloc(len);
+memcpy(info.data1, p, len);
+p += len;
+memcpy(&info.data2, p, sizeof(info.data2));
+p += sizeof(info.data2);
+...</pre></blockquote>
+<p>and so on.
+<p>The second way to solve this problem only works if you have just one
+variable length field in the structure. In that case, you can declare
+the structure as follows:
+<p><blockquote><pre>struct {
+ int a, b, c;
+ u_int8_t buf[1];
+} info;</pre></blockquote>
+<p>Then, let's say you have a string you want to store in this structure.
+When you allocate the structure, you allocate it as:
+<p><blockquote><pre>malloc(sizeof(struct info) + strlen(string));</pre></blockquote>
+<p>Since the allocated memory is contiguous, you can then initialize the
+structure as:
+<p><blockquote><pre>info.a = 1;
+info.b = 2;
+info.c = 3;
+memcpy(&info.buf[0], string, strlen(string));</pre></blockquote>
+<p>and give it to Berkeley DB to store, with a length of:
+<p><blockquote><pre>sizeof(struct info) + strlen(string);</pre></blockquote>
+<p>In this case, the structure can be copied out of the database and used
+without any additional work.
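+<p>Putting the pieces together, the following is a minimal sketch of storing
+such a structure, assuming an open database handle <b>dbp</b> and a
+nul-terminated <b>string</b> to be appended to the fixed-size fields.
+<p><blockquote><pre>struct info {
+    int a, b, c;
+    u_int8_t buf[1];
+} *infop;
+DBT key, data;
+size_t len;
+int ret;
+<p>
+/* Allocate the structure plus room for the trailing string. */
+len = sizeof(struct info) + strlen(string);
+if ((infop = malloc(len)) == NULL)
+    return (errno);
+infop-&gt;a = 1;
+infop-&gt;b = 2;
+infop-&gt;c = 3;
+memcpy(&infop-&gt;buf[0], string, strlen(string));
+<p>
+/* The contiguous block is the data item; the key is up to the application. */
+memset(&key, 0, sizeof(key));
+memset(&data, 0, sizeof(data));
+key.data = "info-record";
+key.size = sizeof("info-record") - 1;
+data.data = infop;
+data.size = len;
+<p>
+ret = dbp-&gt;put(dbp, NULL, &key, &data, 0);
+free(infop);</pre></blockquote>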
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/partial.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/perm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/am_misc/tune.html b/libdb/docs/ref/am_misc/tune.html
new file mode 100644
index 0000000..8abe33e
--- /dev/null
+++ b/libdb/docs/ref/am_misc/tune.html
@@ -0,0 +1,85 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Access method tuning</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Access Methods</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/diskspace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Access method tuning</h1>
+<p>There are a few different issues to consider when tuning the performance
+of Berkeley DB access method applications.
+<p><dl compact>
+<p><dt>access method<dd>An application's choice of a database access method can significantly
+affect performance. Applications using fixed-length records and integer
+keys are likely to get better performance from the Queue access method.
+Applications using variable-length records are likely to get better
+performance from the Btree access method, as it tends to be faster for
+most applications than either the Hash or Recno access methods. Because
+the access method APIs are largely identical between the Berkeley DB access
+methods, it is easy for applications to benchmark the different access
+methods against each other. See <a href="../../ref/am_conf/select.html">Selecting an access method</a> for more information.
+<p><dt>cache size<dd>The Berkeley DB database cache defaults to a fairly small size, and most
+applications concerned with performance will want to set it explicitly
+(a sketch of doing so appears after this list).
+Using a too-small cache will result in horrible performance. The first
+step in tuning the cache size is to use the db_stat utility (or the
+statistics returned by the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> function) to measure the
+effectiveness of the cache. The goal is to maximize the cache's hit
+rate. Typically, increasing the size of the cache until the hit rate
+reaches 100% or levels off will yield the best performance. However,
+if your working set is sufficiently large, you will be limited by the
+system's available physical memory. Depending on the virtual memory
+and file system buffering policies of your system, and the requirements
+of other applications, the maximum cache size will be some amount
+smaller than the size of physical memory. If you find that
+<a href="../../utility/db_stat.html">db_stat</a> shows that increasing the cache size improves your hit
+rate, but performance is not improving (or is getting worse), then it's
+likely you've hit other system limitations. At this point, you should
+review the system's swapping/paging activity and limit the size of the
+cache to the maximum size possible without triggering paging activity.
+Finally, always remember to make your measurements under conditions as
+close as possible to the conditions your deployed application will run
+under, and to test your final choices under worst-case conditions.
+<p><dt>shared memory<dd>By default, Berkeley DB creates its database environment shared regions in
+filesystem backed memory. Some systems do not distinguish between
+regular filesystem pages and memory-mapped pages backed by the
+filesystem, when selecting dirty pages to be flushed back to disk. For
+this reason, dirtying pages in the Berkeley DB cache may cause intense
+filesystem activity, typically when the filesystem sync thread or
+process is run. In some cases, this can dramatically affect application
+throughput. The workaround to this problem is to create the shared
+regions in system shared memory (<a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a>) or in
+application private memory (<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>).
+<p><dt>large key/data items<dd>Storing large key/data items in a database can alter the performance
+characteristics of Btree, Hash and Recno databases. The first parameter
+to consider is the database page size. When a key/data item is too
+large to be placed on a database page, it is stored on "overflow" pages
+that are maintained outside of the normal database structure (typically,
+items that are larger than one-quarter of the page size are deemed to
+be too large). Accessing these overflow pages requires at least one
+additional page reference over a normal access, so it is usually better
+to increase the page size than to create a database with a large number
+of overflow pages. Use the <a href="../../utility/db_stat.html">db_stat</a> utility (or the statistics
+returned by the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> method) to review the number of overflow
+pages in the database.
+<p>The second issue is using large key/data items instead of duplicate data
+items. While this can offer performance gains to some applications
+(because it is possible to retrieve several data items in a single get
+call), once the key/data items are large enough to be pushed off-page,
+they will slow the application down. Using duplicate data items is
+usually the better choice in the long run.
+</dl>
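+<p>As an example of the cache size item above, the following is a minimal
+sketch of setting a 64MB cache explicitly; the call must be made before
+the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> call.
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_env_create(&dbenv, 0)) != 0)
+    return (ret);
+<p>
+/* 64MB cache in a single region; must precede DB_ENV-&gt;open. */
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1)) != 0) {
+    dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_cachesize");
+    return (ret);
+}</pre></blockquote>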
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/diskspace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_misc/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/apprec/auto.html b/libdb/docs/ref/apprec/auto.html
new file mode 100644
index 0000000..567cb13
--- /dev/null
+++ b/libdb/docs/ref/apprec/auto.html
@@ -0,0 +1,159 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Automatically generated functions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Application Specific Logging and Recovery</dl></h3></td>
+<td align=right><a href="../../ref/apprec/def.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Automatically generated functions</h1>
+<p>The XXX.src file is processed using the gen_rec.awk script included in
+the dist directory of the Berkeley DB distribution. This is an awk script
+that is executed with the following command line:
+<p><blockquote><pre>awk -f gen_rec.awk \
+ -v source_file=<i>C_FILE</i> \
+ -v header_file=<i>H_FILE</i> \
+ -v template_file=<i>TMP_FILE</i> &lt; XXX.src</pre></blockquote>
+<p>where <i>C_FILE</i> is the name of the file into which to place the
+automatically generated C code, <i>H_FILE</i> is the name of the
+file into which to place the automatically generated data structures
+and declarations, and <i>TMP_FILE</i> is the name of the file into
+which to place a template for the recovery routines.
+<p>Because the gen_rec.awk script uses source files located relative to
+the Berkeley DB dist directory, it must be run from the dist directory. For
+example, in building the Berkeley DB logging and recovery routines for
+ex_apprec, the following script is used to rebuild the automatically
+generated files:
+<p><blockquote><pre>E=../examples_c/ex_apprec
+<p>
+cd ../../dist
+awk -f gen_rec.awk \
+ -v source_file=$E/ex_apprec_auto.c \
+ -v header_file=$E/ex_apprec_auto.h \
+ -v template_file=$E/ex_apprec_template &lt; $E/ex_apprec.src</pre></blockquote>
+<p>For each log record description found in the XXX.src file, the following
+structure declarations and #defines will be created in the file
+<i>header_file</i>:
+<p><blockquote><pre>#define DB_PREFIX_RECORD_TYPE /* Integer ID number */
+<p>
+typedef struct _PREFIX_RECORD_TYPE_args {
+ /*
+ * These three fields are generated for every record.
+ */
+ u_int32_t type; /* Record type used for dispatch. */
+<p>
+ /*
+ * Transaction handle that identifies the transaction on whose
+ * behalf the record is being logged.
+ */
+ DB_TXN *txnid;
+<p>
+ /*
+ * The log sequence number returned by the previous call to log_put
+ * for this transaction.
+ */
+ DB_LSN *prev_lsn;
+<p>
+ /*
+ * The rest of the structure contains one field for each of
+ * the entries in the record statement.
+ */
+};</pre></blockquote>
+<p>Thus, the auto-generated ex_apprec_mkdir_args structure looks as follows:
+<p><blockquote><pre>typedef struct _ex_apprec_mkdir_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT dirname;
+} ex_apprec_mkdir_args;</pre></blockquote>
+<p>The template_file will contain a template for a recovery function. The
+recovery function is called on each record read from the log during
+system recovery, transaction abort, or the application of log records
+on a replication client, and is expected to redo or undo the operations
+described by that record. The details of the recovery function will be
+specific to the record being logged and need to be written manually,
+but the template provides a good starting point. (Note that the
+template assumes that the record is manipulating the internals of a
+Berkeley DB database and sets up database handles, page structures, and such
+for convenience. Many application-specific log records will not need
+these, and may simply delete much of the template. See
+ex_apprec_template and ex_apprec_rec.c for an example.)
+<p>The template file should be copied to a source file in the application
+(but not the automatically generated source_file, as that will get
+overwritten each time gen_rec.awk is run) and fully developed there.
+The recovery function takes the following parameters:
+<p><blockquote><p><dl compact>
+<p><dt>dbenv<dd>The environment in which recovery is running.
+<p><dt>rec<dd>The record being recovered.
+<p><dt>lsn<dd>The log sequence number of the record being recovered. The
+prev_lsn field, automatically included in every auto-generated log
+record, should be returned through this argument. The prev_lsn field
+is used to chain log records together to allow transaction aborts;
+because the recovery function is the only place that a log record gets
+parsed, the responsibility for returning this value lies with the
+recovery function writer.
+<p><dt>op<dd>A parameter of type db_recops, which indicates what operation is being
+run (<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a>, <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a>, <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a>,
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> or <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_PRINT">DB_TXN_PRINT</a>).
+<p><dt>info<dd>A structure passed by the dispatch function. It is used to contain a
+list of committed transactions and information about files that may have
+been deleted. Application-specific log records can usually simply
+ignore this field.
+</dl></blockquote>
+<p>In addition to the header_file and template_file, a source_file is
+created, containing a log, read, recovery, and print function for each
+record type.
+<p>The log function marshalls the parameters into a buffer, and calls
+<a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a> on that buffer returning 0 on success and non-zero on
+failure. The log function takes the following parameters:
+<p><blockquote><p><dl compact>
+<p><dt>dbenv<dd>The environment in which recovery is running.
+<p><dt>txnid<dd>The transaction identifier for the transaction handle returned by
+<a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>.
+<p><dt>lsnp<dd>A pointer to storage for a log sequence number into which the log
+sequence number of the new log record will be returned.
+<p><dt>syncflag<dd>A flag indicating whether the record must be written synchronously.
+Valid values are 0 and <a href="../../api_c/log_put.html#DB_FLUSH">DB_FLUSH</a>.
+<p><dt>args<dd>The remaining parameters to the log message are the fields described
+in the XXX.src file, in order.
+</dl></blockquote>
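+<p>For example, with the ex_apprec mkdir record described earlier, the
+generated log function would be called along the following lines (a
+minimal sketch; <b>dbenv</b> and <b>txn</b> are assumed to be an open
+environment handle and an active transaction, and the exact prototype is
+found in the generated header file):
+<p><blockquote><pre>DBT dirname;
+DB_LSN lsn;
+int ret;
+<p>
+memset(&dirname, 0, sizeof(dirname));
+dirname.data = "newdir";
+dirname.size = sizeof("newdir");
+<p>
+/* Write the log record before performing the operation itself. */
+if ((ret = ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirname)) != 0)
+    dbenv-&gt;err(dbenv, ret, "ex_apprec_mkdir_log");</pre></blockquote>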
+<p>The read function takes a buffer and unmarshalls its contents into a
+structure of the appropriate type. It returns 0 on success and non-zero
+on error. After the fields of the structure have been used, the pointer
+returned from the read function should be freed. The read function
+takes the following parameters:
+<p><blockquote><p><dl compact>
+<p><dt>dbenv<dd>The environment in which recovery is running.
+<p><dt>recbuf<dd>A buffer.
+<p><dt>argp<dd>A pointer to a structure of the appropriate type.
+</dl></blockquote>
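+<p>For example, a recovery or print function for the ex_apprec mkdir record
+would typically begin by unmarshalling the record with the generated read
+function (a minimal sketch; <b>rec</b> is the <a href="../../api_c/dbt_class.html">DBT</a> passed to the
+recovery function):
+<p><blockquote><pre>ex_apprec_mkdir_args *argp;
+int ret;
+<p>
+/* Unmarshall the log record into the generated structure. */
+if ((ret = ex_apprec_mkdir_read(dbenv, rec-&gt;data, &argp)) != 0)
+    return (ret);
+printf("directory: %.*s\n",
+    (int)argp-&gt;dirname.size, (char *)argp-&gt;dirname.data);
+/* The structure allocated by the read function must be freed. */
+free(argp);</pre></blockquote>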
+<p>The print function displays the contents of the record. The print
+function takes the same parameters as the recovery function described
+previously. Although some of the parameters are unused by the print
+function, taking the same parameters allows a single dispatch loop to
+dispatch to a variety of functions. The print function takes the
+following parameters:
+<p><blockquote><p><dl compact>
+<p><dt>dbenv<dd>The environment in which recovery is running.
+<p><dt>rec<dd>The record being recovered.
+<p><dt>lsn<dd>The log sequence number of the record being recovered.
+<p><dt>op<dd>Unused.
+<p><dt>info<dd>Unused.
+</dl></blockquote>
+<p>Finally, the source file will contain a function (named XXX_init_print,
+where XXX is replaced by the prefix) which should be added to the
+initialization part of the standalone <a href="../../utility/db_printlog.html">db_printlog</a> utility code
+so that utility can be used to display application-specific log records.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/apprec/def.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/apprec/config.html b/libdb/docs/ref/apprec/config.html
new file mode 100644
index 0000000..29c9f9d
--- /dev/null
+++ b/libdb/docs/ref/apprec/config.html
@@ -0,0 +1,128 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Application configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Application Specific Logging and Recovery</dl></h3></td>
+<td align=right><a href="../../ref/apprec/auto.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/appsignals.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Application configuration</h1>
+<p>The application should include a dispatch function that dispatches to
+appropriate printing and/or recovery functions based on the log record
+type and the operation code. The dispatch function should take the same
+arguments as the recovery function, and should call the appropriate
+recovery and/or printing functions based on the log record type and the
+operation code. For example, the ex_apprec dispatch function is as
+follows:
+<p><blockquote><pre>int
+apprec_dispatch(dbenv, dbt, lsn, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsn;
+ db_recops op;
+{
+ u_int32_t rectype;
+ /* Pull the record type out of the log record. */
+ memcpy(&rectype, dbt-&gt;data, sizeof(rectype));
+ switch (rectype) {
+ case DB_ex_apprec_mkdir:
+ return (ex_apprec_mkdir_recover(dbenv, dbt, lsn, op, NULL));
+ default:
+ /*
+ * We've hit an unexpected, allegedly user-defined record
+ * type.
+ */
+ dbenv-&gt;errx(dbenv, "Unexpected log record type encountered");
+ return (EINVAL);
+ }
+}
+</pre></blockquote>
+<p>Applications use this dispatch function and the automatically generated
+functions as follows (a sketch of the full sequence appears after the list):
+<p><ol>
+<p><li>When the application starts, call the <a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a>
+with your dispatch function.
+<p><li>Issue a <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> call before any operations you want to be
+transaction-protected.
+<p><li>Before accessing any data, issue the appropriate lock call to lock the
+data (either for reading or writing).
+<p><li>Before modifying any data that is transaction-protected, issue a call
+to the appropriate log function.
+<p><li>Call <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a> to save all the changes, or call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>
+to cancel all of the modifications.
+</ol>
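+<p>Putting these steps together for the ex_apprec example, a minimal sketch
+of the sequence, with most error checking omitted and assuming the
+generated ex_apprec_mkdir_log function, is as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_TXN *txn;
+DB_LSN lsn;
+DBT dirname;
+int ret;
+<p>
+/* 1. Register the dispatch function before opening the environment. */
+db_env_create(&dbenv, 0);
+dbenv-&gt;set_app_dispatch(dbenv, apprec_dispatch);
+dbenv-&gt;open(dbenv, "/var/myapp", DB_CREATE | DB_INIT_LOCK |
+    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0);
+<p>
+/* 2. Begin the transaction. */
+dbenv-&gt;txn_begin(dbenv, NULL, &txn, 0);
+<p>
+/*
+ * 3, 4. Lock the object as appropriate, then log the operation before
+ * modifying the data.
+ */
+memset(&dirname, 0, sizeof(dirname));
+dirname.data = "newdir";
+dirname.size = sizeof("newdir");
+if ((ret = ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirname)) == 0)
+    ret = mkdir("newdir", 0755);
+<p>
+/* 5. Commit on success, abort on failure. */
+if (ret == 0)
+    txn-&gt;commit(txn, 0);
+else
+    txn-&gt;abort(txn);</pre></blockquote>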
+<p>The recovery functions are called in the three following cases:
+<p><ol>
+<p><li>During recovery after application or system failure, with op set to
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> or <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a>.
+<p><li>During transaction abort, with op set to <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a>.
+<p><li>On a replicated client to apply updates from the master, with op set to
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a>.
+</ol>
+<p>For each log record type you declare, you must write the appropriate
+function to undo and redo the modifications. The shell of these
+functions will be generated for you automatically, but you must fill in
+the details.
+<p>Your code must be able to detect whether the described modifications
+have been applied to the data. The function will be called with the
+"op" parameter set to <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a> when a transaction that wrote
+the log record aborts, with <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> and
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a> during recovery, and with <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a>
+on a replicated client.
+<p>The actions for <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_ABORT">DB_TXN_ABORT</a> and <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_BACKWARD_ROLL">DB_TXN_BACKWARD_ROLL</a>
+should generally be the same, and the actions for
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> and <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a> should generally
+be the same. However, if the application is using Berkeley DB replication
+and another thread of control may be performing read operations while
+log records are applied on a replication client, the recovery function
+should perform appropriate locking during <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a>
+operations. In this case, the recovery function may encounter deadlocks
+when issuing locking calls. The application should run with the
+deadlock detector, and the recovery function should simply return
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> if a deadlock is detected and a locking
+operation fails with that error.
+<p>The <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_PRINT">DB_TXN_PRINT</a> operation should print the log record,
+typically using the auto-generated print function; it is not used in
+the Berkeley DB library, but may be useful for debugging, as in the
+<a href="../../utility/db_printlog.html">db_printlog</a> utility. Applications may safely ignore this
+operation code, handle printing from the recovery function, or dispatch
+directly to the auto-generated print function.
+<p>One common way to determine whether operations need to be undone or
+redone is the use of log sequence numbers (LSNs). For example, each
+access method database page contains the LSN of the most recent log
+record that describes a modification to the page. When the access
+method changes a page, it writes a log record describing the change and
+including the LSN that was on the page before the change. This LSN is
+referred to as the previous LSN. The recovery functions read the page
+described by a log record, and compare the LSN on the page to the LSN
+they were passed.
+<p>If the page LSN is less than the passed LSN and the operation is an
+undo, no action is necessary (because the modifications have not been
+written to the page). If the page LSN is the same as the previous LSN
+and the operation is a redo, the actions described are reapplied to the
+page. If the page LSN is equal to the passed LSN and the operation is
+an undo, the actions are removed from the page; if the page LSN is
+greater than the passed LSN and the operation is a redo, no further
+action is necessary. If the action is a redo and the LSN on the page
+is less than the previous LSN in the log record, it is an error because
+it could happen only if some previous log record was not processed.
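+<p>The following is a minimal sketch of that comparison inside a recovery
+function, where <b>pagelsn</b> stands for the LSN stored in the
+application's own data, and apply_change and remove_change are
+hypothetical helpers that perform the record-specific redo and undo work:
+<p><blockquote><pre>int cmp_n, cmp_p, ret;
+<p>
+ret = 0;
+cmp_n = log_compare(lsn, &pagelsn);             /* record LSN vs. page LSN */
+cmp_p = log_compare(&pagelsn, &argp-&gt;prev_lsn); /* page LSN vs. previous LSN */
+<p>
+if (cmp_p == 0 &&
+    (op == DB_TXN_FORWARD_ROLL || op == DB_TXN_APPLY)) {
+    /* Redo: the page matches the previous LSN; reapply the change. */
+    ret = apply_change(argp);       /* hypothetical redo helper */
+    pagelsn = *lsn;
+} else if (cmp_n == 0 &&
+    (op == DB_TXN_ABORT || op == DB_TXN_BACKWARD_ROLL)) {
+    /* Undo: the change is on the page; remove it, restore the previous LSN. */
+    ret = remove_change(argp);      /* hypothetical undo helper */
+    pagelsn = argp-&gt;prev_lsn;
+}</pre></blockquote>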
+<p>Examples of other recovery functions can be found in the Berkeley DB library
+recovery functions (found in files named XXX_rec.c) and in the
+application-specific recovery example (specifically, ex_apprec_rec.c).
+<p>Finally, applications must ensure that any data modifications made as
+part of a committed transaction have been written to stable storage
+before calling the <a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a> method. This is
+to allow the periodic removal of database environment log files.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/apprec/auto.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/appsignals.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/apprec/def.html b/libdb/docs/ref/apprec/def.html
new file mode 100644
index 0000000..8ff5da6
--- /dev/null
+++ b/libdb/docs/ref/apprec/def.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Defining application-specific log records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Application Specific Logging and Recovery</dl></h3></td>
+<td align=right><a href="../../ref/apprec/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/auto.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Defining application-specific log records</h1>
+<p>By convention, log records are described in files named <b>XXX.src</b>,
+where "XXX" is typically a descriptive name for a subsystem or other
+logical group of logging functions. These files contain interface
+definition language descriptions for each type of log record that is
+used by the subsystem.
+<p>All blank lines and lines beginning with a hash ("#") character in
+the XXX.src files are ignored.
+<p>The first non-comment line in the file should begin with the keyword
+PREFIX, followed by a string that will be prepended to every generated
+function name. Frequently, the PREFIX is either identical or similar
+to the name of the <b>XXX.src</b> file. For example, the Berkeley DB
+application-specific recovery example uses the file
+<b>ex_apprec.src</b>, which begins with the following PREFIX line:
+<p><blockquote><pre>PREFIX ex_apprec</pre></blockquote>
+<p>Following the PREFIX line are the include files required by the
+automatically generated functions. The include files should be listed
+in order, prefixed by the keyword INCLUDE. For example, the Berkeley DB
+application-specific recovery example lists the following include
+files:
+<p><blockquote><pre>INCLUDE #include &lt;ctype.h&gt;
+INCLUDE #include &lt;errno.h&gt;
+INCLUDE #include &lt;stdlib.h&gt;
+INCLUDE #include &lt;string.h&gt;
+INCLUDE
+INCLUDE #include &lt;db.h&gt;
+INCLUDE
+INCLUDE #include "ex_apprec.h"</pre></blockquote>
+<p>The rest of the XXX.src file consists of log record descriptions. Each
+log record description begins with the line:
+<p><blockquote><pre>BEGIN <i>RECORD_NAME</i> <i>RECORD_NUMBER</i></pre></blockquote>
+<p>and ends with the line:
+<p><blockquote><pre>END</pre></blockquote>
+<p>The <i>RECORD_NAME</i> variable should be replaced with a record
+name for this log record. The <i>RECORD_NUMBER</i> variable should
+be replaced with a record number.
+<p>The combination of PREFIX name and <i>RECORD_NAME</i>, and the
+<i>RECORD_NUMBER</i> must be unique for the application, that is,
+values for application-specific and Berkeley DB log records may not overlap.
+Further, because record numbers are stored in log files, which are
+usually portable across application and Berkeley DB releases, any change to
+the record numbers or log record format should be handled as
+described in the <a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a> section on log format changes. The record number space
+below 10,000 is reserved for Berkeley DB itself; applications should choose
+record number values equal to or greater than 10,000.
+<p>Between the BEGIN and END keywords there should be one line for each
+data item logged as part of this log record. The format of these lines
+is as follows:
+<p><blockquote><pre>ARG | DBT | POINTER <i>variable_name</i> <i>variable_type</i> <i>printf_format</i></pre></blockquote>
+<p>The keyword ARG indicates that the argument is a simple parameter of
+the type specified. For example, a file ID might be logged as:
+<p><blockquote><pre>ARG fileID int d</pre></blockquote>
+<p>The keyword DBT indicates that the argument is a Berkeley DB DBT structure,
+containing a length and pointer to a byte string. The keyword POINTER
+indicates that the argument is a pointer to the data type specified (of
+course the data type, not the pointer, is what is logged).
+<p>The <i>variable_name</i> is the field name within the structure that
+will be used to refer to this item. The <i>variable_type</i> is
+the C-language type of the variable, and the printf format is the
+C-language format string, without the leading percent ("%") character,
+that should be used to display the contents of the field (for example,
+"s" for string, "d" for signed integral type, "u" for unsigned integral
+type, "ld" for signed long integral type, "lu" for long unsigned
+integral type, and so on).
+<p>For example, ex_apprec.src defines a single log record type, used to
+log a directory name that has been stored in a DBT:
+<p><blockquote><pre>BEGIN mkdir 10000
+DBT dirname DBT s
+END</pre></blockquote>
+<p>As the name suggests, this example of an application-defined log record
+will be used to log the creation of a directory. There are many more
+examples of XXX.src files in the Berkeley DB distribution. For example, the
+file btree/btree.src contains the definitions for the log records
+supported by the Berkeley DB Btree access method.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/apprec/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/auto.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/apprec/intro.html b/libdb/docs/ref/apprec/intro.html
new file mode 100644
index 0000000..7561ef6
--- /dev/null
+++ b/libdb/docs/ref/apprec/intro.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Application Specific Logging and Recovery</dl></h3></td>
+<td align=right><a href="../../ref/xa/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/def.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>It is possible to use the Locking, Logging and Transaction subsystems
+of Berkeley DB to provide transaction semantics on objects other than those
+described by the Berkeley DB access methods. In these cases, the application
+will need application-specific logging and recovery functions.
+<p>For example, consider an application that provides transaction semantics
+on data stored in plain text files accessed using the POSIX read and
+write system calls. The read and write operations for which transaction
+protection is desired will be bracketed by calls to the standard Berkeley DB
+transactional interfaces, <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> and <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>, and
+the transaction's locker ID will be used to acquire relevant read and
+write locks.
+<p>Before data is accessed, the application must make a call to the lock
+manager, <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a>, for a lock of the appropriate type (for
+example, read) on the object being locked. The object might be a page
+in the file, a byte, a range of bytes, or some key. It is up to the
+application to ensure that appropriate locks are acquired. Before a
+write is performed, the application should acquire a write lock on the
+object by making an appropriate call to the lock manager,
+<a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a>. Then, the application should make a call to the log
+manager, <a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a>, to record enough information to redo the
+operation in case of failure after commit and to undo the operation in
+case of abort.
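+<p>A minimal sketch of that lock-then-log sequence, assuming an open
+environment handle <b>dbenv</b>, an active transaction <b>txn</b>, and an
+application-defined log record already marshalled into the hypothetical
+<b>record_buffer</b> and <b>record_length</b>, is as follows:
+<p><blockquote><pre>DBT obj, logrec;
+DB_LOCK lock;
+DB_LSN lsn;
+int ret;
+<p>
+/* Lock the object; here the object is named by an application key. */
+memset(&obj, 0, sizeof(obj));
+obj.data = "myfile:0-511";
+obj.size = sizeof("myfile:0-511");
+if ((ret = dbenv-&gt;lock_get(dbenv,
+    txn-&gt;id(txn), 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
+    return (ret);
+<p>
+/* Log enough information to redo or undo the pending write. */
+memset(&logrec, 0, sizeof(logrec));
+logrec.data = record_buffer;
+logrec.size = record_length;
+if ((ret = dbenv-&gt;log_put(dbenv, &lsn, &logrec, 0)) != 0)
+    return (ret);
+<p>
+/* The write system call itself may now be issued. */</pre></blockquote>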
+<p>When designing applications that will use the log subsystem, it is
+important to remember that the application is responsible for providing
+any necessary structure to the log record. For example, the application
+must understand what part of the log record is an operation code, what
+part identifies the file being modified, what part is redo information,
+and what part is undo information.
+<p>After the log message is written, the application may issue the write
+system call. After all requests are issued, the application may call
+<a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>. When <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a> returns, the caller is
+guaranteed that all necessary log writes have been written to disk.
+<p>At any time before issuing a <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>,
+the application may call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>, which will
+result in restoration of the database to a consistent pretransaction
+state. (The application may specify its own recovery function for this
+purpose using the <a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a> method. The recovery
+function must be able to either reapply or undo the update depending on
+the context, for each different type of log record.)
+<p>If the application crashes, the recovery process uses the log to restore
+the database to a consistent state.
+<p>Berkeley DB includes tools to assist in the development of application-specific
+logging and recovery. Specifically, given a description of information
+to be logged in a family of log records, these tools will automatically
+create log-writing functions (functions that marshall their arguments
+into a single log record), log-reading functions (functions that read
+a log record and unmarshall it into a structure containing fields that
+map into the arguments written to the log), log-printing functions
+(functions that print the contents of a log record for debugging), and
+templates for recovery functions (functions that review log records
+during transaction abort or recovery). The tools and generated code
+are C-language and POSIX-system based, but the generated code should be
+usable on any system, not just POSIX systems.
+<p>A sample application that does application-specific recovery is included
+in the Berkeley DB distribution, in the directory <b>examples_c/ex_apprec</b>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/xa/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/def.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/arch/apis.html b/libdb/docs/ref/arch/apis.html
new file mode 100644
index 0000000..771ceeb
--- /dev/null
+++ b/libdb/docs/ref/arch/apis.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Programmatic APIs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td align=right><a href="../../ref/arch/progmodel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/script.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Programmatic APIs</h1>
+<p>The Berkeley DB subsystems can be accessed through interfaces from multiple
+languages. The standard library interface is ANSI C. Applications can
+also use Berkeley DB via C++ or Java, as well as from scripting languages.
+Environments can be shared among applications written using any of
+these APIs. For example, you might have a local server written in C
+or C++, a script for an administrator written in Perl or Tcl, and a
+Web-based user interface written in Java -- all sharing a single
+database environment.
+<h3>C</h3>
+<p>The Berkeley DB library is written entirely in ANSI C. C applications use a
+single include file:
+<p><blockquote><pre>#include &lt;db.h&gt;</pre></blockquote>
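+<p>As an illustration only, a minimal fragment that creates and opens a
+database handle might look like the following. The file name is
+hypothetical, the DB-&gt;open call is shown with a NULL transaction argument
+as in recent releases of this interface, and error handling is abbreviated.
+<p><blockquote><pre>DB *dbp;
+int ret;
+
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+    return (ret);
+if ((ret = dbp-&gt;open(dbp, NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+    dbp-&gt;err(dbp, ret, "DB-&gt;open: access.db");
+    (void)dbp-&gt;close(dbp, 0);
+    return (ret);
+}</pre></blockquote>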
+<h3>C++</h3>
+<p>The C++ classes provide a thin wrapper around the C API, with the major
+advantages being improved encapsulation and an optional exception
+mechanism for errors. C++ applications use a single include file:
+<p><blockquote><pre>#include &lt;db_cxx.h&gt;</pre></blockquote>
+<p>The classes and methods are named in a fashion that directly corresponds
+to structures and functions in the C interface. Likewise, arguments to
+methods appear in the same order as in the C interface, except that the
+explicit <b>this</b> pointer is removed. The #defines used for flags are identical
+between the C and C++ interfaces.
+<p>As a rule, each C++ object has exactly one structure from the underlying
+C API associated with it. The C structure is allocated with each
+constructor call and deallocated with each destructor call. Thus, the
+rules the user needs to follow in allocating and deallocating structures
+are the same between the C and C++ interfaces.
+<p>To ensure portability to many platforms, both new and old, Berkeley DB makes
+as few assumptions as possible about the C++ compiler and library. For
+example, it does not expect STL, templates, or namespaces to be
+available. The newest C++ feature used is exceptions, which are used
+liberally to transmit error information. Even the use of exceptions
+can be disabled at runtime.
+<h3>Java</h3>
+<p>The Java classes provide a layer around the C API that is almost identical
+to the C++ layer. The classes and methods are, for the most part,
+identical to the C++ layer. Berkeley DB constants and #defines are represented as
+"static final int" values. Error conditions are communicated as Java
+exceptions.
+<p>As in C++, each Java object has exactly one structure from the underlying
+C API associated with it. The Java structure is allocated with each
+constructor or open call, but is deallocated only by the Java garbage
+collector. Because the timing of garbage collection is not predictable,
+applications should take care to call close explicitly when finished with any object
+that has a close method.
+<h3>Dbm/Ndbm, Hsearch</h3>
+<p>Berkeley DB supports the standard UNIX interfaces <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a>,
+and <a href="../../api_c/hsearch.html">hsearch</a>. After including a new header file and recompiling,
+programs will run orders of magnitude faster, and underlying databases
+can grow as large as necessary. Also, historic <a href="../../api_c/dbm.html">dbm</a> and
+<a href="../../api_c/dbm.html">ndbm</a> applications can fail once some number of entries are
+inserted into the database, in which the number depends on the
+effectiveness of the internal hashing function on the particular data
+set. This is not a problem with Berkeley DB.
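+<p>A minimal sketch of the compatibility usage follows, assuming the
+DB_DBM_HSEARCH compatibility #define covered in the
+<a href="../../api_c/dbm.html">dbm</a> documentation linked above; the
+database name, key, and data are illustrative, and error checking is omitted.
+<p><blockquote><pre>#define DB_DBM_HSEARCH 1
+#include &lt;db.h&gt;
+#include &lt;fcntl.h&gt;
+
+DBM *db;
+datum key, data;
+
+db = dbm_open("fruit", O_CREAT | O_RDWR, 0664);
+key.dptr = "apple";
+key.dsize = sizeof("apple") - 1;
+data.dptr = "red";
+data.dsize = sizeof("red") - 1;
+(void)dbm_store(db, key, data, DBM_REPLACE);
+dbm_close(db);</pre></blockquote>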
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/arch/progmodel.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/script.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/arch/bigpic.gif b/libdb/docs/ref/arch/bigpic.gif
new file mode 100644
index 0000000..48c52ae
Binary files /dev/null and b/libdb/docs/ref/arch/bigpic.gif differ
diff --git a/libdb/docs/ref/arch/bigpic.html b/libdb/docs/ref/arch/bigpic.html
new file mode 100644
index 0000000..636d1d1
--- /dev/null
+++ b/libdb/docs/ref/arch/bigpic.html
@@ -0,0 +1,125 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: The big picture</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td align=right><a href="../../ref/am_misc/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/progmodel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>The big picture</h1>
+<p>The previous chapters in this Reference Guide have described
+applications that use the Berkeley DB access methods for fast data storage
+and retrieval. The applications described in the following chapters
+are similar in nature to the access method applications, but they are
+also threaded and/or recoverable in the face of application or system
+failure.
+<p>Application code that uses only the Berkeley DB access methods might appear
+as follows:
+<p><blockquote><pre>switch (ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) {
+case 0:
+    printf("db: %s: key stored.\n", (char *)key.data);
+    break;
+default:
+    dbp-&gt;err(dbp, ret, "dbp-&gt;put");
+    exit (1);
+}</pre></blockquote>
+<p>The underlying Berkeley DB architecture that supports this is
+<p align=center><img src="smallpic.gif" alt="small">
+<p>As you can see from this diagram, the application makes calls into the
+access methods, and the access methods use the underlying shared memory
+buffer cache to hold recently used file pages in main memory.
+<p>When applications require recoverability, their calls to the Access
+Methods must be wrapped in calls to the transaction subsystem. The
+application must inform Berkeley DB where to begin and end transactions, and
+must be prepared for the possibility that an operation may fail at any
+particular time, causing the transaction to abort.
+<p>An example of transaction-protected code might appear as follows:
+<p><blockquote><pre>for (fail = 0;;) {
+    /* Begin the transaction. */
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "dbenv-&gt;txn_begin");
+        exit (1);
+    }
+
+    /* Store the key. */
+    switch (ret = dbp-&gt;put(dbp, tid, &key, &data, 0)) {
+    case 0:
+        /* Success: commit the change. */
+        printf("db: %s: key stored.\n", (char *)key.data);
+        if ((ret = tid-&gt;commit(tid, 0)) != 0) {
+            dbenv-&gt;err(dbenv, ret, "DB_TXN-&gt;commit");
+            exit (1);
+        }
+        return (0);
+    case DB_LOCK_DEADLOCK:
+    default:
+        /* Failure: retry the operation. */
+        if ((t_ret = tid-&gt;abort(tid)) != 0) {
+            dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+            exit (1);
+        }
+        if (++fail == MAXIMUM_RETRY)
+            return (ret);
+        continue;
+    }
+}</pre></blockquote>
+<p>In this example, the same operation is being done as before; however,
+it is wrapped in transaction calls. The transaction is started with
+<a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> and finished with <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>. If the
+operation fails due to a deadlock, the transaction is aborted using
+<a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>, after which the operation may be retried.
+<p>There are actually five major subsystems in Berkeley DB, as follows:
+<p><dl compact>
+<p><dt>Access Methods<dd>The access methods subsystem provides general-purpose support for
+creating and accessing database files formatted as Btrees, Hashed files,
+and Fixed- and Variable-length records. These modules are useful in
+the absence of transactions for applications that need fast formatted
+file support. See <a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> for more
+information. These functions were already discussed in detail in the
+previous chapters.
+<p><dt>Memory Pool<dd>The Memory Pool subsystem is the general-purpose shared memory buffer pool
+used by Berkeley DB. This is the shared memory cache that allows multiple
+processes and threads within processes to share access to databases. This
+module is useful outside of the Berkeley DB package for processes that require
+portable, page-oriented, cached, shared file access.
+<p><dt>Transaction<dd>The Transaction subsystem allows a group of database changes to be
+treated as an atomic unit so that either all of the changes are done,
+or none of the changes are done. The transaction subsystem implements
+the Berkeley DB transaction model. This module is useful outside of the Berkeley DB
+package for processes that want to transaction-protect their own data
+modifications.
+<p><dt>Locking<dd>The Locking subsystem is the general-purpose lock manager used by Berkeley DB.
+This module is useful outside of the Berkeley DB package for processes that
+require a portable, fast, configurable lock manager.
+<p><dt>Logging<dd>The Logging subsystem is the write-ahead logging used to support the
+Berkeley DB transaction model. It is largely specific to the Berkeley DB package,
+and unlikely to be useful elsewhere except as a supporting module for
+the Berkeley DB transaction subsystem.
+</dl>
+<p>Here is a more complete picture of the Berkeley DB library:
+<p align=center><img src="bigpic.gif" alt="large">
+<p>In this model, the application makes calls to the access methods and to
+the Transaction subsystem. The access methods and Transaction subsystems
+in turn make calls into the Memory Pool, Locking and Logging subsystems
+on behalf of the application.
+<p>The underlying subsystems can be used independently by applications.
+For example, the Memory Pool subsystem can be used apart from the rest
+of Berkeley DB by applications simply wanting a shared memory buffer pool, or
+the Locking subsystem may be called directly by applications that are
+doing their own locking outside of Berkeley DB. However, this usage is not
+common, and most applications will either use only the access methods
+subsystem, or the access methods subsystem wrapped in calls to the Berkeley DB
+transaction interfaces.
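+<p>As a sketch only, an application using just the Locking subsystem might
+create an environment with only that subsystem initialized, along the
+following lines (the home directory and object name are illustrative, and
+error handling is omitted):
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_LOCK lock;
+DBT obj;
+u_int32_t locker;
+
+(void)db_env_create(&dbenv, 0);
+(void)dbenv-&gt;open(dbenv, "/var/myapp", DB_CREATE | DB_INIT_LOCK, 0);
+
+memset(&obj, 0, sizeof(obj));
+obj.data = "resource-name";
+obj.size = sizeof("resource-name") - 1;
+
+(void)dbenv-&gt;lock_id(dbenv, &locker);
+(void)dbenv-&gt;lock_get(dbenv, locker, 0, &obj, DB_LOCK_WRITE, &lock);
+/* ... operate on the shared resource ... */
+(void)dbenv-&gt;lock_put(dbenv, &lock);
+(void)dbenv-&gt;close(dbenv, 0);</pre></blockquote>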
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/am_misc/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/progmodel.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/arch/progmodel.html b/libdb/docs/ref/arch/progmodel.html
new file mode 100644
index 0000000..f8d30d0
--- /dev/null
+++ b/libdb/docs/ref/arch/progmodel.html
@@ -0,0 +1,44 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Programming model</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td align=right><a href="../../ref/arch/bigpic.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/apis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Programming model</h1>
+<p>Berkeley DB is a database library that is linked directly into the
+address space of the application using it. The code using Berkeley DB may be
+a standalone application or it may be a server providing functionality
+to many clients via inter-process or remote-process communication
+(IPC/RPC).
+<p>In the standalone application model, one or more applications link the
+Berkeley DB library directly into their address spaces. There may be many
+threads of control in this model because Berkeley DB supports locking for both
+multiple processes and for multiple threads within a process. This
+model provides significantly faster access to the database
+functionality, but implies trust among all threads of control sharing
+the database environment because they will have the ability to read,
+write and potentially corrupt each other's data.
+<p>In the client-server model, developers write a database server
+application that accepts requests via some form of IPC/RPC, and issues
+calls to the Berkeley DB interfaces based on those requests. In this model,
+the database server is the only application linking the Berkeley DB library
+into its address space. The client-server model trades performance for
+protection because it does not require that the applications share a
+protection domain with the server, but IPC/RPC is slower than a function
+call. Of course, this model also greatly simplifies the creation of
+network client-server applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/arch/bigpic.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/apis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/arch/script.html b/libdb/docs/ref/arch/script.html
new file mode 100644
index 0000000..089a89e
--- /dev/null
+++ b/libdb/docs/ref/arch/script.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Scripting languages</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td align=right><a href="../../ref/arch/apis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/utilities.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Scripting languages</h1>
+<h3>Perl</h3>
+<p>Two Perl APIs are distributed with the Berkeley DB release. The Perl
+interface to Berkeley DB version 1.85 is called DB_File. The Perl interface
+to Berkeley DB version 2 and later is called BerkeleyDB. See
+<a href="../../ref/perl/intro.html">Using Berkeley DB with Perl</a> for more
+information.
+<h3>Tcl</h3>
+<p>A Tcl API is distributed with the Berkeley DB release. See
+<a href="../../ref/tcl/intro.html">Using Berkeley DB with Tcl</a> for more
+information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/arch/apis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/arch/utilities.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/arch/smallpic.gif b/libdb/docs/ref/arch/smallpic.gif
new file mode 100644
index 0000000..5eb7ae8
Binary files /dev/null and b/libdb/docs/ref/arch/smallpic.gif differ
diff --git a/libdb/docs/ref/arch/utilities.html b/libdb/docs/ref/arch/utilities.html
new file mode 100644
index 0000000..af8798c
--- /dev/null
+++ b/libdb/docs/ref/arch/utilities.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Supporting utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Architecture</dl></h3></td>
+<td align=right><a href="../../ref/arch/script.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Supporting utilities</h1>
+<p>The following are the standalone utilities that provide supporting
+functionality for the Berkeley DB environment:
+<p><dl compact>
+<p><dt><a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a><dd>The <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> utility is the Berkeley DB RPC server that
+provides standard server functionality for client applications.
+<p><dt><a href="../../utility/db_archive.html">db_archive</a><dd>The <a href="../../utility/db_archive.html">db_archive</a> utility supports database backup and archival,
+and log file administration. It facilitates log reclamation and the
+creation of database snapshots. Generally, some form of log archival
+must be done if a database environment has been configured for logging
+or transactions.
+<p><dt><a href="../../utility/db_checkpoint.html">db_checkpoint</a><dd>The <a href="../../utility/db_checkpoint.html">db_checkpoint</a> utility runs as a daemon process, monitoring
+the database log and periodically issuing checkpoints. It facilitates
+log reclamation and the creation of database snapshots. Generally, some
+form of database checkpointing must be done if a database environment has
+been configured for transactions.
+<p><dt><a href="../../utility/db_deadlock.html">db_deadlock</a><dd>The <a href="../../utility/db_deadlock.html">db_deadlock</a> utility runs as a daemon process, periodically
+traversing the database lock structures and aborting transactions when it
+detects a deadlock. Generally, some form of deadlock detection must be
+done if a database environment has been configured for locking.
+<p><dt><a href="../../utility/db_dump.html">db_dump</a><dd>The <a href="../../utility/db_dump.html">db_dump</a> utility writes a copy of the database to a flat-text
+file in a portable format.
+<p><dt><a href="../../utility/db_load.html">db_load</a><dd>The <a href="../../utility/db_load.html">db_load</a> utility reads the flat-text file produced by
+<a href="../../utility/db_dump.html">db_dump</a> and loads it into a database file.
+<p><dt><a href="../../utility/db_printlog.html">db_printlog</a><dd>The <a href="../../utility/db_printlog.html">db_printlog</a> utility displays the contents of Berkeley DB log files
+in a human-readable and parsable format.
+<p><dt><a href="../../utility/db_recover.html">db_recover</a><dd>The <a href="../../utility/db_recover.html">db_recover</a> utility runs after an unexpected Berkeley DB or system
+failure to restore the database to a consistent state. Generally, some
+form of database recovery must be done if databases are being modified.
+<p><dt><a href="../../utility/db_stat.html">db_stat</a> <dd>The <a href="../../utility/db_stat.html">db_stat</a> utility displays statistics for databases and database
+environments.
+<p><dt><a href="../../utility/db_upgrade.html">db_upgrade</a><dd>The <a href="../../utility/db_upgrade.html">db_upgrade</a> utility provides a command-line interface for
+upgrading underlying database formats.
+<p><dt><a href="../../utility/db_verify.html">db_verify</a><dd>The <a href="../../utility/db_verify.html">db_verify</a> utility provides a command-line interface for
+verifying the database format.
+</dl>
+<p>All of the functionality implemented for these utilities is also available
+as part of the standard Berkeley DB API. This means that threaded applications
+can easily create a thread that calls the same Berkeley DB functions as do the
+utilities. This often simplifies an application environment by removing
+the necessity for multiple processes to negotiate database and database
+environment creation and shutdown.
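+<p>For example (a sketch only, assuming an already-opened transactional
+environment handle, dbenv, and arbitrary interval and log-size thresholds),
+a server thread could issue periodic checkpoints itself rather than running
+the <a href="../../utility/db_checkpoint.html">db_checkpoint</a> utility as
+a separate process:
+<p><blockquote><pre>for (;;) {
+    /* Checkpoint if more than 64KB of log has been written. */
+    (void)dbenv-&gt;txn_checkpoint(dbenv, 64, 0, 0);
+    sleep(60);
+}</pre></blockquote>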
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/arch/script.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/aix.html b/libdb/docs/ref/build_unix/aix.html
new file mode 100644
index 0000000..6159159
--- /dev/null
+++ b/libdb/docs/ref/build_unix/aix.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: AIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/embedix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>AIX</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on AIX. If you are compiling a threaded application, you
+must compile with the _THREAD_SAFE flag and load with specific
+libraries; for example, "-lc_r". Specifying the compiler name with a
+trailing "_r" usually performs the right actions for the system.
+<p><blockquote><pre>xlc_r ...
+cc -D_THREAD_SAFE -lc_r ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I can't run using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> option to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.</b>
+<p>AIX 4.1 allows applications to map only 10 system shared memory
+segments. In AIX 4.3, this has been raised to 256K segments, but only
+if you set the EXTSHM environment variable (for example, "export EXTSHM=ON").
+<hr size=1 noshade>
+<p><li><b>On AIX 4.3.2 (or before) I see undefined symbols when linking C++
+applications. Also, Java complains about missing symbols on this
+platform even though I've configured and built for Java.</b>
+<p>Versions of AIX 4.3.2 and before do not have the tools necessary to
+produce shared libraries for languages such as C++ and Java that use
+"name mangling" to map their names to C names. The Berkeley DB C++ API can
+be only used on this platform as a static library. The Berkeley DB Java API
+cannot be used on this platform.
+<hr size=1 noshade>
+<p><li><b>On AIX 4.3.3 I see undefined symbols for DbEnv::set_error_stream,
+Db::set_error_stream or DbEnv::verify when linking C++ applications.
+(These undefined symbols also appear when building the Berkeley DB C++ example
+applications).</b>
+<p>By default, Berkeley DB is built with _LARGE_FILES set to 1 to support the
+creation of "large" database files. However, this also affects how
+standard classes, like iostream, are named internally. When building
+your application, use a "-D_LARGE_FILES=1" compilation option, or insert
+"#define _LARGE_FILES 1" before any #include statements.
+<hr size=1 noshade>
+<p><li><b>I can't create database files larger than 1GB on AIX.</b>
+<p>If you're running on AIX 4.1 or earlier, try changing the source code
+for <b>os/os_open.c</b> to always specify the <b>O_LARGEFILE</b>
+flag to the <b>open</b>(2) system call, and recompile Berkeley DB from
+scratch.
+<p>Also, the documentation for the IBM Visual Age compiler states that it
+does not support the 64-bit filesystem APIs necessary for creating
+large files; the ibmcxx product must be used instead. We have not heard
+whether the GNU gcc compiler supports the 64-bit APIs or not.
+<p>Finally, to create large files under AIX, the filesystem has to be
+configured to support large files, and the system-wide user hard limit
+for file sizes has to be greater than 1GB.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/embedix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/conf.html b/libdb/docs/ref/build_unix/conf.html
new file mode 100644
index 0000000..a9c293a
--- /dev/null
+++ b/libdb/docs/ref/build_unix/conf.html
@@ -0,0 +1,183 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring Berkeley DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring Berkeley DB</h1>
+<p>There are several arguments you can specify when configuring Berkeley DB.
+Although only the Berkeley DB-specific ones are described here, most of the
+standard GNU autoconf arguments are available and supported. To see a
+complete list of possible arguments, specify the --help flag to the
+configure program.
+<p>The Berkeley DB-specific arguments are as follows:
+<p><dl compact>
+<a name="4"><!--meow--></a>
+<p><dt><a name="--enable-compat185">--enable-compat185</a><dd>To compile or load Berkeley DB 1.85 applications against this release of the
+Berkeley DB library, enter --enable-compat185 as an argument to configure.
+This will include Berkeley DB 1.85 API compatibility code in the library.
+<a name="5"><!--meow--></a>
+<p><dt><a name="--enable-cxx">--enable-cxx</a><dd>To build the Berkeley DB C++ API, enter --enable-cxx as an argument to
+configure.
+<p><dt><a name="--enable-debug">--enable-debug</a><dd>To build Berkeley DB with <b>-g</b> as a compiler flag and with
+<b>DEBUG</b> #defined during compilation, enter --enable-debug as an
+argument to configure. This will create a Berkeley DB library and utilities
+with debugging symbols, as well as load various routines that can be
+called from a debugger to display pages, cursor queues, and so forth.
+If installed, the utilities will not be stripped. This argument should
+not be specified when configuring to build production binaries.
+<p><dt><a name="--enable-debug_rop">--enable-debug_rop</a><dd>To build Berkeley DB to output log records for read operations, enter
+--enable-debug_rop as an argument to configure. This argument should not
+be specified when configuring to build production binaries.
+<p><dt><a name="--enable-debug_wop">--enable-debug_wop</a><dd>To build Berkeley DB to output log records for write operations, enter
+--enable-debug_wop as an argument to configure. This argument should not
+be specified when configuring to build production binaries.
+<p><dt><a name="--enable-diagnostic">--enable-diagnostic</a><dd>To build Berkeley DB with run-time debugging checks, enter --enable-diagnostic
+as an argument to configure. This will cause a number of special checks
+to be performed when Berkeley DB is running. Applications built using this
+argument should not share database environments with applications built
+without this argument. This argument should not be specified when
+configuring to build production binaries.
+<a name="6"><!--meow--></a>
+<p><dt><a name="--enable-dump185">--enable-dump185</a><dd>To convert Berkeley DB 1.85 (or earlier) databases to this release of Berkeley DB,
+enter --enable-dump185 as an argument to configure. This will build the
+<a href="../../utility/db_dump.html">db_dump185</a> utility, which can dump Berkeley DB 1.85 and 1.86 databases
+in a format readable by the Berkeley DB <a href="../../utility/db_load.html">db_load</a> utility.
+<p>The system libraries with which you are loading the <a href="../../utility/db_dump.html">db_dump185</a>
+utility must already contain the Berkeley DB 1.85 library routines for this
+to work because the Berkeley DB distribution does not include them. If you
+are using a non-standard library for the Berkeley DB 1.85 library routines,
+you will have to change the Makefile that the configuration step creates
+to load the <a href="../../utility/db_dump.html">db_dump185</a> utility with that library.
+<a name="7"><!--meow--></a>
+<p><dt><a name="--enable-java">--enable-java</a><dd>To build the Berkeley DB Java API, enter --enable-java as an argument to
+configure. To build Java, you must also build with shared libraries.
+Before configuring, you must set your PATH environment variable to
+include javac. Note that it is not sufficient to include a symbolic
+link to javac in your PATH because the configuration process uses the
+location of javac to determine the location of the Java include files
+(for example, jni.h). On some systems, additional include directories
+may be needed to process jni.h; see <a href="flags.html">Changing compile or
+load options</a> for more information.
+<a name="8"><!--meow--></a>
+<p><dt><a name="--disable-largefile">--disable-largefile</a><dd>Some systems, notably versions of HP/UX and Solaris, require special
+compile-time options in order to create files larger than 2^32 bytes.
+These options are automatically enabled when Berkeley DB is compiled. For
+this reason, binaries built on current versions of these systems may
+not run on earlier versions of the system because the library and system
+calls necessary for large files are not available. To disable building
+with these compile-time options, enter --disable-largefile as an argument
+to configure.
+<p><dt><a name="--enable-posixmutexes">--enable-posixmutexes</a><dd>To force Berkeley DB to use the POSIX pthread mutex interfaces for underlying
+mutex support, enter --enable-posixmutexes as an argument to configure.
+This is rarely necessary: POSIX mutexes will be selected automatically
+on systems where they are the preferred implementation.
+<p>The --enable-posixmutexes configuration argument is normally used in
+two ways: First, when there are multiple mutex implementations available
+and the POSIX mutex implementation is not the preferred one (for
+example, on Solaris where the LWP mutexes are used by default). Second,
+by default the Berkeley DB library will only select the POSIX mutex
+implementation if it supports mutexes shared between multiple processes,
+as described for the pthread_condattr_setpshared and
+pthread_mutexattr_setpshared interfaces. The --enable-posixmutexes
+configuration argument can be used to force the selection of POSIX
+mutexes in this case, which can improve application performance
+significantly when the alternative mutex implementation is a
+non-blocking one (for example, test-and-set assembly instructions).
+However, configuring to use POSIX mutexes when the implementation does
+not have inter-process support will only allow the creation of private
+database environments, that is, environments where the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag is specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method.
+<p>Specifying the --enable-posixmutexes configuration argument may require
+that Berkeley DB be linked with the -lpthread library.
+<a name="9"><!--meow--></a>
+<p><dt><a name="--enable-rpc">--enable-rpc</a><dd>To build the Berkeley DB RPC client code and server utility, enter --enable-rpc
+as an argument to configure. The --enable-rpc argument requires that RPC
+libraries already be installed on your system.
+<a name="10"><!--meow--></a><a name="11"><!--meow--></a>
+<p><dt><a name="--disable-shared">--disable-shared</a>, <a name="--disable-static">--disable-static</a><dd>On systems supporting shared libraries, Berkeley DB builds both static and
+shared libraries by default. (Shared libraries are built using
+<a href="http://www.gnu.org/software/libtool/libtool.html">the GNU
+Project's Libtool</a> distribution, which supports shared library builds
+on many (although not all) systems.) To not build shared libraries,
+configure using the --disable-shared argument. To not build static
+libraries, configure using the --disable-static argument.
+<a name="12"><!--meow--></a>
+<p><dt><a name="--enable-tcl">--enable-tcl</a><dd>To build the Berkeley DB Tcl API, enter --enable-tcl as an argument to
+configure. This configuration argument expects to find Tcl's tclConfig.sh
+file in the <b>/usr/local/lib</b> directory. See the --with-tcl
+argument for instructions on specifying a non-standard location for the
+Tcl installation. See <a href="../../ref/tcl/intro.html">Loading Berkeley DB
+with Tcl</a> for information on sites from which you can download Tcl and
+which Tcl versions are compatible with Berkeley DB. To build Tcl, you must
+also build with shared libraries.
+<a name="13"><!--meow--></a>
+<p><dt><a name="--enable-test">--enable-test</a><dd>To build the Berkeley DB test suite, enter --enable-test as an argument to
+configure. To run the Berkeley DB test suite, you must also build the Tcl
+API. This argument should not be specified when configuring to build
+production binaries.
+<p><dt><a name="--enable-uimutexes">--enable-uimutexes</a><dd>To force Berkeley DB to use the UNIX International (UI) mutex interfaces for
+underlying mutex support, enter --enable-uimutexes as an argument to
+configure. This is rarely necessary: UI mutexes will be selected
+automatically on systems where they are the preferred implementation.
+<p>The --enable-uimutexes configuration argument is normally used when
+there are multiple mutex implementations available and the UI mutex
+implementation is not the preferred one (for example, on Solaris where
+the LWP mutexes are used by default).
+<p>Specifying the --enable-uimutexes configuration argument may require
+that Berkeley DB be linked with the -lthread library.
+<p><dt><a name="--enable-umrw">--enable-umrw</a><dd>Rational Software's Purify product and other run-time tools complain
+about uninitialized reads/writes of structure fields whose only purpose
+is padding, as well as when heap memory that was never initialized is
+written to disk. Specify the --enable-umrw argument during
+configuration to mask these errors. This argument should not be
+specified when configuring to build production binaries.
+<p><dt><a name="--with-embedix=DIR">--with-embedix=DIR</a><dd>To build Berkeley DB for Embedix, configure with --with-embedix=DIR, where
+DIR is the directory in which Embedix is installed. If "=DIR" is not
+specified, a default installation directory of <b>/opt/Embedix</b> is
+used. This configuration argument creates an Embedix Component Descriptor
+file (ECD) for Berkeley DB. To configure for Embedix, you must also specify
+the --with-rpm argument.
+<p><dt><a name="--with-mutex=MUTEX">--with-mutex=MUTEX</a><dd>To force Berkeley DB to use a specific mutex implementation, configure with
+--with-mutex=MUTEX, where MUTEX is the mutex implementation you want.
+For example, --with-mutex=x86/gcc-assembly will configure Berkeley DB to use
+the x86 GNU gcc compiler based test-and-set assembly mutexes. This is
+rarely necessary and should be done only when the default configuration
+selects the wrong mutex implementation. A list of available mutex
+implementations can be found in the distribution file
+<b>dist/aclocal/mutex.ac</b>.
+<p><dt><a name="--with-rpm=DIR">--with-rpm=DIR</a><dd>To build Berkeley DB as an RPM software package, configure with --with-rpm=DIR,
+where DIR is the directory in which the gzipped tar archive file of the
+distribution may be found. This configuration argument will create an
+RPM specification file from which the RPM software package can be built,
+using the "make" command.
+<p><dt><a name="--with-tcl=DIR">--with-tcl=DIR</a><dd>To build the Berkeley DB Tcl API, enter --with-tcl=DIR, replacing DIR with
+the directory in which the Tcl tclConfig.sh file may be found. See
+<a href="../../ref/tcl/intro.html">Loading Berkeley DB with Tcl</a> for information
+on sites from which you can download Tcl and which Tcl versions are
+compatible with Berkeley DB. To build Tcl, you must also build with shared
+libraries.
+<p><dt><a name="--with-uniquename=NAME">--with-uniquename=NAME</a><dd>To build Berkeley DB with unique symbol names (in order to avoid conflicts
+with other application modules or libraries), enter --with-uniquename=NAME,
+replacing NAME with a string to be appended to every Berkeley DB symbol.
+If "=NAME" is not specified, a default value of "_MAJORMINOR" is used,
+where MAJORMINOR is the major and minor release numbers of the Berkeley DB
+release. See <a href="../../ref/install/multiple.html">Building with
+multiple versions of Berkeley DB</a> for more information.
+</dl>
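+<p>For example (an illustrative command line only; the Tcl installation path
+is hypothetical), several of these arguments may be combined in a single
+invocation of configure:
+<p><blockquote><pre>prompt: ../dist/configure --enable-cxx --enable-tcl --with-tcl=/usr/local/lib</pre></blockquote>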
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/embedix.html b/libdb/docs/ref/build_unix/embedix.html
new file mode 100644
index 0000000..6449cc3
--- /dev/null
+++ b/libdb/docs/ref/build_unix/embedix.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Embedix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/aix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/freebsd.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Embedix</h1>
+<p><ol>
+<p><li><b>How do I make Embedix aware of Berkeley DB?</b>
+<p>In order to make Embedix aware of a software component, two items are
+needed: an Embedix Component Descriptor file and a source RPM file
+for that component. After Berkeley DB has been configured for RPM and
+Embedix using the configuration options described in <a href="conf.html">Configuring
+Berkeley DB</a>, the RPM should be built using:
+<p><blockquote><pre>make</pre></blockquote>
+After the RPM is built, the files need to be installed (as root)
+in the Embedix installation area, using:
+<p><blockquote><pre>make install</pre></blockquote>
+After installation, start up the
+Embedix target wizard and Berkeley DB will appear in the component list.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/aix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/freebsd.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/flags.html b/libdb/docs/ref/build_unix/flags.html
new file mode 100644
index 0000000..5388753
--- /dev/null
+++ b/libdb/docs/ref/build_unix/flags.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Changing compile or load options</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/install.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Changing compile or load options</h1>
+<p>You can specify the compiler, as well as compile-time and load-time flags,
+by setting environment variables during Berkeley DB configuration. For example, if you
+want to use a specific compiler, specify the CC environment variable
+before running configure:
+<p><blockquote><pre>prompt: env CC=gcc ../dist/configure</pre></blockquote>
+<p>Using anything other than the native compiler will almost certainly mean
+that you'll want to check the flags specified to the compiler and
+loader, too.
+<p>To specify debugging and optimization options for the C compiler,
+use the CFLAGS environment variable:
+<p><blockquote><pre>prompt: env CFLAGS=-O2 ../dist/configure</pre></blockquote>
+<p>To specify header file search directories and other miscellaneous options
+for the C preprocessor and compiler, use the CPPFLAGS environment variable:
+<p><blockquote><pre>prompt: env CPPFLAGS=-I/usr/contrib/include ../dist/configure</pre></blockquote>
+<p>To specify debugging and optimization options for the C++ compiler,
+use the CXXFLAGS environment variable:
+<p><blockquote><pre>prompt: env CXXFLAGS=-Woverloaded-virtual ../dist/configure</pre></blockquote>
+<p>To specify miscellaneous options or additional library directories for
+the linker, use the LDFLAGS environment variable:
+<p><blockquote><pre>prompt: env LDFLAGS="-N32 -L/usr/local/lib" ../dist/configure</pre></blockquote>
+<p>If you want to specify additional libraries, set the LIBS environment
+variable before running configure. For example, the following would
+specify two additional libraries to load, "posix" and "socket":
+<p><blockquote><pre>prompt: env LIBS="-lposix -lsocket" ../dist/configure</pre></blockquote>
+<p>Make sure that you prepend -L to any library directory names and that you
+prepend -I to any include file directory names! Also, if the arguments
+you specify contain blank or tab characters, be sure to quote them as
+shown previously; that is, with single or double quotes around the values
+you are specifying for LIBS.
+<p>The env command, which is available on most systems, simply sets one or
+more environment variables before running a command. If the env command
+is not available to you, you can set the environment variables in your
+shell before running configure. For example, in sh or ksh, you could
+do the following:
+<p><blockquote><pre>prompt: LIBS="-lposix -lsocket" ../dist/configure</pre></blockquote>
+<p>In csh or tcsh, you could do the following:
+<p><blockquote><pre>prompt: setenv LIBS "-lposix -lsocket"
+prompt: ../dist/configure</pre></blockquote>
+<p>See your command shell's manual page for further information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/install.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/freebsd.html b/libdb/docs/ref/build_unix/freebsd.html
new file mode 100644
index 0000000..eddae04
--- /dev/null
+++ b/libdb/docs/ref/build_unix/freebsd.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: FreeBSD</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/embedix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/hpux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>FreeBSD</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on FreeBSD. If you are compiling a threaded application,
+you must compile with the _THREAD_SAFE and -pthread flags:
+<p><blockquote><pre>cc -D_THREAD_SAFE -pthread ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I get occasional failures when running RPC-based programs under
+FreeBSD clients.</b>
+<p>There is a known bug in the XDR implementation in the FreeBSD C library
+from version 2.2 up to version 4.0-RELEASE that causes certain-sized
+messages to fail and return a zero-filled reply to the client. A bug
+report (#16028) has been filed with FreeBSD. The following patch is the
+FreeBSD fix:
+<p><blockquote><pre>*** /usr/src/lib/libc/xdr/xdr_rec.c.orig Mon Jan 10 10:20:42 2000
+--- /usr/src/lib/libc/xdr/xdr_rec.c Wed Jan 19 10:53:45 2000
+***************
+*** 558,564 ****
+ * but we don't have any way to be certain that they aren't
+ * what the client actually intended to send us.
+ */
+! if ((header & (~LAST_FRAG)) == 0)
+ return(FALSE);
+ rstrm-&gt;fbtbc = header & (~LAST_FRAG);
+ return (TRUE);
+--- 558,564 ----
+ * but we don't have any way to be certain that they aren't
+ * what the client actually intended to send us.
+ */
+! if (header == 0)
+ return(FALSE);
+ rstrm-&gt;fbtbc = header & (~LAST_FRAG);
+ return (TRUE);
+</pre></blockquote>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/embedix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/hpux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/hpux.html b/libdb/docs/ref/build_unix/hpux.html
new file mode 100644
index 0000000..7332b91
--- /dev/null
+++ b/libdb/docs/ref/build_unix/hpux.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: HP-UX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/freebsd.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/irix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>HP-UX</h1>
+<p><ol>
+<p><li><b>I can't specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.</b>
+<p>The <b>shmget</b>(2) interfaces are not always used on HP-UX, even
+though they exist, because anonymous memory allocated using <b>shmget</b>(2)
+cannot be used to store the standard HP-UX msemaphore semaphores. For
+this reason, it may not be possible to specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a>
+flag on some versions of HP-UX. (We have seen this problem only on HP-UX
+10.XX, so the simplest workaround may be to upgrade your HP-UX release.)
+<hr size=1 noshade>
+<p><li><b>I can't specify both the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> and <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+flags to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.</b>
+<p>It is not possible to store the standard HP-UX msemaphore semaphores in
+memory returned by <b>malloc</b>(3) in some versions of HP-UX. For
+this reason, it may not be possible to specify both the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> and <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flags on some versions of
+HP-UX. (We have seen this problem only on HP-UX 10.XX, so the simplest
+workaround may be to upgrade your HP-UX release.)
+<hr size=1 noshade>
+<p><li><b>During configuration, I see a message that large file support
+has been turned off.</b>
+<p>Some HP-UX system include files redefine "open" when big-file support (the
+HAVE_FILE_OFFSET_BITS and _FILE_OFFSET_BITS #defines) is enabled. This
+causes problems when compiling for C++, where "open" is a legal
+identifier, used in the Berkeley DB C++ API. For this reason, we automatically
+turn off big-file support when Berkeley DB is configured with a C++ API. This
+should not be a problem for applications unless there is a need to create
+databases larger than 2GB.
+<hr size=1 noshade>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on HP-UX. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>An ENOMEM error is returned from <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> or
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a>.</b>
+<p>Due to the constraints of the PA-RISC memory architecture, HP-UX does not
+allow a process to map a file into its address space multiple times.
+For this reason, each Berkeley DB environment may be opened only once by a
+process on HP-UX; that is, calls to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> will fail if the
+specified Berkeley DB environment has been opened and not subsequently closed.
+<hr size=1 noshade>
+<p><li><b>When compiling with gcc, I see the following error:
+<p><blockquote><pre>#error "Large Files (ILP32) not supported in strict ANSI mode."</pre></blockquote></b>
+<p>We believe this is an error in the HP-UX include files, but we don't
+really understand it. The only workaround we have found is to add
+-D__STDC_EXT__ to the C preprocessor defines as part of compilation.
+<hr size=1 noshade>
+<p><li><b>When using the Tcl or Perl APIs (including running the test
+suite), I see the error "Can't shl_load() a library containing Thread
+Local Storage".</b>
+<p>This problem happens when HP-UX has been configured to use pthread mutex
+locking, and an attempt is made to call Berkeley DB using the Tcl or Perl APIs.
+We have never found any way to fix this problem as part of the Berkeley DB
+build process. To work around the problem, rebuild tclsh or Perl, and
+modify its build process to explicitly link it against the HP-UX pthread
+library (currently /usr/lib/libpthread.a).
+<hr size=1 noshade>
+<p><li><b>When running an executable that has been dynamically linked
+against the Berkeley DB library, I see the error "Can't find path for shared
+library" even though I correctly set the SHLIB_PATH environment variable.</b>
+<p>By default, some versions of HP-UX ignore the dynamic library search
+path specified by the SHLIB_PATH environment variable. To work around
+this, specify the "+s" flag to ld when linking, or run the following
+command on the executable that is not working:
+<p><blockquote><pre>chatr +s enable -l /full/path/to/libdb-3.2.sl ...</pre></blockquote>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/freebsd.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/irix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/install.html b/libdb/docs/ref/build_unix/install.html
new file mode 100644
index 0000000..ec992ab
--- /dev/null
+++ b/libdb/docs/ref/build_unix/install.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Installing Berkeley DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/shlib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Installing Berkeley DB</h1>
+<p>Berkeley DB installs the following files into the following locations, with the
+following default values:
+<p><table border=1 align=center>
+<tr><th>Configuration Variables</th><th>Default value</th></tr>
+<tr><td>--prefix</td><td>/usr/local/BerkeleyDB.<b>Major</b>.<b>Minor</b></td></tr>
+<tr><td>--exec-prefix</td><td>$(prefix)</td></tr>
+<tr><td>--bindir</td><td>$(exec_prefix)/bin</td></tr>
+<tr><td>--includedir</td><td>$(prefix)/include</td></tr>
+<tr><td>--libdir</td><td>$(exec_prefix)/lib</td></tr>
+<tr><td>docdir</td><td>$(prefix)/docs</td></tr>
+<tr><th>Files</th><th>Default location</th></tr>
+<tr><td>include files</td><td>$(includedir)</td></tr>
+<tr><td>libraries</td><td>$(libdir)</td></tr>
+<tr><td>utilities</td><td>$(bindir)</td></tr>
+<tr><td>documentation</td><td>$(docdir)</td></tr>
+</table>
+<p>With one exception, this follows the GNU Autoconf and GNU Coding
+Standards installation guidelines; please see that documentation for
+more information and rationale.
+<p>The single exception is the Berkeley DB documentation. The Berkeley DB
+documentation is provided in HTML format, not in UNIX-style man or GNU
+info format. For this reason, Berkeley DB configuration does not support
+<b>--infodir</b> or <b>--mandir</b>. To change the default
+installation location for the Berkeley DB documentation, modify the Makefile
+variable, <b>docdir</b>.
+<p>When installing Berkeley DB on filesystems shared by machines of different
+architectures, please note that although Berkeley DB include files are
+installed based on the value of $(prefix), rather than $(exec_prefix),
+the Berkeley DB include files are not always architecture independent.
+<p>To move the entire installation tree to somewhere besides
+<b>/usr/local</b>, change the value of <b>prefix</b>.
+<p>To move the binaries and libraries to a different location, change the
+value of <b>exec_prefix</b>. The values of <b>includedir</b> and
+<b>libdir</b> may be similarly changed.
+<p>Any of these values except for <b>docdir</b> may be set as part of
+the configuration:
+<p><blockquote><pre>prompt: ../dist/configure --bindir=/usr/local/bin</pre></blockquote>
+<p>Any of these values, including <b>docdir</b>, may be changed when doing
+the install itself:
+<p><blockquote><pre>prompt: make prefix=/usr/contrib/bdb install</pre></blockquote>
+<p>The Berkeley DB installation process will attempt to create any directories that
+do not already exist on the system.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/shlib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/intro.html b/libdb/docs/ref/build_unix/intro.html
new file mode 100644
index 0000000..016599e
--- /dev/null
+++ b/libdb/docs/ref/build_unix/intro.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for UNIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a><a name="4"><!--meow--></a><a name="5"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/debug/common.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for UNIX</h1>
+<p>The Berkeley DB distribution builds up to four separate libraries: the base
+C API Berkeley DB library and the optional C++, Java, and Tcl API libraries.
+For portability reasons, each library is standalone and contains the
+full Berkeley DB support necessary to build applications; that is, the C++
+API Berkeley DB library does not require any other Berkeley DB libraries to build
+and run C++ applications.
+<p>Building for Linux, Mac OS X and the QNX Neutrino release is the same
+as building for a conventional UNIX platform. Building for the Embedix
+release is somewhat different -- see the <a href="embedix.html">Embedix</a> FAQ
+page for more information.
+<p>The Berkeley DB distribution uses the Free Software Foundation's
+<a href="http://www.gnu.org/software/autoconf/autoconf.html">autoconf</a>
+and <a href="http://www.gnu.org/software/libtool/libtool.html">libtool</a> tools to build on UNIX platforms. In general, the standard
+configuration and installation options for these tools apply to the
+Berkeley DB distribution.
+<p>To do a standard UNIX build of Berkeley DB, change to the <b>build_unix</b>
+directory and then enter the following two commands:
+<p><blockquote><pre>../dist/configure
+make</pre></blockquote>
+<p>This will build the Berkeley DB library.
+<p>To install the Berkeley DB library, enter the following command:
+<p><blockquote><pre>make install</pre></blockquote>
+<p>To rebuild Berkeley DB, enter:
+<p><blockquote><pre>make clean
+make</pre></blockquote>
+<p>If you change your mind about how Berkeley DB is to be configured, you must
+start from scratch by entering the following command:
+<p><blockquote><pre>make realclean
+../dist/configure
+make</pre></blockquote>
+<p>To build multiple UNIX versions of Berkeley DB in the same source tree, create
+a new directory at the same level as the build_unix directory, and then
+configure and build in that directory as described previously.
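+<p>For example (the directory name and the --enable-debug option are only
+illustrative; any set of configuration options may be used), a second,
+debugging build might be set up as follows:
+<p><blockquote><pre>mkdir build_debug
+cd build_debug
+../dist/configure --enable-debug
+make</pre></blockquote>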
+<p>If you have trouble with any of these commands, please send email to
+the support addresses found in the Sleepycat Software contact
+information. In that email, please include the following information:
+<p><ul type=disc>
+<li>A screen snapshot of the commands you entered to do configuration and
+compilation, and any output they produced.
+<li>A copy of the <b>config.log</b> file created during configuration.
+</ul>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/debug/common.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/irix.html b/libdb/docs/ref/build_unix/irix.html
new file mode 100644
index 0000000..52393b9
--- /dev/null
+++ b/libdb/docs/ref/build_unix/irix.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: IRIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/hpux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/linux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>IRIX</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on IRIX. If you are compiling a threaded application, you
+must compile with the _SGI_MP_SOURCE flag:
+<p><blockquote><pre>cc -D_SGI_MP_SOURCE ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/hpux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/linux.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/linux.html b/libdb/docs/ref/build_unix/linux.html
new file mode 100644
index 0000000..8d57487
--- /dev/null
+++ b/libdb/docs/ref/build_unix/linux.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Linux</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/irix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/macosx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Linux</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on Linux. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/irix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/macosx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/macosx.html b/libdb/docs/ref/build_unix/macosx.html
new file mode 100644
index 0000000..1cb9c29
--- /dev/null
+++ b/libdb/docs/ref/build_unix/macosx.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Mac OS X</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Mac OS X</h1>
+<p><ol>
+<p><li><b>I cannot use the Java API on Mac OS X.</b>
+<p>The Berkeley DB configuration and build procedure is unable to dynamically
+load some libraries on Mac OS X. This restriction is expected to be
+removed in future Berkeley DB releases.
+<p>There is a workaround for Java. Mac OS X normally requires Java
+loadable libraries to end with a .jnilib extension. Libtool, used by
+our build procedure, does not support this extension. To work around
+this, you must add a -D option to your Java command line to specify the
+complete pathname of the installed library:
+<p><blockquote><pre>% java -Dsleepycat.db.libfile=/full/path/to/libdb_java-VERSION.so</pre></blockquote>
+<p><li><b>I cannot run the test suite on Mac OS X.</b>
+<p>The Mac OS X 10.1 and 10.2 Developer Tools installer will optionally
+install one additional package called simply "BSD SDK". That package
+contains the additional parts of the TCL distribution required to
+configure, compile and run the Berkeley DB test suite. The Berkeley DB configuration
+step will need the flag "--with-tcl=/System/Library/Tcl/8.3" added to
+the arguments in order to locate the newly installed Tcl distribution.
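+<p>As a sketch (the --enable-test and --enable-tcl options build the test
+suite and the Tcl API; the Tcl path is the one named above and may differ
+on your system), the configuration step might look like the following:
+<p><blockquote><pre>../dist/configure --enable-test --enable-tcl --with-tcl=/System/Library/Tcl/8.3</pre></blockquote>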
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/notes.html b/libdb/docs/ref/build_unix/notes.html
new file mode 100644
index 0000000..41093c9
--- /dev/null
+++ b/libdb/docs/ref/build_unix/notes.html
@@ -0,0 +1,153 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Architecture independent FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/aix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Architecture independent FAQ</h1>
+<p><ol>
+<p><li><b>When compiling with gcc, I get unreferenced symbols; for example
+the following:
+<p><blockquote><pre>symbol __muldi3: referenced symbol not found
+symbol __cmpdi2: referenced symbol not found</pre></blockquote></b>
+<p>Berkeley DB often uses 64-bit integral types on systems supporting large
+files, and gcc performs operations on those types by calling library
+functions. These unreferenced symbol errors are usually caused by
+linking an application by calling "ld" rather than by calling "gcc":
+gcc will link in libgcc.a and will resolve the symbols. If that does
+not help, another possible workaround is to reconfigure Berkeley DB using the
+<a href="../../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a> configuration option and then rebuild.
+<hr size=1 noshade>
+<p><li><b>My C++ program traps during a failure in a DB call on my
+gcc-based system.</b>
+<p>We believe there are some severe bugs in the implementation of
+exceptions for some gcc compilers. Exceptions require some interaction
+between compiler, assembler, and runtime libraries. We're not sure
+exactly what is at fault, but one failing combination is gcc 2.7.2.3
+running on SuSE Linux 6.0. The problem on this system can be seen with
+a rather simple test case of an exception thrown from a shared library
+and caught in the main program.
+<p>A variation of this problem seems to occur on AIX, although we believe it
+does not necessarily involve shared libraries on that platform.
+<p>If you see a trap that occurs when an exception might be thrown by the
+Berkeley DB runtime, we suggest that you use static libraries instead of
+shared libraries. See the documentation for configuration. If this
+doesn't work and you have a choice of compilers, try using a more recent
+gcc- or a non-gcc based compiler to build Berkeley DB.
+<p>Finally, you can disable the use of exceptions in the C++ runtime for
+Berkeley DB by using the <a href="../../api_cxx/env_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a> flag with the
+<a href="../../api_cxx/env_class.html">DbEnv</a> or <a href="../../api_cxx/db_class.html">Db</a> constructors. When this flag is on,
+all C++ methods fail by returning an error code rather than throwing an
+exception.
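+<p>A minimal sketch of the static-library workaround mentioned above,
+assuming the standard configuration options for enabling the C++ API and
+disabling shared libraries:
+<p><blockquote><pre>../dist/configure --enable-cxx --disable-shared
+make</pre></blockquote>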
+<hr size=1 noshade>
+<p><li><b>I get unexpected results and database corruption when running
+threaded programs.</b>
+<p><b>I get error messages that mutex (for example, pthread_mutex_XXX or
+mutex_XXX) functions are undefined when linking applications with Berkeley DB.</b>
+<p>On some architectures, the Berkeley DB library uses the ISO POSIX standard
+pthreads and UNIX International (UI) threads interfaces for underlying
+mutex support; for example, Solaris and HP-UX. You can specify
+compilers or compiler flags, or link with the appropriate thread library
+when loading your application to resolve the undefined references:
+<p><blockquote><pre>cc ... -lpthread ...
+cc ... -lthread ...
+xlc_r ...
+cc ... -mt ...</pre></blockquote>
+<p>See the appropriate architecture-specific Reference Guide pages for more
+information.
+<p>On systems where more than one type of mutex is available, it may be
+necessary for applications to use the same threads package from which
+Berkeley DB draws its mutexes. For example, if Berkeley DB was built to use the
+POSIX pthreads mutex calls for mutex support, the application may need
+to be written to use the POSIX pthreads interfaces for its threading
+model. This is only conjecture at this time, and although we know of
+no systems that actually have this requirement, it's not unlikely that
+some exist.
+<p>In a few cases, Berkeley DB can be configured to use specific underlying mutex
+interfaces. You can use the <a href="../../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a> and
+<a href="../../ref/build_unix/conf.html#--enable-uimutexes">--enable-uimutexes</a> configuration options to specify the POSIX and Unix
+International (UI) threads packages. This should not, however, be
+necessary in most cases.
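+<p>For example (only one of the two options would normally be specified,
+and, as noted above, doing so is rarely necessary), forcing the POSIX
+pthreads mutex interfaces would look like the following:
+<p><blockquote><pre>../dist/configure --enable-posixmutexes</pre></blockquote>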
+<p>In some cases, it is vitally important to make sure that you load the
+correct library. For example, on Solaris systems, there are POSIX
+pthread interfaces in the C library, so applications can link Berkeley DB
+using only the C library and not see any undefined symbols. However, the
+C library POSIX pthread mutex support is insufficient for Berkeley DB, and
+Berkeley DB cannot detect that fact. Similar errors can arise when
+applications (for example, tclsh) use dlopen to dynamically load Berkeley DB
+as a library.
+<p>If you are seeing problems in this area after you confirm that you're
+linking with the correct libraries, there are two other things you can
+try. First, if your platform supports interlibrary dependencies, we
+recommend that you change the Berkeley DB Makefile to specify the appropriate
+threads library when creating the Berkeley DB shared library, as an
+interlibrary dependency. Second, if your application is using dlopen
+to dynamically load Berkeley DB, specify the appropriate thread library on
+the link line when you load the application itself.
+<hr size=1 noshade>
+<p><li><b>I get core dumps when running programs that fork children.</b>
+<p>Berkeley DB handles should not be shared across process forks; each forked
+child should acquire its own Berkeley DB handles.
+<hr size=1 noshade>
+<p><li><b>I get reports of uninitialized memory reads and writes when
+running software analysis tools (for example, Rational Software Corp.'s
+Purify tool).</b>
+<p>For performance reasons, Berkeley DB does not write the unused portions of
+database pages or fill in unused structure fields. To turn off these
+errors when running software analysis tools, build with the
+--enable-umrw configuration option.
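+<p>As a sketch, a build intended to run under such a tool might be
+configured as follows:
+<p><blockquote><pre>../dist/configure --enable-umrw
+make</pre></blockquote>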
+<hr size=1 noshade>
+<p><li><b>Berkeley DB programs or the test suite fail unexpectedly.</b>
+<p>The Berkeley DB architecture does not support placing the shared memory
+regions on remote filesystems -- for example, the Network File System
+(NFS) or the Andrew File System (AFS). For this reason, the shared
+memory regions (normally located in the database home directory) must
+reside on a local filesystem. See <a href="../../ref/env/region.html">Shared Memory Regions</a> for more information.
+<p>With respect to running the test suite, always check to make sure that
+TESTDIR is not on a remote mounted filesystem.
+<hr size=1 noshade>
+<p><li><b>The <a href="../../utility/db_dump.html">db_dump185</a> utility fails to build.</b>
+<p>The <a href="../../utility/db_dump.html">db_dump185</a> utility is the utility that supports the
+conversion of Berkeley DB 1.85 and earlier databases to current database
+formats. If the build errors look something like the following, it
+means the db.h include file being loaded is not a Berkeley DB 1.85 version
+include file:
+<p><blockquote><pre>db_dump185.c: In function `main':
+db_dump185.c:210: warning: assignment makes pointer from integer without a cast
+db_dump185.c:212: warning: assignment makes pointer from integer without a cast
+db_dump185.c:227: structure has no member named `seq'
+db_dump185.c:227: `R_NEXT' undeclared (first use in this function)</pre></blockquote>
+<p>If the build errors look something like the following, it means that
+the Berkeley DB 1.85 code was not found in the standard libraries:
+<p><blockquote><pre>cc -o db_dump185 db_dump185.o
+ld:
+Unresolved:
+dbopen</pre></blockquote>
+<p>To build <a href="../../utility/db_dump.html">db_dump185</a>, the Berkeley DB version 1.85 code must already
+have been built and be available on the system. If the Berkeley DB 1.85 header file
+is not found in a standard place, or if the library is not part of the
+standard libraries used for loading, you will need to edit your
+Makefile, and change the following lines:
+<p><blockquote><pre>DB185INC=
+DB185LIB=</pre></blockquote>
+<p>So that the system Berkeley DB 1.85 header file and library are found; for
+example:
+<p><blockquote><pre>DB185INC=/usr/local/include
+DB185LIB=-ldb185</pre></blockquote>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/aix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/osf1.html b/libdb/docs/ref/build_unix/osf1.html
new file mode 100644
index 0000000..83820f3
--- /dev/null
+++ b/libdb/docs/ref/build_unix/osf1.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: OSF/1</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/macosx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/qnx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>OSF/1</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags are required when compiling threaded
+applications on OSF/1. If you are compiling a threaded application, you
+must compile with the _REENTRANT flag:
+<p><blockquote><pre>cc -D_REENTRANT ...</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/macosx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/qnx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/qnx.html b/libdb/docs/ref/build_unix/qnx.html
new file mode 100644
index 0000000..32ad74f
--- /dev/null
+++ b/libdb/docs/ref/build_unix/qnx.html
@@ -0,0 +1,77 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: QNX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/osf1.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sco.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>QNX</h1>
+<p><ol>
+<p><li><b>To what versions of QNX has DB been ported?</b>
+<p>Berkeley DB has been ported to the QNX Neutrino technology which is commonly
+referred to as QNX RTP (Real-Time Platform). Berkeley DB has not been
+ported to earlier versions of QNX, such as QNX 4.25.
+<p><li><b>Are there any QNX filesystem issues?</b>
+<p>Berkeley DB generates temporary files for use in transactionally
+protected file system operations. Due to the filename length limit of
+48 characters in the QNX filesystem, applications that are using
+transactions should specify a database name that is at most 43 characters.
+<p><li><b>What is the impact of QNX's use of <b>shm_open</b>(2) for
+shared memory regions?</b>
+<p>QNX requires the use of the POSIX <b>shm_open</b>(2) and
+<b>shm_unlink</b>(2) calls for shared memory regions that will later
+be mapped into memory using <b>mmap</b>(2). QNX's implementation
+of the shared memory functions requires that the name given begin
+with a slash, and that no other slash may appear in the name.
+<p>In order to comply with those requirements and allow relative pathnames
+to find the same environment, Berkeley DB uses only the last component of the
+home directory path and the name of the shared memory file, separated
+by a colon, as the name specified to the shared memory functions. For
+example, if an application specifies a home directory of
+<b>/home/db/DB_DIR</b>, Berkeley DB will use <b>/DB_DIR:__db.001</b> as
+the name for the shared memory area argument to <b>shm_open</b>(2).
+<p>The impact of this decision is that the last component of all
+environment home directory pathnames on QNX must be unique with respect
+to each other. Additionally, Berkeley DB requires that environments use home
+directories for QNX in order to generate a reasonable entry in the
+shared memory area.
+<p><li><b>What are the implications of QNX's requirement to use
+<b>shm_open</b>(2) in order to use <b>mmap</b>(2)?</b>
+<p>QNX requires that files mapped with <b>mmap</b>(2) be opened using
+<b>shm_open</b>(2). There are other places, in addition to the
+environment shared memory regions, where Berkeley DB tries to memory map files
+if it can.
+<p>The memory pool subsystem normally attempts to use <b>mmap</b>(2)
+even when using private memory, as indicated by the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>
+flag to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>. In the case of QNX, if an application is
+using private memory, Berkeley DB will not attempt to map the memory and will
+instead use the local cache.
+<p><li><b>What are the implications of QNX's mutex implementation using
+microkernel resources?</b>
+<p>On QNX, the primitives implementing mutexes consume system resources.
+Therefore, if an application unexpectedly fails, those resources could
+leak. Berkeley DB solves this problem by always allocating mutexes in the
+persistent shared memory regions. Then, if an application fails,
+running recovery or explicitly removing the database environment by
+calling the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method will allow Berkeley DB to release those
+previously held mutex resources. If an application specifies the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag (choosing not to use persistent shared memory),
+and then fails, mutexes allocated in that private memory may leak their
+underlying system resources. Therefore, the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag
+should be used with caution on QNX.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/osf1.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sco.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/sco.html b/libdb/docs/ref/build_unix/sco.html
new file mode 100644
index 0000000..4da0503
--- /dev/null
+++ b/libdb/docs/ref/build_unix/sco.html
@@ -0,0 +1,30 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: SCO</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/qnx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/solaris.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>SCO</h1>
+<p><ol>
+<p><li><b>If I build with gcc, programs such as db_dump and db_stat core dump
+immediately when invoked.</b>
+<p>We suspect gcc or the runtime loader may have a bug, but we haven't
+tracked it down. If you want to use gcc, we suggest building static
+libraries.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/qnx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/solaris.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/shlib.html b/libdb/docs/ref/build_unix/shlib.html
new file mode 100644
index 0000000..ae3f271
--- /dev/null
+++ b/libdb/docs/ref/build_unix/shlib.html
@@ -0,0 +1,99 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Dynamic shared libraries</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/install.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Dynamic shared libraries</h1>
+<p><b>Warning</b>: the following information is intended to be generic and
+is likely to be correct for most UNIX systems. Unfortunately, dynamic
+shared libraries are not standard between UNIX systems, so there may be
+information here that is not correct for your system. If you have
+problems, consult your compiler and linker manual pages, or your system
+administrator.
+<p>The Berkeley DB dynamic shared libraries are created with the name
+libdb-<b>major</b>.<b>minor</b>.so, where <b>major</b> is the major
+version number and <b>minor</b> is the minor version number. Other
+shared libraries are created if Java and Tcl support are enabled --
+specifically, libdb_java-<b>major</b>.<b>minor</b>.so and
+libdb_tcl-<b>major</b>.<b>minor</b>.so.
+<p>On most UNIX systems, when any shared library is created, the linker
+stamps it with a "SONAME". In the case of Berkeley DB, the SONAME is
+libdb-<b>major</b>.<b>minor</b>.so. It is important to realize that
+applications linked against a shared library remember the SONAMEs of the
+libraries they use and not the underlying names in the filesystem.
+<p>When the Berkeley DB shared library is installed, links are created in the
+install lib directory so that libdb-<b>major</b>.<b>minor</b>.so,
+libdb-<b>major</b>.so, and libdb.so all refer to the same library. This
+library will have an SONAME of libdb-<b>major</b>.<b>minor</b>.so.
+<p>Any previous versions of the Berkeley DB libraries that are present in the
+install directory (such as libdb-2.7.so or libdb-2.so) are left unchanged.
+(Removing or moving old shared libraries is one drastic way to identify
+applications that have been linked against those vintage releases.)
+<p>Once you have installed the Berkeley DB libraries, unless they are installed
+in a directory where the linker normally looks for shared libraries,
+you will need to specify the installation directory as part of compiling
+and linking against Berkeley DB. Consult your system manuals or system
+administrator for ways to specify a shared library directory when
+compiling and linking applications with the Berkeley DB libraries. Many
+systems support environment variables (for example, LD_LIBRARY_PATH or
+LD_RUN_PATH), or system configuration files (for example, /etc/ld.so.conf)
+for this purpose.
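+<p>As an illustrative sketch only (the installation prefix, file names, and
+exact flags are hypothetical and vary by compiler, linker, and system),
+compiling and running against a library installed under
+/usr/local/BerkeleyDB.3.2 might look like the following:
+<p><blockquote><pre>cc -I/usr/local/BerkeleyDB.3.2/include -c myapp.c
+cc -L/usr/local/BerkeleyDB.3.2/lib -o myapp myapp.o -ldb
+env LD_LIBRARY_PATH=/usr/local/BerkeleyDB.3.2/lib ./myapp</pre></blockquote>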
+<p><b>Warning</b>: some UNIX installations may have an already existing
+<b>/usr/lib/libdb.so</b>, and this library may be an incompatible
+version of Berkeley DB.
+<p>We recommend that applications link against libdb.so (for example, using
+-ldb). Even though the linker uses the file named libdb.so, the
+executable file for the application remembers the library's SONAME
+(libdb-<b>major</b>.<b>minor</b>.so). This has the effect of
+marking the applications with the versions they need at link time.
+Because applications locate their needed SONAMEs when they are executed,
+all previously linked applications will continue to run using the
+library they were linked with, even when a new version of Berkeley DB is
+installed and the file <b>libdb.so</b> is replaced with a new
+version.
+<p>Applications that know they are using features specific to a particular
+Berkeley DB release can be linked to that release. For example, an application
+wanting to link to Berkeley DB major release "3" can link using -ldb-3, and
+applications that know about a particular minor release number can specify
+both major and minor release numbers; for example, -ldb-3.5.
+<p>If you want to link with Berkeley DB before performing library installation,
+the "make" command will have created a shared library object in the
+<b>.libs</b> subdirectory of the build directory, such as
+<b>build_unix/.libs/libdb-major.minor.so</b>. If you want to link a
+file against this library, with, for example, a major number of "3" and
+a minor number of "5", you should be able to do something like the
+following:
+<p><blockquote><pre>cc -L BUILD_DIRECTORY/.libs -o testprog testprog.o -ldb-3.5
+env LD_LIBRARY_PATH="BUILD_DIRECTORY/.libs:$LD_LIBRARY_PATH" ./testprog</pre></blockquote>
+<p>where <b>BUILD_DIRECTORY</b> is the full directory path to the directory
+where you built Berkeley DB.
+<p>The libtool program (which is configured in the build directory) can be
+used to set the shared library path and run a program. For example,
+the following runs the gdb debugger on the db_dump utility after setting
+the appropriate paths:
+<p><blockquote><pre>libtool gdb db_dump</pre></blockquote>
+<p>Libtool may not know what to do with arbitrary commands (it is hardwired
+to recognize "gdb" and some other commands). If it complains the mode
+argument will usually resolve the problem:
+<p><blockquote><pre>libtool --mode=execute my_debugger db_dump</pre></blockquote>
+<p>On most systems, using libtool in this way is exactly equivalent to
+setting the LD_LIBRARY_PATH environment variable and then executing the
+program. On other systems, using libtool has the added virtue of handling
+whatever platform-specific details are needed when this typical approach does not apply.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/install.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/solaris.html b/libdb/docs/ref/build_unix/solaris.html
new file mode 100644
index 0000000..1a3dbde
--- /dev/null
+++ b/libdb/docs/ref/build_unix/solaris.html
@@ -0,0 +1,97 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Solaris</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/sco.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sunos.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Solaris</h1>
+<p><ol>
+<p><li><b>I can't compile and run multithreaded applications.</b>
+<p>Special compile-time flags and additional libraries are required when
+compiling threaded applications on Solaris. If you are compiling a
+threaded application, you must compile with the _REENTRANT flag and link
+with the libpthread.a or libthread.a libraries:
+<p><blockquote><pre>cc -mt ...
+cc -D_REENTRANT ... -lthread
+cc -D_REENTRANT ... -lpthread</pre></blockquote>
+<p>The Berkeley DB library will automatically build with the correct options.
+<hr size=1 noshade>
+<p><li><b>I've installed gcc on my Solaris system, but configuration
+fails because the compiler doesn't work.</b>
+<p>On some versions of Solaris, there is a cc executable in the user's path,
+but all it does is display an error message and fail:
+<p><blockquote><pre>% which cc
+/usr/ucb/cc
+% cc
+/usr/ucb/cc: language optional software package not installed</pre></blockquote>
+<p>Because Berkeley DB always uses the native compiler in preference to gcc, this
+is a fatal error. If the error message you are seeing is the following,
+then this may be the problem:
+<p><blockquote><pre>checking whether the C compiler (cc -O) works... no
+configure: error: installation or configuration problem: C compiler cannot create executables.</pre></blockquote>
+<p>The simplest workaround is to set your CC environment variable to the
+system compiler and reconfigure; for example:
+<p><blockquote><pre>env CC=gcc ../dist/configure</pre></blockquote>
+<p>If you are using the --enable-cxx option, you may also want to specify
+a C++ compiler, for example the following:
+<p><blockquote><pre>env CC=gcc CCC=g++ ../dist/configure</pre></blockquote>
+<hr size=1 noshade>
+<p><li><b>I see the error
+"libc internal error: _rmutex_unlock: rmutex not held", followed by a core
+dump when running threaded or JAVA programs.</b>
+<p>This is a known bug in Solaris 2.5 and it is fixed by Sun patch 103187-25.
+<hr size=1 noshade>
+<p><li><b>I see error reports of nonexistent files, corrupted metadata
+pages and core dumps.</b>
+<p>Solaris 7 contains a bug in the threading libraries (-lpthread,
+-lthread), which causes the wrong version of the pwrite routine to be
+linked into the application if the thread library is linked in after
+the C library. The result will be that the pwrite function is called
+rather than the pwrite64. To work around the problem, use an explicit
+link order when creating your application.
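+<p>As a hedged sketch (the application names are hypothetical, and the
+details may differ on your system), the intent is that the thread library
+appear before the C library on the link line:
+<p><blockquote><pre>cc -o myapp myapp.o -ldb -lpthread -lc</pre></blockquote>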
+<p>Sun Microsystems is tracking this problem with Bug Id's 4291109 and 4267207,
+and patch 106980-09 to Solaris 7 fixes the problem:
+<p><blockquote><pre>Bug Id: 4291109
+Duplicate of: 4267207
+Category: library
+Subcategory: libthread
+State: closed
+Synopsis: pwrite64 mapped to pwrite
+Description:
+When libthread is linked after libc, there is a table of functions in
+libthread that gets "wired into" libc via _libc_threads_interface().
+The table in libthread is wrong in both Solaris 7 and on28_35 for the
+TI_PWRITE64 row (see near the end).</pre></blockquote>
+<hr size=1 noshade>
+<p><li><b>I see corrupted databases when doing hot backups or creating
+a hot failover archive.</b>
+<p>The Solaris cp utility is implemented using the mmap system call, and
+so writes are not blocked when it reads database pages. See
+<a href="../../ref/transapp/reclimit.html">Berkeley DB recoverability</a> for more
+information.
+<hr size=1 noshade>
+<p><li><b>I see errors about "open64" when building C++ applications.</b>
+<p>In some releases of Solaris, include files redefine "open" when big-file
+support (the HAVE_FILE_OFFSET_BITS and _FILE_OFFSET_BITS #defines) is
+enabled. This causes problems when compiling for C++, where "open" is
+a legal identifier used in the Berkeley DB C++ API. To work around this
+problem, avoid including those include files in C++ files which also
+include Berkeley DB include files and call into the Berkeley DB API, or specify the
+<a href="../../ref/build_unix/conf.html#--disable-largefile">--disable-largefile</a> configuration option and then rebuild.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/sco.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/sunos.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/sunos.html b/libdb/docs/ref/build_unix/sunos.html
new file mode 100644
index 0000000..bf045fc
--- /dev/null
+++ b/libdb/docs/ref/build_unix/sunos.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: SunOS</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/solaris.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/ultrix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>SunOS</h1>
+<p><ol>
+<p><li><b>I can't specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.</b>
+<p>The <b>shmget</b>(2) interfaces are not used on SunOS releases prior
+to 5.0, even though they apparently exist, because the distributed
+include files did not allow them to be compiled. For this reason, it
+will not be possible to specify the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag on those
+versions of SunOS.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/solaris.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/ultrix.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/test.html b/libdb/docs/ref/build_unix/test.html
new file mode 100644
index 0000000..99054f4
--- /dev/null
+++ b/libdb/docs/ref/build_unix/test.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite under UNIX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/shlib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite under UNIX</h1>
+<p>The Berkeley DB test suite is built if you specify --enable-test as an
+argument when configuring Berkeley DB. The test suite also requires that you
+configure and build the Tcl API.
+<p>Before running the tests for the first time, you may need to edit the
+<b>include.tcl</b> file in your build directory. The Berkeley DB
+configuration assumes that the test suite will be run with the version of
+the tclsh utility included in the Tcl installation with which Berkeley DB was
+configured, and further assumes that the test suite will use the libraries
+prebuilt in the Berkeley DB build directory. If
+either of these assumptions is incorrect, you will need to edit the
+<b>include.tcl</b> file and change the following line to correctly
+specify the full path to the version of tclsh with which you are going
+to run the test suite:
+<p><blockquote><pre>set tclsh_path ...</pre></blockquote>
+<p>You may also need to change the following line to correctly specify the
+path from the directory where you are running the test suite to the
+location of the Berkeley DB Tcl API library you built:
+<p><blockquote><pre>set test_path ...</pre></blockquote>
+<p>It may not be necessary that this be a full path if you have configured
+your system's shared library mechanisms to search the directory where
+you built or installed the Tcl library.
+<p>All Berkeley DB tests are run from within <b>tclsh</b>. After starting tclsh,
+you must source the file <b>test.tcl</b> in the test directory. For
+example, if you built in the <b>build_unix</b> directory of the
+distribution, this would be done using the following command:
+<p><blockquote><pre>% source ../test/test.tcl</pre></blockquote>
+<p>If no errors occur, you should get a "%" prompt.
+<p>You are now ready to run tests in the test suite; see
+<a href="../../ref/test/run.html">Running the test suite</a> for more
+information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/shlib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_unix/ultrix.html b/libdb/docs/ref/build_unix/ultrix.html
new file mode 100644
index 0000000..2a520e4
--- /dev/null
+++ b/libdb/docs/ref/build_unix/ultrix.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Ultrix</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/sunos.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Ultrix</h1>
+<p><ol>
+<p><li><b>Configuration complains that mmap(2) interfaces aren't being used.</b>
+<p>The <b>mmap</b>(2) interfaces are not used on Ultrix, even though
+they exist, because they are known to not work correctly.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/sunos.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_vxworks/faq.html b/libdb/docs/ref/build_vxworks/faq.html
new file mode 100644
index 0000000..b91c022
--- /dev/null
+++ b/libdb/docs/ref/build_vxworks/faq.html
@@ -0,0 +1,130 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: VxWorks FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td align=right><a href="../../ref/build_vxworks/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/version.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>VxWorks FAQ</h1>
+<p><ol>
+<p><li><b>I get the error "Workspace open failed: This project workspace is an
+older format.", when trying to open the supplied workspace on Tornado 2.0
+under Windows.</b>
+<p>This error will occur if the files were extracted in a manner that adds
+a CR/LF to lines in the file. Make sure that you download the ".zip"
+version of the Berkeley DB distribution, and, when extracting the Berkeley DB
+sources, that you use an unzipper program that will not do any
+conversion.
+<p><li><b>I sometimes see spurious output errors about temporary directories.</b>
+<p>These messages are coming from the <b>stat</b>(2) function call
+in VxWorks. Unlike on other systems, there may not be a well-known
+temporary directory on the target. Therefore, we highly recommend that
+all applications use <a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a> to
+specify a temporary directory for the application (see the sketch
+following this list).
+<p><li><b>How can I build Berkeley DB without using Tornado?</b>
+<p>The simplest way to build Berkeley DB without using Tornado is to configure
+Berkeley DB on a UNIX system, and then use the Makefile and include files
+generated by that configuration as the starting point for your build.
+The Makefile and include files are created during configuration, in the
+current directory, based on your configuration decisions (for example,
+debugging vs. non-debugging builds), so you'll need to configure the
+system for the way you want Berkeley DB to be built.
+<p>Additionally, you'll need to account for the slight difference between
+the set of source files used in a UNIX build and the set used in a
+VxWorks build. You can use the following commands to create a list of
+the Berkeley DB VxWorks files. The commands assume you are in the build_vxworks
+directory of the Berkeley DB distribution:
+<p><blockquote><pre>% cat &gt; /tmp/files.sed
+s/&lt;BEGIN&gt; FILE_//
+s/_objects//
+^D
+% grep FILE_ BerkeleyDB.wpj | grep _objects | sed -f /tmp/files.sed &gt; /tmp/db.files</pre></blockquote>
+<p>You will then have a template Makefile and include files, and a list of
+VxWorks-specific source files. You will need to convert this Makefile
+and list of files into a form that is acceptable to your specific build
+environment.
+<p><li><b>Does Berkeley DB use floating point registers?</b>
+<p>Yes, there are a few places in Berkeley DB where floating-point computations
+are performed. As a result, all applications that call
+<i>taskSpawn</i> should specify the <b>VX_FP_TASK</b> option (see the
+sketch following this list).
+<p><li><b>Can I run the test suite under VxWorks?</b>
+<p>The test suite requires the Berkeley DB Tcl library. In turn, this library
+requires Tcl 8.1 or greater. In order to run the test suite, you would
+need to port Tcl 8.1 or greater to VxWorks. The Tcl shell included in
+<i>windsh</i> is not adequate for two reasons. First, it is based on
+Tcl 8.0. Second, it does not include the necessary Tcl components for
+adding a Tcl extension.
+<p><li><b>Are all Berkeley DB features available for VxWorks?</b>
+<p>All Berkeley DB features are available for VxWorks with the exception of the
+<a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag for <a href="../../api_c/db_open.html">DB-&gt;open</a>. The underlying mechanism
+needed for that flag is not available consistently across different file
+systems for VxWorks.
+<p><li><b>Are there any constraints using particular filesystem drivers?</b>
+<p>There are constraints using the dosFs filesystems with Berkeley DB. Namely,
+you must configure your dosFs filesystem to support long filenames if
+you are using Berkeley DB logging in your application. The VxWorks dosFs
+1.0 filesystem, by default, uses the old MS-DOS 8.3 file-naming
+constraints, restricting filenames to 8 characters with a 3-character
+extension. If you have configured VxWorks with dosFs 2.0, you should
+be compatible with Windows FAT32 filesystems, which support long
+filenames.
+<p><li><b>Are there any dependencies on particular filesystem drivers?</b>
+<p>There is one dependency on specifics of filesystem drivers in the port
+of Berkeley DB to VxWorks. Berkeley DB synchronizes data using the FIOSYNC function
+to ioctl() (another option would have been to use the FIOFLUSH function
+instead). The FIOSYNC function was chosen because the NFS client driver,
+nfsDrv, supports only FIOSYNC and not FIOFLUSH. All local file
+systems, as of VxWorks 5.4, support FIOSYNC -- with the exception of
+rt11fsLib, which only supports FIOFLUSH. To use rt11fsLib, you will need
+to modify the os/os_fsync.c file to use the FIOFLUSH function; note that
+rt11fsLib cannot work with NFS clients.
+<p><li><b>Are there any known filesystem problems?</b>
+<p>During the course of our internal testing, we came across two problems
+with the dosFs 2.0 filesystem that warranted patches from Wind River Systems.
+You should ask Wind River Systems for the patches to these
+problems if you encounter them.
+<p>The first problem is that files will seem to disappear. You should
+look at <b>SPR 31480</b> in the Wind River Systems' Support pages for
+a more detailed description of this problem.
+<p>The second problem is a semaphore deadlock within the dosFs filesystem
+code. Looking at a stack trace via CrossWind, you will see two or more of
+your application's tasks waiting in semaphore code within dosFs. The patch
+for this problem is under <b>SPR 33221</b> at Wind River Systems.
+There are several SPR numbers at Wind River Systems that refer to this
+particular problem.
+<p><li><b>Are there any filesystems I cannot use?</b>
+<p>The Target Server File System (TSFS) uses the netDrv driver. This driver
+does not support any ioctl that allows flushing to the disk, and therefore
+cannot be used with Berkeley DB.
+<p><li><b>What VxWorks primitives are used for mutual exclusion in Berkeley DB?</b>
+<p>Mutexes inside Berkeley DB use the basic binary semaphores in VxWorks. The
+mutexes are created using the FIFO queue type.
+<p><li><b>What are the implications of VxWorks' mutex implementation
+using microkernel resources?</b>
+<p>On VxWorks, the semaphore primitives implementing mutexes consume system
+resources. Therefore, if an application unexpectedly fails, those
+resources could leak. Berkeley DB solves this problem by always allocating
+mutexes in the persistent shared memory regions. Then, if an
+application fails, running recovery or explicitly removing the database
+environment by calling the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method will allow Berkeley DB to
+release those previously held mutex resources. If an application
+specifies the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag (choosing not to use persistent
+shared memory), and then fails, mutexes allocated in that private memory
+may leak their underlying system resources. Therefore, the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag should be used with caution on VxWorks.
+</ol>
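+<p>As a concrete follow-up to the temporary-directory and floating-point
+items above, here is a minimal, hedged sketch of a task that names its own
+temporary directory before opening an environment and is spawned with
+<b>VX_FP_TASK</b>. The task name, priority, stack size, and pathnames are
+placeholder values only:
+<p><blockquote><pre>#include &lt;vxWorks.h&gt;
+#include &lt;taskLib.h&gt;
+#include &lt;db.h&gt;
+
+/* Hypothetical task entry point; all pathnames are examples only. */
+int my_db_task(void)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    /* Name a temporary directory explicitly rather than relying
+     * on a well-known system location. */
+    if ((ret = dbenv-&gt;set_tmp_dir(dbenv, "/myfs/tmp")) == 0)
+        ret = dbenv-&gt;open(dbenv, "/myfs/dbhome",
+            DB_CREATE | DB_INIT_MPOOL, 0);
+    /* ... application work would go here ... */
+    (void)dbenv-&gt;close(dbenv, 0);
+    return (ret);
+}
+
+/* Spawn with VX_FP_TASK because Berkeley DB performs floating-point
+ * computations internally. */
+int start_db_task(void)
+{
+    return (taskSpawn("tMyDb", 100, VX_FP_TASK, 20000,
+        (FUNCPTR)my_db_task, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+}</pre></blockquote>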
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_vxworks/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/version.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_vxworks/intro.html b/libdb/docs/ref/build_vxworks/intro.html
new file mode 100644
index 0000000..d1b8b95
--- /dev/null
+++ b/libdb/docs/ref/build_vxworks/intro.html
@@ -0,0 +1,103 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for VxWorks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td align=right><a href="../../ref/build_win/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/introae.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for VxWorks</h1>
+<p>The build_vxworks directory in the Berkeley DB distribution contains a workspace
+and project files for Tornado 2.0. The Berkeley DB distribution also contains
+component files for Tornado 3.1/VxWorks AE. See
+<a href="../../ref/build_vxworks/introae.html">Building for VxWorks AE</a> for
+information about VxWorks AE.
+<p><table border=1 align=center>
+<tr><th>File</th><th>Description</th></tr>
+<tr> <td align=left>BerkeleyDB.wsp</td> <td align=left>Berkeley DB Workspace file</td> </tr>
+<tr> <td align=left>BerkeleyDB.wpj</td> <td align=left>Berkeley DB Project file</td> </tr>
+<tr> <td align=left>dbdemo/dbdemo.wpj</td> <td align=left><a href="../../ref/build_vxworks/notes.html">Demo program</a> project file</td> </tr>
+<tr> <td align=left>db_*/*.wpj</td> <td align=left><a href="../../ref/build_vxworks/notes.html">Support utilities</a> project files</td> </tr>
+</table>
+<h3>Building With Tornado 2.0</h3>
+<p>Open the workspace <b>BerkeleyDB.wsp</b>. The list of projects
+in this workspace will be shown. These projects were created for
+the x86 BSP for VxWorks.
+<p>The remainder of this document assumes that you already have a
+VxWorks target and a target server, both up and running. It also
+assumes that your VxWorks image is configured properly for your
+needs and that you have an acceptable file system already available. See
+<a href="../../ref/build_vxworks/faq.html">VxWorks FAQ</a> for more
+information about file system requirements.
+<p>First, you need to set the include directories. To do this, go to the
+<i>Builds</i> tab for the workspace. Open up <i>Berkeley DB
+Builds</i>. You will see several different builds, containing different
+configurations. All of the projects in the Berkeley DB workspace are created
+to be downloadable applications.
+<p><table border=1 align=center>
+<tr><th>Build</th><th>Description</th></tr>
+<tr> <td align=left>PENTIUM_debug</td> <td align=left>x86 BSP with debugging</td> </tr>
+<tr> <td align=left>PENTIUM_nodebug</td> <td align=left>x86 BSP no debugging</td> </tr>
+</table>
+<p>You have to add a new build specification if you use a
+different BSP, want to add a build for the simulator or
+want to customize further. For instance, if you have the Power PC (PPC)
+BSP, you need to add a new build for the PPC tool chain. To do so,
+select the "Builds" tab, select the Berkeley DB project name, and right-click.
+Choose the <i>New Build...</i> selection and create the new build
+target. For your new build target, you need to decide whether
+it should be built for
+debugging. See the properties of the Pentium builds for ways to
+configure for each case. After you add this build, you still need
+to configure the include directories correctly, as described in the
+sections that follow.
+<p>If you are running with a different
+BSP, you should remove the build specifications that do not apply to
+your hardware. We recommend that you do this after you have configured any
+new build specifications. The Tornado tools will get confused if
+you have a PENTIUMgnu build specification for a PPC BSP,
+for instance.
+<p>Select the build you are interested in, and right-click. Choose the
+<i>Properties...</i> selection. At this point, a tabbed dialog
+should appear. In this new window, choose the <i>C/C++ compiler</i>
+tab. In the edit box, you need to add the full pathname of the
+<i>build_vxworks</i> subdirectory of Berkeley DB, followed by the full
+pathname of the <i>include</i> and <i>include_auto</i>
+subdirectories of Berkeley DB. Then, click OK.
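+<p>For example, if the distribution were extracted into <b>C:/db</b> (a
+placeholder location; the exact flag syntax depends on your Tornado
+configuration), the added include directories might appear in the edit
+box as:
+<p><blockquote><pre>-IC:/db/build_vxworks -IC:/db/include -IC:/db/include_auto</pre></blockquote>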
+<p>Note that some versions of Tornado (such as the version for Windows)
+do not correctly handle relative pathnames in the include paths.
+If you get an error about an inability to find header files,
+you should edit the <i>include</i> and <i>include_auto</i>
+pathnames to remove the <i>build_vxworks/..</i> relative
+portion. Then you should rebuild your dependencies.
+<p>Building and downloading the Berkeley DB downloadable application for the first time
+requires several steps:
+<p><ol>
+<p><li>Select the build you are interested in, and right-click. Choose the
+<i>Set... as Active Build</i> selection.
+<p><li>Select the build you are interested in, and right-click. Choose the
+<i>Dependencies...</i> selection. Run dependencies over all files
+in the Berkeley DB project.
+<p><li>Select the build you are interested in, and right-click. Choose the
+<i>Rebuild All (BerkeleyDB.out)</i> selection.
+<p><li>Select the Berkeley DB project name, and right-click. Choose the
+<i>Download "Berkeley DB.out"</i> selection.
+</ol>
+<p>You need to repeat this procedure for all builds you are interested in
+building, as well as for all of the utility project builds you want to
+run.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_win/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/introae.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_vxworks/introae.html b/libdb/docs/ref/build_vxworks/introae.html
new file mode 100644
index 0000000..2905651
--- /dev/null
+++ b/libdb/docs/ref/build_vxworks/introae.html
@@ -0,0 +1,135 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for VxWorks AE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td align=right><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for VxWorks AE</h1>
+<p>The build_vxworks directory in the Berkeley DB distribution contains component
+files for Tornado 3.1. The Berkeley DB distribution also contains workspace
+and project files for Tornado 2.0. See
+<a href="../../ref/build_vxworks/intro.html">Building for VxWorks</a> for
+information about Tornado 2.0/VxWorks 5.4.
+The VxWorks AE component files are all within subdirectories, and
+all component files are named <i>component.wpj</i>.
+<p><table border=1 align=center>
+<tr><th>File</th><th>Description</th></tr>
+<tr> <td align=left>BerkeleyDB/</td> <td align=left>Berkeley DB component directory</td> </tr>
+<tr> <td align=left>dbdemo/dbdemo</td> <td align=left><a href="../../ref/build_vxworks/notes.html">Demo program</a> component directory</td> </tr>
+<tr> <td align=left>db_*/db_*</td> <td align=left><a href="../../ref/build_vxworks/notes.html">Support utilities</a> component directories</td> </tr>
+</table>
+<h3>Building With Tornado 3.1</h3>
+<p>This document assumes you already have a workspace set up and you
+want to add Berkeley DB as a component in that workspace. You may include
+this component in any domain deemed appropriate for your application.
+<p>To add the Berkeley DB component into your workspace, right-click on
+"Components" and choose <i>Add existing...</i>. Enter
+the pathname to your Berkeley DB distribution in the form
+<i>&lt;pathname to your Berkeley DB distribution&gt;</i>
+<b>/build_vxworks/BerkeleyDB/</b>.
+You will see <b>component.wpj</b> listed under "Files".
+Choose that filename and click "OK". You will now have
+a Berkeley DB component in your workspace.
+<p>There are essentially three options regarding protection
+domains and the Berkeley DB component. The first option is to add
+the Berkeley DB component directly into your application domain. You may
+choose to do this by downloading Berkeley DB into that domain on your
+target, or by adding the component to the domain itself so that it is
+built when the application domain is built. The disadvantage
+of this option is that no other application domain will have access to
+the Berkeley DB interfaces.
+<p>The second option is to add the Berkeley DB component directly into your
+<i>vxKernel</i> domain. The advantage is that any application
+using the Berkeley DB interfaces would have access to them, and no changes
+to the linkage path would be necessary. The disadvantage is that
+all Berkeley DB code would run with system privileges.
+<p>The third option is to add a Berkeley DB shared library domain to your
+system. Then add or download the Berkeley DB component to that shared
+library domain. The advantage is that all application domains
+using the Berkeley DB interfaces can access a single copy of the library
+running in user mode. The disadvantages are that one must
+remember to add the Berkeley DB shared library domain to the linkage
+path of every application domain using Berkeley DB and that shared library
+domains may not link against one another, a consideration if the
+application using Berkeley DB is itself a shared library.
+<p>We believe the options outlined above are the most common ways in
+which the Berkeley DB component will be used, and that the third
+option, creating a shared library domain, is the most useful.
+Ultimately, the responsibility of choosing the correct mechanism
+for including the Berkeley DB component into the appropriate domain falls
+to the application developer.
+<p>The remainder of this document assumes that you already have a
+VxWorks AE target and a target server, both up and running. It also
+assumes that your VxWorks AE image is configured properly for your
+needs and that you have an acceptable file system already available. See
+<a href="../../ref/build_vxworks/faq.html">VxWorks FAQ</a> for more
+information about file system requirements.
+<p>To build Berkeley DB, first, you need to set the build selection. To do
+this, right-click on the Berkeley DB component name and choose the
+<i>Build settings...</i> selection. If you look at the
+<i>Active Build Specification</i> drop down list, you will see
+several different builds, containing different configurations.
+<p><table border=1 align=center>
+<tr><th>Build</th><th>Description</th></tr>
+<tr> <td align=left>PENTIUM2gnu.debug</td> <td align=left>PII BSP with debugging</td> </tr>
+<tr> <td align=left>PENTIUM2gnu.release</td> <td align=left>PII BSP no debugging</td> </tr>
+</table>
+<p>You have to add a new build specification if you use a
+different BSP, want to add a build for the simulator or
+want to customize further. For instance, if you have the Power PC (PPC)
+BSP, you need to add a new build for the PPC tool chain. To do so,
+select the "Add..." button in the <i>Build Settings</i> window.
+A new window will appear giving you a list of all the BSPs
+you have available from which to choose.
+For your new build target, you need to decide whether it should be built for
+debugging. See the <i>C/C++ compiler</i> tab of the Pentium
+builds for ways to
+configure for each case. After you add this build, you still need
+to configure the include directories correctly, as described in the
+sections that follow.
+<p>If you are running with a different
+BSP, you should remove the build specifications that do not apply to
+your hardware. We recommend that you do this after you have configured any
+new build specifications.
+<p>If you are adding a new build, you must set the include directories
+correctly. After you have added the new build in the "Build Settings"
+window, click on the <i>C/C++ compiler</i> tab.
+In the edit box, you need to add the pathname of the
+<i>build_vxworks</i> subdirectory of Berkeley DB, followed by the
+pathname of the <i>include</i> and <i>include_auto</i>
+subdirectories of Berkeley DB. You should add these directories
+relative to the project directory, using the <b>PRJ_DIR</b>
+macro, which refers to the BerkeleyDB subdirectory of <i>build_vxworks</i>.
+Then, click OK. The typical addition of include
+directories will look like:
+<p><blockquote><pre>-I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto</pre></blockquote>
+<p>Building and downloading the Berkeley DB downloadable application for the first time
+requires several steps:
+<p><ol>
+<p><li>Select the build you are interested in using the <i>Build Settings</i>
+window. Click OK when done.
+<p><li>Select the Berkeley DB component and right-click. Choose the
+<i>Clean Build</i> selection.
+<p><li>Select the Berkeley DB component and right-click. Choose the
+<i>Download...</i> selection.
+</ol>
+<p>You need to repeat this procedure for all builds you are interested in
+building, as well as for all of the utility project builds you want to
+run.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_vxworks/notes.html b/libdb/docs/ref/build_vxworks/notes.html
new file mode 100644
index 0000000..abd321b
--- /dev/null
+++ b/libdb/docs/ref/build_vxworks/notes.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: VxWorks notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td align=right><a href="../../ref/build_vxworks/introae.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>VxWorks notes</h1>
+<p>Berkeley DB currently disallows the <a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag to <a href="../../api_c/db_open.html">DB-&gt;open</a>. The
+operations that this flag represents are not fully supported under
+VxWorks.
+<p>The <a href="../../api_c/db_sync.html">DB-&gt;sync</a> function is implemented using an ioctl call into
+the file system driver with the FIOSYNC command. Most, but not all, file
+system drivers support this call. Berkeley DB requires the use of a file
+system supporting FIOSYNC.
+<h3>Building and Running the Demo Program</h3>
+<p>The demo program should be built in a manner very similar to
+building Berkeley DB. If you want different or additional BSP
+build specifications, you should add them by following the
+directions indicated in <a href="../../ref/build_vxworks/intro.html">Building
+with Tornado 2.0</a> or <a href="../../ref/build_vxworks/introae.html">Building
+with Tornado 3.1</a>.
+<p>The demo program can be downloaded and run by calling the entry function
+<b>dbdemo</b> with the pathname of a database to use. The demo
+program will ask for some input keys. It creates a database and adds
+those keys into the database, using the reverse of the key as the data
+value. When you are finished, enter either EOF (control-D) or
+<b>quit</b>, and the demo program will display all of the key/data
+items in the database.
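+<p>For example, from the windsh prompt you might run the demo against a
+scratch database (change the pathname to match your file system):
+<p><blockquote><pre>-&gt; dbdemo "/tmp/demo.db"</pre></blockquote>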
+<h3>Building and Running the Utility Programs</h3>
+<p>The Berkeley DB <a href="../../utility/index.html">utility programs</a>
+can be downloaded and run by calling the
+function equivalent to the utility's name. The utility functions take
+a string containing all the supported arguments. The program will then
+decompose that string into a traditional argc/argv used internally.
+For example, to execute <a href="../../utility/db_stat.html">db_stat</a> on a database within an
+environment you would execute the following from the windsh prompt.
+Obviously you would change the pathname and database name to reflect
+your system.
+<p><blockquote><pre>-&gt; db_stat "-h /tmp/myenvhome -d mydatabase.db"</pre></blockquote>
+<h3>Notes for VxWorks 5.4</h3>
+<p>The memory on VxWorks is always resident and fully shared among all
+tasks running on the target. For this reason, the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a>
+flag is implied for any application that does not specify the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag. Additionally, applications must use a segment
+ID to ensure that different applications do not overwrite each other's
+database environments. See the <a href="../../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a> method for more
+information. Also, the <a href="../../api_c/env_open.html#DB_LOCKDOWN">DB_LOCKDOWN</a> flag has no effect.
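+<p>A minimal sketch of the segment ID requirement follows; the helper name,
+home directory, and the base key value a caller would pass are placeholders,
+and the key must be chosen so that it does not collide with other
+applications on the target:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Hypothetical helper: open an environment with an explicit
+ * base segment ID on VxWorks. */
+int open_env(DB_ENV **dbenvp, const char *home, long segid)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;set_shm_key(dbenv, segid)) != 0 ||
+        (ret = dbenv-&gt;open(dbenv, home,
+        DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (ret);
+    }
+    *dbenvp = dbenv;
+    return (0);
+}</pre></blockquote>
+<p>A call such as <i>open_env(&amp;dbenv, "/myfs/dbhome", 20)</i> would then
+give this application's environment its own base segment ID.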
+<h3>Notes for VxWorks AE 1.1</h3>
+<p>All tasks wishing to access a particular environment must run in
+the same application domain. The memory regions used by the
+environment are accessible only to that application domain.
+If more than one application domain attempts to access an
+environment simultaneously, the results are undefined but will
+likely lead to corruption.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_vxworks/introae.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_win/faq.html b/libdb/docs/ref/build_win/faq.html
new file mode 100644
index 0000000..5f0d32e
--- /dev/null
+++ b/libdb/docs/ref/build_win/faq.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Windows FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td align=right><a href="../../ref/build_win/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Windows FAQ</h1>
+<p><ol>
+<p><li><b>My Win* C/C++ application crashes in the Berkeley DB library when Berkeley DB calls
+fprintf (or some other standard C library function).</b>
+<p>You should be using the "Debug Multithreaded DLL" compiler option in
+your application when you link with the
+build_win32/Debug/libdb41d.lib library (this .lib file
+is actually a stub for libdb41d.DLL). To check this
+setting in Visual C++, choose the <i>Project/Settings</i> menu
+item and select <i>Code Generation</i> under the tab marked
+<i>C/C++</i>; and see the box marked <i>Use runtime
+library</i>. This should be set to <i>Debug Multithreaded DLL</i>.
+If your application is linked against the static library,
+build_win32/Debug/libdb41sd.lib; then, you will want
+to set <i>Use runtime library</i> to <i>Debug Multithreaded</i>.
+<p>Setting this option incorrectly can cause multiple versions of the
+standard libraries to be linked into your application (one on behalf
+of your application, and one on behalf of the Berkeley DB library). That
+violates assumptions made by these libraries, and traps can result.
+<p><li><b>Why are the build options for DB_DLL marked as "Use MFC in a Shared DLL"?
+Does Berkeley DB use MFC?</b>
+<p>Berkeley DB does not use MFC at all. It does, however, call malloc, free,
+and other facilities provided by the Microsoft C runtime library. We
+found in our work that many applications and libraries are built
+assuming MFC; specifying this for Berkeley DB solves various
+interoperation issues and guarantees that the right runtime libraries
+are selected. Note that because we do not use MFC facilities, the MFC
+library DLL is not marked as a dependency for libdb.dll, but the
+appropriate Microsoft C runtime is.
+<p><li><b>The test suite hangs under Windows.</b>
+<p>There are bugs in some versions of Tcl that may cause the test suite to
+hang on Windows (specifically, we've seen hangs on Windows/NT 4.0). Tcl
+version 8.4 (currently available as an alpha release) has fixed the
+problem, or there are patches available for Tcl 8.3.2 (see bug #119188
+in the Tcl SourceForge database). Note that if you want to run the test
+suite against a Debug version of Berkeley DB, you need to build a Debug
+version of Tcl. This involves building Tcl from its source.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_win/notes.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_win/intro.html b/libdb/docs/ref/build_win/intro.html
new file mode 100644
index 0000000..2b61153
--- /dev/null
+++ b/libdb/docs/ref/build_win/intro.html
@@ -0,0 +1,159 @@
+<!--"@(#)intro.so 10.26 (Sleepycat) 11/18/99"-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for Win32</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/ultrix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for Win32</h1>
+<p>The build_win32 directory in the Berkeley DB distribution contains project files
+for Microsoft Visual C++:
+<p><table border=1 align=center>
+<tr><th>Project File</th><th>Description</th></tr>
+<tr> <td align=center>Berkeley_DB.dsw</td> <td align=center>Visual C++ 6.0 workspace</td> </tr>
+<tr> <td align=center>*.dsp</td> <td align=center>Visual C++ 6.0 projects</td> </tr>
+</table>
+<p>These project files can be used to build Berkeley DB for any Win32 platform:
+Windows/XP, Windows/2000, Windows/NT, Windows/98 and Windows/95.
+<h3>Building Berkeley DB with Visual C++</h3>
+<p>Open the file <b>Berkeley_DB.dsw</b>. This workspace includes a number
+of subprojects needed to build Berkeley DB. To open the workspace in Visual C++ .NET,
+choose <i>Open Solution</i> from the <i>File</i> menu. Then
+choose <i>Compatible Workspace Files</i> under <i>Files of
+type</i>. After you select <b>Berkeley_DB.dsw</b>, you will be prompted to
+upgrade the project files. Choose <i>Yes to All</i>.
+<p>First, you'll need to set the include directories. To do this, select
+<i>Options...</i> from the <i>Tools</i> pull-down menu. In Visual
+C++ 6.0, a tabbed dialog should appear. Choose the <i>Directories</i>
+tab in the dialog, and for the <i>Platform</i>, select
+<i>Win32</i>. In Visual C++ .NET, a different window appears. Choose
+<i>Projects</i>, then <i>VC++ Directories</i>.
+<p>In either case, choose <i>Include files</i> under <i>Show
+directories for</i>. You should then add two directories to the list of
+directories: the full pathname of the <i>build_win32</i> subdirectory
+of Berkeley DB, followed by the full pathname of the <i>dbinc</i>
+subdirectory of Berkeley DB. Then click OK.
+<p>In Visual C++ 6.0, select <i>Active Project Configuration</i> under
+the <i>Build</i> pull-down menu. For a debug version of the
+libraries, tools, and examples, select <i>build_all -- Win32
+Debug</i>. Results from this build are put into <b>build_win32/Debug</b>.
+For a release version, select <i>build_all -- Win32 Release</i>;
+results are put into <b>build_win32/Release</b>. For a debug version
+that has all tools and examples built with static libraries, select
+<i>build_all -- Win32 Debug Static</i>; results are put into
+<b>build_win32/Debug_static</b>. For a release version of the same,
+select <i>build_all -- Win32 Release Static</i>; results are put into
+<b>build_win32/Release_static</b>. Finally, to build, select
+<i>Build build_all.exe</i> under the <i>Build</i> pull-down
+menu.
+<p>In Visual C++ .NET, choose the configuration from the drop down list on
+the tool bar (<i>Debug</i>, <i>Release</i>, <i>Debug
+Static</i> or <i>Release Static</i>). Then, to build, right-click on
+<i>build_all</i> and choose <i>Build</i>.
+<p>When building your application, you should normally use compile options
+"debug multithreaded dll" and link against
+<b>build_win32/Debug/libdb41d.lib</b>. If you want to
+link against a static (non-DLL) version of the library, use the "debug
+multithreaded" compile options and link against
+<b>build_win32/Debug_static/libdb41sd.lib</b>. You
+can also build using a release version of the libraries and tools, which
+will be placed in
+<b>build_win32/Release/libdb41.lib</b>. The static
+version will be in
+<b>build_win32/Release_static/libdb41s.lib</b>.
+<p>Each release of Berkeley DB is built and tested with this procedure using
+Microsoft Visual C++ 6.0, Standard Version and Microsoft Visual C++ .NET,
+Standard Version.
+<h3>Building the C++ API</h3>
+<p>C++ support is built automatically on Win32.
+<h3>Building the Java API</h3>
+<p>Java support is not built automatically. The following instructions
+assume that you have installed the Sun Java Development Kit in
+<b>d:/java</b>. Of course, if you installed elsewhere or have different
+Java software, you will need to adjust the pathnames accordingly. First,
+use the previous instructions to open the Tools/Options window for adding
+include directories. In addition to the directories specified previously,
+add <b>d:/java/include</b> and <b>d:/java/include/win32</b>. These
+are the directories needed when including <b>jni.h</b>. Now, before
+clicking OK, choose <i>Executable files</i> under <i>Show
+directories for</i>. Add <b>d:/java/bin</b>. That directory is needed to
+find javac. Now select OK.
+<p>In Visual C++ 6.0, select <i>Active Project Configuration</i> under
+the <i>Build</i> pull-down menu. Choose <i>db_java -- Win32
+Release</i>. To build, select <i>Build
+libdb_java41.dll</i> under the <i>Build</i> pull-down
+menu. This builds the Java support library for Berkeley DB and compiles all the
+java files, placing the resulting <b>db.jar</b> and
+<b>dbexamples.jar</b> files in the <b>build_win32/Release</b>
+subdirectory of Berkeley DB.
+<p>In Visual C++ .NET, set the build type to <i>Release</i> in the drop
+down list on the toolbar, then right-click on <i>db_java</i> and
+choose <i>Build</i>.
+<p>To run Java code, set your environment variable <b>CLASSPATH</b> to
+include the full pathname of these jar files, and your environment
+variable <b>PATH</b> to include the <b>build_win32/Release</b>
+subdirectory. On Windows, remember that files or directories in the
+<b>CLASSPATH</b> and <b>PATH</b> variables must be separated by
+semicolons (unlike UNIX). Then, try running the following command as a
+test:
+<p><blockquote><pre>java com.sleepycat.examples.AccessExample</pre></blockquote>
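+<p>Before running that command, and assuming Berkeley DB were installed in
+<b>c:\db</b> (a placeholder path), the environment variables might be set
+from the command prompt as follows:
+<p><blockquote><pre>set CLASSPATH=%CLASSPATH%;c:\db\build_win32\Release\db.jar;c:\db\build_win32\Release\dbexamples.jar
+set PATH=%PATH%;c:\db\build_win32\Release</pre></blockquote>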
+<p>If you want to run Java code using a Debug build, it is slightly more
+complicated. Make sure you build the Debug version of <i>db_java</i>
+instead of the Release version. Also make sure that your <b>PATH</b>
+contains <b>build_win32/Debug</b>. Then run the following (as one
+command):
+<p><blockquote><pre>java -dsleepycat.db.libname=libdb_java41d com.sleepycat.examples.AccessExample</pre></blockquote>
+<h3>Building the Tcl API</h3>
+<p>Tcl support is not built automatically. See
+<a href="../../ref/tcl/intro.html">Loading Berkeley DB with Tcl</a> for information on
+sites from which you can download Tcl and which Tcl versions are
+compatible with Berkeley DB.
+<p>The Tcl library must be built as the same build type as the Berkeley DB library
+(both Release or both Debug). We found that the binary release of Tcl can
+be used with the Release configuration of Berkeley DB, but you will need
+to build Tcl from sources for the Debug configuration. Before building
+Tcl, you will need to modify its makefile to make sure that you are
+building a debug version, including thread support. This is because the
+set of DLLs linked into the Tcl executable must match the corresponding
+set of DLLs used by Berkeley DB.
+<p>These notes assume that Tcl is installed as <b>d:/tcl</b>, but you can
+change that if you want. If you run using a version of Tcl different from
+the one currently being used by Sleepycat Software, you will need to
+change the name of the Tcl library used in the build (for example,
+<b>tcl83d.lib</b>) to the appropriate name. See
+Projects-&gt;Settings-&gt;Link in the db_tcl subproject.
+<p>Use the previous instructions for Visual C++ to open the
+<i>Tools/Options</i> window for adding include directories. In
+addition to the directories specified previously, add
+<b>d:/tcl/include</b>. This is the directory that contains
+<b>tcl.h</b>. Then, in that same window, choose <i>Library
+Files</i> under <i>Show directories for</i>. Add <b>d:/tcl/lib</b> (or
+whatever directory contains <b>tcl83d.lib</b> in your distribution) to
+the list. Now, select OK.
+<p>In Visual C++ 6.0, select <i>Active Project Configuration</i> under
+the <i>Build</i> pull-down menu. Choose <i>db_tcl -- Win32
+Release</i>. To build, select <i>Build
+libdb_tcl41.dll</i> under the <i>Build</i> pull-down
+menu. This builds the Tcl support library for Berkeley DB, placing the result
+into <b>build_win32/Release/libdb_tcl41.dll</b>.
+Selecting an Active Configuration of <i>db_tcl -- Win32 Debug</i> will
+build a debug version, placing the result into
+<b>build_win32/Debug/libdb_tcl41d.dll</b>.
+<p>In Visual C++ .NET, choose the build type (<i>Debug</i> or
+<i>Release</i>) from the drop down list on the toolbar, then
+right-click on <i>db_tcl</i> and choose <i>Build</i>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/ultrix.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/test.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_win/notes.html b/libdb/docs/ref/build_win/notes.html
new file mode 100644
index 0000000..b98656c
--- /dev/null
+++ b/libdb/docs/ref/build_win/notes.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Windows notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td align=right><a href="../../ref/build_win/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Windows notes</h1>
+<p><ol>
+<p><li>Various Berkeley DB interfaces take a <b>mode</b> argument, which is intended
+to specify the underlying file permissions for created files. Berkeley DB
+currently ignores this argument on Windows systems.
+<p>It would be possible to construct a set of security attributes to pass to
+<b>CreateFile</b> that accurately represents the mode. In the worst
+case, this would involve looking up user and all group names, and creating
+an entry for each. Alternatively, we could call the <b>_chmod</b>
+(partial emulation) function after file creation, although this leaves us
+with an obvious race.
+<p>Practically speaking, however, these efforts would be largely meaningless
+on FAT, the most common file system, which has only "readable" and
+"writable" flags that apply to all users.
+<p><li>On Windows/9X, files opened by multiple processes do not share data
+correctly. For this reason, the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag is implied
+for any application that does not specify the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag,
+causing the system paging file to be used for sharing data.
+<p>On all Windows platforms, system paging file memory is freed on last
+close. For this reason, multiple processes sharing a database
+environment created using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag must arrange
+for at least one process to always have the environment open, or
+alternatively that any process joining the environment be prepared to
+re-create it. If a shared environment is closed by all processes, a
+subsequent open without specifying the <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a> flag will
+return an error. Further, if a shared environment that supports
+transactions is closed by all processes, recovery must be run by the
+next process to open the environment or data corruption may occur.
+<p>When using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag, Berkeley DB shared regions are
+created without ACLs, which means that the regions are only accessible
+to a single user. If wider sharing is appropriate (for example, both
+user applications and Windows/NT service applications need to access
+the Berkeley DB regions), the Berkeley DB code will need to be modified to create
+the shared regions with the correct ACLs. Alternatively, by not
+specifying the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag, filesystem-backed regions
+will be created instead, and the permissions on those files may be
+directly specified through the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> interface.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_win/test.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/build_win/test.html b/libdb/docs/ref/build_win/test.html
new file mode 100644
index 0000000..c1581ee
--- /dev/null
+++ b/libdb/docs/ref/build_win/test.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite under Windows</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for Windows systems</dl></h3></td>
+<td align=right><a href="../../ref/build_win/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite under Windows</h1>
+<p>To build the test suite on Win32 platforms, you will need to configure
+Tcl support. You will also need sufficient main memory (at least 64MB)
+and disk space (around 250MB will be sufficient).
+<h3>Building the software needed by the tests</h3>
+<p>To build for testing, perform the following steps.
+<p><ol>
+<p><li>Open the <b>build_win32/Berkeley_DB.dsw</b> workspace.
+<p><li>Build the Tcl API (see <a href="../../ref/build_win/intro.html">Building the Tcl API</a> for details).
+<p><li>In Visual C++ 6.0, set the active configuration to db_test --
+Debug. To set an active configuration, under the <i>Build</i> menu,
+select <i>Set Active Configuration</i>. Then choose <i>db_test
+-- Debug</i>. In Visual C++ .NET, just make sure <i>Debug</i> is
+selected in the drop down list on the tool bar.
+<p><li>Build. In Visual C++ 6.0, the IDE menu item for this is called
+"build dbkill.exe", even though dbkill is just one of the things that is
+built. In Visual C++ .NET, right-click on the <i>db_test</i> project
+and select <i>Build</i>. This step makes sure that the base Berkeley DB
+.dll, Tcl support, and various tools that are needed by the test suite are
+all built.
+</ol>
+<p>Note that if you want to run the test suite against a Debug version of
+Berkeley DB, you need to build a Debug version of Tcl. This involves building
+Tcl from its source.
+<h3>Running the test suite under Windows</h3>
+<p>Before running the tests for the first time, you must edit the file
+<b>include.tcl</b> in your build directory and change the line
+that reads:
+<p><blockquote><pre>set tclsh_path SET_YOUR_TCLSH_PATH</pre></blockquote>
+<p>Change it to the location of the <b>tclsh</b> program (be
+sure to include the name of the executable). For example, if Tcl is
+installed in <b>d:/tcl</b>, this line should be the following:
+<p><blockquote><pre>set tclsh_path d:/tcl/bin/tclsh83d.exe</pre></blockquote>
+<p>If your path includes spaces, be sure to enclose it in quotes:
+<p><blockquote><pre>set tclsh_path "c:/Program Files/tcl/bin/tclsh83d.exe"</pre></blockquote>
+<p>If you run the test suite from the command prompt, make sure that the
+path to Berkeley DB's Tcl library is in your current path. On Windows NT/2000/XP,
+edit your PATH using the My Computer -&gt; Properties -&gt; Advanced
+-&gt; Environment Variables dialog. On earlier versions of Windows, you
+may find it convenient to add a line to c:\AUTOEXEC.BAT:
+<p><blockquote><pre>SET PATH=%PATH%;c:\db\build_win32\Debug</pre></blockquote>
+<p>Then, in a shell of your choice enter the following commands:
+<p><ol>
+<p><li>cd build_win32
+<p><li>run <b>d:/tcl/bin/tclsh83d.exe</b>, or the equivalent name of
+the Tcl shell for your distribution.
+<p>You should get a "%" prompt.
+<p><li>% source ../test/test.tcl.
+<p>If no errors occur, you should get a "%" prompt.
+</ol>
+<p>You are now ready to run tests in the test suite; see
+<a href="../../ref/test/run.html">Running the test suite</a> for more
+information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_win/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_win/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/cam/intro.html b/libdb/docs/ref/cam/intro.html
new file mode 100644
index 0000000..b596bb9
--- /dev/null
+++ b/libdb/docs/ref/cam/intro.html
@@ -0,0 +1,108 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB Concurrent Data Store applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Concurrent Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/env/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB Concurrent Data Store applications</h1>
+<p>It is often desirable to have concurrent read-write access to a database
+when there is no need for full recoverability or transaction semantics.
+For this class of applications, Berkeley DB provides an interface supporting
+deadlock-free, multiple-reader/single-writer access to the database.
+This means that at any instant in time, there may be either multiple
+readers accessing data or a single writer modifying data. The
+application is entirely unaware of which is happening, and Berkeley DB
+implements the necessary locking and blocking to ensure this behavior.
+<p>To create Berkeley DB Concurrent Data Store applications, you must first initialize an environment
+by calling <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>. You must specify the <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a>
+and <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flags to that interface. It is an error to
+specify any of the other <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> subsystem or recovery
+configuration flags, for example, <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>,
+<a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>, or <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a>. All databases must, of
+course, be created in this environment by using the <a href="../../api_c/db_create.html">db_create</a>
+interface or <a href="../../api_cxx/db_class.html">Db</a> constructor, and specifying the environment
+as an argument.
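+<p>A minimal sketch of this initialization, using a placeholder home
+directory and database name (and assuming the 4.1 C API shipped in this
+tree), might look like the following:
+<p><blockquote><pre>#include &lt;stddef.h&gt;
+#include &lt;db.h&gt;
+
+/* Illustrative Berkeley DB Concurrent Data Store setup; error handling is minimal. */
+int open_cds(DB_ENV **dbenvp, DB **dbpp)
+{
+    DB_ENV *dbenv;
+    DB *dbp = NULL;
+    int ret;
+
+    /* Only DB_INIT_CDB and DB_INIT_MPOOL may be specified. */
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;open(dbenv, "/path/to/dbhome",
+        DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL, 0)) != 0)
+        goto err;
+
+    /* All databases must be created in this environment. */
+    if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+        goto err;
+    if ((ret = dbp-&gt;open(dbp, NULL,
+        "mydb.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
+        goto err;
+
+    *dbenvp = dbenv;
+    *dbpp = dbp;
+    return (0);
+
+err:    if (dbp != NULL)
+        (void)dbp-&gt;close(dbp, 0);
+    (void)dbenv-&gt;close(dbenv, 0);
+    return (ret);
+}</pre></blockquote>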
+<p>Berkeley DB performs appropriate locking in its interface so that safe
+enforcement of the deadlock-free, multiple-reader/single-writer semantic
+is transparent to the application. However, a basic understanding of
+Berkeley DB Concurrent Data Store locking behavior is helpful when writing Berkeley DB Concurrent Data Store applications.
+<p>Berkeley DB Concurrent Data Store
+avoids deadlocks without the need for a deadlock detector by performing
+all locking on an entire database at once (or on an entire environment
+in the case of the <a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a> flag), and by ensuring that at
+any given time only one thread of control is allowed to simultaneously
+hold a read (shared) lock and attempt to acquire a write (exclusive)
+lock.
+<p>All open Berkeley DB cursors hold a read lock, which serves as a guarantee
+that the database will not change beneath them; likewise, all
+non-cursor <a href="../../api_c/db_get.html">DB-&gt;get</a> operations temporarily acquire and release
+a read lock that is held during the actual traversal of the database.
+Because read locks will not conflict with each other, any number of
+cursors in any number of threads of control may be open simultaneously,
+and any number of <a href="../../api_c/db_get.html">DB-&gt;get</a> operations may be concurrently in
+progress.
+<p>To enforce the rule that only one thread of control at a time can
+attempt to upgrade a read lock to a write lock, however, Berkeley DB must
+forbid multiple cursors from attempting to write concurrently. This is
+done using the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag to the <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>
+interface. This is the only difference between access method calls in
+Berkeley DB Concurrent Data Store and in the other Berkeley DB products. The <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag
+causes the newly created cursor to be a "write" cursor; that is, a
+cursor capable of performing writes as well as reads. Only cursors thus
+created are permitted to perform write operations (either deletes or
+puts), and only one such cursor can exist at any given time.
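+<p>For example, a write cursor might be created and used as in the following
+sketch (the key and data contents are placeholders, and error handling is
+minimal):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int put_one(DB *dbp)
+{
+    DBC *dbc;
+    DBT key, data;
+    int ret;
+
+    /* DB_WRITECURSOR is the only CDS-specific flag. */
+    if ((ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, DB_WRITECURSOR)) != 0)
+        return (ret);
+
+    /* Always zero DBTs before use. */
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "fruit";
+    key.size = sizeof("fruit");
+    data.data = "apple";
+    data.size = sizeof("apple");
+
+    ret = dbc-&gt;c_put(dbc, &amp;key, &amp;data, DB_KEYFIRST);
+
+    /* Close promptly: only one write cursor may exist at a time. */
+    (void)dbc-&gt;c_close(dbc);
+    return (ret);
+}</pre></blockquote>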
+<p>Any attempt to create a second write cursor or to perform a non-cursor
+write operation while a write cursor is open will block until that write
+cursor is closed. Read cursors may open and perform reads without blocking
+while a write cursor is extant. However, any attempts to actually perform
+a write, either using the write cursor or directly using the
+<a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/db_del.html">DB-&gt;del</a> methods, will block until all read cursors
+are closed. This is how the multiple-reader/single-writer semantic is
+enforced, and prevents reads from seeing an inconsistent database state
+that may be an intermediate stage of a write operation.
+<p>With these behaviors, Berkeley DB can guarantee deadlock-free concurrent
+database access, so that multiple threads of control are free to perform
+reads and writes without needing to handle synchronization themselves
+or having to run a deadlock detector. Because Berkeley DB has no knowledge
+of which cursors belong to which threads, however, some care must be
+taken to ensure that applications do not inadvertently block themselves,
+causing the application to hang and be unable to proceed. Some common
+mistakes include the following:
+<p><ol>
+<p><li>Keeping a cursor open while issuing a <a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/db_del.html">DB-&gt;del</a>
+access method call (see the sketch following this list).
+<p><li>Attempting to open a write cursor while a write cursor is already being
+held open by the same thread of control. Note that it is correct
+for one thread of control to attempt to open a write cursor
+or to perform a non-cursor write (<a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/db_del.html">DB-&gt;del</a>)
+while a write cursor is already active in another thread. It is only
+a problem if these things are done within a single thread of control --
+in which case that thread will block and never be able to release the
+lock that is blocking it.
+<p><li>Keeping a write cursor open for an extended period of time.
+<p><li>Not testing Berkeley DB error return codes (if any cursor operation returns
+an unexpected error, that cursor must still be closed).
+<p><li>By default, Berkeley DB Concurrent Data Store does locking on a per-database basis. For this reason,
+accessing multiple databases in different orders in different threads
+or processes, or leaving cursors open on one database while accessing
+another database, can cause an application to hang. If this behavior
+is a requirement for the application, Berkeley DB should be configured to do
+locking on an environment-wide basis. See the <a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a> flag
+of the <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> function for more information.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/debug/common.html b/libdb/docs/ref/debug/common.html
new file mode 100644
index 0000000..21ca146
--- /dev/null
+++ b/libdb/docs/ref/debug/common.html
@@ -0,0 +1,111 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Common errors</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td align=right><a href="../../ref/debug/printlog.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Common errors</h1>
+<p>This page outlines some of the most common problems that people encounter
+and some suggested courses of action.
+<p><dl compact>
+<p><dt><b>Symptom:</b><dd>Core dumps or garbage returns from random Berkeley DB operations.
+<p><dt>Possible Cause:<dd>Failure to zero out DBT structure before issuing request.
+<p><dt>Fix:<dd>Before using a <a href="../../api_c/dbt_class.html">DBT</a>, you must initialize all its elements
+to 0 and then set the ones you are using explicitly (see the illustrative
+sketch at the end of this page).
+<p><dt><b>Symptom:</b><dd>Random crashes and/or database corruption.
+<p><dt>Possible Cause:<dd>Running multiple threads, but did not specify <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+to <a href="../../api_c/db_open.html">DB-&gt;open</a> or <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.
+<p><dt>Fix:<dd>Any time you are sharing a handle across multiple threads, you must
+specify <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> when you open that handle.
+<p><dt><b>Symptom:</b><dd><a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> returns EINVAL.
+<p><dt>Possible Cause:<dd>The environment home directory is a remote mounted filesystem.
+<p><dt>Fix:<dd>Use a locally mounted filesystem instead.
+<p><dt><b>Symptom:</b><dd><a href="../../api_c/db_get.html">DB-&gt;get</a> calls are returning EINVAL.
+<p><dt>Possible Cause:<dd>The application is running with threads, but did not specify the
+<a href="../../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> or <a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a>
+flags in the <a href="../../api_c/dbt_class.html">DBT</a> structures used in the call.
+<p><dt>Fix:<dd>When running with threaded handles (that is, specifying <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>
+to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> or <a href="../../api_c/db_open.html">DB-&gt;open</a>), you must specify one of those
+flags for all <a href="../../api_c/dbt_class.html">DBT</a> structures in which Berkeley DB is returning data.
+<p><dt><b>Symptom:</b><dd>Running multiple threads or processes, and the database appears to be
+getting corrupted.
+<p><dt>Possible Cause:<dd>Locking is not enabled.
+<p><dt>Fix:<dd>Make sure that you are acquiring locks in your access methods. You
+must specify <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> to your <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> call and then
+pass that environment to <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+<p><dt><b>Symptom:</b><dd>Locks are accumulating, or threads and/or processes are deadlocking,
+even though there is no concurrent access to the database.
+<p><dt>Possible Cause:<dd>Failure to close a cursor.
+<p><dt>Fix:<dd>Cursors retain locks between calls. Everywhere the application uses
+a cursor, the cursor should be explicitly closed as soon as possible after
+it is used.
+<p><dt><b>Symptom:</b><dd>The system locks up.
+<p><dt>Possible Cause:<dd>Application not checking for <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>.
+<p><dt>Fix:<dd>Unless you are using the Concurrent Data Store product, whenever you
+have multiple threads and/or processes and at least one of them is
+writing, you have the potential for deadlock. As a result, you must
+test for the <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> return on every Berkeley DB call. In
+general, updates should take place in a transaction, or you might leave
+the database in an inconsistent state. Reads may take place outside
+the context of a transaction under common conditions.
+<p>Whenever you get a <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> return, you should do the
+following:
+<p><ol>
+<p><li>If you are running in a transaction, abort the transaction after first
+closing any cursors opened in the transaction.
+<p><li>If you are not running in a transaction, simply close the cursor that got
+the <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> (if it was a cursor operation), and retry.
+</ol>
+<p>See <a href="../../ref/transapp/put.html">Recoverability and deadlock
+avoidance</a> for further information.
+<p><dt><b>Symptom:</b><dd>An inordinately high number of deadlocks.
+<p><dt>Possible Cause:<dd>Read-Modify-Write pattern without using the RMW flag.
+<p><dt>Fix:<dd>If you frequently read a piece of data, modify it and then write
+it, you may be inadvertently causing a large number of deadlocks. Try
+specifying the <a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag on your get calls.
+<p>Or, if the application is doing a large number of updates in a small
+database, turning off Btree splits may help (see <a href="../../api_c/db_set_flags.html#DB_REVSPLITOFF">DB_REVSPLITOFF</a>
+for more information).
+<p><dt><b>Symptom:</b><dd>I run recovery and it exits cleanly, but my database changes are missing.
+<p><dt>Possible Cause:<dd>Failure to enable logging and transactions in the database environment;
+failure to specify <a href="../../api_c/env_class.html">DB_ENV</a> handle when creating <a href="../../api_c/db_class.html">DB</a> handle;
+transaction handle not passed to Berkeley DB interface; failure to commit the
+transaction.
+<p><dt>Fix:<dd>Make sure that the environment and database handles are properly
+created, that the application passes the transaction handle returned by
+<a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> to the appropriate Berkeley DB interfaces, and that each
+transaction is eventually committed.
+<p><dt><b>Symptom:</b><dd>Recovery fails.
+<p><dt>Possible Cause:<dd>A database was updated in a transactional environment, both with and
+without transactional handles.
+<p><dt>Fix:<dd>If any database write operation is done using a transaction handle,
+every write operation must be done in the context of a transaction.
+<p><dt><b>Symptom:</b><dd>A database environment locks up, sometimes gradually.
+<p><dt>Possible Cause:<dd>A thread of control exited unexpectedly, holding Berkeley DB resources.
+<p><dt>Fix:<dd>Whenever a thread of control exits holding Berkeley DB resources, all threads
+of control must exit the database environment, and recovery must be run.
+<p><dt><b>Symptom:</b><dd>A database environment locks up, sometimes gradually.
+<p><dt>Possible Cause:<dd>Cursors are not being closed before transaction abort.
+<p><dt>Fix:<dd>Before an application aborts a transaction, any cursors opened within
+the context of that transaction must be closed.
+<p><dt><b>Symptom:</b><dd>Transaction abort or recovery fails, or database corruption occurs.
+<p><dt>Possible Cause:<dd>Log files were removed before it was safe.
+<p><dt>Fix:<dd>Do not remove any log files from a database environment until Berkeley DB
+declares it safe.
+</dl>
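+<p>As an illustrative sketch (not part of the original page), the fixes
+above for uninitialized <a href="../../api_c/dbt_class.html">DBT</a> structures and for handling
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> returns might be combined as follows; the helper
+name and handles are hypothetical:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Hypothetical helper: store a key/data pair, retrying on deadlock. */
+int
+store_record(DB_ENV *dbenv, DB *dbp, char *keystr, char *datastr)
+{
+    DB_TXN *txn;
+    DBT key, data;
+    int ret;
+
+    /* Zero out the DBTs, then set only the fields that are used. */
+    memset(&key, 0, sizeof(key));
+    memset(&data, 0, sizeof(data));
+    key.data = keystr;
+    key.size = (u_int32_t)strlen(keystr) + 1;
+    data.data = datastr;
+    data.size = (u_int32_t)strlen(datastr) + 1;
+
+    for (;;) {
+        if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+            return (ret);
+        if ((ret = dbp->put(dbp, txn, &key, &data, 0)) == 0)
+            return (txn->commit(txn, 0));
+
+        /* On any error, abort; retry only on deadlock. */
+        (void)txn->abort(txn);
+        if (ret != DB_LOCK_DEADLOCK)
+            return (ret);
+    }
+}</pre></blockquote>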
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/debug/printlog.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/debug/compile.html b/libdb/docs/ref/debug/compile.html
new file mode 100644
index 0000000..afc14f9
--- /dev/null
+++ b/libdb/docs/ref/debug/compile.html
@@ -0,0 +1,45 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compile-time configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td align=right><a href="../../ref/debug/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compile-time configuration</h1>
+<p>There are three compile-time configuration options that assist in
+debugging Berkeley DB and Berkeley DB applications:
+<p><dl compact>
+<p><dt><a href="../../ref/build_unix/conf.html#--enable-debug">--enable-debug</a><dd>If you want to build Berkeley DB with <b>-g</b> as the C and C++ compiler
+flag, enter --enable-debug as an argument to configure. This will create
+Berkeley DB with debugging symbols, as well as load various Berkeley DB routines
+that can be called directly from a debugger to display database page
+content, cursor queues, and so forth. (Note that the <b>-O</b>
+optimization flag will still be specified. To compile with only the
+<b>-g</b> flag, explicitly set the <b>CFLAGS</b> environment variable
+before configuring.)
+<p><dt><a href="../../ref/build_unix/conf.html#--enable-diagnostic">--enable-diagnostic</a><dd>If you want to build Berkeley DB with debugging run-time sanity checks and with
+DIAGNOSTIC #defined during compilation, enter --enable-diagnostic as an
+argument to configure. This will cause a number of special checks to be
+performed when Berkeley DB is running. This flag should not be defined when
+configuring to build production binaries because it degrades performance.
+<p><dt><a href="../../ref/build_unix/conf.html#--enable-umrw">--enable-umrw</a><dd>When compiling Berkeley DB for use in run-time memory consistency checkers
+(in particular, programs that look for reads and writes of uninitialized
+memory), use --enable-umrw as an argument to configure. This
+guarantees, among other things, that Berkeley DB will completely initialize
+allocated pages rather than initializing only the minimum necessary
+amount.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/debug/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/debug/intro.html b/libdb/docs/ref/debug/intro.html
new file mode 100644
index 0000000..a2f2c2f
--- /dev/null
+++ b/libdb/docs/ref/debug/intro.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td align=right><a href="../../ref/install/rpm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/compile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Because Berkeley DB is an embedded library, debugging applications that use
+Berkeley DB is both harder and easier than debugging a separate server.
+Debugging can be harder because when a problem arises, it is not always
+readily apparent whether the problem is in the application, is in the
+database library, or is a result of an unexpected interaction between
+the two. Debugging can be easier because it is easier to track down a
+problem when you can review a stack trace rather than deciphering
+interprocess communication messages. This chapter is intended to assist
+you with debugging applications and reporting bugs to us so that we can
+provide you with the correct answer or fix as quickly as possible.
+<p>When you encounter a problem, there are a few general actions you can
+take:
+<p><dl compact>
+<p><dt>Review the Berkeley DB error output<dd>If an error output mechanism has been configured in the Berkeley DB
+environment, additional run-time error messages are made available to
+the applications. If you are not using an environment, it is well worth
+modifying your application to create one so that you can get more
+detailed error messages. See <a href="runtime.html">Run-time error
+information</a> for more information on configuring Berkeley DB to output these
+error messages.
+<p><dt>Review <a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a><dd>Check the list of flags for the <a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a> function, and
+see if any of them will produce additional information that might help
+understand the problem.
+<p><dt>Add run-time diagnostics<dd>You can configure and build Berkeley DB to perform run-time diagnostics. (By
+default, these checks are not done because they can seriously impact
+performance.) See <a href="compile.html">Compile-time configuration</a> for more
+information.
+<p><dt>Apply all available patches<dd>Before reporting a problem to Sleepycat Software, please upgrade to the
+latest Sleepycat Software release of Berkeley DB, if possible, or at least
+make sure you have applied any updates available for your release from
+the <a href="http://www.sleepycat.com/update/index.html">Sleepycat
+Software web site</a>.
+<p><dt>Run the test suite<dd>If you see repeated failures or failures of simple test cases, run the
+Berkeley DB test suite to determine whether the distribution of Berkeley DB you are
+using was built and configured correctly.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/install/rpm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/compile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/debug/printlog.html b/libdb/docs/ref/debug/printlog.html
new file mode 100644
index 0000000..b09f30b
--- /dev/null
+++ b/libdb/docs/ref/debug/printlog.html
@@ -0,0 +1,153 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Reviewing Berkeley DB log files</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td align=right><a href="../../ref/debug/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/common.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Reviewing Berkeley DB log files</h1>
+<p>If you are running with transactions and logging, the <a href="../../utility/db_printlog.html">db_printlog</a>
+utility can be a useful debugging aid. The <a href="../../utility/db_printlog.html">db_printlog</a> utility
+will display the contents of your log files in a human-readable (and
+machine-processable) format.
+<p>The <a href="../../utility/db_printlog.html">db_printlog</a> utility will attempt to display any and all
+log files present in a designated db_home directory. For each log record,
+<a href="../../utility/db_printlog.html">db_printlog</a> will display a line of the form:
+<p><blockquote><pre>[22][28]db_big: rec: 43 txnid 80000963 prevlsn [21][10483281]</pre></blockquote>
+<p>The opening numbers in square brackets are the <i>log sequence
+number</i> (<i>LSN</i>) of the log record being displayed. The first
+number indicates the log file in which the record appears, and the
+second number indicates the offset in that file of the record.
+<p>The first character string identifies the particular log operation being
+reported. The log records corresponding to particular operations are
+described below. The rest of the line consists of name/value pairs.
+<p>The rec field indicates the record type (this is used to dispatch records
+in the log to appropriate recovery functions).
+<p>The txnid field identifies the transaction for which this record was
+written. A txnid of 0 means that the record was written outside the
+context of any transaction. You will see these most frequently for
+checkpoints.
+<p>Finally, the prevlsn contains the LSN of the last record for this
+transaction. By following prevlsn fields, you can accumulate all the
+updates for a particular transaction. During normal abort processing,
+this field is used to quickly access all the records for a particular
+transaction.
+<p>After the initial line identifying the record type, each field of the log
+record is displayed, one item per line. There are several fields that
+appear in many different records and a few fields that appear only in
+some records.
+<p>The following table presents each currently written log record type with
+a brief description of the operation it describes.
+<!--START LOG RECORD TYPES-->
+<p><table border=1>
+<tr><th>Log Record Type</th><th>Description</th></tr>
+<tr><td>bam_adj</td><td>Used when we insert/remove an index into/from the page header of a Btree page.</td></tr>
+<tr><td>bam_cadjust</td><td>Keeps track of record counts in a Btree or Recno database.</td></tr>
+<tr><td>bam_cdel</td><td>Used to mark a record on a page as deleted.</td></tr>
+<tr><td>bam_curadj</td><td>Used to adjust a cursor location when a nearby record changes in a Btree database.</td></tr>
+<tr><td>bam_rcuradj</td><td>Used to adjust a cursor location when a nearby record changes in a Recno database.</td></tr>
+<tr><td>bam_repl</td><td>Describes a replace operation on a record.</td></tr>
+<tr><td>bam_root</td><td>Describes an assignment of a root page.</td></tr>
+<tr><td>bam_rsplit</td><td>Describes a reverse page split.</td></tr>
+<tr><td>bam_split</td><td>Describes a page split.</td></tr>
+<tr><td>crdel_metasub</td><td>Describes the creation of a metadata page for a subdatabase.</td></tr>
+<tr><td>db_addrem</td><td>Add or remove an item from a page of duplicates.</td></tr>
+<tr><td>db_big</td><td>Add an item to an overflow page (<i>overflow pages</i> contain items too large to place on the main page)</td></tr>
+<tr><td>db_cksum</td><td>Unable to checksum a page.</td></tr>
+<tr><td>db_debug</td><td>Log debugging message.</td></tr>
+<tr><td>db_noop</td><td>This marks an operation that did nothing but update the LSN on a page.</td></tr>
+<tr><td>db_ovref</td><td>Increment or decrement the reference count for a big item.</td></tr>
+<tr><td>db_pg_alloc</td><td>Indicates that we allocated a page to a Btree.</td></tr>
+<tr><td>db_pg_free</td><td>Indicates that we freed a page in the Btree (freed pages are added to a freelist and reused).</td></tr>
+<tr><td>db_relink</td><td>Fix prev/next chains on duplicate pages because a page was added or removed.</td></tr>
+<tr><td>dbreg_register</td><td>Records an open of a file (mapping the filename to a log-id that is used in subsequent log operations).</td></tr>
+<tr><td>ham_chgpg</td><td>Used to adjust a cursor location when a Hash page is removed, and its elements are moved to a different Hash page.</td></tr>
+<tr><td>ham_copypage</td><td>Used when we empty a bucket page, but there are overflow pages for the bucket; one needs to be copied back into the actual bucket.</td></tr>
+<tr><td>ham_curadj</td><td>Used to adjust a cursor location when a nearby record changes in a Hash database.</td></tr>
+<tr><td>ham_groupalloc</td><td>Allocate some number of contiguous pages to the Hash database.</td></tr>
+<tr><td>ham_insdel</td><td>Insert/delete an item on a Hash page.</td></tr>
+<tr><td>ham_metagroup</td><td>Update the metadata page to reflect the allocation of a sequence of contiguous pages.</td></tr>
+<tr><td>ham_newpage</td><td>Adds or removes overflow pages from a Hash bucket.</td></tr>
+<tr><td>ham_replace</td><td>Handle updates to records that are on the main page.</td></tr>
+<tr><td>ham_splitdata</td><td>Record the page data for a split.</td></tr>
+<tr><td>qam_add</td><td>Describes the actual addition of a new record to a Queue.</td></tr>
+<tr><td>qam_del</td><td>Delete a record in a Queue.</td></tr>
+<tr><td>qam_delext</td><td>Delete a record in a Queue with extents.</td></tr>
+<tr><td>qam_incfirst</td><td>Increments the record number that refers to the first record in the database.</td></tr>
+<tr><td>qam_mvptr</td><td>Indicates that we changed the reference to either or both of the first and current records in the file.</td></tr>
+<tr><td>txn_child</td><td>Commit a child transaction.</td></tr>
+<tr><td>txn_ckp</td><td>Transaction checkpoint.</td></tr>
+<tr><td>txn_recycle</td><td>Transaction IDs wrapped.</td></tr>
+<tr><td>txn_regop</td><td>Logs a regular (non-child) transaction commit.</td></tr>
+<tr><td>txn_xa_regop</td><td>Logs a prepare message.</td></tr>
+</table>
+<!--END LOG RECORD TYPES-->
+<h3>Augmenting the Log for Debugging</h3>
+<p>When debugging applications, it is sometimes useful to log not only the
+actual operations that modify pages, but also the underlying Berkeley DB
+functions being executed. This form of logging can add significant bulk
+to your log, but can permit debugging application errors that are almost
+impossible to find any other way. To turn on these log messages, specify
+the --enable-debug_rop and --enable-debug_wop configuration options when
+configuring Berkeley DB. See <a href="../../ref/build_unix/conf.html">Configuring
+Berkeley DB</a> for more information.
+<h3>Extracting Committed Transactions and Transaction Status</h3>
+<p>Sometimes, it is helpful to use the human-readable log output to
+determine which transactions committed and aborted. The awk script
+commit.awk (found in the db_printlog directory of the Berkeley DB
+distribution) allows you to do just that. The following command,
+where log_output is the output of db_printlog, will display a list of
+the transaction IDs of all committed transactions found in the log:
+<p><blockquote><pre>awk -f commit.awk log_output</pre></blockquote>
+<p>If you need a complete list of both committed and aborted transactions,
+then the script status.awk will produce it. The syntax is as follows:
+<p><blockquote><pre>awk -f status.awk log_output</pre></blockquote>
+<h3>Extracting Transaction Histories</h3>
+<p>Another useful debugging aid is to print out the complete history of a
+transaction. The awk script txn.awk allows you to do that. The
+following command line, where log_output is the output of
+<a href="../../utility/db_printlog.html">db_printlog</a> and txnlist is a comma-separated list of transaction
+IDs, will display all log records associated with the designated
+transaction ids:
+<p><blockquote><pre>awk -f txn.awk TXN=txnlist log_output</pre></blockquote>
+<h3>Extracting File Histories</h3>
+<p>The awk script fileid.awk allows you to extract all log records that
+refer to a designated file. The syntax for the fileid.awk script is
+the following, where log_output is the output of db_printlog and fids
+is a comma-separated list of fileids:
+<p><blockquote><pre>awk -f fileid.awk PGNO=fids log_output</pre></blockquote>
+<h3>Extracting Page Histories</h3>
+<p>The awk script pgno.awk allows you to extract all log records that refer
+to designated page numbers. However, because this script will extract
+records with the designated page numbers for all files, it is most
+useful in conjunction with the fileid script. The syntax for the
+pgno.awk script is the following, where log_output is the output of
+db_printlog and pgnolist is a comma-separated list of page numbers:
+<p><blockquote><pre>awk -f pgno.awk PGNO=pgnolist log_output</pre></blockquote>
+<h3>Other log processing tools</h3>
+<p>The awk script count.awk prints out the number of log records
+encountered that belonged to some transaction (that is, the number of
+log records excluding those for checkpoints and
+non-transaction-protected operations).
+<p>The script range.awk will extract a subset of a log. This is useful
+when the output of <a href="../../utility/db_printlog.html">db_printlog</a> is too large to be reasonably
+manipulated with an editor or other tool. The syntax for range.awk is
+the following, where <b>sf</b> and <b>so</b> represent the LSN
+of the beginning of the sublog you want to extract, and <b>ef</b> and
+<b>eo</b> represent the LSN of the end of the sublog you want to
+extract:
+<p><blockquote><pre>awk -f range.awk START_FILE=sf START_OFFSET=so END_FILE=ef END_OFFSET=eo log_output</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/debug/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/common.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/debug/runtime.html b/libdb/docs/ref/debug/runtime.html
new file mode 100644
index 0000000..5e703f4
--- /dev/null
+++ b/libdb/docs/ref/debug/runtime.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Run-time error information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Debugging Applications</dl></h3></td>
+<td align=right><a href="../../ref/debug/compile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/printlog.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Run-time error information</h1>
+<p>Normally, when an error occurs in the Berkeley DB library, an integer value
+(either a Berkeley DB specific value or a system <b>errno</b> value) is
+returned by the Berkeley DB interface. In some cases, however, this value
+may be insufficient to completely describe the cause of the error,
+especially during initial application debugging.
+<p>There are four interfaces intended to provide applications with
+additional run-time error information:
+<a href="../../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>, <a href="../../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a>,
+<a href="../../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>, and <a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a>.
+<p>If the environment is configured with these interfaces, many Berkeley DB errors
+will result in additional information being written to a file or passed
+as an argument to an application function.
+<p>The Berkeley DB error-reporting facilities do not slow performance or
+significantly increase application size, and may be run during normal
+operation as well as during debugging. Where possible, we recommend
+that these options always be configured and the output saved in the
+filesystem. We have found that this often saves time when debugging
+installation or other system-integration problems.
+<p>In addition, there are three interfaces to assist applications in
+displaying their own error messages: <a href="../../api_c/env_strerror.html">db_strerror</a>,
+<a href="../../api_c/env_err.html">DB_ENV-&gt;err</a>, and <a href="../../api_c/env_err.html">DB_ENV-&gt;errx</a>. The first is a superset of
+the ANSI C strerror interface, and returns a descriptive string for any
+error return from the Berkeley DB library. The <a href="../../api_c/env_err.html">DB_ENV-&gt;err</a> and
+<a href="../../api_c/env_err.html">DB_ENV-&gt;errx</a> methods use the error message configuration options
+described previously to format and display error messages to appropriate
+output devices.
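+<p>As a brief sketch (not part of the original page), these interfaces
+might be configured during application startup as follows; the prefix
+string is hypothetical:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Hypothetical fragment: route Berkeley DB error messages to stderr,
+ * prefix them with the application name, and report one error.
+ */
+void
+report_error(DB_ENV *dbenv, int ret)
+{
+    dbenv->set_errfile(dbenv, stderr);  /* write messages to stderr */
+    dbenv->set_errpfx(dbenv, "my_app"); /* prefix each message */
+
+    /* DB_ENV->err appends the db_strerror() text for ret. */
+    dbenv->err(dbenv, ret, "unable to open the environment");
+
+    /* db_strerror can also be used directly. */
+    fprintf(stderr, "my_app: %s\n", db_strerror(ret));
+}</pre></blockquote>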
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/debug/compile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/printlog.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/distrib/layout.html b/libdb/docs/ref/distrib/layout.html
new file mode 100644
index 0000000..f2872f0
--- /dev/null
+++ b/libdb/docs/ref/distrib/layout.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Source code layout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distribution</dl></h3></td>
+<td align=right><a href="../../ref/distrib/port.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/refs/refs.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Source code layout</h1>
+<p><table border=1 align=center>
+<tr><th>Directory</th><th>Description</th></tr>
+<tr><td>LICENSE</td><td>Berkeley DB Copyright</td></tr>
+<tr><td>btree</td><td>Btree access method source code</td></tr>
+<tr><td>build_unix</td><td>UNIX build directory</td></tr>
+<tr><td>build_vxworks</td><td>VxWorks build directory.</td></tr>
+<tr><td>build_win32</td><td>Windows build directory.</td></tr>
+<tr><td>clib</td><td>C library replacement functions</td></tr>
+<tr><td>common</td><td>Common Berkeley DB functions</td></tr>
+<tr><td>crypto</td><td>Cryptographic support</td></tr>
+<tr><td>cxx</td><td>C++ API</td></tr>
+<tr><td>db</td><td>Berkeley DB database interfaces</td></tr>
+<tr><td>db185</td><td>Berkeley DB version 1.85 compatibility API</td></tr>
+<tr><td>db_archive</td><td>The db_archive utility</td></tr>
+<tr><td>db_checkpoint</td><td>The db_checkpoint utility</td></tr>
+<tr><td>db_deadlock</td><td>The db_deadlock utility</td></tr>
+<tr><td>db_dump</td><td>The db_dump utility</td></tr>
+<tr><td>db_dump185</td><td>The db_dump185 utility</td></tr>
+<tr><td>db_load</td><td>The db_load utility</td></tr>
+<tr><td>db_printlog</td><td>The db_printlog debugging utility</td></tr>
+<tr><td>db_recover</td><td>The db_recover utility</td></tr>
+<tr><td>db_stat</td><td>The db_stat utility</td></tr>
+<tr><td>db_upgrade</td><td>The db_upgrade utility</td></tr>
+<tr><td>db_verify</td><td>The db_verify utility</td></tr>
+<tr><td>dbinc</td><td>C language include files</td></tr>
+<tr><td>dbinc_auto</td><td>Automatically generated C language include files</td></tr>
+<tr><td>dbm</td><td>The dbm/ndbm compatibility APIs</td></tr>
+<tr><td>dbreg</td><td>Berkeley DB database handle logging support</td></tr>
+<tr><td>dist</td><td>Berkeley DB administration/distribution tools</td></tr>
+<tr><td>docs</td><td>Documentation</td></tr>
+<tr><td>env</td><td>Berkeley DB environment interfaces</td></tr>
+<tr><td>examples_c</td><td>C API example programs</td></tr>
+<tr><td>examples_cxx</td><td>C++ API example programs</td></tr>
+<tr><td>examples_java</td><td>Java API example programs</td></tr>
+<tr><td>fileops</td><td>File object operation support</td></tr>
+<tr><td>hash</td><td>Hash access method</td></tr>
+<tr><td>hmac</td><td>Checksum support</td></tr>
+<tr><td>hsearch</td><td>The hsearch compatibility API</td></tr>
+<tr><td>java</td><td>Java API</td></tr>
+<tr><td>libdb_java</td><td>The libdb_java shared library</td></tr>
+<tr><td>lock</td><td>Lock manager</td></tr>
+<tr><td>log</td><td>Log manager</td></tr>
+<tr><td>mp</td><td>Shared memory buffer pool</td></tr>
+<tr><td>mutex</td><td>Mutexes</td></tr>
+<tr><td>os</td><td>POSIX 1003.1 operating-system specific functionality</td></tr>
+<tr><td>os_vxworks</td><td>VxWorks operating-system specific functionality</td></tr>
+<tr><td>os_win32</td><td>Windows operating-system specific functionality</td></tr>
+<tr><td>perl</td><td>DB_File and BerkeleyDB Perl modules</td></tr>
+<tr><td>qam</td><td>Queue access method source code</td></tr>
+<tr><td>rep</td><td>Replication source code</td></tr>
+<tr><td>rpc_client</td><td>RPC client interface</td></tr>
+<tr><td>rpc_server</td><td>RPC server utility</td></tr>
+<tr><td>tcl</td><td>Tcl API</td></tr>
+<tr><td>test</td><td>Test suite</td></tr>
+<tr><td>txn</td><td>Transaction manager</td></tr>
+<tr><td>xa</td><td>X/Open Distributed Transaction Processing XA interface</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/distrib/port.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/refs/refs.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/distrib/port.html b/libdb/docs/ref/distrib/port.html
new file mode 100644
index 0000000..b993041
--- /dev/null
+++ b/libdb/docs/ref/distrib/port.html
@@ -0,0 +1,110 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Porting Berkeley DB to new architectures</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distribution</dl></h3></td>
+<td align=right><a href="../../ref/test/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/layout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Porting Berkeley DB to new architectures</h1>
+<p>Berkeley DB is generally easy to port to new architectures. Berkeley DB was
+designed to be as portable as possible, and has been ported to a wide
+variety of systems, from Wind River's Tornado system, to VMS, to
+Windows/NT and Windows/95, and most existing UNIX platforms. It runs
+on 16-, 32-, and 64-bit machines, little- or big-endian. The difficulty of
+a port depends on how much of the ANSI C and POSIX 1003.1 standards the
+new architecture offers.
+<p>An abstraction layer separates the main Berkeley DB code from the operating
+system and architecture specific components. This layer is comprised
+of approximately 2500 lines of C language code, found in the <b>os</b>
+subdirectory of the Berkeley DB distribution. The following list of files
+includes functionality that may need to be modified or implemented in
+order to support a new architecture. Each file usually contains a single
+function, but sometimes several (for example, the
+<b>os_alloc.c</b> file contains the malloc, calloc, realloc, free,
+and strdup functions).
+<p><table border=1 align=center>
+<tr><th>Source file</th><th>Description</th></tr>
+<tr><td>os_abs.c</td><td>Return if a filename is an absolute pathname</td></tr>
+<tr><td>os_alloc.c</td><td>ANSI C malloc, calloc, realloc, strdup, free front-ends</td></tr>
+<tr><td>os_clock.c</td><td>Return the current time-of-day</td></tr>
+<tr><td>os_config.c</td><td>Return run-time configuration information</td></tr>
+<tr><td>os_dir.c</td><td>Read the filenames from a directory</td></tr>
+<tr><td>os_errno.c</td><td>Set/get the ANSI C errno value</td></tr>
+<tr><td>os_fid.c</td><td>Create a unique ID for a file</td></tr>
+<tr><td>os_fsync.c</td><td>POSIX 1003.1 fsync front-end</td></tr>
+<tr><td>os_handle.c</td><td>Open file handles</td></tr>
+<tr><td>os_id.c</td><td>Return thread ID</td></tr>
+<tr><td>os_map.c</td><td>Map a shared memory area</td></tr>
+<tr><td>os_method.c</td><td>Run-time replacement of system calls</td></tr>
+<tr><td>os_oflags.c</td><td>Convert POSIX 1003.1 open flags, modes to Berkeley DB flags</td></tr>
+<tr><td>os_open.c</td><td>Open file handles</td></tr>
+<tr><td>os_region.c</td><td>Map a shared memory area</td></tr>
+<tr><td>os_rename.c</td><td>POSIX 1003.1 rename call</td></tr>
+<tr><td>os_root.c</td><td>Return if application has special permissions</td></tr>
+<tr><td>os_rpath.c</td><td>Return last pathname separator</td></tr>
+<tr><td>os_rw.c</td><td>POSIX 1003.1 read/write calls</td></tr>
+<tr><td>os_seek.c</td><td>POSIX 1003.1 seek call</td></tr>
+<tr><td>os_sleep.c</td><td>Cause a thread of control to release the CPU</td></tr>
+<tr><td>os_spin.c</td><td>Return the times to spin while waiting for a mutex</td></tr>
+<tr><td>os_stat.c</td><td>POSIX 1003.1 stat call</td></tr>
+<tr><td>os_tmpdir.c</td><td>Set the path for temporary files</td></tr>
+<tr><td>os_unlink.c</td><td>POSIX 1003.1 unlink call</td></tr>
+</table>
+<p>All but a few of these files contain relatively trivial pieces of code.
+Typically, there is only a single version of the code for all platforms
+Berkeley DB supports, and that code lives in the <b>os</b> directory of the
+distribution. Where different code is required, the code is either
+conditionally compiled or an entirely different version is written. For
+example, VxWorks versions of some of these files can be found in the
+distribution directory os_vxworks, and Win32 versions can be found in
+os_win32.
+<p>Historically, there have been only two difficult questions to answer for each
+new port. The first question is how to handle shared memory. In order
+to write multiprocess database applications (not multithreaded, but
+threads of control running in different address spaces), Berkeley DB must be
+able to name pieces of shared memory and access them from multiple
+processes. On UNIX/POSIX systems, we use <b>mmap</b> and
+<b>shmget</b> for that purpose, but any interface that provides access
+to named shared memory is sufficient. If you have a simple, flat
+address space, you should be able to use the code in
+<b>os_vxworks/os_map.c</b> as a starting point for the port. If you
+are not intending to write multiprocess database applications, then
+this won't be necessary, as Berkeley DB can simply allocate memory from the
+heap if all threads of control will live in a single address space.
+<p>The second question is mutex support. Berkeley DB requires some form of
+<b>self-blocking</b> mutual exclusion mutex. Blocking mutexes are
+preferred as they tend to be less CPU-expensive and less likely to cause
+thrashing. If blocking mutexes are not available, however, test-and-set
+will work as well. The code for mutexes is in two places in the system:
+the include file <b>dbinc/mutex.h</b>, and the distribution directory
+<b>mutex</b>.
+<p>Berkeley DB uses the GNU autoconf tools for configuration on almost all of
+the platforms it supports. Specifically, the include file
+<b>db_config.h</b> configures the Berkeley DB build. The simplest way to
+begin a port is to configure and build Berkeley DB on a UNIX or UNIX-like
+system, and then take the <b>Makefile</b> and <b>db_config.h</b>
+files created by that configuration, and modify them by hand to reflect
+the needs of the new architecture. Unless you're already familiar with
+the GNU autoconf toolset, we don't recommend you take the time to
+integrate your changes back into the Berkeley DB autoconfiguration framework.
+Instead, send Sleepycat Software context diffs of your changes and any
+new source files you created, and we'll integrate the changes into our
+source tree.
+<p>Finally, we're happy to work with you on the port, or potentially, do
+the port ourselves, if that is of interest to you. Regardless, if you
+have any porting questions, just let us know, and we will be happy to
+answer them.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/test/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/layout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/dumpload/format.html b/libdb/docs/ref/dumpload/format.html
new file mode 100644
index 0000000..e997272
--- /dev/null
+++ b/libdb/docs/ref/dumpload/format.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Dump output formats</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td align=right><a href="../../ref/dumpload/utility.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/text.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Dump output formats</h1>
+<p>There are two output formats used by <a href="../../utility/db_dump.html">db_dump</a> and <a href="../../utility/db_dump.html">db_dump185</a>.
+<p>In both output formats, the first few lines of the output contain header
+information describing the underlying access method, filesystem page size,
+and other bookkeeping information.
+<p>The header information starts with a single line, VERSION=N, where N is
+the version number of the dump output format.
+<p>The header information is then output in name=value pairs, where name may
+be any of the keywords listed in the <a href="../../utility/db_load.html">db_load</a> manual page, and
+value will be its value. Although this header information can be manually
+edited before the database is reloaded, there is rarely any reason to do
+so because all of this information can also be specified or overridden by
+command-line arguments to <a href="../../utility/db_load.html">db_load</a>.
+<p>The header information ends with a single line, HEADER=END.
+<p>Following the header information are the key/data pairs from the
+database. If the database being dumped is a Btree or Hash database, or
+if the <b>-k</b> option was specified, the output will be paired lines
+of text where the first line of the pair is the key item, and the second
+line of the pair is its corresponding data item. If the database being
+dumped is a Queue or Recno database, and the <b>-k</b> option was not
+specified, the output will be lines of text where each line is the next
+data item for the database. Each of these lines is preceded by a single
+space.
+<p>If the <b>-p</b> option to <a href="../../utility/db_dump.html">db_dump</a> or <a href="../../utility/db_dump.html">db_dump185</a> was
+specified, the key/data lines will consist of single characters
+representing any characters from the database that are <i>printing
+characters</i>, and backslash (<b>\</b>)-escaped characters
+for any that are not. Backslash characters appearing in the output mean
+one of two things: if the backslash character precedes another backslash
+character, it means that a literal backslash character occurred in the
+key or data item. If the backslash character precedes any other
+character, the next two characters must be interpreted as hexadecimal
+specification of a single character; for example, <b>\0a</b>
+is a newline character in the ASCII character set.
+<p>Although some care should be exercised, it is perfectly reasonable to use
+standard text editors and tools to edit databases dumped using the
+<b>-p</b> option before reloading them using the <a href="../../utility/db_load.html">db_load</a>
+utility.
+<p>Note that the definition of a printing character may vary from system to
+system, so database representations created using the <b>-p</b>
+option may be less portable than those created without it.
+<p>If the <b>-p</b> option to <a href="../../utility/db_dump.html">db_dump</a> or <a href="../../utility/db_dump.html">db_dump185</a> is
+not specified, each output line will consist of paired hexadecimal values;
+for example, the line <b>726f6f74</b> is the string <b>root</b> in
+the ASCII character set.
+<p>In all output formats, the key and data items are ended by a single line
+DATA=END.
+<p>Where multiple databases have been dumped from a file, the overall output
+will repeat; that is, a new set of headers and a new set of data items.
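+<p>For illustration (this example is not part of the original page), a dump
+of a small Btree database containing the single key/data pair "root"/"x"
+might look something like the following; the exact header keywords and
+values, such as the version number and page size, vary with the database:
+<p><blockquote><pre>VERSION=3
+format=bytevalue
+type=btree
+db_pagesize=4096
+HEADER=END
+ 726f6f74
+ 78
+DATA=END</pre></blockquote>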
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/dumpload/utility.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/text.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/dumpload/text.html b/libdb/docs/ref/dumpload/text.html
new file mode 100644
index 0000000..53153c2
--- /dev/null
+++ b/libdb/docs/ref/dumpload/text.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Loading text into databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td align=right><a href="../../ref/dumpload/format.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/file.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Loading text into databases</h1>
+<p>The <a href="../../utility/db_load.html">db_load</a> utility can be used to load text into databases.
+The <b>-T</b> option permits nondatabase applications to create
+flat-text files that are then loaded into databases for fast,
+highly concurrent access. For example, the following command loads the
+standard UNIX <b>/etc/passwd</b> file into a database, with the login
+name as the key item and the entire password entry as the data item:
+<p><blockquote><pre>awk -F: '{print $1; print $0}' &lt; /etc/passwd |\
+ sed 's/\\/\\\\/g' | db_load -T -t hash passwd.db</pre></blockquote>
+<p>Note that backslash characters naturally occurring in the text are escaped
+to avoid interpretation as escape characters by <a href="../../utility/db_load.html">db_load</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/dumpload/format.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/file.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/dumpload/utility.html b/libdb/docs/ref/dumpload/utility.html
new file mode 100644
index 0000000..9e7698f
--- /dev/null
+++ b/libdb/docs/ref/dumpload/utility.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: The db_dump and db_load utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Dumping and Reloading</dl></h3></td>
+<td align=right><a href="../../ref/sendmail/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/format.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>The db_dump and db_load utilities</h1>
+<p>There are three utilities used for dumping and loading Berkeley DB
+databases: <a href="../../utility/db_dump.html">db_dump</a>, <a href="../../utility/db_dump.html">db_dump185</a>, and <a href="../../utility/db_load.html">db_load</a>.
+<p>The <a href="../../utility/db_dump.html">db_dump</a> and <a href="../../utility/db_dump.html">db_dump185</a> utilities dump Berkeley DB
+databases into a flat-text representation of the data that can be read
+by <a href="../../utility/db_load.html">db_load</a>. The only difference between them is that
+<a href="../../utility/db_dump.html">db_dump</a> reads Berkeley DB version 2 and greater database formats,
+whereas <a href="../../utility/db_dump.html">db_dump185</a> reads Berkeley DB version 1.85 and 1.86 database
+formats.
+<p>The <a href="../../utility/db_load.html">db_load</a> utility reads either the output format used
+by the dump utilities or (optionally) a flat-text representation
+created using other tools, and stores it into a Berkeley DB database.
+<p>Dumping and reloading Hash databases that use user-defined hash functions
+will result in new databases that use the default hash function. Although
+using the default hash function may not be optimal for the new database,
+it will continue to work correctly.
+<p>Dumping and reloading Btree databases that use user-defined prefix or
+comparison functions will result in new databases that use the default
+prefix and comparison functions. In this case, it is quite likely that
+applications will be unable to retrieve records, and it is possible that
+the load process itself will fail.
+<p>The only available workaround for either Hash or Btree databases is to
+modify the sources for the <a href="../../utility/db_load.html">db_load</a> utility to load the database
+using the correct hash, prefix, and comparison functions.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/sendmail/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/format.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/create.html b/libdb/docs/ref/env/create.html
new file mode 100644
index 0000000..0f401f0
--- /dev/null
+++ b/libdb/docs/ref/env/create.html
@@ -0,0 +1,130 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Creating a database environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Creating a database environment</h1>
+<p>The Berkeley DB environment is created and described by the <a href="../../api_c/env_create.html">db_env_create</a>
+and <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> interfaces. In situations where customization is
+desired, such as storing log files on a separate disk drive or selecting
+a particular cache size, applications must describe the customization
+either by creating an environment configuration file in the environment
+home directory or by passing arguments to other <a href="../../api_c/env_class.html">DB_ENV</a> handle methods.
+<p>Once an environment has been created, database files specified using
+relative pathnames will be named relative to the home directory. Using
+pathnames relative to the home directory allows the entire environment
+to be easily moved, simplifying restoration and recovery of a database
+in a different directory or on a different system.
+<p>Applications first obtain an environment handle using the
+<a href="../../api_c/env_create.html">db_env_create</a> method, then call the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method which creates
+or joins the database environment. There are a number of options you
+can set to customize <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> for your environment. These
+options fall into four broad categories:
+<p><dl compact>
+<p><dt>Subsystem Initialization:<dd>These flags indicate which Berkeley DB subsystems will be initialized for the
+environment, and what operations will happen automatically when
+databases are accessed within the environment. The flags include
+<a href="../../api_c/env_open.html#DB_JOINENV">DB_JOINENV</a>, <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a>, <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>,
+<a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>, <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>, and <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>.
+The <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flag does initialization for Berkeley DB Concurrent Data Store
+applications. (See <a href="../../ref/cam/intro.html">Building Berkeley DB Concurrent Data Store
+applications</a> for more information.) The rest of the flags initialize
+a single subsystem; that is, when <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> is specified,
+applications reading and writing databases opened in this environment
+will be using locking to ensure that they do not overwrite each other's
+changes.
+<p><dt>Recovery options:<dd>These flags, which include <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> and
+<a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a>, indicate what recovery is to be performed on
+the environment before it is opened for normal use.
+<p><dt>Naming options:<dd>These flags, which include <a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> and
+<a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a>, modify how file naming happens in the
+environment.
+<p><dt>Miscellaneous:<dd>Finally, there are a number of miscellaneous flags, for example,
+<a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a> which causes underlying files to be created as
+necessary. See the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> manual pages for further
+information.
+</dl>
+<p>Most applications either specify only the <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flag or
+they specify all four subsystem initialization flags
+(<a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>, <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>, and
+<a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>). The former configuration is for applications that
+simply want to use the basic Access Method interfaces with a shared
+underlying buffer pool, but don't care about recoverability after
+application or system failure. The latter is for applications that need
+recoverability. There are situations in which other combinations of
+the initialization flags make sense, but they are rare.
+<p>The <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag is specified by applications that want to
+perform any necessary database recovery when they start running. That
+is, if there was a system or application failure the last time they ran,
+they want the databases to be made consistent before they start running
+again. It is not an error to specify this flag when no recovery needs
+to be done.
+<p>The <a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flag is more special-purpose. It performs
+catastrophic database recovery, and normally requires that some initial
+arrangements be made; that is, archived log files be brought back into
+the filesystem. Applications should not normally specify this flag.
+Instead, under these rare conditions, the <a href="../../utility/db_recover.html">db_recover</a> utility
+should be used.
+<p>The following is a simple example of a function that opens a database
+environment for a transactional program.
+<p><blockquote><pre>DB_ENV *
+db_setup(home, data_dir, errfp, progname)
+ char *home, *data_dir, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ /*
+ * Create an environment and initialize it for additional error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (NULL);
+ }
+ dbenv-&gt;set_errfile(dbenv, errfp);
+ dbenv-&gt;set_errpfx(dbenv, progname);
+<p>
+ /*
+ * Specify the shared memory buffer pool cachesize: 5MB.
+ * Databases are in a subdirectory of the environment home.
+ */
+ if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 5 * 1024 * 1024, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "set_cachesize");
+ goto err;
+ }
+ if ((ret = dbenv-&gt;set_data_dir(dbenv, data_dir)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "set_data_dir: %s", data_dir);
+ goto err;
+ }
+<p>
+ /* Open the environment with full transactional support. */
+ if ((ret = dbenv-&gt;open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "environment open: %s", home);
+ goto err;
+ }
+<p>
+ return (dbenv);
+<p>
+err: (void)dbenv-&gt;close(dbenv, 0);
+ return (NULL);
+}</pre></blockquote>
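+<p>If the application should also run normal recovery each time it
+starts (as described above), the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag can simply be added
+to the flags passed to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>. The following fragment is a
+sketch of that variation of the open call in the function above, not a
+separate interface:
+<p><blockquote><pre>	/* Open the environment, running recovery first if necessary. */
+	if ((ret = dbenv-&gt;open(dbenv, home, DB_CREATE | DB_RECOVER |
+	    DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "environment open: %s", home);
+		goto err;
+	}</pre></blockquote>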
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/db_config.html b/libdb/docs/ref/env/db_config.html
new file mode 100644
index 0000000..0b26450
--- /dev/null
+++ b/libdb/docs/ref/env/db_config.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: DB_CONFIG configuration file</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/naming.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>DB_CONFIG configuration file</h1>
+<p>Most of the configuration information that can be specified to
+<a href="../../api_c/env_class.html">DB_ENV</a> methods can also be specified using a configuration file.
+If an environment home directory has been specified (either by the
+application specifying a non-NULL <b>db_home</b> argument to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, or by the application setting the
+<a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags and the
+<a href="../../ref/env/naming.html#DB_HOME">DB_HOME</a> environment variable being set), any file named
+DB_CONFIG in the database home directory will be read for lines
+of the format <b>NAME VALUE</b>.
+<p>One or more whitespace characters are used to delimit the two parts of
+the line, and trailing whitespace characters are discarded. All empty
+lines or lines whose first character is a whitespace or hash
+(<b>#</b>) character will be ignored. Each line must specify both
+the NAME and the VALUE of the pair. The specific NAME VALUE pairs are
+documented in the manual for the corresponding methods (for example,
+the <a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> documentation includes NAME VALUE pair
+information that Berkeley DB administrators can use to configure locations for
+database files).
+<p>The DB_CONFIG configuration file is intended to allow database
+environment administrators to customize environments independent of
+applications using the environment. For example, a database
+administrator can move the database log and data files to a different
+location without application recompilation. In addition, because the
+DB_CONFIG file is read when the database environment is opened,
+it can be used to overrule application configuration done before that
+time. For example, a database administrator could override the
+compiled-in application cache size to a size more appropriate for a
+specific machine.
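+<p>As an illustration only (the NAME VALUE pairs an application actually
+needs are documented with the corresponding <a href="../../api_c/env_class.html">DB_ENV</a> methods), a
+hypothetical DB_CONFIG file overriding the cache size and file
+locations might look like the following:
+<p><blockquote><pre># Hypothetical DB_CONFIG file in the environment home directory.
+# Each line is a NAME VALUE pair mirroring a DB_ENV method.
+set_cachesize 0 5242880 0
+set_data_dir datadir
+set_lg_dir logdir</pre></blockquote>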
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/naming.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/encrypt.html b/libdb/docs/ref/env/encrypt.html
new file mode 100644
index 0000000..2c4c6c3
--- /dev/null
+++ b/libdb/docs/ref/env/encrypt.html
@@ -0,0 +1,99 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Encryption</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/security.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/remote.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Encryption</h1>
+<p>Berkeley DB releases optionally include strong cryptography support; this release
+<b>DOES NOT</b> contain cryptography support.
+<p><b>Note that export/import and/or use of cryptography
+software, or even communicating technical details about cryptography
+software, is illegal in some parts of the world. You are strongly
+advised to pay close attention to any export/import and/or use laws
+which apply to you when you import a release of Berkeley DB including
+cryptography into your country or re-distribute source code from it in
+any way.</b>
+<p>Berkeley DB supports encryption using the Rijndael/AES (also known as the
+Advanced Encryption Standard and Federal Information Processing Standard
+(FIPS) 197) algorithm for encryption or decryption. The algorithm is
+configured to use a 128-bit key. Berkeley DB uses a 16-byte initialization
+vector generated using the Mersenne Twister. All encrypted information
+is additionally checksummed using the SHA1 Secure Hash Algorithm, using
+a 160-bit message digest.
+<p>The encryption support provided with Berkeley DB is intended to protect
+applications from an attacker obtaining physical access to the media on
+which a Berkeley DB database is stored, or an attacker compromising a system
+on which Berkeley DB is running but who is unable to read system or process
+memory on that system.
+<b>The encryption support provided with Berkeley DB will not protect applications
+from attackers able to read system memory on the system where Berkeley DB is
+running.</b>
+<p>Encryption is not the default for created databases, even in database
+environments configured for encryption. In addition to configuring for
+encryption by calling the <a href="../../api_c/env_set_encrypt.html">DB_ENV-&gt;set_encrypt</a> or
+<a href="../../api_c/db_set_encrypt.html">DB-&gt;set_encrypt</a> methods, applications must specify the
+<a href="../../api_c/db_set_flags.html#DB_ENCRYPT">DB_ENCRYPT</a> flag before creating the database in order for the
+database to be encrypted. Further, databases cannot be converted to an
+encrypted format after they have been created without dumping and
+re-creating them.
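+<p>As a sketch only (this release does not include cryptography support,
+and the password and filename below are placeholders), configuring an
+encrypted database in a release built with cryptography support might
+look roughly like the following:
+<p><blockquote><pre>	DB_ENV *dbenv;
+	DB *dbp;
+	int ret;
+	/* Assumes dbenv was created with db_env_create. */
+	if ((ret = dbenv-&gt;set_encrypt(dbenv, "my_password", DB_ENCRYPT_AES)) != 0)
+		return (ret);
+	/* ... open the environment ... */
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+		return (ret);
+	/* The DB_ENCRYPT flag must be set before the database is created. */
+	if ((ret = dbp-&gt;set_flags(dbp, DB_ENCRYPT)) != 0)
+		return (ret);
+	if ((ret = dbp-&gt;open(dbp, NULL, "encrypted.db", NULL,
+	    DB_BTREE, DB_CREATE, 0600)) != 0)
+		return (ret);</pre></blockquote>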
+<p>Each encrypted database environment (including all its encrypted
+databases) is encrypted using a single password and a single algorithm.
+Applications wanting to provide a finer granularity of database access
+must either use multiple database environments or implement additional
+access controls outside of Berkeley DB.
+<p>The only encrypted parts of a database environment are its databases
+and its log files. Specifically, the <a href="../../ref/env/region.html">Shared memory regions</a> supporting the database environment are not
+encrypted. For this reason, it may be possible for an attacker to read
+some or all of an encrypted database by reading the on-disk files that
+back these shared memory regions. To prevent such attacks, applications
+may want to use in-memory filesystem support (on systems that support
+it), or the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> or <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flags to the
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method, to place the shared memory regions in memory that
+is never written to a disk. As some systems page system memory to a
+backing disk, it is important to consider the specific operating system
+running on the machine as well. Finally, when backing database
+environment shared regions with the filesystem, Berkeley DB can be configured
+to overwrite the shared regions before removing them by specifying the
+<a href="../../api_c/env_set_flags.html#DB_OVERWRITE">DB_OVERWRITE</a> flag. This option is only effective in the
+presence of fixed-block filesystems; journaling or logging filesystems
+will require operating system support and probably modification of the
+Berkeley DB sources.
+<p>While all user data is encrypted, parts of the databases and log files
+in an encrypted environment are maintained in an unencrypted state.
+Specifically, log record headers are not encrypted, only the actual log
+records. Additionally, database internal page header fields are not
+encrypted. These page header fields include information such as the
+page's <a href="../../api_c/lsn_class.html">DB_LSN</a>, number, and position in the database's sort
+order.
+<p>Log records distributed by a replication master to replicated clients are
+transmitted to the clients in unencrypted form. If encryption is
+desired in a replicated application, the use of a secure transport
+is strongly suggested.
+<p>Sleepycat Software gratefully acknowledges:
+<p><ul type=disc>
+<li>Vincent Rijmen, Antoon Bosselaers and Paulo Barreto for writing the
+Rijndael/AES code used in Berkeley DB.
+<li>Steve Reid and James H. Brown for writing the SHA1 checksum code used
+in Berkeley DB.
+<li>Makoto Matsumoto and Takuji Nishimura for writing the Mersenne Twister
+code used in Berkeley DB.
+<li>Adam Stubblefield for integrating the Rijndael/AES, SHA1 checksum and
+Mersenne Twister code into Berkeley DB.
+</ul>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/security.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/remote.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/error.html b/libdb/docs/ref/env/error.html
new file mode 100644
index 0000000..23b9d98
--- /dev/null
+++ b/libdb/docs/ref/env/error.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error support</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/db_config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error support</h1>
+<p>Berkeley DB offers programmatic support for displaying error return values.
+The <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return. This is similar to
+the ANSI C strerror interface, but can handle both system error returns
+and Berkeley DB-specific return values.
+<p>For example:
+<p><blockquote><pre>int ret;
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 32 * 1024, 1)) != 0) {
+ fprintf(stderr, "set_cachesize failed: %s\n", db_strerror(ret));
+ return (1);
+}</pre></blockquote>
+<p>There are also two additional error interfaces: <a href="../../api_c/env_err.html">DB_ENV-&gt;err</a> and
+<a href="../../api_c/env_err.html">DB_ENV-&gt;errx</a>. These functions work like the ANSI C printf
+interface, taking a printf-style format string and argument list, and
+writing a message constructed from the format string and arguments.
+<p>The <a href="../../api_c/env_err.html">DB_ENV-&gt;err</a> function appends the standard error string to the
+constructed message; the <a href="../../api_c/env_err.html">DB_ENV-&gt;errx</a> function does not.
+<p>Error messages can be configured always to include a prefix (for
+example, the program name) using the <a href="../../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>
+interface.
+<p>These functions provide simpler ways of displaying Berkeley DB error messages:
+<p><blockquote><pre>int ret;
+dbenv-&gt;set_errpfx(dbenv, program_name);
+if ((ret = dbenv-&gt;open(dbenv, home,
+    DB_CREATE | DB_INIT_LOG | DB_INIT_TXN | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv-&gt;err(dbenv, ret, "open: %s", home);
+ dbenv-&gt;errx(dbenv,
+ "contact your system administrator: session ID was %d",
+ session_id);
+ return (1);
+}</pre></blockquote>
+<p>For example, if the program was called "my_app", and it tried to open
+an environment home directory in "/tmp/home" and the open call returned
+a permission error, the error messages shown would look like this:
+<p><blockquote><pre>my_app: open: /tmp/home: Permission denied.
+my_app: contact your system administrator: session ID was 2</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/db_config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/faq.html b/libdb/docs/ref/env/faq.html
new file mode 100644
index 0000000..a17072b
--- /dev/null
+++ b/libdb/docs/ref/env/faq.html
@@ -0,0 +1,60 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Environment FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/remote.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/cam/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Environment FAQ</h1>
+<p><ol>
+<p><li><b>I'm using multiple processes to access a Berkeley DB database
+environment; is there any way to ensure that two processes don't run
+recovery at the same time, or that all processes have exited the
+database environment so that recovery can be run?</b>
+<p>It's the responsibility of the processes (and/or threads of control)
+using a Berkeley DB environment to ensure recovery is never performed if there
+are other processes running recovery or using an existing database
+environment. It would be great if Berkeley DB could solve this, but it
+requires a way to single-thread execution on a system, and there's
+rarely anything Berkeley DB can use for that purpose -- let alone a portable
+method.
+<p>Most application suites solve this problem by writing a tiny watch
+program that recovers the database environment and then runs the
+processes that actually use the database environment to perform work.
+The watcher program then monitors the working processes, and if any of
+them exit badly for any reason, the watcher kills any remaining
+processes and restarts the cycle.
+<p><li><b>How can I associate application information with a <a href="../../api_c/db_class.html">DB</a>
+or <a href="../../api_c/env_class.html">DB_ENV</a> handle?</b>
+<p>In the C API, the <a href="../../api_c/db_class.html">DB</a> and <a href="../../api_c/env_class.html">DB_ENV</a> structures each contain
+an "app_private" field intended to be used to reference
+application-specific information. See the <a href="../../api_c/db_create.html">db_create</a> and
+<a href="../../api_c/env_create.html">db_env_create</a> documentation for more information.
+<p>In the C++ or Java APIs, the easiest way to associate
+application-specific data with a handle is to subclass the <a href="../../api_cxx/db_class.html">Db</a>
+or <a href="../../api_cxx/env_class.html">DbEnv</a>, for example subclassing <a href="../../api_cxx/db_class.html">Db</a> to get MyDb.
+Objects of type MyDb will still have the Berkeley DB API methods available on
+them, and you can put any extra data or methods you want into the MyDb
+class. If you are using "callback" APIs that take <a href="../../api_cxx/db_class.html">Db</a> or
+<a href="../../api_cxx/env_class.html">DbEnv</a> arguments (for example, <a href="../../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>)
+these will always be called with the <a href="../../api_cxx/db_class.html">Db</a> or <a href="../../api_cxx/env_class.html">DbEnv</a>
+objects you create. So if you always use MyDb objects, you will be able
+to take the first argument to the callback function and cast it to a
+MyDb (in C++, cast it to (MyDb*)). That will allow you to access your
+data members or methods.
+</ol>
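+<p>The following is a minimal C sketch of the "app_private" pattern
+described in the list above; the structure and variable names other
+than app_private are hypothetical:
+<p><blockquote><pre>struct my_app_data {
+	int id;				/* Hypothetical application data. */
+};
+<p>
+struct my_app_data data, *p;
+DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, NULL, 0)) != 0)
+	return (ret);
+dbp-&gt;app_private = &data;	/* Attach application data to the handle. */
+/* ... */
+p = dbp-&gt;app_private;		/* Retrieve it later, e.g., in a callback. */</pre></blockquote>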
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/remote.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/cam/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/intro.html b/libdb/docs/ref/env/intro.html
new file mode 100644
index 0000000..a6e960a
--- /dev/null
+++ b/libdb/docs/ref/env/intro.html
@@ -0,0 +1,118 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/arch/utilities.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/create.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>A Berkeley DB environment is an encapsulation of one or more databases, log
+files and region files. Region files are the shared memory areas that
+contain information about the database environment such as memory pool
+cache pages. Only databases are byte-order independent and only
+database files can be moved between machines of different byte orders.
+Log files can be moved between machines of the same byte order. Region
+files are usually unique to a specific machine and potentially to a
+specific operating system release.
+<p>The simplest way to administer a Berkeley DB application environment is to
+create a single <b>home</b> directory that stores the files for the
+applications that will share the environment. The environment home
+directory must be created before any Berkeley DB applications are run. Berkeley DB
+itself never creates the environment home directory. The environment can
+then be identified by the name of that directory.
+<p>An environment may be shared by any number of processes, as well as by
+any number of threads within those processes. It is possible for an
+environment to include resources from other directories on the system,
+and applications often choose to distribute resources to other
+directories or disks for performance or other reasons. However, by
+default, the databases, shared regions (the locking, logging, memory
+pool, and transaction shared memory areas) and log files will be stored
+in a single directory hierarchy.
+<p>It is important to realize that all applications sharing a database
+environment implicitly trust each other. They have access to each
+other's data as it resides in the shared regions, and they will share
+resources such as buffer space and locks. At the same time, any
+applications using the same databases <b>must</b> share an environment
+if consistency is to be maintained between them.
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Database Environments and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/env_close.html">DB_ENV-&gt;close</a></td><td>Close an environment</td></tr>
+<tr><td><a href="../../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a></td><td>Remove a database</td></tr>
+<tr><td><a href="../../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a></td><td>Rename a database</td></tr>
+<tr><td><a href="../../api_c/env_err.html">DB_ENV-&gt;err</a></td><td>Error message with error string</td></tr>
+<tr><td><a href="../../api_c/env_err.html">DB_ENV-&gt;errx</a></td><td>Error message</td></tr>
+<tr><td><a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../../api_c/log_file.html">DB_ENV-&gt;log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../../api_c/env_open.html">DB_ENV-&gt;open</a></td><td>Open an environment</td></tr>
+<tr><td><a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a></td><td>Remove an environment</td></tr>
+<tr><td><a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a></td><td>Replication statistics</td></tr>
+<tr><td><a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a></td><td>Configure application recovery interface</td></tr>
+<tr><td><a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><a href="../../api_c/env_set_encrypt.html">DB_ENV-&gt;set_encrypt</a></td><td>Set the environment cryptographic key</td></tr>
+<tr><td><a href="../../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><a href="../../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><a href="../../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><a href="../../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><a href="../../api_c/env_set_feedback.html">DB_ENV-&gt;set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><a href="../../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><a href="../../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><a href="../../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><a href="../../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/arch/utilities.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/create.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/naming.html b/libdb/docs/ref/env/naming.html
new file mode 100644
index 0000000..0086600
--- /dev/null
+++ b/libdb/docs/ref/env/naming.html
@@ -0,0 +1,128 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: File naming</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/db_config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/region.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>File naming</h1>
+<p>One of the most important tasks of the database environment is to
+structure file naming within Berkeley DB. Cooperating applications (or
+multiple invocations of the same application) must agree on the location
+of the database environment, log files and other files used by the Berkeley DB
+subsystems, and, of course, the database files. Although it is possible
+to specify full pathnames to all Berkeley DB functions, this is
+cumbersome and requires that applications be recompiled when database
+files are moved.
+<p>Applications are normally expected to specify a single directory home
+for the database environment. This can be done easily in the call to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> by specifying a value for the <b>db_home</b>
+argument. There are more complex configurations in which it may be
+desirable to override <b>db_home</b> or provide supplementary path
+information.
+<h3>Specifying file naming to Berkeley DB</h3>
+<p>The following list describes the possible ways in which file naming
+information may be specified to the Berkeley DB library. The specific
+circumstances and order in which these ways are applied are described
+in a subsequent paragraph.
+<p><dl compact>
+<p><dt><a name="db_home">db_home</a><dd>If the <b>db_home</b> argument to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> is non-NULL,
+its value may be used as the database home, and files named relative to
+its path.
+<p><dt><a name="DB_HOME">DB_HOME</a><dd>If the DB_HOME environment variable is set when <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> is
+called, its value may be used as the database home, and files named
+relative to its path.
+<p>The DB_HOME environment variable is intended to permit users and system
+administrators to override application and installation defaults. For
+example:
+<p><blockquote><pre>env DB_HOME=/database/my_home application</pre></blockquote>
+<p>Application writers are encouraged to support the <b>-h</b> option
+found in the supporting Berkeley DB utilities to let users specify a database
+home.
+<p><dt><a href="../../api_c/env_class.html">DB_ENV</a> methods<dd>There are three <a href="../../api_c/env_class.html">DB_ENV</a> methods that affect file naming. The
+<a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> method specifies a directory to search for database
+files. The <a href="../../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a> method specifies a directory in which to
+create logging files. The <a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a> method specifies a
+directory in which to create backing temporary files. These methods
+are intended to permit applications to customize a file location for a
+database. For example, an application writer can place data files and
+log files in different directories or instantiate a new log directory
+each time the application runs.
+<p><dt><a href="../../ref/env/db_config.html#DB_CONFIG">DB_CONFIG</a><dd>The same information specified to the <a href="../../api_c/env_class.html">DB_ENV</a> methods may also be
+specified using the <a href="../../ref/env/db_config.html#DB_CONFIG">DB_CONFIG</a> configuration file.
+</dl>
+<h3>Filename resolution in Berkeley DB</h3>
+<p>The following list describes the specific circumstances and order in
+which the different ways of specifying file naming information are
+applied. Berkeley DB filename processing proceeds sequentially through the
+following steps:
+<p><dl compact>
+<p><dt>absolute pathnames<dd>If the filename specified to a Berkeley DB function is an <i>absolute
+pathname</i>, that filename is used without modification by Berkeley DB.
+<p>On UNIX systems, an absolute pathname is defined as any pathname that
+begins with a leading slash (<b>/</b>).
+<p>On Windows systems, an absolute pathname is any pathname that begins with
+a leading slash or leading backslash (<b>\</b>); or any
+pathname beginning with a single alphabetic character, a colon and a
+leading slash or backslash (for example, <b>C:/tmp</b>).
+<p><dt><a href="../../api_c/env_class.html">DB_ENV</a> methods, DB_CONFIG<dd>If a relevant configuration string (for example, set_data_dir), is
+specified either by calling a <a href="../../api_c/env_class.html">DB_ENV</a> method or as a line in the
+DB_CONFIG configuration file, the value is prepended to the filename.
+If the resulting filename is an absolute pathname, the filename is used
+without further modification by Berkeley DB.
+<p><dt>db_home<dd>If the application specified a non-NULL <b>db_home</b> argument to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, its value is prepended to the filename. If the
+resulting filename is an absolute pathname, the filename is used without
+further modification by Berkeley DB.
+<p><dt>DB_HOME<dd>If the <b>db_home</b> argument is NULL, the DB_HOME environment variable
+was set, and the application has set the appropriate DB_USE_ENVIRON or
+DB_USE_ENVIRON_ROOT environment variable, its value is prepended to the
+filename. If the resulting filename is an absolute pathname, the file
+name is used without further modification by Berkeley DB.
+<p><dt>default<dd>Finally, all filenames are interpreted relative to the current working
+directory of the process.
+</dl>
+<p>The common model for a Berkeley DB environment is one in which only the DB_HOME
+environment variable or the <b>db_home</b> argument is specified. In
+this case, all data filenames are relative to that directory, and all
+files created by the Berkeley DB subsystems will be created in that directory.
+<p>The more complex model for a transaction environment might be one in
+which a database home is specified, using either the DB_HOME environment
+variable or the <b>db_home</b> argument to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>; and then
+the data directory and logging directory are set to the relative
+pathnames of directories underneath the environment home.
+<h3>Examples</h3>
+<p>Store all files in the directory <b>/a/database</b>:
+<p><blockquote><pre>dbenv-&gt;open(dbenv, "/a/database", flags, mode);</pre></blockquote>
+<p>Create temporary backing files in <b>/b/temporary</b>, and all other files
+in <b>/a/database</b>:
+<p><blockquote><pre>dbenv-&gt;set_tmp_dir(dbenv, "/b/temporary");
+dbenv-&gt;open(dbenv, "/a/database", flags, mode);</pre></blockquote>
+<p>Store data files in <b>/a/database/datadir</b>, log files in
+<b>/a/database/logdir</b>, and all other files in the directory
+<b>/a/database</b>:
+<p><blockquote><pre>dbenv-&gt;set_lg_dir(dbenv, "logdir");
+dbenv-&gt;set_data_dir(dbenv, "datadir");
+dbenv-&gt;open(dbenv, "/a/database", flags, mode);</pre></blockquote>
+<p>Store data files in <b>/a/database/data1</b> and <b>/b/data2</b>, and
+all other files in the directory <b>/a/database</b>. Any data files
+that are created will be created in <b>/b/data2</b>, because it is
+the first data file directory specified:
+<p><blockquote><pre>dbenv-&gt;set_data_dir(dbenv, "/b/data2");
+dbenv-&gt;set_data_dir(dbenv, "data1");
+dbenv-&gt;open(dbenv, "/a/database", flags, mode);</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/db_config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/region.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/open.html b/libdb/docs/ref/env/open.html
new file mode 100644
index 0000000..4e3fd43
--- /dev/null
+++ b/libdb/docs/ref/env/open.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening databases within the environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/create.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening databases within the environment</h1>
+<p>Once the environment has been created, database handles may be created
+and then opened within the environment. This is done by calling the
+<a href="../../api_c/db_create.html">db_create</a> interface and specifying the appropriate environment
+as an argument.
+<p>File naming, database operations, and error handling will all be done as
+specified for the environment. For example, if the <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>
+or <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> flags were specified when the environment was
+created or joined, database operations will automatically perform all
+necessary locking operations for the application.
+<p>The following is a simple example of opening two databases within a
+database environment:
+<p><blockquote><pre> DB_ENV *dbenv;
+ DB *dbp1, *dbp2;
+ int ret;
+</pre></blockquote>
+<p><blockquote><pre> /* Open an environment with just a memory pool. */
+ if ((ret =
+ dbenv-&gt;open(dbenv, home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "environment open: %s", home);
+ return (ret);
+ }
+<p>
+ /* Open database #1. */
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "database create");
+ return (ret);
+ }
+ if ((ret = dbp1-&gt;open(dbp1,
+ NULL, DATABASE1, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB-&gt;open: %s", DATABASE1);
+ return (ret);
+ }
+<p>
+ /* Open database #2. */
+ if ((ret = db_create(&dbp2, dbenv, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "database create");
+ return (ret);
+ }
+ if ((ret = dbp2-&gt;open(dbp2,
+ NULL, DATABASE2, NULL, DB_HASH, DB_CREATE, 0664)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB-&gt;open: %s", DATABASE2);
+ return (ret);
+ }
+</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/create.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/region.html b/libdb/docs/ref/env/region.html
new file mode 100644
index 0000000..9cda3fc
--- /dev/null
+++ b/libdb/docs/ref/env/region.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Shared memory regions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/naming.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/security.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Shared memory regions</h1>
+<p>Each of the Berkeley DB subsystems within an environment is described by one or
+more regions. The regions contain all of the per-process and per-thread
+shared information, including mutexes, that comprise a Berkeley DB environment.
+These regions are created in one of three areas, depending on the flags
+specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> function:
+<p><ol>
+<p><li>If the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag is specified to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>,
+regions are created in per-process heap memory; that is, memory returned
+by <b>malloc</b>(3). In this case, the Berkeley DB environment may only
+be accessed by a single process, although that process may be
+multithreaded. (A short sketch of this case appears below.)
+<p><li>If the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag is specified to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>,
+regions are created in system memory. When regions are created in system
+memory, the Berkeley DB environment may be accessed by both multiple processes
+and multiple threads within processes.
+<p>The system memory used by Berkeley DB is potentially useful past the lifetime
+of any particular process. Therefore, additional cleanup may be
+necessary after an application fails because there may be no way for
+Berkeley DB to ensure that system resources backing the shared memory regions
+are returned to the system.
+<p>The system memory that is used is architecture-dependent. For example,
+on systems supporting X/Open-style shared memory interfaces, such as
+UNIX systems, the <b>shmget</b>(2) and related System V IPC
+interfaces are used. Additionally, VxWorks systems use system memory.
+In these cases, an initial segment ID must be specified by the
+application to ensure that applications do not overwrite each other's
+database environments, and so that the number of segments created does not
+grow without bounds. See the <a href="../../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a> method for more
+information.
+<p>On Windows platforms, the use of the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> flag is
+problematic because the operating system uses reference counting to
+clean up shared objects in the paging file automatically. See
+<a href="../../ref/build_win/notes.html">Windows Notes</a> for more
+information.
+<p><li>If no memory-related flags are specified to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, memory
+backed by the filesystem is used to store the regions. On UNIX systems,
+the Berkeley DB library will use the POSIX mmap interface. If mmap is not
+available, the UNIX shmget interfaces will be used, assuming they are
+available.
+</ol>
+<a name="2"><!--meow--></a>
+<p>Any files created in the filesystem to back the regions are created in
+the environment home directory specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> call.
+These files are named __db.### (for example, __db.001, __db.002 and
+so on). When region files are backed by the filesystem, one file per
+region is created. When region files are backed by system memory, a
+single file will still be created because there must be a well-known
+name in the filesystem so that multiple processes can locate the system
+shared memory that is being used by the environment.
+<p>Statistics about the shared memory regions in the environment can be
+displayed using the <b>-e</b> option to the <a href="../../utility/db_stat.html">db_stat</a> utility.
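+<p>As a rough illustration of the first case in the list above, a
+single-process (possibly multithreaded) application might create its
+regions in per-process heap memory as follows; this is a sketch only,
+and assumes a <b>home</b> path variable:
+<p><blockquote><pre>	DB_ENV *dbenv;
+	int ret;
+	if ((ret = db_env_create(&dbenv, 0)) != 0)
+		return (ret);
+	/* Regions in heap memory; accessible only from this process. */
+	if ((ret = dbenv-&gt;open(dbenv, home,
+	    DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "environment open: %s", home);
+		return (ret);
+	}</pre></blockquote>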
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/naming.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/security.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/remote.html b/libdb/docs/ref/env/remote.html
new file mode 100644
index 0000000..0390fdc
--- /dev/null
+++ b/libdb/docs/ref/env/remote.html
@@ -0,0 +1,50 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Remote filesystems</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/encrypt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Remote filesystems</h1>
+<p>When regions are backed by the filesystem, it is a common error to attempt
+to create Berkeley DB environments backed by remote filesystems such as the
+Network File System (NFS) or the Andrew File System (AFS). Remote
+filesystems rarely support mapping files into process memory, and even
+more rarely support correct semantics for mutexes after the attempt
+succeeds. For this reason, we strongly recommend that the database
+environment directory reside in a local filesystem.
+<p>For remote filesystems that do allow system files to be mapped into
+process memory, home directories accessed via remote filesystems cannot
+be used simultaneously from multiple clients. None of the commercial
+remote filesystems available today implement coherent, distributed
+shared memory for remote-mounted files. As a result, different machines
+will see different versions of these shared regions, and the system
+behavior is undefined.
+<p>Databases, log files, and temporary files may be placed on remote
+filesystems, <b>as long as the remote filesystem fully supports
+standard POSIX filesystem semantics</b> (although the application may
+incur a performance penalty for doing so). Obviously, NFS-mounted
+databases cannot be accessed from more than one Berkeley DB environment at a
+time (and therefore from more than one system), because no Berkeley DB
+database may be accessed from more than one Berkeley DB environment at a
+time.
+<p><dl compact>
+<p><dt>Linux note:<dd>Some Linux releases are known to not support complete semantics for the
+POSIX fsync call on NFS-mounted filesystems. No Berkeley DB files should be
+placed on NFS-mounted filesystems on these systems.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/encrypt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/env/security.html b/libdb/docs/ref/env/security.html
new file mode 100644
index 0000000..26632e2
--- /dev/null
+++ b/libdb/docs/ref/env/security.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Security</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Environment</dl></h3></td>
+<td align=right><a href="../../ref/env/region.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/encrypt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Security</h1>
+<p>The following are security issues that should be considered when writing
+Berkeley DB applications:
+<p><dl compact>
+<p><dt>Database environment permissions<dd>The directory used as the Berkeley DB database environment should have its
+permissions set to ensure that files in the environment are not accessible
+to users without appropriate permissions. Applications that add to the
+user's permissions (for example, UNIX setuid or setgid applications)
+must be carefully checked to not permit illegal use of those permissions
+such as general file access in the environment directory.
+<p><dt>Environment variables<dd>Setting the <a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> and <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags
+and allowing the use of environment variables during file naming can be
+dangerous. Setting those flags in Berkeley DB applications with additional
+permissions (for example, UNIX setuid or setgid applications) could
+potentially allow users to read and write databases to which they would
+not normally have access.
+<p><dt>File permissions<dd>By default, Berkeley DB always creates files readable and writable by the owner
+and the group (that is, S_IRUSR, S_IWUSR, S_IRGRP and S_IWGRP; or octal mode
+0660 on historic UNIX systems). The group ownership of created files is
+based on the system and directory defaults, and is not further specified
+by Berkeley DB.
+<p><dt>Temporary backing files<dd>If an unnamed database is created and the cache is too small to hold
+the database in memory, Berkeley DB will create a temporary physical file to
+enable it to page the database to disk as needed. In this case,
+environment variables such as <b>TMPDIR</b> may be used to specify
+the location of that temporary file. Although temporary backing files
+are created readable and writable by the owner only (S_IRUSR and
+S_IWUSR, or octal mode 0600 on historic UNIX systems), some filesystems
+may not sufficiently protect temporary files created in random
+directories from improper access. To be absolutely safe, applications
+storing sensitive data in unnamed databases should use the
+<a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a> method to specify a temporary directory with
+known permissions, as sketched in the example following this list.
+</dl>
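+<p>The following is a minimal sketch of the "Temporary backing files"
+item above (the directory names are hypothetical and error handling is
+abbreviated); note that the temporary directory must be set on the
+environment handle before <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>
+is called:
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+    DB_ENV *dbenv;
+
+    if (db_env_create(&amp;dbenv, 0) != 0)
+        return (EXIT_FAILURE);
+
+    /*
+     * Direct temporary backing files to a directory with known,
+     * restrictive permissions; this must precede DB_ENV-&gt;open.
+     */
+    if (dbenv-&gt;set_tmp_dir(dbenv, "/var/myapp/tmp") != 0 ||
+        dbenv-&gt;open(dbenv, "/var/myapp/envhome",
+        DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (EXIT_FAILURE);
+    }
+
+    (void)dbenv-&gt;close(dbenv, 0);
+    return (EXIT_SUCCESS);
+}</pre></blockquote>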
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/env/region.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/env/encrypt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/install/file.html b/libdb/docs/ref/install/file.html
new file mode 100644
index 0000000..8149de3
--- /dev/null
+++ b/libdb/docs/ref/install/file.html
@@ -0,0 +1,38 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: File utility /etc/magic information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>System Installation Notes</dl></h3></td>
+<td align=right><a href="../../ref/dumpload/text.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/multiple.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>File utility /etc/magic information</h1>
+<p>The <b>file</b>(1) utility is a UNIX utility that examines and
+classifies files, based on information found in its database of file
+types, the /etc/magic file. The following information may be added
+to your system's /etc/magic file to enable <b>file</b>(1) to
+correctly identify Berkeley DB database files.
+<p>The <b>file</b>(1) utility <b>magic</b>(5) information for the
+standard System V UNIX implementation of the <b>file</b>(1) utility
+is included in the Berkeley DB distribution for both
+<a href="magic.s5.be.txt">big-endian</a> (for example, Sparc) and
+<a href="magic.s5.le.txt">little-endian</a> (for example, x86) architectures.
+<p>The <b>file</b>(1) utility <b>magic</b>(5) information for
+Release 3.X of Ian Darwin's implementation of the file utility (as
+distributed by FreeBSD and most Linux distributions) is included in the
+Berkeley DB distribution. This <a href="magic.txt">magic.txt</a> information
+is correct for both big-endian and little-endian architectures.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/dumpload/text.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/multiple.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/install/magic.s5.be.txt b/libdb/docs/ref/install/magic.s5.be.txt
new file mode 100644
index 0000000..2802ca9
--- /dev/null
+++ b/libdb/docs/ref/install/magic.s5.be.txt
@@ -0,0 +1,87 @@
+# Berkeley DB
+# $Id$
+#
+# System V /etc/magic files: big-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00053162 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x00000002 version 2,
+>4 long 0x00000003 version 3,
+>0 long 0x00053162 native byte-order)
+
+0 long 0x62310500 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>0 long 0x62310500 little-endian)
+
+12 long 0x00053162 Berkeley DB (Btree,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00053162 native byte-order)
+
+12 long 0x62310500 Berkeley DB (Btree,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x62310500 little-endian)
+
+0 long 0x00061561 Berkeley DB
+>4 long >2 1.86
+>4 long <3 1.85
+>0 long 0x00061561 (Hash,
+>4 long 2 version 2,
+>4 long 3 version 3,
+>8 long 0x000004D2 little-endian)
+>8 long 0x000010E1 native byte-order)
+
+12 long 0x00061561 Berkeley DB (Hash,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00061561 native byte-order)
+
+12 long 0x61150600 Berkeley DB (Hash,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x61150600 little-endian)
+
+12 long 0x00042253 Berkeley DB (Queue,
+>16 long 0x00000001 version 1,
+>16 long 0x00000002 version 2,
+>16 long 0x00000003 version 3,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00042253 native byte-order)
+
+12 long 0x53220400 Berkeley DB (Queue,
+>16 long 0x01000000 version 1,
+>16 long 0x02000000 version 2,
+>16 long 0x03000000 version 3,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x53220400 little-endian)
diff --git a/libdb/docs/ref/install/magic.s5.le.txt b/libdb/docs/ref/install/magic.s5.le.txt
new file mode 100644
index 0000000..0216142
--- /dev/null
+++ b/libdb/docs/ref/install/magic.s5.le.txt
@@ -0,0 +1,87 @@
+# Berkeley DB
+# $Id$
+#
+# System V /etc/magic files: little-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00053162 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x00000002 version 2,
+>4 long 0x00000003 version 3,
+>0 long 0x00053162 native byte-order)
+
+0 long 0x62310500 Berkeley DB 1.85/1.86 (Btree,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>0 long 0x62310500 big-endian)
+
+12 long 0x00053162 Berkeley DB (Btree,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00053162 native byte-order)
+
+12 long 0x62310500 Berkeley DB (Btree,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x62310500 big-endian)
+
+0 long 0x61150600 Berkeley DB
+>4 long >0x02000000 1.86
+>4 long <0x03000000 1.85
+>0 long 0x00061561 (Hash,
+>4 long 0x02000000 version 2,
+>4 long 0x03000000 version 3,
+>8 long 0xD2040000 native byte-order)
+>8 long 0xE1100000 big-endian)
+
+12 long 0x00061561 Berkeley DB (Hash,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00061561 native byte-order)
+
+12 long 0x61150600 Berkeley DB (Hash,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x61150600 big-endian)
+
+12 long 0x00042253 Berkeley DB (Queue,
+>16 long 0x00000001 version 1,
+>16 long 0x00000002 version 2,
+>16 long 0x00000003 version 3,
+>16 long 0x00000004 version 4,
+>16 long 0x00000005 version 5,
+>16 long 0x00000006 version 6,
+>16 long 0x00000007 version 7,
+>16 long 0x00000008 version 8,
+>16 long 0x00000009 version 9,
+>12 long 0x00042253 native byte-order)
+
+12 long 0x53220400 Berkeley DB (Queue,
+>16 long 0x01000000 version 1,
+>16 long 0x02000000 version 2,
+>16 long 0x03000000 version 3,
+>16 long 0x04000000 version 4,
+>16 long 0x05000000 version 5,
+>16 long 0x06000000 version 6,
+>16 long 0x07000000 version 7,
+>16 long 0x08000000 version 8,
+>16 long 0x09000000 version 9,
+>12 long 0x53220400 big-endian)
diff --git a/libdb/docs/ref/install/magic.txt b/libdb/docs/ref/install/magic.txt
new file mode 100644
index 0000000..07c1a7e
--- /dev/null
+++ b/libdb/docs/ref/install/magic.txt
@@ -0,0 +1,56 @@
+# Berkeley DB
+# $Id$
+#
+# Ian Darwin's file /etc/magic files: big/little-endian version.
+#
+# Hash 1.85/1.86 databases store metadata in network byte order.
+# Btree 1.85/1.86 databases store the metadata in host byte order.
+# Hash and Btree 2.X and later databases store the metadata in host byte order.
+
+0 long 0x00061561 Berkeley DB
+>8 belong 4321
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, native byte-order)
+>8 belong 1234
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, little-endian)
+
+0 belong 0x00061561 Berkeley DB
+>8 belong 4321
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, big-endian)
+>8 belong 1234
+>>4 belong >2 1.86
+>>4 belong <3 1.85
+>>4 belong >0 (Hash, version %d, native byte-order)
+
+0 long 0x00053162 Berkeley DB 1.85/1.86
+>4 long >0 (Btree, version %d, native byte-order)
+0 belong 0x00053162 Berkeley DB 1.85/1.86
+>4 belong >0 (Btree, version %d, big-endian)
+0 lelong 0x00053162 Berkeley DB 1.85/1.86
+>4 lelong >0 (Btree, version %d, little-endian)
+
+12 long 0x00061561 Berkeley DB
+>16 long >0 (Hash, version %d, native byte-order)
+12 belong 0x00061561 Berkeley DB
+>16 belong >0 (Hash, version %d, big-endian)
+12 lelong 0x00061561 Berkeley DB
+>16 lelong >0 (Hash, version %d, little-endian)
+
+12 long 0x00053162 Berkeley DB
+>16 long >0 (Btree, version %d, native byte-order)
+12 belong 0x00053162 Berkeley DB
+>16 belong >0 (Btree, version %d, big-endian)
+12 lelong 0x00053162 Berkeley DB
+>16 lelong >0 (Btree, version %d, little-endian)
+
+12 long 0x00042253 Berkeley DB
+>16 long >0 (Queue, version %d, native byte-order)
+12 belong 0x00042253 Berkeley DB
+>16 belong >0 (Queue, version %d, big-endian)
+12 lelong 0x00042253 Berkeley DB
+>16 lelong >0 (Queue, version %d, little-endian)
diff --git a/libdb/docs/ref/install/multiple.html b/libdb/docs/ref/install/multiple.html
new file mode 100644
index 0000000..5b74c31
--- /dev/null
+++ b/libdb/docs/ref/install/multiple.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building with multiple versions of Berkeley DB</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>System Installation Notes</dl></h3></td>
+<td align=right><a href="../../ref/install/file.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/rpm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building with multiple versions of Berkeley DB</h1>
+<p>In some cases it may be necessary to build applications which include
+multiple versions of Berkeley DB. Examples include applications that incorporate
+software from other vendors, or applications running on a system where
+the system C library itself uses Berkeley DB. In such cases, the two versions
+of Berkeley DB may be incompatible, that is, they may have different external
+and internal interfaces, and may even have different underlying database
+formats.
+<p>To create a Berkeley DB library whose symbols won't collide with other Berkeley DB
+libraries (or other application or library modules, for that matter),
+configure Berkeley DB using the <a href="../../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename=NAME</a> configuration option,
+and then build Berkeley DB as usual. (Note that
+<a href="../../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename</a> only affects the Berkeley DB C language library build;
+loading multiple versions of the C++ or Java APIs will require
+additional work.) The modified symbol names are hidden from the
+application in the Berkeley DB header files, that is, there is no need for
+the application to be aware that it is using a special library build as
+long as it includes the appropriate Berkeley DB header file.
+<p>If "NAME" is not specified when configuring with
+<a href="../../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename</a>, a default value built from the major and minor
+numbers of the Berkeley DB release will be used. It is rarely necessary to
+specify NAME; using the major and minor release numbers will ensure that
+only one copy of the library will be loaded into the application unless
+two distinct versions really are necessary.
+<p>When distributing any library software that uses Berkeley DB, or any software
+which will be recompiled by users for their systems, we recommend two
+things: First, include the Berkeley DB release as part of your release. This
+will insulate your software from potential Berkeley DB API changes, as well as
+simplify your coding because you will only have to code to a single
+version of the Berkeley DB API instead of adapting at compile time to whatever
+version of Berkeley DB happens to be installed on the target system. Second,
+use <a href="../../ref/build_unix/conf.html#--with-uniquename=NAME">--with-uniquename</a> when configuring Berkeley DB, because that will ensure that
+you do not unexpectedly collide with other application code or a library
+already installed on the target system.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/install/file.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/install/rpm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/install/rpm.html b/libdb/docs/ref/install/rpm.html
new file mode 100644
index 0000000..d359e29
--- /dev/null
+++ b/libdb/docs/ref/install/rpm.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building RPM distribution packages</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>System Installation Notes</dl></h3></td>
+<td align=right><a href="../../ref/install/multiple.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building RPM distribution packages</h1>
+<p>If you would like to distribute your configuration and build of Berkeley DB
+as an RPM software package, first configure Berkeley DB using the
+<a href="../../ref/build_unix/conf.html#--with-rpm">--with-rpm</a>
+configuration option, and then build Berkeley DB as follows:
+<p><blockquote><pre>../dist/configure --with-rpm=DIR
+make</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/install/multiple.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/debug/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/data.html b/libdb/docs/ref/intro/data.html
new file mode 100644
index 0000000..13dc52d
--- /dev/null
+++ b/libdb/docs/ref/intro/data.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: An introduction to data management</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/terrain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>An introduction to data management</h1>
+<p>Cheap, powerful computing and networking have created countless new
+applications that could not have existed a decade ago. The advent of
+the World-Wide Web, and its influence in driving the Internet into homes
+and businesses, is one obvious example. Equally important, though, is
+the shift from large, general-purpose desktop and server computers
+toward smaller, special-purpose devices with built-in processing and
+communications services.
+<p>As computer hardware has spread into virtually every corner of our
+lives, of course, software has followed. Software developers today are
+building applications not just for conventional desktop and server
+environments, but also for handheld computers, home appliances,
+networking hardware, cars and trucks, factory floor automation systems,
+and more.
+<p>While these operating environments are diverse, the problems that
+software engineers must solve in them are often strikingly similar. Most
+systems must deal with the outside world, whether that means
+communicating with users or controlling machinery. As a result, most
+need some sort of I/O system. Even a simple, single-function system
+generally needs to handle multiple tasks, and so needs some kind of
+operating system to schedule and manage control threads. Also, many
+computer systems must store and retrieve data to track history, record
+configuration settings, or manage access.
+<p>Data management can be very simple. In some cases, just recording
+configuration in a flat text file is enough. More often, though,
+programs need to store and search a large amount of data, or
+structurally complex data. Database management systems are tools that
+programmers can use to do this work quickly and efficiently using
+off-the-shelf software.
+<p>Of course, database management systems have been around for a long time.
+Data storage is a problem dating back to the earliest days of computing.
+Software developers can choose from hundreds of good,
+commercially-available database systems. The problem is selecting the
+one that best solves the problems that their applications face.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/terrain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/dbis.html b/libdb/docs/ref/intro/dbis.html
new file mode 100644
index 0000000..07cea7f
--- /dev/null
+++ b/libdb/docs/ref/intro/dbis.html
@@ -0,0 +1,160 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What is Berkeley DB?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/terrain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbisnot.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What is Berkeley DB?</h1>
+<p>So far, we've discussed database systems in general terms. It's time
+now to consider Berkeley DB in particular and see how it fits into the
+framework we have introduced. The key question is, what kinds of
+applications should use Berkeley DB?
+<p>Berkeley DB is an open source embedded database library that provides
+scalable, high-performance, transaction-protected data management
+services to applications. Berkeley DB provides a simple function-call API
+for data access and management.
+<p>By "open source," we mean that Berkeley DB is distributed under a license that
+conforms to the <a href="http://www.opensource.org/osd.html">Open
+Source Definition</a>. This license guarantees that Berkeley DB is freely
+available for use and redistribution in other open source products.
+<a href="http://www.sleepycat.com">Sleepycat Software</a> sells
+commercial licenses for redistribution in proprietary applications, but
+in all cases the complete source code for Berkeley DB is freely available for
+download and use.
+<p>Berkeley DB is embedded because it links directly into the application. It
+runs in the same address space as the application. As a result, no
+inter-process communication, either over the network or between
+processes on the same machine, is required for database operations.
+Berkeley DB provides a simple function-call API for a number of programming
+languages, including C, C++, Java, Perl, Tcl, Python, and PHP. All
+database operations happen inside the library. Multiple processes, or
+multiple threads in a single process, can all use the database at the
+same time as each uses the Berkeley DB library. Low-level services like
+locking, transaction logging, shared buffer management, memory
+management, and so on are all handled transparently by the library.
+<p>The library is extremely portable. It runs under almost all UNIX and
+Linux variants, Windows, and a number of embedded real-time operating
+systems. It runs on both 32-bit and 64-bit systems.
+It has been deployed on high-end
+Internet servers, desktop machines, and on palmtop computers, set-top
+boxes, in network switches, and elsewhere. Once Berkeley DB is linked into
+the application, the end user generally does not know that there's a
+database present at all.
+<p>Berkeley DB is scalable in a number of respects. The database library itself
+is quite compact (under 300 kilobytes of text space on common
+architectures), but it can manage databases up to 256 terabytes in size.
+It also supports high concurrency, with thousands of users operating on
+the same database at the same time. Berkeley DB is small enough to run in
+tightly constrained embedded systems, but can take advantage of
+gigabytes of memory and terabytes of disk on high-end server machines.
+<p>Berkeley DB generally outperforms relational and object-oriented database
+systems in embedded applications for a couple of reasons. First, because
+the library runs in the same address space, no inter-process
+communication is required for database operations. The cost of
+communicating between processes on a single machine, or among machines
+on a network, is much higher than the cost of making a function call.
+Second, because Berkeley DB uses a simple function-call interface for all
+operations, there is no query language to parse, and no execution plan
+to produce.
+<h3>Data Access Services</h3>
+<p>Berkeley DB applications can choose the storage structure that best suits the
+application. Berkeley DB supports hash tables, Btrees, simple
+record-number-based storage, and persistent queues. Programmers can
+create tables using any of these storage structures, and can mix
+operations on different kinds of tables in a single application.
+<p>Hash tables are generally good for very large databases that need
+predictable search and update times for random-access records. Hash
+tables allow users to ask, "Does this key exist?" or to fetch a record
+with a known key. Hash tables do not allow users to ask for records
+with keys that are close to a known key.
+<p>Btrees are better for range-based searches, as when the application
+needs to find all records with keys between some starting and ending
+value. Btrees also do a better job of exploiting <i>locality
+of reference</i>. If the application is likely to touch keys near each
+other at the same time, Btrees work well. The tree structure keeps
+keys that are close together near one another in storage, so fetching
+nearby values usually doesn't require a disk access.
+<p>Record-number-based storage is natural for applications that need to
+store and fetch records, but that do not have a simple way to generate
+keys of their own. In a record number table, the record number is the
+key for the record. Berkeley DB will generate these record numbers
+automatically.
+<p>Queues are well-suited for applications that create records, and then
+must deal with those records in creation order. A good example is
+on-line purchasing systems. Orders can enter the system at any time,
+but should generally be filled in the order in which they were placed.
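+<p>As an illustrative sketch only (the file and key names are
+hypothetical, error handling is abbreviated, and the DB-&gt;open call is
+shown with the transaction argument used by recent releases), a Btree
+table can be created, loaded, and then scanned in key order starting
+from a chosen key:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+    DB *dbp;
+    DBC *dbc;
+    DBT key, data;
+    char *k = "apple", *v = "fruit";
+
+    /* Create and open a Btree database without an environment. */
+    if (db_create(&amp;dbp, NULL, 0) != 0)
+        return (EXIT_FAILURE);
+    if (dbp-&gt;open(dbp, NULL,
+        "example.db", NULL, DB_BTREE, DB_CREATE, 0664) != 0)
+        goto err;
+
+    /* Store one key/value pair. */
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = k;
+    key.size = strlen(k) + 1;
+    data.data = v;
+    data.size = strlen(v) + 1;
+    if (dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0) != 0)
+        goto err;
+
+    /* Scan keys in sorted order, starting at the first key &gt;= "a". */
+    if (dbp-&gt;cursor(dbp, NULL, &amp;dbc, 0) != 0)
+        goto err;
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "a";
+    key.size = 2;
+    if (dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_SET_RANGE) == 0)
+        do
+            printf("%s: %s\n", (char *)key.data, (char *)data.data);
+        while (dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT) == 0);
+    (void)dbc-&gt;c_close(dbc);
+    (void)dbp-&gt;close(dbp, 0);
+    return (EXIT_SUCCESS);
+
+err:    (void)dbp-&gt;close(dbp, 0);
+    return (EXIT_FAILURE);
+}</pre></blockquote>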
+<h3>Data management services</h3>
+<p>Berkeley DB offers important data management services, including concurrency,
+transactions, and recovery. All of these services work on all of the
+storage structures.
+<p>Many users can work on the same database concurrently. Berkeley DB handles
+locking transparently, ensuring that two users working on the same
+record do not interfere with one another.
+<p>The library provides strict ACID transaction semantics, by default.
+However, applications are allowed to relax the isolation guarantees
+the database system makes.
+<p>Multiple operations can be grouped into a single transaction, and can
+be committed or rolled back atomically. Berkeley DB uses a technique called
+<i>two-phase locking</i> to be sure that concurrent transactions
+are isolated from one another, and a technique called
+<i>write-ahead logging</i> to guarantee that committed changes
+survive application, system, or hardware failures.
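+<p>As a rough sketch (the environment and database names are
+hypothetical, error handling is abbreviated, and the method-style
+DB_TXN commit and abort calls of recent releases are assumed), grouping
+an update into a transaction looks approximately like this:
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+    DB_ENV *dbenv;
+    DB *dbp;
+    DB_TXN *txn;
+    DBT key, data;
+
+    /* Open a transactional environment and a database within it. */
+    if (db_env_create(&amp;dbenv, 0) != 0 ||
+        dbenv-&gt;open(dbenv, "/var/myapp/envhome", DB_CREATE |
+        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0) != 0 ||
+        db_create(&amp;dbp, dbenv, 0) != 0 ||
+        dbp-&gt;open(dbp, NULL, "accounts.db", NULL, DB_BTREE,
+        DB_CREATE | DB_AUTO_COMMIT, 0664) != 0)
+        return (EXIT_FAILURE);
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "alice";
+    key.size = sizeof("alice");
+    data.data = "100";
+    data.size = sizeof("100");
+
+    /* Commit the update if it succeeds; roll it back otherwise. */
+    if (dbenv-&gt;txn_begin(dbenv, NULL, &amp;txn, 0) != 0)
+        return (EXIT_FAILURE);
+    if (dbp-&gt;put(dbp, txn, &amp;key, &amp;data, 0) == 0)
+        (void)txn-&gt;commit(txn, 0);
+    else
+        (void)txn-&gt;abort(txn);
+
+    (void)dbp-&gt;close(dbp, 0);
+    (void)dbenv-&gt;close(dbenv, 0);
+    return (EXIT_SUCCESS);
+}</pre></blockquote>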
+<p>When an application starts up, it can ask Berkeley DB to run recovery.
+Recovery restores the database to a clean state, with all committed
+changes present, even after a crash. The database is guaranteed to be
+consistent and all committed changes are guaranteed to be present when
+recovery completes.
+<p>An application can specify, when it starts up, which data management
+services it will use. Some applications need fast, single-user,
+non-transactional Btree data storage. In that case, the application can
+disable the locking and transaction systems, and will not incur the
+overhead of locking or logging. If an application needs to support
+multiple concurrent users, but doesn't need transactions, it can turn
+on locking without transactions. Applications that need concurrent,
+transaction-protected database access can enable all of the
+subsystems.
+<p>In all these cases, the application uses the same function-call API to
+fetch and update records.
+<h3>Design</h3>
+<p>Berkeley DB was designed to provide industrial-strength database services to
+application developers, without requiring them to become database
+experts. It is a classic C-library style <i>toolkit</i>, providing
+a broad base of functionality to application writers. Berkeley DB was designed
+by programmers, for programmers: its modular design surfaces simple,
+orthogonal interfaces to core services, and it provides mechanism (for
+example, good thread support) without imposing policy (for example, the
+use of threads is not required). Just as importantly, Berkeley DB allows
+developers to balance performance against the need for crash recovery
+and concurrent use. An application can use the storage structure that
+provides the fastest access to its data and can request only the degree
+of logging and locking that it needs.
+<p>Because of the tool-based approach and separate interfaces for each
+Berkeley DB subsystem, you can support a complete transaction environment for
+other system operations. Berkeley DB even allows you to wrap transactions
+around the standard UNIX file read and write operations! Further, Berkeley DB
+was designed to interact correctly with the native system's toolset, a
+feature no other database package offers. For example, Berkeley DB supports
+hot backups (database backups while the database is in use), using
+standard UNIX system utilities, for example, dump, tar, cpio, pax or
+even cp.
+<p>Finally, because scripting language interfaces are available for Berkeley DB
+(notably Tcl and Perl), application writers can build incredibly powerful
+database engines with little effort. You can build transaction-protected
+database applications using your favorite scripting languages, an
+increasingly important feature in a world using CGI scripts to deliver
+HTML.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/terrain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbisnot.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/dbisnot.html b/libdb/docs/ref/intro/dbisnot.html
new file mode 100644
index 0000000..e519ece
--- /dev/null
+++ b/libdb/docs/ref/intro/dbisnot.html
@@ -0,0 +1,141 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What Berkeley DB is not</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/dbis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/need.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What Berkeley DB is not</h1>
+<p>In contrast to most other database systems, Berkeley DB provides relatively
+simple data access services.
+<p>Records in Berkeley DB are (<i>key</i>, <i>value</i>) pairs. Berkeley DB
+supports only a few logical operations on records. They are:
+<p><ul type=disc>
+<li>Insert a record in a table.
+<li>Delete a record from a table.
+<li>Find a record in a table by looking up its key.
+<li>Update a record that has already been found.
+</ul>
+<p>Notice that Berkeley DB never operates on the value part of a record.
+Values are simply payload, to be
+stored with keys and reliably delivered back to the application on
+demand.
+<p>Both keys and values can be arbitrary byte strings, either fixed-length
+or variable-length. As a result, programmers can put native programming
+language data structures into the database without converting them to
+a foreign record format first. Storage and retrieval are very simple,
+but the application needs to know what the structure of a key and a
+value is in advance. It cannot ask Berkeley DB, because Berkeley DB doesn't know.
+<p>This is an important feature of Berkeley DB, and one worth considering more
+carefully. On the one hand, Berkeley DB cannot provide the programmer with
+any information on the contents or structure of the values that it
+stores. The application must understand the keys and values that it
+uses. On the other hand, there is literally no limit to the data types
+that can be stored in a Berkeley DB database. The application never needs to
+convert its own program data into the data types that Berkeley DB supports.
+Berkeley DB is able to operate on any data type the application uses, no
+matter how complex.
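+<p>As a short, hypothetical sketch of this point (the structure and the
+already-opened database handle are assumptions for illustration, not
+part of any Berkeley DB interface), an application-defined C structure can
+be stored directly as the value of a record; only the application knows
+its layout:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* An application-defined record layout; Berkeley DB never looks inside it. */
+struct customer {
+    char name[32];
+    char email[64];
+    int orders;
+};
+
+/* Store one customer, keyed by email, in an already-opened database. */
+int
+store_customer(DB *dbp, struct customer *c)
+{
+    DBT key, data;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = c-&gt;email;
+    key.size = strlen(c-&gt;email) + 1;
+    data.data = c;
+    data.size = sizeof(*c);
+
+    return (dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0));
+}</pre></blockquote>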
+<p>Because both keys and values can be up to four gigabytes in length, a
+single record can store images, audio streams, or other large data
+values. Large values are not treated specially in Berkeley DB. They are
+simply broken into page-sized chunks, and reassembled on demand when
+the application needs them. Unlike some other database systems, Berkeley DB
+offers no special support for binary large objects (BLOBs).
+<h3>Not a relational database</h3>
+<p>Berkeley DB is not a relational database.
+<p>First, Berkeley DB does not support SQL queries. All access to data is through
+the Berkeley DB API. Developers must learn a new set of interfaces in order
+to work with Berkeley DB. Although the interfaces are fairly simple, they are
+non-standard.
+<p>SQL support is a double-edged sword. One big advantage of relational
+databases is that they allow users to write simple declarative queries
+in a high-level language. The database system knows everything about
+the data and can carry out the command. This means that it's simple to
+search for data in new ways, and to ask new questions of the database.
+No programming is required.
+<p>On the other hand, if a programmer can predict in advance how an
+application will access data, then writing a low-level program to get
+and store records can be faster. It eliminates the overhead of query
+parsing, optimization, and execution. The programmer must understand
+the data representation, and must write the code to do the work, but
+once that's done, the application can be very fast.
+<p>Second, Berkeley DB has no notion of <i>schema</i> and data types in
+the way that relational systems do. Schema is the structure of records
+in tables, and the relationships among the tables in the database. For
+example, in a relational system the programmer can create a record from
+a fixed menu of data types. Because the record types are declared to
+the system, the relational engine can reach inside records and examine
+individual values in them. In addition, programmers can use SQL to
+declare relationships among tables, and to create indices on tables.
+Relational engines usually maintain these relationships and indices
+automatically.
+<p>In Berkeley DB, the key and value in a record are opaque to Berkeley DB. They may
+have a rich internal structure, but the library is unaware of it. As a
+result, Berkeley DB cannot decompose the value part of a record into its
+constituent parts, and cannot use those parts to find values of
+interest. Only the application, which knows the data structure, can do
+that. Berkeley DB does support indices on tables and automatically maintain
+those indices as their associated tables are modified.
+<p>Berkeley DB is not a relational system. Relational database systems are
+semantically rich and offer high-level database access. Compared to such
+systems, Berkeley DB is a high-performance, transactional library for record
+storage. It's possible to build a relational system on top of Berkeley DB. In
+fact, the popular MySQL relational system uses Berkeley DB for
+transaction-protected table management, and takes care of all the SQL
+parsing and execution. It uses Berkeley DB for the storage level, and provides
+the semantics and access tools.
+<h3>Not an object-oriented database</h3>
+<p>Object-oriented databases are designed for very tight integration with
+object-oriented programming languages. Berkeley DB is written entirely in the
+C programming language. It includes language bindings for C++, Java,
+and other languages, but the library has no information about the
+objects created in any object-oriented application. Berkeley DB never makes
+method calls on any application object. It has no idea what methods are
+defined on user objects, and cannot see the public or private members
+of any instance. The key and value part of all records are opaque to
+Berkeley DB.
+<p>Berkeley DB cannot automatically page in objects as they are accessed, as some
+object-oriented databases do. The object-oriented application programmer
+must decide what records are required, and must fetch them by making
+method calls on Berkeley DB objects.
+<h3>Not a network database</h3>
+<p>Berkeley DB does not support network-style navigation among records, as
+network databases do. Records in a Berkeley DB table may move around over
+time, as new records are added to the table and old ones are deleted.
+Berkeley DB is able to do fast searches for records based on keys, but there
+is no way to create a persistent physical pointer to a record.
+Applications can only refer to records by key, not by address.
+<h3>Not a database server</h3>
+<p>Berkeley DB is not a standalone database server. It is a library, and runs in
+the address space of the application that uses it. If more than one
+application links in Berkeley DB, then all can use the same database at the
+same time; the library handles coordination among the applications, and
+guarantees that they do not interfere with one another.
+<p>Recent releases of Berkeley DB allow programmers to compile the library as a
+standalone process, and to use RPC stubs to connect to it and to carry
+out operations. However, there are some important limitations to this
+feature. The RPC stubs provide exactly the same API that the library
+itself does. There is no higher-level access provided by the standalone
+process. Tuning the standalone process is difficult, since Berkeley DB does
+no threading in the library (applications can be threaded, but the
+library never creates a thread on its own).
+<p>It is possible to build a server application that uses Berkeley DB for data
+management. For example, many commercial and open source Lightweight
+Directory Access Protocol (LDAP) servers use Berkeley DB for record storage.
+LDAP clients connect to these servers over the network. Individual
+servers make calls through the Berkeley DB API to find records and return them
+to clients. On its own, however, Berkeley DB is not a server.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/dbis.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/need.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/distrib.html b/libdb/docs/ref/intro/distrib.html
new file mode 100644
index 0000000..26d87da
--- /dev/null
+++ b/libdb/docs/ref/intro/distrib.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What does the Berkeley DB distribution include?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/what.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/where.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What does the Berkeley DB distribution include?</h1>
+<p>The Berkeley DB distribution includes complete source code for the Berkeley DB
+library, including all three Berkeley DB products and their supporting
+utilities, as well as complete documentation in HTML format.
+<p>The distribution does not include prebuilt binaries or libraries, or
+hard-copy documentation. Prebuilt libraries and binaries for some
+architecture/compiler combinations are available as part of Sleepycat
+Software's Berkeley DB support services.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/what.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/where.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/need.html b/libdb/docs/ref/intro/need.html
new file mode 100644
index 0000000..c29af8b
--- /dev/null
+++ b/libdb/docs/ref/intro/need.html
@@ -0,0 +1,61 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Do you need Berkeley DB?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/dbisnot.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/what.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Do you need Berkeley DB?</h1>
+<p>Berkeley DB is an ideal database system for applications that need fast,
+scalable, and reliable embedded database management. For applications
+that need different services, however, it can be a poor choice.
+<p>First, do you need the ability to access your data in ways you cannot
+predict in advance? If your users want to be able to enter SQL
+queries to perform
+complicated searches that you cannot program into your application to
+begin with, then you should consider a relational engine instead. Berkeley DB
+requires a programmer to write code in order to run a new kind of query.
+<p>On the other hand, if you can predict your data access patterns up front
+-- and in particular if you need fairly simple key/value lookups -- then
+Berkeley DB is a good choice. The queries can be coded up once, and will then
+run very quickly because there is no SQL to parse and execute.
+<p>Second, are there political arguments for or against a standalone
+relational server? If you're building an application for your own use
+and have a relational system installed with administrative support
+already, it may be simpler to use that than to build and learn Berkeley DB.
+On the other hand, if you'll be shipping many copies of your application
+to customers, and don't want your customers to have to buy, install,
+and manage a separate database system, then Berkeley DB may be a better
+choice.
+<p>Third, are there any technical advantages to an embedded database? If
+you're building an application that will run unattended for long periods
+of time, or for end users who are not sophisticated administrators, then
+a separate server process may be too big a burden. It will require
+separate installation and management, and if it creates new ways for
+the application to fail, or new complexities to master in the field,
+then Berkeley DB may be a better choice.
+<p>The fundamental question is, how closely do your requirements match the
+Berkeley DB design? Berkeley DB was conceived and built to provide fast, reliable,
+transaction-protected record storage. The library itself was never
+intended to provide interactive query support, graphical reporting
+tools, or similar services that some other database systems provide. We
+have tried always to err on the side of minimalism and simplicity. By
+keeping the library small and simple, we create fewer opportunities for
+bugs to creep in, and we guarantee that the database system stays fast,
+because there is very little code to execute. If your application needs
+that set of features, then Berkeley DB is almost certainly the best choice
+for you.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/dbisnot.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/what.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/products.html b/libdb/docs/ref/intro/products.html
new file mode 100644
index 0000000..3de8165
--- /dev/null
+++ b/libdb/docs/ref/intro/products.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Sleepycat Software's Berkeley DB products</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/where.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Sleepycat Software's Berkeley DB products</h1>
+<p>Sleepycat Software licenses four different products that use the Berkeley DB
+technology. Each product offers a distinct level of database support.
+It is not possible to mix-and-match products, that is, each application
+or group of applications must use the same Berkeley DB product.
+<p>All four products are included in the single Open Source distribution
+of Berkeley DB from Sleepycat Software, and building that distribution
+automatically builds all four products. Each product adds new
+interfaces and services to the product that precedes it in the list.
+As a result, developers can download Berkeley DB and build an application that
+does only single-user, read-only database access, and easily add support
+later for more users and more complex database access patterns.
+<p>Users who distribute Berkeley DB must ensure that they are licensed for the
+Berkeley DB interfaces they use. Information on licensing is available
+directly from Sleepycat Software.
+<h3>Berkeley DB Data Store</h3>
+<p>The Berkeley DB Data Store product is an embeddable, high-performance data store. It
+supports multiple concurrent threads of control to read information
+managed by Berkeley DB. When updates are required, only a single process may
+be using the database. That process may be multithreaded, but only one
+thread of control should be allowed to update the database at any time.
+The Berkeley DB Data Store does no locking, and so provides no guarantees of correct
+behavior if more than one thread of control is updating the database at
+a time. The Berkeley DB Data Store is intended for use in single-user or read-only
+applications that can guarantee that no more than one thread of control
+will ever update the database at any time.
+<h3>Berkeley DB Concurrent Data Store</h3>
+<p>The Berkeley DB Concurrent Data Store product adds multiple-reader, single writer capabilities to
+the Berkeley DB Data Store product, supporting applications that need concurrent updates
+and do not want to implement their own locking protocols. Berkeley DB Concurrent Data Store is
+intended for applications that require occasional write access to a
+database that is largely used for reading.
+<h3>Berkeley DB Transactional Data Store</h3>
+<p>The Berkeley DB Transactional Data Store product adds full transactional support and recoverability
+to the Berkeley DB Data Store product. Berkeley DB Transactional Data Store is intended for applications that require
+industrial-strength database services, including excellent performance
+under high-concurrency workloads with a mixture of readers and writers,
+the ability to commit or roll back multiple changes to the database at
+a single instant, and the guarantee that even in the event of a
+catastrophic system or hardware failure, any committed database changes
+will be preserved.
+<h3>Berkeley DB High Availability</h3>
+<p>The Berkeley DB High Availability product adds support for data replication. A single master system
+handles all updates, and distributes them to as many replicas as the
+application requires. All replicas can handle read requests during
+normal processing. If the master system fails for any reason, one of
+the replicas takes over as the new master system, and distributes
+updates to the remaining replicas.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/where.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/terrain.html b/libdb/docs/ref/intro/terrain.html
new file mode 100644
index 0000000..46f74da
--- /dev/null
+++ b/libdb/docs/ref/intro/terrain.html
@@ -0,0 +1,249 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Mapping the terrain: theory and practice</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/data.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Mapping the terrain: theory and practice</h1>
+<p>The first step in selecting a database system is figuring out what the
+choices are. Decades of research and real-world deployment have produced
+countless systems. We need to organize them somehow to reduce the number
+of options.
+<p>One obvious way to group systems is to use the common labels that
+vendors apply to them. The buzzwords here include "network,"
+"relational," "object-oriented," and "embedded," with some
+cross-fertilization like "object-relational" and "embedded network".
+Understanding the buzzwords is important. Each has some grounding in
+theory, but has also evolved into a practical label for categorizing
+systems that work in a certain way.
+<p>All database systems, regardless of the buzzwords that apply to them,
+provide a few common services. All of them store data, for example.
+We'll begin by exploring the common services that all systems provide,
+and then examine the differences among the different kinds of systems.
+<h3>Data access and data management</h3>
+<p>Fundamentally, database systems provide two services.
+<p>The first service is <i>data access</i>. Data access means adding
+new data to the database (inserting), finding data of interest
+(searching), changing data already stored (updating), and removing data
+from the database (deleting). All databases provide these services. How
+they work varies from category to category, and depends on the record
+structure that the database supports.
+<p>Each record in a database is a collection of values. For example, the
+record for a Web site customer might include a name, email address,
+shipping address, and payment information. Records are usually stored
+in tables. Each table holds records of the same kind. For example, the
+<b>customer</b> table at an e-commerce Web site might store the
+customer records for every person who shopped at the site. Often,
+database records have a different structure from the structures or
+instances supported by the programming language in which an application
+is written. As a result, working with records can mean:
+<p><ul type=disc>
+<li>using database operations like searches and updates on records; and
+<li>converting between programming language structures and database record
+types in the application.
+</ul>
+<p>The second service is <i>data management</i>. Data management is
+more complicated than data access. Providing good data management
+services is the hard part of building a database system. When you
+choose a database system to use in an application you build, making sure
+it supports the data management services you need is critical.
+<p>Data management services include allowing multiple users to work on the
+database simultaneously (concurrency), allowing multiple records to be
+changed instantaneously (transactions), and surviving application and
+system crashes (recovery). Different database systems offer different
+data management services. Data management services are entirely
+independent of the data access services listed above. For example,
+nothing about relational database theory requires that the system
+support transactions, but most commercial relational systems do.
+<p>Concurrency means that multiple users can operate on the database at
+the same time. Support for concurrency ranges from none (single-user
+access only) to complete (many readers and writers working
+simultaneously).
+<p>Transactions permit users to make multiple changes appear at once. For
+example, a transfer of funds between bank accounts needs to be a
+transaction because the balance in one account is reduced and the
+balance in the other increases. If the reduction happened before the
+increase, than a poorly-timed system crash could leave the customer
+poorer; if the bank used the opposite order, then the same system crash
+could make the customer richer. Obviously, both the customer and the
+bank are best served if both operations happen at the same instant.
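+<p>As a concrete sketch of this idea, the fragment below groups the two
+balance updates into a single transaction using the Berkeley DB Java API.
+It is illustrative only: the class name, the <b>dbenv</b> and <b>accounts</b>
+handles, and the key/value encoding are assumptions, and the environment is
+assumed to have been opened with transaction support.
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+// Illustrative sketch: both balance updates commit together or not at all.
+// "dbenv" must be a transactional environment and "accounts" an open database.
+public class Transfer
+{
+    static void transfer(DbEnv dbenv, Db accounts,
+                         Dbt fromKey, Dbt fromBalance,
+                         Dbt toKey, Dbt toBalance)
+        throws DbException
+    {
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        try {
+            accounts.put(txn, fromKey, fromBalance, 0); // debit one account
+            accounts.put(txn, toKey, toBalance, 0);     // credit the other
+            txn.commit(0);   // both changes become visible at the same instant
+        } catch (DbException e) {
+            txn.abort();     // neither change is applied
+            throw e;
+        }
+    }
+}</pre></blockquote>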
+<p>Transactions have well-defined properties in database systems. They are
+<i>atomic</i>, so that the changes happen all at once or not at all.
+They are <i>consistent</i>, so that the database is in a legal state
+when the transaction begins and when it ends. They are typically
+<i>isolated</i>, which means that any other users in the database
+cannot interfere with them while they are in progress. And they are
+<i>durable</i>, so that if the system or application crashes after
+a transaction finishes, the changes are not lost. Together, the
+properties of <i>atomicity</i>, <i>consistency</i>,
+<i>isolation</i>, and <i>durability</i> are known as the ACID
+properties.
+<p>As is the case for concurrency, support for transactions varies among
+databases. Some offer atomicity without making guarantees about
+durability. Some ignore isolation, especially in single-user
+systems; there's no need to isolate other users from the effects of
+changes when there are no other users.
+<p>Another important data management service is recovery. Strictly
+speaking, recovery is a procedure that the system carries out when it
+starts up. The purpose of recovery is to guarantee that the database is
+complete and usable. This is most important after a system or
+application crash, when the database may have been damaged. The recovery
+process guarantees that the internal structure of the database is good.
+Recovery usually means that any completed transactions are checked, and
+any lost changes are reapplied to the database. At the end of the
+recovery process, applications can use the database as if there had been
+no interruption in service.
+<p>Finally, there are a number of data management services that permit
+copying of data. For example, most database systems are able to import
+data from other sources, and to export it for use elsewhere. Also, most
+systems provide some way to back up databases and to restore them in the
+event of a system failure that damages the database. Many commercial
+systems allow <i>hot backups</i>, so that users can back up
+databases while they are in use. Many applications must run without
+interruption, and cannot be shut down for backups.
+<p>A particular database system may provide other data management services.
+Some provide browsers that show database structure and contents. Some
+include tools that enforce data integrity rules, such as the rule that
+no employee can have a negative salary. These data management services
+are not common to all systems, however. Concurrency, recovery, and
+transactions are the data management services that most database vendors
+support.
+<p>Deciding what kind of database to use means understanding the data
+access and data management services that your application needs. Berkeley DB
+is an embedded database that supports fairly simple data access with a
+rich set of data management services. To highlight its strengths and
+weaknesses, we can compare it to other database system categories.
+<h3>Relational databases</h3>
+<p>Relational databases are probably the best-known database variant,
+because of the success of companies like Oracle. Relational databases
+are based on the mathematical field of set theory. The term "relation"
+is really just a synonym for "set" -- a relation is just a set of
+records or, in our terminology, a table. One of the main innovations in
+early relational systems was to insulate the programmer from the
+physical organization of the database. Rather than walking through
+arrays of records or traversing pointers, programmers make statements
+about tables in a high-level language, and the system executes those
+statements.
+<p>Relational databases operate on <i>tuples</i>, or records, composed
+of values of several different data types, including integers, character
+strings, and others. Operations include searching for records whose
+values satisfy some criteria, updating records, and so on.
+<p>Virtually all relational databases use the Structured Query Language,
+or SQL. This language permits people and computer programs to work with
+the database by writing simple statements. The database engine reads
+those statements and determines how to satisfy them on the tables in
+the database.
+<p>SQL is the main practical advantage of relational database systems.
+Rather than writing a computer program to find records of interest, the
+relational system user can just type a query in a simple syntax, and
+let the engine do the work. This gives users enormous flexibility; they
+do not need to decide in advance what kind of searches they want to do,
+and they do not need expensive programmers to find the data they need.
+Learning SQL requires some effort, but it's much simpler than a
+full-blown high-level programming language for most purposes. And there
+are a lot of programmers who have already learned SQL.
+<h3>Object-oriented databases</h3>
+<p>Object-oriented databases are less common than relational systems, but
+are still fairly widespread. Most object-oriented databases were
+originally conceived as persistent storage systems closely wedded to
+particular high-level programming languages like C++. With the spread
+of Java, most now support more than one programming language, but
+object-oriented database systems fundamentally provide the same class
+and method abstractions as do object-oriented programming languages.
+<p>Many object-oriented systems allow applications to operate on objects
+uniformly, whether they are in memory or on disk. These systems create
+the illusion that all objects are in memory all the time. The advantage
+to object-oriented programmers who simply want object storage and
+retrieval is clear. They need never be aware of whether an object is in
+memory or not. The application simply uses objects, and the database
+system moves them between disk and memory transparently. All of the
+operations on an object, and all its behavior, are determined by the
+programming language.
+<p>Object-oriented databases aren't nearly as widely deployed as relational
+systems. In order to attract developers who understand relational
+systems, many of the object-oriented systems have added support for
+query languages very much like SQL. In practice, though, object-oriented
+databases are mostly used for persistent storage of objects in C++ and
+Java programs.
+<h3>Network databases</h3>
+<p>The "network model" is a fairly old technique for managing and
+navigating application data. Network databases are designed to make
+pointer traversal very fast. Every record stored in a network database
+is allowed to contain pointers to other records. These pointers are
+generally physical addresses, so fetching the record to which a pointer refers
+just means reading it from disk by its disk address.
+<p>Network database systems generally permit records to contain integers,
+floating point numbers, and character strings, as well as references to
+other records. An application can search for records of interest. After
+retrieving a record, the application can fetch any record to which it
+refers, quickly.
+<p>Pointer traversal is fast because most network systems use physical disk
+addresses as pointers. When the application wants to fetch a record,
+the database system uses the address to fetch exactly the right string
+of bytes from the disk. This requires only a single disk access in all
+cases. Other systems, by contrast, often must do more than one disk read
+to find a particular record.
+<p>The key advantage of the network model is also its main drawback. The
+fact that pointer traversal is so fast means that applications that do
+it will run well. On the other hand, storing pointers all over the
+database makes it very hard to reorganize the database. In effect, once
+you store a pointer to a record, it is difficult to move that record
+elsewhere. Some network databases handle this by leaving forwarding
+pointers behind, but this defeats the speed advantage of doing a single
+disk access in the first place. Other network databases find, and fix,
+all the pointers to a record when it moves, but this makes
+reorganization very expensive. Reorganization is often necessary in
+databases, since adding and deleting records over time will consume
+space that cannot be reclaimed without reorganizing. Without periodic
+reorganization to compact network databases, they can end up with a
+considerable amount of wasted space.
+<h3>Clients and servers</h3>
+<p>Database vendors have two choices for system architecture. They can
+build a server to which remote clients connect, and do all the database
+management inside the server. Alternatively, they can provide a module
+that links directly into the application, and does all database
+management locally. In either case, the application developer needs
+some way of communicating with the database (generally, an Application
+Programming Interface (API) that does work in the process or that
+communicates with a server to get work done).
+<p>Almost all commercial database products are implemented as servers, and
+applications connect to them as clients. Servers have several features
+that make them attractive.
+<p>First, because all of the data is managed by a separate process, and
+possibly on a separate machine, it's easy to isolate the database server
+from bugs and crashes in the application.
+<p>Second, because some database products (particularly relational engines)
+are quite large, splitting them off as separate server processes keeps
+applications small, which uses less disk space and memory. Relational
+engines include code to parse SQL statements, to analyze them and
+produce plans for execution, to optimize the plans, and to execute
+them.
+<p>Finally, by storing all the data in one place and managing it with a
+single server, it's easier for organizations to back up, protect, and
+set policies on their databases. The enterprise databases for large
+companies often have several full-time administrators caring for them,
+making certain that applications run quickly, granting and denying
+access to users, and making backups.
+<p>However, centralized administration can be a disadvantage in some cases.
+In particular, if a programmer wants to build an application that uses
+a database for storage of important information, then shipping and
+supporting the application is much harder. The end user needs to install
+and administer a separate database server, and the programmer must
+support not just one product, but two. Adding a server process to the
+application creates new opportunity for installation mistakes and
+run-time problems.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/data.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/dbis.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/what.html b/libdb/docs/ref/intro/what.html
new file mode 100644
index 0000000..e41cae1
--- /dev/null
+++ b/libdb/docs/ref/intro/what.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: What other services does Berkeley DB provide?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/need.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/distrib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>What other services does Berkeley DB provide?</h1>
+<p>Berkeley DB also provides core database services to developers. These
+services include:
+<p><dl compact>
+<p><dt>Page cache management:<dd>The page cache provides fast access to a cache of database pages,
+handling the I/O associated with the cache to ensure that dirty pages
+are written back to the file system and that new pages are allocated on
+demand. Applications may use the Berkeley DB shared memory buffer manager to
+serve their own files and pages.
+<p><dt>Transactions and logging:<dd>The transaction and logging systems provide recoverability and atomicity
+for multiple database operations. The transaction system uses two-phase
+locking and write-ahead logging protocols to ensure that database
+operations may be undone or redone in the case of application or system
+failure. Applications may use Berkeley DB transaction and logging subsystems
+to protect their own data structures and operations from application or
+system failure.
+<p><dt>Locking:<dd>The locking system provides multiple reader or single writer access to
+objects. The Berkeley DB access methods use the locking system to acquire
+the right to read or write database pages. Applications may use the
+Berkeley DB locking subsystem to support their own locking needs.
+</dl>
+<p>By combining the page cache, transaction, locking, and logging systems,
+Berkeley DB provides the same services found in much larger, more complex and
+more expensive database systems. Berkeley DB supports multiple simultaneous
+readers and writers and guarantees that all changes are recoverable, even
+in the case of a catastrophic hardware failure during a database update.
+<p>Developers may select some or all of the core database services for any
+access method or database. Therefore, it is possible to choose the
+appropriate storage structure and the right degrees of concurrency and
+recoverability for any application. In addition, some of the subsystems
+(for example, the Locking subsystem) can be called separately from the
+Berkeley DB access method. As a result, developers can integrate non-database
+objects into their transactional applications using Berkeley DB.
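+<p>For example, the following sketch (using the Java API by way of
+illustration) opens an environment in which only the shared memory buffer
+pool and locking subsystems are initialized; the environment home directory
+and class name are placeholders.
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+// Illustrative sketch: initialize only the subsystems this application needs.
+public class SubsetEnv
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        // Shared page cache and locking, but no transactions or logging.
+        dbenv.open("/var/tmp/envhome",
+                   Db.DB_CREATE | Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK, 0);
+        // ... open databases and do work against this environment ...
+        dbenv.close(0);
+    }
+}</pre></blockquote>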
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/need.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/distrib.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/intro/where.html b/libdb/docs/ref/intro/where.html
new file mode 100644
index 0000000..313a524
--- /dev/null
+++ b/libdb/docs/ref/intro/where.html
@@ -0,0 +1,45 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Where does Berkeley DB run?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Introduction</dl></h3></td>
+<td align=right><a href="../../ref/intro/distrib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/products.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Where does Berkeley DB run?</h1>
+<p>Berkeley DB requires only underlying IEEE/ANSI Std 1003.1 (POSIX) system calls and can be
+ported easily to new architectures by adding stub routines to connect
+the native system interfaces to the Berkeley DB POSIX-style system calls.
+See <a href="../../ref/distrib/port.html">Porting Berkeley DB to new
+architectures</a> for more information.
+<p>Berkeley DB will autoconfigure and run on almost any modern UNIX, POSIX or
+Linux system, and on most historical UNIX platforms. Berkeley DB will
+autoconfigure and run on almost any GNU gcc toolchain-based embedded
+platform, including Cygwin, Embedix, OpenLinux and others. See
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a> for
+more information.
+<p>The Berkeley DB distribution includes support for QNX Neutrino. See
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a> for
+more information.
+<p>The Berkeley DB distribution includes support for VxWorks, via a workspace
+and project files for Tornado 2.0. See
+<a href="../../ref/build_vxworks/intro.html">Building for VxWorks</a> for more
+information.
+<p>The Berkeley DB distribution includes support for Windows/95, Windows/98,
+Windows/NT, Windows/2000 and Windows/XP, via the Microsoft Visual C++
+6.0 and .NET development environments. See
+<a href="../../ref/build_win/intro.html">Building for Windows systems</a>
+for more information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/distrib.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/intro/products.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/java/compat.html b/libdb/docs/ref/java/compat.html
new file mode 100644
index 0000000..b92045a
--- /dev/null
+++ b/libdb/docs/ref/java/compat.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compatibility</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td align=right><a href="../../ref/java/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compatibility</h1>
+<p>The Berkeley DB Java API has been tested with the
+<a href="http://www.javasoft.com">Sun Microsystems JDK 1.1.3</a> on SunOS
+5.5; and Sun's JDK 1.1.7, JDK 1.2.2, JDK 1.3.0 and JDK 1.4.0 on Linux and
+Windows/NT. It should work with any JDK 1.1-, 1.2-, 1.3- or 1.4-compatible
+environment (the latter three are known as Java 2). IBM's VM 1.3.0 has
+also been tested on Linux.
+<p>The primary requirement of the Berkeley DB Java API is that the target Java
+environment must support JNI (Java Native Interface) rather than another
+method for allowing native C/C++ code to interface to Java. The JNI was
+new in JDK 1.1, but is the most likely interface to be implemented
+across multiple platforms. However, using the JNI means that Berkeley DB will
+not be compatible with Microsoft Visual J++.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/java/conf.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/java/conf.html b/libdb/docs/ref/java/conf.html
new file mode 100644
index 0000000..2eb60a7
--- /dev/null
+++ b/libdb/docs/ref/java/conf.html
@@ -0,0 +1,94 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Java configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td align=right><a href="../../ref/rpc/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/compat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Java configuration</h1>
+<p>Building the Berkeley DB Java classes, the examples, and the native support
+library is integrated into the normal build process. See
+<a href="../../ref/build_unix/conf.html#--enable-java">Configuring
+Berkeley DB</a> and <a href="../../ref/build_win/intro.html">Building for Win32</a>
+for more information.
+<p>We expect that you have already installed the Java JDK or equivalent on
+your system. For the sake of discussion, we assume that the Berkeley DB
+distribution is in a directory called db-VERSION; for example, you
+downloaded a Berkeley DB archive and did not change the top-level directory
+name. The files related to Java are in two subdirectories of db-VERSION:
+java (the Java source files) and libdb_java (the C++ files that provide
+the "glue" between Java and Berkeley DB). The directory tree looks like this:
+<p><blockquote><pre> db-VERSION
+ / \
+ java libdb_java
+ | |
+ src ...
+ |
+ com
+ |
+ sleepycat
+ / \
+ db examples
+ | |
+ ... ...
+</pre></blockquote>
+<p>This naming conforms to the de facto standard for naming Java packages.
+When the Java code is built, it is placed into two jar files:
+<b>db.jar</b>, containing the db package,
+and <b>dbexamples.jar</b>, containing the examples.
+<p>For your application to use Berkeley DB successfully, you must set your
+<b>CLASSPATH</b> environment variable to include the full pathname of
+the db jar files as well as the classes in your java distribution.
+On UNIX, <b>CLASSPATH</b> is a colon-separated
+list of directories and jar files;
+on Windows, it is separated by semicolons.
+On UNIX, the jar files are put in your build directory, and when
+you do the make install step, they are copied to the lib directory
+of your installation tree. On Windows, the jar files are placed
+in the Release or Debug subdirectory with your other objects.
+<p>The Berkeley DB Java classes are mostly implemented in native
+methods. Before you can use them, you need to make sure that the
+DLL or shared library containing the native methods can be found
+by your Java runtime. On Windows, you should set your PATH variable
+to include:
+<p><blockquote><pre><b>db-VERSION\build_win32\Release</b></pre></blockquote>
+<p>On UNIX, you should set the
+<b>LD_LIBRARY_PATH</b> environment variable or local equivalent
+to include the Berkeley DB library installation directory. Of course, the
+standard install directory may have been changed for your site; see your
+system administrator for details.
+<p>On other platforms, the path can be set on the command line as follows
+(assuming the shared library is in <b>/usr/local/BerkeleyDB/lib</b>):
+<p><blockquote><pre>% java -Djava.library.path=/usr/local/BerkeleyDB/lib ...</pre></blockquote>
+<p>Regardless, if you get the following exception when you run, you
+probably do not have the library search path configured correctly:
+<p><blockquote><pre>java.lang.UnsatisfiedLinkError</pre></blockquote>
+<p>Different Java interpreters provide different error messages if the
+<b>CLASSPATH</b> value is incorrect; a typical error is the following:
+<p><blockquote><pre>java.lang.NoClassDefFoundError</pre></blockquote>
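+<p>One simple way to see which paths the runtime is actually using is to
+print the relevant system properties; the class below is an illustrative
+helper and is not part of the Berkeley DB distribution:
+<p><blockquote><pre>public class ShowPaths
+{
+    public static void main(String[] args)
+    {
+        // Print the search paths the Java runtime will use for classes and
+        // for native libraries such as the Berkeley DB JNI library.
+        System.out.println("java.class.path   = " + System.getProperty("java.class.path"));
+        System.out.println("java.library.path = " + System.getProperty("java.library.path"));
+    }
+}</pre></blockquote>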
+<p>To ensure that everything is running correctly, you may want to try a
+simple test from the example programs in
+<p><blockquote><pre><b>db-VERSION/java/src/com/sleepycat/examples</b></pre></blockquote>
+<p>For example, the following sample program will prompt for text input
+lines, which are then stored in a Btree database named <b>access.db</b> in
+your current directory:
+<p><blockquote><pre>% java com.sleepycat.examples.AccessExample</pre></blockquote>
+<p>Try giving it a few lines of input text and then end-of-file. Before
+it exits, you should see a list of the lines you entered displayed along
+with their data items. This is a simple check to make sure the fundamental
+configuration is working correctly.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rpc/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/compat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/java/faq.html b/libdb/docs/ref/java/faq.html
new file mode 100644
index 0000000..bb7af6a
--- /dev/null
+++ b/libdb/docs/ref/java/faq.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Java FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td align=right><a href="../../ref/java/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/perl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Java FAQ</h1>
+<p><ol>
+<p><li><b>During one of the first calls to the Berkeley DB Java API, a
+DbException is thrown with a "Bad file number" or "Bad file descriptor"
+message.</b>
+<p>There are known large-file support bugs under JNI in various releases
+of the JDK. Please upgrade to the latest release of the JDK, and, if
+that does not solve the problem, disable big file support using the
+--disable-largefile configuration option.
+<p><li><b>How can I use native methods from a debug build of the
+Java library?</b>
+<p>The Berkeley DB Java code checks a system property for the library name
+before defaulting to the released library. On Windows, run as
+follows (note the 'd' at the end):
+<p><blockquote><pre>% java -Dsleepycat.db.libname=libdb_java-VERSIONd</pre></blockquote>
+<p>On UNIX, try:
+<p><blockquote><pre>% java -Dsleepycat.db.libname=db_java_g-VERSION</pre></blockquote>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/java/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/perl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/java/program.html b/libdb/docs/ref/java/program.html
new file mode 100644
index 0000000..7b7aef1
--- /dev/null
+++ b/libdb/docs/ref/java/program.html
@@ -0,0 +1,78 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Java programming notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Java API</dl></h3></td>
+<td align=right><a href="../../ref/java/compat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Java programming notes</h1>
+<p>The Java API closely parallels the Berkeley DB C++ and C interfaces. If you
+are currently using either of those APIs, there will be very little to
+surprise you in the Java API. We have even taken care to make the names
+of classes, constants, methods and arguments identical, where possible,
+across all three APIs.
+<p><ol>
+<p><li>The Java runtime does not automatically close Berkeley DB objects on
+finalization. There are several reasons for this. One is that
+finalization is generally run only when garbage collection occurs, and
+there is no guarantee that this occurs at all, even on exit. Allowing
+specific Berkeley DB actions to occur in ways that cannot be replicated seems
+wrong. Second, finalization of objects may happen in an arbitrary
+order, so we would have to do extra bookkeeping to make sure that
+everything was closed in the proper order. The best word of advice is
+to always do a close() for any matching open() call. Specifically, the
+Berkeley DB package requires that you explicitly call close on each individual
+<a href="../../api_java/db_class.html">Db</a> and <a href="../../api_java/dbc_class.html">Dbc</a> object that you opened. Your database
+activity may not be synchronized to disk unless you do so.
+<p><li>Some methods in the Java API have no return type, and throw a
+<a href="../../api_java/except_class.html">DbException</a> when a severe error arises. There are some notable
+methods that do have a return value, and can also throw an exception.
+<a href="../../api_java/db_get.html">Db.get</a> and <a href="../../api_java/dbc_get.html">Dbc.get</a> both return 0 when a get succeeds,
+return <a href="../../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a> when the key is not found, and throw an error
+when there is a severe error. This approach allows the programmer to
+check for typical data-driven errors by watching return values without
+special-casing exceptions (a short sketch following these notes illustrates
+this convention and the explicit close described in the first note).
+<p>An object of type <a href="../../api_java/deadlock_class.html">DbDeadlockException</a> is thrown when a deadlock
+would occur.
+<p>An object of type <a href="../../api_java/memp_class.html">DbMemoryException</a> is thrown when the system
+cannot provide enough memory to complete the operation (the ENOMEM
+system error on UNIX).
+<p>An object of type <a href="../../api_java/runrec_class.html">DbRunRecoveryException</a>, a subclass of
+<a href="../../api_java/except_class.html">DbException</a>, is thrown when there is an error that requires a
+recovery of the database using <a href="../../utility/db_recover.html">db_recover</a>.
+<p><li>There is no class corresponding to the C++ DbMpoolFile class in the Berkeley DB
+Java API. There is a subset of the memp_XXX methods in the <a href="../../api_java/env_class.html">DbEnv</a>
+class. This has been provided to allow you to perform certain
+administrative actions on underlying memory pools opened as a consequence
+of <a href="../../api_java/env_open.html">DbEnv.open</a>. Direct access to other memory pool functionality
+is not appropriate for the Java environment.
+<p><li>Berkeley DB always turns on the <a href="../../api_java/env_open.html#DB_THREAD">Db.DB_THREAD</a> flag because
+threads are expected in Java.
+<p><li>If there are embedded null strings in the <b>curslist</b> argument for
+<a href="../../api_java/db_join.html">Db.join</a>, they will be treated as the end of the list of
+cursors, even if you have allocated a longer array. Fill in all
+the strings in your array unless you intend to cut it short.
+<p><li>The callback installed for <a href="../../api_java/env_set_errcall.html">DbEnv.set_errcall</a> will run in the same
+thread as the caller to <a href="../../api_java/env_set_errcall.html">DbEnv.set_errcall</a>. Make sure that thread
+remains running until your application exits or until <a href="../../api_java/env_close.html">DbEnv.close</a>
+is called.
+<p><li>If you are using custom class loaders in your application, make sure
+that the Berkeley DB classes are loaded by the system class loader, not a
+custom class loader. This is due to a JVM bug that can cause an access
+violation during finalization (see bug 4238486 in Sun Microsystems'
+Java Bug Database).
+</ol>
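+<p>The following short sketch pulls together the explicit close and the
+return-value convention described above. It is only a sketch: the database
+name and key are placeholders, and it assumes the Db.open signature used by
+this release's Java API, in which a transaction handle is passed as the
+first argument.
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+// Illustrative sketch: check Db.get return values and close the handle
+// explicitly, since finalization will not do it (see the notes above).
+public class GetExample
+{
+    public static void main(String[] args) throws Exception
+    {
+        Db db = new Db(null, 0);
+        db.open(null, "access.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+        try {
+            Dbt key = new Dbt();
+            key.set_data("fruit".getBytes());
+            key.set_size("fruit".length());
+
+            Dbt data = new Dbt();
+            data.set_flags(Db.DB_DBT_MALLOC);
+
+            // "Not found" is reported through the return value, not an exception.
+            if (db.get(null, key, data, 0) == Db.DB_NOTFOUND)
+                System.out.println("no record stored under that key");
+            else
+                System.out.println(new String(data.get_data(), 0, data.get_size()));
+        } finally {
+            db.close(0);    // always match the open() with a close()
+        }
+    }
+}</pre></blockquote>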
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/java/compat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/am_conv.html b/libdb/docs/ref/lock/am_conv.html
new file mode 100644
index 0000000..ad04056
--- /dev/null
+++ b/libdb/docs/ref/lock/am_conv.html
@@ -0,0 +1,124 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB Transactional Data Store locking conventions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/cam_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/nondb.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB Transactional Data Store locking conventions</h1>
+<p>All Berkeley DB access methods follow the same conventions for locking
+database objects. Applications that do their own locking and also do
+locking via the access methods must be careful to adhere to these
+conventions.
+<p>Whenever a Berkeley DB database is opened, the <a href="../../api_c/db_class.html">DB</a> handle is assigned
+a unique locker ID. Unless transactions are specified, that ID is used
+as the locker for all calls that the Berkeley DB methods make to the lock
+subsystem. In order to lock a file, pages in the file, or records in
+the file, we must create a unique ID that can be used as the object to
+be locked in calls to the lock manager. Under normal operation, that
+object is a 28-byte value created by the concatenation of a unique file
+identifier, a page or record number, and an object type (page or record).
+<p>In a transaction-protected environment, database create and delete
+operations are recoverable and single-threaded. This single-threading
+is achieved using a single lock for the entire environment that must be
+acquired before beginning a create or delete operation. In this case,
+the object on which Berkeley DB will lock is a 4-byte unsigned integer with
+a value of 0.
+<p>If applications are using the lock subsystem directly while they are
+also using locking via the access methods, they must take care not to
+inadvertently lock objects that happen to be equal to the unique file
+IDs used to lock files. This is most easily accomplished by using a
+lock object with a length different from the values used by Berkeley DB.
+<p>All the access methods other than Queue use standard read/write locks
+in a simple multiple-reader/single writer page-locking scheme. An
+operation that returns data (for example, <a href="../../api_c/db_get.html">DB-&gt;get</a> or
+<a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>) obtains a read lock on all the pages accessed while
+locating the requested record. When an update operation is requested
+(for example, <a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/dbc_del.html">DBcursor-&gt;c_del</a>), the page containing
+the updated (or new) data is write-locked. As read-modify-write cycles
+are quite common and are deadlock-prone under normal circumstances, the
+Berkeley DB interfaces allow the application to specify the <a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a>
+flag, which causes operations to immediately obtain a write lock, even
+though they are only reading the data. Although this may reduce
+concurrency somewhat, it reduces the probability of deadlock. In the
+presence of transactions, page locks are held until transaction commit.
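+<p>As a sketch of this read-modify-write pattern (shown here with the Java
+API purely for illustration; the <b>dbenv</b> and <b>db</b> handles and the
+update logic are assumptions), an application can pass the DB_RMW flag on
+the read so that the write lock is acquired up front:
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+// Illustrative sketch: acquire the write lock on the read so that the
+// subsequent update cannot deadlock against another read-modify-write cycle.
+public class ReadModifyWrite
+{
+    static void update(DbEnv dbenv, Db db, Dbt key) throws DbException
+    {
+        DbTxn txn = dbenv.txn_begin(null, 0);
+        try {
+            Dbt data = new Dbt();
+            data.set_flags(Db.DB_DBT_MALLOC);
+            // DB_RMW: write-lock the page even though this call only reads.
+            if (db.get(txn, key, data, Db.DB_RMW) == 0) {
+                // ... compute the new value for this record ...
+                db.put(txn, key, data, 0);
+            }
+            txn.commit(0);
+        } catch (DbException e) {
+            txn.abort();
+            throw e;
+        }
+    }
+}</pre></blockquote>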
+<p>The Queue access method does not hold long-term page locks. Instead,
+page locks are held only long enough to locate records or to change
+metadata on a page, and record locks are held for the appropriate
+duration. In the presence of transactions, record locks are held until
+transaction commit. For Berkeley DB operations, record locks are held until
+operation completion; for <a href="../../api_c/dbc_class.html">DBC</a> operations, record locks are held
+until subsequent records are returned or the cursor is closed.
+<p>Under non-transaction operations, the access methods do not normally
+hold locks across calls to the Berkeley DB interfaces. The one exception to
+this rule is when cursors are used. Because cursors maintain a position
+in a file, they must hold locks across calls; in fact, they will hold
+locks until the cursor is closed.
+<p>In this mode, the assignment of locker IDs to <a href="../../api_c/db_class.html">DB</a> and cursor
+handles is complicated. If the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> option was specified
+when the <a href="../../api_c/db_class.html">DB</a> handle was opened, each use of a <a href="../../api_c/db_class.html">DB</a> has its
+own unique locker ID, and each cursor is assigned its own unique locker
+ID when it is created, so <a href="../../api_c/db_class.html">DB</a> handle and cursor operations can
+all conflict with one another. (This is because, when Berkeley DB handles
+may be shared by multiple threads of control, the Berkeley DB library cannot
+identify which operations are performed by which threads of control,
+and it must ensure that two different threads of control are not
+simultaneously modifying the same data structure. By assigning each
+<a href="../../api_c/db_class.html">DB</a> handle and cursor its own locker, two threads of control
+sharing a handle cannot inadvertently interfere with each other.)
+<p>This has important implications. If a single thread of control opens
+two cursors, uses a combination of cursor and non-cursor operations, or
+begins two separate transactions, the operations are performed on behalf
+of different lockers. Conflicts that arise between these different
+lockers may not cause actual deadlocks, but can, in fact, permanently
+block the thread of control. For example, assume that an application
+creates a cursor and uses it to read record A. Now, assume a second
+cursor is opened, and the application attempts to write record A using
+the second cursor. Unfortunately, the first cursor has a read lock, so
+the second cursor cannot obtain its write lock. However, that read lock
+is held by the same thread of control, so the read lock can never be
+released if we block waiting for the write lock. This might appear to
+be a deadlock from the application's perspective, but Berkeley DB cannot
+identify it as such because it has no knowledge of which lockers belong
+to which threads of control. For this reason, application designers
+are encouraged to close cursors as soon as they are done with them.
+<p>If the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> option was not specified when the <a href="../../api_c/db_class.html">DB</a>
+handle was opened, all uses of the <a href="../../api_c/db_class.html">DB</a> handle and all cursors
+created using that handle will use the same locker ID for all
+operations. In this case, if a single thread of control opens two
+cursors or uses a combination of cursor and non-cursor operations, these
+operations are performed on behalf of the same locker, and so cannot
+deadlock or block the thread of control.
+<p>Complicated operations that require multiple cursors (or combinations
+of cursor and non-cursor operations) can be performed in a few different ways.
+First, they may be performed within a transaction, in which case all
+operations lock on behalf of the designated transaction. Second, they
+may be performed using a local <a href="../../api_c/db_class.html">DB</a> handle, although, as
+<a href="../../api_c/db_open.html">DB-&gt;open</a> operations are relatively slow, this may not be a good
+idea. Finally, the <a href="../../api_c/dbc_dup.html">DBcursor-&gt;c_dup</a> function duplicates a cursor, using
+the same locker ID as the originating cursor. There is no way to
+achieve this duplication functionality through the <a href="../../api_c/db_class.html">DB</a> handle
+calls, but any <a href="../../api_c/db_class.html">DB</a> call can be implemented by one or more calls
+through a cursor.
+<p>When the access methods use transactions, many of these problems disappear.
+The transaction ID is used as the locker ID for all operations performed
+on behalf of the transaction. This means that the application may open
+multiple cursors on behalf of the same transaction and these cursors will
+all share a common locker ID. This is safe because transactions cannot
+span threads of control, so the library knows that two cursors in the same
+transaction cannot modify the database concurrently.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/cam_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/nondb.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/cam_conv.html b/libdb/docs/ref/lock/cam_conv.html
new file mode 100644
index 0000000..e3eebae
--- /dev/null
+++ b/libdb/docs/ref/lock/cam_conv.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB Concurrent Data Store locking conventions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/twopl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/am_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB Concurrent Data Store locking conventions</h1>
+<p>The Berkeley DB Concurrent Data Store product has a simple set of conventions for locking. It
+provides multiple-reader/single-writer semantics, but not per-page
+locking or transaction recoverability. As such, it does its locking
+entirely at the interface to the access methods.
+<p>The object it locks is the file, identified by its unique file number.
+The locking matrix is not built from the two standard lock modes; instead,
+a four-lock set is used, consisting of the following:
+<p><dl compact>
+<p><dt>DB_LOCK_NG<dd>not granted (always 0)
+<dt>DB_LOCK_READ<dd>read (shared)
+<dt>DB_LOCK_WRITE<dd>write (exclusive)
+<dt>DB_LOCK_IWRITE<dd>intention-to-write (shared with NG and READ, but conflicts with WRITE and IWRITE)
+</dl>
+<p>The IWRITE lock is used for cursors that will be used for updating
+(IWRITE locks are implicitly obtained for write operations through the
+Berkeley DB handles, for example, <a href="../../api_c/db_put.html">DB-&gt;put</a> or <a href="../../api_c/db_del.html">DB-&gt;del</a>). While
+the cursor is reading, the IWRITE lock is held; but as soon as the
+cursor is about to modify the database, the IWRITE is upgraded to a
+WRITE lock. This upgrade blocks until all readers have exited the
+database. Because only one IWRITE lock is allowed at any one time, no
+two cursors can ever try to upgrade to a WRITE lock at the same time,
+and therefore deadlocks are prevented, which is essential because Berkeley DB Concurrent Data Store
+does not include deadlock detection and recovery.
+<p>Applications that need to lock compatibly with Berkeley DB Concurrent Data Store must obey the
+following rules:
+<p><ol>
+<p><li>Use only lock modes DB_LOCK_NG, DB_LOCK_READ, DB_LOCK_WRITE,
+DB_LOCK_IWRITE.
+<p><li>Never attempt to acquire a WRITE lock on an object that is
+already locked with a READ lock.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/twopl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/am_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/config.html b/libdb/docs/ref/lock/config.html
new file mode 100644
index 0000000..b8a400b
--- /dev/null
+++ b/libdb/docs/ref/lock/config.html
@@ -0,0 +1,44 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring locking</h1>
+<p>The <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> method specifies that the deadlock detector
+should be run whenever a lock is about to block. This option provides
+for rapid detection of deadlocks at the expense of potentially frequent
+invocations of the deadlock detector. On a fast processor with a highly
+contentious application where response time is critical, this is a good
+choice. An option argument to the <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> method
+indicates which lock requests should be rejected.
+<p>In general, when applications are not specifying lock and transaction
+timeout values, the <a href="../../api_c/env_set_lk_detect.html#DB_LOCK_DEFAULT">DB_LOCK_DEFAULT</a> option is probably the
+correct first choice, and other options should only be selected based
+on evidence that they improve transaction throughput. If an application
+has long-running transactions, <a href="../../api_c/env_set_lk_detect.html#DB_LOCK_YOUNGEST">DB_LOCK_YOUNGEST</a> will guarantee
+that transactions eventually complete, but it may do so at the expense
+of a large number of lock request rejections (and therefore, transaction
+aborts).
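+<p>A minimal configuration sketch follows (Java API shown for illustration;
+the environment home directory and flag set are placeholders, and the
+DbEnv.set_lk_detect wrapper is assumed to be available in this release):
+<p><blockquote><pre>import com.sleepycat.db.*;
+
+// Illustrative sketch: run the deadlock detector whenever a lock request
+// is about to block, using the default lock-rejection policy.
+public class DetectOnBlock
+{
+    public static void main(String[] args) throws Exception
+    {
+        DbEnv dbenv = new DbEnv(0);
+        dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);   // configure before open
+        dbenv.open("/var/tmp/envhome",
+                   Db.DB_CREATE | Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+                   Db.DB_INIT_LOG | Db.DB_INIT_TXN, 0);
+        // ... run the application ...
+        dbenv.close(0);
+    }
+}</pre></blockquote>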
+<p>The alternative to using the <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> interface is
+to explicitly perform deadlock detection using the Berkeley DB
+<a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> interface.
+<p>The <a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a> method allows you to specify your own
+locking conflicts matrix. This is an advanced configuration option,
+and is almost never necessary.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/dead.html b/libdb/docs/ref/lock/dead.html
new file mode 100644
index 0000000..48681b2
--- /dev/null
+++ b/libdb/docs/ref/lock/dead.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock detection</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/stdmode.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/timeout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock detection</h1>
+<p>Practically any application that uses locking may deadlock. The
+exceptions to this rule are when all the threads of control accessing
+the database are read-only or when the Berkeley DB Concurrent Data Store product is used; the
+Berkeley DB Concurrent Data Store product guarantees deadlock-free operation at the expense of
+reduced concurrency. While there are data access patterns that are
+deadlock free (for example, an application doing nothing but overwriting
+fixed-length records in an already existing database), they are
+extremely rare.
+<p>When a deadlock exists in the system, all the threads of control
+involved in the deadlock are, by definition, waiting on a lock. The
+deadlock detector examines the state of the lock manager, identifies
+a deadlock, and selects one of the lock requests to reject. (See
+<a href="../../ref/lock/config.html">Configuring locking</a> for a
+discussion of how a participant is selected). The <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> or
+<a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> call for which the selected participant is waiting then
+returns a <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> error. When using the Berkeley DB access
+methods, this error return is propagated back through the Berkeley DB
+database handle interface to the calling application.
+<p>The deadlock detector identifies deadlocks by looking for a cycle in
+what is commonly referred to as its "waits-for" graph. More precisely,
+the deadlock detector reads through the lock table, and reviews each
+lock object currently locked. Each object has lockers that currently
+hold locks on the object and possibly a list of lockers waiting for a
+lock on the object. Each object's list of waiting lockers defines a
+partial ordering. That is, for a particular object, every waiting
+locker comes after every holding locker because that holding locker must
+release its lock before the waiting locker can make forward progress.
+Conceptually, after each object has been examined, the partial orderings
+are topologically sorted. If this topological sort reveals any cycles,
+the lockers forming the cycle are involved in a deadlock. One of the
+lockers is selected for rejection.
+<p>It is possible that rejecting a single lock request involved in a
+deadlock is not enough to allow other lockers to make forward progress.
+Unfortunately, at the time a lock request is selected for rejection,
+there is not enough information available to determine whether rejecting
+that single lock request will allow forward progress or not. Because
+most applications have few deadlocks, Berkeley DB takes the conservative
+approach, rejecting as few requests as may be necessary to resolve the
+existing deadlocks. In particular, for each unique cycle found in the
+waits-for graph described in the previous paragraph, only one lock
+request is selected for rejection. However, if there are multiple
+cycles, one lock request from each cycle is selected for rejection.
+Only after the enclosing transactions have received the lock request
+rejection return and aborted their transactions can it be determined
+whether it is necessary to reject additional lock requests in order to
+allow forward progress.
+<p>The <a href="../../utility/db_deadlock.html">db_deadlock</a> utility performs deadlock detection by calling
+the underlying Berkeley DB <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> method at regular intervals
+(<a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> runs a single iteration of the Berkeley DB deadlock
+detector). Alternatively, applications can create their own deadlock
+utility or thread by calling the <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> method directly, or by
+using the <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> method to configure Berkeley DB to
+automatically run the deadlock detector whenever there is a conflict
+over a lock. The tradeoff between using the <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> and
+<a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> methods is that automatic deadlock detection will
+resolve deadlocks more quickly (because the deadlock detector runs
+as soon as the lock request blocks); however, automatic deadlock
+detection often runs the deadlock detector when there is no need for
+it, and for applications with large numbers of locks and/or where many
+operations block temporarily on locks but are soon able to proceed,
+automatic detection can decrease performance.
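+<p>For illustration only (this example does not appear in the original
+text), an application-specific deadlock detection thread might call
+<a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> at a fixed interval, much as the
+<a href="../../utility/db_deadlock.html">db_deadlock</a> utility does; the one-second interval and the
+DB_LOCK_DEFAULT policy below are arbitrary choices.
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+/* Run one iteration of the deadlock detector every second. */
+void
+deadlock_detector_loop(DB_ENV *dbenv)
+{
+    int rejected, ret;
+
+    for (;;) {
+        if ((ret = dbenv-&gt;lock_detect(dbenv,
+            0, DB_LOCK_DEFAULT, &amp;rejected)) != 0)
+            dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;lock_detect");
+        else if (rejected != 0)
+            dbenv-&gt;errx(dbenv, "rejected %d lock requests", rejected);
+        sleep(1);
+    }
+}</pre></blockquote>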
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/stdmode.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/timeout.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/deaddbg.html b/libdb/docs/ref/lock/deaddbg.html
new file mode 100644
index 0000000..a07409a
--- /dev/null
+++ b/libdb/docs/ref/lock/deaddbg.html
@@ -0,0 +1,141 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock debugging</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/timeout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock debugging</h1>
+<p>An occasional debugging problem in Berkeley DB applications is unresolvable
+deadlock. The output of the <b>-Co</b> flags of the <a href="../../utility/db_stat.html">db_stat</a>
+utility can be used to detect and debug these problems. The following
+is a typical example of the output of this utility:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+       1  READ        1  HELD    a.db  handle    0
+80000004  WRITE       1  HELD    a.db  page      3</pre></blockquote>
+<p>In this example, we have opened a database and stored a single key/data
+pair in it. Because we have a database handle open, we have a read lock
+on that database handle. The database handle lock is the read lock
+labelled <i>handle</i>. (We can normally ignore handle locks for
+the purposes of database debugging, as they will only conflict with
+other handle operations, for example, an attempt to remove the database
+will block because we are holding the handle locked, but reading and
+writing the database will not conflict with the handle lock.)
+<p>It is important to note that locker IDs are 32-bit unsigned integers,
+and are divided into two name spaces. Locker IDs with the high bit set
+(that is, values 80000000 or higher), are locker IDs associated with
+transactions. Locker IDs without the high bit set are locker IDs that
+are not associated with a transaction. Locker IDs associated with
+transactions map one-to-one with the transaction, that is, a transaction
+never has more than a single locker ID, and all of the locks acquired
+by the transaction will be acquired on behalf of the same locker ID.
+<p>We also hold a write lock on the database page where we stored the new
+key/data pair. The page lock is labeled <i>page</i> and is on page
+number 3. If we were to put an additional key/data pair in the
+database, we would see the following output:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+80000004  WRITE       2  HELD    a.db  page      3
+       1  READ        1  HELD    a.db  handle    0</pre></blockquote>
+<p>That is, we have acquired a second reference count to page number 3, but
+have not acquired any new locks. If we add an entry to a different page
+in the database, we would acquire additional locks:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+       1  READ        1  HELD    a.db  handle    0
+80000004  WRITE       2  HELD    a.db  page      3
+80000004  WRITE       1  HELD    a.db  page      2</pre></blockquote>
+<p>Here's a simple example of one lock blocking another one:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+80000004  WRITE       1  HELD    a.db  page      2
+80000005  WRITE       1  WAIT    a.db  page      2
+       1  READ        1  HELD    a.db  handle    0
+80000004  READ        1  HELD    a.db  page      1</pre></blockquote>
+<p>In this example, there are two different transactional lockers (80000004 and
+80000005). Locker 80000004 is holding a write lock on page 2, and
+locker 80000005 is waiting for a write lock on page 2. This is not a
+deadlock, because locker 80000004 is not blocked on anything.
+Presumably, the thread of control using locker 80000004 will proceed,
+eventually release its write lock on page 2, at which point the thread
+of control using locker 80000005 can also proceed, acquiring a write
+lock on page 2.
+<p>If lockers 80000004 and 80000005 are not in different threads of
+control, the result would be <i>self deadlock</i>. Self deadlock
+is not a true deadlock, and won't be detected by the Berkeley DB deadlock
+detector. It's not a true deadlock because, if work could continue to
+be done on behalf of locker 80000004, then the lock would eventually be
+released, and locker 80000005 could acquire the lock and itself proceed.
+So, the key element is that the thread of control holding the lock
+cannot proceed because it is the same thread of control that is blocked
+waiting on the lock.
+<p>Here's an example of three transactions reaching true deadlock. First,
+three different threads of control opened the database, acquiring three
+database handle read locks.
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+       1  READ        1  HELD    a.db  handle    0
+       3  READ        1  HELD    a.db  handle    0
+       5  READ        1  HELD    a.db  handle    0</pre></blockquote>
+<p>The three threads then each began a transaction, and put a key/data pair
+on a different page:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+80000008  WRITE       1  HELD    a.db  page      4
+       1  READ        1  HELD    a.db  handle    0
+       3  READ        1  HELD    a.db  handle    0
+       5  READ        1  HELD    a.db  handle    0
+80000006  READ        1  HELD    a.db  page      1
+80000007  READ        1  HELD    a.db  page      1
+80000008  READ        1  HELD    a.db  page      1
+80000006  WRITE       1  HELD    a.db  page      2
+80000007  WRITE       1  HELD    a.db  page      3</pre></blockquote>
+<p>The thread using locker 80000006 put a new key/data pair on page 2, the
+thread using locker 80000007, on page 3, and the thread using locker
+80000008 on page 4. Because the database is a 2-level Btree, the tree
+was searched, and so each transaction acquired a read lock on the Btree
+root page (page 1) as part of this operation.
+<p>The three threads then each attempted to put a second key/data pair on
+a page currently locked by another thread. The thread using locker
+80000006 tried to put a key/data pair on page 3, the thread using locker
+80000007 on page 4, and the thread using locker 80000008 on page 2:
+<p><blockquote><pre>Locks grouped by object
+Locker    Mode    Count  Status  ----------- Object ----------
+80000008  WRITE       1  HELD    a.db  page      4
+80000007  WRITE       1  WAIT    a.db  page      4
+       1  READ        1  HELD    a.db  handle    0
+       3  READ        1  HELD    a.db  handle    0
+       5  READ        1  HELD    a.db  handle    0
+80000006  READ        2  HELD    a.db  page      1
+80000007  READ        2  HELD    a.db  page      1
+80000008  READ        2  HELD    a.db  page      1
+80000006  WRITE       1  HELD    a.db  page      2
+80000008  WRITE       1  WAIT    a.db  page      2
+80000007  WRITE       1  HELD    a.db  page      3
+80000006  WRITE       1  WAIT    a.db  page      3</pre></blockquote>
+<p>Now, each of the threads of control is blocked, waiting on a different
+thread of control.
+The thread using locker 80000007 is blocked by
+the thread using locker 80000008, due to the lock on page 4.
+The thread using locker 80000008 is blocked by
+the thread using locker 80000006, due to the lock on page 2.
+And the thread using locker 80000006 is blocked by
+the thread using locker 80000007, due to the lock on page 3.
+Since none of the threads of control can make
+progress, one of them will have to be killed in order to resolve the
+deadlock.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/timeout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/intro.html b/libdb/docs/ref/lock/intro.html
new file mode 100644
index 0000000..df90ae9
--- /dev/null
+++ b/libdb/docs/ref/lock/intro.html
@@ -0,0 +1,107 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/program/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and locking</h1>
+<p>The locking subsystem provides interprocess and intraprocess concurrency
+control mechanisms. Although the lock system is used extensively by
+the Berkeley DB access methods and transaction system, it may also be used as
+a standalone subsystem to provide concurrency control to any set of
+designated resources.
+<p>The Lock subsystem is created, initialized, and opened by calls to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> with the <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> or <a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a>
+flags specified.
+<p>The <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> interface is used to acquire and release locks.
+The <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> function performs any number of lock operations
+atomically. It also provides the capability to release all locks held
+by a particular locker and release all the locks on a particular object.
+(Performing multiple lock operations atomically is useful in performing
+Btree traversals -- you want to acquire a lock on a child page and once
+acquired, immediately release the lock on its parent. This is
+traditionally referred to as <i>lock-coupling</i>). Two additional
+interfaces, <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> and <a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a>, are provided. These
+interfaces are simpler front-ends to the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> functionality,
+where <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> acquires a lock, and <a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> releases
+a lock that was acquired using <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> or <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a>.
+All locks explicitly requested by an application should be released via
+calls to <a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> or <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a>. Using <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a>
+instead of separate calls to <a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> and <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> also
+reduces the synchronization overhead between multiple threads or
+processes. The three interfaces are fully compatible, and may be used
+interchangeably.
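+<p>The following sketch is offered only as an illustration (it is not
+taken from the original text): it acquires and then releases a single
+read lock on an application-defined object, whose name is an arbitrary
+example.
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+lock_example(DB_ENV *dbenv)
+{
+    DBT obj;
+    DB_LOCK lock;
+    u_int32_t locker;
+    int ret, t_ret;
+    char *objname = "example resource";
+
+    /* Obtain a locker ID for this thread of control. */
+    if ((ret = dbenv-&gt;lock_id(dbenv, &amp;locker)) != 0)
+        return (ret);
+
+    /* The lock object is an arbitrary byte string. */
+    memset(&amp;obj, 0, sizeof(obj));
+    obj.data = objname;
+    obj.size = (u_int32_t)strlen(objname);
+
+    /* Acquire a read lock, then release it. */
+    if ((ret = dbenv-&gt;lock_get(dbenv,
+        locker, 0, &amp;obj, DB_LOCK_READ, &amp;lock)) == 0)
+        ret = dbenv-&gt;lock_put(dbenv, &amp;lock);
+
+    /* Release the locker ID once all of its locks are released. */
+    if ((t_ret = dbenv-&gt;lock_id_free(dbenv, locker)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>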
+<p>Applications must specify lockers and lock objects appropriately. When
+used with the Berkeley DB access methods, lockers and objects are handled
+completely internally, but an application using the lock manager
+directly must either use the same conventions as the access methods or
+define its own convention to which it adheres. If an application is
+using the access methods with locking at the same time that it is
+calling the lock manager directly, the application must follow a
+convention that is compatible with the access methods' use of the
+locking subsystem. See <a href="../../ref/lock/am_conv.html">Access
+method locking conventions</a> for more information.
+<p>The <a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a> function returns a unique ID that may safely be used
+as the locker parameter to the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> interface. The access
+methods use <a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a> to generate unique lockers for the cursors
+associated with a database.
+<p>The <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> function provides the programmatic interface to
+the Berkeley DB deadlock detector. Whenever two threads of control issue lock
+requests concurrently, the possibility for deadlock arises. A deadlock
+occurs when two or more threads of control are blocked, waiting for
+actions that another one of the blocked threads must take. For example,
+assume that threads A and B have each obtained read locks on object X.
+Now suppose that both threads want to obtain write locks on object X.
+Neither thread can be granted its write lock (because of the other
+thread's read lock). Both threads block and will never unblock because
+the event for which they are waiting can never happen.
+<p>The deadlock detector examines all the locks held in the environment,
+and identifies situations where no thread can make forward progress.
+It then selects one of the participants in the deadlock (according to
+the argument that was specified to <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a>), and
+forces it to return the value <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, which indicates
+that a deadlock occurred. The thread receiving such an error must
+release all of its locks and undo any incomplete modifications to the
+locked resource. Locks are typically released, and modifications
+undone, by closing any cursors involved in the operation and aborting
+any transaction enclosing the operation. The operation may optionally
+be retried.
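+<p>As an illustrative sketch only (not part of the original page), a
+transactional update might handle <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> by aborting the
+enclosing transaction and retrying; the database handle and key/data
+pair are assumed to be supplied by the caller, and a production
+application would normally bound the number of retries.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+put_with_retry(DB_ENV *dbenv, DB *db, DBT *key, DBT *data)
+{
+    DB_TXN *txn;
+    int ret;
+
+retry:
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+        return (ret);
+
+    if ((ret = db-&gt;put(db, txn, key, data, 0)) != 0) {
+        /* Release all locks held by the transaction. */
+        (void)txn-&gt;abort(txn);
+        if (ret == DB_LOCK_DEADLOCK)
+            goto retry;    /* The operation may be retried. */
+        return (ret);
+    }
+    return (txn-&gt;commit(txn, 0));
+}</pre></blockquote>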
+<p>The <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a> function returns information about the status of
+the lock subsystem. It is the programmatic interface used by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The locking subsystem is closed by the call to <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>.
+<p>Finally, the entire locking subsystem may be discarded using the
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> interface.
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Locking Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><a href="../../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><a href="../../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td><td>Acquire/release locks</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/max.html b/libdb/docs/ref/lock/max.html
new file mode 100644
index 0000000..2fa82bf
--- /dev/null
+++ b/libdb/docs/ref/lock/max.html
@@ -0,0 +1,88 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring locking: sizing the system</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/stdmode.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring locking: sizing the system</h1>
+<p>The lock system is sized using the following three functions:
+<p><blockquote><pre><a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a>
+<a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a>
+<a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a></pre></blockquote>
+<p>The <a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a>, <a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a>,
+and <a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a> methods specify the maximum number of
+locks, lockers, and locked objects supported by the lock subsystem,
+respectively. The maximum number of locks is the number of locks that
+can be simultaneously requested in the system. The maximum number of
+lockers is the number of lockers that can simultaneously request locks
+in the system. The maximum number of lock objects is the number of
+objects that can simultaneously be locked in the system. Selecting
+appropriate values requires an understanding of your application and
+its databases. If the values are too small, requests for locks in an
+application will fail. If the values are too large, the locking
+subsystem will consume more resources than is necessary. It is better
+to err in the direction of allocating too many locks, lockers, and
+objects because increasing the number of locks does not require large
+amounts of additional resources.
+<p>The recommended algorithm for selecting the maximum number of locks,
+lockers, and lock objects is to run the application under stressful
+conditions and then review the lock system's statistics to determine
+the maximum number of locks, lockers, and lock objects that were used.
+Then, double these values for safety. However, in some large
+applications, finer granularity of control is necessary in order to
+minimize the size of the Lock subsystem.
+<p>The maximum number of lockers can be estimated as follows:
+<p><ul type=disc>
+<li>If the database environment is configured to use transactions, the
+maximum number of lockers needed is the number of simultaneously active
+transactions and child transactions (where a child transaction is active
+until its parent commits or aborts, not until it commits or aborts).
+<li>If the database environment is not configured to use transactions, the
+maximum number of lockers needed is the number of simultaneous
+non-cursor operations plus an additional locker for every simultaneously
+open cursor.
+</ul>
+<p>The maximum number of lock objects needed can be estimated as follows:
+<p><ul type=disc>
+<li>For Btree and Recno access methods, you will need one lock object per
+level of the database tree, at a minimum. (Unless keys are quite large
+with respect to the page size, neither Recno nor Btree database trees
+should ever be deeper than five levels.) Then, you will need one lock
+object for each leaf page of the database tree that will be
+simultaneously accessed.
+<li>For the Queue access method, you will need one lock object per record
+that is simultaneously accessed. To this, add one lock object per page
+that will be simultaneously accessed. (Because the Queue access method
+uses fixed-length records and the database page size is known, it is
+possible to calculate the number of pages -- and, therefore, the lock
+objects -- required.) Deleted records skipped by a <a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a> or
+<a href="../../api_c/dbc_get.html#DB_PREV">DB_PREV</a> operation do not require a separate lock object.
+Further, if your application is using transactions, no database
+operation will ever use more than three lock objects at any time.
+<li>For the Hash access method, you only need a single lock object.
+</ul>
+<p>For all access methods, you should then add an additional lock object
+per database for the database's metadata page.
+<p>The maximum number of locks required by an application cannot be easily
+estimated. It is possible to calculate a maximum number of locks by
+multiplying the maximum number of lockers, times the maximum number of
+lock objects, times two (two for the two possible lock modes for each
+object, read and write). However, this is a pessimal value, and real
+applications are unlikely to actually need that many locks. Reviewing
+the Lock subsystem statistics is the best way to determine this value.
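+<p>Purely as an illustration (this fragment is not in the original
+documentation), the sizing methods are called on the environment handle
+before <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>; the values below are placeholders that an
+application would replace with numbers derived from its own lock
+statistics.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+size_lock_subsystem(DB_ENV *dbenv)
+{
+    int ret;
+
+    /* Placeholder values; derive real ones from DB_ENV-&gt;lock_stat. */
+    if ((ret = dbenv-&gt;set_lk_max_locks(dbenv, 5000)) != 0 ||
+        (ret = dbenv-&gt;set_lk_max_lockers(dbenv, 1000)) != 0 ||
+        (ret = dbenv-&gt;set_lk_max_objects(dbenv, 5000)) != 0)
+        dbenv-&gt;err(dbenv, ret, "lock subsystem sizing");
+    return (ret);
+}</pre></blockquote>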
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/stdmode.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/nondb.html b/libdb/docs/ref/lock/nondb.html
new file mode 100644
index 0000000..c10304a
--- /dev/null
+++ b/libdb/docs/ref/lock/nondb.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking and non-Berkeley DB applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/am_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking and non-Berkeley DB applications</h1>
+<p>The Lock subsystem is useful outside the context of Berkeley DB. It can be
+used to manage concurrent access to any collection of either ephemeral
+or persistent objects. That is, the lock region can persist across
+invocations of an application, so it can be used to provide long-term
+locking (for example, conference room scheduling).
+<p>In order to use the locking subsystem in such a general way, the
+applications must adhere to a convention for identifying objects and
+lockers. Consider a conference room scheduling problem, in which there
+are three conference rooms scheduled in half-hour intervals. The
+scheduling application must then select a way to identify each
+conference room/time slot combination. In this case, we could describe
+the objects being locked as bytestrings consisting of the conference
+room name, the date when it is needed, and the beginning of the
+appropriate half-hour slot.
+<p>Lockers are 32-bit numbers, so we might choose to use the User ID of
+the individual running the scheduling program. To schedule half-hour
+slots, all the application needs to do is issue a <a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> call
+for the appropriate locker/object pair. To schedule a longer slot, the
+application needs to issue a <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> call, with one
+<a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> operation per half-hour -- up to the total length. If
+the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> call fails, the application would have to release
+the parts of the time slot that were obtained.
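+<p>A minimal sketch (added here for illustration, not part of the
+original text) of reserving a single half-hour slot might encode the
+room, date, and time into a byte string and request a write lock; the
+string format and the use of the process's user ID as the locker are
+assumptions of this example.
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+int
+reserve_slot(DB_ENV *dbenv, const char *room,
+    const char *date, const char *slot, DB_LOCK *lockp)
+{
+    DBT obj;
+    char name[128];
+
+    /* Example object name: "oak room|2002-06-14|09:00". */
+    (void)snprintf(name, sizeof(name), "%s|%s|%s", room, date, slot);
+
+    memset(&amp;obj, 0, sizeof(obj));
+    obj.data = name;
+    obj.size = (u_int32_t)strlen(name);
+
+    /*
+     * Use the user ID as the locker, as suggested above; DB_LOCK_NOWAIT
+     * returns immediately if the slot is already reserved.
+     */
+    return (dbenv-&gt;lock_get(dbenv, (u_int32_t)getuid(),
+        DB_LOCK_NOWAIT, &amp;obj, DB_LOCK_WRITE, lockp));
+}</pre></blockquote>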
+<p>To cancel a reservation, the application would make the appropriate
+<a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> calls. To reschedule a reservation, the
+<a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a> and <a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a> calls could all be made inside of
+a single <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> call. The output of <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a> could
+be post-processed into a human-readable schedule of conference room
+use.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/am_conv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/notxn.html b/libdb/docs/ref/lock/notxn.html
new file mode 100644
index 0000000..3428e55
--- /dev/null
+++ b/libdb/docs/ref/lock/notxn.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking without transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/page.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/twopl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking without transactions</h1>
+<p>If an application runs with locking specified, but not transactions (for
+example, <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> is called with <a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a> or
+<a href="../../api_c/env_open.html#DB_INIT_CDB">DB_INIT_CDB</a> specified, but not <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>), locks are
+normally acquired during each Berkeley DB operation and released before the
+operation returns to the caller. The only exception is in the case of
+cursor operations. Cursors identify a particular position in a file.
+For this reason, cursors must retain read locks across cursor calls to
+make sure that the position is uniquely identifiable during a subsequent
+cursor call, and so that an operation using <a href="../../api_c/dbc_get.html#DB_CURRENT">DB_CURRENT</a> will
+always refer to the same record as a previous cursor call. These cursor
+locks cannot be released until the cursor is either repositioned and a
+new cursor lock established (for example, using the <a href="../../api_c/dbc_get.html#DB_NEXT">DB_NEXT</a>
+or <a href="../../api_c/dbc_get.html#DB_SET">DB_SET</a> flags), or the cursor is closed. As a result,
+application writers are encouraged to close cursors as soon as
+possible.
+<p>It is important to realize that concurrent applications that use locking
+must ensure that two concurrent threads do not deadlock each other.
+However, because Btree and Hash access method page splits can occur at
+any time, there is virtually no way to guarantee that an application
+that writes the database cannot deadlock. Applications running without
+the protection of transactions may deadlock, and can leave the database
+in an inconsistent state when they do so. Applications that need
+concurrent access, but not transactions, are more safely implemented
+using the Berkeley DB Concurrent Data Store Product.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/page.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/twopl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/page.html b/libdb/docs/ref/lock/page.html
new file mode 100644
index 0000000..2cccde6
--- /dev/null
+++ b/libdb/docs/ref/lock/page.html
@@ -0,0 +1,72 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking granularity</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/deaddbg.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/notxn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking granularity</h1>
+<p>With the exception of the Queue access method, the Berkeley DB access methods
+do page-level locking. The size of pages in a database may be set when
+the database is created by calling the <a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a> method. If
+not specified by the application, Berkeley DB selects a page size that will
+provide the best I/O performance by setting the page size equal to the
+block size of the underlying file system. Selecting a smaller page size
+can result in increased concurrency for some applications.
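+<p>For illustration only (this example is not part of the original
+page), a smaller page size can be requested before the database is
+created; the 1024-byte page size and the database name are arbitrary
+choices.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+create_small_page_db(DB_ENV *dbenv, DB **dbp)
+{
+    DB *db;
+    int ret;
+
+    if ((ret = db_create(&amp;db, dbenv, 0)) != 0)
+        return (ret);
+
+    /* Smaller pages can reduce page-level lock contention. */
+    if ((ret = db-&gt;set_pagesize(db, 1024)) != 0 ||
+        (ret = db-&gt;open(db, NULL,
+        "contended.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+        (void)db-&gt;close(db, 0);
+        return (ret);
+    }
+    *dbp = db;
+    return (0);
+}</pre></blockquote>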
+<p>In the Btree access method, Berkeley DB uses a technique called lock coupling
+to improve concurrency. The traversal of a Btree requires reading a
+page, searching that page to determine which page to search next, and
+then repeating this process on the next page. Once a page has been
+searched, it will never be accessed again for this operation, unless a
+page split is required. To improve concurrency in the tree, once the
+next page to read/search has been determined, that page is locked and
+then the original page lock is released atomically (that is, without
+relinquishing control of the lock manager). When page splits become
+necessary, write locks are reacquired.
+<p>Because the Recno access method is built upon Btree, it also uses lock
+coupling for read operations. However, because the Recno access method
+must maintain a count of records on its internal pages, it cannot
+lock-couple during write operations. Instead, it retains write locks
+on all internal pages during every update operation. For this reason,
+it is not possible to have high concurrency in the Recno access method
+in the presence of write operations.
+<p>The Queue access method uses only short-term page locks. That is, a page
+lock is released prior to requesting another page lock. Record locks are
+used for transaction isolation. This provides a high degree of concurrency
+for write operations. A metadata page is used to keep track of the head
+and tail of the queue. This page is never locked during other locking or
+I/O operations.
+<p>The Hash access method does not have such traversal issues, but it must
+always refer to its metadata while computing a hash function because it
+implements dynamic hashing. This metadata is stored on a special page
+in the hash database. This page must therefore be read-locked on every
+operation. Fortunately, it needs to be write-locked only when new pages
+are allocated to the file, which happens in three cases:
+<p><ul type=disc>
+<li>a hash bucket becomes full and needs to split
+<li>a key or data item is too large to fit on a normal page
+<li>the number of duplicate items for a fixed key becomes so large that they
+are moved to an auxiliary page
+</ul>
+<p>In these cases, the access method must obtain a write lock on the metadata
+page, thus requiring that all readers be blocked from entering the
+database until the update completes.
+<p>Finally, when traversing duplicate data items for a key, the lock on
+the key value also acts as a lock on all duplicates of that key.
+Therefore, two conflicting threads of control cannot access the same
+duplicate set simultaneously.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/deaddbg.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/notxn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/stdmode.html b/libdb/docs/ref/lock/stdmode.html
new file mode 100644
index 0000000..812dff3
--- /dev/null
+++ b/libdb/docs/ref/lock/stdmode.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Standard lock modes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/dead.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Standard lock modes</h1>
+<p>The Berkeley DB locking protocol is described by a conflict matrix. A
+conflict matrix is an NxN array in which N is the number of different
+lock modes supported, and the (i, j)th entry of the array indicates
+whether a lock of mode i conflicts with a lock of mode j. In addition,
+Berkeley DB defines the type <b>db_lockmode_t</b>, which is the type of a
+lock mode within a conflict matrix.
+<p>The following is an example of a conflict matrix. The actual conflict
+matrix used by Berkeley DB to support the underlying access methods is more
+complicated, but this matrix shows the lock mode relationships available
+to applications using the Berkeley DB Locking subsystem interfaces directly.
+<p><dl compact>
+<p><dt>DB_LOCK_NG<dd>not granted (always 0)
+<dt>DB_LOCK_READ<dd>read (shared)
+<dt>DB_LOCK_WRITE<dd>write (exclusive)
+<dt>DB_LOCK_IWRITE<dd>intention to write (shared)
+<dt>DB_LOCK_IREAD<dd>intention to read (shared)
+<dt>DB_LOCK_IWR<dd>intention to read and write (shared)
+</dl>
+<p>In a conflict matrix, the rows indicate the lock that is held, and the
+columns indicate the lock that is requested. A 1 represents a conflict
+(that is, do not grant the lock if the indicated lock is held), and a
+0 indicates that it is OK to grant the lock.
+<p><blockquote><pre>              Notheld  Read  Write  IWrite  IRead  IRW
+Notheld          0      0      0      0       0     0
+Read*            0      0      1      1       0     1
+Write**          0      1      1      1       1     1
+Intent Write     0      1      1      0       0     0
+Intent Read      0      0      1      0       0     0
+Intent RW        0      1      1      0       0     0</pre></blockquote>
+<p><dl compact>
+<p><dt>*<dd>In this case, suppose that there is a read lock held on an object. A new
+request for a read lock would be granted, but a request for a write lock
+would not.
+<p><dt>**<dd>In this case, suppose that there is a write lock held on an object. A
+new request for either a read or write lock would be denied.
+</dl>
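+<p>As an illustration only (this sketch is not in the original text and,
+as noted in <a href="../../ref/lock/config.html">Configuring locking</a>, supplying a private matrix is rarely
+necessary), the example matrix above could be expressed as an array and
+installed with <a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a> before the environment is opened;
+an application supplying its own matrix also defines its own lock mode
+numbering.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+#define N_MODES 6
+
+/* Rows are the held lock, columns the requested lock, as above. */
+static u_int8_t conflicts[N_MODES][N_MODES] = {
+    /*         NG  RD  WR  IW  IR  IWR */
+    /* NG  */ { 0,  0,  0,  0,  0,  0 },
+    /* RD  */ { 0,  0,  1,  1,  0,  1 },
+    /* WR  */ { 0,  1,  1,  1,  1,  1 },
+    /* IW  */ { 0,  1,  1,  0,  0,  0 },
+    /* IR  */ { 0,  0,  1,  0,  0,  0 },
+    /* IWR */ { 0,  1,  1,  0,  0,  0 }
+};
+
+int
+install_conflict_matrix(DB_ENV *dbenv)
+{
+    return (dbenv-&gt;set_lk_conflicts(dbenv, &amp;conflicts[0][0], N_MODES));
+}</pre></blockquote>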
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/dead.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/timeout.html b/libdb/docs/ref/lock/timeout.html
new file mode 100644
index 0000000..c4232e4
--- /dev/null
+++ b/libdb/docs/ref/lock/timeout.html
@@ -0,0 +1,59 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock detection using timers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/deaddbg.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock detection using timers</h1>
+<p>Lock and transaction timeouts may be used in place of, or in addition
+to, regular deadlock detection. If lock timeouts are set, lock requests
+will return <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> from a lock call when it is
+detected that the lock's timeout has expired; that is, the lock request
+has blocked waiting longer than the specified timeout. If transaction
+timeouts are set, lock requests will return <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>
+from a lock call when it has been detected that the transaction has been
+active longer than the specified timeout.
+<p>As lock and transaction timeouts are only checked when lock requests
+first block or when deadlock detection is performed, the accuracy of
+the timeout depends on how often deadlock detection is performed. More
+specifically, transactions will continue to run after their timeout has
+expired if they do not block on a lock request after that time.
+<p>If the database environment deadlock detector has been configured with
+the <a href="../../api_c/env_set_lk_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a> option, timeouts are the only mechanism by
+which deadlocks will be broken. If the deadlock detector has been
+configured with a different option, then regular deadlock detection will
+be performed, and in addition, if timeouts have also been specified,
+lock requests and transactions will time out as well.
+<p>Lock and transaction timeouts may be specified on a database environment
+wide basis using the <a href="../../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a> method. Lock timeouts may be
+specified on a per-lock request basis using the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> method.
+Transaction timeouts may be specified on a per-transaction basis using
+the <a href="../../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a> method. Per-lock and per-transaction timeouts
+supersede environment wide timeouts.
+<p>For example, consider that the environment wide transaction timeout has
+been set to 20ms, the environment wide lock timeout has been set to
+10ms, a transaction has been created in this environment and its timeout
+value set to 8ms, and a specific lock request has been made on behalf
+of this transaction where the lock timeout was set to 4ms. By default,
+transactions in this environment will be timed out if they block waiting
+for a lock after 20ms. The specific transaction described will be timed
+out if it blocks waiting for a lock after 8ms. By default, any lock
+request in this system will be timed out if it blocks longer than 10ms,
+and the specific lock described will be timed out if it blocks longer
+than 4ms.
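+<p>The following sketch (not part of the original documentation) sets the
+environment-wide and per-transaction timeouts from the example above;
+timeouts are expressed in microseconds, and the per-lock 4ms timeout,
+which would be supplied with the individual lock request, is omitted
+here.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+configure_timeouts(DB_ENV *dbenv, DB_TXN *txn)
+{
+    int ret;
+
+    /* Environment-wide: 20ms transaction timeout, 10ms lock timeout. */
+    if ((ret = dbenv-&gt;set_timeout(dbenv, 20000, DB_SET_TXN_TIMEOUT)) != 0 ||
+        (ret = dbenv-&gt;set_timeout(dbenv, 10000, DB_SET_LOCK_TIMEOUT)) != 0)
+        return (ret);
+
+    /* This particular transaction times out after 8ms instead. */
+    return (txn-&gt;set_timeout(txn, 8000, DB_SET_TXN_TIMEOUT));
+}</pre></blockquote>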
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/deaddbg.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/lock/twopl.html b/libdb/docs/ref/lock/twopl.html
new file mode 100644
index 0000000..3cd027d
--- /dev/null
+++ b/libdb/docs/ref/lock/twopl.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Locking with transactions: two-phase locking</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/notxn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/cam_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Locking with transactions: two-phase locking</h1>
+<p>Berkeley DB uses a locking protocol called <i>two-phase locking (2PL)</i>. This
+is the traditional protocol used in conjunction with lock-based transaction
+systems.
+<p>In a two-phase locking system, transactions are divided into two
+distinct phases. During the first phase, the transaction only acquires
+locks; during the second phase, the transaction only releases locks.
+More formally, once a transaction releases a lock, it may not acquire
+any additional locks. Practically, this translates into a system in
+which locks are acquired as they are needed throughout a transaction
+and retained until the transaction ends, either by committing or
+aborting. In Berkeley DB, locks are released during <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> or
+<a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>. The only exception to this protocol occurs when we
+use lock-coupling to traverse a data structure. If the locks are held
+only for traversal purposes, it is safe to release locks before
+transactions commit or abort.
+<p>For applications, the implications of 2PL are that long-running
+transactions will hold locks for a long time. When designing
+applications, lock contention should be considered. In order to reduce
+the probability of deadlock and achieve the best level of concurrency
+possible, the following guidelines are helpful.
+<p><ol>
+<p><li>When accessing multiple databases, design all transactions so that they
+access the files in the same order.
+<p><li>If possible, access your most hotly contested resources last (so that
+their locks are held for the shortest time possible).
+<p><li>If possible, use nested transactions to protect the parts of your
+transaction most likely to deadlock.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/notxn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/cam_conv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/log/config.html b/libdb/docs/ref/log/config.html
new file mode 100644
index 0000000..b7a35a1
--- /dev/null
+++ b/libdb/docs/ref/log/config.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring logging</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/log/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring logging</h1>
+<p>The aspects of logging that may be configured are the size of the
+logging subsystem's region, the size of the log files on disk and the
+size of the log buffer in memory. The <a href="../../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a>
+interface specifies the size of the logging subsystem's region, in
+bytes. The logging subsystem's default size is 60KB. This value may
+need to be increased if a large number of files are registered with the
+Berkeley DB log manager, for example, by opening a large number of Berkeley DB
+database files in a transactional application.
+<p>The <a href="../../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a> interface specifies the individual log file
+size for all the applications sharing the Berkeley DB environment. Setting
+the log file size is largely a matter of convenience and a reflection
+of the application's preferences in backup media and frequency.
+However, setting the log file size too low can potentially cause
+problems because it would be possible to run out of log sequence
+numbers, which requires a full archival and application restart to
+reset. See <a href="../../ref/log/limits.html">Log file limits</a> for more
+information.
+<p>The <a href="../../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a> interface specifies the size of the
+in-memory log buffer, in bytes. Log information is stored in memory
+until the buffer fills up or transaction commit forces the buffer to be
+written to disk. Larger buffer sizes can significantly increase
+throughput in the presence of long-running transactions, highly
+concurrent applications, or transactions producing large amounts of
+data. By default, the buffer is 32KB.
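+<p>For illustration only (this fragment does not appear in the original
+page), all three settings are made on the environment handle before
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>; the values shown are arbitrary examples.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+configure_logging(DB_ENV *dbenv)
+{
+    int ret;
+
+    /* 128KB logging region, 10MB log files, 256KB in-memory buffer. */
+    if ((ret = dbenv-&gt;set_lg_regionmax(dbenv, 128 * 1024)) != 0 ||
+        (ret = dbenv-&gt;set_lg_max(dbenv, 10 * 1024 * 1024)) != 0 ||
+        (ret = dbenv-&gt;set_lg_bsize(dbenv, 256 * 1024)) != 0)
+        dbenv-&gt;err(dbenv, ret, "logging configuration");
+    return (ret);
+}</pre></blockquote>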
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/log/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/log/intro.html b/libdb/docs/ref/log/intro.html
new file mode 100644
index 0000000..9e4bb6c
--- /dev/null
+++ b/libdb/docs/ref/log/intro.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and logging</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/nondb.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and logging</h1>
+<p>The Logging subsystem is the logging facility used by Berkeley DB. It is
+largely Berkeley DB-specific, although it is potentially useful outside of
+the Berkeley DB package for applications wanting write-ahead logging support.
+Applications wanting to use the log for purposes other than logging file
+modifications based on a set of open file descriptors will almost
+certainly need to make source code modifications to the Berkeley DB code
+base.
+<p>A log can be shared by any number of threads of control. The
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> interface is used to open a log. When the log is no
+longer in use, it should be closed using the <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>
+interface.
+<p>Individual log entries are identified by log sequence numbers. Log
+sequence numbers are stored in an opaque object, a <a href="../../api_c/lsn_class.html">DB_LSN</a>.
+<p>The <a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> method is used to allocate a log cursor. Log cursors
+have two methods: <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a> method to retrieve log records from the
+log, and <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a> method to destroy the cursor.
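+<p>For example, a brief sketch (illustrative only; it assumes a <b>dbenv</b>
+handle already opened with <a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>
+and abbreviates error handling) traverses the log from the first record to the last:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+DB_LOGC *logc;
+DB_LSN lsn;
+DBT data;
+int ret;
+
+memset(&amp;data, 0, sizeof(data));
+
+if ((ret = dbenv-&gt;log_cursor(dbenv, &amp;logc, 0)) != 0)
+	return (ret);
+
+/* DB_NEXT on a newly allocated cursor starts at the first log record. */
+while ((ret = logc-&gt;get(logc, &amp;lsn, &amp;data, DB_NEXT)) == 0)
+	printf("[%lu][%lu] %lu bytes\n",
+	    (u_long)lsn.file, (u_long)lsn.offset, (u_long)data.size);
+
+if (ret != DB_NOTFOUND)
+	dbenv-&gt;err(dbenv, ret, "DB_LOGC-&gt;get");
+
+(void)logc-&gt;close(logc, 0);</pre></blockquote>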
+<p>There are additional interfaces for integrating the log subsystem with a
+transaction processing system:
+<p><dl compact>
+<p><dt><a href="../../api_c/log_flush.html">DB_ENV-&gt;log_flush</a><dd>Flushes the log up to a particular log sequence number.
+<p><dt><a href="../../api_c/log_compare.html">log_compare</a><dd>Allows applications to compare any two log sequence numbers.
+<p><dt><a href="../../api_c/log_file.html">DB_ENV-&gt;log_file</a> <dd>Maps a log sequence number to the specific log file that contains it.
+<p><dt><a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a><dd>Returns various sets of log filenames. These interfaces are used for
+database administration; for example, to determine if log files may
+safely be removed from the system.
+<p><dt><a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a> <dd>The display <a href="../../utility/db_stat.html">db_stat</a> utility uses the <a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a> interface
+to display statistics about the log.
+<p><dt><a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a><dd>The log meta-information (but not the log files themselves) may be
+removed using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> interface.
+</dl>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Logging Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><a href="../../api_c/log_file.html">DB_ENV-&gt;log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><a href="../../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a></td><td>Write a log record</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><a href="../../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><a href="../../api_c/log_compare.html">log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a></td><td>Close a log cursor</td></tr>
+<tr><td><a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a></td><td>Retrieve a log record</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/nondb.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/log/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/log/limits.html b/libdb/docs/ref/log/limits.html
new file mode 100644
index 0000000..efab9eb
--- /dev/null
+++ b/libdb/docs/ref/log/limits.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Logging Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/log/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file limits</h1>
+<p>Log filenames and sizes impose a limit on how long databases may be
+used in a Berkeley DB database environment. It is quite unlikely that an
+application will reach this limit; however, if the limit is reached,
+the Berkeley DB environment's databases must be dumped and reloaded.
+<p>The log filename consists of <b>log.</b> followed by 10 digits, with
+a maximum of 2,000,000,000 log files. Consider an application performing
+6000 transactions per second for 24 hours a day, logged into 10MB log
+files, in which each transaction is logging approximately 500 bytes of data.
+The following calculation:
+<p><blockquote><pre>(10 * 2^20 * 2000000000) / (6000 * 500 * 365 * 60 * 60 * 24) = ~221</pre></blockquote>
+<p>indicates that the system will run out of log filenames in roughly 221
+years.
+<p>There is no way to reset the log filename space in Berkeley DB. If your
+application is reaching the end of its log filename space, you must do
+the following:
+<p><ol>
+<p><li>Archive your databases as if to prepare for catastrophic failure (see
+<a href="../../utility/db_archive.html">db_archive</a> for more information).
+<p><li>Dump and reload all your databases (see <a href="../../utility/db_dump.html">db_dump</a> and
+<a href="../../utility/db_load.html">db_load</a> for more information).
+<p><li>Remove all of the log files from the database environment. Note: This
+is the only situation in which all the log files are removed from an
+environment; in all other cases, at least a single log file is
+retained.
+<p><li>Restart your application.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/log/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/mp/config.html b/libdb/docs/ref/mp/config.html
new file mode 100644
index 0000000..696f2c5
--- /dev/null
+++ b/libdb/docs/ref/mp/config.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring the memory pool</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Memory Pool Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/mp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring the memory pool</h1>
+<p>There are two issues to consider when configuring the memory pool.
+<p>The first issue, the most important tuning parameter for Berkeley DB
+applications, is the size of the memory pool. There are two ways to
+specify the pool size. First, calling the <a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a> method
+specifies the pool size for all of the applications sharing the Berkeley DB
+environment. Second, the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> method only specifies a
+pool size for the specific database. Note: It is meaningless to call
+<a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> for a database opened inside of a Berkeley DB
+environment because the environment pool size will override any pool
+size specified for a single database. For information on tuning the
+Berkeley DB cache size, see <a href="../../ref/am_conf/cachesize.html">Selecting
+a cache size</a>.
+<p>The second memory pool configuration issue is the maximum size an
+underlying file can be and still be mapped into the process address
+space (instead of reading the file's pages into the cache). Mapping
+files into the process address space can result in better performance
+because available virtual memory is often much larger than the local
+cache, and page faults are faster than page copying on many systems.
+However, in the presence of limited virtual memory, it can cause
+resource starvation; and in the presence of large databases, it can
+result in immense process sizes. In addition, because of the
+requirements of the Berkeley DB transactional implementation, only read-only
+files can be mapped into process memory.
+<p>To specify that no files are to be mapped into the process address space,
+specify the <a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a> flag to the <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface.
+To specify that any individual file should not be mapped into the process
+address space, specify the <a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a> flag to the
+<a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> interface. To limit the size of files mapped into the
+process address space, use the <a href="../../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a> method.
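+<p>As an illustrative fragment only (assuming the environment has not yet
+been opened and omitting most error handling), an application might
+configure a 64MB cache and either disable memory mapping entirely or cap
+the size of mapped files:
+<p><blockquote><pre>/* 64MB cache in a single region; must be set before DB_ENV-&gt;open. */
+if ((ret = dbenv-&gt;set_cachesize(dbenv, 0, 64 * 1024 * 1024, 1)) != 0)
+	return (ret);
+
+/* Either: never map database files into the process address space... */
+if ((ret = dbenv-&gt;set_flags(dbenv, DB_NOMMAP, 1)) != 0)
+	return (ret);
+
+/* ...or: allow mapping, but only for files of 8MB or less. */
+if ((ret = dbenv-&gt;set_mp_mmapsize(dbenv, 8 * 1024 * 1024)) != 0)
+	return (ret);</pre></blockquote>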
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/mp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/mp/intro.html b/libdb/docs/ref/mp/intro.html
new file mode 100644
index 0000000..298bc4d
--- /dev/null
+++ b/libdb/docs/ref/mp/intro.html
@@ -0,0 +1,97 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and the memory pool</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Memory Pool Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/log/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and the memory pool</h1>
+<p>The Memory Pool subsystem is the general-purpose shared memory buffer
+pool used by Berkeley DB. This module is useful outside of the Berkeley DB package
+for processes that require page-oriented, shared and cached file access.
+<p>A <i>memory pool</i> is a memory cache shared among any number of
+threads of control. The <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flag to the
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method opens and optionally creates a memory pool. When
+that pool is no longer in use, it should be closed using the
+<a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> method.
+<p>The <a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a> method returns a <a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle on an
+underlying file within the memory pool. The file may be opened using
+the <a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> method. The <a href="../../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a> method is used to retrieve
+pages from files in the pool. All retrieved pages must be subsequently
+returned using the <a href="../../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a> method. At the time pages are returned,
+they may be marked <b>dirty</b>, which causes them to be written to
+the underlying file before being discarded from the pool. If there is
+insufficient room to bring a new page into the pool, a page is selected
+to be discarded from the pool using a least-recently-used algorithm.
+Pages in files may also be explicitly marked clean or dirty using the
+<a href="../../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a> method. All dirty pages in the pool from the file may be
+flushed using the <a href="../../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a> method. When the file handle is no
+longer in use, it should be closed using the <a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method.
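+<p>For example, an abbreviated sketch of the get/put cycle (illustrative
+only; it assumes an environment opened with
+<a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>, a
+hypothetical file name and page size, and omits most error handling):
+<p><blockquote><pre>DB_MPOOLFILE *mpf;
+db_pgno_t pgno;
+void *page;
+int ret;
+
+if ((ret = dbenv-&gt;memp_fcreate(dbenv, &amp;mpf, 0)) != 0)
+	return (ret);
+if ((ret = mpf-&gt;open(mpf, "my.file", DB_CREATE, 0664, 1024)) != 0)
+	return (ret);
+
+pgno = 0;				/* Fetch (creating if necessary) page 0. */
+if ((ret = mpf-&gt;get(mpf, &amp;pgno, DB_MPOOL_CREATE, &amp;page)) != 0)
+	return (ret);
+
+/* ... modify the page ... */
+
+/* Return the page, marking it dirty so it is written before discard. */
+if ((ret = mpf-&gt;put(mpf, page, DB_MPOOL_DIRTY)) != 0)
+	return (ret);
+
+(void)mpf-&gt;sync(mpf);			/* Flush this file's dirty pages. */
+(void)mpf-&gt;close(mpf, 0);</pre></blockquote>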
+<p>There are additional configuration interfaces that apply when opening
+a new file in the memory pool:
+<p><ul type=disc>
+<li>The <a href="../../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a> method specifies the number of bytes to clear
+when creating a new page in the memory pool.
+<li>The <a href="../../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a> method specifies a unique ID associated with the file.
+<li>The <a href="../../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a> method specifies the type of file for the purposes of
+page input and output processing.
+<li>The <a href="../../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a> method specifies the byte offset of each page's
+log sequence number (<a href="../../api_c/lsn_class.html">DB_LSN</a>) for the purposes of transaction
+checkpoints.
+<li>The <a href="../../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a> method specifies an application provided argument
+for the purposes of page input and output processing.
+</ul>
+<p>There are additional interfaces for the memory pool as a whole:
+<p><ul type=disc>
+<li>It is possible to gradually flush buffers from the pool in order to
+maintain a consistent percentage of clean buffers in the pool using
+the <a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a> method.
+<li>Because special-purpose processing may be necessary when pages are read
+or written (for example, compression or endian conversion), the
+<a href="../../api_c/memp_register.html">DB_ENV-&gt;memp_register</a> function allows applications to specify automatic
+input and output processing in these cases.
+<li>The <a href="../../utility/db_stat.html">db_stat</a> utility uses the <a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a> method to display
+statistics about the efficiency of the pool.
+<li>All dirty pages in the pool may be flushed using the <a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> method.
+In addition, <a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> takes an argument that is specific to
+database systems, and which allows the memory pool to be flushed up to
+a specified log sequence number (<a href="../../api_c/lsn_class.html">DB_LSN</a>).
+<li>The entire pool may be discarded using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method.
+</ul>
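+<p>For example (a short sketch, assuming an open <b>dbenv</b> handle and
+abbreviated error handling), the trickle and flush interfaces listed above
+might be used as follows:
+<p><blockquote><pre>int nwrote, ret;
+
+/* Ensure at least 20% of the buffers in the pool are clean. */
+if ((ret = dbenv-&gt;memp_trickle(dbenv, 20, &amp;nwrote)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;memp_trickle");
+
+/* A NULL LSN argument flushes all dirty pages in the pool. */
+if ((ret = dbenv-&gt;memp_sync(dbenv, NULL)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;memp_sync");</pre></blockquote>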
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Memory Pools and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><a href="../../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><a href="../../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><a href="../../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><a href="../../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><a href="../../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><a href="../../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><a href="../../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><a href="../../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/log/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/mp/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/perl/intro.html b/libdb/docs/ref/perl/intro.html
new file mode 100644
index 0000000..7257019
--- /dev/null
+++ b/libdb/docs/ref/perl/intro.html
@@ -0,0 +1,43 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Perl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Perl</dl></h3></td>
+<td align=right><a href="../../ref/java/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Perl</h1>
+<p>The original Perl module for Berkeley DB was DB_File, which was written to
+interface to Berkeley DB version 1.85. The newer Perl module for Berkeley DB is
+BerkeleyDB, which was written to interface to version 2.0 and subsequent
+releases. Because Berkeley DB version 2.X has a compatibility API for version
+1.85, you can (and should!) build DB_File using version 2.X of Berkeley DB,
+although DB_File will still only support the 1.85 functionality.
+<p>DB_File is distributed with the standard Perl source distribution (look
+in the directory "ext/DB_File"). You can find both DB_File and BerkeleyDB
+on CPAN, the Comprehensive Perl Archive Network of mirrored FTP sites.
+The master CPAN site is
+<a href="ftp://ftp.funet.fi/">ftp://ftp.funet.fi/</a>.
+<p>Versions of both BerkeleyDB and DB_File that are known to work correctly
+with each release of Berkeley DB are included in the distributed Berkeley DB source
+tree, in the subdirectories <b>perl.BerkeleyDB</b> and
+<b>perl.DB_File</b>. Each of those directories contains a
+<b>README</b> file with instructions on installing and using those
+modules.
+<p>The Perl interface is not maintained by Sleepycat Software. Questions
+about the DB_File and BerkeleyDB modules are best asked on the Usenet
+newsgroup comp.lang.perl.modules.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/java/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/pindex.src b/libdb/docs/ref/pindex.src
new file mode 100644
index 0000000..bf0ef22
--- /dev/null
+++ b/libdb/docs/ref/pindex.src
@@ -0,0 +1,276 @@
+__APIREL__/ref/am/close.html__OCT__2 @closing a database
+__APIREL__/ref/am/count.html__OCT__2 @counting data items for a key
+__APIREL__/ref/am/curclose.html__OCT__2 @closing a cursor
+__APIREL__/ref/am/curclose.html__OCT__3 closing a @cursor
+__APIREL__/ref/am/curdel.html__OCT__2 @deleting records with a cursor
+__APIREL__/ref/am/curdel.html__OCT__3 deleting records with a @cursor
+__APIREL__/ref/am/curdup.html__OCT__2 @duplicating a cursor
+__APIREL__/ref/am/curdup.html__OCT__3 duplicating a @cursor
+__APIREL__/ref/am/curget.html__OCT__2 @retrieving records with a cursor
+__APIREL__/ref/am/curget.html__OCT__3 retrieving records with a @cursor
+__APIREL__/ref/am/curput.html__OCT__2 @storing records with a cursor
+__APIREL__/ref/am/curput.html__OCT__3 storing records with a @cursor
+__APIREL__/ref/am/cursor.html__OCT__2 database @cursors
+__APIREL__/ref/am/delete.html__OCT__2 @deleting records
+__APIREL__/ref/am/get.html__OCT__2 @retrieving records
+__APIREL__/ref/am/join.html__OCT__2 @equality join
+__APIREL__/ref/am/join.html__OCT__3 equality @join
+__APIREL__/ref/am/join.html__OCT__4 @natural join
+__APIREL__/ref/am/open.html__OCT__2 @opening a database
+__APIREL__/ref/am/put.html__OCT__2 @storing records
+__APIREL__/ref/am/second.html__OCT__2 @secondary indices
+__APIREL__/ref/am/second.html__OCT__3 secondary @indices
+__APIREL__/ref/am/stat.html__OCT__2 database @statistics
+__APIREL__/ref/am/sync.html__OCT__2 flushing the database @cache
+__APIREL__/ref/am/truncate.html__OCT__2 @truncating a database
+__APIREL__/ref/am/truncate.html__OCT__3 @emptying a database
+__APIREL__/ref/am/upgrade.html__OCT__2 @upgrading databases
+__APIREL__/ref/am/verify.html__OCT__2 database @verification
+__APIREL__/ref/am/verify.html__OCT__3 database @salvage
+__APIREL__/ref/am/verify.html__OCT__4 salvaging @corrupted databases
+__APIREL__/ref/am_conf/bt_compare.html__OCT__2 specifying a Btree @comparison function
+__APIREL__/ref/am_conf/bt_recnum.html__OCT__2 retrieving Btree records by logical record @number
+__APIREL__/ref/am_conf/bt_recnum.html__OCT__3 retrieving Btree records by @logical record @number
+__APIREL__/ref/am_conf/byteorder.html__OCT__2 selecting a @byte order
+__APIREL__/ref/am_conf/cachesize.html__OCT__2 selecting a @cache size
+__APIREL__/ref/am_conf/dup.html__OCT__2 @duplicate data items
+__APIREL__/ref/am_conf/extentsize.html__OCT__2 selecting a Queue @extent size
+__APIREL__/ref/am_conf/h_ffactor.html__OCT__2 page @fill factor
+__APIREL__/ref/am_conf/h_hash.html__OCT__2 specifying a database @hash
+__APIREL__/ref/am_conf/h_nelem.html__OCT__2 @hash table size
+__APIREL__/ref/am_conf/intro.html__OCT__2 introduction to the @access methods
+__APIREL__/ref/am_conf/logrec.html__OCT__2 logical @record numbers
+__APIREL__/ref/am_conf/pagesize.html__OCT__2 selecting a @page size
+__APIREL__/ref/am_conf/re_source.html__OCT__2 @text backing files
+__APIREL__/ref/am_conf/recno.html__OCT__2 managing @record-based databases
+__APIREL__/ref/am_conf/renumber.html__OCT__2 logically renumbering @records
+__APIREL__/ref/am_conf/select.html__OCT__2 selecting an @access method
+__APIREL__/ref/am_misc/align.html__OCT__2 data @alignment
+__APIREL__/ref/am_misc/dbsizes.html__OCT__2 database @limits
+__APIREL__/ref/am_misc/diskspace.html__OCT__2 @disk space requirements
+__APIREL__/ref/am_misc/error.html__OCT__2 @error handling
+__APIREL__/ref/am_misc/faq.html__OCT__2 @access method FAQ
+__APIREL__/ref/am_misc/faq.html__OCT__3 database @compaction
+__APIREL__/ref/am_misc/faq.html__OCT__4 returning pages to the @filesystem
+__APIREL__/ref/am_misc/faq.html__OCT__5 @double buffering
+__APIREL__/ref/am_misc/get_bulk.html__OCT__2 @retrieving records in bulk
+__APIREL__/ref/am_misc/partial.html__OCT__2 @partial record storage and retrieval
+__APIREL__/ref/am_misc/perm.html__OCT__2 retrieved key/data @permanence
+__APIREL__/ref/am_misc/perm.html__OCT__3 retrieved @key/data permanence
+__APIREL__/ref/am_misc/stability.html__OCT__2 @cursor stability
+__APIREL__/ref/am_misc/stability.html__OCT__3 cursor @stability
+__APIREL__/ref/am_misc/stability.html__OCT__4 @degrees of isolation
+__APIREL__/ref/am_misc/stability.html__OCT__5 degrees of @isolation
+__APIREL__/ref/am_misc/tune.html__OCT__2 @access method tuning
+__APIREL__/ref/am_misc/tune.html__OCT__3 access method @tuning
+__APIREL__/ref/arch/apis.html__OCT__2 programmatic @APIs
+__APIREL__/ref/arch/utilities.html__OCT__2 @utilities
+__APIREL__/ref/build_unix/aix.html__OCT__2 @AIX
+__APIREL__/ref/build_unix/conf.html__OCT__2 @configuring Berkeley DB for UNIX systems
+__APIREL__/ref/build_unix/conf.html__OCT__3 configuring Berkeley DB for @UNIX systems
+__APIREL__/ref/build_unix/conf.html__OCT__4 configuring Berkeley DB @1.85 API compatibility
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-compat185 Configuring Berkeley DB@--enable-compat185
+__APIREL__/ref/build_unix/conf.html__OCT__5 configuring the @C++ API
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-cxx Configuring Berkeley DB@--enable-cxx
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-debug Configuring Berkeley DB@--enable-debug
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-debug_rop Configuring Berkeley DB@--enable-debug_rop
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-debug_wop Configuring Berkeley DB@--enable-debug_wop
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-diagnostic Configuring Berkeley DB@--enable-diagnostic
+__APIREL__/ref/build_unix/conf.html__OCT__6 building a utility to dump Berkeley DB @1.85 databases
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-dump185 Configuring Berkeley DB@--enable-dump185
+__APIREL__/ref/build_unix/conf.html__OCT__7 configuring the @Java API
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-java Configuring Berkeley DB@--enable-java
+__APIREL__/ref/build_unix/conf.html__OCT__8 configuring without large @file support
+__APIREL__/ref/build_unix/conf.html__OCT__--disable-largefile Configuring Berkeley DB@--disable-largefile
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-posixmutexes Configuring Berkeley DB@--enable-posixmutexes
+__APIREL__/ref/build_unix/conf.html__OCT__9 configuring a @RPC client/server
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-rpc Configuring Berkeley DB@--enable-rpc
+__APIREL__/ref/build_unix/conf.html__OCT__10 disabling @shared libraries
+__APIREL__/ref/build_unix/conf.html__OCT__11 disabling @static libraries
+__APIREL__/ref/build_unix/conf.html__OCT__--disable-shared Configuring Berkeley DB@--disable-shared
+__APIREL__/ref/build_unix/conf.html__OCT__--disable-static Configuring Berkeley DB@--disable-static
+__APIREL__/ref/build_unix/conf.html__OCT__12 configuring the @Tcl API
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-tcl Configuring Berkeley DB@--enable-tcl
+__APIREL__/ref/build_unix/conf.html__OCT__13 configuring the @test suite
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-test Configuring Berkeley DB@--enable-test
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-uimutexes Configuring Berkeley DB@--enable-uimutexes
+__APIREL__/ref/build_unix/conf.html__OCT__--enable-umrw Configuring Berkeley DB@--enable-umrw
+__APIREL__/ref/build_unix/conf.html__OCT__--with-embedix=DIR Configuring Berkeley DB@--with-embedix=DIR
+__APIREL__/ref/build_unix/conf.html__OCT__--with-mutex=MUTEX Configuring Berkeley DB@--with-mutex=MUTEX
+__APIREL__/ref/build_unix/conf.html__OCT__--with-rpm=DIR Configuring Berkeley DB@--with-rpm=DIR
+__APIREL__/ref/build_unix/conf.html__OCT__--with-tcl=DIR Configuring Berkeley DB@--with-tcl=DIR
+__APIREL__/ref/build_unix/conf.html__OCT__--with-uniquename=NAME Configuring Berkeley DB@--with-uniquename=NAME
+__APIREL__/ref/build_unix/embedix.html__OCT__2 @Embedix
+__APIREL__/ref/build_unix/flags.html__OCT__2 changing @compile or load options
+__APIREL__/ref/build_unix/flags.html__OCT__3 changing compile or @load options
+__APIREL__/ref/build_unix/freebsd.html__OCT__2 @FreeBSD
+__APIREL__/ref/build_unix/hpux.html__OCT__2 @HP-UX
+__APIREL__/ref/build_unix/install.html__OCT__2 @installing Berkeley DB for UNIX systems
+__APIREL__/ref/build_unix/intro.html__OCT__2 @building for UNIX
+__APIREL__/ref/build_unix/intro.html__OCT__3 @building for QNX
+__APIREL__/ref/build_unix/intro.html__OCT__4 building for @UNIX
+__APIREL__/ref/build_unix/intro.html__OCT__5 building for @QNX
+__APIREL__/ref/build_unix/irix.html__OCT__2 @IRIX
+__APIREL__/ref/build_unix/linux.html__OCT__2 @Linux
+__APIREL__/ref/build_unix/macosx.html__OCT__2 @Mac OS X
+__APIREL__/ref/build_unix/notes.html__OCT__2 @building for UNIX FAQ
+__APIREL__/ref/build_unix/notes.html__OCT__3 building for @UNIX FAQ
+__APIREL__/ref/build_unix/osf1.html__OCT__2 @OSF/1
+__APIREL__/ref/build_unix/qnx.html__OCT__2 @QNX
+__APIREL__/ref/build_unix/sco.html__OCT__2 @SCO
+__APIREL__/ref/build_unix/shlib.html__OCT__2 @shared libraries
+__APIREL__/ref/build_unix/solaris.html__OCT__2 @Solaris
+__APIREL__/ref/build_unix/sunos.html__OCT__2 @SunOS
+__APIREL__/ref/build_unix/test.html__OCT__2 running the @test suite under UNIX
+__APIREL__/ref/build_unix/ultrix.html__OCT__2 @Ultrix
+__APIREL__/ref/build_vxworks/faq.html__OCT__2 @building for VxWorks FAQ
+__APIREL__/ref/build_vxworks/faq.html__OCT__3 building for @VxWorks FAQ
+__APIREL__/ref/build_vxworks/intro.html__OCT__2 @building for VxWorks
+__APIREL__/ref/build_vxworks/introae.html__OCT__2 @building for VxWorks AE
+__APIREL__/ref/build_vxworks/notes.html__OCT__2 @VxWorks notes
+__APIREL__/ref/build_win/faq.html__OCT__2 @building for Windows FAQ
+__APIREL__/ref/build_win/faq.html__OCT__3 building for @Windows FAQ
+__APIREL__/ref/build_win/intro.html__OCT__2 @building for Win32
+__APIREL__/ref/build_win/notes.html__OCT__2 @Windows notes
+__APIREL__/ref/build_win/test.html__OCT__2 running the @test suite under Windows
+__APIREL__/ref/build_win/test.html__OCT__3 running the test suite under @Windows
+__APIREL__/ref/cam/intro.html__OCT__2 @Concurrent Data Store
+__APIREL__/ref/debug/intro.html__OCT__2 introduction to @debugging
+__APIREL__/ref/debug/common.html__OCT__2 @debugging applications
+__APIREL__/ref/distrib/layout.html__OCT__2 @source code layout
+__APIREL__/ref/dumpload/text.html__OCT__2 loading @text into databases
+__APIREL__/ref/dumpload/utility.html__OCT__2 dumping/loading @text to/from databases
+__APIREL__/ref/env/create.html__OCT__2 database @environment
+__APIREL__/ref/env/db_config.html__OCT__2 @DB_CONFIG
+__APIREL__/ref/env/db_config.html__OCT__3 database environment @configuration
+__APIREL__/ref/env/encrypt.html__OCT__2 @encryption
+__APIREL__/ref/env/faq.html__OCT__2 database @environment FAQ
+__APIREL__/ref/env/intro.html__OCT__2 introduction to database @environments
+__APIREL__/ref/env/naming.html__OCT__2 file @naming
+__APIREL__/ref/env/naming.html__OCT__db_home File naming@db_home
+__APIREL__/ref/env/naming.html__OCT__DB_HOME File naming@DB_HOME
+__APIREL__/ref/env/region.html__OCT__2 @__db.001
+__APIREL__/ref/env/remote.html__OCT__2 remote @filesystems
+__APIREL__/ref/env/security.html__OCT__2 @security
+__APIREL__/ref/intro/products.html__OCT__2 Sleepycat Software's Berkeley DB @products
+__APIREL__/ref/install/file.html__OCT__2 @/etc/magic
+__APIREL__/ref/install/file.html__OCT__3 @file utility
+__APIREL__/ref/install/rpm.html__OCT__2 @RPM
+__APIREL__/ref/java/compat.html__OCT__2 @Java compatibility
+__APIREL__/ref/java/conf.html__OCT__2 @Java configuration
+__APIREL__/ref/java/faq.html__OCT__2 Java @FAQ
+__APIREL__/ref/java/faq.html__OCT__3 @Java FAQ
+__APIREL__/ref/lock/am_conv.html__OCT__2 Berkeley DB Transactional Data Store @locking conventions
+__APIREL__/ref/lock/cam_conv.html__OCT__2 Berkeley DB Concurrent Data Store @locking conventions
+__APIREL__/ref/lock/config.html__OCT__2 @locking configuration
+__APIREL__/ref/lock/dead.html__OCT__2 @deadlocks
+__APIREL__/ref/lock/intro.html__OCT__2 introduction to the @locking subsystem
+__APIREL__/ref/lock/max.html__OCT__2 sizing the @locking subsystem
+__APIREL__/ref/lock/nondb.html__OCT__2 @locking and non-Berkeley DB applications
+__APIREL__/ref/lock/notxn.html__OCT__2 @locking without transactions
+__APIREL__/ref/lock/page.html__OCT__2 page-level @locking
+__APIREL__/ref/lock/page.html__OCT__3 @locking granularity
+__APIREL__/ref/lock/stdmode.html__OCT__2 standard @lock modes
+__APIREL__/ref/lock/timeout.html__OCT__2 lock @timeouts
+__APIREL__/ref/lock/timeout.html__OCT__3 transaction @timeouts
+__APIREL__/ref/lock/twopl.html__OCT__2 two-phase @locking
+__APIREL__/ref/log/config.html__OCT__2 @logging configuration
+__APIREL__/ref/log/intro.html__OCT__2 introduction to the @logging subsystem
+__APIREL__/ref/log/limits.html__OCT__2 @log file limits
+__APIREL__/ref/mp/intro.html__OCT__2 introduction to the @memory pool subsystem
+__APIREL__/ref/mp/intro.html__OCT__3 introduction to the memory @cache subsystem
+__APIREL__/ref/mp/intro.html__OCT__4 introduction to the @buffer pool subsystem
+__APIREL__/ref/mp/config.html__OCT__2 @memory pool configuration
+__APIREL__/ref/perl/intro.html__OCT__2 @Perl
+__APIREL__/ref/program/appsignals.html__OCT__2 @signal handling
+__APIREL__/ref/program/compatible.html__OCT__2 @interface compatibility
+__APIREL__/ref/program/environ.html__OCT__2 @environment variables
+__APIREL__/ref/program/errorret.html__OCT__2 @error returns
+__APIREL__/ref/program/errorret.html__OCT__3 @error name space
+__APIREL__/ref/program/errorret.html__OCT__DB_NOTFOUND Error returns to applications@DB_NOTFOUND
+__APIREL__/ref/program/errorret.html__OCT__DB_KEYEMPTY Error returns to applications@DB_KEYEMPTY
+__APIREL__/ref/program/errorret.html__OCT__DB_KEYEXIST Error returns to applications@DB_KEYEXIST
+__APIREL__/ref/program/errorret.html__OCT__4 @DB_LOCK_DEADLOCK
+__APIREL__/ref/program/errorret.html__OCT__DB_LOCK_DEADLOCK Error returns to applications@DB_LOCK_DEADLOCK
+__APIREL__/ref/program/errorret.html__OCT__DB_LOCK_NOTGRANTED Error returns to applications@DB_LOCK_NOTGRANTED
+__APIREL__/ref/program/errorret.html__OCT__DB_RUNRECOVERY Error returns to applications@DB_RUNRECOVERY
+__APIREL__/ref/program/faq.html__OCT__2 task/thread @priority
+__APIREL__/ref/program/mt.html__OCT__2 building @threaded applications
+__APIREL__/ref/program/namespace.html__OCT__2 Berkeley DB library @name spaces
+__APIREL__/ref/program/scope.html__OCT__2 Berkeley DB handle @scope
+__APIREL__/ref/program/scope.html__OCT__3 Berkeley DB @free-threaded handles
+__APIREL__/ref/rep/intro.html__OCT__2 introduction to @replication
+__APIREL__/ref/rpc/client.html__OCT__2 @RPC client
+__APIREL__/ref/rpc/faq.html__OCT__2 @RPC FAQ
+__APIREL__/ref/rpc/intro.html__OCT__2 introduction to @rpc client/server
+__APIREL__/ref/rpc/server.html__OCT__2 @RPC server
+__APIREL__/ref/sendmail/intro.html__OCT__2 @Sendmail
+__APIREL__/ref/simple_tut/intro.html__OCT__2 simple @tutorial
+__APIREL__/ref/tcl/intro.html__OCT__2 loading Berkeley DB with @Tcl
+__APIREL__/ref/tcl/faq.html__OCT__2 Tcl @FAQ
+__APIREL__/ref/tcl/faq.html__OCT__3 @Tcl FAQ
+__APIREL__/ref/tcl/program.html__OCT__2 @Tcl API programming notes
+__APIREL__/ref/tcl/using.html__OCT__2 using Berkeley DB with @Tcl
+__APIREL__/ref/test/run.html__OCT__2 running the @test suite
+__APIREL__/ref/transapp/admin.html__OCT__2 administering @transaction protected applications
+__APIREL__/ref/transapp/archival.html__OCT__2 archival in @transaction protected applications
+__APIREL__/ref/transapp/archival.html__OCT__3 @catastrophic recovery
+__APIREL__/ref/transapp/archival.html__OCT__4 hot @backup
+__APIREL__/ref/transapp/checkpoint.html__OCT__2 checkpoints in @transaction protected applications
+__APIREL__/ref/transapp/deadlock.html__OCT__2 deadlock detection in @transaction protected applications
+__APIREL__/ref/transapp/faq.html__OCT__2 @transaction FAQ
+__APIREL__/ref/transapp/filesys.html__OCT__2 recovery and @filesystem operations
+__APIREL__/ref/transapp/hotfail.html__OCT__2 hot @failover
+__APIREL__/ref/transapp/intro.html__OCT__2 @Transactional Data Store
+__APIREL__/ref/transapp/logfile.html__OCT__2 @log file removal
+__APIREL__/ref/transapp/nested.html__OCT__2 nested @transactions
+__APIREL__/ref/transapp/read.html__OCT__2 @repeatable read
+__APIREL__/ref/transapp/read.html__OCT__3 dirty @reads
+__APIREL__/ref/transapp/read.html__OCT__4 @dirty reads
+__APIREL__/ref/transapp/reclimit.html__OCT__2 Berkeley DB @recoverability
+__APIREL__/ref/transapp/recovery.html__OCT__2 recovery in @transaction protected applications
+__APIREL__/ref/transapp/throughput.html__OCT__2 @transaction throughput
+__APIREL__/ref/transapp/tune.html__OCT__2 @transaction tuning
+__APIREL__/ref/transapp/tune.html__OCT__3 transaction @tuning
+__APIREL__/ref/txn/config.html__OCT__2 @transaction configuration
+__APIREL__/ref/txn/intro.html__OCT__2 introduction to the @transaction subsystem
+__APIREL__/ref/txn/limits.html__OCT__2 @transaction limits
+__APIREL__/ref/upgrade.2.0/intro.html__OCT__2 Upgrading to release @2.0
+__APIREL__/ref/upgrade.3.0/intro.html__OCT__2 Upgrading to release @3.0
+__APIREL__/ref/upgrade.3.1/intro.html__OCT__2 Upgrading to release @3.1
+__APIREL__/ref/upgrade.3.2/intro.html__OCT__2 Upgrading to release @3.2
+__APIREL__/ref/upgrade.3.3/intro.html__OCT__2 Upgrading to release @3.3
+__APIREL__/ref/upgrade.4.0/intro.html__OCT__2 Upgrading to release @4.0
+__APIREL__/ref/upgrade.4.1/intro.html__OCT__2 Upgrading to release @4.1
+__APIREL__/ref/xa/build.html__OCT__2 @Transaction Manager
+__APIREL__/ref/xa/intro.html__OCT__2 @Distributed Transactions
+__APIREL__/ref/xa/intro.html__OCT__3 @Resource Manager
+__APIREL__/ref/xa/faq.html__OCT__2 XA @FAQ
+__APIREL__/ref/xa/faq.html__OCT__3 @XA FAQ
+__APIREL__/ref/xa/xa_config.html__OCT__2 configuring Berkeley DB with the @Tuxedo System
+__APIREL__/ref/xa/xa_intro.html__OCT__2 @XA Resource Manager
+__APIREL__/ref/xa/xa_intro.html__OCT__3 XA @Resource Manager
+__APIREL__/utility/berkeley_db_svc.html__OCT__2 @berkeley_db_svc
+__APIREL__/utility/berkeley_db_svc.html__OCT__3 utility to support @RPC client/server
+__APIREL__/utility/db_archive.html__OCT__2 @db_archive
+__APIREL__/utility/db_archive.html__OCT__3 utility to @archive log files
+__APIREL__/utility/db_checkpoint.html__OCT__2 @db_checkpoint
+__APIREL__/utility/db_checkpoint.html__OCT__3 utility to take @checkpoints
+__APIREL__/utility/db_deadlock.html__OCT__2 @db_deadlock
+__APIREL__/utility/db_deadlock.html__OCT__3 utility to detect @deadlocks
+__APIREL__/utility/db_dump.html__OCT__2 @db_dump
+__APIREL__/utility/db_dump.html__OCT__3 utility to @dump databases as text files
+__APIREL__/utility/db_load.html__OCT__2 @db_load
+__APIREL__/utility/db_load.html__OCT__3 utility to @load text files into databases
+__APIREL__/utility/db_printlog.html__OCT__2 @db_printlog
+__APIREL__/utility/db_printlog.html__OCT__3 utility to display @log files as text
+__APIREL__/utility/db_recover.html__OCT__2 @db_recover
+__APIREL__/utility/db_recover.html__OCT__3 utility to @recover database environments
+__APIREL__/utility/db_stat.html__OCT__2 @db_stat
+__APIREL__/utility/db_stat.html__OCT__3 utility to display database and environment @statistics
+__APIREL__/utility/db_upgrade.html__OCT__2 @db_upgrade
+__APIREL__/utility/db_upgrade.html__OCT__3 utility to upgrade @database files
+__APIREL__/utility/db_upgrade.html__OCT__4 utility to @upgrade database files
+__APIREL__/utility/db_verify.html__OCT__2 @db_verify
+__APIREL__/utility/db_verify.html__OCT__3 utility to verify @database files
+__APIREL__/utility/db_verify.html__OCT__4 utility to @verify database files
diff --git a/libdb/docs/ref/program/appsignals.html b/libdb/docs/ref/program/appsignals.html
new file mode 100644
index 0000000..de4bf6d
--- /dev/null
+++ b/libdb/docs/ref/program/appsignals.html
@@ -0,0 +1,39 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Signal handling</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/apprec/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/errorret.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Signal handling</h1>
+<p>When applications using Berkeley DB receive signals, it is important that they
+exit gracefully, discarding any Berkeley DB locks that they may hold. This is
+normally done by setting a flag when a signal arrives and then checking
+for that flag periodically within the application. Because Berkeley DB is not
+re-entrant, the signal handler should not attempt to release locks and/or
+close the database handles itself. Re-entering Berkeley DB is not guaranteed to
+work correctly, and the results are undefined.
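+<p>A minimal sketch of this pattern (illustrative only; single-threaded, with
+the signals to catch and the <b>cleanup_and_exit</b> routine left as
+application-specific placeholders):
+<p><blockquote><pre>#include &lt;signal.h&gt;
+
+static volatile sig_atomic_t got_signal;
+
+static void
+onsignal(int signo)
+{
+	got_signal = signo;	/* Only record the signal; no Berkeley DB calls here. */
+}
+
+/* At startup:  (void)signal(SIGINT, onsignal);  */
+
+/* In the main loop, between Berkeley DB operations: */
+if (got_signal != 0)
+	cleanup_and_exit();	/* Placeholder: close DB handles, DB_ENV-&gt;close, exit. */</pre></blockquote>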
+<p>If an application exits while holding a lock, the situation is no different
+from an application crash: all applications participating in the database
+environment must be shut down, and recovery must then be performed. If this
+is not done, databases may be left in an inconsistent state, or locks the
+application held may cause unresolvable deadlocks inside the environment,
+causing applications to hang.
+<p>Berkeley DB restarts all system calls interrupted by signals; that is, any
+underlying system call that returns failure with errno set to EINTR is
+restarted rather than failing.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/apprec/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/errorret.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/cache.html b/libdb/docs/ref/program/cache.html
new file mode 100644
index 0000000..d70b343
--- /dev/null
+++ b/libdb/docs/ref/program/cache.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Disk drive caches</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/namespace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/copy.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Disk drive caches</h1>
+<p>Many disk drives contain onboard caches. Some of these drives include
+battery-backup or other functionality that guarantees that all cached
+data will be completely written if the power fails. These drives can
+offer substantial performance improvements over drives without caching
+support. However, some caching drives rely on capacitors or other
+mechanisms that guarantee only that the write of the current sector
+will complete. These drives can endanger your database and potentially
+cause corruption of your data.
+<p>To avoid losing your data, make sure the caching on your disk drives is
+properly configured so the drive will never report that data has been written
+unless the data is guaranteed to be written in the face of a power failure.
+Many times, this means that write-caching on the disk drive must
+be disabled.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/namespace.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/copy.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/compatible.html b/libdb/docs/ref/program/compatible.html
new file mode 100644
index 0000000..2b76a27
--- /dev/null
+++ b/libdb/docs/ref/program/compatible.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Compatibility with historic UNIX interfaces</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/copy.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Compatibility with historic UNIX interfaces</h1>
+<p>The Berkeley DB version 2 library provides backward-compatible interfaces for
+the historic UNIX <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a>, and <a href="../../api_c/hsearch.html">hsearch</a>
+interfaces. It also provides a backward-compatible interface for the
+historic Berkeley DB 1.85 release.
+<p>Berkeley DB version 2 does not provide database compatibility for any of the
+previous interfaces, and existing databases must be converted manually.
+To convert existing databases from the Berkeley DB 1.85 format to the Berkeley DB
+version 2 format, review the <a href="../../utility/db_dump.html">db_dump185</a> and <a href="../../utility/db_load.html">db_load</a>
+information. No utilities are provided to convert UNIX <a href="../../api_c/dbm.html">dbm</a>,
+<a href="../../api_c/dbm.html">ndbm</a>, or <a href="../../api_c/hsearch.html">hsearch</a> databases.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/copy.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/runtime.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/copy.html b/libdb/docs/ref/program/copy.html
new file mode 100644
index 0000000..9d8361a
--- /dev/null
+++ b/libdb/docs/ref/program/copy.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Copying databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/cache.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/compatible.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Copying databases</h1>
+<p>Because file identification cookies (for example, filenames, device and
+inode numbers, volume and file IDs, and so on) are not necessarily
+unique or maintained across system reboots, each Berkeley DB database file
+contains a 20-byte file identification bytestring that is stored in the
+first page of the database, starting with the 53rd byte on the page.
+When multiple processes or threads open the same database file in Berkeley DB,
+it is this bytestring that is used to ensure that the same underlying
+pages are updated in the shared memory buffer pool, no matter which
+Berkeley DB handle is used for the operation.
+<p>It is usually a bad idea to physically copy a database to a new name. In
+the few cases in which copying is the best solution for your application,
+you must guarantee that there are never two different databases with
+the same file identification bytestring in the memory pool at the same
+time. Copying databases is further complicated by the fact that the
+shared memory buffer pool does not discard all cached copies of pages
+for a database when the database is logically closed; that is, when
+<a href="../../api_c/db_close.html">DB-&gt;close</a> is called. Nor is there a Berkeley DB interface to
+explicitly discard pages from the shared memory buffer pool for any
+particular database.
+<p>Before copying a database, you must ensure that all modified pages have
+been written from the memory pool cache to the backing database file.
+This is done using the <a href="../../api_c/db_sync.html">DB-&gt;sync</a> or <a href="../../api_c/db_close.html">DB-&gt;close</a> interfaces.
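+<p>For example (a one-line sketch, where <b>dbp</b> is an open database handle):
+<p><blockquote><pre>/* Flush any modified pages for this database from the cache to disk. */
+if ((ret = dbp-&gt;sync(dbp, 0)) != 0)
+	dbp-&gt;err(dbp, ret, "DB-&gt;sync");</pre></blockquote>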
+<p>Before using a copy of a database from Berkeley DB, you must ensure that all
+pages from any database with the same bytestring have been removed from
+the memory pool cache. If the environment in which you intend to open
+the copy of the database potentially has pages from files with identical
+bytestrings to the copied database (which is likely to be the case), there
+are a few possible solutions:
+<p><ol>
+<p><li>Remove the environment, either explicitly or by calling <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a>.
+Note that this will not allow you to access both the original and copy
+of the database at the same time.
+<p><li>Create a new file that will have a new bytestring. The simplest way to
+do this is to use the
+<a href="../../utility/db_dump.html">db_dump</a> utility to dump out the contents of the database and
+then use the <a href="../../utility/db_load.html">db_load</a> utility to load the dumped output into a
+new file. This allows you to access both the original and copy of
+the database at the same time.
+<p><li>If your database is too large to be copied, overwrite the bytestring in
+the copied database with a new bytestring. This allows you to access
+both the original and copy of the database at the same time.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/cache.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/compatible.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/environ.html b/libdb/docs/ref/program/environ.html
new file mode 100644
index 0000000..ca44051
--- /dev/null
+++ b/libdb/docs/ref/program/environ.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Environment variables</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/errorret.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/mt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Environment variables</h1>
+<p>The Berkeley DB library uses the following environment variables:
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the environment variable DB_HOME is set, it is used as part of
+<a href="../../ref/env/naming.html">File Naming</a>.
+Note: For the DB_HOME variable to take effect, either the
+<a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a> or <a href="../../api_c/env_open.html#DB_USE_ENVIRON_ROOT">DB_USE_ENVIRON_ROOT</a> flags must be
+specified to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.
+<p><dt>TMPDIR, TEMP, TMP, TempFolder<dd>The TMPDIR, TEMP, TMP, and TempFolder environment variables are all
+checked as locations in which to create temporary files. See
+<a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a> for more information.
+</dl>
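+<p>For example (an abbreviated sketch), passing a NULL home directory together
+with the <a href="../../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a>
+flag permits the DB_HOME environment variable to name the environment:
+<p><blockquote><pre>if ((ret = dbenv-&gt;open(dbenv, NULL,
+    DB_CREATE | DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0)
+	dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;open");</pre></blockquote>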
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/errorret.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/mt.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/errorret.html b/libdb/docs/ref/program/errorret.html
new file mode 100644
index 0000000..1baf2ab
--- /dev/null
+++ b/libdb/docs/ref/program/errorret.html
@@ -0,0 +1,114 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error returns to applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/appsignals.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/environ.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error returns to applications</h1>
+<p>Except for the historic <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a>, and <a href="../../api_c/hsearch.html">hsearch</a>
+interfaces, Berkeley DB does not use the global variable <b>errno</b> to
+return error values. The return values for all Berkeley DB functions are
+grouped into the following three categories:
+<p><dl compact>
+<p><dt>0<dd>A return value of 0 indicates that the operation was successful.
+<p><dt>&gt; 0<dd>A return value that is greater than 0 indicates that there was a system
+error. The <b>errno</b> value returned by the system is returned by
+the function; for example, when a Berkeley DB function is unable to allocate
+memory, the return value from the function will be ENOMEM.
+<p><dt>&lt; 0<dd>A return value that is less than 0 indicates a condition that was not
+a system failure, but was not an unqualified success, either. For
+example, a routine to retrieve a key/data pair from the database may
+return DB_NOTFOUND when the key/data pair does not appear in
+the database; as opposed to the value of 0, which would be returned if
+the key/data pair were found in the database.
+<p><a name="3"><!--meow--></a>
+All Berkeley DB-specific error values are less than 0 in order to avoid
+conflict with possible values of <b>errno</b>. Specifically, Berkeley DB
+reserves all values from -30,800 to -30,999 to itself as possible error
+values. There are a few Berkeley DB interfaces where it is possible for an
+application function to be called by a Berkeley DB function and subsequently
+fail with an application-specific return. Such failure returns will be
+passed back to the function that originally called a Berkeley DB interface.
+To avoid ambiguity about the cause of the error, error values separate
+from the Berkeley DB error name space should be used.
+</dl>
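+<p>As a rough sketch of how the three categories above are commonly
+handled (assuming the DB 4.1-era C API and a hypothetical, already-open
+<a href="../../api_c/db_class.html">DB</a> handle), an application might distinguish "not found" from real
+failures as follows:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Return 1 if the key exists, 0 if it does not, -1 on error. */
+int
+key_exists(DB *dbp, char *keystr)
+{
+    DBT key, data;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = keystr;
+    key.size = strlen(keystr);
+
+    switch (ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) {
+    case 0:            /* Success. */
+        return (1);
+    case DB_NOTFOUND:  /* Berkeley DB-specific "soft" error. */
+        return (0);
+    default:           /* System error or other Berkeley DB error. */
+        fprintf(stderr, "DB-&gt;get: %s\n", db_strerror(ret));
+        return (-1);
+    }
+}</pre></blockquote>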
+<p>Although possible error returns are specified by each individual function's
+manual page, there are a few error returns that deserve general mention:
+<h3><a name="DB_NOTFOUND">DB_NOTFOUND</a> and <a name="DB_KEYEMPTY">DB_KEYEMPTY</a></h3>
+<p>There are two special return values that are similar in meaning and that
+are returned in similar situations, and therefore might be confused:
+DB_NOTFOUND and DB_KEYEMPTY.
+<p>The DB_NOTFOUND error return indicates that the requested key/data
+pair did not exist in the database or that start-of- or end-of-file has
+been reached by a cursor.
+<p>The DB_KEYEMPTY error return indicates that the requested
+key/data pair logically exists but was never explicitly created by the
+application (the Recno and Queue access methods will automatically
+create key/data pairs under some circumstances; see <a href="../../api_c/db_open.html">DB-&gt;open</a>
+for more information), or that the requested key/data pair was deleted
+and never re-created. In addition, the Queue access method will return
+DB_KEYEMPTY for records that were created as part of a
+transaction that was later aborted and never re-created.
+<h3><a name="DB_KEYEXIST">DB_KEYEXIST</a></h3>
+<p>The DB_KEYEXIST error return indicates the <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a>
+option was specified to the <a href="../../api_c/db_put.html">DB-&gt;put</a> method and the key already exists
+in the database.
+<a name="4"><!--meow--></a>
+<h3><a name="DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a></h3>
+<p>When multiple threads of control are modifying the database, there is
+normally the potential for deadlock. In Berkeley DB, deadlock is signified by
+an error return from the Berkeley DB function of the value
+DB_LOCK_DEADLOCK. Whenever a Berkeley DB function returns
+DB_LOCK_DEADLOCK, the enclosing transaction should be aborted.
+<p>Any Berkeley DB function that attempts to acquire locks can potentially return
+DB_LOCK_DEADLOCK. Practically speaking, the safest way to deal
+with applications that can deadlock is to handle a
+DB_LOCK_DEADLOCK return from any Berkeley DB access method call.
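+<p>A common way to honor this rule is a retry loop around the whole
+transaction. The following sketch assumes the DB 4.1-era C API and a
+hypothetical do_work() function that performs the application's database
+operations inside the transaction:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+extern int do_work(DB *, DB_TXN *);    /* Hypothetical application code. */
+
+int
+run_transaction(DB_ENV *dbenv, DB *dbp)
+{
+    DB_TXN *txn;
+    int ret;
+
+    for (;;) {
+        if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+            break;                  /* Hard failure. */
+        if ((ret = do_work(dbp, txn)) == 0) {
+            ret = txn-&gt;commit(txn, 0);
+            break;                  /* Done (or the commit failed). */
+        }
+        /* Abort after any failure inside the transaction. */
+        (void)txn-&gt;abort(txn);
+        if (ret != DB_LOCK_DEADLOCK)
+            break;                  /* Not a deadlock: give up. */
+        /* This transaction was the deadlock victim: retry it. */
+    }
+    return (ret);
+}</pre></blockquote>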
+<h3><a name="DB_LOCK_NOTGRANTED">DB_LOCK_NOTGRANTED</a></h3>
+<p>When multiple threads of control are modifying the database, there is
+normally the potential for deadlock. In order to avoid deadlock,
+applications may specify -- on a per-transaction basis -- that if a lock
+is unavailable, the Berkeley DB operation should return immediately instead
+of waiting on the lock. The error return in this case will be
+DB_LOCK_NOTGRANTED. Whenever a Berkeley DB function returns
+DB_LOCK_NOTGRANTED, the enclosing transaction should be
+aborted.
+<h3><a name="DB_RUNRECOVERY">DB_RUNRECOVERY</a></h3>
+<p>There exists a class of errors that Berkeley DB considers fatal to an entire
+Berkeley DB environment. An example of this type of error is a corrupted
+database page. The only way to recover from these failures is to have
+all threads of control exit the Berkeley DB environment, run recovery of the
+environment, and re-enter Berkeley DB. (It is not strictly necessary that the
+processes exit, although that is the only way to recover system
+resources, such as file descriptors and memory, allocated by Berkeley DB.)
+<p>When this type of error is encountered, the error value
+DB_RUNRECOVERY is returned. This error can be returned by any
+Berkeley DB interface. Once DB_RUNRECOVERY is returned by any
+interface, it will be returned from all subsequent Berkeley DB calls made by
+any threads of control participating in the environment.
+<p>Optionally, applications may also specify a fatal-error callback function
+using the <a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a> method. This callback function will be
+called with two arguments: a reference to the <a href="../../api_c/env_class.html">DB_ENV</a> structure
+associated with the environment and the <b>errno</b> value
+associated with the underlying error that caused the problem.
+<p>Applications can handle such fatal errors in one of two ways: by checking
+for DB_RUNRECOVERY as part of their normal Berkeley DB error return
+checking, similarly to DB_LOCK_DEADLOCK or any other error, or
+by simply exiting the application when the callback function is called
+in applications that have no cleanup processing of their own.
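+<p>A short sketch of the second approach (DB 4.1-era C API; the callback
+name and the decision to exit immediately are illustrative only):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+/* Called by Berkeley DB when a fatal, environment-wide error occurs. */
+static void
+panic_callback(DB_ENV *dbenv, int errval)
+{
+    fprintf(stderr, "fatal Berkeley DB error: %s\n", db_strerror(errval));
+    exit(1);
+}
+
+/* Register the callback on a DB_ENV handle. */
+int
+install_panic_handler(DB_ENV *dbenv)
+{
+    return (dbenv-&gt;set_paniccall(dbenv, panic_callback));
+}</pre></blockquote>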
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/appsignals.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/environ.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/faq.html b/libdb/docs/ref/program/faq.html
new file mode 100644
index 0000000..b99b598
--- /dev/null
+++ b/libdb/docs/ref/program/faq.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Programmer notes FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Programmer notes FAQ</h1>
+<p><ol>
+<a name="2"><!--meow--></a>
+<p><li><b>What priorities should threads/tasks executing Berkeley DB functions
+be given?</b>
+<p>Tasks executing Berkeley DB functions should have the same, or roughly
+equivalent, system priorities. For example, it can be dangerous to give
+tasks of control performing checkpoints a lower priority than tasks of
+control doing database lookups, because starvation can result.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/runtime.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/mt.html b/libdb/docs/ref/program/mt.html
new file mode 100644
index 0000000..484b6c6
--- /dev/null
+++ b/libdb/docs/ref/program/mt.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Multithreaded applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/environ.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/scope.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Multithreaded applications</h1>
+<p>Berkeley DB fully supports multithreaded applications. The Berkeley DB library is
+not itself multithreaded, as it was deliberately architected to not use
+threads internally because of the portability problems it would
+introduce. Database environment and database object handles returned
+from Berkeley DB library functions are free-threaded. No other object handles
+returned from the Berkeley DB library are free-threaded. The following rules
+should be observed when using threads to access the Berkeley DB library:
+<p><ol>
+<p><li>The <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag must be specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>
+and <a href="../../api_c/db_open.html">DB-&gt;open</a> methods if the Berkeley DB handles returned by those interfaces
+will be used in the context of more than one thread. Setting the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag inconsistently may result in database corruption.
+<p>Threading is assumed in the Java API, so no special flags are required,
+and Berkeley DB functions will always behave as if the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag
+had been specified.
+<p>Only a single thread may call the <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> or <a href="../../api_c/db_close.html">DB-&gt;close</a> methods
+for a returned environment or database handle.
+<p>No other Berkeley DB handles are free-threaded; for example, cursors and
+transactions may not span threads because their returned handles are
+not free-threaded.
+<p><li>When using the non-cursor Berkeley DB calls to retrieve key/data items (for
+example, <a href="../../api_c/db_get.html">DB-&gt;get</a>), the memory to which the pointer stored into
+the Dbt refers is valid only until the next call using the <a href="../../api_c/db_class.html">DB</a>
+handle returned by <a href="../../api_c/db_open.html">DB-&gt;open</a>. This includes <b>any</b> use of
+the returned <a href="../../api_c/db_class.html">DB</a> handle, including by another thread within the
+process.
+<p>For this reason, if the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag was specified to the
+<a href="../../api_c/db_open.html">DB-&gt;open</a> method, either <a href="../../api_c/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>, <a href="../../api_c/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a>,
+or <a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> must be specified in the <a href="../../api_c/dbt_class.html">DBT</a> when
+performing any non-cursor key or data retrieval.
+<p><li>Transactions may not span threads. Each transaction must begin and end
+in the same thread, and each transaction may be used only by a single
+thread.
+<p>Cursors may not span transactions or threads. Each cursor must be
+allocated and deallocated within the same transaction and within
+the same thread.
+<p><li>User-level synchronization mutexes must have been implemented for the
+compiler/architecture combination. Attempting to specify the DB_THREAD
+flag will fail if fast mutexes are not available.
+<p>If blocking mutexes are available (for example POSIX pthreads), they
+will be used. Otherwise, the Berkeley DB library will make a system call to
+pause for some amount of time when it is necessary to wait on a lock.
+This may not be optimal, especially in a thread-only environment, in
+which it will be more efficient to explicitly yield the processor to
+another thread.
+<p>It is possible to specify a yield function on a per-application basis.
+See <a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a> for more information.
+<p>It is possible to specify the number of attempts that will be made to
+acquire the mutex before waiting. See <a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a> for
+more information.
+</ol>
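+<p>As a sketch of the retrieval rule above (the second item in the list),
+assuming the DB 4.1-era C API and a <a href="../../api_c/db_class.html">DB</a> handle opened with
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>; the key string is illustrative only:
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+threaded_get(DB *dbp)
+{
+    DBT key, data;
+    int ret;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "fruit";
+    key.size = sizeof("fruit");
+
+    /*
+     * Have Berkeley DB allocate the returned data with malloc so the
+     * result stays valid no matter what other threads do with the DB
+     * handle.  The caller owns, and must free, data.data.
+     */
+    data.flags = DB_DBT_MALLOC;
+
+    if ((ret = dbp-&gt;get(dbp, NULL, &amp;key, &amp;data, 0)) == 0) {
+        /* ... use data.data and data.size ... */
+        free(data.data);
+    }
+    return (ret);
+}</pre></blockquote>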
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/environ.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/scope.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/namespace.html b/libdb/docs/ref/program/namespace.html
new file mode 100644
index 0000000..d67abcb
--- /dev/null
+++ b/libdb/docs/ref/program/namespace.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Name spaces</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/scope.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/cache.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Name spaces</h1>
+<h3>C Language Name Space</h3>
+<p>The Berkeley DB library is careful to stay out of the C language programmer's name space,
+but there are a few potential areas for concern, mostly in the Berkeley DB
+include file db.h. The db.h include file defines a number of types and
+strings. Where possible, all of these types and strings are prefixed with
+"DB_" or "db_". There are a few notable exceptions.
+<p>The Berkeley DB library uses a macro named "__P" to configure for systems that
+do not provide ANSI C function prototypes. This could potentially collide
+with other systems using a "__P" macro for similar or different purposes.
+<p>The Berkeley DB library needs information about specifically sized types for
+each architecture. If they are not provided by the system, they are
+typedef'd in the db.h include file. The types that may be typedef'd
+by db.h include the following: u_int8_t, int16_t, u_int16_t, int32_t,
+u_int32_t, u_char, u_short, u_int, and u_long.
+<p>The Berkeley DB library declares a number of external routines. All these
+routines are prefixed with the strings "db_", "lock_", "log_", "memp_"
+or "txn_". All internal routines are prefixed with the strings "__db_",
+"__lock_", "__log_", "__memp_", or "__txn_".
+<h3>Filesystem Name Space</h3>
+<p>Berkeley DB environments create or use some number of files in environment
+home directories. These files are named <a href="../../ref/env/db_config.html#DB_CONFIG">DB_CONFIG</a>, "log.NNNNN"
+(for example, log.0000000003, where the number of digits following the
+dot is unspecified), or with the string prefix "__db" (for example,
+__db.001). Database files that match these names should not be created
+in the environment directory.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/scope.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/cache.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/runtime.html b/libdb/docs/ref/program/runtime.html
new file mode 100644
index 0000000..84d5f7f
--- /dev/null
+++ b/libdb/docs/ref/program/runtime.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Run-time configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/compatible.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Run-time configuration</h1>
+<p>It is possible to configure Berkeley DB at run-time so applications can
+redirect all Berkeley DB calls to underlying library or system call
+functionality. For example, an application might want Berkeley DB to call
+debugging memory allocation routines rather than the standard C library
+interfaces. The following interfaces support this functionality:
+<p><blockquote><pre><a href="../../api_c/set_func_close.html">db_env_set_func_close</a>
+<a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a>
+<a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a>
+<a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a>
+<a href="../../api_c/set_func_free.html">db_env_set_func_free</a>
+<a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a>
+<a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a>
+<a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a>
+<a href="../../api_c/set_func_map.html">db_env_set_func_map</a>
+<a href="../../api_c/set_func_open.html">db_env_set_func_open</a>
+<a href="../../api_c/set_func_read.html">db_env_set_func_read</a>
+<a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a>
+<a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a>
+<a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a>
+<a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a>
+<a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a>
+<a href="../../api_c/set_func_write.html">db_env_set_func_write</a>
+<a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></pre></blockquote>
+<p>These interfaces are available only from the Berkeley DB C language API, and
+are not available from all of the operating systems Berkeley DB supports.
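+<p>For example, the memory-allocation case mentioned above might look
+roughly like the following (the dbg_* routines are hypothetical,
+application-supplied debugging allocators):
+<p><blockquote><pre>#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+extern void *dbg_malloc(size_t);
+extern void *dbg_realloc(void *, size_t);
+extern void  dbg_free(void *);
+
+int
+use_debug_allocators(void)
+{
+    int ret;
+
+    /* These calls should be made before any other Berkeley DB call. */
+    if ((ret = db_env_set_func_malloc(dbg_malloc)) != 0 ||
+        (ret = db_env_set_func_realloc(dbg_realloc)) != 0 ||
+        (ret = db_env_set_func_free(dbg_free)) != 0)
+        return (ret);
+    return (0);
+}</pre></blockquote>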
+<p>A not-uncommon problem for applications is the new API in Solaris 2.6
+for manipulating large files. Because this API was not part of Solaris
+2.5, it is difficult to create a single binary that takes advantage of
+the large file functionality in Solaris 2.6, but still runs on Solaris
+2.5. <a href="solaris.txt">Example code</a> that supports this is
+included in the Berkeley DB distribution, however, the example code was
+written using previous versions of the Berkeley DB APIs, and is only useful
+as an example.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/compatible.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/scope.html b/libdb/docs/ref/program/scope.html
new file mode 100644
index 0000000..3e82d1b
--- /dev/null
+++ b/libdb/docs/ref/program/scope.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB handles</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Programmer Notes</dl></h3></td>
+<td align=right><a href="../../ref/program/mt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/namespace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB handles</h1>
+<p>The Berkeley DB library has a number of object handles. The following table
+lists those handles, their scope, and whether they are free-threaded
+(that is, whether multiple threads within a process can share them).
+<p><dl compact>
+<p><dt><a href="../../api_c/env_class.html">DB_ENV</a><dd>The <a href="../../api_c/env_class.html">DB_ENV</a> handle, created by the <a href="../../api_c/env_create.html">db_env_create</a> method, refers
+to a Berkeley DB database environment -- a collection of Berkeley DB subsystems,
+log files and databases. <a href="../../api_c/env_class.html">DB_ENV</a> handles are free-threaded if
+the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method when
+the environment is opened. The handle should not be closed while any
+other handle remains open that is using it as a reference (for example,
+<a href="../../api_c/db_class.html">DB</a> or <a href="../../api_c/txn_class.html">DB_TXN</a>). Once either the <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> or
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> methods are called, the handle may not be accessed again,
+regardless of the function's return.
+<p><dt><a href="../../api_c/txn_class.html">DB_TXN</a><dd>The <a href="../../api_c/txn_class.html">DB_TXN</a> handle, created by the <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> method, refers to
+a single transaction. The handle is not free-threaded; transactions
+may not span threads, nor may transactions be used by more than a single
+thread. Once the <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> or <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a> methods are called,
+the handle may not be accessed again, regardless of the function's
+return. In addition, parent transactions may not issue any Berkeley DB
+operations while they have active child transactions (child transactions
+that have not yet been committed or aborted) except for
+<a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> and <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>.
+<p><dt><a href="../../api_c/logc_class.html">DB_LOGC</a><dd>The <a href="../../api_c/logc_class.html">DB_LOGC</a> handle refers to a cursor into the log files. The
+handle is not free-threaded. Once the <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a> method is called,
+the handle may not be accessed again, regardless of the function's
+return.
+<p><dt><a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a><dd>The <a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle refers to an open file in the shared
+memory buffer pool of the database environment. The handle is not
+free-threaded. Once the <a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method is called, the handle may
+not be accessed again, regardless of the function's return.
+<p><dt><a href="../../api_c/db_class.html">DB</a><dd>The <a href="../../api_c/db_class.html">DB</a> handle, created by the <a href="../../api_c/db_create.html">db_create</a> method, refers to a
+single Berkeley DB database, which may or may not be part of a database
+environment. <a href="../../api_c/db_class.html">DB</a> handles are free-threaded if the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag is specified to the <a href="../../api_c/db_open.html">DB-&gt;open</a> method when the
+database is opened or if the database environment in which the database
+is opened is free-threaded. The handle should not be closed while any
+other handle that refers to the database is in use; for example,
+database handles must not be closed while cursor handles into the
+database remain open, or transactions that include operations on the
+database have not yet been committed or aborted. Once the
+<a href="../../api_c/db_close.html">DB-&gt;close</a>, <a href="../../api_c/db_remove.html">DB-&gt;remove</a>, or <a href="../../api_c/db_rename.html">DB-&gt;rename</a> methods are
+called, the handle may not be accessed again, regardless of the
+function's return.
+<p><dt><a href="../../api_c/dbc_class.html">DBC</a><dd>The <a href="../../api_c/dbc_class.html">DBC</a> handle refers to a cursor into a Berkeley DB database. The
+handle is not free-threaded, and cursors may not span threads; nor may
+cursors be used by more than a single thread. If the cursor is to be
+used to perform operations on behalf of a transaction, the cursor must
+be opened and closed within the context of that single transaction.
+Once <a href="../../api_c/dbc_close.html">DBcursor-&gt;c_close</a> has been called, the handle may not be accessed
+again, regardless of the function's return.
+</dl>
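+<p>The table above implies a create/open/close ordering; a rough sketch
+(DB 4.1-era C API, with error handling reduced to the essentials):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+open_and_close(const char *home, const char *file)
+{
+    DB_ENV *dbenv;
+    DB *dbp;
+    int ret, t_ret;
+
+    /* The environment comes first: DB handles opened in it refer to it. */
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;open(dbenv, home,
+        DB_CREATE | DB_INIT_MPOOL | DB_THREAD, 0)) != 0)
+        goto err_env;
+
+    if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+        goto err_env;
+    if ((ret = dbp-&gt;open(dbp, NULL,
+        file, NULL, DB_BTREE, DB_CREATE | DB_THREAD, 0644)) != 0)
+        goto err_db;
+
+    /* ... cursors and transactions would be opened and closed here ... */
+
+err_db: if ((t_ret = dbp-&gt;close(dbp, 0)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    /* Close the environment only after every handle using it is closed. */
+err_env: if ((t_ret = dbenv-&gt;close(dbenv, 0)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>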
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/program/mt.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/program/namespace.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/program/solaris.txt b/libdb/docs/ref/program/solaris.txt
new file mode 100644
index 0000000..d2ec316
--- /dev/null
+++ b/libdb/docs/ref/program/solaris.txt
@@ -0,0 +1,213 @@
+#ifdef OS_solaris
+/*
+ * This is all for Solaris 2.6.
+ *
+ * Sun defined a new API in Solaris2.6 to be used when manipulating large
+ * (>2Gbyte) files. This API isn't present in 2.5.x, so we can't simply
+ * call it -- that would mean two binaries, one for 2.5.x and the other for
+ * 2.6. Not pretty. So, what we do here is determine the OS on which we're
+ * running at runtime, and adjust the underlying Berkeley DB calls to use
+ * the new API if it's there.
+ */
+
+/* This must match the definition of stat64 in Solaris2.6 */
+struct our_stat64 {
+ dev_t st_dev;
+ long st_pad1[3]; /* reserve for dev expansion */
+ u_longlong_t st_ino;
+ mode_t st_mode;
+ nlink_t st_nlink;
+ uid_t st_uid;
+ gid_t st_gid;
+ dev_t st_rdev;
+ long st_pad2[2];
+ longlong_t st_size;
+ timestruc_t mst_atime;
+ timestruc_t mst_mtime;
+ timestruc_t mst_ctime;
+ long st_blksize;
+ longlong_t st_blocks; /* large file support */
+ char st_fstype[_ST_FSTYPSZ];
+ long st_pad4[8]; /* expansion area */
+};
+
+#define MEGABYTE (1024 * 1024)
+
+typedef int (*open_fn)(const char *path, int flags, ...);
+typedef longlong_t (*lseek64_fn)(int fildes, longlong_t offset, int whence);
+typedef longlong_t (*fstat64_fn)(int fildes, struct our_stat64 *s);
+typedef void* (*mmap64_fn)(void* addr, size_t len, int prot, int flags,
+    int filedes, longlong_t off);
+
+static fstat64_fn os_fstat64_fn = NULL;
+static lseek64_fn os_lseek64_fn = NULL;
+static mmap64_fn os_mmap64_fn = NULL;
+static open_fn os_open64_fn = NULL;
+
+static int dblayer_load_largefile_fns()
+{
+ void *lib_handle = NULL;
+ void *function_found = NULL;
+ int ret = 0;
+
+ lib_handle = dlopen(NULL, RTLD_NOW);
+ if (NULL == lib_handle)
+ return (-1);
+
+ function_found = dlsym(lib_handle,"open64");
+ if (NULL == function_found)
+ return (-1);
+ os_open64_fn = (open_fn)function_found;
+
+ function_found = dlsym(lib_handle,"lseek64");
+ if (NULL == function_found)
+ return (-1);
+ os_lseek64_fn = (lseek64_fn)function_found;
+
+ function_found = dlsym(lib_handle,"fstat64");
+ if (NULL == function_found)
+ return (-1);
+ os_fstat64_fn = (fstat64_fn)function_found;
+
+ function_found = dlsym(lib_handle,"mmap64");
+ if (NULL == function_found)
+ return (-1);
+ os_mmap64_fn = (mmap64_fn)function_found;
+
+ return 0;
+}
+
+/* Helper function for large seeks */
+static int dblayer_seek_fn_solaris(int fd,
+ size_t pgsize, db_pgno_t pageno, u_long relative, int whence)
+{
+ longlong_t offset = 0;
+ longlong_t ret = 0;
+
+ if (NULL == os_lseek64_fn) {
+ return -1;
+ }
+
+ offset = (longlong_t)pgsize * pageno + relative;
+
+ ret = (*os_lseek64_fn)(fd,offset,whence);
+
+ return (ret == -1) ? errno : 0;
+}
+
+/* Helper function for large file mmap */
+static int dblayer_map_solaris(fd, len, is_private, is_rdonly, addr)
+ int fd, is_private, is_rdonly;
+ size_t len;
+ void **addr;
+{
+ void *p;
+ int flags, prot;
+
+ flags = is_private ? MAP_PRIVATE : MAP_SHARED;
+ prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE);
+
+ if ((p = (*os_mmap64_fn)(NULL,
+ len, prot, flags, fd, (longlong_t)0)) == (void *)MAP_FAILED)
+ return (errno);
+
+ *addr = p;
+ return (0);
+}
+
+/* Helper function for large fstat */
+static int dblayer_ioinfo_solaris(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+ struct our_stat64 sb;
+
+ if (NULL == os_fstat64_fn) {
+ return -1;
+ }
+
+ if ((*os_fstat64_fn)(fd, &sb) == -1)
+ return (errno);
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t) (sb.st_size / (longlong_t)MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t) (sb.st_size % (longlong_t)MEGABYTE);
+
+ /*
+ * Return the underlying filesystem blocksize, if available. Default
+ * to 8K on the grounds that most OS's use less than 8K as their VM
+ * page size.
+ */
+ if (iosizep != NULL)
+ *iosizep = sb.st_blksize;
+ return (0);
+}
+#endif
+
+#ifdef irix
+/*
+ * A similar mess to Solaris: a new API added in IRIX6.2 to support large
+ * files. We always build on 6.2 or later, so no need to do the same song
+ * and dance as on Solaris -- we always have the header files for the
+ * 64-bit API.
+ */
+
+/* Helper function for large seeks */
+static int dblayer_seek_fn_irix(int fd,
+ size_t pgsize, db_pgno_t pageno, u_long relative, int whence)
+{
+ off64_t offset = 0;
+ off64_t ret = 0;
+
+ offset = (off64_t)pgsize * pageno + relative;
+
+ ret = lseek64(fd,offset,whence);
+
+ return (ret == -1) ? errno : 0;
+}
+
+/* Helper function for large fstat */
+static int dblayer_ioinfo_irix(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+ struct stat64 sb;
+
+ if (fstat64(fd, &sb) == -1) {
+ return (errno);
+ }
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t) (sb.st_size / (off64_t)MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t) (sb.st_size % (off64_t)MEGABYTE);
+
+ if (iosizep != NULL)
+ *iosizep = sb.st_blksize;
+ return (0);
+}
+#endif /* irix */
+
+static int dblayer_override_libdb_functions(dblayer_private *priv)
+{
+#if defined(OS_solaris)
+ int ret = 0;
+
+ ret = dblayer_load_largefile_fns();
+ if (0 != ret) {
+ Debug("Not Solaris2.6: no large file support enabled\n");
+ } else {
+ /* Means we did get the XXX64 functions, so let's use them */
+ db_jump_set((void*)os_open64_fn, DB_FUNC_OPEN);
+ db_jump_set((void*)dblayer_seek_fn_solaris, DB_FUNC_SEEK);
+ db_jump_set((void*)dblayer_ioinfo_solaris, DB_FUNC_IOINFO);
+ db_jump_set((void*)dblayer_map_solaris, DB_FUNC_MAP);
+ Debug("Solaris2.6: selected 64-bit file handling.\n");
+ }
+#else
+#if defined (irix)
+ db_jump_set((void*)dblayer_seek_fn_irix, DB_FUNC_SEEK);
+ db_jump_set((void*)dblayer_ioinfo_irix, DB_FUNC_IOINFO);
+#endif /* irix */
+#endif /* OS_solaris */
+ return 0;
+}
diff --git a/libdb/docs/ref/refs/bdb_usenix.html b/libdb/docs/ref/refs/bdb_usenix.html
new file mode 100644
index 0000000..6ff3211
--- /dev/null
+++ b/libdb/docs/ref/refs/bdb_usenix.html
@@ -0,0 +1,1120 @@
+<!--"@(#)usenix.html 1.2 4/26/99"-->
+<html>
+<head>
+<title>Berkeley DB</title>
+</head>
+<body bgcolor="white">
+<center>
+<h1>
+Berkeley DB
+</h1>
+<p>
+<i>
+Michael A. Olson
+<br>
+Keith Bostic
+<br>
+Margo Seltzer
+<br>&nbsp;
+<br>
+Sleepycat Software, Inc.
+<br>&nbsp;
+<br>
+</i>
+<b>
+Abstract
+</b>
+</center>
+<font size="-1">
+<blockquote>
+<p>
+Berkeley DB is an Open Source embedded database system with a number
+of key advantages over comparable systems. It is simple to use, supports
+concurrent access by multiple users, and provides industrial-strength
+transaction support, including surviving system and disk crashes. This
+paper describes the design and technical features of Berkeley DB, the
+distribution, and its license.
+</blockquote>
+</font>
+<h1>
+Introduction
+</h1>
+<p>
+The Berkeley Database (Berkeley DB) is an embedded database system
+that can be used in applications requiring high-performance
+concurrent storage and retrieval of key/value pairs. The software
+is distributed as a library that can be linked directly into an
+application.
+It provides a variety of programmatic interfaces,
+including callable APIs for C, C++, Perl, Tcl and Java.
+Users may download Berkeley DB from Sleepycat Software's Web site,
+at
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+<p>
+Sleepycat distributes Berkeley DB as an Open Source product. The company
+collects license fees for certain uses of the software and sells support
+and services.
+<h2>
+History
+</h2>
+<p>
+Berkeley DB began as a new implementation of a hash access method
+to replace both
+<tt>hsearch</tt>
+and the various
+<tt>dbm</tt>
+implementations
+(<tt>dbm</tt> from AT&T,
+<tt>ndbm</tt>
+from Berkeley, and
+<tt>gdbm</tt>
+from the GNU project).
+In 1990 Seltzer and Yigit produced a package called Hash to do this
+<a href="#Selt91">[Selt91]</a>.
+<p>
+The first general release of Berkeley DB, in 1991,
+included some interface changes and a new B+tree access method.
+At roughly the same time, Seltzer and Olson
+developed a prototype transaction
+system based on Berkeley DB, called LIBTP <a href="#Selt92">[Selt92]</a>,
+but never released the code.
+<p>
+The 4.4BSD UNIX release included Berkeley DB 1.85 in 1992.
+Seltzer and Bostic maintained the code in the early 1990s
+in Berkeley and in Massachusetts.
+Many users adopted the code during this period.
+<p>
+By mid-1996,
+users wanted commercial support for the software.
+In response, Bostic and Seltzer formed Sleepycat Software.
+The company enhances, distributes, and
+supports Berkeley DB and supporting software and documentation.
+Sleepycat released version 2.1 of Berkeley DB in mid-1997
+with important new features, including
+support for concurrent access to databases.
+The company makes about three commercial releases a year,
+and most recently shipped version 2.8.
+<h2>
+Overview of Berkeley DB
+</h2>
+<p>
+The C interfaces in Berkeley DB permit
+<tt>dbm</tt>-style
+record management
+for databases,
+with significant extensions to handle duplicate data items elegantly,
+to deal with concurrent access, and to provide transactional
+support so that multiple changes can be simultaneously committed
+(so that they are made permanent) or rolled back (so that the
+database is restored to its state at the beginning of the transaction).
+<p>
+C++ and Java interfaces provide a small set of classes for
+operating on a database. The main class in both cases is called
+<tt>Db</tt>,
+and provides methods that encapsulate the
+<tt>dbm</tt>-style
+interfaces that the C interfaces provide.
+<p>
+Tcl and Perl interfaces allow developers working in those languages
+to use Berkeley DB in their applications.
+Bindings for both languages are included in the distribution.
+<p>
+Developers may compile their applications and link in Berkeley DB
+statically or dynamically.
+<h2>
+How Berkeley DB is used
+</h2>
+<p>
+The Berkeley DB library supports concurrent access to databases.
+It can be linked
+into standalone applications, into a collection of cooperating applications,
+or into servers that handle requests and do database operations on
+behalf of clients.
+<p>
+Compared to using a standalone database management system, Berkeley
+DB is easy to understand and simple to use. The
+software stores and retrieves records, which consist of key/value pairs.
+Keys are used to locate items and can be any data type or structure
+supported by the programming language.
+<p>
+The programmer can provide the functions that Berkeley DB uses to
+operate on keys.
+For example,
+B+trees can use a custom comparison function,
+and the Hash access method can use a custom hash function.
+Berkeley DB uses default functions if none are supplied.
+Otherwise, Berkeley DB does not examine or interpret either keys
+or values in any way.
+Values may be arbitrarily long.
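+<p>
+As a brief, hedged illustration of the comparison-function hook described
+above (using the C API of the Berkeley DB release bundled in this tree;
+the reverse-ordering policy and the assumption of NUL-terminated keys are
+illustrative only):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Order NUL-terminated string keys in reverse lexical order. */
+static int
+reverse_compare(DB *dbp, const DBT *a, const DBT *b)
+{
+    return (-strcmp((char *)a-&gt;data, (char *)b-&gt;data));
+}
+
+int
+configure_btree(DB *dbp)
+{
+    /* The comparison function must be set before the database is opened. */
+    return (dbp-&gt;set_bt_compare(dbp, reverse_compare));
+}</pre></blockquote>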
+<p>
+It is also important to understand what Berkeley DB is not.
+It is not a database server that handles network requests. It is not an
+SQL engine that executes queries. It is not a relational or object-oriented
+database management system.
+<p>
+It is possible to build any of those on top of Berkeley DB,
+but the package, as distributed,
+is an embedded database engine. It has been designed
+to be portable, small, fast, and reliable.
+<h2>
+Applications that use Berkeley DB
+</h2>
+<p>
+Berkeley DB is embedded in a variety of proprietary and Open Source
+software packages.
+This section highlights a few of the products that use it.
+<p>
+Directory servers, which do data storage and retrieval using the
+Local Directory Access Protocol (LDAP), provide naming and directory
+lookup service on local-area networks.
+This service is,
+essentially,
+database query and update,
+but uses a simple protocol rather than SQL or ODBC.
+Berkeley DB is the embedded data manager in the majority of deployed
+directory servers today,
+including LDAP servers from Netscape,
+MessageDirect (formerly Isode),
+and others.
+<p>
+Berkeley DB is also embedded in a large number of mail servers.
+Intermail,
+from Software.com,
+uses Berkeley DB as a message store
+and as the backing store for its directory server.
+The sendmail server
+(including both the commercial Sendmail Pro offering from Sendmail,
+Inc. and the version distributed by sendmail.org)
+uses Berkeley DB to store aliases and other information.
+Similarly,
+Postfix (formerly VMailer) uses Berkeley DB
+to store administrative information.
+<p>
+In addition,
+Berkeley DB is embedded in a wide variety of other software products.
+Example applications include managing access control lists,
+storing user keys in a public-key infrastructure,
+recording machine-to-network-address mappings in address servers,
+and storing configuration and device information in video
+post-production software.
+<p>
+Finally,
+Berkeley DB is a part of many other Open Source software packages
+available on the Internet.
+For example,
+the software is embedded in the Apache Web server and the Gnome desktop.
+<h1>
+Access Methods
+</h1>
+<p>
+In database terminology, an access method is the disk-based structure
+used to store data and the operations available on that structure.
+For example, many database systems support a B+tree access method.
+B+trees allow equality-based lookups (find keys equal to some constant),
+range-based lookups (find keys between two constants) and record
+insertion and deletion.
+<p>
+Berkeley DB supports three access methods: B+tree,
+Extended Linear Hashing (Hash),
+and Fixed- or Variable-length Records (Recno).
+All three operate on records composed of a key and a data value.
+In the B+tree and Hash access methods, keys can have arbitrary structure.
+In the Recno access method, each record is assigned a record number, which
+serves as the key.
+In all the access methods, the
+value can have arbitrary structure.
+The programmer can supply comparison or hashing functions for keys,
+and Berkeley DB stores and retrieves values without
+interpreting them.
+<p>
+All of the access methods use the host filesystem as a backing store.
+<h2>
+Hash
+</h2>
+<p>
+Berkeley DB includes a Hash access method that implements extended
+linear hashing <a href="#Litw80">[Litw80]</a>.
+Extended linear hashing adjusts the hash function as the hash
+table grows, attempting to keep all buckets underfull in the steady
+state.
+<p>
+The Hash access method supports insertion and deletion of records and
+lookup by exact match only. Applications may iterate over all records
+stored in a table, but the order in which they are returned is undefined.
+<h2>
+B+tree
+</h2>
+<p>
+Berkeley DB includes a B+tree <a href="#Come79">[Come79]</a> access method.
+B+trees store records of key/value pairs in leaf pages,
+and pairs of (key, child page address) at internal nodes.
+Keys in the tree are stored in sorted order,
+where the order is determined by the comparison function supplied when the
+database was created.
+Pages at the leaf level of the tree include pointers
+to their neighbors to simplify traversal. B+trees support lookup by
+exact match (equality) or range (greater than or equal to a key).
+Like Hash tables, B+trees support record insertion,
+deletion, and iteration over all records in the tree.
+<p>
+As records are inserted and pages in the B+tree fill up, they are split,
+with about half the keys going into a new peer page at the same level in
+the tree.
+Most B+tree implementations leave both nodes half-full after a split.
+This leads to poor performance in a common case, where the caller inserts
+keys in order.
+To handle this case, Berkeley DB keeps track of the insertion order,
+and splits pages unevenly to keep pages fuller.
+This reduces tree size, yielding better search performance and smaller
+databases.
+<p>
+On deletion, empty pages are coalesced by reverse splits
+into single pages.
+The access method does no other page balancing on insertion
+or deletion.
+Keys are not moved among pages at every update
+to keep the tree well-balanced. While this could improve search times
+in some cases, the additional code complexity leads to slower updates and
+is prone to deadlocks.
+<p>
+For simplicity, Berkeley DB B+trees do no prefix compression of keys
+at internal or leaf nodes.
+<h2>
+Recno
+</h2>
+<p>
+Berkeley DB includes a fixed- or variable-length record access method,
+called
+<i>Recno</i>.
+The Recno access method assigns logical record numbers to each
+record,
+and can search for and update records by record number.
+Recno is able,
+for example,
+to load a text file into a database,
+treating each line as a record.
+This permits fast searches by line number for applications like
+text editors <a href="#Ston82">[Ston82]</a>.
+<p>
+Recno is actually built
+on top of the B+tree access method and provides a simple interface
+for storing sequentially-ordered data values.
+The Recno access method generates keys internally.
+The programmer's view of the values is that
+they are numbered sequentially from one.
+Developers can choose to have records automatically renumbered
+when lower-numbered records are added or deleted.
+In this case, new keys can be inserted between existing keys.
+<h1>
+Features
+</h1>
+<p>
+This section describes important features of Berkeley DB.
+In general,
+developers can choose which features are useful to them,
+and use only those that are required by their application.
+<p>
+For example,
+when an application opens a database, it can declare the degree of
+concurrency and recovery that it requires. Simple stand-alone applications,
+and in particular ports of applications that used
+<tt>dbm</tt>
+or one of its
+variants, generally do not require concurrent access or crash recovery.
+Other applications, such as enterprise-class database management systems
+that store sales transactions or other critical data, need full
+transactional service. Single-user operation is faster than multi-user
+operation, since no overhead is incurred by locking. Running with
+the recovery system disabled is faster than running with it enabled,
+since log records need not be written when changes are made to the
+database.
+<p>
+In addition, some core subsystems, including the locking system and
+the logging facility,
+can be used outside the context of the access methods as well.
+Although few users have chosen to do so, it is possible to
+use only the lock manager in Berkeley DB to control concurrency
+in an application, without using any of the standard database services.
+Alternatively, the caller can integrate locking of non-database resources
+with Berkeley DB's transactional two-phase locking system, to impose
+transaction semantics on objects outside the database.
+<h2>
+Programmatic interfaces
+</h2>
+<p>
+Berkeley DB defines a simple API for database management.
+The package does not include industry-standard
+programmatic interfaces such as Open Database Connectivity (ODBC),
+Object Linking and Embedding for Databases (OleDB), or Structured
+Query Language (SQL). These interfaces, while useful, were
+designed to promote interoperability of database systems, and not
+simplicity or performance.
+<p>
+In response to customer demand,
+Berkeley DB 2.5 introduced support for the XA standard <a href="#Open94">[Open94]</a>.
+XA permits Berkeley DB to participate in distributed transactions
+under a transaction processing monitor like Tuxedo from BEA Systems.
+Like XA, other standard interfaces can be built on top of the
+core system.
+The standards do not belong inside Berkeley DB,
+since not all applications need them.
+<h2>
+Working with records
+</h2>
+<p>
+A database user may need to search for particular keys in a database,
+or may simply want to browse available records.
+Berkeley DB supports both keyed access,
+to find one or more records with a given key,
+or sequential access,
+to retrieve all the records in the database one at a time.
+The order of the records returned during sequential scans
+depends on the access method.
+B+tree and Recno databases return records in sort order,
+and Hash databases return them in apparently random order.
+<p>
+Similarly,
+Berkeley DB defines simple interfaces for inserting,
+updating,
+and deleting records in a database.
+<h2>
+Long keys and values
+</h2>
+<p>
+Berkeley DB manages keys and values as large as
+2<sup>32</sup> bytes.
+Since the time required to copy a record is proportional to its size,
+Berkeley DB includes interfaces that operate on partial records.
+If an application requires only part of a large record,
+it requests partial record retrieval,
+and receives just the bytes that it needs.
+The smaller copy saves both time and memory.
+<p>
+Berkeley DB allows the programmer to define the data types of
+keys and values.
+Developers use any type expressible in the programming language.
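+<p>
+A hedged sketch of the partial-record interface described above (bundled
+release's C API; the offset and length are arbitrary):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Fetch only bytes 100..119 of a large record into a caller buffer. */
+int
+get_partial(DB *dbp, DBT *key, char *buf, size_t buflen)
+{
+    DBT data;
+
+    memset(&amp;data, 0, sizeof(data));
+    data.flags = DB_DBT_PARTIAL | DB_DBT_USERMEM;
+    data.doff = 100;       /* Offset into the stored value. */
+    data.dlen = 20;        /* Number of bytes wanted. */
+    data.data = buf;       /* Caller-supplied buffer... */
+    data.ulen = buflen;    /* ...and its size. */
+
+    return (dbp-&gt;get(dbp, NULL, key, &amp;data, 0));
+}</pre></blockquote>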
+<h2>
+Large databases
+</h2>
+<p>
+A single database managed by Berkeley DB can be up to 2<sup>48</sup>
+bytes,
+or 256 petabytes,
+in size.
+Berkeley DB uses the host filesystem as the backing store
+for the database,
+so large databases require big file support from the operating system.
+Sleepycat Software has customers using Berkeley DB
+to manage single databases in excess of 100 gigabytes.
+<h2>
+Main memory databases
+</h2>
+<p>
+Applications that do not require persistent storage can create
+databases that exist only in main memory.
+These databases bypass the overhead imposed by the I/O system
+altogether.
+<p>
+Some applications do need to use disk as a backing store,
+but run on machines with very large memory.
+Berkeley DB is able to manage very large shared memory regions
+for cached data pages,
+log records,
+and lock management.
+For example,
+the cache region used for data pages may be gigabytes in size,
+reducing the likelihood that any read operation will need to
+visit the disk in the steady state.
+The programmer declares the size of the cache region at
+startup.
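+<p>
+Concretely, and only as a sketch against the C API of the release bundled
+here (the cache size and home directory are arbitrary), a large cache and
+a purely in-memory database look like this:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+open_memory_heavy(DB_ENV *dbenv, DB **dbpp)
+{
+    DB *dbp;
+    int ret;
+
+    /* A one-gigabyte data-page cache, in a single region. */
+    if ((ret = dbenv-&gt;set_cachesize(dbenv, 1, 0, 1)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;open(dbenv, "/tmp/dbhome",
+        DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
+        return (ret);
+
+    if ((ret = db_create(&amp;dbp, dbenv, 0)) != 0)
+        return (ret);
+    /* A NULL file name creates a database that exists only in memory. */
+    if ((ret = dbp-&gt;open(dbp, NULL, NULL, NULL,
+        DB_BTREE, DB_CREATE, 0)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+    return (0);
+}</pre></blockquote>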
+<p>
+Finally, many operating systems provide memory-mapped file services
+that are much faster than their general-purpose file system
+interfaces.
+Berkeley DB can memory-map its database files for read-only database use.
+The application operates on records stored directly on the pages,
+with no cache management overhead.
+Because the application gets pointers directly into the
+Berkeley DB pages,
+writes cannot be permitted.
+Otherwise,
+changes could bypass the locking and logging systems,
+and software errors could corrupt the database.
+Read-only applications can use Berkeley DB's memory-mapped
+file service to improve performance on most architectures.
+<h2>
+Configurable page size
+</h2>
+<p>
+Programmers declare the size of the pages used by their access
+methods when they create a database.
+Although Berkeley DB provides reasonable defaults,
+developers may override them to control system performance.
+Small pages reduce the number of records that fit on a single page.
+Fewer records on a page means that fewer records are locked when
+the page is locked,
+improving concurrency.
+The per-page overhead is proportionally higher with smaller pages,
+of course,
+but developers can trade off space for time as an application requires.
+<h2>
+Small footprint
+</h2>
+<p>
+Berkeley DB is a compact system.
+The full package, including all access methods, recoverability,
+and transaction support
+is roughly 175K of text space on common architectures.
+<h2>
+Cursors
+</h2>
+<p>
+In database terminology, a cursor is a pointer into an access method
+that can be called iteratively to return records in sequence. Berkeley
+DB includes cursor interfaces for all access methods. This permits,
+for example, users to traverse a B+tree and view records in order.
+Pointers to records in cursors are persistent, so that once fetched,
+a record may be updated in place. Finally, cursors support access to
+chains of duplicate data items in the various access methods.
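+<p>
+A short sketch of cursor iteration (C API of the bundled release; it
+assumes the keys are printable text):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Walk every record; a B+tree database returns them in key order. */
+int
+dump_keys(DB *dbp)
+{
+    DBC *dbc;
+    DBT key, data;
+    int ret, t_ret;
+
+    if ((ret = dbp-&gt;cursor(dbp, NULL, &amp;dbc, 0)) != 0)
+        return (ret);
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    while ((ret = dbc-&gt;c_get(dbc, &amp;key, &amp;data, DB_NEXT)) == 0)
+        printf("%.*s\n", (int)key.size, (char *)key.data);
+
+    if (ret == DB_NOTFOUND)    /* End of database: not an error. */
+        ret = 0;
+    if ((t_ret = dbc-&gt;c_close(dbc)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>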
+<h2>
+Joins
+</h2>
+<p>
+In database terminology,
+a join is an operation that spans multiple separate
+tables (or in the case of Berkeley DB, multiple separate DB files).
+For example, a company may store information about its customers
+in one table and information about sales in another. An application
+will likely want to look up sales information by customer name; this
+requires matching records in the two tables that share a common
+customer ID field.
+This combining of records from multiple tables is called a join.
+<p>
+Berkeley DB includes interfaces for joining two or more tables.
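+<p>
+A hedged sketch of the join interface (C API of the bundled release; the
+"sales" primary database and the two secondary-index cursors are
+hypothetical and must already be positioned, for example with DB_SET):
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+int
+join_example(DB *sales, DBC *name_curs, DBC *city_curs)
+{
+    DBC *curslist[3], *join_curs;
+    DBT key, data;
+    int ret, t_ret;
+
+    curslist[0] = name_curs;
+    curslist[1] = city_curs;
+    curslist[2] = NULL;            /* The list is NULL-terminated. */
+
+    if ((ret = sales-&gt;join(sales, curslist, &amp;join_curs, 0)) != 0)
+        return (ret);
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    while ((ret = join_curs-&gt;c_get(join_curs, &amp;key, &amp;data, 0)) == 0) {
+        /* Each key/data pair here matches both secondary cursors. */
+    }
+
+    if (ret == DB_NOTFOUND)
+        ret = 0;
+    if ((t_ret = join_curs-&gt;c_close(join_curs)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}</pre></blockquote>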
+<h2>
+Transactions
+</h2>
+<p>
+Transactions have four properties <a href="#Gray93">[Gray93]</a>:
+<ul>
+<li>
+They are atomic. That is, all of the changes made in a single
+transaction must be applied at the same instant or not at all.
+This permits, for example, the transfer of money between two
+accounts to be accomplished by making the reduction of the
+balance in one account and the increase in the other into a
+single, atomic action.
+</li>
+<li>
+They must be consistent. That is, changes to the database
+by any transaction cannot leave the database in an illegal
+or corrupt state.
+</li>
+<li>
+They must be isolatable. Regardless of the number of users
+working in the database at the same time, every user must have
+the illusion that no other activity is going on.
+</li>
+<li>
+They must be durable. Even if the disk that stores the database
+is lost, it must be possible to recover the database to its last
+transaction-consistent state.
+</li>
+</ul>
+<p>
+This combination of properties -- atomicity, consistency, isolation, and
+durability -- is referred to as ACIDity in the literature. Berkeley DB,
+like most database systems, provides ACIDity using a collection of core
+services.
+<p>
+Programmers can choose to use Berkeley DB's transaction services
+for applications that need them.
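+<p>
+Applications opt in when they open their environment; a minimal sketch
+(C API of the bundled release; the home directory is illustrative):
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+open_transactional_env(DB_ENV **dbenvp)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    /*
+     * Locking, logging, the buffer pool, and the transaction manager,
+     * with normal recovery run first.
+     */
+    if ((ret = dbenv-&gt;open(dbenv, "/var/myapp/dbhome",
+        DB_CREATE | DB_RECOVER | DB_INIT_LOCK | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (ret);
+    }
+    *dbenvp = dbenv;
+    return (0);
+}</pre></blockquote>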
+<h3>
+Write-ahead logging
+</h3>
+<p>
+Programmers can enable the logging system when they start up Berkeley DB.
+During a transaction,
+the application makes a series of changes to the database.
+Each change is captured in a log entry,
+which holds the state of the database record
+both before and after the change.
+The log record is guaranteed
+to be flushed to stable storage before any of the changed data pages
+are written.
+This behavior -- writing the log before the data pages -- is called
+<i>write-ahead logging</i>.
+<p>
+At any time during the transaction,
+the application can
+<i>commit</i>,
+making the changes permanent,
+or
+<i>roll back</i>,
+cancelling all changes and restoring the database to its
+pre-transaction state.
+If the application
+rolls back the transaction, then the log holds the state of all
+changed pages prior to the transaction, and Berkeley DB simply
+restores that state.
+If the application commits the transaction,
+Berkeley DB writes the log records to disk.
+In-memory copies of the data pages already reflect the changes,
+and will be flushed as necessary during normal processing.
+Since log writes are sequential, but data page
+writes are random, this improves performance.
+<h3>
+Crashes and recovery
+</h3>
+<p>
+Berkeley DB's write-ahead log is used by the transaction
+system to commit or roll back transactions.
+It also gives the recovery system the information that
+it needs to protect against data loss or corruption
+from crashes.
+Berkeley DB is able to survive application crashes,
+system crashes,
+and even catastrophic failures like the loss of a hard
+disk,
+without losing any data.
+<p>
+Surviving crashes requires that data be stored in several different places.
+During normal processing,
+Berkeley DB has copies of active log records and recently-used
+data pages in memory.
+Log records are flushed to the log disk when transactions commit.
+Data pages trickle out to the data disk as pages move through
+the buffer cache.
+Periodically,
+the system administrator backs up the data disk,
+creating a safe copy of the database at a particular instant.
+When the database is backed up,
+the log can be truncated.
+For maximum robustness,
+the log disk and data disk should be separate devices.
+<p>
+Different system failures can destroy memory,
+the log disk,
+or the data disk.
+Berkeley DB is able to survive the loss of any one
+of these repositories
+without losing any committed transactions.
+<p>
+If the computer's memory is lost,
+through an application or operating system crash,
+then the log holds all committed transactions.
+On restart,
+the recovery system rolls the log forward against
+the database,
+reapplying any changes to on-disk pages that were in memory at the
+time of the crash.
+Since the log contains pre- and post-change state for
+transactions,
+the recovery system also uses the log to restore any pages to
+their original state if they were modified by transactions
+that never committed.
+<p>
+If the data disk is lost,
+the system administrator can restore the most recent copy from backup.
+The recovery system will roll the entire log forward against
+the original database,
+reapplying all committed changes.
+When it finishes,
+the database will contain every change made by every
+transaction that ever committed.
+<p>
+If the log disk is lost,
+then the recovery system can use the in-memory copies of
+log entries to roll back any uncommitted transactions,
+flush all in-memory database pages to the data disk,
+and shut down gracefully.
+At that point,
+the system administrator can back up the database disk,
+install a new log disk,
+and restart the system.
+<h3>
+Checkpoints
+</h3>
+<p>
+Berkeley DB includes a checkpointing service that interacts
+with the recovery system.
+During normal processing,
+both the log and the database are changing continually.
+At any given instant,
+the on-disk versions of the two are not guaranteed to be consistent.
+The log probably contains changes that are not yet in the database.
+<p>
+When an application makes a
+<i>checkpoint</i>,
+all committed changes in the log up to that point
+are guaranteed to be present on the data disk,
+too.
+Checkpointing is moderately expensive during normal processing,
+but limits the time spent recovering from crashes.
+<p>
+After an application or operating system crash,
+the recovery system only needs to go back two checkpoints
+to start rolling the log forward.
+(One checkpoint is not far enough.
+The recovery system cannot be sure that the most recent
+checkpoint completed --
+it may have been interrupted by the crash that forced the
+recovery system to run in the first place.)
+Without checkpoints,
+there is no way to be sure how long restarting after a crash will take.
+With checkpoints,
+the restart interval can be fixed by the programmer.
+Recovery processing can be guaranteed to complete in a second or two.
+<p>
+Software crashes are much more common than disk failures.
+Many developers want to guarantee that software bugs do not destroy data,
+but are willing to restore from tape,
+and to tolerate a day or two of lost work,
+in the unlikely event of a disk crash.
+With Berkeley DB,
+programmers may truncate the log at checkpoints.
+As long as the two most recent checkpoints are present,
+the recovery system can guarantee that no committed transactions
+are lost after a software crash.
+In this case,
+the recovery system does not require that the log and the
+data be on separate devices,
+although separating them can still improve performance
+by spreading out writes.
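+<p>
+In practice (sketched here with the later 4.x-style C API; the 2.x
+name differs), a checkpoint is a single call that an application, or
+the db_checkpoint utility, issues periodically from its own thread or
+process:
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Checkpoint if at least 1MB of log has been written, or at least
+ * ten minutes have passed, since the last checkpoint.
+ */
+int
+checkpoint_once(DB_ENV *env)
+{
+	return (env->txn_checkpoint(env, 1024 /* KB */, 10 /* min */, 0));
+}
+</pre>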
+<h3>
+Two-phase locking
+</h3>
+<p>
+Berkeley DB provides a service known as two-phase locking.
+In order to reduce the likelihood of deadlocks and to guarantee ACID
+properties, database systems manage locks in two phases. First, during
+the operation of a transaction, they acquire locks, but never release
+them. Second, at the end of the transaction, they release locks, but
+never acquire them. In practice, most database systems, including Berkeley
+DB, acquire locks on demand over the course of the transaction, then
+flush the log, then release all locks.
+<p>
+Berkeley DB can lock entire database files, which correspond to tables,
+or individual pages in them.
+It does no record-level locking.
+By shrinking the page size,
+however,
+developers can guarantee that every page holds only a small
+number of records.
+This reduces contention.
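+<p>
+For example (a sketch against the later 4.x-style C API; the exact
+call is an assumption for 2.x), a developer worried about page-lock
+contention can request the smallest legal page size before creating
+the database:
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Use small pages so that each page, and therefore each page lock,
+ * covers only a few records.  The page size must be set before the
+ * database is created, and must be a power of two between 512 bytes
+ * and 64KB.
+ */
+int
+open_small_pages(DB *dbp, const char *file)
+{
+	int ret;
+
+	if ((ret = dbp->set_pagesize(dbp, 512)) != 0)
+		return (ret);
+	return (dbp->open(dbp, NULL, file, NULL, DB_BTREE, DB_CREATE, 0644));
+}
+</pre>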
+<p>
+If locking is enabled,
+then read and write operations on a database acquire two-phase locks,
+which are held until the transaction completes.
+Which objects are locked and the order of lock acquisition
+depend on the workload for each transaction.
+It is possible for two or more transactions to deadlock,
+so that each is waiting for a lock that is held by another.
+<p>
+Berkeley DB detects deadlocks and automatically rolls back
+one of the transactions.
+This releases the locks that it held
+and allows the other transactions to continue.
+The caller is notified that its transaction did not complete,
+and may restart it.
+Developers can specify the deadlock detection interval
+and the policy to use in choosing a transaction to roll back.
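+<p>
+Sketched with the later 4.x-style C API (the 2.x interfaces differ),
+an application enables the detector once and wraps each transaction
+body in a retry loop for the case where it is chosen as the victim;
+the helper names here are hypothetical:
+<pre>
+#include &lt;db.h&gt;
+
+/* Run the built-in detector whenever a lock request would block,
+ * using the library's default victim-selection policy. */
+int
+enable_deadlock_detection(DB_ENV *env)
+{
+	return (env->set_lk_detect(env, DB_LOCK_DEFAULT));
+}
+
+/* Retry a transaction body until it commits, or fails for a reason
+ * other than being rolled back as a deadlock victim. */
+int
+run_with_retry(DB_ENV *env, int (*body)(DB_TXN *))
+{
+	DB_TXN *txn;
+	int ret;
+
+	for (;;) {
+		if ((ret = env->txn_begin(env, NULL, &txn, 0)) != 0)
+			return (ret);
+		if ((ret = body(txn)) == 0)
+			return (txn->commit(txn, 0));
+		(void)txn->abort(txn);
+		if (ret != DB_LOCK_DEADLOCK)
+			return (ret);	/* a real error; do not retry */
+	}
+}
+</pre>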
+<p>
+The two-phase locking interfaces are separately callable by applications
+that link Berkeley DB, though few users have needed to use that facility
+directly.
+Using these interfaces,
+Berkeley DB provides a fast,
+platform-portable locking system for general-purpose use.
+It also lets users include non-database objects in a database transaction,
+by controlling access to them exactly as if they were inside the database.
+<p>
+The Berkeley DB two-phase locking facility is built on the fastest correct
+locking primitives that are supported by the underlying architecture.
+In the current implementation, this means that the locking system is
+different on the various UNIX platforms, and is still more different
+on Windows NT. In our experience, the most difficult aspect of performance
+tuning is finding the fastest locking primitives that work correctly
+on a particular architecture and then integrating the new
+interface with the several that we already support.
+<p>
+The world would be a better place if the operating systems community
+would uniformly implement POSIX locking primitives and would guarantee
+that acquiring an uncontested lock was a fast operation.
+Locks must work both among threads in a single process
+and among processes.
+<h2>
+Concurrency
+</h2>
+<p>
+Good performance under concurrent operation is a critical design point
+for Berkeley DB. Although Berkeley DB is itself not multi-threaded,
+it is thread-safe, and runs well in threaded applications.
+Philosophically,
+we view the use of threads and the choice of a threads package
+as a policy decision,
+and prefer to offer mechanism (the ability to run threaded or not),
+allowing applications to choose their own policies.
+<p>
+The locking, logging, and buffer pool subsystems all use shared memory
+or other OS-specific sharing facilities to communicate. Locks, buffer
+pool fetches, and log writes behave in the same way across threads in
+a single process as they do across different processes on a single
+machine.
+<p>
+As a result, concurrent database applications may start up a new process
+for every single user, may create a single server which spawns a new
+thread for every client request, or may choose any policy in between.
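+<p>
+A sketch of the corresponding setup call, using the later 4.x-style
+C API (the 2.x initialization function differs): the same environment
+open works for a process-per-user design, a threaded server, or any
+mix of the two.
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Create or join a shared environment with the locking, logging,
+ * buffer pool, and transaction subsystems.  DB_THREAD marks the
+ * returned handle as usable by multiple threads of control.
+ */
+int
+open_shared_env(DB_ENV **envp, const char *home)
+{
+	DB_ENV *env;
+	int ret;
+
+	if ((ret = db_env_create(&env, 0)) != 0)
+		return (ret);
+	ret = env->open(env, home,
+	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+	    DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD, 0);
+	if (ret != 0) {
+		(void)env->close(env, 0);
+		return (ret);
+	}
+	*envp = env;
+	return (0);
+}
+</pre>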
+<p>
+Berkeley DB has been carefully designed to minimize contention
+and maximize concurrency.
+The cache manager allows all threads or processes to benefit from
+I/O done by one.
+Shared resources must sometimes be locked for exclusive access
+by one thread of control.
+We have kept critical sections small,
+and are careful not to hold critical resource locks across
+system calls that could deschedule the locking thread or process.
+Sleepycat Software has customers with hundreds of concurrent
+users working on a single database in production.
+<h1>
+Engineering Philosophy
+</h1>
+<p>
+Fundamentally, Berkeley DB is a collection of access methods with
+important facilities, like logging, locking, and transactional access
+underlying them. In both the research and the commercial world,
+the techniques for building systems like Berkeley DB have been well-known
+for a long time.
+<p>
+The key advantage of Berkeley DB is the careful attention that has been
+paid to engineering details throughout its life. We have carefully
+designed the system so that the core facilities, like locking and I/O,
+surface the right interfaces and are otherwise opaque to the caller.
+As programmers, we understand the value of simplicity and have worked
+hard to simplify the interfaces we surface to users of the
+database system.
+<p>
+Berkeley DB avoids limits in the code. It places no practical limit
+on the size of keys, values, or databases; they may grow to occupy
+the available storage space.
+<p>
+The locking and logging subsystems have been carefully crafted to
+reduce contention and improve throughput by shrinking or eliminating
+critical sections, and reducing the sizes of locked regions and log
+entries.
+<p>
+There is nothing in the design or implementation of Berkeley DB that
+pushes the state of the art in database systems. Rather, we have been
+very careful to get the engineering right. The result is a system that
+is superior, as an embedded database system, to any other solution
+available.
+<p>
+Most database systems trade off simplicity for correctness. Either the
+system is easy to use, or it supports concurrent use and survives system
+failures. Berkeley DB, because of its careful design and implementation,
+offers both simplicity and correctness.
+<p>
+The system has a small footprint,
+makes simple operations simple to carry out (inserting a new record takes
+just a few lines of code), and behaves correctly in the face of heavy
+concurrent use, system crashes, and even catastrophic failures like loss
+of a hard disk.
+<h1>
+The Berkeley DB 2.x Distribution
+</h1>
+<p>
+Berkeley DB is distributed in source code form from
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+Users are free to download and build the software, and to use it in
+their applications.
+<h2>
+What is in the distribution
+</h2>
+<p>
+The distribution is a compressed archive file.
+It includes the source code for the Berkeley DB library,
+as well as documentation, test suites, and supporting utilities.
+<p>
+The source code includes build support for all supported platforms.
+On UNIX systems Berkeley DB uses the GNU autoconfiguration tool,
+<tt>autoconf</tt>,
+to identify the system and to build the library
+and supporting utilities.
+Berkeley DB includes specific build environments for other platforms,
+such as VMS and Windows.
+<h3>
+Documentation
+</h3>
+<p>
+The distributed system includes documentation in HTML format.
+The documentation is in two parts:
+a UNIX-style reference manual for use by programmers,
+and a reference guide which is tutorial in nature.
+<h3>
+Test suite
+</h3>
+<p>
+The software also includes a complete test suite, written in Tcl.
+We believe that the test suite is a key advantage of Berkeley DB
+over comparable systems.
+<p>
+First, the test suite allows users who download and build the software
+to be sure that it is operating correctly.
+<p>
+Second, the test suite allows us, like other commercial developers
+of database software, to exercise the system thoroughly at every
+release. When we learn of new bugs, we add them to the test suite.
+We run the test suite continually during development cycles, and
+always prior to release. The result is a much more reliable system
+by the time it reaches beta release.
+<h2>
+Binary distribution
+</h2>
+<p>
+Sleepycat makes compiled libraries and general binary distributions available
+to customers for a fee.
+<h2>
+Supported platforms
+</h2>
+<p>
+Berkeley DB runs on any operating system with a
+POSIX 1003.1 interface <a href="#IEEE96">[IEEE96]</a>,
+which includes virtually every UNIX system.
+In addition,
+the software runs on VMS,
+Windows/95,
+Windows/98,
+and Windows/NT.
+Sleepycat Software no longer supports deployment on sixteen-bit
+Windows systems.
+<h1>
+Berkeley DB 2.x Licensing
+</h1>
+<p>
+Berkeley DB 2.x is distributed as an Open Source product. The software
+is freely available from us at our Web site, and in other media. Users
+are free to download the software and build applications with it.
+<p>
+The 1.x versions of Berkeley DB were covered by the UC Berkeley copyright
+that covers software freely redistributable in source form. When
+Sleepycat Software was formed, we needed to draft a license consistent
+with the copyright governing the existing, older software. Because
+of important differences between the UC Berkeley copyright and the GPL,
+it was impossible for us to use the GPL.
+A second copyright, with
+terms contradictory to the first, simply would not have worked.
+<p>
+Sleepycat wanted to continue Open Source development of Berkeley DB
+for several reasons.
+We agree with Raymond <a href="#Raym98">[Raym98]</a> and others that Open
+Source software is typically of higher quality than proprietary,
+binary-only products.
+Our customers benefit from a community of developers who
+know and use Berkeley DB,
+and can help with application design,
+debugging,
+and performance tuning.
+Widespread distribution and use of the source code tends to
+isolate bugs early,
+and to get fixes back into the distributed system quickly.
+As a result,
+Berkeley DB is more reliable.
+Just as importantly,
+individual users are able to contribute new features
+and performance enhancements,
+to the benefit of everyone who uses Berkeley DB.
+From a business perspective,
+Open Source and free distribution of the
+software creates share for us, and gives us a market into which
+we can sell products and services.
+Finally, making the source code
+freely available reduces our support load, since customers can
+find and fix bugs without recourse to us, in many cases.
+<p>
+To preserve the Open Source heritage of the older Berkeley DB code,
+we drafted a new license governing the distribution of Berkeley DB
+2.x. We adopted terms from the GPL that make it impossible to
+turn our Open Source code into proprietary code owned by someone else.
+<p>
+Briefly, the terms governing the use and distribution of Berkeley DB
+are:
+<ul>
+<li>
+your application must be internal to your site, or
+</li>
+<li>
+your application must be freely redistributable in source form, or
+</li>
+<li>
+you must get a license from us.
+</li>
+</ul>
+<p>
+For customers who prefer not to distribute Open Source products,
+we sell licenses to use and extend Berkeley DB at a reasonable cost.
+<p>
+We work hard to accommodate the needs of the Open Source community.
+For example,
+we have crafted special licensing arrangements with Gnome
+to encourage its use and distribution of Berkeley DB.
+<p>
+Berkeley DB conforms to the Open Source definition <a href="#Open99">[Open99]</a>.
+The license has
+been carefully crafted to keep the product available as an Open Source
+offering,
+while providing enough of a return on our investment to fund continued
+development and support of the product. The current license has
+created a business capable of funding three years of development on
+the software that simply would not have happened otherwise.
+<h1>
+Summary
+</h1>
+<p>
+Berkeley DB offers a unique collection of features, targeted squarely
+at software developers who need simple, reliable database management
+services in their applications. Good design and implementation and
+careful engineering throughout make the software better than many
+other systems.
+<p>
+Berkeley DB is an Open Source product, available at
+<a href="http://www.sleepycat.com">www.sleepycat.com</a>.
+for download. The distributed system includes everything needed to
+build and deploy the software or to port it to new systems.
+<p>
+Sleepycat Software distributes Berkeley DB under a license agreement
+that draws on both the UC Berkeley copyright and the GPL. The license
+guarantees that Berkeley DB will remain an Open Source product and
+provides Sleepycat with opportunities to make money to fund continued
+development on the software.
+<h1>
+References
+</h1>
+<table border=0 cellpadding=4 cellspacing=2>
+<tr>
+<td valign="top"><a name="Come79">[Come79]</a></td>
+<td>
+<p>
+Comer, D.,
+&quot;The Ubiquitous B-tree,&quot;
+<i>ACM Computing Surveys</i>
+Volume 11, number 2,
+June 1979.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Gray93">[Gray93]</a>
+</td>
+<td>
+<p>
+Gray, J., and Reuter, A.,
+<i>Transaction Processing: Concepts and Techniques</i>,
+Morgan Kaufmann Publishers,
+1993.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="IEEE96">[IEEE96]</a>
+</td>
+<td>
+<p>
+Institute of Electrical and Electronics Engineers,
+<i>IEEE/ANSI Std 1003.1</i>,
+1996 Edition.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Litw80">[Litw80]</a>
+</td>
+<td>
+<p>
+Litwin, W.,
+&quot;Linear Hashing: A New Tool for File and Table Addressing,&quot;
+<i>Proceedings of the 6th International Conference on Very Large Databases (VLDB)</i>,
+Montreal, Quebec, Canada,
+October 1980.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Open94">[Open94]</a>
+</td>
+<td>
+<p>
+The Open Group,
+<i>Distributed TP: The XA+ Specification, Version 2</i>,
+The Open Group, 1994.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Open99">[Open99]</a>
+</td>
+<td>
+<p>
+Opensource.org,
+&quot;Open Source Definition,&quot;
+<a href="http://www.opensource.org/osd.html"><i>www.opensource.org/osd.html</i></a>,
+version 1.4,
+1999.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Raym98">[Raym98]</a>
+</td>
+<td>
+<p>
+Raymond, E.S.,
+&quot;The Cathedral and the Bazaar,&quot;
+<a href="http://www.tuxedo.org/~esr/writings/cathedral-bazaar/">
+www.tuxedo.org/~esr/writings/cathedral-bazaar/cathedral-bazaar.html</a>,
+January 1998.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Selt91">[Selt91]</a>
+</td>
+<td>
+<p>
+Seltzer, M., and Yigit, O.,
+&quot;A New Hashing Package for UNIX,&quot;
+<i>Proceedings 1991 Winter USENIX Conference</i>,
+Dallas, TX,
+January 1991.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Selt92">[Selt92]</a>
+</td>
+<td>
+<p>
+Seltzer, M., and Olson, M.,
+&quot;LIBTP: Portable Modular Transactions for UNIX,&quot;
+<i>Proceedings 1992 Winter USENIX Conference</i>,
+San Francisco, CA,
+January 1992.
+</td>
+</tr>
+<tr>
+<td valign="top">
+<a name="Ston82">[Ston82]</a>
+</td>
+<td>
+<p>
+Stonebraker, M., Stettner, H., Kalash, J., Guttman, A., and Lynn, N.,
+&quot;Document Processing in a Relational Database System,&quot;
+Memorandum No. UCB/ERL M82/32,
+University of California at Berkeley,
+Berkeley, CA,
+May 1982.
+</td>
+</tr>
+</table>
+</body>
+</html>
diff --git a/libdb/docs/ref/refs/bdb_usenix.ps b/libdb/docs/ref/refs/bdb_usenix.ps
new file mode 100644
index 0000000..82e6789
--- /dev/null
+++ b/libdb/docs/ref/refs/bdb_usenix.ps
@@ -0,0 +1,1441 @@
+%!PS-Adobe-3.0
+%%Creator: groff version 1.11
+%%CreationDate: Mon Apr 26 13:38:12 1999
+%%DocumentNeededResources: font Times-Bold
+%%+ font Times-Roman
+%%+ font Times-Italic
+%%+ font Courier
+%%DocumentSuppliedResources: procset grops 1.11 0
+%%Pages: 9
+%%PageOrder: Ascend
+%%Orientation: Portrait
+%%EndComments
+%%BeginProlog
+%%BeginResource: procset grops 1.11 0
+/setpacking where{
+pop
+currentpacking
+true setpacking
+}if
+/grops 120 dict dup begin
+/SC 32 def
+/A/show load def
+/B{0 SC 3 -1 roll widthshow}bind def
+/C{0 exch ashow}bind def
+/D{0 exch 0 SC 5 2 roll awidthshow}bind def
+/E{0 rmoveto show}bind def
+/F{0 rmoveto 0 SC 3 -1 roll widthshow}bind def
+/G{0 rmoveto 0 exch ashow}bind def
+/H{0 rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/I{0 exch rmoveto show}bind def
+/J{0 exch rmoveto 0 SC 3 -1 roll widthshow}bind def
+/K{0 exch rmoveto 0 exch ashow}bind def
+/L{0 exch rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/M{rmoveto show}bind def
+/N{rmoveto 0 SC 3 -1 roll widthshow}bind def
+/O{rmoveto 0 exch ashow}bind def
+/P{rmoveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/Q{moveto show}bind def
+/R{moveto 0 SC 3 -1 roll widthshow}bind def
+/S{moveto 0 exch ashow}bind def
+/T{moveto 0 exch 0 SC 5 2 roll awidthshow}bind def
+/SF{
+findfont exch
+[exch dup 0 exch 0 exch neg 0 0]makefont
+dup setfont
+[exch/setfont cvx]cvx bind def
+}bind def
+/MF{
+findfont
+[5 2 roll
+0 3 1 roll
+neg 0 0]makefont
+dup setfont
+[exch/setfont cvx]cvx bind def
+}bind def
+/level0 0 def
+/RES 0 def
+/PL 0 def
+/LS 0 def
+/MANUAL{
+statusdict begin/manualfeed true store end
+}bind def
+/PLG{
+gsave newpath clippath pathbbox grestore
+exch pop add exch pop
+}bind def
+/BP{
+/level0 save def
+1 setlinecap
+1 setlinejoin
+72 RES div dup scale
+LS{
+90 rotate
+}{
+0 PL translate
+}ifelse
+1 -1 scale
+}bind def
+/EP{
+level0 restore
+showpage
+}bind def
+/DA{
+newpath arcn stroke
+}bind def
+/SN{
+transform
+.25 sub exch .25 sub exch
+round .25 add exch round .25 add exch
+itransform
+}bind def
+/DL{
+SN
+moveto
+SN
+lineto stroke
+}bind def
+/DC{
+newpath 0 360 arc closepath
+}bind def
+/TM matrix def
+/DE{
+TM currentmatrix pop
+translate scale newpath 0 0 .5 0 360 arc closepath
+TM setmatrix
+}bind def
+/RC/rcurveto load def
+/RL/rlineto load def
+/ST/stroke load def
+/MT/moveto load def
+/CL/closepath load def
+/FL{
+currentgray exch setgray fill setgray
+}bind def
+/BL/fill load def
+/LW/setlinewidth load def
+/RE{
+findfont
+dup maxlength 1 index/FontName known not{1 add}if dict begin
+{
+1 index/FID ne{def}{pop pop}ifelse
+}forall
+/Encoding exch def
+dup/FontName exch def
+currentdict end definefont pop
+}bind def
+/DEFS 0 def
+/EBEGIN{
+moveto
+DEFS begin
+}bind def
+/EEND/end load def
+/CNT 0 def
+/level1 0 def
+/PBEGIN{
+/level1 save def
+translate
+div 3 1 roll div exch scale
+neg exch neg exch translate
+0 setgray
+0 setlinecap
+1 setlinewidth
+0 setlinejoin
+10 setmiterlimit
+[]0 setdash
+/setstrokeadjust where{
+pop
+false setstrokeadjust
+}if
+/setoverprint where{
+pop
+false setoverprint
+}if
+newpath
+/CNT countdictstack def
+userdict begin
+/showpage{}def
+}bind def
+/PEND{
+clear
+countdictstack CNT sub{end}repeat
+level1 restore
+}bind def
+end def
+/setpacking where{
+pop
+setpacking
+}if
+%%EndResource
+%%IncludeResource: font Times-Bold
+%%IncludeResource: font Times-Roman
+%%IncludeResource: font Times-Italic
+%%IncludeResource: font Courier
+grops begin/DEFS 1 dict def DEFS begin/u{.001 mul}bind def end/RES 72
+def/PL 792 def/LS false def/ENC0[/asciicircum/asciitilde/Scaron/Zcaron
+/scaron/zcaron/Ydieresis/trademark/quotesingle/.notdef/.notdef/.notdef
+/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef
+/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef
+/.notdef/.notdef/space/exclam/quotedbl/numbersign/dollar/percent
+/ampersand/quoteright/parenleft/parenright/asterisk/plus/comma/hyphen
+/period/slash/zero/one/two/three/four/five/six/seven/eight/nine/colon
+/semicolon/less/equal/greater/question/at/A/B/C/D/E/F/G/H/I/J/K/L/M/N/O
+/P/Q/R/S/T/U/V/W/X/Y/Z/bracketleft/backslash/bracketright/circumflex
+/underscore/quoteleft/a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y
+/z/braceleft/bar/braceright/tilde/.notdef/quotesinglbase/guillemotleft
+/guillemotright/bullet/florin/fraction/perthousand/dagger/daggerdbl
+/endash/emdash/ff/fi/fl/ffi/ffl/dotlessi/dotlessj/grave/hungarumlaut
+/dotaccent/breve/caron/ring/ogonek/quotedblleft/quotedblright/oe/lslash
+/quotedblbase/OE/Lslash/.notdef/exclamdown/cent/sterling/currency/yen
+/brokenbar/section/dieresis/copyright/ordfeminine/guilsinglleft
+/logicalnot/minus/registered/macron/degree/plusminus/twosuperior
+/threesuperior/acute/mu/paragraph/periodcentered/cedilla/onesuperior
+/ordmasculine/guilsinglright/onequarter/onehalf/threequarters
+/questiondown/Agrave/Aacute/Acircumflex/Atilde/Adieresis/Aring/AE
+/Ccedilla/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute/Icircumflex
+/Idieresis/Eth/Ntilde/Ograve/Oacute/Ocircumflex/Otilde/Odieresis
+/multiply/Oslash/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
+/germandbls/agrave/aacute/acircumflex/atilde/adieresis/aring/ae/ccedilla
+/egrave/eacute/ecircumflex/edieresis/igrave/iacute/icircumflex/idieresis
+/eth/ntilde/ograve/oacute/ocircumflex/otilde/odieresis/divide/oslash
+/ugrave/uacute/ucircumflex/udieresis/yacute/thorn/ydieresis]def
+/Courier@0 ENC0/Courier RE/Times-Italic@0 ENC0/Times-Italic RE
+/Times-Roman@0 ENC0/Times-Roman RE/Times-Bold@0 ENC0/Times-Bold RE
+%%EndProlog
+%%Page: 1 1
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 14/Times-Bold@0 SF(Berk)275.358 100.8 Q(eley DB)-.14 E/F1 12
+/Times-Roman@0 SF(Michael A. Olson)270.372 129.6 Q -.3(Ke)283.182 144 S
+(ith Bostic).3 E(Mar)279.15 158.4 Q(go Seltzer)-.216 E/F2 12
+/Times-Italic@0 SF(Sleepycat Softwar)255.492 174.24 Q .24 -.12(e, I)
+-.444 H(nc.).12 E/F3 12/Times-Bold@0 SF(Abstract)290.874 210.24 Q/F4 10
+/Times-Roman@0 SF(Berk)79.2 226.44 Q(ele)-.1 E 2.925(yD)-.15 G 2.925(Bi)
+-2.925 G 2.924(sa)-2.925 G 2.924(nO)-2.924 G .424
+(pen Source embedded database system with a number of k)-2.924 F .724
+-.15(ey a)-.1 H(dv).15 E .424(antages o)-.25 F -.15(ve)-.15 G 2.924(rc)
+.15 G .424(omparable sys-)-2.924 F 3.102(tems. It)79.2 238.44 R .602(is simple to use, supports concurrent access by multiple users, and pro)
+3.102 F .602(vides industrial-strength transaction)-.15 F 1.555
+(support, including survi)79.2 250.44 R 1.555
+(ving system and disk crashes.)-.25 F 1.554
+(This paper describes the design and technical features of)6.555 F(Berk)
+79.2 262.44 Q(ele)-.1 E 2.5(yD)-.15 G(B, the distrib)-2.5 E
+(ution, and its license.)-.2 E F3 3(1. Intr)79.2 286.44 R(oduction)-.216
+E F4 .691(The Berk)79.2 302.64 R(ele)-.1 E 3.191(yD)-.15 G .691
+(atabase \(Berk)-3.191 F(ele)-.1 E 3.191(yD)-.15 G .692
+(B\) is an embedded)-3.191 F .253
+(database system that can be used in applications requir)79.2 314.64 R
+(-)-.2 E 1.636(ing high-performance concurrent storage and retrie)79.2
+326.64 R -.25(va)-.25 G(l).25 E 2.619(of k)79.2 338.64 R -.15(ey)-.1 G
+(/v).15 E 2.619(alue pairs.)-.25 F 2.619(The softw)7.619 F 2.619
+(are is distrib)-.1 F 2.618(uted as a)-.2 F .057
+(library that can be link)79.2 350.64 R .058
+(ed directly into an application.)-.1 F(It)5.058 E(pro)79.2 362.64 Q
+1.454(vides a v)-.15 F 1.453(ariety of programmatic interf)-.25 F 1.453
+(aces, includ-)-.1 F .237
+(ing callable APIs for C, C++, Perl, Tcl and Ja)79.2 374.64 R -.25(va)
+-.2 G 5.237(.U).25 G(sers)-5.237 E .327(may do)79.2 386.64 R .327
+(wnload Berk)-.25 F(ele)-.1 E 2.827(yD)-.15 G 2.827(Bf)-2.827 G .326
+(rom Sleep)-2.827 F .326(ycat Softw)-.1 F(are')-.1 E(s)-.55 E -.8(We)
+79.2 398.64 S 2.5(bs).8 G(ite, at)-2.5 E/F5 10/Times-Italic@0 SF(www)2.5
+E(.sleepycat.com)-.74 E F4(.)A(Sleep)79.2 414.84 Q 1.33(ycat distrib)-.1
+F 1.33(utes Berk)-.2 F(ele)-.1 E 3.83(yD)-.15 G 3.83(Ba)-3.83 G 3.83(sa)
+-3.83 G 3.83(nO)-3.83 G 1.33(pen Source)-3.83 F 3.3(product. The)79.2
+426.84 R(compan)3.3 E 3.3(yc)-.15 G .8(ollects license fees for certain)
+-3.3 F(uses of the softw)79.2 438.84 Q
+(are and sells support and services.)-.1 E F3 3(1.1. History)79.2 468.84
+R F4(Berk)79.2 485.04 Q(ele)-.1 E 3.057(yD)-.15 G 3.057(Bb)-3.057 G
+-2.25 -.15(eg a)-3.057 H 3.058(na).15 G 3.058(san)-3.058 G 1.058 -.25
+(ew i)-3.058 H .558(mplementation of a hash).25 F .843
+(access method to replace both)79.2 497.04 R/F6 10/Courier@0 SF(hsearch)
+3.342 E F4 .842(and the v)3.342 F(ari-)-.25 E(ous)79.2 509.04 Q F6(dbm)
+5.466 E F4 2.967(implementations \()5.466 F F6(dbm)A F4 2.967(from A)
+5.467 F(T&T)-1.11 E(,)-.74 E F6(ndbm)5.467 E F4 1.334(from Berk)79.2
+521.04 R(ele)-.1 E 2.634 -.65(y, a)-.15 H(nd).65 E F6(gdbm)3.834 E F4
+1.334(from the GNU project\).)3.834 F(In)6.333 E .367
+(1990 Seltzer and Y)79.2 533.04 R .368
+(igit produced a package called Hash)-.55 F(to do this [Selt91].)79.2
+545.04 Q 3.106(The \214rst general release of Berk)79.2 561.24 R(ele)-.1
+E 5.606(yD)-.15 G 3.106(B, in 1991,)-5.606 F 3.038(included some interf)
+79.2 573.24 R 3.039(ace changes and a ne)-.1 F 5.539(wB)-.25 G(+tree)
+-5.539 E .887(access method.)79.2 585.24 R .886
+(At roughly the same time, Seltzer and)5.887 F 1.201(Olson de)79.2
+597.24 R -.15(ve)-.25 G 1.202
+(loped a prototype transaction system based).15 F 3.356(on Berk)79.2
+609.24 R(ele)-.1 E 5.856(yD)-.15 G 3.356(B, called LIBTP [Selt92], b)
+-5.856 F 3.355(ut ne)-.2 F -.15(ve)-.25 G(r).15 E(released the code.)
+79.2 621.24 Q .653(The 4.4BSD UNIX release included Berk)79.2 637.44 R
+(ele)-.1 E 3.153(yD)-.15 G 3.153(B1)-3.153 G(.85)-3.153 E .602(in 1992.)
+79.2 649.44 R .601(Seltzer and Bostic maintained the code in the)5.601 F
+1.545(early 1990s in Berk)79.2 661.44 R(ele)-.1 E 4.046(ya)-.15 G 1.546
+(nd in Massachusetts.)-4.046 F(Man)6.546 E(y)-.15 E
+(users adopted the code during this period.)79.2 673.44 Q .432
+(By mid-1996, users w)79.2 689.64 R .431
+(anted commercial support for the)-.1 F(softw)79.2 701.64 Q 7.033
+(are. In)-.1 F 4.533(response, Bostic and Seltzer formed)7.033 F(Sleep)
+79.2 713.64 Q 10.128(ycat Softw)-.1 F 12.628(are. The)-.1 F(compan)
+12.627 E 15.127(ye)-.15 G(nhances,)-15.127 E(distrib)323.2 286.44 Q
+1.623(utes, and supports Berk)-.2 F(ele)-.1 E 4.123(yD)-.15 G 4.124(Ba)
+-4.123 G 1.624(nd supporting)-4.124 F(softw)323.2 298.44 Q 2.2
+(are and documentation.)-.1 F(Sleep)7.2 E 2.2(ycat released v)-.1 F(er)
+-.15 E(-)-.2 E 1.677(sion 2.1 of Berk)323.2 310.44 R(ele)-.1 E 4.177(yD)
+-.15 G 4.178(Bi)-4.177 G 4.178(nm)-4.178 G 1.678(id-1997 with important)
+-4.178 F(ne)323.2 322.44 Q 2.56(wf)-.25 G .06
+(eatures, including support for concurrent access to)-2.56 F 4.176
+(databases. The)323.2 334.44 R(compan)4.176 E 4.177(ym)-.15 G(ak)-4.177
+E 1.677(es about three commer)-.1 F(-)-.2 E .958(cial releases a year)
+323.2 346.44 R 3.458(,a)-.4 G .957(nd most recently shipped v)-3.458 F
+(ersion)-.15 E(2.8.)323.2 358.44 Q F3 3(1.2. Ov)323.2 388.44 R(er)-.12 E
+(view of Berk)-.12 E(eley DB)-.12 E F4 3.094(The C interf)323.2 404.64 R
+3.094(aces in Berk)-.1 F(ele)-.1 E 5.594(yD)-.15 G 5.595(Bp)-5.594 G
+(ermit)-5.595 E F6(dbm)5.595 E F4(-style)A 4.586
+(record management for databases, with signi\214cant)323.2 416.64 R -.15
+(ex)323.2 428.64 S 1.273(tensions to handle duplicate data items ele).15
+F -.05(ga)-.15 G(ntly).05 E 3.773(,t)-.65 G(o)-3.773 E 2.427
+(deal with concurrent access, and to pro)323.2 440.64 R 2.427
+(vide transac-)-.15 F .71
+(tional support so that multiple changes can be simulta-)323.2 452.64 R
+1.273(neously committed \(so that the)323.2 464.64 R 3.773(ya)-.15 G
+1.273(re made permanent\))-3.773 F 1.848
+(or rolled back \(so that the database is restored to its)323.2 476.64 R
+(state at the be)323.2 488.64 Q(ginning of the transaction\).)-.15 E
+1.034(C++ and Ja)323.2 504.84 R 1.534 -.25(va i)-.2 H(nterf).25 E 1.033
+(aces pro)-.1 F 1.033(vide a small set of classes)-.15 F 1.961
+(for operating on a database.)323.2 516.84 R 1.961
+(The main class in both)6.961 F .587(cases is called)323.2 528.84 R F6
+(Db)3.086 E F4 3.086(,a)C .586(nd pro)-3.086 F .586
+(vides methods that encapsu-)-.15 F 1.128(late the)323.2 540.84 R F6
+(dbm)3.628 E F4 1.129(-style interf)B 1.129(aces that the C interf)-.1 F
+1.129(aces pro-)-.1 F(vide.)323.2 552.84 Q 2.565(Tcl and Perl interf)
+323.2 569.04 R 2.564(aces allo)-.1 F 5.064(wd)-.25 G -2.15 -.25(ev e)
+-5.064 H 2.564(lopers w).25 F 2.564(orking in)-.1 F 1.716
+(those languages to use Berk)323.2 581.04 R(ele)-.1 E 4.216(yD)-.15 G
+4.216(Bi)-4.216 G 4.217(nt)-4.216 G 1.717(heir applica-)-4.217 F 3.419
+(tions. Bindings)323.2 593.04 R .919
+(for both languages are included in the)3.419 F(distrib)323.2 605.04 Q
+(ution.)-.2 E(De)323.2 621.24 Q -.15(ve)-.25 G 1.069
+(lopers may compile their applications and link in).15 F(Berk)323.2
+633.24 Q(ele)-.1 E 2.5(yD)-.15 G 2.5(Bs)-2.5 G(tatically or dynamically)
+-2.5 E(.)-.65 E F3 3(1.3. Ho)323.2 663.24 R 3(wB)-.12 G(erk)-3 E
+(eley DB is used)-.12 E F4 .655(The Berk)323.2 679.44 R(ele)-.1 E 3.155
+(yD)-.15 G 3.154(Bl)-3.155 G .654(ibrary supports concurrent access to)
+-3.154 F 5.115(databases. It)323.2 691.44 R 2.616(can be link)5.115 F
+2.616(ed into standalone applica-)-.1 F 1.487
+(tions, into a collection of cooperating applications, or)323.2 703.44 R
+4.21(into serv)323.2 715.44 R 4.21
+(ers that handle requests and do database)-.15 F EP
+%%Page: 2 2
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF(operations on behalf of clients.)79.2 84 Q .858
+(Compared to using a standalone database management)79.2 100.2 R .846
+(system, Berk)79.2 112.2 R(ele)-.1 E 3.346(yD)-.15 G 3.346(Bi)-3.346 G
+3.346(se)-3.346 G .846(asy to understand and simple)-3.346 F 3.826
+(to use.)79.2 124.2 R 3.826(The softw)8.826 F 3.826
+(are stores and retrie)-.1 F -.15(ve)-.25 G 6.325(sr).15 G(ecords,)
+-6.325 E 2.77(which consist of k)79.2 136.2 R -.15(ey)-.1 G(/v).15 E
+2.77(alue pairs.)-.25 F -2.15 -.25(Ke y)7.77 H 5.27(sa).25 G 2.77
+(re used to)-5.27 F .698(locate items and can be an)79.2 148.2 R 3.198
+(yd)-.15 G .698(ata type or structure sup-)-3.198 F
+(ported by the programming language.)79.2 160.2 Q .813
+(The programmer can pro)79.2 176.4 R .813(vide the functions that Berk)
+-.15 F(e-)-.1 E(le)79.2 188.4 Q 3.264(yD)-.15 G 3.264(Bu)-3.264 G .763
+(ses to operate on k)-3.264 F -.15(ey)-.1 G 3.263(s. F).15 F .763(or e)
+-.15 F .763(xample, B+trees)-.15 F 1.72
+(can use a custom comparison function, and the Hash)79.2 200.4 R .519
+(access method can use a custom hash function.)79.2 212.4 R(Berk)5.518 E
+(e-)-.1 E(le)79.2 224.4 Q 5.222(yD)-.15 G 5.222(Bu)-5.222 G 2.722
+(ses def)-5.222 F 2.723(ault functions if none are supplied.)-.1 F .873
+(Otherwise, Berk)79.2 236.4 R(ele)-.1 E 3.373(yD)-.15 G 3.373(Bd)-3.373
+G .873(oes not e)-3.373 F .873(xamine or interpret)-.15 F .934(either k)
+79.2 248.4 R -.15(ey)-.1 G 3.434(so).15 G 3.434(rv)-3.434 G .934
+(alues in an)-3.684 F 3.434(yw)-.15 G(ay)-3.534 E 5.934(.V)-.65 G .934
+(alues may be arbi-)-7.044 F(trarily long.)79.2 260.4 Q .69
+(It is also important to understand what Berk)79.2 276.6 R(ele)-.1 E
+3.19(yD)-.15 G 3.19(Bi)-3.19 G(s)-3.19 E 4.365(not. It)79.2 288.6 R
+1.865(is not a database serv)4.365 F 1.866(er that handles netw)-.15 F
+(ork)-.1 E 2.797(requests. It)79.2 300.6 R .297
+(is not an SQL engine that e)2.797 F -.15(xe)-.15 G .296(cutes queries.)
+.15 F 1.547(It is not a relational or object-oriented database man-)79.2
+312.6 R(agement system.)79.2 324.6 Q 1.101(It is possible to b)79.2
+340.8 R 1.101(uild an)-.2 F 3.601(yo)-.15 G 3.601(ft)-3.601 G 1.101
+(hose on top of Berk)-3.601 F(ele)-.1 E(y)-.15 E 2.116(DB, b)79.2 352.8
+R 2.116(ut the package, as distrib)-.2 F 2.117(uted, is an embedded)-.2
+F 1.444(database engine.)79.2 364.8 R 1.444
+(It has been designed to be portable,)6.444 F(small, f)79.2 376.8 Q
+(ast, and reliable.)-.1 E/F1 12/Times-Bold@0 SF 3(1.4. A)79.2 406.8 R
+(pplications that use Berk)-.3 E(eley DB)-.12 E F0(Berk)79.2 423 Q(ele)
+-.1 E 4.248(yD)-.15 G 4.248(Bi)-4.248 G 4.249(se)-4.248 G 1.749
+(mbedded in a v)-4.249 F 1.749(ariety of proprietary)-.25 F 3.84
+(and Open Source softw)79.2 435 R 3.84(are packages.)-.1 F 3.84
+(This section)8.84 F(highlights a fe)79.2 447 Q 2.5(wo)-.25 G 2.5(ft)
+-2.5 G(he products that use it.)-2.5 E 1.467(Directory serv)79.2 463.2 R
+1.467(ers, which do data storage and retrie)-.15 F -.25(va)-.25 G(l).25
+E 2.823(using the Local Directory Access Protocol \(LD)79.2 475.2 R
+(AP\),)-.4 E(pro)79.2 487.2 Q .956
+(vide naming and directory lookup service on local-)-.15 F 2.837
+(area netw)79.2 499.2 R 5.337(orks. This)-.1 F 2.837
+(service is, essentially)5.337 F 5.336(,d)-.65 G(atabase)-5.336 E .039
+(query and update, b)79.2 511.2 R .039
+(ut uses a simple protocol rather than)-.2 F 2.202(SQL or ODBC.)79.2
+523.2 R(Berk)7.201 E(ele)-.1 E 4.701(yD)-.15 G 4.701(Bi)-4.701 G 4.701
+(st)-4.701 G 2.201(he embedded data)-4.701 F 1.288
+(manager in the majority of deplo)79.2 535.2 R 1.289(yed directory serv)
+-.1 F(ers)-.15 E(today)79.2 547.2 Q 4.855(,i)-.65 G 2.355(ncluding LD)
+-4.855 F 2.355(AP serv)-.4 F 2.355(ers from Netscape, Mes-)-.15 F
+(sageDirect \(formerly Isode\), and others.)79.2 559.2 Q(Berk)79.2 575.4
+Q(ele)-.1 E 4.385(yD)-.15 G 4.385(Bi)-4.385 G 4.385(sa)-4.385 G 1.886
+(lso embedded in a lar)-4.385 F 1.886(ge number of)-.18 F 5.302
+(mail serv)79.2 587.4 R 7.802(ers. Intermail,)-.15 F 5.302(from Softw)
+7.802 F 5.302(are.com, uses)-.1 F(Berk)79.2 599.4 Q(ele)-.1 E 4.613(yD)
+-.15 G 4.613(Ba)-4.613 G 4.613(sam)-4.613 G 2.114
+(essage store and as the backing)-4.613 F 3.597
+(store for its directory serv)79.2 611.4 R(er)-.15 E 8.597(.T)-.55 G
+3.597(he sendmail serv)-8.597 F(er)-.15 E 1.175
+(\(including both the commercial Sendmail Pro of)79.2 623.4 R(fering)
+-.25 E 3.283(from Sendmail, Inc. and the v)79.2 635.4 R 3.283
+(ersion distrib)-.15 F 3.282(uted by)-.2 F(sendmail.or)79.2 647.4 Q
+2.304(g\) uses Berk)-.18 F(ele)-.1 E 4.804(yD)-.15 G 4.804(Bt)-4.804 G
+4.804(os)-4.804 G 2.305(tore aliases and)-4.804 F 9.01
+(other information.)79.2 659.4 R(Similarly)14.01 E 11.51(,P)-.65 G 9.01
+(ost\214x \(formerly)-11.51 F 3.465(VMailer\) uses Berk)79.2 671.4 R
+(ele)-.1 E 5.965(yD)-.15 G 5.965(Bt)-5.965 G 5.965(os)-5.965 G 3.465
+(tore administrati)-5.965 F -.15(ve)-.25 G(information.)79.2 683.4 Q
+.134(In addition, Berk)79.2 699.6 R(ele)-.1 E 2.634(yD)-.15 G 2.633(Bi)
+-2.634 G 2.633(se)-2.633 G .133(mbedded in a wide v)-2.633 F(ariety)-.25
+E 4.994(of other softw)79.2 711.6 R 4.994(are products.)-.1 F 4.994
+(Example applications)9.994 F .373
+(include managing access control lists, storing user k)323.2 84 R -.15
+(ey)-.1 G(s).15 E 2.75(in a public-k)323.2 96 R 3.05 -.15(ey i)-.1 H
+2.75(nfrastructure, recording machine-to-).15 F(netw)323.2 108 Q .519
+(ork-address mappings in address serv)-.1 F .518(ers, and stor)-.15 F(-)
+-.2 E .411(ing con\214guration and de)323.2 120 R .412
+(vice information in video post-)-.25 F(production softw)323.2 132 Q
+(are.)-.1 E(Finally)323.2 148.2 Q 4.978(,B)-.65 G(erk)-4.978 E(ele)-.1 E
+4.978(yD)-.15 G 4.978(Bi)-4.978 G 4.978(sap)-4.978 G 2.478(art of man)
+-4.978 F 4.977(yo)-.15 G 2.477(ther Open)-4.977 F .005(Source softw)
+323.2 160.2 R .005(are packages a)-.1 F -.25(va)-.2 G .006
+(ilable on the Internet.).25 F -.15(Fo)5.006 G(r).15 E -.15(ex)323.2
+172.2 S .604(ample, the softw).15 F .604
+(are is embedded in the Apache W)-.1 F(eb)-.8 E(serv)323.2 184.2 Q
+(er and the Gnome desktop.)-.15 E F1 3(2. Access)323.2 214.2 R(Methods)3
+E F0 .828(In database terminology)323.2 230.4 R 3.329(,a)-.65 G 3.329
+(na)-3.329 G .829(ccess method is the disk-)-3.329 F 1.964
+(based structure used to store data and the operations)323.2 242.4 R -.2
+(av)323.2 254.4 S 6.053(ailable on that structure.)-.05 F -.15(Fo)11.053
+G 8.554(re).15 G 6.054(xample, man)-8.704 F(y)-.15 E 3.853
+(database systems support a B+tree access method.)323.2 266.4 R 1.203
+(B+trees allo)323.2 278.4 R 3.703(we)-.25 G 1.203
+(quality-based lookups \(\214nd k)-3.703 F -.15(ey)-.1 G 3.704(se).15 G
+(qual)-3.704 E 4(to some constant\), range-based lookups \(\214nd k)
+323.2 290.4 R -.15(ey)-.1 G(s).15 E 1.188(between tw)323.2 302.4 R 3.688
+(oc)-.1 G 1.189(onstants\) and record insertion and dele-)-3.688 F
+(tion.)323.2 314.4 Q(Berk)323.2 330.6 Q(ele)-.1 E 4.729(yD)-.15 G 4.729
+(Bs)-4.729 G 2.228(upports three access methods: B+tree,)-4.729 F 1.553
+(Extended Linear Hashing \(Hash\), and Fix)323.2 342.6 R 1.553(ed- or V)
+-.15 F(ari-)-1.11 E 3.639(able-length Records \(Recno\).)323.2 354.6 R
+3.638(All three operate on)8.638 F 1.956(records composed of a k)323.2
+366.6 R 2.256 -.15(ey a)-.1 H 1.956(nd a data v).15 F 4.456(alue. In)
+-.25 F(the)4.456 E 1.301(B+tree and Hash access methods, k)323.2 378.6 R
+-.15(ey)-.1 G 3.801(sc).15 G 1.301(an ha)-3.801 F 1.601 -.15(ve a)-.2 H
+(rbi-).15 E 3.595(trary structure.)323.2 390.6 R 3.596
+(In the Recno access method, each)8.595 F .266
+(record is assigned a record number)323.2 402.6 R 2.765(,w)-.4 G .265
+(hich serv)-2.765 F .265(es as the)-.15 F -.1(ke)323.2 414.6 S 4.106
+-.65(y. I)-.05 H 2.806(na).65 G .306(ll the access methods, the v)-2.806
+F .306(alue can ha)-.25 F .606 -.15(ve a)-.2 H(rbi-).15 E 1.417
+(trary structure.)323.2 426.6 R 1.417
+(The programmer can supply compari-)6.417 F 2.129
+(son or hashing functions for k)323.2 438.6 R -.15(ey)-.1 G 2.129
+(s, and Berk).15 F(ele)-.1 E 4.629(yD)-.15 G(B)-4.629 E
+(stores and retrie)323.2 450.6 Q -.15(ve)-.25 G 2.5(sv).15 G
+(alues without interpreting them.)-2.75 E 1.069
+(All of the access methods use the host \214lesystem as a)323.2 466.8 R
+(backing store.)323.2 478.8 Q F1 3(2.1. Hash)323.2 508.8 R F0(Berk)323.2
+525 Q(ele)-.1 E 6.485(yD)-.15 G 6.485(Bi)-6.485 G 3.986
+(ncludes a Hash access method that)-6.485 F 9.863(implements e)323.2 537
+R 9.862(xtended linear hashing [Litw80].)-.15 F .017
+(Extended linear hashing adjusts the hash function as the)323.2 549 R
+.507(hash table gro)323.2 561 R .506(ws, attempting to k)-.25 F .506
+(eep all b)-.1 F(uck)-.2 E .506(ets under)-.1 F(-)-.2 E
+(full in the steady state.)323.2 573 Q 1.649
+(The Hash access method supports insertion and dele-)323.2 589.2 R .259
+(tion of records and lookup by e)323.2 601.2 R .259(xact match only)-.15
+F 5.258(.A)-.65 G(ppli-)-5.258 E .038(cations may iterate o)323.2 613.2
+R -.15(ve)-.15 G 2.538(ra).15 G .038(ll records stored in a table, b)
+-2.538 F(ut)-.2 E(the order in which the)323.2 625.2 Q 2.5(ya)-.15 G
+(re returned is unde\214ned.)-2.5 E F1 3(2.2. B+tr)323.2 655.2 R(ee)
+-.216 E F0(Berk)323.2 671.4 Q(ele)-.1 E 7.184(yD)-.15 G 7.184(Bi)-7.184
+G 4.683(ncludes a B+tree [Come79] access)-7.184 F 2.502(method. B+trees)
+323.2 683.4 R .002(store records of k)2.502 F -.15(ey)-.1 G(/v).15 E
+.003(alue pairs in leaf)-.25 F .52(pages, and pairs of \(k)323.2 695.4 R
+-.15(ey)-.1 G 3.02(,c)-.5 G .52(hild page address\) at internal)-3.02 F
+5.384(nodes. K)323.2 707.4 R -.15(ey)-.25 G 5.384(si).15 G 5.384(nt)
+-5.384 G 2.885(he tree are stored in sorted order)-5.384 F(,)-.4 E EP
+%%Page: 3 3
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF .576
+(where the order is determined by the comparison func-)79.2 84 R .815
+(tion supplied when the database w)79.2 96 R .815(as created.)-.1 F -.15
+(Pa)5.815 G .815(ges at).15 F .389(the leaf le)79.2 108 R -.15(ve)-.25 G
+2.889(lo).15 G 2.889(ft)-2.889 G .389
+(he tree include pointers to their neigh-)-2.889 F 1.444
+(bors to simplify tra)79.2 120 R -.15(ve)-.2 G 3.944(rsal. B+trees).15 F
+1.445(support lookup by)3.944 F -.15(ex)79.2 132 S .068
+(act match \(equality\) or range \(greater than or equal to).15 F 2.891
+(ak)79.2 144 S -.15(ey)-2.991 G 2.891(\). Lik).15 F 2.891(eH)-.1 G .391
+(ash tables, B+trees support record inser)-2.891 F(-)-.2 E
+(tion, deletion, and iteration o)79.2 156 Q -.15(ve)-.15 G 2.5(ra).15 G
+(ll records in the tree.)-2.5 E .646
+(As records are inserted and pages in the B+tree \214ll up,)79.2 172.2 R
+(the)79.2 184.2 Q 2.722(ya)-.15 G .223(re split, with about half the k)
+-2.722 F -.15(ey)-.1 G 2.723(sg).15 G .223(oing into a ne)-2.723 F(w)
+-.25 E 1.603(peer page at the same le)79.2 196.2 R -.15(ve)-.25 G 4.103
+(li).15 G 4.103(nt)-4.103 G 1.603(he tree.)-4.103 F 1.603(Most B+tree)
+6.603 F .387(implementations lea)79.2 208.2 R .687 -.15(ve b)-.2 H .387
+(oth nodes half-full after a split.).15 F 2.763
+(This leads to poor performance in a common case,)79.2 220.2 R 1.522
+(where the caller inserts k)79.2 232.2 R -.15(ey)-.1 G 4.022(si).15 G
+4.022(no)-4.022 G(rder)-4.022 E 6.522(.T)-.55 G 4.023(oh)-7.322 G 1.523
+(andle this)-4.023 F 1.643(case, Berk)79.2 244.2 R(ele)-.1 E 4.143(yD)
+-.15 G 4.143(Bk)-4.143 G 1.642(eeps track of the insertion order)-4.243
+F(,)-.4 E 2.023(and splits pages une)79.2 256.2 R -.15(ve)-.25 G 2.024
+(nly to k).15 F 2.024(eep pages fuller)-.1 F 7.024(.T)-.55 G(his)-7.024
+E 2.3(reduces tree size, yielding better search performance)79.2 268.2 R
+(and smaller databases.)79.2 280.2 Q 3.177
+(On deletion, empty pages are coalesced by re)79.2 296.4 R -.15(ve)-.25
+G(rse).15 E 2.03(splits into single pages.)79.2 308.4 R 2.03
+(The access method does no)7.03 F .347
+(other page balancing on insertion or deletion.)79.2 320.4 R -2.15 -.25
+(Ke y)5.348 H 2.848(sa).25 G(re)-2.848 E 1.927(not mo)79.2 332.4 R -.15
+(ve)-.15 G 4.427(da).15 G 1.927(mong pages at e)-4.427 F -.15(ve)-.25 G
+1.926(ry update to k).15 F 1.926(eep the)-.1 F 2.206
+(tree well-balanced.)79.2 344.4 R 2.207(While this could impro)7.206 F
+2.507 -.15(ve s)-.15 H(earch).15 E 2.341
+(times in some cases, the additional code comple)79.2 356.4 R(xity)-.15
+E(leads to slo)79.2 368.4 Q(wer updates and is prone to deadlocks.)-.25
+E -.15(Fo)79.2 384.6 S 2.948(rs).15 G(implicity)-2.948 E 2.948(,B)-.65 G
+(erk)-2.948 E(ele)-.1 E 2.949(yD)-.15 G 2.949(BB)-2.949 G .449
+(+trees do no pre\214x com-)-2.949 F(pression of k)79.2 396.6 Q -.15(ey)
+-.1 G 2.5(sa).15 G 2.5(ti)-2.5 G(nternal or leaf nodes.)-2.5 E/F1 12
+/Times-Bold@0 SF 3(2.3. Recno)79.2 426.6 R F0(Berk)79.2 442.8 Q(ele)-.1
+E 2.736(yD)-.15 G 2.736(Bi)-2.736 G .236(ncludes a \214x)-2.736 F .236
+(ed- or v)-.15 F .235(ariable-length record)-.25 F 5.075
+(access method, called)79.2 454.8 R/F2 10/Times-Italic@0 SF(Recno)7.575
+E F0 10.075(.T)C 5.075(he Recno access)-10.075 F .896
+(method assigns logical record numbers to each record,)79.2 466.8 R .978
+(and can search for and update records by record num-)79.2 478.8 R(ber)
+79.2 490.8 Q 5.037(.R)-.55 G .037(ecno is able, for e)-5.037 F .037
+(xample, to load a te)-.15 F .036(xt \214le into a)-.15 F 1.514
+(database, treating each line as a record.)79.2 502.8 R 1.514
+(This permits)6.514 F -.1(fa)79.2 514.8 S 1.313
+(st searches by line number for applications lik).1 F 3.812(et)-.1 G
+-.15(ex)-3.812 G(t).15 E(editors [Ston82].)79.2 526.8 Q 2.59
+(Recno is actually b)79.2 543 R 2.59(uilt on top of the B+tree access)
+-.2 F 3.192(method and pro)79.2 555 R 3.191(vides a simple interf)-.15 F
+3.191(ace for storing)-.1 F 3.14(sequentially-ordered data v)79.2 567 R
+5.64(alues. The)-.25 F 3.14(Recno access)5.64 F 2.266
+(method generates k)79.2 579 R -.15(ey)-.1 G 4.766(si).15 G(nternally)
+-4.766 E 7.266(.T)-.65 G 2.266(he programmer')-7.266 F(s)-.55 E(vie)79.2
+591 Q 4.102(wo)-.25 G 4.102(ft)-4.102 G 1.602(he v)-4.102 F 1.602
+(alues is that the)-.25 F 4.102(ya)-.15 G 1.603(re numbered sequen-)
+-4.102 F .254(tially from one.)79.2 603 R(De)5.254 E -.15(ve)-.25 G .254
+(lopers can choose to ha).15 F .553 -.15(ve r)-.2 H(ecords).15 E 9
+(automatically renumbered when lo)79.2 615 R(wer)-.25 E(-numbered)-.2 E
+.041(records are added or deleted.)79.2 627 R .041(In this case, ne)
+5.041 F 2.541(wk)-.25 G -.15(ey)-2.641 G 2.541(sc).15 G(an)-2.541 E
+(be inserted between e)79.2 639 Q(xisting k)-.15 E -.15(ey)-.1 G(s.).15
+E F1 3(3. F)79.2 669 R(eatur)-.3 E(es)-.216 E F0 1.827
+(This section describes important features of Berk)79.2 685.2 R(ele)-.1
+E(y)-.15 E 3.456(DB. In)79.2 697.2 R .956(general, de)3.456 F -.15(ve)
+-.25 G .956(lopers can choose which features).15 F .488
+(are useful to them, and use only those that are required)79.2 709.2 R
+(by their application.)323.2 84 Q -.15(Fo)323.2 100.2 S 3.529(re).15 G
+1.029(xample, when an application opens a database, it)-3.679 F .101
+(can declare the de)323.2 112.2 R .101(gree of concurrenc)-.15 F 2.601
+(ya)-.15 G .102(nd reco)-2.601 F -.15(ve)-.15 G .102(ry that).15 F .049
+(it requires.)323.2 124.2 R .048
+(Simple stand-alone applications, and in par)5.049 F(-)-.2 E .491
+(ticular ports of applications that used)323.2 136.2 R/F3 10/Courier@0
+SF(dbm)2.991 E F0 .491(or one of its)2.991 F -.25(va)323.2 148.2 S 1.093
+(riants, generally do not require concurrent access or).25 F .975
+(crash reco)323.2 160.2 R -.15(ve)-.15 G(ry).15 E 5.975(.O)-.65 G .975
+(ther applications, such as enterprise-)-5.975 F 3.08
+(class database management systems that store sales)323.2 172.2 R 2.643
+(transactions or other critical data, need full transac-)323.2 184.2 R
+3.93(tional service.)323.2 196.2 R 3.93(Single-user operation is f)8.93
+F 3.93(aster than)-.1 F 1.175(multi-user operation, since no o)323.2
+208.2 R -.15(ve)-.15 G 1.176(rhead is incurred by).15 F 3.156
+(locking. Running)323.2 220.2 R .656(with the reco)3.156 F -.15(ve)-.15
+G .655(ry system disabled is).15 F -.1(fa)323.2 232.2 S 1.732
+(ster than running with it enabled, since log records).1 F 2.703
+(need not be written when changes are made to the)323.2 244.2 R
+(database.)323.2 256.2 Q .851
+(In addition, some core subsystems, including the lock-)323.2 272.4 R
+.345(ing system and the logging f)323.2 284.4 R(acility)-.1 E 2.844(,c)
+-.65 G .344(an be used outside)-2.844 F 1.772(the conte)323.2 296.4 R
+1.772(xt of the access methods as well.)-.15 F(Although)6.773 E(fe)323.2
+308.4 Q 4.284(wu)-.25 G 1.784(sers ha)-4.284 F 2.084 -.15(ve c)-.2 H
+1.784(hosen to do so, it is possible to use).15 F .939
+(only the lock manager in Berk)323.2 320.4 R(ele)-.1 E 3.439(yD)-.15 G
+3.439(Bt)-3.439 G 3.439(oc)-3.439 G .939(ontrol con-)-3.439 F(currenc)
+323.2 332.4 Q 4.743(yi)-.15 G 4.743(na)-4.743 G 4.743(na)-4.743 G 2.242
+(pplication, without using an)-4.743 F 4.742(yo)-.15 G 4.742(ft)-4.742 G
+(he)-4.742 E .158(standard database services.)323.2 344.4 R(Alternati)
+5.158 E -.15(ve)-.25 G(ly).15 E 2.658(,t)-.65 G .159(he caller can)
+-2.658 F(inte)323.2 356.4 Q .07
+(grate locking of non-database resources with Berk)-.15 F(e-)-.1 E(le)
+323.2 368.4 Q 5.201(yD)-.15 G(B')-5.201 E 5.201(st)-.55 G 2.702
+(ransactional tw)-5.201 F 2.702(o-phase locking system, to)-.1 F 2.892
+(impose transaction semantics on objects outside the)323.2 380.4 R
+(database.)323.2 392.4 Q F1 3(3.1. Pr)323.2 422.4 R
+(ogrammatic interfaces)-.216 E F0(Berk)323.2 438.6 Q(ele)-.1 E 4.008(yD)
+-.15 G 4.008(Bd)-4.008 G 1.509(e\214nes a simple API for database man-)
+-4.008 F 3.452(agement. The)323.2 450.6 R .952
+(package does not include industry-stan-)3.452 F 1.898
+(dard programmatic interf)323.2 462.6 R 1.898
+(aces such as Open Database)-.1 F(Connecti)323.2 474.6 Q .852
+(vity \(ODBC\), Object Linking and Embedding)-.25 F .817
+(for Databases \(OleDB\), or Structured Query Language)323.2 486.6 R
+4.027(\(SQL\). These)323.2 498.6 R(interf)4.027 E 1.527
+(aces, while useful, were designed)-.1 F 2.477
+(to promote interoperability of database systems, and)323.2 510.6 R
+(not simplicity or performance.)323.2 522.6 Q 3.192
+(In response to customer demand, Berk)323.2 538.8 R(ele)-.1 E 5.691(yD)
+-.15 G 5.691(B2)-5.691 G(.5)-5.691 E .538
+(introduced support for the XA standard [Open94].)323.2 550.8 R(XA)5.539
+E .52(permits Berk)323.2 562.8 R(ele)-.1 E 3.02(yD)-.15 G 3.02(Bt)-3.02
+G 3.02(op)-3.02 G .52(articipate in distrib)-3.02 F .52(uted trans-)-.2
+F 3.373(actions under a transaction processing monitor lik)323.2 574.8 R
+(e)-.1 E -.45(Tu)323.2 586.8 S -.15(xe).45 G 1.31(do from BEA Systems.)
+.15 F(Lik)6.31 E 3.81(eX)-.1 G 1.31(A, other standard)-3.81 F(interf)
+323.2 598.8 Q .99(aces can be b)-.1 F .99
+(uilt on top of the core system.)-.2 F(The)5.99 E .846
+(standards do not belong inside Berk)323.2 610.8 R(ele)-.1 E 3.346(yD)
+-.15 G .846(B, since not)-3.346 F(all applications need them.)323.2
+622.8 Q F1 3(3.2. W)323.2 652.8 R(orking with r)-.9 E(ecords)-.216 E F0
+3.134(Ad)323.2 669 S .634
+(atabase user may need to search for particular k)-3.134 F -.15(ey)-.1 G
+(s).15 E .908(in a database, or may simply w)323.2 681 R .908
+(ant to bro)-.1 F .907(wse a)-.25 F -.25(va)-.2 G(ilable).25 E 4.101
+(records. Berk)323.2 693 R(ele)-.1 E 4.101(yD)-.15 G 4.101(Bs)-4.101 G
+1.601(upports both k)-4.101 F -.15(ey)-.1 G 1.602(ed access, to).15 F
+.173(\214nd one or more records with a gi)323.2 705 R -.15(ve)-.25 G
+2.673(nk).15 G -.15(ey)-2.773 G 2.673(,o)-.5 G 2.673(rs)-2.673 G
+(equential)-2.673 E .53(access, to retrie)323.2 717 R .83 -.15(ve a)-.25
+H .53(ll the records in the database one at).15 F EP
+%%Page: 4 4
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF 6.34(at)79.2 84 S 6.34(ime. The)-6.34 F 3.84
+(order of the records returned during)6.34 F .208
+(sequential scans depends on the access method.)79.2 96 R(B+tree)5.209 E
+1.495(and Recno databases return records in sort order)79.2 108 R 3.995
+(,a)-.4 G(nd)-3.995 E .023
+(Hash databases return them in apparently random order)79.2 120 R(.)-.55
+E(Similarly)79.2 136.2 Q 4.959(,B)-.65 G(erk)-4.959 E(ele)-.1 E 4.959
+(yD)-.15 G 4.958(Bd)-4.959 G 2.458(e\214nes simple interf)-4.958 F 2.458
+(aces for)-.1 F
+(inserting, updating, and deleting records in a database.)79.2 148.2 Q
+/F1 12/Times-Bold@0 SF 3(3.3. Long)79.2 178.2 R -.12(ke)3 G(ys and v).12
+E(alues)-.12 E F0(Berk)79.2 194.4 Q(ele)-.1 E 3.553(yD)-.15 G 3.553(Bm)
+-3.553 G 1.053(anages k)-3.553 F -.15(ey)-.1 G 3.553(sa).15 G 1.053
+(nd v)-3.553 F 1.053(alues as lar)-.25 F 1.054(ge as 2)-.18 F/F2 8
+/Times-Roman@0 SF(32)-5 I F0 3.192(bytes. Since)79.2 206.4 R .692
+(the time required to cop)3.192 F 3.192(yar)-.1 G .692(ecord is pro-)
+-3.192 F 1.895(portional to its size, Berk)79.2 218.4 R(ele)-.1 E 4.396
+(yD)-.15 G 4.396(Bi)-4.396 G 1.896(ncludes interf)-4.396 F(aces)-.1 E
+4.507(that operate on partial records.)79.2 230.4 R 4.507
+(If an application)9.507 F 1.273(requires only part of a lar)79.2 242.4
+R 1.274(ge record, it requests partial)-.18 F .026(record retrie)79.2
+254.4 R -.25(va)-.25 G .026(l, and recei).25 F -.15(ve)-.25 G 2.526(sj)
+.15 G .025(ust the bytes that it needs.)-2.526 F(The smaller cop)79.2
+266.4 Q 2.5(ys)-.1 G -2.25 -.2(av e)-2.5 H 2.5(sb).2 G
+(oth time and memory)-2.5 E(.)-.65 E(Berk)79.2 282.6 Q(ele)-.1 E 3.206
+(yD)-.15 G 3.206(Ba)-3.206 G(llo)-3.206 E .706
+(ws the programmer to de\214ne the data)-.25 F 2.72(types of k)79.2
+294.6 R -.15(ey)-.1 G 5.22(sa).15 G 2.72(nd v)-5.22 F 5.22(alues. De)
+-.25 F -.15(ve)-.25 G 2.72(lopers use an).15 F 5.22(yt)-.15 G(ype)-5.22
+E -.15(ex)79.2 306.6 S(pressible in the programming language.).15 E F1 3
+(3.4. Lar)79.2 336.6 R(ge databases)-.12 E F0 3.255(As)79.2 352.8 S .755
+(ingle database managed by Berk)-3.255 F(ele)-.1 E 3.256(yD)-.15 G 3.256
+(Bc)-3.256 G .756(an be up)-3.256 F 1.716(to 2)79.2 364.8 R F2(48)-5 I
+F0 1.716(bytes, or 256 petabytes, in size.)4.216 5 N(Berk)6.715 E(ele)
+-.1 E 4.215(yD)-.15 G(B)-4.215 E 2.144
+(uses the host \214lesystem as the backing store for the)79.2 376.8 R
+2.668(database, so lar)79.2 388.8 R 2.667
+(ge databases require big \214le support)-.18 F 3.113
+(from the operating system.)79.2 400.8 R(Sleep)8.113 E 3.114(ycat Softw)
+-.1 F 3.114(are has)-.1 F 5.712(customers using Berk)79.2 412.8 R(ele)
+-.1 E 8.212(yD)-.15 G 8.212(Bt)-8.212 G 8.211(om)-8.212 G 5.711
+(anage single)-8.211 F(databases in e)79.2 424.8 Q(xcess of 100 gig)-.15
+E(abytes.)-.05 E F1 3(3.5. Main)79.2 454.8 R(memory databases)3 E F0
+1.171(Applications that do not require persistent storage can)79.2 471 R
+.119(create databases that e)79.2 483 R .119(xist only in main memory)
+-.15 F 5.118(.T)-.65 G(hese)-5.118 E .542(databases bypass the o)79.2
+495 R -.15(ve)-.15 G .543(rhead imposed by the I/O sys-).15 F
+(tem altogether)79.2 507 Q(.)-.55 E 2.144
+(Some applications do need to use disk as a backing)79.2 523.2 R 2.248
+(store, b)79.2 535.2 R 2.249(ut run on machines with v)-.2 F 2.249
+(ery lar)-.15 F 2.249(ge memory)-.18 F(.)-.65 E(Berk)79.2 547.2 Q(ele)
+-.1 E 2.799(yD)-.15 G 2.799(Bi)-2.799 G 2.799(sa)-2.799 G .299
+(ble to manage v)-2.799 F .299(ery lar)-.15 F .299(ge shared mem-)-.18 F
+.128(ory re)79.2 559.2 R .129
+(gions for cached data pages, log records, and lock)-.15 F 3.938
+(management. F)79.2 571.2 R 1.437(or e)-.15 F 1.437
+(xample, the cache re)-.15 F 1.437(gion used for)-.15 F .033
+(data pages may be gig)79.2 583.2 R .034
+(abytes in size, reducing the lik)-.05 F(eli-)-.1 E .639(hood that an)
+79.2 595.2 R 3.139(yr)-.15 G .639
+(ead operation will need to visit the disk)-3.139 F 1.201
+(in the steady state.)79.2 607.2 R 1.201
+(The programmer declares the size)6.201 F(of the cache re)79.2 619.2 Q
+(gion at startup.)-.15 E(Finally)79.2 635.4 Q 7.048(,m)-.65 G(an)-7.048
+E 7.048(yo)-.15 G 4.548(perating systems pro)-7.048 F 4.548
+(vide memory-)-.15 F 2.532(mapped \214le services that are much f)79.2
+647.4 R 2.533(aster than their)-.1 F 2.602
+(general-purpose \214le system interf)79.2 659.4 R 5.102(aces. Berk)-.1
+F(ele)-.1 E 5.102(yD)-.15 G(B)-5.102 E 5.118
+(can memory-map its database \214les for read-only)79.2 671.4 R 3.917
+(database use.)79.2 683.4 R 3.917(The application operates on records)
+8.917 F 2.069(stored directly on the pages, with no cache manage-)79.2
+695.4 R 1.557(ment o)79.2 707.4 R -.15(ve)-.15 G 4.057(rhead. Because)
+.15 F 1.556(the application gets pointers)4.057 F 1.265
+(directly into the Berk)323.2 84 R(ele)-.1 E 3.765(yD)-.15 G 3.765(Bp)
+-3.765 G 1.265(ages, writes cannot be)-3.765 F 3.775
+(permitted. Otherwise,)323.2 96 R 1.275(changes could bypass the lock-)
+3.775 F .23(ing and logging systems, and softw)323.2 108 R .23
+(are errors could cor)-.1 F(-)-.2 E 4.007(rupt the database.)323.2 120 R
+4.006(Read-only applications can use)9.007 F(Berk)323.2 132 Q(ele)-.1 E
+2.893(yD)-.15 G(B')-2.893 E 2.893(sm)-.55 G .393
+(emory-mapped \214le service to impro)-2.893 F -.15(ve)-.15 G
+(performance on most architectures.)323.2 144 Q F1 3
+(3.6. Con\214gurable)323.2 174 R(page size)3 E F0 .111
+(Programmers declare the size of the pages used by their)323.2 190.2 R
+.403(access methods when the)323.2 202.2 R 2.903(yc)-.15 G .403
+(reate a database.)-2.903 F(Although)5.403 E(Berk)323.2 214.2 Q(ele)-.1
+E 4.046(yD)-.15 G 4.046(Bp)-4.046 G(ro)-4.046 E 1.546
+(vides reasonable def)-.15 F 1.546(aults, de)-.1 F -.15(ve)-.25 G
+(lopers).15 E 3.64(may o)323.2 226.2 R -.15(ve)-.15 G 3.64
+(rride them to control system performance.).15 F .793
+(Small pages reduce the number of records that \214t on a)323.2 238.2 R
+.353(single page.)323.2 250.2 R(Fe)5.353 E .353
+(wer records on a page means that fe)-.25 F(wer)-.25 E .724
+(records are lock)323.2 262.2 R .724(ed when the page is lock)-.1 F .723
+(ed, impro)-.1 F(ving)-.15 E(concurrenc)323.2 274.2 Q 5.262 -.65(y. T)
+-.15 H 1.462(he per).65 F 1.462(-page o)-.2 F -.15(ve)-.15 G 1.462
+(rhead is proportionally).15 F 2.29
+(higher with smaller pages, of course, b)323.2 286.2 R 2.29(ut de)-.2 F
+-.15(ve)-.25 G(lopers).15 E(can trade of)323.2 298.2 Q 2.5(fs)-.25 G
+(pace for time as an application requires.)-2.5 E F1 3(3.7. Small)323.2
+328.2 R -.3(fo)3 G(otprint).3 E F0(Berk)323.2 344.4 Q(ele)-.1 E 3.973
+(yD)-.15 G 3.973(Bi)-3.973 G 3.974(sac)-3.973 G 1.474(ompact system.)
+-3.974 F 1.474(The full package,)6.474 F .832
+(including all access methods, reco)323.2 356.4 R -.15(ve)-.15 G
+(rability).15 E 3.331(,a)-.65 G .831(nd trans-)-3.331 F 1.235
+(action support is roughly 175K of te)323.2 368.4 R 1.236
+(xt space on com-)-.15 F(mon architectures.)323.2 380.4 Q F1 3
+(3.8. Cursors)323.2 410.4 R F0 1.57(In database terminology)323.2 426.6
+R 4.07(,ac)-.65 G 1.57(ursor is a pointer into an)-4.07 F 1.806
+(access method that can be called iterati)323.2 438.6 R -.15(ve)-.25 G
+1.807(ly to return).15 F 3.68(records in sequence.)323.2 450.6 R(Berk)
+8.68 E(ele)-.1 E 6.18(yD)-.15 G 6.18(Bi)-6.18 G 3.68(ncludes cursor)
+-6.18 F(interf)323.2 462.6 Q 2.814(aces for all access methods.)-.1 F
+2.815(This permits, for)7.814 F -.15(ex)323.2 474.6 S .34
+(ample, users to tra).15 F -.15(ve)-.2 G .34(rse a B+tree and vie).15 F
+2.84(wr)-.25 G .34(ecords in)-2.84 F(order)323.2 486.6 Q 6.233(.P)-.55 G
+1.234(ointers to records in cursors are persistent, so)-6.233 F 1.779
+(that once fetched, a record may be updated in place.)323.2 498.6 R
+(Finally)323.2 510.6 Q 4.438(,c)-.65 G 1.939
+(ursors support access to chains of duplicate)-4.438 F
+(data items in the v)323.2 522.6 Q(arious access methods.)-.25 E F1 3
+(3.9. J)323.2 552.6 R(oins)-.18 E F0 2.703(In database terminology)323.2
+568.8 R 5.203(,aj)-.65 G 2.702(oin is an operation that)-5.203 F .616
+(spans multiple separate tables \(or in the case of Berk)323.2 580.8 R
+(e-)-.1 E(le)323.2 592.8 Q 4.518(yD)-.15 G 2.018
+(B, multiple separate DB \214les\).)-4.518 F -.15(Fo)7.017 G 4.517(re)
+.15 G 2.017(xample, a)-4.667 F(compan)323.2 604.8 Q 3.372(ym)-.15 G .873
+(ay store information about its customers in)-3.372 F 1.545
+(one table and information about sales in another)323.2 616.8 R 6.545
+(.A)-.55 G(n)-6.545 E 1.498(application will lik)323.2 628.8 R 1.499
+(ely w)-.1 F 1.499(ant to look up sales informa-)-.1 F .933
+(tion by customer name; this requires matching records)323.2 640.8 R
+2.28(in the tw)323.2 652.8 R 4.78(ot)-.1 G 2.28
+(ables that share a common customer ID)-4.78 F 2.515(\214eld. This)323.2
+664.8 R .015(combining of records from multiple tables is)2.515 F
+(called a join.)323.2 676.8 Q(Berk)323.2 693 Q(ele)-.1 E 5.561(yD)-.15 G
+5.561(Bi)-5.561 G 3.061(ncludes interf)-5.561 F 3.062
+(aces for joining tw)-.1 F 5.562(oo)-.1 G(r)-5.562 E(more tables.)323.2
+705 Q EP
+%%Page: 5 5
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(3.10. T)79.2 84 R(ransactions)-.888 E/F1 10
+/Times-Roman@0 SF -.35(Tr)79.2 100.2 S(ansactions ha).35 E .3 -.15(ve f)
+-.2 H(our properties [Gray93]:).15 E/F2 8/Times-Roman@0 SF<83>84.2 116.4
+Q F1(The)17.2 E 5.489(ya)-.15 G 2.989(re atomic.)-5.489 F 2.989
+(That is, all of the changes)7.989 F 1.475
+(made in a single transaction must be applied at)104.2 128.4 R 1.31
+(the same instant or not at all.)104.2 140.4 R 1.31(This permits, for)
+6.31 F -.15(ex)104.2 152.4 S 3.565(ample, the transfer of mone).15 F
+6.065(yb)-.15 G 3.565(etween tw)-6.065 F(o)-.1 E 3.68
+(accounts to be accomplished, by making the)104.2 164.4 R 1.27
+(reduction of the balance in one account and the)104.2 176.4 R
+(increase in the other into a single, atomic action.)104.2 188.4 Q F2
+<83>84.2 204.6 Q F1(The)17.2 E 3.125(ym)-.15 G .625(ust be consistent.)
+-3.125 F .625(That is, changes to the)5.625 F 3.628(database by an)104.2
+216.6 R 6.128(yt)-.15 G 3.628(ransaction cannot lea)-6.128 F 3.929 -.15
+(ve t)-.2 H(he).15 E(database in an ille)104.2 228.6 Q -.05(ga)-.15 G
+2.5(lo).05 G 2.5(rc)-2.5 G(orrupt state.)-2.5 E F2<83>84.2 244.8 Q F1
+(The)17.2 E 3.006(ym)-.15 G .506(ust be isolatable.)-3.006 F(Re)5.506 E
+-.05(ga)-.15 G .505(rdless of the num-).05 F .8(ber of users w)104.2
+256.8 R .8(orking in the database at the same)-.1 F 1.88(time, e)104.2
+268.8 R -.15(ve)-.25 G 1.88(ry user must ha).15 F 2.18 -.15(ve t)-.2 H
+1.88(he illusion that no).15 F(other acti)104.2 280.8 Q
+(vity is going on.)-.25 E F2<83>84.2 297 Q F1(The)17.2 E 5.54(ym)-.15 G
+3.04(ust be durable.)-5.54 F(Ev)8.04 E 3.04(en if the disk that)-.15 F
+.877(stores the database is lost, it must be possible to)104.2 309 R
+(reco)104.2 321 Q -.15(ve)-.15 G 2.668(rt).15 G .168
+(he database to its last transaction-consis-)-2.668 F(tent state.)104.2
+333 Q 2.49(This combination of properties \212 atomicity)79.2 349.2 R
+4.99(,c)-.65 G(onsis-)-4.99 E(tenc)79.2 361.2 Q 4.542 -.65(y, i)-.15 H
+3.243(solation, and durability \212 is referred to as).65 F -.4(AC)79.2
+373.2 S 3.459(IDity in the literature.).4 F(Berk)8.459 E(ele)-.1 E 5.958
+(yD)-.15 G 3.458(B, lik)-5.958 F 5.958(em)-.1 G(ost)-5.958 E .993
+(database systems, pro)79.2 385.2 R .993(vides A)-.15 F .994
+(CIDity using a collection)-.4 F(of core services.)79.2 397.2 Q .257
+(Programmers can choose to use Berk)79.2 413.4 R(ele)-.1 E 2.757(yD)-.15
+G(B')-2.757 E 2.757(st)-.55 G(ransac-)-2.757 E
+(tion services for applications that need them.)79.2 425.4 Q F0 3
+(3.10.1. Write-ahead)79.2 455.4 R(logging)3 E F1 .479
+(Programmers can enable the logging system when the)79.2 471.6 R(y)-.15
+E .918(start up Berk)79.2 483.6 R(ele)-.1 E 3.418(yD)-.15 G 3.418
+(B. During)-3.418 F 3.417(at)3.417 G .917(ransaction, the appli-)-3.417
+F .493(cation mak)79.2 495.6 R .493
+(es a series of changes to the database.)-.1 F(Each)5.494 E .552
+(change is captured in a log entry)79.2 507.6 R 3.052(,w)-.65 G .552
+(hich holds the state)-3.052 F .207
+(of the database record both before and after the change.)79.2 519.6 R
+2.208(The log record is guaranteed to be \215ushed to stable)79.2 531.6
+R .871(storage before an)79.2 543.6 R 3.371(yo)-.15 G 3.371(ft)-3.371 G
+.871(he changed data pages are writ-)-3.371 F 3.989(ten. This)79.2 555.6
+R(beha)3.989 E 1.489(vior \212 writing the log before the data)-.2 F
+(pages \212 is called)79.2 567.6 Q/F3 10/Times-Italic@0 SF
+(write-ahead lo)2.5 E -.1(gg)-.1 G(ing).1 E F1(.)A .835(At an)79.2 583.8
+R 3.335(yt)-.15 G .835(ime during the transaction, the application can)
+-3.335 F F3(commit)79.2 595.8 Q F1 4.202(,m)C 1.702
+(aking the changes permanent, or)-4.202 F F3 -.45(ro)4.201 G 1.701
+(ll bac).45 F(k)-.2 E F1(,)A .852
+(cancelling all changes and restoring the database to its)79.2 607.8 R
+1.57(pre-transaction state.)79.2 619.8 R 1.57
+(If the application rolls back the)6.57 F 1.003
+(transaction, then the log holds the state of all changed)79.2 631.8 R
+.5(pages prior to the transaction, and Berk)79.2 643.8 R(ele)-.1 E 3(yD)
+-.15 G 3(Bs)-3 G(imply)-3 E .226(restores that state.)79.2 655.8 R .226
+(If the application commits the trans-)5.226 F .538(action, Berk)79.2
+667.8 R(ele)-.1 E 3.038(yD)-.15 G 3.038(Bw)-3.038 G .538
+(rites the log records to disk.)-3.038 F(In-)5.537 E 2.312
+(memory copies of the data pages already re\215ect the)79.2 679.8 R
+1.399(changes, and will be \215ushed as necessary during nor)79.2 691.8
+R(-)-.2 E 2.35(mal processing.)79.2 703.8 R 2.35
+(Since log writes are sequential, b)7.35 F(ut)-.2 E 8.732
+(data page writes are random, this impro)79.2 715.8 R -.15(ve)-.15 G(s)
+.15 E(performance.)323.2 84 Q F0 3(3.10.2. Crashes)323.2 114 R(and r)3 E
+(eco)-.216 E -.12(ve)-.12 G(ry).12 E F1(Berk)323.2 130.2 Q(ele)-.1 E
+3.592(yD)-.15 G(B')-3.592 E 3.592(sw)-.55 G 1.093
+(rite-ahead log is used by the transac-)-3.592 F .415
+(tion system to commit or roll back transactions.)323.2 142.2 R .414
+(It also)5.414 F(gi)323.2 154.2 Q -.15(ve)-.25 G 3.23(st).15 G .73
+(he reco)-3.23 F -.15(ve)-.15 G .73
+(ry system the information that it needs).15 F .824(to protect ag)323.2
+166.2 R .824(ainst data loss or corruption from crashes.)-.05 F(Berk)
+323.2 178.2 Q(ele)-.1 E 2.703(yD)-.15 G 2.703(Bi)-2.703 G 2.704(sa)
+-2.703 G .204(ble to survi)-2.704 F .504 -.15(ve a)-.25 H .204
+(pplication crashes, sys-).15 F .408(tem crashes, and e)323.2 190.2 R
+-.15(ve)-.25 G 2.908(nc).15 G .407(atastrophic f)-2.908 F .407
+(ailures lik)-.1 F 2.907(et)-.1 G .407(he loss)-2.907 F
+(of a hard disk, without losing an)323.2 202.2 Q 2.5(yd)-.15 G(ata.)-2.5
+E(Survi)323.2 218.4 Q .538(ving crashes requires data stored in se)-.25
+F -.15(ve)-.25 G .539(ral dif).15 F(fer)-.25 E(-)-.2 E 2.52(ent places.)
+323.2 230.4 R 2.52(During normal processing, Berk)7.52 F(ele)-.1 E 5.02
+(yD)-.15 G(B)-5.02 E .766(has copies of acti)323.2 242.4 R 1.066 -.15
+(ve l)-.25 H .766(og records and recently-used data).15 F 1.539
+(pages in memory)323.2 254.4 R 6.539(.L)-.65 G 1.539
+(og records are \215ushed to the log)-6.539 F .694
+(disk when transactions commit.)323.2 266.4 R .695
+(Data pages trickle out)5.694 F .008(to the data disk as pages mo)323.2
+278.4 R .308 -.15(ve t)-.15 H .008(hrough the b).15 F(uf)-.2 E .008
+(fer cache.)-.25 F(Periodically)323.2 290.4 Q 2.691(,t)-.65 G .191
+(he system administrator backs up the data)-2.691 F .278
+(disk, creating a safe cop)323.2 302.4 R 2.778(yo)-.1 G 2.778(ft)-2.778
+G .278(he database at a particular)-2.778 F 2.609(instant. When)323.2
+314.4 R .109(the database is back)2.609 F .109(ed up, the log can be)-.1
+F 3.838(truncated. F)323.2 326.4 R 1.337(or maximum rob)-.15 F 1.337
+(ustness, the log disk and)-.2 F(data disk should be separate de)323.2
+338.4 Q(vices.)-.25 E(Dif)323.2 354.6 Q 1.29(ferent system f)-.25 F 1.29
+(ailures can destro)-.1 F 3.79(ym)-.1 G(emory)-3.79 E 3.79(,t)-.65 G
+1.29(he log)-3.79 F 1.106(disk, or the data disk.)323.2 366.6 R(Berk)
+6.106 E(ele)-.1 E 3.606(yD)-.15 G 3.606(Bi)-3.606 G 3.606(sa)-3.606 G
+1.106(ble to survi)-3.606 F -.15(ve)-.25 G .679(the loss of an)323.2
+378.6 R 3.179(yo)-.15 G .679(ne of these repositories without losing)
+-3.179 F(an)323.2 390.6 Q 2.5(yc)-.15 G(ommitted transactions.)-2.5 E
+1.372(If the computer')323.2 406.8 R 3.871(sm)-.55 G 1.371
+(emory is lost, through an applica-)-3.871 F 1.619
+(tion or operating system crash, then the log holds all)323.2 418.8 R
+1.789(committed transactions.)323.2 430.8 R 1.788(On restart, the reco)
+6.789 F -.15(ve)-.15 G 1.788(ry sys-).15 F .49(tem rolls the log forw)
+323.2 442.8 R .49(ard ag)-.1 F .49(ainst the database, reapply-)-.05 F
+.682(ing an)323.2 454.8 R 3.181(yc)-.15 G .681
+(hanges to on-disk pages that were in memory)-3.181 F .14
+(at the time of the crash.)323.2 466.8 R .14
+(Since the log contains pre- and)5.14 F .957
+(post-change state for transactions, the reco)323.2 478.8 R -.15(ve)-.15
+G .956(ry system).15 F 1.14(also uses the log to restore an)323.2 490.8
+R 3.64(yp)-.15 G 1.14(ages to their original)-3.64 F 1.615(state if the)
+323.2 502.8 R 4.115(yw)-.15 G 1.615
+(ere modi\214ed by transactions that ne)-4.115 F -.15(ve)-.25 G(r).15 E
+(committed.)323.2 514.8 Q 2.051
+(If the data disk is lost, the system administrator can)323.2 531 R .887
+(restore the most recent cop)323.2 543 R 3.386(yf)-.1 G .886
+(rom backup.)-3.386 F .886(The reco)5.886 F(v-)-.15 E 1.298
+(ery system will roll the entire log forw)323.2 555 R 1.298(ard ag)-.1 F
+1.298(ainst the)-.05 F 2.64
+(original database, reapplying all committed changes.)323.2 567 R 4.363
+(When it \214nishes, the database will contain e)323.2 579 R -.15(ve)
+-.25 G(ry).15 E .535(change made by e)323.2 591 R -.15(ve)-.25 G .534
+(ry transaction that e).15 F -.15(ve)-.25 G 3.034(rc).15 G(ommitted.)
+-3.034 E .494(If the log disk is lost, then the reco)323.2 607.2 R -.15
+(ve)-.15 G .495(ry system can use).15 F 1.853
+(the in-memory copies of log entries to roll back an)323.2 619.2 R(y)
+-.15 E .026(uncommitted transactions, \215ush all in-memory database)
+323.2 631.2 R 1.659(pages to the data disk, and shut do)323.2 643.2 R
+1.659(wn gracefully)-.25 F 6.658(.A)-.65 G(t)-6.658 E 2.204
+(that point, the system administrator can back up the)323.2 655.2 R .039
+(database disk, install a ne)323.2 667.2 R 2.539(wl)-.25 G .039
+(og disk, and restart the sys-)-2.539 F(tem.)323.2 679.2 Q EP
+%%Page: 6 6
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(3.10.3. Checkpoints)79.2 84 R/F1 10
+/Times-Roman@0 SF(Berk)79.2 100.2 Q(ele)-.1 E 6.085(yD)-.15 G 6.085(Bi)
+-6.085 G 3.585(ncludes a checkpointing service that)-6.085 F .263
+(interacts with the reco)79.2 112.2 R -.15(ve)-.15 G .263(ry system.).15
+F .263(During normal pro-)5.263 F 2.415
+(cessing, both the log and the database are changing)79.2 124.2 R
+(continually)79.2 136.2 Q 5.925(.A)-.65 G 3.425(ta)-5.925 G 1.224 -.15
+(ny g)-3.425 H -2.15 -.25(iv e).15 H 3.424(ni).25 G .924
+(nstant, the on-disk v)-3.424 F(ersions)-.15 E .414(of the tw)79.2 148.2
+R 2.914(oa)-.1 G .414(re not guaranteed to be consistent.)-2.914 F .414
+(The log)5.414 F 3.838
+(probably contains changes that are not yet in the)79.2 160.2 R
+(database.)79.2 172.2 Q .085(When an application mak)79.2 188.4 R .086
+(es a)-.1 F/F2 10/Times-Italic@0 SF -.15(ch)2.586 G(ec).15 E(kpoint)-.2
+E F1 2.586(,a)C .086(ll committed)-2.586 F .443
+(changes in the log up to that point are guaranteed to be)79.2 200.4 R
+.631(present on the data disk, too.)79.2 212.4 R .632
+(Checkpointing is moder)5.631 F(-)-.2 E .046(ately e)79.2 224.4 R
+(xpensi)-.15 E .346 -.15(ve d)-.25 H .046(uring normal processing, b).15
+F .045(ut limits the)-.2 F(time spent reco)79.2 236.4 Q -.15(ve)-.15 G
+(ring from crashes.).15 E 3.117
+(After an application or operating system crash, the)79.2 252.6 R(reco)
+79.2 264.6 Q -.15(ve)-.15 G 7.419(ry system only needs to go back tw).15
+F(o)-.1 E(checkpoints)79.2 278.6 Q/F3 7/Times-Roman@0 SF(1)-4 I F1 1.376
+(to start rolling the log forw)3.876 4 N 3.875(ard. W)-.1 F(ithout)-.4 E
+3.264(checkpoints, there is no w)79.2 290.6 R 3.265(ay to be sure ho)-.1
+F 5.765(wl)-.25 G(ong)-5.765 E .395(restarting after a crash will tak)
+79.2 302.6 R 2.895(e. W)-.1 F .395(ith checkpoints, the)-.4 F .088
+(restart interv)79.2 314.6 R .089(al can be \214x)-.25 F .089
+(ed by the programmer)-.15 F 5.089(.R)-.55 G(eco)-5.089 E(v-)-.15 E .668
+(ery processing can be guaranteed to complete in a sec-)79.2 326.6 R
+(ond or tw)79.2 338.6 Q(o.)-.1 E(Softw)79.2 354.8 Q 2.457
+(are crashes are much more common than disk)-.1 F -.1(fa)79.2 366.8 S
+3.385(ilures. Man).1 F 3.385(yd)-.15 G -2.15 -.25(ev e)-3.385 H .884
+(lopers w).25 F .884(ant to guarantee that soft-)-.1 F -.1(wa)79.2 378.8
+S .158(re b).1 F .158(ugs do not destro)-.2 F 2.658(yd)-.1 G .158
+(ata, b)-2.658 F .158(ut are willing to restore)-.2 F .631
+(from tape, and to tolerate a day or tw)79.2 390.8 R 3.131(oo)-.1 G
+3.131(fl)-3.131 G .63(ost w)-3.131 F .63(ork, in)-.1 F .89(the unlikle)
+79.2 402.8 R 3.39(ye)-.15 G -.15(ve)-3.64 G .89(nt of a disk crash.).15
+F -.4(Wi)5.89 G .89(th Berk).4 F(ele)-.1 E 3.39(yD)-.15 G(B,)-3.39 E
+1.093(programmers may truncate the log at checkpoints.)79.2 414.8 R(As)
+6.092 E .09(long as the tw)79.2 426.8 R 2.59(om)-.1 G .09
+(ost recent checkpoints are present, the)-2.59 F(reco)79.2 438.8 Q -.15
+(ve)-.15 G .106(ry system can guarantee that no committed trans-).15 F
+.611(actions are lost after a softw)79.2 450.8 R .611(are crash.)-.1 F
+.611(In this case, the)5.611 F(reco)79.2 462.8 Q -.15(ve)-.15 G 1.439
+(ry system does not require that the log and the).15 F 1.328
+(data be on separate de)79.2 474.8 R 1.329
+(vices, although separating them)-.25 F(can still impro)79.2 486.8 Q .3
+-.15(ve p)-.15 H(erformance by spreading out writes.).15 E F0 3
+(3.10.4. T)79.2 516.8 R -.12(wo)-.888 G(-phase locking).12 E F1(Berk)
+79.2 533 Q(ele)-.1 E 4.416(yD)-.15 G 4.416(Bp)-4.416 G(ro)-4.416 E 1.916
+(vides a service kno)-.15 F 1.915(wn as tw)-.25 F(o-phase)-.1 E 3.017
+(locking. In)79.2 545 R .517(order to reduce the lik)3.017 F .518
+(elihood of deadlocks)-.1 F 2.547(and to guarantee A)79.2 557 R 2.546
+(CID properties, database systems)-.4 F .063(manage locks in tw)79.2 569
+R 2.564(op)-.1 G 2.564(hases. First,)-2.564 F .064(during the operation)
+2.564 F 1.574(of a transaction, the)79.2 581 R 4.074(ya)-.15 G 1.574
+(cquire locks, b)-4.074 F 1.573(ut ne)-.2 F -.15(ve)-.25 G 4.073(rr).15
+G(elease)-4.073 E 6.147(them. Second,)79.2 593 R 3.648
+(at the end of the transaction, the)6.147 F(y)-.15 E .235
+(release locks, b)79.2 605 R .235(ut ne)-.2 F -.15(ve)-.25 G 2.735(ra)
+.15 G .235(cquire them.)-2.735 F .235(In practice, most)5.235 F 4.69
+(database systems, including Berk)79.2 617 R(ele)-.1 E 7.19(yD)-.15 G
+4.69(B, acquire)-7.19 F 2.314(locks on demand o)79.2 629 R -.15(ve)-.15
+G 4.814(rt).15 G 2.314(he course of the transaction,)-4.814 F
+(then \215ush the log, then release all locks.)79.2 641 Q .32 LW 83.2
+650.6 79.2 650.6 DL 87.2 650.6 83.2 650.6 DL 91.2 650.6 87.2 650.6 DL
+95.2 650.6 91.2 650.6 DL 99.2 650.6 95.2 650.6 DL 103.2 650.6 99.2 650.6
+DL 107.2 650.6 103.2 650.6 DL 111.2 650.6 107.2 650.6 DL 115.2 650.6
+111.2 650.6 DL 119.2 650.6 115.2 650.6 DL 123.2 650.6 119.2 650.6 DL
+127.2 650.6 123.2 650.6 DL 131.2 650.6 127.2 650.6 DL 135.2 650.6 131.2
+650.6 DL 139.2 650.6 135.2 650.6 DL 143.2 650.6 139.2 650.6 DL 147.2
+650.6 143.2 650.6 DL 151.2 650.6 147.2 650.6 DL 155.2 650.6 151.2 650.6
+DL 159.2 650.6 155.2 650.6 DL 163.2 650.6 159.2 650.6 DL 167.2 650.6
+163.2 650.6 DL 171.2 650.6 167.2 650.6 DL 175.2 650.6 171.2 650.6 DL
+179.2 650.6 175.2 650.6 DL 183.2 650.6 179.2 650.6 DL 187.2 650.6 183.2
+650.6 DL 191.2 650.6 187.2 650.6 DL 195.2 650.6 191.2 650.6 DL 199.2
+650.6 195.2 650.6 DL 203.2 650.6 199.2 650.6 DL 207.2 650.6 203.2 650.6
+DL 211.2 650.6 207.2 650.6 DL 215.2 650.6 211.2 650.6 DL 219.2 650.6
+215.2 650.6 DL 223.2 650.6 219.2 650.6 DL/F4 5/Times-Roman@0 SF(1)100.8
+661 Q/F5 8/Times-Roman@0 SF .338(One checkpoint is not f)2.338 3.2 N
+.338(ar enough.)-.08 F .338(The reco)4.338 F -.12(ve)-.12 G .338
+(ry system can-).12 F .211
+(not be sure that the most recent checkpoint completed \212 it may ha)
+79.2 673.8 R -.12(ve)-.16 G .734
+(been interrupted by the crash that forced the reco)79.2 683.4 R -.12
+(ve)-.12 G .734(ry system to run).12 F(in the \214rst place.)79.2 693 Q
+F1(Berk)323.2 84 Q(ele)-.1 E 3.306(yD)-.15 G 3.306(Bc)-3.306 G .806
+(an lock entire database \214les, which cor)-3.306 F(-)-.2 E .845
+(respond to tables, or indi)323.2 96 R .844(vidual pages in them.)-.25 F
+.844(It does)5.844 F 2.141(no record-le)323.2 108 R -.15(ve)-.25 G 4.641
+(ll).15 G 4.641(ocking. By)-4.641 F 2.142(shrinking the page size,)4.641
+F(ho)323.2 120 Q(we)-.25 E -.15(ve)-.25 G 4.427 -.4(r, d).15 H -2.15
+-.25(ev e).4 H 3.627(lopers can guarantee that e).25 F -.15(ve)-.25 G
+3.626(ry page).15 F 2.101(holds only a small number of records.)323.2
+132 R 2.102(This reduces)7.102 F(contention.)323.2 144 Q .388
+(If locking is enabled, then read and write operations on)323.2 160.2 R
+5.317(ad)323.2 172.2 S 2.817(atabase acquire tw)-5.317 F 2.817
+(o-phase locks, which are held)-.1 F 3.635
+(until the transaction completes.)323.2 184.2 R 3.635(Which objects are)
+8.635 F(lock)323.2 196.2 Q .738
+(ed and the order of lock acquisition depend on the)-.1 F -.1(wo)323.2
+208.2 S .503(rkload for each transaction.).1 F .502
+(It is possible for tw)5.502 F 3.002(oo)-.1 G(r)-3.002 E 1.315
+(more transactions to deadlock, so that each is w)323.2 220.2 R(aiting)
+-.1 E(for a lock that is held by another)323.2 232.2 Q(.)-.55 E(Berk)
+323.2 248.4 Q(ele)-.1 E 3.307(yD)-.15 G 3.307(Bd)-3.307 G .807
+(etects deadlocks and automatically rolls)-3.307 F 1.825
+(back one of the transactions.)323.2 260.4 R 1.825
+(This releases the locks)6.825 F 1.926(that it held and allo)323.2 272.4
+R 1.925(ws the other transactions to con-)-.25 F 3.346(tinue. The)323.2
+284.4 R .847(caller is noti\214ed that its transaction did not)3.346 F
+1.747(complete, and may restart it.)323.2 296.4 R(De)6.747 E -.15(ve)
+-.25 G 1.747(lopers can specify).15 F .646
+(the deadlock detection interv)323.2 308.4 R .647(al and the polic)-.25
+F 3.147(yt)-.15 G 3.147(ou)-3.147 G .647(se in)-3.147 F
+(choosing a transaction to roll back.)323.2 320.4 Q 6.686(The tw)323.2
+336.6 R 6.686(o-phase locking interf)-.1 F 6.686(aces are separately)-.1
+F .927(callable by applications that link Berk)323.2 348.6 R(ele)-.1 E
+3.427(yD)-.15 G .928(B, though)-3.427 F(fe)323.2 360.6 Q 5.64(wu)-.25 G
+3.14(sers ha)-5.64 F 3.44 -.15(ve n)-.2 H 3.14(eeded to use that f).15 F
+3.14(acility directly)-.1 F(.)-.65 E 2.211(Using these interf)323.2
+372.6 R 2.211(aces, Berk)-.1 F(ele)-.1 E 4.711(yD)-.15 G 4.712(Bp)-4.711
+G(ro)-4.712 E 2.212(vides a f)-.15 F(ast,)-.1 E 2.4
+(platform-portable locking system for general-purpose)323.2 384.6 R
+2.917(use. It)323.2 396.6 R .418
+(also lets users include non-database objects in a)2.917 F 3.497
+(database transaction, by controlling access to them)323.2 408.6 R -.15
+(ex)323.2 420.6 S(actly as if the).15 E 2.5(yw)-.15 G
+(ere inside the database.)-2.5 E .583(The Berk)323.2 436.8 R(ele)-.1 E
+3.083(yD)-.15 G 3.084(Bt)-3.083 G -.1(wo)-3.084 G .584(-phase locking f)
+.1 F .584(acility is b)-.1 F .584(uilt on)-.2 F .609(the f)323.2 448.8 R
+.609(astest correct locking primiti)-.1 F -.15(ve)-.25 G 3.108(st).15 G
+.608(hat are supported)-3.108 F 1.967(by the underlying architecture.)
+323.2 460.8 R 1.967(In the current imple-)6.967 F .593
+(mentation, this means that the locking system is dif)323.2 472.8 R(fer)
+-.25 E(-)-.2 E 1.709(ent on the v)323.2 484.8 R 1.709
+(arious UNIX platforms, and is still more)-.25 F(dif)323.2 496.8 Q .695
+(ferent on W)-.25 F(indo)-.4 E .695(ws NT)-.25 F 5.695(.I)-.74 G 3.195
+(no)-5.695 G .695(ur e)-3.195 F .695(xperience, the most)-.15 F(dif)
+323.2 508.8 Q 2.634
+(\214cult aspect of performance tuning is \214nding the)-.25 F -.1(fa)
+323.2 520.8 S .883(stest locking primiti).1 F -.15(ve)-.25 G 3.383(st)
+.15 G .883(hat w)-3.383 F .882(ork correctly on a par)-.1 F(-)-.2 E 1.26
+(ticular architecture and then inte)323.2 532.8 R 1.26(grating the ne)
+-.15 F 3.76(wi)-.25 G(nter)-3.76 E(-)-.2 E -.1(fa)323.2 544.8 S
+(ce with the se).1 E -.15(ve)-.25 G(ral that we already support.).15 E
+.536(The w)323.2 561 R .536(orld w)-.1 F .536
+(ould be a better place if the operating sys-)-.1 F 2.096
+(tems community w)323.2 573 R 2.096(ould uniformly implement POSIX)-.1 F
+1.31(locking primiti)323.2 585 R -.15(ve)-.25 G 3.81(sa).15 G 1.31(nd w)
+-3.81 F 1.31(ould guarantee that acquiring)-.1 F 1.085
+(an uncontested lock w)323.2 597 R 1.085(as a f)-.1 F 1.085
+(ast operation.)-.1 F 1.085(Locks must)6.085 F -.1(wo)323.2 609 S 3.641
+(rk both among threads in a single process and).1 F(among processes.)
+323.2 621 Q F0 3(3.11. Concurr)323.2 651 R(ency)-.216 E F1 .383
+(Good performance under concurrent operation is a crit-)323.2 667.2 R
+.766(ical design point for Berk)323.2 679.2 R(ele)-.1 E 3.266(yD)-.15 G
+3.265(B. Although)-3.266 F(Berk)3.265 E(ele)-.1 E(y)-.15 E 1.961
+(DB is itself not multi-threaded, it is thread-safe, and)323.2 691.2 R
+.547(runs well in threaded applications.)323.2 703.2 R(Philosophically)
+5.546 E 3.046(,w)-.65 G(e)-3.046 E(vie)323.2 715.2 Q 4.764(wt)-.25 G
+2.264(he use of threads and the choice of a threads)-4.764 F EP
+%%Page: 7 7
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF .066(package as a polic)79.2 84 R 2.566(yd)-.15
+G .065(ecision, and prefer to of)-2.566 F .065(fer mecha-)-.25 F .042
+(nism \(the ability to run threaded or not\), allo)79.2 96 R .043
+(wing appli-)-.25 F(cations to choose their o)79.2 108 Q(wn policies.)
+-.25 E 1.947(The locking, logging, and b)79.2 124.2 R(uf)-.2 E 1.947
+(fer pool subsystems all)-.25 F .711
+(use shared memory or other OS-speci\214c sharing f)79.2 136.2 R(acili-)
+-.1 E 1.713(ties to communicate.)79.2 148.2 R 1.713(Locks, b)6.713 F(uf)
+-.2 E 1.713(fer pool fetches, and)-.25 F 1.061(log writes beha)79.2
+160.2 R 1.361 -.15(ve i)-.2 H 3.561(nt).15 G 1.061(he same w)-3.561 F
+1.061(ay across threads in a)-.1 F .033(single process as the)79.2 172.2
+R 2.532(yd)-.15 G 2.532(oa)-2.532 G .032(cross dif)-2.532 F .032
+(ferent processes on a)-.25 F(single machine.)79.2 184.2 Q .896
+(As a result, concurrent database applications may start)79.2 200.4 R
+1.651(up a ne)79.2 212.4 R 4.151(wp)-.25 G 1.651(rocess for e)-4.151 F
+-.15(ve)-.25 G 1.651(ry single user).15 F 4.151(,m)-.4 G 1.651
+(ay create a)-4.151 F 2.848(single serv)79.2 224.4 R 2.848(er which spa)
+-.15 F 2.849(wns a ne)-.15 F 5.349(wt)-.25 G 2.849(hread for e)-5.349 F
+-.15(ve)-.25 G(ry).15 E(client request, or may choose an)79.2 236.4 Q
+2.5(yp)-.15 G(olic)-2.5 E 2.5(yi)-.15 G 2.5(nb)-2.5 G(etween.)-2.5 E
+(Berk)79.2 252.6 Q(ele)-.1 E 3.629(yD)-.15 G 3.629(Bh)-3.629 G 1.128
+(as been carefully designed to minimize)-3.629 F .07
+(contention and maximize concurrenc)79.2 264.6 R 3.87 -.65(y. T)-.15 H
+.07(he cache man-).65 F .57(ager allo)79.2 276.6 R .57
+(ws all threads or processes to bene\214t from I/O)-.25 F 2.917
+(done by one.)79.2 288.6 R 2.917(Shared resources must sometimes be)
+7.917 F(lock)79.2 300.6 Q 1.804(ed for e)-.1 F(xclusi)-.15 E 2.104 -.15
+(ve a)-.25 H 1.804(ccess by one thread of control.).15 F 1.757 -.8(We h)
+79.2 312.6 T -2.25 -.2(av e).8 H -.1(ke)2.857 G .158
+(pt critical sections small, and are careful not).1 F 1.199
+(to hold critical resource locks across system calls that)79.2 324.6 R
+.538(could deschedule the locking thread or process.)79.2 336.6 R
+(Sleep-)5.539 E .979(ycat Softw)79.2 348.6 R .979
+(are has customers with hundreds of concur)-.1 F(-)-.2 E(rent users w)
+79.2 360.6 Q(orking on a single database in production.)-.1 E/F1 12
+/Times-Bold@0 SF 3(4. Engineering)79.2 390.6 R(Philosoph)3 E(y)-.18 E F0
+(Fundamentally)79.2 406.8 Q 3.998(,B)-.65 G(erk)-3.998 E(ele)-.1 E 3.998
+(yD)-.15 G 3.998(Bi)-3.998 G 3.999(sac)-3.998 G 1.499
+(ollection of access)-3.999 F .19(methods with important f)79.2 418.8 R
+.19(acilities, lik)-.1 F 2.69(el)-.1 G .19(ogging, locking,)-2.69 F
+1.251(and transactional access underlying them.)79.2 430.8 R 1.252
+(In both the)6.252 F .992(research and the commercial w)79.2 442.8 R
+.991(orld, the techniques for)-.1 F -.2(bu)79.2 454.8 S 2.727
+(ilding systems lik).2 F 5.227(eB)-.1 G(erk)-5.227 E(ele)-.1 E 5.227(yD)
+-.15 G 5.227(Bh)-5.227 G -2.25 -.2(av e)-5.227 H 2.728(been well-)5.427
+F(kno)79.2 466.8 Q(wn for a long time.)-.25 E .443(The k)79.2 483 R .743
+-.15(ey a)-.1 H(dv).15 E .442(antage of Berk)-.25 F(ele)-.1 E 2.942(yD)
+-.15 G 2.942(Bi)-2.942 G 2.942(st)-2.942 G .442(he careful atten-)-2.942
+F 1.059(tion that has been paid to engineering details through-)79.2 495
+R 1.039(out its life.)79.2 507 R 2.639 -.8(We h)6.039 H -2.25 -.2(av e)
+.8 H 1.039(carefully designed the system so)3.739 F .452
+(that the core f)79.2 519 R .452(acilities, lik)-.1 F 2.952(el)-.1 G
+.452(ocking and I/O, surf)-2.952 F .453(ace the)-.1 F .972(right interf)
+79.2 531 R .971(aces and are otherwise opaque to the caller)-.1 F(.)-.55
+E .294(As programmers, we understand the v)79.2 543 R .295
+(alue of simplicity)-.25 F .206(and ha)79.2 555 R .506 -.15(ve w)-.2 H
+(ork).05 E .206(ed hard to simplify the interf)-.1 F .205(aces we sur)
+-.1 F(-)-.2 E -.1(fa)79.2 567 S(ce to users of the database system.).1 E
+(Berk)79.2 583.2 Q(ele)-.1 E 4.531(yD)-.15 G 4.531(Ba)-4.531 G -.2(vo)
+-4.731 G 2.031(ids limits in the code.).2 F 2.031(It places no)7.031 F
+.474(practical limit on the size of k)79.2 595.2 R -.15(ey)-.1 G .473
+(s, v).15 F .473(alues, or databases;)-.25 F(the)79.2 607.2 Q 2.5(ym)
+-.15 G(ay gro)-2.5 E 2.5(wt)-.25 G 2.5(oo)-2.5 G(ccup)-2.5 E 2.5(yt)-.1
+G(he a)-2.5 E -.25(va)-.2 G(ilable storage space.).25 E 1.857
+(The locking and logging subsystems ha)79.2 623.4 R 2.157 -.15(ve b)-.2
+H 1.858(een care-).15 F .184
+(fully crafted to reduce contention and impro)79.2 635.4 R .484 -.15
+(ve t)-.15 H(hrough-).15 E 2.16
+(put by shrinking or eliminating critical sections, and)79.2 647.4 R
+(reducing the sizes of lock)79.2 659.4 Q(ed re)-.1 E
+(gions and log entries.)-.15 E 2.238
+(There is nothing in the design or implementation of)79.2 675.6 R(Berk)
+79.2 687.6 Q(ele)-.1 E 2.818(yD)-.15 G 2.818(Bt)-2.818 G .318
+(hat pushes the state of the art in database)-2.818 F 3.545
+(systems. Rather)79.2 699.6 R 3.545(,w)-.4 G 3.545(eh)-3.545 G -2.25 -.2
+(av e)-3.545 H 1.044(been v)3.745 F 1.044(ery careful to get the)-.15 F
+4.321(engineering right.)79.2 711.6 R 4.321
+(The result is a system that is)9.321 F(superior)323.2 84 Q 2.867(,a)-.4
+G 2.867(sa)-2.867 G 2.866(ne)-2.867 G .366
+(mbedded database system, to an)-2.866 F 2.866(yo)-.15 G(ther)-2.866 E
+(solution a)323.2 96 Q -.25(va)-.2 G(ilable.).25 E .811
+(Most database systems trade of)323.2 112.2 R 3.312(fs)-.25 G .812
+(implicity for correct-)-3.312 F 4.151(ness. Either)323.2 124.2 R 1.651
+(the system is easy to use, or it supports)4.151 F 1.17
+(concurrent use and survi)323.2 136.2 R -.15(ve)-.25 G 3.67(ss).15 G
+1.17(ystem f)-3.67 F 3.67(ailures. Berk)-.1 F(ele)-.1 E(y)-.15 E 1.013
+(DB, because of its careful design and implementation,)323.2 148.2 R(of)
+323.2 160.2 Q(fers both simplicity and correctness.)-.25 E .759
+(The system has a small footprint, mak)323.2 176.4 R .759
+(es simple opera-)-.1 F 1.012
+(tions simple to carry out \(inserting a ne)323.2 188.4 R 3.512(wr)-.25
+G 1.012(ecord tak)-3.512 F(es)-.1 E 1.16(just a fe)323.2 200.4 R 3.66
+(wl)-.25 G 1.16(ines of code\), and beha)-3.66 F -.15(ve)-.2 G 3.66(sc)
+.15 G 1.16(orrectly in the)-3.66 F -.1(fa)323.2 212.4 S .528(ce of hea)
+.1 F .527(vy concurrent use, system crashes, and e)-.2 F -.15(ve)-.25 G
+(n).15 E(catastrophic f)323.2 224.4 Q(ailures lik)-.1 E 2.5(el)-.1 G
+(oss of a hard disk.)-2.5 E F1 3(5. The)323.2 254.4 R(Berk)3 E
+(eley DB 2.x Distrib)-.12 E(ution)-.24 E F0(Berk)323.2 270.6 Q(ele)-.1 E
+4.171(yD)-.15 G 4.171(Bi)-4.171 G 4.171(sd)-4.171 G(istrib)-4.171 E
+1.671(uted in source code form from)-.2 F/F2 10/Times-Italic@0 SF(www)
+323.2 282.6 Q(.sleepycat.com)-.74 E F0 7.322(.U)C 2.322
+(sers are free to do)-7.322 F 2.321(wnload and)-.25 F -.2(bu)323.2 294.6
+S(ild the softw).2 E(are, and to use it in their applications.)-.1 E F1
+3(5.1. What)323.2 324.6 R(is in the distrib)3 E(ution)-.24 E F0 4.827
+(The distrib)323.2 340.8 R 4.827(ution is a compressed archi)-.2 F 5.127
+-.15(ve \214)-.25 H 7.328(le. It).15 F .057
+(includes the source code for the Berk)323.2 352.8 R(ele)-.1 E 2.556(yD)
+-.15 G 2.556(Bl)-2.556 G(ibrary)-2.556 E 2.556(,a)-.65 G(s)-2.556 E .453
+(well as documentation, test suites, and supporting utili-)323.2 364.8 R
+(ties.)323.2 376.8 Q 2.613(The source code includes b)323.2 393 R 2.612
+(uild support for all sup-)-.2 F .254(ported platforms.)323.2 405 R .254
+(On UNIX systems Berk)5.254 F(ele)-.1 E 2.755(yD)-.15 G 2.755(Bu)-2.755
+G(ses)-2.755 E 1.28(the GNU autocon\214guration tool,)323.2 417 R/F3 10
+/Courier@0 SF(autoconf)3.78 E F0 3.78(,t)C 3.78(oi)-3.78 G(den-)-3.78 E
+.992(tify the system and to b)323.2 429 R .992
+(uild the library and supporting)-.2 F 3.589(utilities. Berk)323.2 441 R
+(ele)-.1 E 3.589(yD)-.15 G 3.588(Bi)-3.589 G 1.088(ncludes speci\214c b)
+-3.588 F 1.088(uild en)-.2 F(viron-)-.4 E .515
+(ments for other platforms, such as VMS and W)323.2 453 R(indo)-.4 E
+(ws.)-.25 E F1 3(5.1.1. Documentation)323.2 483 R F0 5.008(The distrib)
+323.2 499.2 R 5.008(uted system includes documentation in)-.2 F 1.626
+(HTML format.)323.2 511.2 R 1.626(The documentation is in tw)6.626 F
+4.127(op)-.1 G 1.627(arts: a)-4.127 F .725
+(UNIX-style reference manual for use by programmers,)323.2 523.2 R
+(and a reference guide which is tutorial in nature.)323.2 535.2 Q F1 3
+(5.1.2. T)323.2 565.2 R(est suite)-1.104 E F0 1.107(The softw)323.2
+581.4 R 1.108(are also includes a complete test suite, writ-)-.1 F .155
+(ten in Tcl.)323.2 593.4 R 1.754 -.8(We b)5.154 H(elie).8 E .454 -.15
+(ve t)-.25 H .154(hat the test suite is a k).15 F .454 -.15(ey a)-.1 H
+(dv).15 E(an-)-.25 E(tage of Berk)323.2 605.4 Q(ele)-.1 E 2.5(yD)-.15 G
+2.5(Bo)-2.5 G -.15(ve)-2.65 G 2.5(rc).15 G(omparable systems.)-2.5 E
+2.612(First, the test suite allo)323.2 621.6 R 2.613(ws users who do)
+-.25 F 2.613(wnload and)-.25 F -.2(bu)323.2 633.6 S 1.731(ild the softw)
+.2 F 1.731(are to be sure that it is operating cor)-.1 F(-)-.2 E(rectly)
+323.2 645.6 Q(.)-.65 E .893(Second, the test suite allo)323.2 661.8 R
+.894(ws us, lik)-.25 F 3.394(eo)-.1 G .894(ther commercial)-3.394 F(de)
+323.2 673.8 Q -.15(ve)-.25 G .536(lopers of database softw).15 F .536
+(are, to e)-.1 F -.15(xe)-.15 G .535(rcise the system).15 F 2.256
+(thoroughly at e)323.2 685.8 R -.15(ve)-.25 G 2.256(ry release.).15 F
+2.256(When we learn of ne)7.256 F(w)-.25 E -.2(bu)323.2 697.8 S 1.719
+(gs, we add them to the test suite.).2 F 3.319 -.8(We r)6.719 H 1.719
+(un the test).8 F 5.692(suite continually during de)323.2 709.8 R -.15
+(ve)-.25 G 5.692(lopment c).15 F 5.692(ycles, and)-.15 F EP
+%%Page: 8 8
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 10/Times-Roman@0 SF(al)79.2 84 Q -.1(wa)-.1 G .314
+(ys prior to release.).1 F .314(The result is a much more reli-)5.314 F
+(able system by the time it reaches beta release.)79.2 96 Q/F1 12
+/Times-Bold@0 SF 3(5.2. Binary)79.2 126 R(distrib)3 E(ution)-.24 E F0
+(Sleep)79.2 142.2 Q .893(ycat mak)-.1 F .893
+(es compiled libraries and general binary)-.1 F(distrib)79.2 154.2 Q
+(utions a)-.2 E -.25(va)-.2 G(ilable to customers for a fee.).25 E F1 3
+(5.3. Supported)79.2 184.2 R(platf)3 E(orms)-.3 E F0(Berk)79.2 200.4 Q
+(ele)-.1 E 5.623(yD)-.15 G 5.623(Br)-5.623 G 3.123(uns on an)-5.623 F
+5.622(yo)-.15 G 3.122(perating system with a)-5.622 F .816
+(POSIX 1003.1 interf)79.2 212.4 R .817(ace [IEEE96], which includes vir)
+-.1 F(-)-.2 E 1.998(tually e)79.2 224.4 R -.15(ve)-.25 G 1.997
+(ry UNIX system.).15 F 1.997(In addition, the softw)6.997 F(are)-.1 E
+2.85(runs on VMS, W)79.2 236.4 R(indo)-.4 E 2.85(ws/95, W)-.25 F(indo)
+-.4 E 2.85(ws/98, and W)-.25 F(in-)-.4 E(do)79.2 248.4 Q(ws/NT)-.25 E
+10.21(.S)-.74 G(leep)-10.21 E 5.21(ycat Softw)-.1 F 5.21
+(are no longer supports)-.1 F(deplo)79.2 260.4 Q(yment on sixteen-bit W)
+-.1 E(indo)-.4 E(ws systems.)-.25 E F1 3(6. Berk)79.2 290.4 R
+(eley DB 2.x Licensing)-.12 E F0(Berk)79.2 306.6 Q(ele)-.1 E 2.627(yD)
+-.15 G 2.627(B2)-2.627 G .128(.x is distrib)-2.627 F .128
+(uted as an Open Source prod-)-.2 F 4.709(uct. The)79.2 318.6 R(softw)
+4.709 E 2.209(are is freely a)-.1 F -.25(va)-.2 G 2.209
+(ilable from us at our).25 F -.8(We)79.2 330.6 S 3.372(bs).8 G .872
+(ite, and in other media.)-3.372 F .872(Users are free to do)5.872 F
+(wn-)-.25 E(load the softw)79.2 342.6 Q(are and b)-.1 E
+(uild applications with it.)-.2 E 1.023(The 1.x v)79.2 358.8 R 1.022
+(ersions of Berk)-.15 F(ele)-.1 E 3.522(yD)-.15 G 3.522(Bw)-3.522 G
+1.022(ere co)-3.522 F -.15(ve)-.15 G 1.022(red by the).15 F 3.763
+(UC Berk)79.2 370.8 R(ele)-.1 E 6.263(yc)-.15 G(op)-6.263 E 3.763
+(yright that co)-.1 F -.15(ve)-.15 G 3.764(rs softw).15 F 3.764
+(are freely)-.1 F(redistrib)79.2 382.8 Q 1.742(utable in source form.)
+-.2 F 1.741(When Sleep)6.742 F 1.741(ycat Soft-)-.1 F -.1(wa)79.2 394.8
+S .906(re w).1 F .907(as formed, we needed to draft a license consis-)
+-.1 F 2.319(tent with the cop)79.2 406.8 R 2.319(yright go)-.1 F -.15
+(ve)-.15 G 2.318(rning the e).15 F 2.318(xisting, older)-.15 F(softw)
+79.2 418.8 Q 5.328(are. Because)-.1 F 2.828(of important dif)5.328 F
+2.828(ferences between)-.25 F .497(the UC Berk)79.2 430.8 R(ele)-.1 E
+2.997(yc)-.15 G(op)-2.997 E .497(yright and the GPL, it w)-.1 F .496
+(as impos-)-.1 F .884(sible for us to use the GPL.)79.2 442.8 R 3.384
+(As)5.884 G .884(econd cop)-3.384 F .884(yright, with)-.1 F .87
+(terms contradictory to the \214rst, simply w)79.2 454.8 R .87
+(ould not ha)-.1 F -.15(ve)-.2 G -.1(wo)79.2 466.8 S(rk).1 E(ed.)-.1 E
+(Sleep)79.2 483 Q 2.533(ycat w)-.1 F 2.533
+(anted to continue Open Source de)-.1 F -.15(ve)-.25 G(lop-).15 E 2.079
+(ment of Berk)79.2 495 R(ele)-.1 E 4.579(yD)-.15 G 4.579(Bf)-4.579 G
+2.079(or se)-4.579 F -.15(ve)-.25 G 2.079(ral reasons.).15 F 3.678 -.8
+(We a)7.078 H(gree).8 E .853
+(with Raymond [Raym98] and others that Open Source)79.2 507 R(softw)79.2
+519 Q .763(are is typically of higher quality than proprietary)-.1 F(,)
+-.65 E 2.616(binary-only products.)79.2 531 R 2.617
+(Our customers bene\214t from a)7.616 F .983(community of de)79.2 543 R
+-.15(ve)-.25 G .983(lopers who kno).15 F 3.483(wa)-.25 G .983
+(nd use Berk)-3.483 F(ele)-.1 E(y)-.15 E 1.317
+(DB, and can help with application design, deb)79.2 555 R(ugging,)-.2 E
+1.65(and performance tuning.)79.2 567 R -.4(Wi)6.65 G 1.65
+(despread distrib).4 F 1.65(ution and)-.2 F 1.017
+(use of the source code tends to isolate b)79.2 579 R 1.017(ugs early)
+-.2 F 3.517(,a)-.65 G(nd)-3.517 E .032(to get \214x)79.2 591 R .031
+(es back into the distrib)-.15 F .031(uted system quickly)-.2 F 5.031
+(.A)-.65 G(s)-5.031 E 3.553(ar)79.2 603 S 1.053(esult, Berk)-3.553 F
+(ele)-.1 E 3.553(yD)-.15 G 3.553(Bi)-3.553 G 3.553(sm)-3.553 G 1.053
+(ore reliable.)-3.553 F 1.054(Just as impor)6.054 F(-)-.2 E(tantly)79.2
+615 Q 3.695(,i)-.65 G(ndi)-3.695 E 1.195
+(vidual users are able to contrib)-.25 F 1.195(ute ne)-.2 F 3.695(wf)
+-.25 G(ea-)-3.695 E 1.056
+(tures and performance enhancements, to the bene\214t of)79.2 627 R
+-2.15 -.25(ev e)79.2 639 T .359(ryone who uses Berk).25 F(ele)-.1 E
+2.859(yD)-.15 G 2.859(B. From)-2.859 F 2.858(ab)2.859 G .358
+(usiness per)-3.058 F(-)-.2 E(specti)79.2 651 Q -.15(ve)-.25 G 3.115(,O)
+.15 G .615(pen Source and free distrib)-3.115 F .615(ution of the soft-)
+-.2 F -.1(wa)79.2 663 S 1.605(re creates share for us, and gi).1 F -.15
+(ve)-.25 G 4.105(su).15 G 4.105(sam)-4.105 G(ark)-4.105 E 1.605(et into)
+-.1 F .412(which we can sell products and services.)79.2 675 R(Finally)
+5.413 E 2.913(,m)-.65 G(ak-)-2.913 E .148(ing the source code freely a)
+79.2 687 R -.25(va)-.2 G .147(ilable reduces our support).25 F 2.436
+(load, since customers can \214nd and \214x b)79.2 699 R 2.437
+(ugs without)-.2 F(recourse to us, in man)79.2 711 Q 2.5(yc)-.15 G
+(ases.)-2.5 E 4.727 -.8(To p)323.2 84 T(reserv).8 E 5.627(et)-.15 G
+3.126(he Open Source heritage of the older)-5.627 F(Berk)323.2 96 Q(ele)
+-.1 E 3.003(yD)-.15 G 3.003(Bc)-3.003 G .504(ode, we drafted a ne)-3.003
+F 3.004(wl)-.25 G .504(icense go)-3.004 F -.15(ve)-.15 G(rning).15 E
+.417(the distrib)323.2 108 R .417(ution of Berk)-.2 F(ele)-.1 E 2.916
+(yD)-.15 G 2.916(B2)-2.916 G 2.916(.x. W)-2.916 F 2.916(ea)-.8 G .416
+(dopted terms)-2.916 F .411(from the GPL that mak)323.2 120 R 2.911(ei)
+-.1 G 2.911(ti)-2.911 G .411(mpossible to turn our Open)-2.911 F 1.289
+(Source code into proprietary code o)323.2 132 R 1.288(wned by someone)
+-.25 F(else.)323.2 144 Q(Brie\215y)323.2 160.2 Q 3.18(,t)-.65 G .68
+(he terms go)-3.18 F -.15(ve)-.15 G .68(rning the use and distrib).15 F
+.68(ution of)-.2 F(Berk)323.2 172.2 Q(ele)-.1 E 2.5(yD)-.15 G 2.5(Ba)
+-2.5 G(re:)-2.5 E/F2 8/Times-Roman@0 SF<83>328.2 188.4 Q F0
+(your application must be internal to your site, or)17.2 E F2<83>328.2
+204.6 Q F0 .612(your application must be freely redistrib)17.2 F .611
+(utable in)-.2 F(source form, or)348.2 216.6 Q F2<83>328.2 232.8 Q F0
+(you must get a license from us.)17.2 E -.15(Fo)323.2 249 S 2.631(rc).15
+G .131(ustomers who prefer not to distrib)-2.631 F .132(ute Open Source)
+-.2 F 1.493(products, we sell licenses to use and e)323.2 261 R 1.492
+(xtend Berk)-.15 F(ele)-.1 E(y)-.15 E(DB at a reasonable cost.)323.2 273
+Q 2.675 -.8(We w)323.2 289.2 T 1.076
+(ork hard to accommodate the needs of the Open).7 F .606
+(Source community)323.2 301.2 R 5.606(.F)-.65 G .606(or e)-5.756 F .606
+(xample, we ha)-.15 F .905 -.15(ve c)-.2 H .605(rafted spe-).15 F 1.415
+(cial licensing arrangements with Gnome to encourage)323.2 313.2 R
+(its use and distrib)323.2 325.2 Q(ution of Berk)-.2 E(ele)-.1 E 2.5(yD)
+-.15 G(B.)-2.5 E(Berk)323.2 341.4 Q(ele)-.1 E 4.103(yD)-.15 G 4.103(Bc)
+-4.103 G 1.603(onforms to the Open Source de\214nition)-4.103 F 4.867
+([Open99]. The)323.2 353.4 R 2.367
+(license has been carefully crafted to)4.867 F -.1(ke)323.2 365.4 S .643
+(ep the product a).1 F -.25(va)-.2 G .642(ilable as an Open Source of)
+.25 F(fering,)-.25 E(while pro)323.2 377.4 Q
+(viding enough of a return on our in)-.15 E -.15(ve)-.4 G(stment to).15
+E 1.546(fund continued de)323.2 389.4 R -.15(ve)-.25 G 1.546
+(lopment and support of the prod-).15 F 3.033(uct. The)323.2 401.4 R
+.534(current license has created a b)3.033 F .534(usiness capable)-.2 F
+.916(of funding three years of de)323.2 413.4 R -.15(ve)-.25 G .916
+(lopment on the softw).15 F(are)-.1 E(that simply w)323.2 425.4 Q
+(ould not ha)-.1 E .3 -.15(ve h)-.2 H(appened otherwise.).15 E F1 3
+(7. Summary)323.2 455.4 R F0(Berk)323.2 471.6 Q(ele)-.1 E 2.991(yD)-.15
+G 2.991(Bo)-2.991 G -.25(ff)-2.991 G .491
+(ers a unique collection of features, tar).25 F(-)-.2 E .175
+(geted squarely at softw)323.2 483.6 R .174(are de)-.1 F -.15(ve)-.25 G
+.174(lopers who need simple,).15 F .492
+(reliable database management services in their applica-)323.2 495.6 R
+5.3(tions. Good)323.2 507.6 R 2.8(design and implementation and careful)
+5.3 F 1.633(engineering throughout mak)323.2 519.6 R 4.133(et)-.1 G
+1.633(he softw)-4.133 F 1.634(are better than)-.1 F(man)323.2 531.6 Q
+2.5(yo)-.15 G(ther systems.)-2.5 E(Berk)323.2 547.8 Q(ele)-.1 E 4.1(yD)
+-.15 G 4.1(Bi)-4.1 G 4.1(sa)-4.1 G 4.1(nO)-4.1 G 1.6
+(pen Source product, a)-4.1 F -.25(va)-.2 G 1.6(ilable at).25 F/F3 10
+/Times-Italic@0 SF(www)323.2 559.8 Q(.sleepycat.com)-.74 E F0 .654
+(for do)3.154 F 3.154(wnload. The)-.25 F(distrib)3.154 E .654(uted sys-)
+-.2 F .383(tem includes e)323.2 571.8 R -.15(ve)-.25 G .383
+(rything needed to b).15 F .382(uild and deplo)-.2 F 2.882(yt)-.1 G(he)
+-2.882 E(softw)323.2 583.8 Q(are or to port it to ne)-.1 E 2.5(ws)-.25 G
+(ystems.)-2.5 E(Sleep)323.2 600 Q 2.633(ycat Softw)-.1 F 2.633
+(are distrib)-.1 F 2.633(utes Berk)-.2 F(ele)-.1 E 5.133(yD)-.15 G 5.134
+(Bu)-5.133 G 2.634(nder a)-5.134 F .764(license agreement that dra)323.2
+612 R .764(ws on both the UC Berk)-.15 F(ele)-.1 E(y)-.15 E(cop)323.2
+624 Q 2.377(yright and the GPL.)-.1 F 2.377(The license guarantees that)
+7.377 F(Berk)323.2 636 Q(ele)-.1 E 3.384(yD)-.15 G 3.384(Bw)-3.384 G
+.884(ill remain an Open Source product and)-3.384 F(pro)323.2 648 Q
+1.493(vides Sleep)-.15 F 1.493(ycat with opportunities to mak)-.1 F
+3.994(em)-.1 G(one)-3.994 E(y)-.15 E(to fund continued de)323.2 660 Q
+-.15(ve)-.25 G(lopment on the softw).15 E(are.)-.1 E EP
+%%Page: 9 9
+%%BeginPageSetup
+BP
+%%EndPageSetup
+/F0 12/Times-Bold@0 SF 3(8. Refer)79.2 84 R(ences)-.216 E/F1 10
+/Times-Roman@0 SF([Come79])79.2 100.2 Q(Comer)104.2 112.2 Q 3.127(,D)-.4
+G .627(., \231The Ubiquitous B-tree,)-3.127 F<9a>-.7 E/F2 10
+/Times-Italic@0 SF -.3(AC)3.126 G 3.126(MC).3 G(om-)-3.126 E .404
+(puting Surve)104.2 124.2 R(ys)-.3 E F1 -1.29(Vo)2.904 G .404
+(lume 11, number 2, June 1979.)1.29 F([Gray93])79.2 140.4 Q(Gray)104.2
+152.4 Q 2.982(,J)-.65 G .482(., and Reuter)-2.982 F 2.982(,A)-.4 G(.,)
+-2.982 E F2 -1.55 -.55(Tr a)2.981 H .481(nsaction Pr).55 F(ocessing:)
+-.45 E 6.776(Concepts and T)104.2 164.4 R(ec)-.92 E(hniques)-.15 E F1
+9.277(,M)C(or)-9.277 E -.05(ga)-.18 G(n-Kaufman).05 E(Publishers, 1993.)
+104.2 176.4 Q([IEEE96])79.2 192.6 Q .364
+(Institute for Electrical and Electronics Engineers,)104.2 204.6 R F2
+(IEEE/ANSI Std 1003.1)104.2 216.6 Q F1 2.5(,1)C(996 Edition.)-2.5 E
+([Litw80])79.2 232.8 Q 2.365(Litwin, W)104.2 244.8 R 2.366
+(., \231Linear Hashing: A Ne)-.92 F 4.866(wT)-.25 G 2.366(ool for)-5.666
+F 1.784(File and T)104.2 256.8 R 1.783(able Addressing,)-.8 F<9a>-.7 E
+F2(Pr)4.283 E 1.783(oceedings of the)-.45 F 4.804
+(6th International Confer)104.2 268.8 R 4.804(ence on V)-.37 F 4.804
+(ery Lar)-1.11 F -.1(ge)-.37 G 1.983(Databases \(VLDB\))104.2 280.8 R F1
+4.483(,M)C 1.982(ontreal, Quebec, Canada,)-4.483 F(October 1980.)104.2
+292.8 Q([Open94])79.2 309 Q 4.068(The Open Group,)104.2 321 R F2
+(Distrib)6.568 E 4.069(uted TP: The XA+)-.2 F .78(Speci\214cation, V)
+104.2 333 R(er)-1.11 E .78(sion 2)-.1 F F1 3.28(,T)C .78
+(he Open Group, 1994.)-3.28 F([Open99])79.2 349.2 Q(Opensource.or)104.2
+361.2 Q 8.307(g, \231Open Source De\214nition,)-.18 F<9a>-.7 E F2(www)
+104.2 373.2 Q(.opensour)-.74 E(ce)-.37 E(.or)-.15 E(g/osd.html)-.37 E F1
+3.13(,v)C .63(ersion 1.4, 1999.)-3.28 F([Raym98])79.2 389.4 Q .718
+(Raymond, E.S., \231The Cathedral and the Bazaar)104.2 401.4 R -.7<2c9a>
+-.4 G F2(www)104.2 413.4 Q(.tuxedo.or)-.74 E(g/~esr/writings/cathedr)
+-.37 E(al-)-.15 E(bazaar/cathedr)104.2 425.4 Q(al-bazaar)-.15 E(.html)
+-1.11 E F1 2.5(,J)C(anuary 1998.)-2.5 E([Selt91])79.2 441.6 Q(Seltzer)
+104.2 453.6 Q 2.578(,M)-.4 G .078(., and Y)-2.578 F .079(igit, O., \231)
+-.55 F 2.579(AN)-.8 G .579 -.25(ew H)-2.579 H .079(ashing P).25 F(ack-)
+-.15 E 6.704(age for UNIX,)104.2 465.6 R<9a>-.7 E F2(Pr)9.204 E 6.704
+(oceedings 1991 W)-.45 F(inter)-.55 E(USENIX Confer)104.2 477.6 Q(ence)
+-.37 E F1 2.5(,D)C(allas, TX, January 1991.)-2.5 E([Selt92])79.2 493.8 Q
+(Seltzer)104.2 505.8 Q 5.365(,M)-.4 G 2.865
+(., and Olson, M., \231LIBTP: Portable)-5.365 F 2.845(Modular T)104.2
+517.8 R 2.845(ransactions for UNIX,)-.35 F<9a>-.7 E F2(Pr)5.345 E
+(oceedings)-.45 E 1.49(1992 W)104.2 529.8 R 1.49(inter Usenix Confer)
+-.55 F(ence)-.37 E F1 3.99(,S)C 1.49(an Francisco,)-3.99 F
+(CA, January 1992.)104.2 541.8 Q([Ston82])79.2 558 Q(Stonebrak)104.2 570
+Q(er)-.1 E 10.04(,M)-.4 G 7.54(., Stettner)-10.04 F 10.04(,H)-.4 G 7.54
+(., Kalash, J.,)-10.04 F .763(Guttman, A., and L)104.2 582 R .764
+(ynn, N., \231Document Process-)-.55 F .557
+(ing in a Relational Database System,)104.2 594 R 3.056<9a4d>-.7 G
+(emoran-)-3.056 E .825(dum No. UCB/ERL M82/32, Uni)104.2 606 R -.15(ve)
+-.25 G .825(rsity of Cali-).15 F(fornia at Berk)104.2 618 Q(ele)-.1 E
+1.3 -.65(y, B)-.15 H(erk).65 E(ele)-.1 E 1.3 -.65(y, C)-.15 H
+(A, May 1982.).65 E EP
+%%Trailer
+end
+%%EOF
diff --git a/libdb/docs/ref/refs/embedded.html b/libdb/docs/ref/refs/embedded.html
new file mode 100644
index 0000000..b7641d9
--- /dev/null
+++ b/libdb/docs/ref/refs/embedded.html
@@ -0,0 +1,672 @@
+<html>
+<head>
+<title>Challenges in Embedded Database System Administration</title>
+</head>
+<body bgcolor=white>
+<center>
+<h1>Challenges in Embedded Database System Administration</h1>
+<h3>Margo Seltzer, Harvard University</h3>
+<h3>Michael Olson, Sleepycat Software, Inc.</h3>
+<em>{margo,mao}@sleepycat.com</em>
+</center>
+<p>
+Database configuration and maintenance have historically been complex tasks,
+often
+requiring expert knowledge of database design and application
+behavior.
+In an embedded environment, it is not feasible to require such
+expertise and ongoing database maintenance.
+This paper discusses the database administration
+challenges posed by embedded systems and describes how the
+Berkeley DB architecture addresses these challenges.
+
+<h2>1. Introduction</h2>
+
+Embedded systems provide a combination of opportunities and challenges
+in application and system configuration and management.
+As an embedded system is most often dedicated to a single application or
+small set of tasks, the operating conditions of the system are
+typically better understood than those of general purpose computing
+environments.
+Similarly, as embedded systems are dedicated to a small set of tasks,
+one would expect the software that manages them to be small
+and simple.
+On the other hand, once an embedded system is deployed, it must
+continue to function without interruption and without administrator
+intervention.
+<p>
+Database administration consists of two components,
+initial configuration and ongoing maintenance.
+Initial configuration consists of database design, manifestation,
+and tuning.
+The instantiation of the design includes decomposing the design
+into tables, relations, or objects and designating proper indices
+and their implementations (e.g., Btrees, hash tables, etc.).
+Tuning a design requires selecting a location for the log and
+data files, selecting appropriate database page sizes, specifying
+the size of in-memory caches, and specifying the limits of
+multi-threading and concurrency.
+As embedded systems define a specific environment and set of tasks,
+requiring expertise during the initial system
+configuration process is acceptable, and we focus our efforts on
+the ongoing maintenance of the system.
+In this way, our emphasis differs from other projects such as
+Microsoft's AutoAdmin project <a href="#Chaud982">[3]</a>, and the "no-knobs"
+administration that is identified as an area of important future
+research by the Asilomar authors <a href="#Bern98">[1]</a>.
+<p>
+In this paper, we focus on what the authors
+of the Asilomar report call "gizmo" databases <a href="#Bern98"> [1]</a>,
+databases
+that reside in devices such as smart cards, toasters, or telephones.
+The key characteristics of such databases are that their
+functionality is completely transparent to users, no one ever
+performs explicit database operations or
+database maintenance, the database may crash at any time and
+must recover instantly, the device may undergo a hard reset at
+any time, requiring that the database return to its initial
+state, and the semantic integrity of the database must be maintained
+at all times.
+In Section 2, we provide more detail on the sorts of tasks
+typically performed by database administrators (DBAs) that must
+be automated in an embedded system.
+<p>
+The rest of this paper is structured as follows.
+In Section 2, we outline the requirements for embedded database support.
+In Section 3, we discuss how Berkeley DB
+is conducive to the hands-off management
+required in embedded systems.
+In Section 4, we discuss novel features that
+enhance Berkeley
+DB's suitability for embedded applications.
+In Section 5, we discuss issues of footprint size.
+In Section 6 we discuss related work, and we conclude
+in Section 7.
+
+<h2>2. Embedded Database Requirements</h2>
+Historically, much of the commercial database industry has been driven
+by the requirements of high performance online transaction
+processing (OLTP), complex query processing, and the industry
+standard benchmarks that have emerged (e.g., TPC-C <a href="#TPCC">[9]</a>,
+TPC-D <a href="#TPCD">[10]</a>) to
+allow for system comparisons.
+As embedded systems typically perform fairly simple queries,
+such metrics are not nearly as relevant for embedded database
+systems as are ease of maintenance, robustness, and small footprint.
+Of these three requirements, robustness and ease of maintenance
+are the key issues.
+Users must trust the data stored in their devices and must not need
+to manually perform anything resembling system administration in order
+to get their unit to work properly.
+Fortunately, ease of use and robustness are important side
+effects of simplicity and good design.
+These, in turn, lead to a small size, providing the third
+requirement of an embedded system.
+<h3>2.1 The User Perspective</h3>
+<p>
+In the embedded database arena, it is the ongoing maintenance tasks
+that must be automated, not necessarily the initial system configuration.
+There are five tasks
+that are traditionally performed by DBAs,
+but must be performed automatically
+in embedded database systems.
+These tasks are
+log archival and reclamation,
+backup,
+data compaction/reorganization,
+automatic and rapid recovery, and
+reinitialization from scratch.
+<P>
+Log archival and backup are tightly coupled.
+Database backups are part of any
+large database installation, and log archival is analogous to incremental
+backup.
+It is not clear what the implications of backup and archival are in
+an embedded system.
+Consumers do not back up their VCRs or refrigerators, yet they do
+(or should) back up their personal computers or personal digital
+assistants.
+For the remainder of this paper, we assume that backups, in some form,
+are required for gizmo databases (imagine having to reprogram, manually,
+the television viewing access pattern learned by some set-top television
+systems today).
+Furthermore, we require that those backups are nearly instantaneous or
+completely transparent,
+as users should not be aware that their gizmos are being backed up
+and should not have to explicitly initiate such backups.
+<p>
+Data compaction or reorganization has traditionally required periodic
+dumping and restoration of
+database tables and the recreation of indices.
+In an embedded system, such reorganization must happen automatically.
+<p>
+Recovery issues are similar in embedded and traditional environments
+with a few exceptions.
+While a recovery time of a few seconds or even a minute is acceptable
+for a large server installation, no one is willing to wait
+for their telephone or television to reboot.
+As with archival, recovery must be nearly instantaneous in an embedded product.
+Secondly, it is often the case that a system will be completely
+reinitialized, rather than simply rebooted.
+In this case, the embedded database must be restored to its initial
+state, freeing all its resources.
+This is not typically a requirement of large server systems.
+<h3>2.2 The Developer Perspective</h3>
+<p>
+In addition to the maintenance-free operation required of the
+embedded systems, there are a number of requirements that fall
+out of the constrained resources typically found in the "gizmos"
+using gizmo databases. These requirements are:
+small footprint,
+short code-path,
+programmatic interface for tight application coupling and
+to avoid the overhead (in both time and size) of
+interfaces such as SQL and ODBC,
+application configurability and flexibility,
+support for complete memory-resident operation (e.g., these systems
+must run on gizmos without file systems), and
+support for multi-threading.
+<p>
+A small footprint and short code-path are self-explanatory; what is less
+obvious is that the programmatic interface requirement follows directly
+from them.
+Traditional interfaces such as ODBC and SQL add significant
+size overhead and frequently add multiple context/thread switches
+per operation, not to mention several IPC calls.
+An embedded product is less likely to require the complex
+query processing that SQL enables.
+Instead, in the embedded space, the ability for an application
+to configure the database for the specific tasks in question
+is more important than a general query interface.
+<p>
+As some systems do not provide storage other than RAM and ROM,
+it is essential that an embedded database work seamlessly
+in memory-only environments.
+Similarly, many of today's embedded operating systems provide a
+single address space architecture, so a simple, multi-threaded
+capability is essential for applications requiring any concurrency.
+<p>
+In general, embedded applications run on gizmos whose native
+operating system support varies tremendously.
+For example, the embedded OS may or may
+not support user-level processing or multi-threading.
+Even if it does, a particular embedded
+application may or may not need it.
+Not all applications need more than one thread of control.
+An embedded database must provide mechanisms to developers
+without deciding policy.
+For example, the threading model in an application is a matter of policy,
+and depends
+not on the database software, but on the hardware, operating
+system, and the application's feature set.
+Therefore, the data manager must provide for the use of multi-threading,
+but not require it.
+
+<h2>3. Berkeley DB: A Database for Embedded Systems</h2>
+Berkeley DB is the result of implementing database functionality
+using the UNIX tool-based philosophy.
+The current Berkeley DB package, as distributed by Sleepycat
+Software, is a descendant of the hash and btree access methods
+distributed with 4.4BSD and its descendants.
+The original package (referred to as DB-1.85),
+while intended as a public domain replacement for dbm and
+its followers (e.g., ndbm, gdbm, etc.), rapidly became widely
+used as an efficient, easy-to-use data store.
+It was incorporated into a number of Open Source packages including
+Perl, Sendmail, Kerberos, and the GNU C-library.
+<p>
+Versions 2.X and higher are distributed by Sleepycat Software and
+add functionality for concurrency, logging, transactions, and
+recovery.
+Each piece of additional functionality is implemented as an independent
+module, which means that the subsystems can be used outside the
+context of Berkeley DB. For example, the locking subsystem can
+easily be used to implement locking for a non-DB application and
+the shared memory buffer pool can be used for any application
+caching data in main memory.
+This subsystem design allows a designer to pick and choose
+the functionality necessary for the application, minimizing
+memory footprint and maximizing performance.
+This addresses the small footprint and short code-path criteria
+mentioned in the previous section.
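+<p>
+As a rough illustration of this modularity, the following sketch uses
+only the locking subsystem to protect a resource that is not managed
+by Berkeley DB at all.  It is a minimal sketch assuming a DB 4.x-style
+C API (db_env_create, DB_ENV->lock_id, DB_ENV->lock_get,
+DB_ENV->lock_put); the resource name is hypothetical.
+<pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: use only the lock subsystem to protect a non-DB resource. */
+int
+lock_example(void)
+{
+	DB_ENV *env;
+	DB_LOCK lock;
+	DBT obj;
+	u_int32_t locker;
+	int ret;
+
+	if ((ret = db_env_create(&env, 0)) != 0)
+		return (ret);
+	/* Initialize only the locking subsystem, in private memory. */
+	if ((ret = env->open(env, NULL,
+	    DB_CREATE | DB_INIT_LOCK | DB_PRIVATE, 0)) != 0)
+		return (ret);
+
+	env->lock_id(env, &locker);		/* Obtain a locker id. */
+	memset(&obj, 0, sizeof(obj));
+	obj.data = "my-resource";		/* Hypothetical resource name. */
+	obj.size = strlen(obj.data);
+
+	env->lock_get(env, locker, 0, &obj, DB_LOCK_WRITE, &lock);
+	/* ... critical section on the shared resource ... */
+	env->lock_put(env, &lock);
+
+	return (env->close(env, 0));
+}
+</pre>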
+<p>
+As Berkeley DB grew out of a replacement for dbm, its primary
+implementation language has always been C and its interface has
+been programmatic. The C interface is the native interface,
+unlike many database systems where the programmatic API is simply
+a layer on top of an already-costly query interface (e.g., embedded
+SQL).
+Berkeley DB's heritage is also apparent in its data model; it has
+none.
+The database stores unstructured key/data pairs, specified as
+variable length byte strings.
+This leaves schema design and representation issues the responsibility
+of the application, which is ideal for an embedded environment.
+Applications retain full control over specification of their data
+types, representation, index values, and index relationships.
+In other words, Berkeley DB provides a robust, high-performance,
+keyed storage system, not a particular database management system.
+We have designed for simplicity and performance, trading off
+complex, general purpose support that is better encapsulated in
+applications.
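+<p>
+Because the store holds only untyped key/data byte strings, the
+programmatic interface is correspondingly small.  The sketch below
+stores a single key/data pair; it assumes a DB 4.1-style DB->open
+signature (which takes a transaction handle), and the file name and
+record contents are hypothetical.
+<pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/* Sketch: store one key/data pair; the library imposes no schema. */
+int
+put_example(void)
+{
+	DB *dbp;
+	DBT key, data;
+	int ret;
+
+	if ((ret = db_create(&dbp, NULL, 0)) != 0)
+		return (ret);
+	if ((ret = dbp->open(dbp, NULL, "settings.db", NULL,
+	    DB_BTREE, DB_CREATE, 0600)) != 0) {
+		dbp->close(dbp, 0);
+		return (ret);
+	}
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	key.data = "channel";			/* Keys and data are raw bytes. */
+	key.size = strlen(key.data) + 1;
+	data.data = "42";
+	data.size = strlen(data.data) + 1;
+
+	ret = dbp->put(dbp, NULL, &key, &data, 0);
+	dbp->close(dbp, 0);
+	return (ret);
+}
+</pre>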
+<p>
+Another element of Berkeley DB's programmatic interface is its
+customizability; applications can specify Btree comparison and
+prefix compression functions, hash functions, error routines,
+and recovery models.
+This means that embedded applications can tailor the underlying
+database to best suit their data demands.
+Similarly, the utilities traditionally bundled with a database
+manager (e.g., recovery, dump/restore, archive) are implemented
+as tiny wrapper programs around library routines. This means
+that it is not necessary to run separate applications for the
+utilities. Instead, independent threads can act as utility
+daemons, or regular query threads can perform utility functions.
+Many of the current products built on Berkeley DB are bundled as
+a single large server with independent threads that perform functions
+such as checkpoint, deadlock detection, and performance monitoring.
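+<p>
+As one concrete example of the customizability described above, an
+application can replace the default byte-string Btree key comparison.
+The sketch below orders keys as native 32-bit unsigned integers; it
+assumes the DB 4.x-style DB->set_bt_compare callback signature, and
+the integer key encoding and file name are hypothetical application
+choices.
+<pre>
+#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+/*
+ * Sketch: order a Btree by a 4-byte unsigned integer key rather than
+ * by the default byte-string comparison.
+ */
+static int
+uint32_compare(DB *dbp, const DBT *a, const DBT *b)
+{
+	u_int32_t ai, bi;
+
+	/* Keys are assumed to be native-order 32-bit integers. */
+	memcpy(&ai, a->data, sizeof(ai));
+	memcpy(&bi, b->data, sizeof(bi));
+	if (ai == bi)
+		return (0);
+	return (ai > bi ? 1 : -1);
+}
+
+int
+open_with_custom_order(DB *dbp)
+{
+	/* Must be configured before the database is created or opened. */
+	dbp->set_bt_compare(dbp, uint32_compare);
+	return (dbp->open(dbp, NULL, "counters.db", NULL,
+	    DB_BTREE, DB_CREATE, 0600));
+}
+</pre>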
+<p>
+As mentioned earlier, living in an embedded environment requires
+flexible management of storage.
+Berkeley DB does not require any preallocation of disk space
+for log or data files.
+While many commercial database systems take complete control
+of a raw device, Berkeley DB uses a normal file system, and
+can therefore, safely and easily share a data space with other
+programs.
+All databases and log files are native files of the host environment,
+so whatever utilities are provided by the environment can be used
+to manage database files as well.
+<p>
+Berkeley DB provides three different memory models for its
+management of shared information.
+Applications can use the IEEE Std 1003.1b-1993 (POSIX) <tt>mmap</tt>
+interface to share
+data, they can use system shared memory, as frequently provided
+by the shmget family of interfaces, or they can use per-process
+heap memory (e.g., malloc).
+Applications that require no permanent storage and do not provide
+shared memory facilities can still use Berkeley DB by requesting
+strictly private memory and specifying that all databases be
+memory-resident.
+This provides pure-memory operation.
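+<p>
+A minimal sketch of the pure-memory case follows, assuming DB
+4.x-style environment flags (DB_PRIVATE for per-process heap memory,
+DB_SYSTEM_MEM for shmget-style segments; the default is mmap-backed
+files).
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Sketch: the memory model is chosen when the environment is opened.
+ * DB_PRIVATE allocates all shared regions from per-process heap
+ * memory, which suits a single-process gizmo with no file system.
+ */
+int
+open_private_env(DB_ENV **envp)
+{
+	int ret;
+
+	if ((ret = db_env_create(envp, 0)) != 0)
+		return (ret);
+	/* NULL home directory: the regions stay entirely in heap memory. */
+	return ((*envp)->open(*envp, NULL,
+	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0));
+}
+</pre>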
+<p>
+Lastly, Berkeley DB is designed for rapid startup -- recovery can
+happen automatically as part of system initialization.
+This means that Berkeley DB works correctly in environments where
+gizmos are suddenly shut down and restarted.
+
+<h2>4. Extensions for Embedded Environments </h2>
+While the Berkeley DB library has been designed for use in
+embedded systems, all the features described above are useful
+in more conventional systems as well.
+In this section, we discuss a number of features and "automatic
+knobs" that are specifically geared
+toward the more constrained environments found in gizmo databases.
+
+<h3>4.1 Automatic compression</h3>
+Following the programmatic interface design philosophy, we
+support application-specific (or default) compression routines.
+These can be geared toward the particular data types present
+in the application's dataset, thus providing better compression
+than a general purpose routine.
+Note that the application could specify an encryption
+function instead, creating encrypted rather than compressed databases.
+Alternatively, the application might specify a function that performs
+both compression and encryption.
+<p>
+As applications are also permitted to specify comparison and hash
+functions, the application can choose to organize its data based
+either on uncompressed and clear-text data or compressed and encrypted
+data.
+If the application indicates that data should be compared in its
+processed form (i.e., compressed and encrypted), then the compression
+and encryption are performed on individual data items and the in-memory
+representation retains these characteristics.
+However, if the application indicates that data should be compared in
+its original form, then entire pages are transformed upon being read
+into or written out of the main memory buffer cache.
+These two alternatives provide the flexibility to trade space
+and security for performance.
+
+<h3>4.2 In-memory logging & transactions</h3>
+One of the four key properties of transaction systems is durability.
+This means that transaction systems are designed for permanent storage
+(most commonly disk). However, as mentioned above, embedded systems
+do not necessarily contain any such storage.
+Nevertheless, transactions can be useful in this environment to
+preserve the semantic integrity of the underlying storage.
+Berkeley DB optionally provides logging functionality and
+transaction support regardless of whether the database and logs
+are on disk or in memory.
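+<p>
+A minimal sketch of such a configuration, assuming DB 4.1-style
+interfaces: the environment regions live in per-process heap memory
+(DB_PRIVATE) and the database is opened with a NULL file name, making
+it memory-resident.  Whether the log records themselves can also be
+kept purely in memory depends on the release and its configuration.
+<pre>
+#include &lt;db.h&gt;
+
+/* Sketch: a transactional environment for a memory-resident database. */
+int
+open_inmem_txn(DB_ENV **envp, DB **dbpp)
+{
+	int ret;
+
+	if ((ret = db_env_create(envp, 0)) != 0)
+		return (ret);
+	if ((ret = (*envp)->open(*envp, NULL,
+	    DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL |
+	    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN, 0)) != 0)
+		return (ret);
+
+	if ((ret = db_create(dbpp, *envp, 0)) != 0)
+		return (ret);
+	/* NULL file name: the database lives only in the cache. */
+	return ((*dbpp)->open(*dbpp, NULL, NULL, NULL,
+	    DB_BTREE, DB_CREATE, 0));
+}
+</pre>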
+
+<h3>4.3 Remote Logs</h3>
+While we do not expect users to back up their television sets and
+toasters, it is conceivable that a set-top box provided by a
+cable carrier should, in fact, be backed up by that cable carrier.
+The ability to store logs remotely can provide "information appliance"
+functionality, and can also be used in conjunction with local logs
+to enhance reliability.
+Furthermore, remote logs provide for catastrophic recovery, e.g.,
+after the loss or destruction of the gizmo.
+
+<h3>4.4 Application References to Database Buffers</h3>
+
+Typically, when data is returned to the user, it must be copied
+from the data manager's buffer cache (or data page) into the
+application's memory.
+However, in an embedded environment, the robustness of the
+total software package is of paramount importance, not the
+isolation between the application and the data manager.
+As a result, it is possible for the data manager to avoid
+copies by giving applications direct references to data items
+in a shared memory cache.
+This is a significant performance optimization that can be
+allowed when the application and data manager are tightly
+integrated.
+
+<h3>4.5 Recoverable database creation/deletion</h3>
+
+In a conventional database management system, the creation of
+database tables (relations) and indices is a heavyweight operation
+that is not recoverable.
+This is not acceptable in a complex embedded environment, where
+instantaneous recovery and robust operation in the face of
+all types of database operations are essential.
+While Berkeley DB files can be removed using normal file system
+utilities, we provide transaction-protected operations that
+allow us to recover both database creation and deletion.
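+<p>
+A sketch of both operations follows, assuming a DB 4.1-style API in
+which DB->open accepts a transaction handle and DB_ENV->dbremove
+removes a database transactionally; the file name is hypothetical.
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Sketch: create and later remove a database under transaction
+ * protection, so that a crash at any point leaves the environment
+ * consistent after recovery.
+ */
+int
+recreate_db(DB_ENV *env)
+{
+	DB *dbp;
+	DB_TXN *txn;
+	int ret;
+
+	/* Transaction-protected creation. */
+	if ((ret = db_create(&dbp, env, 0)) != 0)
+		return (ret);
+	if ((ret = env->txn_begin(env, NULL, &txn, 0)) != 0)
+		return (ret);
+	if ((ret = dbp->open(dbp, txn, "prefs.db", NULL,
+	    DB_BTREE, DB_CREATE, 0600)) != 0) {
+		txn->abort(txn);
+		dbp->close(dbp, 0);
+		return (ret);
+	}
+	if ((ret = txn->commit(txn, 0)) != 0)
+		return (ret);
+	dbp->close(dbp, 0);
+
+	/* Transaction-protected removal. */
+	if ((ret = env->txn_begin(env, NULL, &txn, 0)) != 0)
+		return (ret);
+	if ((ret = env->dbremove(env, txn, "prefs.db", NULL, 0)) != 0) {
+		txn->abort(txn);
+		return (ret);
+	}
+	return (txn->commit(txn, 0));
+}
+</pre>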
+
+<h3>4.6 Adaptive concurrency control</h3>
+The Berkeley DB package uses page-level locking by default.
+This trades off fine grain concurrency control for simplicity
+during recovery. (Finer grain concurrency control can be
+obtained by reducing the page size in the database.)
+However, when multiple threads/processes perform page-locking
+in the presence of writing operations, there is the
+potential for deadlock.
+As some environments do not need or desire the overhead of
+logging and transactions, it is important to provide the
+ability for concurrent access without the potential for
+deadlock.
+<p>
+Berkeley DB provides an option to perform coarser grain,
+deadlock-free locking.
+Rather than locking on pages, locking is performed at the
+interface to the database.
+Multiple readers or a single writer are allowed to be
+active in the database at any instant in time, with
+conflicting requests queued automatically.
+The presence of cursors, through which applications can both
+read and write data, complicates this design.
+If a cursor is currently being used for reading, but will later
+be used to write, the system will be deadlock prone if no
+special precautions are taken.
+To handle this situation, we require that, when a cursor is
+created, the application specify any future intention to write.
+If there is an intention to write, the cursor is granted an
+intention-to-write lock which does not conflict with readers,
+but does conflict with other intention-to-write locks and write
+locks.
+The end result is that the application is limited to a single
+potentially writing cursor accessing the database at any point
+in time.
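+<p>
+This deadlock-free mode corresponds to the library's Concurrent Data
+Store configuration; the sketch below assumes the DB 4.x-style flags
+DB_INIT_CDB and DB_WRITECURSOR, and the environment path shown in the
+comment is hypothetical.
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Sketch: coarse-grained, deadlock-free locking.  The environment is
+ * assumed to have been opened elsewhere with something like:
+ *	env->open(env, "/gizmo/db",
+ *	    DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL, 0);
+ */
+int
+cds_cursor_example(DB *dbp)
+{
+	DBC *dbc;
+	int ret;
+
+	/* A cursor that will later write must say so when created. */
+	if ((ret = dbp->cursor(dbp, NULL, &dbc, DB_WRITECURSOR)) != 0)
+		return (ret);
+
+	/* ... read with dbc->c_get(), update with dbc->c_put() ... */
+
+	return (dbc->c_close(dbc));
+}
+</pre>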
+<p>
+Under periods of low contention (but potentially high throughput),
+the normal page-level locking provides the best overall throughput.
+However, as contention rises, so does the potential for deadlock.
+At some cross-over point, switching to the less concurrent but
+deadlock-free locking protocol will yield higher throughput,
+as operations never need to be retried.
+Given the operating conditions of an embedded database manager,
+it is useful to make this change automatically when the system
+itself detects high contention.
+
+<h3>4.7 Adaptive synchronization</h3>
+
+In addition to the logical locks that protect the integrity of the
+database pages, Berkeley DB must synchronize access to shared memory
+data structures, such as the lock table, in-memory buffer pool, and
+in-memory log buffer.
+Each independent module uses a single mutex to protect its shared
+data structures, under the assumption that operations that require
+the mutex are very short and the potential for conflict is
+low.
+Unfortunately, in highly concurrent environments with multiple processors
+present, this assumption is not always true.
+When this assumption becomes invalid (that is, we observe significant
+contention for the subsystem mutexes), we can switch over to a finer-grained
+concurrency model for the mutexes.
+Once again, there is a performance trade-off. Fine-grain mutexes
+impose a penalty of approximately 25% (due to the increased number
+of mutexes required for each operation), but allow for higher throughput.
+Using fine-grain mutexes under low contention would decrease
+performance, so the system must be monitored carefully and the
+change made only when it will increase system
+throughput without jeopardizing latency.
+
+<h2>5. Footprint of an Embedded System</h2>
+While traditional systems compete on price-performance, the
+embedded players will compete on price, features, and footprint.
+The earlier sections have focused on features; in this section
+we focus on footprint.
+<p>
+Oracle reports that Oracle Lite 3.0 requires 350 KB to 750 KB
+of memory and approximately 2.5 MB of hard disk space <a href="#Oracle">[7]</a>.
+This includes drivers for interfaces such as ODBC and JDBC.
+In contrast, Berkeley DB ranges in size from 75 KB to under 200 KB,
+forgoing heavyweight interfaces such as ODBC and JDBC and
+providing a variety of deployed sizes that can be used depending
+on application needs. At the low end, applications requiring
+a simple single-user access method can choose from either extended
+linear hashing, B+ trees, or record-number based retrieval and
+pay only the 75 KB space requirement.
+Applications requiring all three access methods will observe the
+110 KB footprint.
+At the high end, a fully recoverable, high-performance system
+occupies less than a quarter megabyte of memory.
+This is a system you can easily incorporate in your toaster oven.
+Table 1 shows the per-module breakdown of the entire Berkeley DB
+library. Note that this does not include memory used to cache database
+pages.
+
+<table border>
+<tr><th colspan=4>Object sizes in bytes</th></tr>
+<tr><th align=left>Subsystem</th><th align=center>Text</th><th align=center>Data</th><th align=center>Bss</th></tr>
+<tr><td>Btree-specific routines</td><td align=right>28812</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Recno-specific routines</td><td align=right>7211</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Hash-specific routines</td><td align=right>23742</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><td>Memory Pool</td><td align=right>14535</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>Access method common code</td><td align=right>23252</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td>OS compatibility library</td><td align=right>4980</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td>Support utilities</td><td align=right>6165</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th>All modules for Btree access method only</th><td align=right>77744</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><th>All modules for Recno access method only</th><td align=right>84955</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><th>All modules for Hash access method only</th><td align=right>72674</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th align=left>All Access Methods</th><td align=right>108697</td><td align=right>52</td><td align=right>0</td></tr>
+<tr><td colspan=4><br></td></tr>
+<tr><td>Locking</td><td align=right>12533</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><td>Recovery</td><td align=right>26948</td><td align=right>8</td><td align=right>4</td></tr>
+<tr><td>Logging</td><td align=right>37367</td><td align=right>0</td><td align=right>0</td></tr>
+<tr><td colspan=4></td></tr>
+<tr><th align=left>Full Package</th><td align=right>185545</td><td align=right>60</td><td align=right>4</td></tr>
+<tr><br></tr>
+</table>
+
+<h2>6. Related Work</h2>
+
+Every three to five years, leading researchers in the database
+community convene to identify future directions in database
+research.
+They produce a report of this meeting, named for the year and
+location of the meeting.
+The most recent of these reports, the 1998 Asilomar report,
+identifies the embedded database market as one of the
+high growth areas in database research <a href="#Bern98">[1]</a>.
+Not surprisingly, market analysts identify the embedded database
+market as a high-growth area in the commercial sector as well <a href="#Host98">
+[5]</a>.
+<p>
+The Asilomar report identifies a new class of database applications, which it
+terms "gizmo" databases: small databases embedded in tiny mobile
+appliances, e.g., smart cards, telephones, and personal digital assistants.
+Such databases must be self-managing, secure, and reliable.
+Thus, the idea is that gizmo databases require plug-and-play data
+management with no database administrator (DBA), no human-settable
+parameters, and the ability to adapt to changing conditions.
+More specifically, the Asilomar authors claim that the goal is
+self-tuning, including defining the physical DB design, the
+logical DB design, and automatic reports and utilities <a href="#Bern98">[1]</a>.
+To date,
+few researchers have accepted this challenge, and there is a dearth
+of research literature on the subject.
+<p>
+Our approach to embedded database administration is fundamentally
+different from that described by the Asilomar authors.
+We adopt their terminology, but view the challenge in supporting
+gizmo databases to be that of self-sustenance <em>after</em> initial
+deployment. Therefore, we find it not only acceptable but
+desirable to assume that application developers control initial
+database design and configuration. To the best of our knowledge,
+none of the published work in this area addresses this approach.
+<p>
+As the research community has not provided guidance in this
+arena, most work in embedded database administration has fallen
+to the commercial vendors.
+These vendors fall into two camps: companies selling databases
+specifically designed for embedding or programmatic access,
+and the major database vendors (e.g., Oracle, Informix, Sybase).
+<p>
+The embedded vendors all acknowledge the need for automatic
+administration, but fail to identify precisely how their
+products actually accomplish this.
+A notable exception is Interbase, whose white paper
+comparing it with Sybase's and Microsoft's SQL servers
+explicitly addresses ease of maintenance.
+Interbase claims that because it uses no log files, there is
+no need for log reclamation, checkpoint tuning, or other
+tasks associated with log management. However, Interbase
+uses Transaction Information Pages, and it is unclear
+how these are reused or reclaimed <a href="#Interbase">[6]</a>.
+Additionally, with a log-free system, they must use
+a FORCE policy (write all pages to disk at commit),
+as defined by Haerder and Reuter <a href="#Haerder">[4]</a>. This has
+serious performance consequences for disk-based systems.
+The approach described in this paper does use logs and
+therefore requires log reclamation,
+but provides hooks so the application may reclaim logs
+safely and programmatically.
+While Berkeley DB does require checkpoints, the goal of
+tuning the checkpoint interval is to bound recovery time.
+Since the checkpoint interval in Berkeley DB can be expressed
+by the amount of log data written, it requires no tuning.
+The application designer sets a target recovery time,
+determines how much log data can be read in that time,
+and specifies the checkpoint interval accordingly. Even as
+load changes, the time to recover does not.
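+<p>
+For example (a minimal sketch assuming the DB 4.x-style
+DB_ENV->txn_checkpoint method, whose first argument is a log-volume
+threshold in kilobytes), a utility thread can simply call:
+<pre>
+#include &lt;db.h&gt;
+
+/*
+ * Sketch: checkpoint only when roughly 512 KB of log has been written
+ * since the last checkpoint, bounding how much log recovery must read.
+ * The 512 KB figure is illustrative, not a recommendation.
+ */
+int
+maybe_checkpoint(DB_ENV *env)
+{
+	return (env->txn_checkpoint(env, 512 /* KB */, 0 /* minutes */, 0));
+}
+</pre>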
+<p>
+The backup approaches taken by Interbase and Berkeley DB
+are similar in that they both allow online backup, but
+rather different in their effect on transactions running
+during backup. As Interbase performs backups as transactions
+<a href="#Interbase">[6]</a>, concurrent queries can suffer potentially long
+delays. Berkeley DB uses native operating system utilities
+and recovery for backups, so there is no interference with
+concurrent activity, other than potential contention on disk
+arms.
+<p>
+There are a number of database vendors selling in
+the embedded market (e.g., Raima,
+Centura, Pervasive, Faircom), but none highlight
+the special requirements of embedded database
+applications.
+On the other end of the spectrum, the major vendors
+(Oracle, Sybase, and Microsoft) are all becoming convinced
+of the importance of the embedded market.
+As mentioned earlier, Oracle has announced its
+Oracle Lite server for embedded use.
+Sybase has announced its UltraLite platform, an "application-optimized,
+high-performance, SQL database engine for professional
+application developers building solutions for mobile and embedded platforms"
+<a href="#Sybase">[8]</a>.
+We believe that SQL is incompatible with the
+gizmo database environment and the truly embedded systems for which Berkeley
+DB is most suitable.
+Microsoft Research is taking a different approach, developing
+technology to assist in automating initial database design and
+index specification <a href="#Chaud98">[2]</a><a href="#Chaud982">[3]</a>.
+As mentioned earlier, we believe that such configuration is not only
+acceptable in the embedded market, but desirable, so that applications
+can tune their database management for the target environment.
+<h2>7. Conclusions</h2>
+The coming wave of embedded systems poses a new set of challenges
+for data management.
+The traditional server-based, big footprint systems designed for
+high performance on big iron are not the right approach in this
+environment.
+Instead, application developers need small, fast, versatile systems
+that can be tailored to a specific environment.
+In this paper, we have identified several of the key issues in
+providing these systems and shown how Berkeley DB provides
+many of the characteristics necessary for such applications.
+
+<h2>8. References</h2>
+<p>
+[1] <a name="Bern98"> Bernstein, P., Brodie, M., Ceri, S., DeWitt, D., Franklin, M.,
+Garcia-Molina, H., Gray, J., Held, J., Hellerstein, J.,
+Jagadish, H., Lesk, M., Maier, D., Naughton, J.,
+Pirahesh, H., Stonebraker, M., Ullman, J.,
+"The Asilomar Report on Database Research,"
+SIGMOD Record 27(4): 74-80, 1998.
+</a>
+<p>
+[2] <a name="Chaud98"> Chaudhuri, S., Narasayya, V.,
+"AutoAdmin 'What-If' Index Analysis Utility,"
+<em>Proceedings of the ACM SIGMOD Conference</em>, Seattle, 1998.
+</a>
+<p>
+[3] <a name="Chaud982"> Chaudhuri, S., Narasayya, V.,
+"An Efficient, Cost-Driver Index Selection Tool for Microsoft SQL Server,"
+<em>Proceedings of the 23rd VLDB Conference</em>, Athens, Greece, 1997.
+</a>
+<p>
+[4] <a name="Harder"> Haerder, T., Reuter, A.,
+"Principles of Transaction-Oriented Database Recovery,"
+<em>Computing Surveys 15</em>,4 (1983), 237-318.
+</a>
+<p>
+[5] <a name="Host98"> Hostetler, M., "Cover Is Off A New Type of Database,"
+Embedded DB News,
+http://www.theadvisors.com/embeddeddbnews.htm,
+5/6/98.
+</a>
+<p>
+[6] <a name="Interbase"> Interbase, "A Comparison of Borland InterBase 4.0
+Sybase SQL Server and Microsoft SQL Server,"
+http://web.interbase.com/products/doc_info_f.html.
+</a>
+<p>
+[7] <a name="Oracle"> Oracle, "Oracle Delivers New Server, Application Suite
+to Power the Web for Mission-Critical Business,"
+http://www.oracle.com.sg/partners/news/newserver.htm,
+May 1998.
+</a>
+<p>
+[8] <a name="Sybase"> Sybase, Sybase UltraLite, http://www.sybase.com/products/ultralite/beta.
+</a>
+<p>
+[9] <a name="TPCC"> Transaction Processing Council, "TPC-C Benchmark Specification,
+Version 3.4," San Jose, CA, August 1998.
+</a>
+<p>
+[10] <a name="TPCD"> Transaction Processing Council, "TPC-D Benchmark Specification,
+Version 2.1," San Jose, CA, April 1999.
+</a>
+</body>
+</html>
+
+
diff --git a/libdb/docs/ref/refs/hash_usenix.ps b/libdb/docs/ref/refs/hash_usenix.ps
new file mode 100644
index 0000000..acdea09
--- /dev/null
+++ b/libdb/docs/ref/refs/hash_usenix.ps
@@ -0,0 +1,12209 @@
+%!PS-Adobe-1.0
+%%Creator: utopia:margo (& Seltzer,608-13E,8072,)
+%%Title: stdin (ditroff)
+%%CreationDate: Tue Dec 11 15:06:45 1990
+%%EndComments
+% @(#)psdit.pro 1.3 4/15/88
+% lib/psdit.pro -- prolog for psdit (ditroff) files
+% Copyright (c) 1984, 1985 Adobe Systems Incorporated. All Rights Reserved.
+% last edit: shore Sat Nov 23 20:28:03 1985
+% RCSID: $Header$
+
+% Changed by Edward Wang (edward@ucbarpa.berkeley.edu) to handle graphics,
+% 17 Feb, 87.
+
+/$DITroff 140 dict def $DITroff begin
+/fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def
+/xi{0 72 11 mul translate 72 resolution div dup neg scale 0 0 moveto
+ /fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def F
+ /pagesave save def}def
+/PB{save /psv exch def currentpoint translate
+ resolution 72 div dup neg scale 0 0 moveto}def
+/PE{psv restore}def
+/arctoobig 90 def /arctoosmall .05 def
+/m1 matrix def /m2 matrix def /m3 matrix def /oldmat matrix def
+/tan{dup sin exch cos div}def
+/point{resolution 72 div mul}def
+/dround {transform round exch round exch itransform}def
+/xT{/devname exch def}def
+/xr{/mh exch def /my exch def /resolution exch def}def
+/xp{}def
+/xs{docsave restore end}def
+/xt{}def
+/xf{/fontname exch def /slotno exch def fontnames slotno get fontname eq not
+ {fonts slotno fontname findfont put fontnames slotno fontname put}if}def
+/xH{/fontheight exch def F}def
+/xS{/fontslant exch def F}def
+/s{/fontsize exch def /fontheight fontsize def F}def
+/f{/fontnum exch def F}def
+/F{fontheight 0 le{/fontheight fontsize def}if
+ fonts fontnum get fontsize point 0 0 fontheight point neg 0 0 m1 astore
+ fontslant 0 ne{1 0 fontslant tan 1 0 0 m2 astore m3 concatmatrix}if
+ makefont setfont .04 fontsize point mul 0 dround pop setlinewidth}def
+/X{exch currentpoint exch pop moveto show}def
+/N{3 1 roll moveto show}def
+/Y{exch currentpoint pop exch moveto show}def
+/S{show}def
+/ditpush{}def/ditpop{}def
+/AX{3 -1 roll currentpoint exch pop moveto 0 exch ashow}def
+/AN{4 2 roll moveto 0 exch ashow}def
+/AY{3 -1 roll currentpoint pop exch moveto 0 exch ashow}def
+/AS{0 exch ashow}def
+/MX{currentpoint exch pop moveto}def
+/MY{currentpoint pop exch moveto}def
+/MXY{moveto}def
+/cb{pop}def % action on unknown char -- nothing for now
+/n{}def/w{}def
+/p{pop showpage pagesave restore /pagesave save def}def
+/Dt{/Dlinewidth exch def}def 1 Dt
+/Ds{/Ddash exch def}def -1 Ds
+/Di{/Dstipple exch def}def 1 Di
+/Dsetlinewidth{2 Dlinewidth mul setlinewidth}def
+/Dsetdash{Ddash 4 eq{[8 12]}{Ddash 16 eq{[32 36]}
+ {Ddash 20 eq{[32 12 8 12]}{[]}ifelse}ifelse}ifelse 0 setdash}def
+/Dstroke{gsave Dsetlinewidth Dsetdash 1 setlinecap stroke grestore
+ currentpoint newpath moveto}def
+/Dl{rlineto Dstroke}def
+/arcellipse{/diamv exch def /diamh exch def oldmat currentmatrix pop
+ currentpoint translate 1 diamv diamh div scale /rad diamh 2 div def
+ currentpoint exch rad add exch rad -180 180 arc oldmat setmatrix}def
+/Dc{dup arcellipse Dstroke}def
+/De{arcellipse Dstroke}def
+/Da{/endv exch def /endh exch def /centerv exch def /centerh exch def
+ /cradius centerv centerv mul centerh centerh mul add sqrt def
+ /eradius endv endv mul endh endh mul add sqrt def
+ /endang endv endh atan def
+ /startang centerv neg centerh neg atan def
+ /sweep startang endang sub dup 0 lt{360 add}if def
+ sweep arctoobig gt
+ {/midang startang sweep 2 div sub def /midrad cradius eradius add 2 div def
+ /midh midang cos midrad mul def /midv midang sin midrad mul def
+ midh neg midv neg endh endv centerh centerv midh midv Da
+ Da}
+ {sweep arctoosmall ge
+ {/controldelt 1 sweep 2 div cos sub 3 sweep 2 div sin mul div 4 mul def
+ centerv neg controldelt mul centerh controldelt mul
+ endv neg controldelt mul centerh add endh add
+ endh controldelt mul centerv add endv add
+ centerh endh add centerv endv add rcurveto Dstroke}
+ {centerh endh add centerv endv add rlineto Dstroke}
+ ifelse}
+ ifelse}def
+/Dpatterns[
+[%cf[widthbits]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000103810000000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000001038100000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0042660000246600>]
+[8<0000990000990000>]
+[8<0804020180402010>]
+[8<2418814242811824>]
+[8<6699996666999966>]
+[8<8000000008000000>]
+[8<00001c3e363e1c00>]
+[8<0000000000000000>]
+[32<00000040000000c00000004000000040000000e0000000000000000000000000>]
+[32<00000000000060000000900000002000000040000000f0000000000000000000>]
+[32<000000000000000000e0000000100000006000000010000000e0000000000000>]
+[32<00000000000000002000000060000000a0000000f00000002000000000000000>]
+[32<0000000e0000000000000000000000000000000f000000080000000e00000001>]
+[32<0000090000000600000000000000000000000000000007000000080000000e00>]
+[32<00010000000200000004000000040000000000000000000000000000000f0000>]
+[32<0900000006000000090000000600000000000000000000000000000006000000>]]
+[%ug
+[8<0000020000000000>]
+[8<0000020000002000>]
+[8<0004020000002000>]
+[8<0004020000402000>]
+[8<0004060000402000>]
+[8<0004060000406000>]
+[8<0006060000406000>]
+[8<0006060000606000>]
+[8<00060e0000606000>]
+[8<00060e000060e000>]
+[8<00070e000060e000>]
+[8<00070e000070e000>]
+[8<00070e020070e000>]
+[8<00070e020070e020>]
+[8<04070e020070e020>]
+[8<04070e024070e020>]
+[8<04070e064070e020>]
+[8<04070e064070e060>]
+[8<06070e064070e060>]
+[8<06070e066070e060>]
+[8<06070f066070e060>]
+[8<06070f066070f060>]
+[8<060f0f066070f060>]
+[8<060f0f0660f0f060>]
+[8<060f0f0760f0f060>]
+[8<060f0f0760f0f070>]
+[8<0e0f0f0760f0f070>]
+[8<0e0f0f07e0f0f070>]
+[8<0e0f0f0fe0f0f070>]
+[8<0e0f0f0fe0f0f0f0>]
+[8<0f0f0f0fe0f0f0f0>]
+[8<0f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f1f9>]
+[8<bf8f1f9ff9f8f1f9>]
+[8<bf8f1f9ffbf8f1f9>]
+[8<bf8f1fdffbf8f1f9>]
+[8<bf8f1fdffbf8f1fd>]
+[8<ff8f1fdffbf8f1fd>]
+[8<ff8f1fdffff8f1fd>]
+[8<ff8f1ffffff8f1fd>]
+[8<ff8f1ffffff8f1ff>]
+[8<ff9f1ffffff8f1ff>]
+[8<ff9f1ffffff9f1ff>]
+[8<ff9f9ffffff9f1ff>]
+[8<ff9f9ffffff9f9ff>]
+[8<ffbf9ffffff9f9ff>]
+[8<ffbf9ffffffbf9ff>]
+[8<ffbfdffffffbf9ff>]
+[8<ffbfdffffffbfdff>]
+[8<ffffdffffffbfdff>]
+[8<ffffdffffffffdff>]
+[8<fffffffffffffdff>]
+[8<ffffffffffffffff>]]
+[%mg
+[8<8000000000000000>]
+[8<0822080080228000>]
+[8<0204081020408001>]
+[8<40e0400000000000>]
+[8<66999966>]
+[8<8001000010080000>]
+[8<81c36666c3810000>]
+[8<f0e0c08000000000>]
+[16<07c00f801f003e007c00f800f001e003c007800f001f003e007c00f801f003e0>]
+[16<1f000f8007c003e001f000f8007c003e001f800fc007e003f001f8007c003e00>]
+[8<c3c300000000c3c3>]
+[16<0040008001000200040008001000200040008000000100020004000800100020>]
+[16<0040002000100008000400020001800040002000100008000400020001000080>]
+[16<1fc03fe07df0f8f8f07de03fc01f800fc01fe03ff07df8f87df03fe01fc00f80>]
+[8<80>]
+[8<8040201000000000>]
+[8<84cc000048cc0000>]
+[8<9900009900000000>]
+[8<08040201804020100800020180002010>]
+[8<2418814242811824>]
+[8<66999966>]
+[8<8000000008000000>]
+[8<70f8d8f870000000>]
+[8<0814224180402010>]
+[8<aa00440a11a04400>]
+[8<018245aa45820100>]
+[8<221c224180808041>]
+[8<88000000>]
+[8<0855800080550800>]
+[8<2844004482440044>]
+[8<0810204080412214>]
+[8<00>]]]def
+/Dfill{
+ transform /maxy exch def /maxx exch def
+ transform /miny exch def /minx exch def
+ minx maxx gt{/minx maxx /maxx minx def def}if
+ miny maxy gt{/miny maxy /maxy miny def def}if
+ Dpatterns Dstipple 1 sub get exch 1 sub get
+ aload pop /stip exch def /stipw exch def /stiph 128 def
+ /imatrix[stipw 0 0 stiph 0 0]def
+ /tmatrix[stipw 0 0 stiph 0 0]def
+ /minx minx cvi stiph idiv stiph mul def
+ /miny miny cvi stipw idiv stipw mul def
+ gsave eoclip 0 setgray
+ miny stiph maxy{
+ tmatrix exch 5 exch put
+ minx stipw maxx{
+ tmatrix exch 4 exch put tmatrix setmatrix
+ stipw stiph true imatrix {stip} imagemask
+ }for
+ }for
+ grestore
+}def
+/Dp{Dfill Dstroke}def
+/DP{Dfill currentpoint newpath moveto}def
+end
+
+/ditstart{$DITroff begin
+ /nfonts 60 def % NFONTS makedev/ditroff dependent!
+ /fonts[nfonts{0}repeat]def
+ /fontnames[nfonts{()}repeat]def
+/docsave save def
+}def
+
+% character outcalls
+/oc{
+ /pswid exch def /cc exch def /name exch def
+ /ditwid pswid fontsize mul resolution mul 72000 div def
+ /ditsiz fontsize resolution mul 72 div def
+ ocprocs name known{ocprocs name get exec}{name cb}ifelse
+}def
+/fractm [.65 0 0 .6 0 0] def
+/fraction{
+ /fden exch def /fnum exch def gsave /cf currentfont def
+ cf fractm makefont setfont 0 .3 dm 2 copy neg rmoveto
+ fnum show rmoveto currentfont cf setfont(\244)show setfont fden show
+ grestore ditwid 0 rmoveto
+}def
+/oce{grestore ditwid 0 rmoveto}def
+/dm{ditsiz mul}def
+/ocprocs 50 dict def ocprocs begin
+(14){(1)(4)fraction}def
+(12){(1)(2)fraction}def
+(34){(3)(4)fraction}def
+(13){(1)(3)fraction}def
+(23){(2)(3)fraction}def
+(18){(1)(8)fraction}def
+(38){(3)(8)fraction}def
+(58){(5)(8)fraction}def
+(78){(7)(8)fraction}def
+(sr){gsave 0 .06 dm rmoveto(\326)show oce}def
+(is){gsave 0 .15 dm rmoveto(\362)show oce}def
+(->){gsave 0 .02 dm rmoveto(\256)show oce}def
+(<-){gsave 0 .02 dm rmoveto(\254)show oce}def
+(==){gsave 0 .05 dm rmoveto(\272)show oce}def
+(uc){gsave currentpoint 400 .009 dm mul add translate
+ 8 -8 scale ucseal oce}def
+end
+
+% an attempt at a PostScript FONT to implement ditroff special chars
+% this will enable us to
+% cache the little buggers
+% generate faster, more compact PS out of psdit
+% confuse everyone (including myself)!
+50 dict dup begin
+/FontType 3 def
+/FontName /DIThacks def
+/FontMatrix [.001 0 0 .001 0 0] def
+/FontBBox [-260 -260 900 900] def% a lie but ...
+/Encoding 256 array def
+0 1 255{Encoding exch /.notdef put}for
+Encoding
+ dup 8#040/space put %space
+ dup 8#110/rc put %right ceil
+ dup 8#111/lt put %left top curl
+ dup 8#112/bv put %bold vert
+ dup 8#113/lk put %left mid curl
+ dup 8#114/lb put %left bot curl
+ dup 8#115/rt put %right top curl
+ dup 8#116/rk put %right mid curl
+ dup 8#117/rb put %right bot curl
+ dup 8#120/rf put %right floor
+ dup 8#121/lf put %left floor
+ dup 8#122/lc put %left ceil
+ dup 8#140/sq put %square
+ dup 8#141/bx put %box
+ dup 8#142/ci put %circle
+ dup 8#143/br put %box rule
+ dup 8#144/rn put %root extender
+ dup 8#145/vr put %vertical rule
+ dup 8#146/ob put %outline bullet
+ dup 8#147/bu put %bullet
+ dup 8#150/ru put %rule
+ dup 8#151/ul put %underline
+ pop
+/DITfd 100 dict def
+/BuildChar{0 begin
+ /cc exch def /fd exch def
+ /charname fd /Encoding get cc get def
+ /charwid fd /Metrics get charname get def
+ /charproc fd /CharProcs get charname get def
+ charwid 0 fd /FontBBox get aload pop setcachedevice
+ 2 setlinejoin 40 setlinewidth
+ newpath 0 0 moveto gsave charproc grestore
+ end}def
+/BuildChar load 0 DITfd put
+/CharProcs 50 dict def
+CharProcs begin
+/space{}def
+/.notdef{}def
+/ru{500 0 rls}def
+/rn{0 840 moveto 500 0 rls}def
+/vr{0 800 moveto 0 -770 rls}def
+/bv{0 800 moveto 0 -1000 rls}def
+/br{0 840 moveto 0 -1000 rls}def
+/ul{0 -140 moveto 500 0 rls}def
+/ob{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath stroke}def
+/bu{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath fill}def
+/sq{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath stroke}def
+/bx{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath fill}def
+/ci{500 360 rmoveto currentpoint newpath 333 0 360 arc
+ 50 setlinewidth stroke}def
+
+/lt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 add exch s4 a4p stroke}def
+/lb{0 800 moveto 0 -550 rlineto currx -200 2cx s4 add exch s4 a4p stroke}def
+/rt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 sub exch s4 a4p stroke}def
+/rb{0 800 moveto 0 -500 rlineto currx -200 2cx s4 sub exch s4 a4p stroke}def
+/lk{0 800 moveto 0 300 -300 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/rk{0 800 moveto 0 300 s2 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/lf{0 800 moveto 0 -1000 rlineto s4 0 rls}def
+/rf{0 800 moveto 0 -1000 rlineto s4 neg 0 rls}def
+/lc{0 -200 moveto 0 1000 rlineto s4 0 rls}def
+/rc{0 -200 moveto 0 1000 rlineto s4 neg 0 rls}def
+end
+
+/Metrics 50 dict def Metrics begin
+/.notdef 0 def
+/space 500 def
+/ru 500 def
+/br 0 def
+/lt 416 def
+/lb 416 def
+/rt 416 def
+/rb 416 def
+/lk 416 def
+/rk 416 def
+/rc 416 def
+/lc 416 def
+/rf 416 def
+/lf 416 def
+/bv 416 def
+/ob 350 def
+/bu 350 def
+/ci 750 def
+/bx 750 def
+/sq 750 def
+/rn 500 def
+/ul 500 def
+/vr 0 def
+end
+
+DITfd begin
+/s2 500 def /s4 250 def /s3 333 def
+/a4p{arcto pop pop pop pop}def
+/2cx{2 copy exch}def
+/rls{rlineto stroke}def
+/currx{currentpoint pop}def
+/dround{transform round exch round exch itransform} def
+end
+end
+/DIThacks exch definefont pop
+ditstart
+(psc)xT
+576 1 1 xr
+1(Times-Roman)xf 1 f
+2(Times-Italic)xf 2 f
+3(Times-Bold)xf 3 f
+4(Times-BoldItalic)xf 4 f
+5(Helvetica)xf 5 f
+6(Helvetica-Bold)xf 6 f
+7(Courier)xf 7 f
+8(Courier-Bold)xf 8 f
+9(Symbol)xf 9 f
+10(DIThacks)xf 10 f
+10 s
+1 f
+xi
+%%EndProlog
+
+%%Page: 1 1
+10 s 10 xH 0 xS 1 f
+3 f
+22 s
+1249 626(A)N
+1420(N)X
+1547(ew)X
+1796(H)X
+1933(ashing)X
+2467(P)X
+2574(ackage)X
+3136(for)X
+3405(U)X
+3532(N)X
+3659(IX)X
+2 f
+20 s
+3855 562(1)N
+1 f
+12 s
+1607 779(Margo)N
+1887(Seltzer)X
+9 f
+2179(-)X
+1 f
+2256(University)X
+2686(of)X
+2790(California,)X
+3229(Berkeley)X
+2015 875(Ozan)N
+2242(Yigit)X
+9 f
+2464(-)X
+1 f
+2541(York)X
+2762(University)X
+3 f
+2331 1086(ABSTRACT)N
+1 f
+10 s
+1152 1222(UNIX)N
+1385(support)X
+1657(of)X
+1756(disk)X
+1921(oriented)X
+2216(hashing)X
+2497(was)X
+2654(originally)X
+2997(provided)X
+3314(by)X
+2 f
+3426(dbm)X
+1 f
+3595([ATT79])X
+3916(and)X
+1152 1310(subsequently)N
+1595(improved)X
+1927(upon)X
+2112(in)X
+2 f
+2199(ndbm)X
+1 f
+2402([BSD86].)X
+2735(In)X
+2826(AT&T)X
+3068(System)X
+3327(V,)X
+3429(in-memory)X
+3809(hashed)X
+1152 1398(storage)N
+1420(and)X
+1572(access)X
+1814(support)X
+2090(was)X
+2251(added)X
+2479(in)X
+2577(the)X
+2 f
+2711(hsearch)X
+1 f
+3000(library)X
+3249(routines)X
+3542([ATT85].)X
+3907(The)X
+1152 1486(result)N
+1367(is)X
+1457(a)X
+1530(system)X
+1789(with)X
+1968(two)X
+2125(incompatible)X
+2580(hashing)X
+2865(schemes,)X
+3193(each)X
+3377(with)X
+3555(its)X
+3666(own)X
+3840(set)X
+3965(of)X
+1152 1574(shortcomings.)N
+1152 1688(This)N
+1316(paper)X
+1517(presents)X
+1802(the)X
+1922(design)X
+2152(and)X
+2289(performance)X
+2717(characteristics)X
+3198(of)X
+3286(a)X
+3343(new)X
+3498(hashing)X
+3768(package)X
+1152 1776(providing)N
+1483(a)X
+1539(superset)X
+1822(of)X
+1909(the)X
+2027(functionality)X
+2456(provided)X
+2761(by)X
+2 f
+2861(dbm)X
+1 f
+3019(and)X
+2 f
+3155(hsearch)X
+1 f
+3409(.)X
+3469(The)X
+3614(new)X
+3768(package)X
+1152 1864(uses)N
+1322(linear)X
+1537(hashing)X
+1818(to)X
+1912(provide)X
+2189(ef\256cient)X
+2484(support)X
+2755(of)X
+2853(both)X
+3026(memory)X
+3324(based)X
+3538(and)X
+3685(disk)X
+3849(based)X
+1152 1952(hash)N
+1319(tables)X
+1526(with)X
+1688(performance)X
+2115(superior)X
+2398(to)X
+2480(both)X
+2 f
+2642(dbm)X
+1 f
+2800(and)X
+2 f
+2936(hsearch)X
+1 f
+3210(under)X
+3413(most)X
+3588(conditions.)X
+3 f
+1380 2128(Introduction)N
+1 f
+892 2260(Current)N
+1196(UNIX)X
+1456(systems)X
+1768(offer)X
+1984(two)X
+2163(forms)X
+2409(of)X
+720 2348(hashed)N
+973(data)X
+1137(access.)X
+2 f
+1413(Dbm)X
+1 f
+1599(and)X
+1745(its)X
+1850(derivatives)X
+2231(provide)X
+720 2436(keyed)N
+939(access)X
+1171(to)X
+1259(disk)X
+1418(resident)X
+1698(data)X
+1858(while)X
+2 f
+2062(hsearch)X
+1 f
+2342(pro-)X
+720 2524(vides)N
+929(access)X
+1175(for)X
+1309(memory)X
+1616(resident)X
+1910(data.)X
+2124(These)X
+2356(two)X
+720 2612(access)N
+979(methods)X
+1302(are)X
+1453(incompatible)X
+1923(in)X
+2037(that)X
+2209(memory)X
+720 2700(resident)N
+1011(hash)X
+1195(tables)X
+1419(may)X
+1593(not)X
+1731(be)X
+1843(stored)X
+2075(on)X
+2191(disk)X
+2360(and)X
+720 2788(disk)N
+884(resident)X
+1169(tables)X
+1387(cannot)X
+1632(be)X
+1739(read)X
+1909(into)X
+2063(memory)X
+2360(and)X
+720 2876(accessed)N
+1022(using)X
+1215(the)X
+1333(in-memory)X
+1709(routines.)X
+2 f
+892 2990(Dbm)N
+1 f
+1091(has)X
+1241(several)X
+1512(shortcomings.)X
+2026(Since)X
+2247(data)X
+2423(is)X
+720 3078(assumed)N
+1032(to)X
+1130(be)X
+1242(disk)X
+1411(resident,)X
+1721(each)X
+1905(access)X
+2146(requires)X
+2440(a)X
+720 3166(system)N
+963(call,)X
+1120(and)X
+1257(almost)X
+1491(certainly,)X
+1813(a)X
+1869(disk)X
+2022(operation.)X
+2365(For)X
+720 3254(extremely)N
+1072(large)X
+1264(databases,)X
+1623(where)X
+1851(caching)X
+2131(is)X
+2214(unlikely)X
+720 3342(to)N
+810(be)X
+914(effective,)X
+1244(this)X
+1386(is)X
+1466(acceptable,)X
+1853(however,)X
+2177(when)X
+2378(the)X
+720 3430(database)N
+1022(is)X
+1100(small)X
+1298(\(i.e.)X
+1447(the)X
+1569(password)X
+1896(\256le\),)X
+2069(performance)X
+720 3518(improvements)N
+1204(can)X
+1342(be)X
+1443(obtained)X
+1744(through)X
+2018(caching)X
+2293(pages)X
+720 3606(of)N
+818(the)X
+947(database)X
+1255(in)X
+1348(memory.)X
+1685(In)X
+1782(addition,)X
+2 f
+2094(dbm)X
+1 f
+2262(cannot)X
+720 3694(store)N
+902(data)X
+1062(items)X
+1261(whose)X
+1492(total)X
+1660(key)X
+1802(and)X
+1943(data)X
+2102(size)X
+2252(exceed)X
+720 3782(the)N
+850(page)X
+1034(size)X
+1191(of)X
+1290(the)X
+1420(hash)X
+1599(table.)X
+1827(Similarly,)X
+2176(if)X
+2257(two)X
+2409(or)X
+720 3870(more)N
+907(keys)X
+1076(produce)X
+1357(the)X
+1477(same)X
+1664(hash)X
+1833(value)X
+2029(and)X
+2166(their)X
+2334(total)X
+720 3958(size)N
+876(exceeds)X
+1162(the)X
+1291(page)X
+1474(size,)X
+1650(the)X
+1779(table)X
+1966(cannot)X
+2210(store)X
+2396(all)X
+720 4046(the)N
+838(colliding)X
+1142(keys.)X
+892 4160(The)N
+1050(in-memory)X
+2 f
+1439(hsearch)X
+1 f
+1725(routines)X
+2015(have)X
+2199(different)X
+720 4248(shortcomings.)N
+1219(First,)X
+1413(the)X
+1539(notion)X
+1771(of)X
+1865(a)X
+1928(single)X
+2146(hash)X
+2320(table)X
+720 4336(is)N
+807(embedded)X
+1171(in)X
+1266(the)X
+1397(interface,)X
+1732(preventing)X
+2108(an)X
+2217(applica-)X
+720 4424(tion)N
+902(from)X
+1116(accessing)X
+1482(multiple)X
+1806(tables)X
+2050(concurrently.)X
+720 4512(Secondly,)N
+1063(the)X
+1186(routine)X
+1438(to)X
+1525(create)X
+1743(a)X
+1804(hash)X
+1976(table)X
+2157(requires)X
+2440(a)X
+720 4600(parameter)N
+1066(which)X
+1286(declares)X
+1573(the)X
+1694(size)X
+1842(of)X
+1932(the)X
+2053(hash)X
+2223(table.)X
+2422(If)X
+720 4688(this)N
+856(size)X
+1001(is)X
+1074(set)X
+1183(too)X
+1305(low,)X
+1465(performance)X
+1892(degradation)X
+2291(or)X
+2378(the)X
+720 4776(inability)N
+1008(to)X
+1092(add)X
+1230(items)X
+1425(to)X
+1509(the)X
+1628(table)X
+1805(may)X
+1964(result.)X
+2223(In)X
+2311(addi-)X
+720 4864(tion,)N
+2 f
+910(hsearch)X
+1 f
+1210(requires)X
+1515(that)X
+1681(the)X
+1825(application)X
+2226(allocate)X
+720 4952(memory)N
+1037(for)X
+1181(the)X
+1329(key)X
+1495(and)X
+1661(data)X
+1845(items.)X
+2108(Lastly,)X
+2378(the)X
+2 f
+720 5040(hsearch)N
+1 f
+1013(routines)X
+1310(provide)X
+1594(no)X
+1713(interface)X
+2034(to)X
+2135(store)X
+2329(hash)X
+720 5128(tables)N
+927(on)X
+1027(disk.)X
+16 s
+720 5593 MXY
+864 0 Dl
+2 f
+8 s
+760 5648(1)N
+1 f
+9 s
+5673(UNIX)Y
+990(is)X
+1056(a)X
+1106(registered)X
+1408(trademark)X
+1718(of)X
+1796(AT&T.)X
+10 s
+2878 2128(The)N
+3032(goal)X
+3199(of)X
+3295(our)X
+3431(work)X
+3625(was)X
+3779(to)X
+3870(design)X
+4108(and)X
+4253(imple-)X
+2706 2216(ment)N
+2900(a)X
+2970(new)X
+3138(package)X
+3436(that)X
+3590(provides)X
+3899(a)X
+3968(superset)X
+4264(of)X
+4364(the)X
+2706 2304(functionality)N
+3144(of)X
+3240(both)X
+2 f
+3411(dbm)X
+1 f
+3578(and)X
+2 f
+3723(hsearch)X
+1 f
+3977(.)X
+4045(The)X
+4198(package)X
+2706 2392(had)N
+2871(to)X
+2982(overcome)X
+3348(the)X
+3495(interface)X
+3826(shortcomings)X
+4306(cited)X
+2706 2480(above)N
+2930(and)X
+3078(its)X
+3185(implementation)X
+3719(had)X
+3867(to)X
+3961(provide)X
+4238(perfor-)X
+2706 2568(mance)N
+2942(equal)X
+3142(or)X
+3235(superior)X
+3524(to)X
+3612(that)X
+3758(of)X
+3851(the)X
+3975(existing)X
+4253(imple-)X
+2706 2656(mentations.)N
+3152(In)X
+3274(order)X
+3498(to)X
+3614(provide)X
+3913(a)X
+4003(compact)X
+4329(disk)X
+2706 2744(representation,)N
+3224(graceful)X
+3531(table)X
+3729(growth,)X
+4018(and)X
+4176(expected)X
+2706 2832(constant)N
+3033(time)X
+3234(performance,)X
+3720(we)X
+3873(selected)X
+4191(Litwin's)X
+2706 2920(linear)N
+2923(hashing)X
+3206(algorithm)X
+3551([LAR88,)X
+3872(LIT80].)X
+4178(We)X
+4324(then)X
+2706 3008(enhanced)N
+3037(the)X
+3161(algorithm)X
+3498(to)X
+3586(handle)X
+3826(page)X
+4004(over\257ows)X
+4346(and)X
+2706 3096(large)N
+2900(key)X
+3049(handling)X
+3362(with)X
+3537(a)X
+3606(single)X
+3830(mechanism,)X
+4248(named)X
+2706 3184(buddy-in-waiting.)N
+3 f
+2975 3338(Existing)N
+3274(UNIX)X
+3499(Hashing)X
+3802(Techniques)X
+1 f
+2878 3470(Over)N
+3076(the)X
+3210(last)X
+3357(decade,)X
+3637(several)X
+3901(dynamic)X
+4213(hashing)X
+2706 3558(schemes)N
+3000(have)X
+3174(been)X
+3348(developed)X
+3700(for)X
+3816(the)X
+3936(UNIX)X
+4159(timeshar-)X
+2706 3646(ing)N
+2856(system,)X
+3146(starting)X
+3433(with)X
+3622(the)X
+3767(inclusion)X
+4107(of)X
+2 f
+4221(dbm)X
+1 f
+4359(,)X
+4426(a)X
+2706 3734(minimal)N
+3008(database)X
+3321(library)X
+3571(written)X
+3834(by)X
+3950(Ken)X
+4120(Thompson)X
+2706 3822([THOM90],)N
+3141(in)X
+3248(the)X
+3391(Seventh)X
+3694(Edition)X
+3974(UNIX)X
+4220(system.)X
+2706 3910(Since)N
+2916(then,)X
+3106(an)X
+3214(extended)X
+3536(version)X
+3804(of)X
+3903(the)X
+4032(same)X
+4228(library,)X
+2 f
+2706 3998(ndbm)N
+1 f
+2884(,)X
+2933(and)X
+3078(a)X
+3142(public-domain)X
+3637(clone)X
+3839(of)X
+3934(the)X
+4060(latter,)X
+2 f
+4273(sdbm)X
+1 f
+4442(,)X
+2706 4086(have)N
+2902(been)X
+3098(developed.)X
+3491(Another)X
+3797 0.1645(interface-compatible)AX
+2706 4174(library)N
+2 f
+2950(gdbm)X
+1 f
+3128(,)X
+3178(was)X
+3333(recently)X
+3622(made)X
+3826(available)X
+4145(as)X
+4241(part)X
+4395(of)X
+2706 4262(the)N
+2829(Free)X
+2997(Software)X
+3312(Foundation's)X
+3759(\(FSF\))X
+3970(software)X
+4271(distri-)X
+2706 4350(bution.)N
+2878 4464(All)N
+3017(of)X
+3121(these)X
+3323(implementations)X
+3893(are)X
+4029(based)X
+4248(on)X
+4364(the)X
+2706 4552(idea)N
+2871(of)X
+2969(revealing)X
+3299(just)X
+3445(enough)X
+3711(bits)X
+3856(of)X
+3953(a)X
+4019(hash)X
+4196(value)X
+4400(to)X
+2706 4640(locate)N
+2920(a)X
+2978(page)X
+3151(in)X
+3234(a)X
+3291(single)X
+3503(access.)X
+3770(While)X
+2 f
+3987(dbm/ndbm)X
+1 f
+4346(and)X
+2 f
+2706 4728(sdbm)N
+1 f
+2908(map)X
+3079(the)X
+3210(hash)X
+3390(value)X
+3597(directly)X
+3874(to)X
+3968(a)X
+4036(disk)X
+4201(address,)X
+2 f
+2706 4816(gdbm)N
+1 f
+2921(uses)X
+3096(the)X
+3231(hash)X
+3414(value)X
+3624(to)X
+3722(index)X
+3936(into)X
+4096(a)X
+2 f
+4168(directory)X
+1 f
+2706 4904([ENB88])N
+3020(containing)X
+3378(disk)X
+3531(addresses.)X
+2878 5018(The)N
+2 f
+3033(hsearch)X
+1 f
+3317(routines)X
+3605(in)X
+3697(System)X
+3962(V)X
+4049(are)X
+4177(designed)X
+2706 5106(to)N
+2804(provide)X
+3085(memory-resident)X
+3669(hash)X
+3852(tables.)X
+4115(Since)X
+4328(data)X
+2706 5194(access)N
+2948(does)X
+3131(not)X
+3269(require)X
+3533(disk)X
+3702(access,)X
+3964(simple)X
+4213(hashing)X
+2706 5282(schemes)N
+3010(which)X
+3238(may)X
+3408(require)X
+3667(multiple)X
+3964(probes)X
+4209(into)X
+4364(the)X
+2706 5370(table)N
+2889(are)X
+3015(used.)X
+3209(A)X
+3294(more)X
+3486(interesting)X
+3851(version)X
+4114(of)X
+2 f
+4208(hsearch)X
+1 f
+2706 5458(is)N
+2784(a)X
+2845(public)X
+3070(domain)X
+3335(library,)X
+2 f
+3594(dynahash)X
+1 f
+3901(,)X
+3945(that)X
+4089(implements)X
+2706 5546(Larson's)N
+3036(in-memory)X
+3440(adaptation)X
+3822([LAR88])X
+4164(of)X
+4279(linear)X
+2706 5634(hashing)N
+2975([LIT80].)X
+3 f
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+1 f
+4424(1)X
+
+2 p
+%%Page: 2 2
+10 s 10 xH 0 xS 1 f
+3 f
+432 258(A)N
+510(New)X
+682(Hashing)X
+985(Package)X
+1290(for)X
+1413(UNIX)X
+3663(Seltzer)X
+3920(&)X
+4007(Yigit)X
+2 f
+1074 538(dbm)N
+1 f
+1232(and)X
+2 f
+1368(ndbm)X
+1 f
+604 670(The)N
+2 f
+760(dbm)X
+1 f
+928(and)X
+2 f
+1074(ndbm)X
+1 f
+1282(library)X
+1526(implementations)X
+2089(are)X
+432 758(based)N
+667(on)X
+799(the)X
+949(same)X
+1166(algorithm)X
+1529(by)X
+1661(Ken)X
+1846(Thompson)X
+432 846([THOM90,)N
+824(TOR88,)X
+1113(WAL84],)X
+1452(but)X
+1582(differ)X
+1789(in)X
+1879(their)X
+2054(pro-)X
+432 934(grammatic)N
+801(interfaces.)X
+1160(The)X
+1311(latter)X
+1502(is)X
+1581(a)X
+1643(modi\256ed)X
+1952(version)X
+432 1022(of)N
+533(the)X
+665(former)X
+918(which)X
+1148(adds)X
+1328(support)X
+1601(for)X
+1728(multiple)X
+2027(data-)X
+432 1110(bases)N
+634(to)X
+724(be)X
+828(open)X
+1011(concurrently.)X
+1484(The)X
+1636(discussion)X
+1996(of)X
+2090(the)X
+432 1198(algorithm)N
+774(that)X
+925(follows)X
+1196(is)X
+1280(applicable)X
+1640(to)X
+1732(both)X
+2 f
+1904(dbm)X
+1 f
+2072(and)X
+2 f
+432 1286(ndbm)N
+1 f
+610(.)X
+604 1400(The)N
+760(basic)X
+956(structure)X
+1268(of)X
+2 f
+1366(dbm)X
+1 f
+1535(calls)X
+1712(for)X
+1836(\256xed-sized)X
+432 1488(disk)N
+612(blocks)X
+868(\(buckets\))X
+1214(and)X
+1377(an)X
+2 f
+1499(access)X
+1 f
+1755(function)X
+2068(that)X
+432 1576(maps)N
+623(a)X
+681(key)X
+819(to)X
+902(a)X
+959(bucket.)X
+1234(The)X
+1380(interface)X
+1683(routines)X
+1962(use)X
+2090(the)X
+2 f
+432 1664(access)N
+1 f
+673(function)X
+970(to)X
+1062(obtain)X
+1292(the)X
+1420(appropriate)X
+1816(bucket)X
+2060(in)X
+2152(a)X
+432 1752(single)N
+643(disk)X
+796(access.)X
+604 1866(Within)N
+869(the)X
+2 f
+1010(access)X
+1 f
+1263(function,)X
+1593(a)X
+1672(bit-randomizing)X
+432 1954(hash)N
+610(function)X
+2 f
+8 s
+877 1929(2)N
+1 f
+10 s
+940 1954(is)N
+1024(used)X
+1202(to)X
+1294(convert)X
+1565(a)X
+1631(key)X
+1777(into)X
+1931(a)X
+1997(32-bit)X
+432 2042(hash)N
+605(value.)X
+825(Out)X
+971(of)X
+1064(these)X
+1254(32)X
+1359(bits,)X
+1519(only)X
+1686(as)X
+1778(many)X
+1981(bits)X
+2121(as)X
+432 2130(necessary)N
+773(are)X
+900(used)X
+1075(to)X
+1165(determine)X
+1514(the)X
+1639(particular)X
+1974(bucket)X
+432 2218(on)N
+533(which)X
+750(a)X
+807(key)X
+944(resides.)X
+1228(An)X
+1347(in-memory)X
+1724(bitmap)X
+1967(is)X
+2041(used)X
+432 2306(to)N
+533(determine)X
+893(how)X
+1070(many)X
+1287(bits)X
+1441(are)X
+1579(required.)X
+1905(Each)X
+2104(bit)X
+432 2394(indicates)N
+746(whether)X
+1033(its)X
+1136(associated)X
+1494(bucket)X
+1736(has)X
+1871(been)X
+2051(split)X
+432 2482(yet)N
+562(\(a)X
+657(0)X
+728(indicating)X
+1079(that)X
+1230(the)X
+1359(bucket)X
+1604(has)X
+1742(not)X
+1875(yet)X
+2004(split\).)X
+432 2570(The)N
+590(use)X
+730(of)X
+830(the)X
+961(hash)X
+1141(function)X
+1441(and)X
+1590(the)X
+1720(bitmap)X
+1974(is)X
+2059(best)X
+432 2658(described)N
+769(by)X
+878(stepping)X
+1177(through)X
+1454(database)X
+1759(creation)X
+2046(with)X
+432 2746(multiple)N
+718(invocations)X
+1107(of)X
+1194(a)X
+2 f
+1250(store)X
+1 f
+1430(operation.)X
+604 2860(Initially,)N
+906(the)X
+1033(hash)X
+1209(table)X
+1394(contains)X
+1690(a)X
+1755(single)X
+1974(bucket)X
+432 2948(\(bucket)N
+711(0\),)X
+836(the)X
+972(bit)X
+1094(map)X
+1270(contains)X
+1575(a)X
+1649(single)X
+1878(bit)X
+2000(\(bit)X
+2148(0)X
+432 3036(corresponding)N
+913(to)X
+997(bucket)X
+1233(0\),)X
+1342(and)X
+1480(0)X
+1542(bits)X
+1699(of)X
+1788(a)X
+1846(hash)X
+2014(value)X
+432 3124(are)N
+560(examined)X
+901(to)X
+992(determine)X
+1342(where)X
+1568(a)X
+1633(key)X
+1778(is)X
+1860(placed)X
+2099(\(in)X
+432 3212(bucket)N
+670(0\).)X
+801(When)X
+1017(bucket)X
+1255(0)X
+1319(is)X
+1396(full,)X
+1551(its)X
+1650(bit)X
+1758(in)X
+1844(the)X
+1966(bitmap)X
+432 3300(\(bit)N
+564(0\))X
+652(is)X
+726(set,)X
+856(and)X
+993(its)X
+1089(contents)X
+1377(are)X
+1497(split)X
+1655(between)X
+1943(buckets)X
+432 3388(0)N
+499(and)X
+641(1,)X
+727(by)X
+833(considering)X
+1233(the)X
+1357(0)X
+2 f
+7 s
+3356(th)Y
+10 s
+1 f
+1480 3388(bit)N
+1590(\(the)X
+1741(lowest)X
+1976(bit)X
+2086(not)X
+432 3476(previously)N
+800(examined\))X
+1169(of)X
+1266(the)X
+1393(hash)X
+1569(value)X
+1772(for)X
+1895(each)X
+2072(key)X
+432 3564(within)N
+668(the)X
+798(bucket.)X
+1064(Given)X
+1292(a)X
+1359(well-designed)X
+1840(hash)X
+2018(func-)X
+432 3652(tion,)N
+613(approximately)X
+1112(half)X
+1273(of)X
+1376(the)X
+1510(keys)X
+1693(will)X
+1853(have)X
+2041(hash)X
+432 3740(values)N
+666(with)X
+837(the)X
+964(0)X
+2 f
+7 s
+3708(th)Y
+10 s
+1 f
+1090 3740(bit)N
+1203(set.)X
+1341(All)X
+1471(such)X
+1646(keys)X
+1821(and)X
+1965(associ-)X
+432 3828(ated)N
+586(data)X
+740(are)X
+859(moved)X
+1097(to)X
+1179(bucket)X
+1413(1,)X
+1493(and)X
+1629(the)X
+1747(rest)X
+1883(remain)X
+2126(in)X
+432 3916(bucket)N
+666(0.)X
+604 4030(After)N
+804(this)X
+949(split,)X
+1135(the)X
+1262(\256le)X
+1393(now)X
+1560(contains)X
+1856(two)X
+2005(buck-)X
+432 4118(ets,)N
+562(and)X
+699(the)X
+818(bitmap)X
+1061(contains)X
+1349(three)X
+1530(bits:)X
+1687(the)X
+1805(0)X
+2 f
+7 s
+4086(th)Y
+10 s
+1 f
+1922 4118(bit)N
+2026(is)X
+2099(set)X
+432 4206(to)N
+525(indicate)X
+810(a)X
+876(bucket)X
+1120(0)X
+1190(split)X
+1357(when)X
+1561(no)X
+1671(bits)X
+1816(of)X
+1913(the)X
+2041(hash)X
+432 4294(value)N
+648(are)X
+789(considered,)X
+1199(and)X
+1357(two)X
+1519(more)X
+1726(unset)X
+1937(bits)X
+2094(for)X
+432 4382(buckets)N
+706(0)X
+775(and)X
+920(1.)X
+1029(The)X
+1183(placement)X
+1542(of)X
+1638(an)X
+1742(incoming)X
+2072(key)X
+432 4470(now)N
+604(requires)X
+897(examination)X
+1327(of)X
+1428(the)X
+1560(0)X
+2 f
+7 s
+4438(th)Y
+10 s
+1 f
+1691 4470(bit)N
+1809(of)X
+1910(the)X
+2041(hash)X
+432 4558(value,)N
+667(and)X
+824(the)X
+963(key)X
+1119(is)X
+1212(placed)X
+1462(either)X
+1685(in)X
+1787(bucket)X
+2041(0)X
+2121(or)X
+432 4646(bucket)N
+674(1.)X
+782(If)X
+864(either)X
+1075(bucket)X
+1317(0)X
+1385(or)X
+1480(bucket)X
+1722(1)X
+1790(\256lls)X
+1937(up,)X
+2064(it)X
+2135(is)X
+432 4734(split)N
+598(as)X
+693(before,)X
+947(its)X
+1050(bit)X
+1162(is)X
+1243(set)X
+1360(in)X
+1450(the)X
+1576(bitmap,)X
+1846(and)X
+1990(a)X
+2054(new)X
+432 4822(set)N
+541(of)X
+628(unset)X
+817(bits)X
+952(are)X
+1071(added)X
+1283(to)X
+1365(the)X
+1483(bitmap.)X
+604 4936(Each)N
+791(time)X
+959(we)X
+1079(consider)X
+1376(a)X
+1437(new)X
+1596(bit)X
+1705(\(bit)X
+1841(n\),)X
+1953(we)X
+2072(add)X
+432 5024(2)N
+2 f
+7 s
+4992(n)Y
+9 f
+509(+)X
+1 f
+540(1)X
+10 s
+595 5024(bits)N
+737(to)X
+826(the)X
+951(bitmap)X
+1199(and)X
+1341(obtain)X
+1567(2)X
+2 f
+7 s
+4992(n)Y
+9 f
+1644(+)X
+1 f
+1675(1)X
+10 s
+1729 5024(more)N
+1920(address-)X
+432 5112(able)N
+595(buckets)X
+869(in)X
+960(the)X
+1087(\256le.)X
+1258(As)X
+1376(a)X
+1441(result,)X
+1668(the)X
+1795(bitmap)X
+2045(con-)X
+432 5200(tains)N
+618(the)X
+751(previous)X
+1062(2)X
+2 f
+7 s
+5168(n)Y
+9 f
+1139(+)X
+1 f
+1170(1)X
+2 f
+10 s
+9 f
+5200(-)Y
+1 f
+1242(1)X
+1317(bits)X
+1467(\(1)X
+2 f
+9 f
+1534(+)X
+1 f
+1578(2)X
+2 f
+9 f
+(+)S
+1 f
+1662(4)X
+2 f
+9 f
+(+)S
+1 f
+1746(...)X
+2 f
+9 f
+(+)S
+1 f
+1850(2)X
+2 f
+7 s
+5168(n)Y
+10 s
+1 f
+1931 5200(\))N
+1992(which)X
+432 5288(trace)N
+649(the)X
+807(entire)X
+2 f
+1050(split)X
+1247(history)X
+1 f
+1529(of)X
+1656(the)X
+1813(addressable)X
+16 s
+432 5433 MXY
+864 0 Dl
+2 f
+8 s
+472 5488(2)N
+1 f
+9 s
+523 5513(This)N
+670(bit-randomizing)X
+1153(property)X
+1416(is)X
+1482(important)X
+1780(to)X
+1854(obtain)X
+2052(radi-)X
+432 5593(cally)N
+599(different)X
+874(hash)X
+1033(values)X
+1244(for)X
+1355(nearly)X
+1562(identical)X
+1836(keys,)X
+2012(which)X
+432 5673(in)N
+506(turn)X
+640(avoids)X
+846(clustering)X
+1148(of)X
+1226(such)X
+1376(keys)X
+1526(in)X
+1600(a)X
+1650(single)X
+1840(bucket.)X
+10 s
+2418 538(buckets.)N
+Given a key and the bitmap created by this algorithm, we first examine bit 0 of the bitmap (the bit to consult when 0 bits of the hash value are being examined). If it is set (indicating that the bucket split), we begin considering the bits of the 32-bit hash value. As bit n is revealed, a mask equal to 2^(n+1) - 1 will yield the current bucket address. Adding 2^(n+1) - 1 to the bucket address identifies which bit in the bitmap must be checked. We continue revealing bits of the hash value until all set bits in the bitmap are exhausted. The following algorithm, a simplification of the algorithm due to Ken Thompson [THOM90, TOR88], uses the hash value and the bitmap to calculate the bucket address as discussed above.
+    hash = calchash(key);
+    mask = 0;
+    while (isbitset((hash & mask) + mask))
+            mask = (mask << 1) + 1;
+    bucket = hash & mask;
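+The isbitset() routine is not given in the fragment above; a minimal sketch, assuming the split-history bitmap is kept in an in-memory array of 32-bit words (BITMAP_WORDS and bitmap are illustrative names, not part of the original algorithm):
+
+    #define BITMAP_WORDS 256                    /* illustrative capacity */
+    static unsigned long bitmap[BITMAP_WORDS];  /* split-history bits, 32 used per word */
+
+    static int
+    isbitset(int bit)
+    {
+            /* word index = bit / 32, position within word = bit % 32 */
+            return (bitmap[bit / 32] & (1UL << (bit % 32))) != 0;
+    }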
+sdbm
+
+The sdbm library is a public-domain clone of the ndbm library, developed by Ozan Yigit to provide ndbm's functionality under some versions of UNIX that exclude it for licensing reasons [YIG89]. The programmer interface and the basic structure of sdbm are identical to ndbm, but internal details of the access function, such as the calculation of the bucket address, and the use of different hash functions make the two incompatible at the database level.
+The sdbm library is based on a simplified implementation of Larson's 1978 dynamic hashing algorithm, including the refinements and variations of section 5 [LAR78]. Larson's original algorithm calls for a forest of binary hash trees that are accessed by two hash functions. The first hash function selects a particular tree within the forest. The second hash function, which is required to be a boolean pseudo-random number generator that is seeded by the key, is used to traverse the tree until internal (split) nodes are exhausted and an external (non-split) node is reached. The bucket addresses are stored directly in the external nodes.
+Larson's refinements are based on the observation that the nodes can be represented by a single bit that is set for internal nodes and not set for external nodes, resulting in a radix search trie. Figure 1 illustrates this. Nodes A and B are internal (split) nodes, thus having no bucket addresses associated with them. Instead, the external nodes (C, D, and E) each need to refer to a bucket address. These bucket addresses can be stored in the trie itself where the subtries would
+432 5960(2)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+3 p
+%%Page: 3 3
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+720 258(Seltzer)N
+977(&)X
+1064(Yigit)X
+3278(A)X
+3356(New)X
+3528(Hashing)X
+3831(Package)X
+4136(for)X
+4259(UNIX)X
+1 f
+live if they existed [KNU68]. For example, if nodes F and G were the children of node C, the bucket address L00 could reside in the bits that will eventually be used to store nodes F and G and all their children.
+[Figure 1: Radix search trie with internal nodes A and B, external nodes C, D, and E, and bucket addresses stored in the unused portion of the trie.]
+Further simplifications of the above [YIG89] are possible. Using a single radix trie to avoid the first hash function, replacing the pseudo-random number generator with a well designed, bit-randomizing hash function, and using the portion of the hash value exposed during the trie traversal as a direct bucket address results in an access function that works very similarly to Thompson's algorithm above. The following algorithm uses the hash value to traverse a linearized radix trie [3] starting at the 0th bit.
+    tbit = 0;            /* radix trie index */
+    hbit = 0;            /* hash bit index */
+    mask = 0;
+    hash = calchash(key);
+
+    for (mask = 0;
+        isbitset(tbit);
+        mask = (mask << 1) + 1)
+            if (hash & (1 << hbit++))
+                    /* right son */
+                    tbit = 2 * tbit + 2;
+            else
+                    /* left son */
+                    tbit = 2 * tbit + 1;
+
+    bucket = hash & mask;
+
+    [3] A linearized radix trie is merely an array representation of the radix search trie described above. The children of the node with index i can be found at the nodes indexed 2*i+1 and 2*i+2.
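+For reference, the fragment above can be packaged as a self-contained routine; this is a sketch rather than sdbm source, and the trie array, its bit test, and the function name are illustrative:
+
+    #define TRIE_NODES  8192                            /* illustrative capacity */
+    static unsigned long radix_trie[TRIE_NODES / 32];   /* one bit per node */
+
+    /* node i is an internal (split) node iff its bit is set */
+    #define TRIE_ISSET(i)  (radix_trie[(i) / 32] & (1UL << ((i) % 32)))
+
+    static int
+    sdbm_bucket(unsigned long hash)
+    {
+            int tbit = 0, hbit = 0;
+            unsigned long mask;
+
+            /* children of node i live at 2*i+1 (left) and 2*i+2 (right) */
+            for (mask = 0; TRIE_ISSET(tbit); mask = (mask << 1) + 1)
+                    if (hash & (1UL << hbit++))
+                            tbit = 2 * tbit + 2;        /* right son */
+                    else
+                            tbit = 2 * tbit + 1;        /* left son */
+            return (int)(hash & mask);
+    }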
+gdbm
+
+The gdbm (GNU data base manager) library is a UNIX database manager written by Philip A. Nelson, and made available as a part of the FSF software distribution. The gdbm library provides the same functionality as the dbm/ndbm libraries [NEL90] but attempts to avoid some of their shortcomings. The gdbm library allows for arbitrary-length data, and its database is a singular, non-sparse [4] file. The gdbm library also includes dbm and ndbm compatible interfaces.
+The gdbm library is based on extensible hashing, a dynamic hashing algorithm by Fagin et al [FAG79]. This algorithm differs from the previously discussed algorithms in that it uses a directory that is a collapsed representation [ENB88] of the radix search trie used by sdbm.
+[Figure 2: A radix search trie and a directory representing the trie.]
+In this algorithm, a directory consists of a search trie of depth n, containing 2^n bucket addresses (i.e. each element of the trie is a bucket address). To access the hash table, a 32-bit hash value is calculated and n bits of the value are used to index into the directory to obtain a bucket address. It is important to note that multiple entries of this directory may contain the same bucket address as a result of directory doubling during bucket splitting. Figure 2 illustrates the relationship between a typical (skewed) search trie and its directory representation. The formation of the directory shown in the figure is as follows.
+    [4] It does not contain holes.
+Initially, there is one slot in the directory addressing a single bucket. The depth of the trie is 0 and 0 bits of each hash value are examined to determine in which bucket to place a key; all keys go in bucket 0. When this bucket is full, its contents are divided between L0 and L1 as was done in the previously discussed algorithms. After this split, the address of the second bucket must be stored in the directory. To accommodate the new address, the directory is split [5], by doubling it, thus increasing the depth of the directory by one.
+After this split, a single bit of the hash value needs to be examined to decide whether the key belongs to L0 or L1. Once one of these buckets fills (L0 for example), it is split as before, and the directory is split again to make room for the address of the third bucket. This splitting causes the addresses of the non-splitting bucket (L1) to be duplicated. The directory now has four entries, a depth of 2, and indexes the buckets L00, L01 and L1, as shown in Figure 2.
+The crucial part of the algorithm is the observation that L1 is addressed twice in the directory. If this bucket were to split now, the directory already contains room to hold the address of the new bucket. In general, the relationship between the directory and the number of bucket addresses contained therein is used to decide when to split the directory. Each bucket has a depth (n_b) associated with it and appears in the directory exactly 2^(n - n_b) times. When a bucket splits, its depth increases by one. The directory must split any time a bucket's depth exceeds the depth of the directory. The following code fragment helps to illustrate the extendible hashing algorithm [FAG79] for accessing individual buckets and maintaining the directory.
+    hash = calchash(key);
+    mask = maskvec[depth];
+
+    bucket = directory[hash & mask];
+
+    /* Key Insertion */
+    if (store(bucket, key, data) == FAIL) {
+            newbl = getpage();
+            bucket->depth++;
+            newbl->depth = bucket->depth;
+            if (bucket->depth > depth) {
+                    /* double directory */
+                    depth++;
+                    directory = double(directory);
+            }
+            splitbucket(bucket, newbl)
+            ...
+    }
+
+    [5] This decision to split the directory is based on a comparison of the depth of the page being split and the depth of the trie. In Figure 2, the depths of both L00 and L01 are 2, whereas the depth of L1 is 1. Therefore, if L1 were to split, the directory would not need to split. In reality, a bucket is allocated for the directory at the time of file creation so although the directory splits logically, physical splits do not occur until the file becomes quite large.
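+The double() routine invoked in the fragment is not shown here; a minimal sketch of what directory doubling involves, assuming the directory is a heap-allocated array of bucket pointers indexed by the low-order hash bits (BUCKET and dir_double are illustrative names; note that double itself is not a legal C function name since it collides with the type keyword):
+
+    #include <stdlib.h>
+
+    typedef struct bucket BUCKET;               /* illustrative bucket type */
+
+    /*
+     * Grow a directory of 2^(depth-1) bucket pointers to 2^depth entries.
+     * Because lookups use the low-order bits (hash & mask), old entry i
+     * must appear at both i and i + oldsize until that bucket splits.
+     */
+    BUCKET **
+    dir_double(BUCKET **old, int depth)
+    {
+            long i, oldsize = 1L << (depth - 1);
+            BUCKET **new = malloc(2 * oldsize * sizeof(BUCKET *));
+
+            if (new == NULL)
+                    return (NULL);
+            for (i = 0; i < oldsize; i++) {
+                    new[i] = old[i];
+                    new[i + oldsize] = old[i];
+            }
+            free(old);
+            return (new);
+    }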
+hsearch
+
+Since hsearch does not have to translate hash values into disk addresses, it can use much simpler algorithms than those defined above. System V's hsearch constructs a fixed-size hash table (specified by the user at table creation). By default, a multiplicative hash function based on that described in Knuth, Volume 3, section 6.4 [KNU68] is used to obtain a primary bucket address. If this bucket is full, a secondary multiplicative hash value is computed to define the probe interval. The probe interval is added to the original bucket address (modulo the table size) to obtain a new bucket address. This process repeats until an empty bucket is found. If no bucket is found, an insertion fails with a ``table full'' condition.
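+A sketch of that probe loop follows; it is illustrative only (h1, h2, table, and TABLE_SIZE are assumed names, and the real hsearch differs in detail):
+
+    #define TABLE_SIZE 1024                  /* fixed size chosen at hcreate() time */
+    extern char *table[TABLE_SIZE];          /* NULL marks an empty slot */
+    extern unsigned int h1(const char *);    /* primary multiplicative hash */
+    extern unsigned int h2(const char *);    /* secondary hash: probe interval */
+
+    int
+    find_slot(const char *key)
+    {
+            unsigned int addr = h1(key) % TABLE_SIZE;
+            unsigned int step = h2(key) % (TABLE_SIZE - 1) + 1;   /* never zero */
+            int probes;
+
+            for (probes = 0; probes < TABLE_SIZE; probes++) {
+                    if (table[addr] == NULL)
+                            return ((int)addr);        /* empty bucket found */
+                    addr = (addr + step) % TABLE_SIZE; /* add the probe interval */
+            }
+            return (-1);                               /* "table full" */
+    }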
+The basic algorithm may be modified by a number of compile time options available to those users with AT&T source code. First, the package provides two options for hash functions. Users may specify their own hash function by compiling with ``USCR'' defined and declaring and defining the variable hcompar, a function taking two string arguments and returning an integer. Users may also request that hash values be computed simply by taking the modulo of key (using division rather than multiplication for hash value calculation). If this technique is used, collisions are resolved by scanning sequentially from the selected bucket (linear probing). This option is available by defining the variable ``DIV'' at compile time.
+A second option, based on an algorithm discovered by Richard P. Brent, rearranges the table at the time of insertion in order to speed up retrievals. The basic idea is to shorten long probe sequences by lengthening short probe sequences. Once the probe chain has exceeded some threshold (Brent suggests 2), we attempt to shuffle any colliding keys (keys which appeared in the probe sequence of the new key). The details of this key shuffling can be found in [KNU68] and [BRE73]. This algorithm may be obtained by defining the variable ``BRENT'' at compile time.
+A third set of options, obtained by defining ``CHAINED'', use linked lists to resolve collisions. Either of the primary hash functions described above may be used, but all collisions are resolved by building a linked list of entries from the primary bucket. By default, new entries will be added to a bucket at the beginning of the bucket chain. However, compile options ``SORTUP'' or ``SORTDOWN'' may be specified to order the hash chains within each bucket.
+dynahash
+
+The dynahash library, written by Esmond Pitt, implements Larson's linear hashing algorithm [LAR88] with an hsearch compatible interface. Intuitively, a hash table begins as a single bucket and grows in generations, where a generation corresponds to a doubling in the size of the hash table. The 0th generation occurs as the table grows from one bucket to two. In the next generation the table grows from two to four. During each generation, every bucket that existed at the beginning of the generation is split.
+The table starts as a single bucket (numbered 0), the current split bucket is set to bucket 0, and the maximum split point is set to twice the current split point (0). When it is time for a bucket to split, the keys in the current split bucket are divided between the current split bucket and a new bucket whose bucket number is equal to 1 + current split bucket + maximum split point. We can determine which keys move to the new bucket by examining the nth bit of a key's hash value where n is the generation number. After the bucket at the maximum split point has been split, the generation number is incremented, the current split point is set back to zero, and the maximum split point is set to the number of the last bucket in the file (which is equal to twice the old maximum split point plus 1).
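+A sketch of that bookkeeping (illustrative names, not dynahash source; redistribute() is assumed to move every key whose bit-th hash bit is set from the old bucket to the new one):
+
+    extern void redistribute(int oldbucket, int newbucket, int bit);
+
+    static int generation = 0;      /* which hash bit separates old from new */
+    static int split_bucket = 0;    /* current split bucket */
+    static int max_split = 0;       /* maximum split point */
+
+    void
+    advance_split(void)
+    {
+            int newbucket = 1 + split_bucket + max_split;
+
+            redistribute(split_bucket, newbucket, generation);
+            if (split_bucket++ == max_split) {      /* end of this generation */
+                    generation++;
+                    split_bucket = 0;
+                    max_split = 2 * max_split + 1;  /* last bucket now in the file */
+            }
+    }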
+To facilitate locating keys, we maintain two masks. The low mask is equal to the maximum split bucket and the high mask is equal to the next maximum split bucket. To locate a specific key, we compute a 32-bit hash value using a bit-randomizing algorithm such as the one described in [LAR88]. This hash value is then masked with the high mask. If the resulting number is greater than the maximum bucket in the table (current split bucket + maximum split point), the hash value is masked with the low mask. In either case, the result of the mask is the bucket number for the given key. The algorithm below illustrates this process.
+    h = calchash(key);
+    bucket = h & high_mask;
+    if ( bucket > max_bucket )
+            bucket = h & low_mask;
+    return(bucket);
+In order to decide when to split a bucket, dynahash uses controlled splitting. A hash table has a fill factor which is expressed in terms of the average number of keys in each bucket. Each time the table's total number of keys divided by its number of buckets exceeds this fill factor, a bucket is split.
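+A sketch of the controlled-splitting test (illustrative names; dynahash keeps this state internally):
+
+    extern void advance_split(void);        /* split the next bucket in line */
+
+    static long nkeys;                      /* keys currently in the table */
+    static long nbuckets = 1;               /* buckets currently in the table */
+    static long ffactor = 8;                /* illustrative fill factor */
+
+    void
+    after_insert(void)
+    {
+            if (++nkeys > ffactor * nbuckets) {
+                    advance_split();
+                    nbuckets++;
+            }
+    }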
+Since the hsearch create interface (hcreate) calls for an estimate of the final size of the hash table (nelem), dynahash uses this information to initialize the table. The initial number of buckets is set to nelem rounded to the next higher power of two. The current split point is set to 0 and the maximum bucket and maximum split point are set to this rounded value.
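+Rounding nelem up to the next higher power of two can be done as sketched below (illustrative, not dynahash source):
+
+    /* Round up to the next power of two, e.g. nelem = 100 -> 128. */
+    unsigned int
+    roundup_pow2(unsigned int n)
+    {
+            unsigned int p;
+
+            for (p = 1; p < n; p <<= 1)
+                    ;
+            return (p);
+    }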
+The New Implementation
+
+Our implementation is also based on Larson's linear hashing [LAR88] algorithm as well as the dynahash implementation. The dbm family of algorithms decide dynamically which bucket to split and when to split it (when it overflows), while dynahash splits in a predefined order (linearly) and at a predefined time (when the table fill factor is exceeded). We use a hybrid of these techniques. Splits occur in the predefined order of linear hashing, but the time at which pages are split is determined both by page overflows (uncontrolled splitting) and by exceeding the fill factor (controlled splitting).
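+A sketch of how the two triggers combine (names are illustrative, not the package's real entry points); splits always take the next bucket in linear order, and only the trigger differs:
+
+    struct htable {
+            long    nkeys, nbuckets;
+            long    ffactor;
+    };
+
+    extern void split_next_bucket(struct htable *);
+
+    void
+    check_split(struct htable *t, int page_overflowed)
+    {
+            if (page_overflowed)                            /* uncontrolled */
+                    split_next_bucket(t);
+            else if (t->nkeys > t->ffactor * t->nbuckets)   /* controlled */
+                    split_next_bucket(t);
+    }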
+A hash table is parameterized by both its bucket size (bsize) and fill factor (ffactor). Whereas dynahash's buckets can be represented as a linked list of elements in memory, our package needs to support disk access, and must represent buckets in terms of pages. The bsize is the size (in bytes) of these pages. As in linear hashing, the number of buckets in the table is equal to the number of keys in the table divided by ffactor [6]. The controlled splitting occurs each time the number of keys in the table exceeds the fill factor multiplied by the number of buckets.
+Inserting keys and splitting buckets is performed precisely as described previously for dynahash. However, since buckets are now comprised of pages, we must be prepared to handle cases where the size of the keys and data in a bucket exceed the bucket size.
+Overflow Pages
+
+There are two cases where a key may not fit in its designated bucket. In the first case, the total size of the key and data may exceed the bucket size. In the second, addition of a new key could cause an overflow, but the bucket in question is not yet scheduled to be split. In existing implementations, the second case never arises (since buckets are split when they overflow) and the first case is not handled at all. Although large key/data pair handling is difficult and expensive, it is essential. In a linear hashed implementation, overflow pages are required for buckets which overflow before they are split, so we can use the same mechanism for large key/data pairs that we use for overflow pages. Logically, we chain overflow
+    [6] This is not strictly true. The file does not contract when keys are deleted, so the number of buckets is actually equal to the maximum number of keys ever present in the table divided by the fill factor.
+pages to the buckets (also called primary pages). In a memory based representation, overflow pages do not pose any special problems because we can chain overflow pages to primary pages using memory pointers. However, mapping these overflow pages into a disk file is more of a challenge, since we need to be able to address both bucket pages, whose numbers are growing linearly, and some indeterminate number of overflow pages without reorganizing the file.
+One simple solution would be to allocate a separate file for overflow pages. The disadvantage with such a technique is that it requires an extra file descriptor, an extra system call on open and close, and logically associating two independent files. For these reasons, we wanted to map both primary pages and overflow pages into the same file space.
+The buddy-in-waiting algorithm provides a mechanism to support multiple pages per logical bucket while retaining the simple split sequence of linear hashing. Overflow pages are preallocated between generations of primary pages. These overflow pages are used by any bucket containing more keys than fit on the primary page and are reclaimed, if possible, when the bucket later splits. Figure 3 depicts the layout of primary pages and overflow pages within the same file. Overflow page use information is recorded in bitmaps which are themselves stored on overflow pages. The addresses of the bitmap pages and the number of pages allocated at each split point are stored in the file header. Using this information, both overflow addresses and bucket addresses can be mapped to disk addresses by the following calculation:
+    int     bucket;          /* bucket address */
+    u_short oaddr;           /* OVERFLOW address */
+    int     nhdr_pages;      /* npages in file header */
+    int     spares[32];      /* npages at each split */
+    int     log2();          /* ceil(log base 2) */
+
+    #DEFINE BUCKET_TO_PAGE(bucket) \
+            bucket + nhdr_pages + \
+            (bucket?spares[logs2(bucket + 1)-1]:0)
+
+    #DEFINE OADDR_TO_PAGE(oaddr) \
+            BUCKET_TO_PAGE((1 << (oaddr>>11)) - 1) + \
+            oaddr & 0x7ff;
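+As printed, the fragment has a few rough edges (``#DEFINE'' rather than #define, logs2 versus the declared log2, and the unparenthesized ``+ oaddr & 0x7ff'', which C would evaluate as ``(... + oaddr) & 0x7ff''). A cleaned-up sketch of the same mapping, under the assumption that these are typesetting slips; nhdr_pages, spares, and log2 have the meanings given in the declarations above:
+
+    #define BUCKET_TO_PAGE(bucket) \
+            ((bucket) + nhdr_pages + ((bucket) ? spares[log2((bucket) + 1) - 1] : 0))
+
+    #define OADDR_TO_PAGE(oaddr) \
+            (BUCKET_TO_PAGE((1 << ((oaddr) >> 11)) - 1) + ((oaddr) & 0x7ff))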
+An overflow page is addressed by its split point, identifying the generations between which the overflow page is allocated, and its page number, identifying the particular page within the split point. In this implementation, offsets within pages are 16 bits long (limiting the maximum page size to 32K), so we select an overflow page addressing algorithm that can be expressed in 16 bits and which allows quick retrieval. The top five bits indicate the split point and the lower eleven indicate the page number within the split point. Since five bits are reserved for the split point, files may split 32 times, yielding a maximum file size of 2^32 buckets and 32*2^11 overflow pages. The maximum page size is 2^15, yielding a maximum file size greater than 131,000 GB (on file systems supporting files larger than 4GB).
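+A sketch of how such a 16-bit overflow address packs and unpacks (the macro names are illustrative):
+
+    /* Top five bits: split point; low eleven bits: page number within it. */
+    #define MAKE_OADDR(splitpt, pageno)   (((splitpt) << 11) | ((pageno) & 0x7ff))
+    #define OADDR_SPLIT(oaddr)            ((oaddr) >> 11)
+    #define OADDR_PAGENO(oaddr)           ((oaddr) & 0x7ff)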
+[Figure 3: Split points occur between generations and are numbered from 0. In this figure there are two overflow pages allocated at split point 1 and three allocated at split point 2.]
+Buffer Management
+
+The hash table is stored in memory as a logical array of bucket pointers. Physically, the array is arranged in segments of 256 pointers. Initially, there is space to allocate 256 segments. Reallocation occurs when the number of buckets exceeds 32K (256 * 256). Primary pages may be accessed directly through the array by bucket number and overflow pages are referenced logically by their overflow page address. For small hash tables, it is desirable to keep all pages in main memory while on larger tables, this is probably impossible. To satisfy both of these requirements, the package includes buffer management with LRU (least recently used) replacement.
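+A sketch of the segmented bucket-array indexing (constants and names are illustrative): looking up a bucket is a two-level index, so the array can grow without moving existing segments.
+
+    #define SEGMENT_SIZE    256
+    #define SEGMENT_SHIFT   8                       /* log2(SEGMENT_SIZE) */
+
+    typedef struct bufhead BUFHEAD;                 /* illustrative buffer header type */
+    extern BUFHEAD **segments[256];                 /* reallocated past 32K buckets */
+
+    #define GET_BUCKET(n) \
+            (segments[(n) >> SEGMENT_SHIFT][(n) & (SEGMENT_SIZE - 1)])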
+By default, the package allocates up to 64K bytes of buffered pages. All pages in the buffer pool are linked in LRU order to facilitate fast replacement. Whereas efficient access to primary pages is provided by the bucket array, efficient access to overflow pages is provided by linking overflow page buffers to their predecessor page (either the primary page or another overflow page). This means that an overflow page
+3 f
+432 5960(6)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+7 p
+%%Page: 7 7
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+720 258(Seltzer)N
+977(&)X
+1064(Yigit)X
+3278(A)X
+3356(New)X
+3528(Hashing)X
+3831(Package)X
+4136(for)X
+4259(UNIX)X
+1 f
+720 538(cannot)N
+955(be)X
+1052(present)X
+1305(in)X
+1388(the)X
+1507(buffer)X
+1724(pool)X
+1886(if)X
+1955(its)X
+2050(primary)X
+2324(page)X
+720 626(is)N
+804(not)X
+937(present.)X
+1240(This)X
+1413(does)X
+1591(not)X
+1724(impact)X
+1972(performance)X
+2409(or)X
+720 714(functionality,)N
+1209(because)X
+1524(an)X
+1660(over\257ow)X
+2005(page)X
+2217(will)X
+2400(be)X
+720 802(accessed)N
+1048(only)X
+1236(after)X
+1430(its)X
+1550(predecessor)X
+1975(page)X
+2172(has)X
+2324(been)X
+720 890(accessed.)N
+1068(Figure)X
+1303(4)X
+1369(depicts)X
+1622(the)X
+1746(data)X
+1905(structures)X
+2242(used)X
+2414(to)X
+720 978(manage)N
+990(the)X
+1108(buffer)X
+1325(pool.)X
+
+The in-memory bucket array contains pointers to buffer header structures
+which represent primary pages.  Buffer headers contain modified bits, the
+page address of the buffer, a pointer to the actual buffer, and a pointer
+to the buffer header for an overflow page if it exists, in addition to the
+LRU links.  If the buffer corresponding to a particular bucket is not in
+memory, its pointer is NULL.  In effect, pages are linked in three ways.
+Using the buffer headers, they are linked physically through the LRU links
+and the overflow links.  Using the pages themselves, they are linked
+logically through the overflow addresses on the page.  Since overflow
+pages are accessed only after their predecessor pages, they are removed
+from the buffer pool when their primary is removed.
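+
+A rough sketch of a buffer header carrying the fields listed above; the
+structure and field names are illustrative assumptions, not the package's
+actual declarations.
+
+    typedef struct _bufhead BUFHEAD;
+
+    struct _bufhead {
+        BUFHEAD  *lru_prev;     /* LRU links: every buffer sits on the LRU chain */
+        BUFHEAD  *lru_next;
+        BUFHEAD  *ovfl;         /* header of this page's overflow page, if any   */
+        unsigned  addr;         /* page address (bucket or overflow address)     */
+        int       flags;        /* the modified ("dirty") bit lives here         */
+        char     *page;         /* the actual page buffer                        */
+    };
+    #define BUF_MOD  0x01       /* page must be written out before eviction      */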
+
+[Figure 4 artwork: the "In Memory Bucket Array" points to buffer headers
+for primary pages B0, B5, and B10; an LRU chain threads the buffer
+headers; overflow page O1/1 hangs off B5's buffer header and is also
+reachable through the overflow address stored on B5's page.  Legend:
+buffer header, primary buffer, overflow buffer, overflow address, LRU
+chain.]
+
+Figure 4: Three primary pages (B0, B5, B10) are accessed directly from the
+bucket array.  The one overflow page (O1/1) is linked physically from its
+primary page's buffer header as well as logically from its predecessor
+page buffer (B5).
+
+Table Parameterization
+
+When a hash table is created, the bucket size, fill factor, initial number
+of elements, number of bytes of main memory used for caching, and a
+user-defined hash function may be specified.  The bucket size (and page
+size for overflow pages) defaults to 256 bytes.  For tables with large
+data items, it may be preferable to increase the page size, and,
+conversely, applications storing small items exclusively in memory may
+benefit from a smaller bucket size.  A bucket size smaller than 64 bytes
+is not recommended.
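+
+These creation-time parameters map naturally onto a single structure
+handed to the open call.  The sketch below uses the HASHINFO layout and
+dbopen() entry point of the later 4.4BSD hash(3) interface, which descends
+from this package; treat those names and the specific values as
+illustrative assumptions rather than the paper's literal API.
+
+    #include <sys/types.h>
+    #include <fcntl.h>
+    #include <db.h>
+
+    int
+    open_example(void)
+    {
+        HASHINFO info = {
+            256,        /* bsize:     bucket (and overflow page) size in bytes  */
+            8,          /* ffactor:   desired keys per bucket (fill factor)     */
+            0,          /* nelem:     expected number of elements (0 = unknown) */
+            65536,      /* cachesize: bytes of memory for the buffer pool       */
+            NULL,       /* hash:      user hash function, NULL = built-in       */
+            0           /* lorder:    byte order, 0 = host order                */
+        };
+        DB *db = dbopen("example.db", O_CREAT | O_RDWR, 0644, DB_HASH, &info);
+
+        if (db == NULL)
+            return (-1);
+        return (db->close(db));
+    }
+
+In that interface a zero (or NULL) field simply selects the corresponding
+default, so only the parameters of interest need to be filled in.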
+
+The fill factor indicates a desired density within the hash table.  It is
+an approximation of the number of keys allowed to accumulate in any one
+bucket, determining when the hash table grows.  Its default is eight.  If
+the user knows the average size of the key/data pairs being stored in the
+table, near optimal bucket sizes and fill factors may be selected by
+applying the equation:
+
+    (1)    ((average_pair_length + 4) * ffactor) >= bsize
+
+For highly time critical applications, experimenting with different bucket
+sizes and fill factors is encouraged.
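+
+Rearranged, equation 1 gives the smallest fill factor that suits a given
+bucket size and average pair length.  The helper below is purely
+illustrative arithmetic; the 28-byte average pair length in the example is
+an assumed figure, not a measurement from the paper.
+
+    /* Smallest ffactor with ((avg_pair_len + 4) * ffactor) >= bsize. */
+    static unsigned int
+    min_ffactor(unsigned int bsize, unsigned int avg_pair_len)
+    {
+        /* ceiling division of bsize by (avg_pair_len + 4) */
+        return ((bsize + avg_pair_len + 3) / (avg_pair_len + 4));
+    }
+
+    /*
+     * Example: with the default 256-byte buckets and pairs averaging
+     * 28 bytes, min_ffactor(256, 28) == 8, the default fill factor.
+     */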
+
+Figures 5a, b, and c illustrate the effects of varying page sizes and fill
+factors for the same data set.  The data set consisted of 24474 keys taken
+from an online dictionary.  The data value for each key was an ASCII
+string for an integer from 1 to 24474 inclusive.  The test run consisted
+of creating a new hash table (where the ultimate size of the table was
+known in advance), entering each key/data pair into the table and then
+retrieving each key/data pair from the table.  Each of the graphs shows
+the timings resulting from varying the pagesize from 128 bytes to 1M and
+the fill factor from 1 to 128.  For each run, the buffer size was set at
+1M.  The tests were all run on an HP 9000/370 (33.3 Mhz MC68030), with 16M
+of memory, 64K physically addressed cache, and an HP7959S disk drive,
+running 4.3BSD-Reno single-user.
+
+Both system time (Figure 5a) and elapsed time (Figure 5b) show that for
+all bucket sizes, the greatest performance gains are made by increasing
+the fill factor until equation 1 is satisfied.  The user time shown in
+Figure 5c gives a more detailed picture of how performance varies.  The
+smaller bucket sizes require fewer keys per page to satisfy equation 1 and
+therefore incur fewer collisions.  However, when the buffer pool size is
+fixed, smaller pages imply more pages.  An increased number of pages means
+more malloc(3) calls and more overhead in the hash package's buffer
+manager to manage the additional pages.
+
+The tradeoff works out most favorably when the page size is 256 and the
+fill factor is 8.  Similar conclusions were obtained if the test was run
+without knowing the final table size in advance.  If the file was closed
+and written to disk, the conclusions were still the same.  However,
+rereading the file from disk was slightly faster if a larger bucket size
+and fill factor were used (1K bucket size and 32 fill factor).  This
+follows intuitively from the improved efficiency of performing 1K reads
+from the disk rather than 256 byte reads.  In general, performance for
+disk based tables is best when the page size is approximately 1K.
+
+[Figure 5a artwork: system time in seconds (0-300) versus fill factor
+(0-128), one curve per bucket size from 128 to 8192 bytes.]
+
+Figure 5a: System Time for dictionary data set with 1M of buffer space and
+varying bucket sizes and fill factors.  Each line is labeled with its
+bucket size.
+
+[Figure 5b artwork: elapsed time in seconds (0-3200) versus fill factor
+(0-128), one curve per bucket size from 128 to 8192 bytes.]
+
+Figure 5b: Elapsed Time for dictionary data set with 1M of buffer space
+and varying bucket sizes and fill factors.  Each line is labeled with its
+bucket size.
+
+If an approximation of the number of elements ultimately to be stored in
+the hash table is known at the time of creation, the hash package takes
+this number as a parameter and uses it to hash entries into the full sized
+table rather than growing the table from a single bucket.  If this number
+is not known, the hash table starts with a single bucket and gracefully
+expands as elements are added, although a slight performance degradation
+may be noticed.  Figure 6 illustrates the difference in performance
+between storing keys in a file when the ultimate size is known (the left
+bars in each set), compared to building the file when the ultimate size is
+unknown (the right bars in each set).  Once the fill factor is
+sufficiently high for the page size (8), growing the table dynamically
+does little to degrade performance.
+
+[Figure 5c artwork: user time in seconds (0-15) versus fill factor
+(0-128), one curve per bucket size from 128 to 8192 bytes.]
+
+Figure 5c: User Time for dictionary data set with 1M of buffer space and
+varying bucket sizes and fill factors.  Each line is labeled with its
+bucket size.
+
+Since no known hash function performs equally well on all possible data,
+the user may find that the built-in hash function does poorly on a
+particular data set.  In this case, a hash function, taking two arguments
+(a pointer to a byte string and a length) and returning an unsigned long
+to be used as the hash value, may be specified at hash table creation
+time.  When an existing hash table is opened and a hash function is
+specified, the hash package will try to determine that the hash function
+supplied is the one with which the table was created.  There are a variety
+of hash functions provided with the package.  The default function for the
+package is the one which offered the best performance in terms of cycles
+executed per call (it did not produce the fewest collisions although it
+was within a small percentage of the function that produced the fewest
+collisions).  Again, in time critical applications, users are encouraged
+to experiment with a variety of hash functions to achieve optimal
+performance.
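+
+A user-supplied hash function has exactly the shape described above: it
+takes a pointer to a byte string and a length and returns the hash value.
+The sketch below again borrows the HASHINFO/dbopen names of the later
+4.4BSD interface, and my_hash itself is only a toy; both are assumptions
+for illustration, not the paper's code.
+
+    #include <sys/types.h>
+    #include <fcntl.h>
+    #include <db.h>
+
+    static u_int32_t
+    my_hash(const void *key, size_t len)
+    {
+        const unsigned char *p = key;
+        u_int32_t h = 0;
+
+        while (len-- > 0)               /* simple multiplicative hash */
+            h = h * 31 + *p++;
+        return (h);
+    }
+
+    int
+    open_with_hash(const char *path)
+    {
+        HASHINFO info = { 0 };          /* zero fields take the defaults  */
+        DB *db;
+
+        info.hash = my_hash;            /* override the built-in function */
+        db = dbopen(path, O_CREAT | O_RDWR, 0644, DB_HASH, &info);
+        return (db == NULL ? -1 : db->close(db));
+    }
+
+The same function must be supplied each time the table is reopened, which
+is what the consistency check described above guards against.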
+
+[Figure 6 artwork: grouped bars of user, system, and elapsed time in
+seconds (0-30) at fill factors 4, 8, 16, 32, and 64; within each group the
+left bar is the full size table and the right bar is the dynamically grown
+table.]
+
+Figure 6: The total regions indicate the difference between the elapsed
+time and the sum of the system and user time.  The left bar of each set
+depicts the timing of the test run when the number of entries is known in
+advance.  The right bars depict the timing when the file is grown from a
+single bucket.
+
+Since this hashing package provides buffer management, the amount of space
+allocated for the buffer pool may be specified by the user.  Using the
+same data set and test procedure as used to derive the graphs in Figures
+5a-c, Figure 7 shows the impact of varying the size of the buffer pool.
+The bucket size was set to 256 bytes and the fill factor was set to 16.
+The buffer pool size was varied from 0 (the minimum number of pages
+required to be buffered) to 1M.  With 1M of buffer space, the package
+performed no I/O for this data set.  As Figure 7 illustrates, increasing
+the buffer pool size can have a dramatic effect on resulting
+performance. [7]
+
+[7] Some allocators are extremely inefficient at allocating memory.  If
+you find that applications are running out of memory before you think they
+should, try varying the pagesize to get better utilization from the memory
+allocator.
+
+[Figure 7 artwork: user, system, and total time in seconds (0-60) versus
+buffer pool size in kilobytes (0-1024).]
+
+Figure 7: User time is virtually insensitive to the amount of buffer pool
+available; however, both system time and elapsed time are inversely
+proportional to the size of the buffer pool.  Even for large data sets
+where one expects few collisions, specifying a large buffer pool
+dramatically improves performance.
+
+Enhanced Functionality
+
+This hashing package provides a set of compatibility routines to implement
+the ndbm interface.  However, when the native interface is used, the
+following additional functionality is provided:
+
+  - Inserts never fail because too many keys hash to the same value.
+  - Inserts never fail because key and/or associated data is too large.
+  - Hash functions may be user-specified.
+  - Multiple pages may be cached in main memory.
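+
+Code already written to the ndbm interface needs no change to run on top
+of the compatibility routines mentioned above.  A minimal sketch of such
+code, using only the standard ndbm calls (nothing below is specific to the
+new package, and error handling is abbreviated):
+
+    #include <fcntl.h>
+    #include <ndbm.h>
+    #include <string.h>
+
+    int
+    ndbm_style_example(void)
+    {
+        DBM  *db = dbm_open("example", O_CREAT | O_RDWR, 0644);
+        datum key, val, out;
+
+        if (db == NULL)
+            return (-1);
+        key.dptr = "fruit";  key.dsize = strlen("fruit") + 1;
+        val.dptr = "apple";  val.dsize = strlen("apple") + 1;
+        (void)dbm_store(db, key, val, DBM_REPLACE);    /* insert/replace  */
+        out = dbm_fetch(db, key);                      /* keyed retrieval */
+        dbm_close(db);
+        return (out.dptr == NULL ? -1 : 0);
+    }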
+
+It also provides a set of compatibility routines to implement the hsearch
+interface.  Again, the native interface offers enhanced functionality:
+10 f
+2798 5039(g)N
+1 f
+2946(Files)X
+3121(may)X
+3279(grow)X
+3464(beyond)X
+2 f
+3720(nelem)X
+1 f
+3932(elements.)X
+10 f
+2798 5127(g)N
+1 f
+2946(Multiple)X
+3247(hash)X
+3420(tables)X
+3632(may)X
+3795(be)X
+3896(accessed)X
+4203(con-)X
+2946 5215(currently.)N
+10 f
+2798 5303(g)N
+1 f
+2946(Hash)X
+3134(tables)X
+3344(may)X
+3505(be)X
+3604(stored)X
+3823(and)X
+3962(accessed)X
+4266(on)X
+2946 5391(disk.)N
+10 f
+2798 5479(g)N
+1 f
+2946(Hash)X
+3155(functions)X
+3497(may)X
+3679(be)X
+3799(user-speci\256ed)X
+4288(at)X
+2946 5567(runtime.)N
+
+Relative Performance of the New Implementation
+
+The performance testing of the new package is divided into two test
+suites.  The first suite of tests requires that the tables be read from
+and written to disk.  In these tests, the basis for comparison is the
+4.3BSD-Reno version of ndbm.  Based on the designs of sdbm and gdbm, they
+are expected to perform similarly to ndbm, and we do not show their
+performance numbers.  The second suite contains the memory resident test
+which does not require that the files ever be written to disk, only that
+hash tables may be manipulated in main memory.  In this test, we compare
+the performance to that of the hsearch routines.
+
+For both suites, two different databases were used.  The first is the
+dictionary database described previously.  The second was constructed from
+a password file with approximately 300 accounts.  Two records were
+constructed for each account.  The first used the logname as the key and
+the remainder of the password entry for the data.  The second was keyed by
+uid and contained the entire password entry as its data field.  The tests
+were all run on the HP 9000 with the same configuration previously
+described.  Each test was run five times and the timing results of the
+runs were averaged.  The variance across the 5 runs was approximately 1%
+of the average yielding 95% confidence intervals of approximately 2%.
+1666(2%.)X
+3 f
+1021 3050(Disk)N
+1196(Based)X
+1420(Tests)X
+1 f
+604 3182(In)N
+693(these)X
+880(tests,)X
+1064(we)X
+1180(use)X
+1308(a)X
+1365(bucket)X
+1600(size)X
+1746(of)X
+1834(1024)X
+2015(and)X
+2152(a)X
+432 3270(\256ll)N
+540(factor)X
+748(of)X
+835(32.)X
+
+create test
+  The keys are entered into the hash table, and the file is flushed to
+  disk (a sketch of this loop appears after these descriptions).
+
+read test
+  A lookup is performed for each key in the hash table.
+
+verify test
+  A lookup is performed for each key in the hash table, and the data
+  returned is compared against that originally stored in the hash table.
+
+sequential retrieve
+  All keys are retrieved in sequential order from the hash table.  The
+  ndbm interface allows sequential retrieval of the keys from the
+  database, but does not return the data associated with each key.
+  Therefore, we compare the performance of the new package to two
+  different runs of ndbm.  In the first case, ndbm returns only the keys
+  while in the second, ndbm returns both the keys and the data (requiring
+  a second call to the library).  There is a single run for the new
+  library since it returns both the key and the data.
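+
+For concreteness, here is a rough sketch of what the create test's loop
+looks like when written against a dbopen-style interface.  The HASHINFO
+and DBT names follow the later 4.4BSD db(3) API, and create_test is a
+hypothetical driver, not the authors' actual test code.
+
+    #include <sys/types.h>
+    #include <fcntl.h>
+    #include <string.h>
+    #include <db.h>
+
+    int
+    create_test(const char *file, char *keys[], char *vals[], int n)
+    {
+        HASHINFO info = { 1024, 32, 0, 0, NULL, 0 };  /* bsize 1024, ffactor 32 */
+        DB *db = dbopen(file, O_CREAT | O_TRUNC | O_RDWR, 0644, DB_HASH, &info);
+        DBT key, data;
+        int i;
+
+        if (db == NULL)
+            return (-1);
+        for (i = 0; i < n; i++) {                     /* enter every key        */
+            key.data  = keys[i];  key.size  = strlen(keys[i]) + 1;
+            data.data = vals[i];  data.size = strlen(vals[i]) + 1;
+            if (db->put(db, &key, &data, 0) != 0)
+                break;
+        }
+        (void)db->sync(db, 0);                        /* flush the file to disk */
+        return (db->close(db));
+    }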
+
+In-Memory Test
+
+This test uses a bucket size of 256 and a fill factor of 8.
+
+create/read test
+  In this test, a hash table is created by inserting all the key/data
+  pairs.  Then a keyed retrieval is performed for each pair, and the hash
+  table is destroyed.
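+
+The baseline for this suite is the standard hsearch(3) interface.  A
+minimal sketch of its use follows; note that hsearch stores only pointers,
+so the application itself must keep every key and data item in memory (the
+limitation discussed in the results below).
+
+    #include <search.h>
+    #include <stdio.h>
+
+    int
+    hsearch_example(void)
+    {
+        ENTRY item, *found;
+
+        if (hcreate(300) == 0)              /* expected number of elements */
+            return (-1);
+        item.key  = "root";                 /* key and data must stay live */
+        item.data = "uid 0";
+        (void)hsearch(item, ENTER);         /* insert                      */
+        item.key  = "root";
+        found = hsearch(item, FIND);        /* keyed retrieval             */
+        if (found != NULL)
+            printf("%s -> %s\n", found->key, (char *)found->data);
+        hdestroy();
+        return (0);
+    }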
+
+Performance Results
+
+Figures 8a and 8b show the user time, system time, and elapsed time for
+each test for both the new implementation and the old implementation
+(hsearch or ndbm, whichever is appropriate) as well as the improvement.
+The improvement is expressed as a percentage of the old running time:
+
+    % = 100 * (old_time - new_time) / old_time
+
+In nearly all cases, the new routines perform better than the old routines
+(both hsearch and ndbm).  Although the create tests exhibit superior user
+time performance, the test time is dominated by the cost of writing the
+actual file to disk.  For the large database (the dictionary), this
+completely overwhelmed the system time.  However, for the small data base,
+we see that differences in both user and system time contribute to the
+superior performance of the new package.
+3691(new)X
+3845(package.)X
+2590 3418(The)N
+3 f
+2764(read)X
+1 f
+2920(,)X
+3 f
+2989(verify)X
+1 f
+3190(,)X
+3259(and)X
+3 f
+3424(sequential)X
+1 f
+3818(results)X
+4075(are)X
+2418 3506(deceptive)N
+2758(for)X
+2883(the)X
+3012(small)X
+3216(database)X
+3524(since)X
+3720(the)X
+3849(entire)X
+4063(test)X
+2418 3594(ran)N
+2551(in)X
+2643(under)X
+2856(a)X
+2922(second.)X
+3215(However,)X
+3560(on)X
+3669(the)X
+3796(larger)X
+4013(data-)X
+2418 3682(base)N
+2590(the)X
+3 f
+2716(read)X
+1 f
+2900(and)X
+3 f
+3044(verify)X
+1 f
+3273(tests)X
+3443(bene\256t)X
+3689(from)X
+3873(the)X
+3999(cach-)X
+2418 3770(ing)N
+2546(of)X
+2639(buckets)X
+2910(in)X
+2998(the)X
+3122(new)X
+3282(package)X
+3571(to)X
+3658(improve)X
+3950(perfor-)X
+2418 3858(mance)N
+2666(by)X
+2784(over)X
+2965(80%.)X
+3169(Since)X
+3384(the)X
+3519(\256rst)X
+3 f
+3680(sequential)X
+1 f
+4063(test)X
+2418 3946(does)N
+2598(not)X
+2733(require)X
+2 f
+2994(ndbm)X
+1 f
+3205(to)X
+3299(return)X
+3523(the)X
+3653(data)X
+3819(values,)X
+4076(the)X
+2418 4034(user)N
+2573(time)X
+2735(is)X
+2808(lower)X
+3011(than)X
+3169(for)X
+3283(the)X
+3401(new)X
+3555(package.)X
+3879(However)X
+2418 4122(when)N
+2613(we)X
+2728(require)X
+2977(both)X
+3139(packages)X
+3454(to)X
+3536(return)X
+3748(data,)X
+3922(the)X
+4040(new)X
+2418 4210(package)N
+2702(excels)X
+2923(in)X
+3005(all)X
+3105(three)X
+3286(timings.)X
+2590 4324(The)N
+2773(small)X
+3003(database)X
+3337(runs)X
+3532(so)X
+3660(quickly)X
+3957(in)X
+4076(the)X
+2418 4412(memory-resident)N
+3000(case)X
+3173(that)X
+3326(the)X
+3457(results)X
+3699(are)X
+3831(uninterest-)X
+2418 4500(ing.)N
+2589(However,)X
+2933(for)X
+3056(the)X
+3183(larger)X
+3400(database)X
+3706(the)X
+3833(new)X
+3995(pack-)X
+2418 4588(age)N
+2567(pays)X
+2751(a)X
+2824(small)X
+3033(penalty)X
+3305(in)X
+3403(system)X
+3661(time)X
+3839(because)X
+4130(it)X
+2418 4676(limits)N
+2636(its)X
+2748(main)X
+2944(memory)X
+3247(utilization)X
+3607(and)X
+3759(swaps)X
+3991(pages)X
+2418 4764(out)N
+2550(to)X
+2642(temporary)X
+3002(storage)X
+3264(in)X
+3356(the)X
+3484(\256le)X
+3616(system)X
+3868(while)X
+4076(the)X
+2 f
+2418 4852(hsearch)N
+1 f
+2698(package)X
+2988(requires)X
+3273(that)X
+3419(the)X
+3543(application)X
+3924(allocate)X
+2418 4940(enough)N
+2692(space)X
+2909(for)X
+3041(all)X
+3159(key/data)X
+3468(pair.)X
+3670(However,)X
+4022(even)X
+2418 5028(with)N
+2600(the)X
+2738(system)X
+3000(time)X
+3182(penalty,)X
+3477(the)X
+3614(resulting)X
+3933(elapsed)X
+2418 5116(time)N
+2580(improves)X
+2898(by)X
+2998(over)X
+3161(50%.)X
+3 f
+432 5960(10)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+11 p
+%%Page: 11 11
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+720 258(Seltzer)N
+977(&)X
+1064(Yigit)X
+3278(A)X
+3356(New)X
+3528(Hashing)X
+3831(Package)X
+4136(for)X
+4259(UNIX)X
+1 f
+10 f
+908 454(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2 f
+1379 546(hash)N
+1652(ndbm)X
+1950(%change)X
+1 f
+10 f
+908 550(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+948 642(CREATE)N
+10 f
+908 646(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1125 738(user)N
+1424(6.4)X
+1671(12.2)X
+2073(48)X
+1157 826(sys)N
+1384(32.5)X
+1671(34.7)X
+2113(6)X
+3 f
+1006 914(elapsed)N
+10 f
+1310 922(c)N
+890(c)Y
+810(c)Y
+730(c)Y
+3 f
+1384 914(90.4)N
+10 f
+1581 922(c)N
+890(c)Y
+810(c)Y
+730(c)Y
+3 f
+1671 914(99.6)N
+10 f
+1883 922(c)N
+890(c)Y
+810(c)Y
+730(c)Y
+3 f
+2113 914(9)N
+1 f
+10 f
+908 910(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+908 926(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+948 1010(READ)N
+10 f
+908 1014(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1125 1106(user)N
+1424(3.4)X
+1711(6.1)X
+2073(44)X
+1157 1194(sys)N
+1424(1.2)X
+1671(15.3)X
+2073(92)X
+3 f
+1006 1282(elapsed)N
+10 f
+1310 1290(c)N
+1258(c)Y
+1178(c)Y
+1098(c)Y
+3 f
+1424 1282(4.0)N
+10 f
+1581 1290(c)N
+1258(c)Y
+1178(c)Y
+1098(c)Y
+3 f
+1671 1282(21.2)N
+10 f
+1883 1290(c)N
+1258(c)Y
+1178(c)Y
+1098(c)Y
+3 f
+2073 1282(81)N
+1 f
+10 f
+908 1278(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+908 1294(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+948 1378(VERIFY)N
+10 f
+908 1382(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1125 1474(user)N
+1424(3.5)X
+1711(6.3)X
+2073(44)X
+1157 1562(sys)N
+1424(1.2)X
+1671(15.3)X
+2073(92)X
+3 f
+1006 1650(elapsed)N
+10 f
+1310 1658(c)N
+1626(c)Y
+1546(c)Y
+1466(c)Y
+3 f
+1424 1650(4.0)N
+10 f
+1581 1658(c)N
+1626(c)Y
+1546(c)Y
+1466(c)Y
+3 f
+1671 1650(21.2)N
+10 f
+1883 1658(c)N
+1626(c)Y
+1546(c)Y
+1466(c)Y
+3 f
+2073 1650(81)N
+1 f
+10 f
+908 1646(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+908 1662(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+948 1746(SEQUENTIAL)N
+10 f
+908 1750(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1125 1842(user)N
+1424(2.7)X
+1711(1.9)X
+2046(-42)X
+1157 1930(sys)N
+1424(0.7)X
+1711(3.9)X
+2073(82)X
+3 f
+1006 2018(elapsed)N
+10 f
+1310 2026(c)N
+1994(c)Y
+1914(c)Y
+1834(c)Y
+3 f
+1424 2018(3.0)N
+10 f
+1581 2026(c)N
+1994(c)Y
+1914(c)Y
+1834(c)Y
+3 f
+1711 2018(5.0)N
+10 f
+1883 2026(c)N
+1994(c)Y
+1914(c)Y
+1834(c)Y
+3 f
+2073 2018(40)N
+1 f
+10 f
+908 2014(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+908 2030(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+948 2114(SEQUENTIAL)N
+1467(\(with)X
+1656(data)X
+1810(retrieval\))X
+10 f
+908 2118(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1125 2210(user)N
+1424(2.7)X
+1711(8.2)X
+2073(67)X
+1157 2298(sys)N
+1424(0.7)X
+1711(4.3)X
+2073(84)X
+3 f
+1006 2386(elapsed)N
+1424(3.0)X
+1671(12.0)X
+2073(75)X
+1 f
+10 f
+908 2390(i)N
+927(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+899 2394(c)N
+2378(c)Y
+2298(c)Y
+2218(c)Y
+2138(c)Y
+2058(c)Y
+1978(c)Y
+1898(c)Y
+1818(c)Y
+1738(c)Y
+1658(c)Y
+1578(c)Y
+1498(c)Y
+1418(c)Y
+1338(c)Y
+1258(c)Y
+1178(c)Y
+1098(c)Y
+1018(c)Y
+938(c)Y
+858(c)Y
+778(c)Y
+698(c)Y
+618(c)Y
+538(c)Y
+1310 2394(c)N
+2362(c)Y
+2282(c)Y
+2202(c)Y
+1581 2394(c)N
+2362(c)Y
+2282(c)Y
+2202(c)Y
+1883 2394(c)N
+2362(c)Y
+2282(c)Y
+2202(c)Y
+2278 2394(c)N
+2378(c)Y
+2298(c)Y
+2218(c)Y
+2138(c)Y
+2058(c)Y
+1978(c)Y
+1898(c)Y
+1818(c)Y
+1738(c)Y
+1658(c)Y
+1578(c)Y
+1498(c)Y
+1418(c)Y
+1338(c)Y
+1258(c)Y
+1178(c)Y
+1098(c)Y
+1018(c)Y
+938(c)Y
+858(c)Y
+778(c)Y
+698(c)Y
+618(c)Y
+538(c)Y
+905 2574(i)N
+930(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2 f
+1318 2666(hash)N
+1585(hsearch)X
+1953(%change)X
+1 f
+10 f
+905 2670(i)N
+930(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+945 2762(CREATE/READ)N
+10 f
+905 2766(i)N
+930(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+1064 2858(user)N
+1343(6.6)X
+1642(17.2)X
+2096(62)X
+1096 2946(sys)N
+1343(1.1)X
+1682(0.3)X
+2029(-266)X
+3 f
+945 3034(elapsed)N
+1343(7.8)X
+1642(17.0)X
+2096(54)X
+1 f
+10 f
+905 3038(i)N
+930(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+896 3050(c)N
+2978(c)Y
+2898(c)Y
+2818(c)Y
+2738(c)Y
+2658(c)Y
+1249 3034(c)N
+3010(c)Y
+2930(c)Y
+2850(c)Y
+1520 3034(c)N
+3010(c)Y
+2930(c)Y
+2850(c)Y
+1886 3034(c)N
+3010(c)Y
+2930(c)Y
+2850(c)Y
+2281 3050(c)N
+2978(c)Y
+2898(c)Y
+2818(c)Y
+2738(c)Y
+2658(c)Y
+3 f
+720 3174(Figure)N
+967(8a:)X
+1 f
+1094(Timing)X
+1349(results)X
+1578(for)X
+1692(the)X
+1810(dictionary)X
+2155(database.)X
+10 f
+720 3262 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+3 f
+1407 3504(Conclusion)N
+1 f
+892 3636(This)N
+1063(paper)X
+1271(has)X
+1407(presented)X
+1744(the)X
+1871(design,)X
+2129(implemen-)X
+720 3724(tation)N
+928(and)X
+1070(performance)X
+1503(of)X
+1596(a)X
+1658(new)X
+1818(hashing)X
+2093(package)X
+2382(for)X
+720 3812(UNIX.)N
+993(The)X
+1150(new)X
+1316(package)X
+1612(provides)X
+1919(a)X
+1986(superset)X
+2280(of)X
+2378(the)X
+720 3900(functionality)N
+1159(of)X
+1255(existing)X
+1537(hashing)X
+1815(packages)X
+2139(and)X
+2284(incor-)X
+720 3988(porates)N
+975(additional)X
+1318(features)X
+1596(such)X
+1766(as)X
+1855(large)X
+2038(key)X
+2176(handling,)X
+720 4076(user)N
+876(de\256ned)X
+1134(hash)X
+1302(functions,)X
+1641(multiple)X
+1928(hash)X
+2096(tables,)X
+2324(vari-)X
+720 4164(able)N
+894(sized)X
+1099(pages,)X
+1342(and)X
+1498(linear)X
+1721(hashing.)X
+2050(In)X
+2156(nearly)X
+2396(all)X
+720 4252(cases,)N
+954(the)X
+1096(new)X
+1274(package)X
+1582(provides)X
+1902(improved)X
+2252(perfor-)X
+720 4340(mance)N
+974(on)X
+1098(the)X
+1240(order)X
+1454(of)X
+1565(50-80%)X
+1863(for)X
+2001(the)X
+2142(workloads)X
+720 4428(shown.)N
+990(Applications)X
+1420(such)X
+1588(as)X
+1676(the)X
+1794(loader,)X
+2035(compiler,)X
+2360(and)X
+720 4516(mail,)N
+921(which)X
+1156(currently)X
+1485(implement)X
+1866(their)X
+2051(own)X
+2227(hashing)X
+720 4604(routines,)N
+1032(should)X
+1279(be)X
+1389(modi\256ed)X
+1706(to)X
+1801(use)X
+1941(the)X
+2072(generic)X
+2342(rou-)X
+720 4692(tines.)N
+892 4806(This)N
+1087(hashing)X
+1389(package)X
+1705(is)X
+1810(one)X
+1978(access)X
+2236(method)X
+720 4894(which)N
+953(is)X
+1043(part)X
+1205(of)X
+1309(a)X
+1382(generic)X
+1656(database)X
+1970(access)X
+2212(package)X
+720 4982(being)N
+955(developed)X
+1342(at)X
+1457(the)X
+1612(University)X
+2007(of)X
+2131(California,)X
+720 5070(Berkeley.)N
+1089(It)X
+1177(will)X
+1340(include)X
+1614(a)X
+1688(btree)X
+1887(access)X
+2131(method)X
+2409(as)X
+720 5158(well)N
+916(as)X
+1041(\256xed)X
+1259(and)X
+1433(variable)X
+1750(length)X
+2007(record)X
+2270(access)X
+720 5246(methods)N
+1024(in)X
+1119(addition)X
+1414(to)X
+1509(the)X
+1640(hashed)X
+1896(support)X
+2168(presented)X
+720 5334(here.)N
+948(All)X
+1099(of)X
+1215(the)X
+1361(access)X
+1615(methods)X
+1934(are)X
+2081(based)X
+2312(on)X
+2440(a)X
+720 5422(key/data)N
+1037(pair)X
+1207(interface)X
+1533(and)X
+1693(appear)X
+1952(identical)X
+2272(to)X
+2378(the)X
+720 5510(application)N
+1121(layer,)X
+1347(allowing)X
+1671(application)X
+2071(implementa-)X
+720 5598(tions)N
+906(to)X
+999(be)X
+1106(largely)X
+1360(independent)X
+1783(of)X
+1881(the)X
+2010(database)X
+2318(type.)X
+720 5686(The)N
+873(package)X
+1165(is)X
+1246(expected)X
+1560(to)X
+1650(be)X
+1754(an)X
+1858(integral)X
+2131(part)X
+2284(of)X
+2378(the)X
+2706 538(4.4BSD)N
+3006(system,)X
+3293(with)X
+3479(various)X
+3759(standard)X
+4075(applications)X
+2706 626(such)N
+2879(as)X
+2972(more\(1\),)X
+3277(sort\(1\))X
+3517(and)X
+3659(vi\(1\))X
+3841(based)X
+4050(on)X
+4156(it.)X
+4266(While)X
+2706 714(the)N
+2833(current)X
+3089(design)X
+3326(does)X
+3501(not)X
+3631(support)X
+3899(multi-user)X
+4256(access)X
+2706 802(or)N
+2804(transactions,)X
+3238(they)X
+3407(could)X
+3616(be)X
+3723(incorporated)X
+4159(relatively)X
+2706 890(easily.)N
+10 f
+2894 938(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2 f
+3365 1030(hash)N
+3638(ndbm)X
+3936(%change)X
+1 f
+10 f
+2894 1034(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2934 1126(CREATE)N
+10 f
+2894 1130(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3111 1222(user)N
+3390(0.2)X
+3677(0.4)X
+4079(50)X
+3143 1310(sys)N
+3390(0.1)X
+3677(1.0)X
+4079(90)X
+3 f
+2992 1398(elapsed)N
+10 f
+3296 1406(c)N
+1374(c)Y
+1294(c)Y
+1214(c)Y
+3 f
+3390 1398(0)N
+10 f
+3567 1406(c)N
+1374(c)Y
+1294(c)Y
+1214(c)Y
+3 f
+3677 1398(3.2)N
+10 f
+3869 1406(c)N
+1374(c)Y
+1294(c)Y
+1214(c)Y
+3 f
+4039 1398(100)N
+1 f
+10 f
+2894 1394(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2894 1410(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2934 1494(READ)N
+10 f
+2894 1498(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3111 1590(user)N
+3390(0.1)X
+3677(0.1)X
+4119(0)X
+3143 1678(sys)N
+3390(0.1)X
+3677(0.4)X
+4079(75)X
+3 f
+2992 1766(elapsed)N
+10 f
+3296 1774(c)N
+1742(c)Y
+1662(c)Y
+1582(c)Y
+3 f
+3390 1766(0.0)N
+10 f
+3567 1774(c)N
+1742(c)Y
+1662(c)Y
+1582(c)Y
+3 f
+3677 1766(0.0)N
+10 f
+3869 1774(c)N
+1742(c)Y
+1662(c)Y
+1582(c)Y
+3 f
+4119 1766(0)N
+1 f
+10 f
+2894 1762(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2894 1778(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2934 1862(VERIFY)N
+10 f
+2894 1866(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3111 1958(user)N
+3390(0.1)X
+3677(0.2)X
+4079(50)X
+3143 2046(sys)N
+3390(0.1)X
+3677(0.3)X
+4079(67)X
+3 f
+2992 2134(elapsed)N
+10 f
+3296 2142(c)N
+2110(c)Y
+2030(c)Y
+1950(c)Y
+3 f
+3390 2134(0.0)N
+10 f
+3567 2142(c)N
+2110(c)Y
+2030(c)Y
+1950(c)Y
+3 f
+3677 2134(0.0)N
+10 f
+3869 2142(c)N
+2110(c)Y
+2030(c)Y
+1950(c)Y
+3 f
+4119 2134(0)N
+1 f
+10 f
+2894 2130(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2894 2146(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2934 2230(SEQUENTIAL)N
+10 f
+2894 2234(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3111 2326(user)N
+3390(0.1)X
+3677(0.0)X
+4012(-100)X
+3143 2414(sys)N
+3390(0.1)X
+3677(0.1)X
+4119(0)X
+3 f
+2992 2502(elapsed)N
+10 f
+3296 2510(c)N
+2478(c)Y
+2398(c)Y
+2318(c)Y
+3 f
+3390 2502(0.0)N
+10 f
+3567 2510(c)N
+2478(c)Y
+2398(c)Y
+2318(c)Y
+3 f
+3677 2502(0.0)N
+10 f
+3869 2510(c)N
+2478(c)Y
+2398(c)Y
+2318(c)Y
+3 f
+4119 2502(0)N
+1 f
+10 f
+2894 2498(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2894 2514(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2934 2598(SEQUENTIAL)N
+3453(\(with)X
+3642(data)X
+3796(retrieval\))X
+10 f
+2894 2602(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3111 2694(user)N
+3390(0.1)X
+3677(0.1)X
+4119(0)X
+3143 2782(sys)N
+3390(0.1)X
+3677(0.1)X
+4119(0)X
+3 f
+2992 2870(elapsed)N
+3390(0.0)X
+3677(0.0)X
+4119(0)X
+1 f
+10 f
+2894 2874(i)N
+2913(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2885 2878(c)N
+2862(c)Y
+2782(c)Y
+2702(c)Y
+2622(c)Y
+2542(c)Y
+2462(c)Y
+2382(c)Y
+2302(c)Y
+2222(c)Y
+2142(c)Y
+2062(c)Y
+1982(c)Y
+1902(c)Y
+1822(c)Y
+1742(c)Y
+1662(c)Y
+1582(c)Y
+1502(c)Y
+1422(c)Y
+1342(c)Y
+1262(c)Y
+1182(c)Y
+1102(c)Y
+1022(c)Y
+3296 2878(c)N
+2846(c)Y
+2766(c)Y
+2686(c)Y
+3567 2878(c)N
+2846(c)Y
+2766(c)Y
+2686(c)Y
+3869 2878(c)N
+2846(c)Y
+2766(c)Y
+2686(c)Y
+4264 2878(c)N
+2862(c)Y
+2782(c)Y
+2702(c)Y
+2622(c)Y
+2542(c)Y
+2462(c)Y
+2382(c)Y
+2302(c)Y
+2222(c)Y
+2142(c)Y
+2062(c)Y
+1982(c)Y
+1902(c)Y
+1822(c)Y
+1742(c)Y
+1662(c)Y
+1582(c)Y
+1502(c)Y
+1422(c)Y
+1342(c)Y
+1262(c)Y
+1182(c)Y
+1102(c)Y
+1022(c)Y
+2891 3058(i)N
+2916(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2 f
+3304 3150(hash)N
+3571(hsearch)X
+3939(%change)X
+1 f
+10 f
+2891 3154(i)N
+2916(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2931 3246(CREATE/READ)N
+10 f
+2891 3250(i)N
+2916(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+3050 3342(user)N
+3329(0.3)X
+3648(0.4)X
+4048(25)X
+3082 3430(sys)N
+3329(0.0)X
+3648(0.0)X
+4088(0)X
+3 f
+2931 3518(elapsed)N
+3329(0.0)X
+3648(0.0)X
+4088(0)X
+1 f
+10 f
+2891 3522(i)N
+2916(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2882 3534(c)N
+3462(c)Y
+3382(c)Y
+3302(c)Y
+3222(c)Y
+3142(c)Y
+3235 3518(c)N
+3494(c)Y
+3414(c)Y
+3334(c)Y
+3506 3518(c)N
+3494(c)Y
+3414(c)Y
+3334(c)Y
+3872 3518(c)N
+3494(c)Y
+3414(c)Y
+3334(c)Y
+4267 3534(c)N
+3462(c)Y
+3382(c)Y
+3302(c)Y
+3222(c)Y
+3142(c)Y
+3 f
+2706 3658(Figure)N
+2953(8b:)X
+1 f
+3084(Timing)X
+3339(results)X
+3568(for)X
+3682(the)X
+3800(password)X
+4123(database.)X
+10 f
+2706 3746 -0.0930(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)AN
+3 f
+3396 3988(References)N
+1 f
+2706 4120([ATT79])N
+3058(AT&T,)X
+3358(DBM\(3X\),)X
+2 f
+3773(Unix)X
+3990(Programmer's)X
+2878 4208(Manual,)N
+3194(Seventh)X
+3491(Edition,)X
+3793(Volume)X
+4085(1)X
+1 f
+(,)S
+4192(January,)X
+2878 4296(1979.)N
+2706 4472([ATT85])N
+3027(AT&T,)X
+3296(HSEARCH\(BA_LIB\),)X
+2 f
+4053(Unix)X
+4239(System)X
+2878 4560(User's)N
+3112(Manual,)X
+3401(System)X
+3644(V.3)X
+1 f
+3753(,)X
+3793(pp.)X
+3913(506-508,)X
+4220(1985.)X
+2706 4736([BRE73])N
+3025(Brent,)X
+3253(Richard)X
+3537(P.,)X
+3651(``Reducing)X
+4041(the)X
+4168(Retrieval)X
+2878 4824(Time)N
+3071(of)X
+3162(Scatter)X
+3409(Storage)X
+3678(Techniques'',)X
+2 f
+4146(Commun-)X
+2878 4912(ications)N
+3175(of)X
+3281(the)X
+3422(ACM)X
+1 f
+3591(,)X
+3654(Volume)X
+3955(16,)X
+4098(No.)X
+4259(2,)X
+4362(pp.)X
+2878 5000(105-109,)N
+3185(February,)X
+3515(1973.)X
+2706 5176([BSD86])N
+3055(NDBM\(3\),)X
+2 f
+3469(4.3BSD)X
+3775(Unix)X
+3990(Programmer's)X
+2878 5264(Manual)N
+3155(Reference)X
+3505(Guide)X
+1 f
+3701(,)X
+3749(University)X
+4114(of)X
+4208(Califor-)X
+2878 5352(nia,)N
+3016(Berkeley,)X
+3346(1986.)X
+2706 5528([ENB88])N
+3025(Enbody,)X
+3319(R.)X
+3417(J.,)X
+3533(Du,)X
+3676(H.)X
+3779(C.,)X
+3897(``Dynamic)X
+4270(Hash-)X
+2878 5616(ing)N
+3034(Schemes'',)X
+2 f
+3427(ACM)X
+3630(Computing)X
+4019(Surveys)X
+1 f
+4269(,)X
+4322(Vol.)X
+2878 5704(20,)N
+2998(No.)X
+3136(2,)X
+3216(pp.)X
+3336(85-113,)X
+3603(June)X
+3770(1988.)X
+3 f
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+4384(11)X
+
+12 p
+%%Page: 12 12
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+432 258(A)N
+510(New)X
+682(Hashing)X
+985(Package)X
+1290(for)X
+1413(UNIX)X
+3663(Seltzer)X
+3920(&)X
+4007(Yigit)X
+1 f
+432 538([FAG79])N
+776(Ronald)X
+1057(Fagin,)X
+1308(Jurg)X
+1495(Nievergelt,)X
+1903(Nicholas)X
+604 626(Pippenger,)N
+1003(H.)X
+1135(Raymond)X
+1500(Strong,)X
+1787(``Extendible)X
+604 714(Hashing)N
+901(--)X
+985(A)X
+1073(Fast)X
+1236(Access)X
+1493(Method)X
+1771(for)X
+1894(Dynamic)X
+604 802(Files'',)N
+2 f
+855(ACM)X
+1046(Transactions)X
+1485(on)X
+1586(Database)X
+1914(Systems)X
+1 f
+2168(,)X
+604 890(Volume)N
+882(4,)X
+962(No.)X
+1100(3.,)X
+1200(September)X
+1563(1979,)X
+1763(pp)X
+1863(315-34)X
+432 1066([KNU68],)N
+802(Knuth,)X
+1064(D.E.,)X
+2 f
+1273(The)X
+1434(Art)X
+1577(of)X
+1680(Computer)X
+2041(Pro-)X
+604 1154(gramming)N
+971(Vol.)X
+1140(3:)X
+1245(Sorting)X
+1518(and)X
+1676(Searching)X
+1 f
+2001(,)X
+2058(sec-)X
+604 1242(tions)N
+779(6.3-6.4,)X
+1046(pp)X
+1146(481-550.)X
+432 1418([LAR78])N
+747(Larson,)X
+1011(Per-Ake,)X
+1319(``Dynamic)X
+1687(Hashing'',)X
+2 f
+2048(BIT)X
+1 f
+(,)S
+604 1506(Vol.)N
+764(18,)X
+884(1978,)X
+1084(pp.)X
+1204(184-201.)X
+432 1682([LAR88])N
+752(Larson,)X
+1021(Per-Ake,)X
+1335(``Dynamic)X
+1709(Hash)X
+1900(Tables'',)X
+2 f
+604 1770(Communications)N
+1183(of)X
+1281(the)X
+1415(ACM)X
+1 f
+1584(,)X
+1640(Volume)X
+1934(31,)X
+2070(No.)X
+604 1858(4.,)N
+704(April)X
+893(1988,)X
+1093(pp)X
+1193(446-457.)X
+432 2034([LIT80])N
+731(Witold,)X
+1013(Litwin,)X
+1286(``Linear)X
+1590(Hashing:)X
+1939(A)X
+2036(New)X
+604 2122(Tool)N
+786(for)X
+911(File)X
+1065(and)X
+1211(Table)X
+1424(Addressing'',)X
+2 f
+1893(Proceed-)X
+604 2210(ings)N
+761(of)X
+847(the)X
+969(6th)X
+1095(International)X
+1540(Conference)X
+1933(on)X
+2036(Very)X
+604 2298(Large)N
+815(Databases)X
+1 f
+1153(,)X
+1193(1980.)X
+432 2474([NEL90])N
+743(Nelson,)X
+1011(Philip)X
+1222(A.,)X
+2 f
+1341(Gdbm)X
+1558(1.4)X
+1679(source)X
+1913(distribu-)X
+604 2562(tion)N
+748(and)X
+888(README)X
+1 f
+1209(,)X
+1249(August)X
+1500(1990.)X
+432 2738([THOM90])N
+840(Ken)X
+1011(Thompson,)X
+1410(private)X
+1670(communication,)X
+604 2826(Nov.)N
+782(1990.)X
+432 3002([TOR87])N
+790(Torek,)X
+1066(C.,)X
+1222(``Re:)X
+1470(dbm.a)X
+1751(and)X
+1950(ndbm.a)X
+604 3090(archives'',)N
+2 f
+966(USENET)X
+1279(newsgroup)X
+1650(comp.unix)X
+1 f
+2002(1987.)X
+432 3266([TOR88])N
+760(Torek,)X
+1006(C.,)X
+1133(``Re:)X
+1351(questions)X
+1686(regarding)X
+2027(data-)X
+604 3354(bases)N
+826(created)X
+1106(with)X
+1295(dbm)X
+1484(and)X
+1647(ndbm)X
+1876(routines'')X
+2 f
+604 3442(USENET)N
+937(newsgroup)X
+1328(comp.unix.questions)X
+1 f
+1982(,)X
+2041(June)X
+604 3530(1988.)N
+432 3706([WAL84])N
+773(Wales,)X
+1018(R.,)X
+1135(``Discussion)X
+1564(of)X
+1655("dbm")X
+1887(data)X
+2045(base)X
+604 3794(system'',)N
+2 f
+973(USENET)X
+1339(newsgroup)X
+1762(unix.wizards)X
+1 f
+2168(,)X
+604 3882(January,)N
+894(1984.)X
+432 4058([YIG89])N
+751(Ozan)X
+963(S.)X
+1069(Yigit,)X
+1294(``How)X
+1545(to)X
+1648(Roll)X
+1826(Your)X
+2032(Own)X
+604 4146(Dbm/Ndbm'',)N
+2 f
+1087(unpublished)X
+1504(manuscript)X
+1 f
+(,)S
+1910(Toronto,)X
+604 4234(July,)N
+777(1989)X
+3 f
+432 5960(12)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+13 p
+%%Page: 13 13
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+720 258(Seltzer)N
+977(&)X
+1064(Yigit)X
+3278(A)X
+3356(New)X
+3528(Hashing)X
+3831(Package)X
+4136(for)X
+4259(UNIX)X
+1 f
+720 538(Margo)N
+960(I.)X
+1033(Seltzer)X
+1282(is)X
+1361(a)X
+1423(Ph.D.)X
+1631(student)X
+1887(in)X
+1974(the)X
+2097(Department)X
+720 626(of)N
+823(Electrical)X
+1167(Engineering)X
+1595(and)X
+1747(Computer)X
+2102(Sciences)X
+2418(at)X
+720 714(the)N
+850(University)X
+1220(of)X
+1318(California,)X
+1694(Berkeley.)X
+2055(Her)X
+2207(research)X
+720 802(interests)N
+1017(include)X
+1283(\256le)X
+1415(systems,)X
+1718(databases,)X
+2076(and)X
+2221(transac-)X
+720 890(tion)N
+896(processing)X
+1291(systems.)X
+1636(She)X
+1807(spent)X
+2027(several)X
+2306(years)X
+720 978(working)N
+1026(at)X
+1123(startup)X
+1380(companies)X
+1762(designing)X
+2112(and)X
+2267(imple-)X
+720 1066(menting)N
+1048(\256le)X
+1216(systems)X
+1535(and)X
+1716(transaction)X
+2133(processing)X
+720 1154(software)N
+1026(and)X
+1170(designing)X
+1509(microprocessors.)X
+2103(Ms.)X
+2253(Seltzer)X
+720 1242(received)N
+1057(her)X
+1223(AB)X
+1397(in)X
+1522(Applied)X
+1843(Mathematics)X
+2320(from)X
+720 1330 0.1953(Harvard/Radcliffe)AN
+1325(College)X
+1594(in)X
+1676(1983.)X
+720 1444(In)N
+810(her)X
+936(spare)X
+1129(time,)X
+1313(Margo)X
+1549(can)X
+1683(usually)X
+1936(be)X
+2034(found)X
+2243(prepar-)X
+720 1532(ing)N
+868(massive)X
+1171(quantities)X
+1527(of)X
+1639(food)X
+1831(for)X
+1970(hungry)X
+2242(hoards,)X
+720 1620(studying)N
+1022(Japanese,)X
+1355(or)X
+1449(playing)X
+1716(soccer)X
+1948(with)X
+2116(an)X
+2218(exciting)X
+720 1708(Bay)N
+912(Area)X
+1132(Women's)X
+1507(Soccer)X
+1788(team,)X
+2026(the)X
+2186(Berkeley)X
+720 1796(Bruisers.)N
+720 1910(Ozan)N
+915(\()X
+3 f
+942(Oz)X
+1 f
+1040(\))X
+1092(Yigit)X
+1281(is)X
+1358(currently)X
+1672(a)X
+1732(software)X
+2033(engineer)X
+2334(with)X
+720 1998(the)N
+886(Communications)X
+1499(Research)X
+1861(and)X
+2044(Development)X
+720 2086(group,)N
+948(Computing)X
+1328(Services,)X
+1641(York)X
+1826(University.)X
+2224(His)X
+2355(for-)X
+720 2174(mative)N
+967(years)X
+1166(were)X
+1352(also)X
+1510(spent)X
+1708(at)X
+1795(York,)X
+2009(where)X
+2234(he)X
+2338(held)X
+720 2262(system)N
+985(programmer)X
+1425(and)X
+1583(administrator)X
+2052(positions)X
+2382(for)X
+720 2350(various)N
+995(mixtures)X
+1314(of)X
+1420(of)X
+1526(UNIX)X
+1765(systems)X
+2056(starting)X
+2334(with)X
+720 2438(Berkeley)N
+1031(4.1)X
+1151(in)X
+1233(1982,)X
+1433(while)X
+1631(at)X
+1709(the)X
+1827(same)X
+2012(time)X
+2174(obtaining)X
+720 2526(a)N
+776(degree)X
+1011(in)X
+1093(Computer)X
+1433(Science.)X
+720 2640(In)N
+813(his)X
+931(copious)X
+1205(free)X
+1356(time,)X
+1543(Oz)X
+1662(enjoys)X
+1896(working)X
+2188(on)X
+2293(what-)X
+720 2728(ever)N
+890(software)X
+1197(looks)X
+1400(interesting,)X
+1788(which)X
+2014(often)X
+2209(includes)X
+720 2816(language)N
+1044(interpreters,)X
+1464(preprocessors,)X
+1960(and)X
+2110(lately,)X
+2342(pro-)X
+720 2904(gram)N
+905(generators)X
+1260(and)X
+1396(expert)X
+1617(systems.)X
+720 3018(Oz)N
+836(has)X
+964(authored)X
+1266(several)X
+1515(public-domain)X
+2003(software)X
+2301(tools,)X
+720 3106(including)N
+1069(an)X
+1191(nroff-like)X
+1545(text)X
+1711(formatter)X
+2 f
+2056(proff)X
+1 f
+2257(that)X
+2423(is)X
+720 3194(apparently)N
+1083(still)X
+1226(used)X
+1397(in)X
+1483(some)X
+1676(basement)X
+2002(PCs.)X
+2173(His)X
+2307(latest)X
+720 3282(obsessions)N
+1143(include)X
+1460(the)X
+1639(incredible)X
+2040(programming)X
+720 3370(language)N
+1030(Scheme,)X
+1324(and)X
+1460(Chinese)X
+1738(Brush)X
+1949(painting.)X
+3 f
+720 5960(USENIX)N
+9 f
+1042(-)X
+3 f
+1106(Winter)X
+1371('91)X
+9 f
+1498(-)X
+3 f
+1562(Dallas,)X
+1815(TX)X
+4384(13)X
+
+14 p
+%%Page: 14 14
+0(Courier)xf 0 f
+10 s 10 xH 0 xS 0 f
+3 f
+432 5960(14)N
+2970(USENIX)X
+9 f
+3292(-)X
+3 f
+3356(Winter)X
+3621('91)X
+9 f
+3748(-)X
+3 f
+3812(Dallas,)X
+4065(TX)X
+
+14 p
+%%Trailer
+xt
+
+xs
diff --git a/libdb/docs/ref/refs/libtp_usenix.ps b/libdb/docs/ref/refs/libtp_usenix.ps
new file mode 100644
index 0000000..5b5ba6e
--- /dev/null
+++ b/libdb/docs/ref/refs/libtp_usenix.ps
@@ -0,0 +1,12340 @@
+%!PS-Adobe-1.0
+%%Creator: utopia:margo (& Seltzer,608-13E,8072,)
+%%Title: stdin (ditroff)
+%%CreationDate: Thu Dec 12 15:32:11 1991
+%%EndComments
+% @(#)psdit.pro 1.3 4/15/88
+% lib/psdit.pro -- prolog for psdit (ditroff) files
+% Copyright (c) 1984, 1985 Adobe Systems Incorporated. All Rights Reserved.
+% last edit: shore Sat Nov 23 20:28:03 1985
+% RCSID: $Header$
+
+% Changed by Edward Wang (edward@ucbarpa.berkeley.edu) to handle graphics,
+% 17 Feb, 87.
+
+/$DITroff 140 dict def $DITroff begin
+/fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def
+/xi{0 72 11 mul translate 72 resolution div dup neg scale 0 0 moveto
+ /fontnum 1 def /fontsize 10 def /fontheight 10 def /fontslant 0 def F
+ /pagesave save def}def
+/PB{save /psv exch def currentpoint translate
+ resolution 72 div dup neg scale 0 0 moveto}def
+/PE{psv restore}def
+/arctoobig 90 def /arctoosmall .05 def
+/m1 matrix def /m2 matrix def /m3 matrix def /oldmat matrix def
+/tan{dup sin exch cos div}def
+/point{resolution 72 div mul}def
+/dround {transform round exch round exch itransform}def
+/xT{/devname exch def}def
+/xr{/mh exch def /my exch def /resolution exch def}def
+/xp{}def
+/xs{docsave restore end}def
+/xt{}def
+/xf{/fontname exch def /slotno exch def fontnames slotno get fontname eq not
+ {fonts slotno fontname findfont put fontnames slotno fontname put}if}def
+/xH{/fontheight exch def F}def
+/xS{/fontslant exch def F}def
+/s{/fontsize exch def /fontheight fontsize def F}def
+/f{/fontnum exch def F}def
+/F{fontheight 0 le{/fontheight fontsize def}if
+ fonts fontnum get fontsize point 0 0 fontheight point neg 0 0 m1 astore
+ fontslant 0 ne{1 0 fontslant tan 1 0 0 m2 astore m3 concatmatrix}if
+ makefont setfont .04 fontsize point mul 0 dround pop setlinewidth}def
+/X{exch currentpoint exch pop moveto show}def
+/N{3 1 roll moveto show}def
+/Y{exch currentpoint pop exch moveto show}def
+/S{show}def
+/ditpush{}def/ditpop{}def
+/AX{3 -1 roll currentpoint exch pop moveto 0 exch ashow}def
+/AN{4 2 roll moveto 0 exch ashow}def
+/AY{3 -1 roll currentpoint pop exch moveto 0 exch ashow}def
+/AS{0 exch ashow}def
+/MX{currentpoint exch pop moveto}def
+/MY{currentpoint pop exch moveto}def
+/MXY{moveto}def
+/cb{pop}def % action on unknown char -- nothing for now
+/n{}def/w{}def
+/p{pop showpage pagesave restore /pagesave save def}def
+/Dt{/Dlinewidth exch def}def 1 Dt
+/Ds{/Ddash exch def}def -1 Ds
+/Di{/Dstipple exch def}def 1 Di
+/Dsetlinewidth{2 Dlinewidth mul setlinewidth}def
+/Dsetdash{Ddash 4 eq{[8 12]}{Ddash 16 eq{[32 36]}
+ {Ddash 20 eq{[32 12 8 12]}{[]}ifelse}ifelse}ifelse 0 setdash}def
+/Dstroke{gsave Dsetlinewidth Dsetdash 1 setlinecap stroke grestore
+ currentpoint newpath moveto}def
+/Dl{rlineto Dstroke}def
+/arcellipse{/diamv exch def /diamh exch def oldmat currentmatrix pop
+ currentpoint translate 1 diamv diamh div scale /rad diamh 2 div def
+ currentpoint exch rad add exch rad -180 180 arc oldmat setmatrix}def
+/Dc{dup arcellipse Dstroke}def
+/De{arcellipse Dstroke}def
+/Da{/endv exch def /endh exch def /centerv exch def /centerh exch def
+ /cradius centerv centerv mul centerh centerh mul add sqrt def
+ /eradius endv endv mul endh endh mul add sqrt def
+ /endang endv endh atan def
+ /startang centerv neg centerh neg atan def
+ /sweep startang endang sub dup 0 lt{360 add}if def
+ sweep arctoobig gt
+ {/midang startang sweep 2 div sub def /midrad cradius eradius add 2 div def
+ /midh midang cos midrad mul def /midv midang sin midrad mul def
+ midh neg midv neg endh endv centerh centerv midh midv Da
+ Da}
+ {sweep arctoosmall ge
+ {/controldelt 1 sweep 2 div cos sub 3 sweep 2 div sin mul div 4 mul def
+ centerv neg controldelt mul centerh controldelt mul
+ endv neg controldelt mul centerh add endh add
+ endh controldelt mul centerv add endv add
+ centerh endh add centerv endv add rcurveto Dstroke}
+ {centerh endh add centerv endv add rlineto Dstroke}
+ ifelse}
+ ifelse}def
+/Dpatterns[
+[%cf[widthbits]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000103810000000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0000000000000010>]
+[8<0411040040114000>]
+[8<0204081020408001>]
+[8<0000001038100000>]
+[8<6699996666999966>]
+[8<0000800100001008>]
+[8<81c36666c3810000>]
+[8<0f0e0c0800000000>]
+[8<0042660000246600>]
+[8<0000990000990000>]
+[8<0804020180402010>]
+[8<2418814242811824>]
+[8<6699996666999966>]
+[8<8000000008000000>]
+[8<00001c3e363e1c00>]
+[8<0000000000000000>]
+[32<00000040000000c00000004000000040000000e0000000000000000000000000>]
+[32<00000000000060000000900000002000000040000000f0000000000000000000>]
+[32<000000000000000000e0000000100000006000000010000000e0000000000000>]
+[32<00000000000000002000000060000000a0000000f00000002000000000000000>]
+[32<0000000e0000000000000000000000000000000f000000080000000e00000001>]
+[32<0000090000000600000000000000000000000000000007000000080000000e00>]
+[32<00010000000200000004000000040000000000000000000000000000000f0000>]
+[32<0900000006000000090000000600000000000000000000000000000006000000>]]
+[%ug
+[8<0000020000000000>]
+[8<0000020000002000>]
+[8<0004020000002000>]
+[8<0004020000402000>]
+[8<0004060000402000>]
+[8<0004060000406000>]
+[8<0006060000406000>]
+[8<0006060000606000>]
+[8<00060e0000606000>]
+[8<00060e000060e000>]
+[8<00070e000060e000>]
+[8<00070e000070e000>]
+[8<00070e020070e000>]
+[8<00070e020070e020>]
+[8<04070e020070e020>]
+[8<04070e024070e020>]
+[8<04070e064070e020>]
+[8<04070e064070e060>]
+[8<06070e064070e060>]
+[8<06070e066070e060>]
+[8<06070f066070e060>]
+[8<06070f066070f060>]
+[8<060f0f066070f060>]
+[8<060f0f0660f0f060>]
+[8<060f0f0760f0f060>]
+[8<060f0f0760f0f070>]
+[8<0e0f0f0760f0f070>]
+[8<0e0f0f07e0f0f070>]
+[8<0e0f0f0fe0f0f070>]
+[8<0e0f0f0fe0f0f0f0>]
+[8<0f0f0f0fe0f0f0f0>]
+[8<0f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff0f0f0f0>]
+[8<1f0f0f0ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f0>]
+[8<1f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff1f0f0f8>]
+[8<9f0f0f8ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f8>]
+[8<9f0f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f0f0f9>]
+[8<9f8f0f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f0f9>]
+[8<9f8f1f9ff9f8f1f9>]
+[8<bf8f1f9ff9f8f1f9>]
+[8<bf8f1f9ffbf8f1f9>]
+[8<bf8f1fdffbf8f1f9>]
+[8<bf8f1fdffbf8f1fd>]
+[8<ff8f1fdffbf8f1fd>]
+[8<ff8f1fdffff8f1fd>]
+[8<ff8f1ffffff8f1fd>]
+[8<ff8f1ffffff8f1ff>]
+[8<ff9f1ffffff8f1ff>]
+[8<ff9f1ffffff9f1ff>]
+[8<ff9f9ffffff9f1ff>]
+[8<ff9f9ffffff9f9ff>]
+[8<ffbf9ffffff9f9ff>]
+[8<ffbf9ffffffbf9ff>]
+[8<ffbfdffffffbf9ff>]
+[8<ffbfdffffffbfdff>]
+[8<ffffdffffffbfdff>]
+[8<ffffdffffffffdff>]
+[8<fffffffffffffdff>]
+[8<ffffffffffffffff>]]
+[%mg
+[8<8000000000000000>]
+[8<0822080080228000>]
+[8<0204081020408001>]
+[8<40e0400000000000>]
+[8<66999966>]
+[8<8001000010080000>]
+[8<81c36666c3810000>]
+[8<f0e0c08000000000>]
+[16<07c00f801f003e007c00f800f001e003c007800f001f003e007c00f801f003e0>]
+[16<1f000f8007c003e001f000f8007c003e001f800fc007e003f001f8007c003e00>]
+[8<c3c300000000c3c3>]
+[16<0040008001000200040008001000200040008000000100020004000800100020>]
+[16<0040002000100008000400020001800040002000100008000400020001000080>]
+[16<1fc03fe07df0f8f8f07de03fc01f800fc01fe03ff07df8f87df03fe01fc00f80>]
+[8<80>]
+[8<8040201000000000>]
+[8<84cc000048cc0000>]
+[8<9900009900000000>]
+[8<08040201804020100800020180002010>]
+[8<2418814242811824>]
+[8<66999966>]
+[8<8000000008000000>]
+[8<70f8d8f870000000>]
+[8<0814224180402010>]
+[8<aa00440a11a04400>]
+[8<018245aa45820100>]
+[8<221c224180808041>]
+[8<88000000>]
+[8<0855800080550800>]
+[8<2844004482440044>]
+[8<0810204080412214>]
+[8<00>]]]def
+/Dfill{
+ transform /maxy exch def /maxx exch def
+ transform /miny exch def /minx exch def
+ minx maxx gt{/minx maxx /maxx minx def def}if
+ miny maxy gt{/miny maxy /maxy miny def def}if
+ Dpatterns Dstipple 1 sub get exch 1 sub get
+ aload pop /stip exch def /stipw exch def /stiph 128 def
+ /imatrix[stipw 0 0 stiph 0 0]def
+ /tmatrix[stipw 0 0 stiph 0 0]def
+ /minx minx cvi stiph idiv stiph mul def
+ /miny miny cvi stipw idiv stipw mul def
+ gsave eoclip 0 setgray
+ miny stiph maxy{
+ tmatrix exch 5 exch put
+ minx stipw maxx{
+ tmatrix exch 4 exch put tmatrix setmatrix
+ stipw stiph true imatrix {stip} imagemask
+ }for
+ }for
+ grestore
+}def
+/Dp{Dfill Dstroke}def
+/DP{Dfill currentpoint newpath moveto}def
+end
+
+/ditstart{$DITroff begin
+ /nfonts 60 def % NFONTS makedev/ditroff dependent!
+ /fonts[nfonts{0}repeat]def
+ /fontnames[nfonts{()}repeat]def
+/docsave save def
+}def
+
+% character outcalls
+/oc{
+ /pswid exch def /cc exch def /name exch def
+ /ditwid pswid fontsize mul resolution mul 72000 div def
+ /ditsiz fontsize resolution mul 72 div def
+ ocprocs name known{ocprocs name get exec}{name cb}ifelse
+}def
+/fractm [.65 0 0 .6 0 0] def
+/fraction{
+ /fden exch def /fnum exch def gsave /cf currentfont def
+ cf fractm makefont setfont 0 .3 dm 2 copy neg rmoveto
+ fnum show rmoveto currentfont cf setfont(\244)show setfont fden show
+ grestore ditwid 0 rmoveto
+}def
+/oce{grestore ditwid 0 rmoveto}def
+/dm{ditsiz mul}def
+/ocprocs 50 dict def ocprocs begin
+(14){(1)(4)fraction}def
+(12){(1)(2)fraction}def
+(34){(3)(4)fraction}def
+(13){(1)(3)fraction}def
+(23){(2)(3)fraction}def
+(18){(1)(8)fraction}def
+(38){(3)(8)fraction}def
+(58){(5)(8)fraction}def
+(78){(7)(8)fraction}def
+(sr){gsave 0 .06 dm rmoveto(\326)show oce}def
+(is){gsave 0 .15 dm rmoveto(\362)show oce}def
+(->){gsave 0 .02 dm rmoveto(\256)show oce}def
+(<-){gsave 0 .02 dm rmoveto(\254)show oce}def
+(==){gsave 0 .05 dm rmoveto(\272)show oce}def
+(uc){gsave currentpoint 400 .009 dm mul add translate
+ 8 -8 scale ucseal oce}def
+end
+
+% an attempt at a PostScript FONT to implement ditroff special chars
+% this will enable us to
+% cache the little buggers
+% generate faster, more compact PS out of psdit
+% confuse everyone (including myself)!
+50 dict dup begin
+/FontType 3 def
+/FontName /DIThacks def
+/FontMatrix [.001 0 0 .001 0 0] def
+/FontBBox [-260 -260 900 900] def% a lie but ...
+/Encoding 256 array def
+0 1 255{Encoding exch /.notdef put}for
+Encoding
+ dup 8#040/space put %space
+ dup 8#110/rc put %right ceil
+ dup 8#111/lt put %left top curl
+ dup 8#112/bv put %bold vert
+ dup 8#113/lk put %left mid curl
+ dup 8#114/lb put %left bot curl
+ dup 8#115/rt put %right top curl
+ dup 8#116/rk put %right mid curl
+ dup 8#117/rb put %right bot curl
+ dup 8#120/rf put %right floor
+ dup 8#121/lf put %left floor
+ dup 8#122/lc put %left ceil
+ dup 8#140/sq put %square
+ dup 8#141/bx put %box
+ dup 8#142/ci put %circle
+ dup 8#143/br put %box rule
+ dup 8#144/rn put %root extender
+ dup 8#145/vr put %vertical rule
+ dup 8#146/ob put %outline bullet
+ dup 8#147/bu put %bullet
+ dup 8#150/ru put %rule
+ dup 8#151/ul put %underline
+ pop
+/DITfd 100 dict def
+/BuildChar{0 begin
+ /cc exch def /fd exch def
+ /charname fd /Encoding get cc get def
+ /charwid fd /Metrics get charname get def
+ /charproc fd /CharProcs get charname get def
+ charwid 0 fd /FontBBox get aload pop setcachedevice
+ 2 setlinejoin 40 setlinewidth
+ newpath 0 0 moveto gsave charproc grestore
+ end}def
+/BuildChar load 0 DITfd put
+/CharProcs 50 dict def
+CharProcs begin
+/space{}def
+/.notdef{}def
+/ru{500 0 rls}def
+/rn{0 840 moveto 500 0 rls}def
+/vr{0 800 moveto 0 -770 rls}def
+/bv{0 800 moveto 0 -1000 rls}def
+/br{0 840 moveto 0 -1000 rls}def
+/ul{0 -140 moveto 500 0 rls}def
+/ob{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath stroke}def
+/bu{200 250 rmoveto currentpoint newpath 200 0 360 arc closepath fill}def
+/sq{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath stroke}def
+/bx{80 0 rmoveto currentpoint dround newpath moveto
+ 640 0 rlineto 0 640 rlineto -640 0 rlineto closepath fill}def
+/ci{500 360 rmoveto currentpoint newpath 333 0 360 arc
+ 50 setlinewidth stroke}def
+
+/lt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 add exch s4 a4p stroke}def
+/lb{0 800 moveto 0 -550 rlineto currx -200 2cx s4 add exch s4 a4p stroke}def
+/rt{0 -200 moveto 0 550 rlineto currx 800 2cx s4 sub exch s4 a4p stroke}def
+/rb{0 800 moveto 0 -500 rlineto currx -200 2cx s4 sub exch s4 a4p stroke}def
+/lk{0 800 moveto 0 300 -300 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/rk{0 800 moveto 0 300 s2 300 s4 arcto pop pop 1000 sub
+ 0 300 4 2 roll s4 a4p 0 -200 lineto stroke}def
+/lf{0 800 moveto 0 -1000 rlineto s4 0 rls}def
+/rf{0 800 moveto 0 -1000 rlineto s4 neg 0 rls}def
+/lc{0 -200 moveto 0 1000 rlineto s4 0 rls}def
+/rc{0 -200 moveto 0 1000 rlineto s4 neg 0 rls}def
+end
+
+/Metrics 50 dict def Metrics begin
+/.notdef 0 def
+/space 500 def
+/ru 500 def
+/br 0 def
+/lt 416 def
+/lb 416 def
+/rt 416 def
+/rb 416 def
+/lk 416 def
+/rk 416 def
+/rc 416 def
+/lc 416 def
+/rf 416 def
+/lf 416 def
+/bv 416 def
+/ob 350 def
+/bu 350 def
+/ci 750 def
+/bx 750 def
+/sq 750 def
+/rn 500 def
+/ul 500 def
+/vr 0 def
+end
+
+DITfd begin
+/s2 500 def /s4 250 def /s3 333 def
+/a4p{arcto pop pop pop pop}def
+/2cx{2 copy exch}def
+/rls{rlineto stroke}def
+/currx{currentpoint pop}def
+/dround{transform round exch round exch itransform} def
+end
+end
+/DIThacks exch definefont pop
+ditstart
+(psc)xT
+576 1 1 xr
+1(Times-Roman)xf 1 f
+2(Times-Italic)xf 2 f
+3(Times-Bold)xf 3 f
+4(Times-BoldItalic)xf 4 f
+5(Helvetica)xf 5 f
+6(Helvetica-Bold)xf 6 f
+7(Courier)xf 7 f
+8(Courier-Bold)xf 8 f
+9(Symbol)xf 9 f
+10(DIThacks)xf 10 f
+10 s
+1 f
+xi
+%%EndProlog
+
+%%Page: 1 1
+10 s 10 xH 0 xS 1 f
+3 f
+14 s
+1205 1206(LIBTP:)N
+1633(Portable,)X
+2100(M)X
+2206(odular)X
+2551(Transactions)X
+3202(for)X
+3374(UNIX)X
+1 f
+11 s
+3661 1162(1)N
+2 f
+12 s
+2182 1398(Margo)N
+2467(Seltzer)X
+2171 1494(Michael)N
+2511(Olson)X
+1800 1590(University)N
+2225(of)X
+2324(California,)X
+2773(Berkeley)X
+3 f
+2277 1878(Abstract)N
+1 f
+10 s
+755 2001(Transactions)N
+1198(provide)X
+1475(a)X
+1543(useful)X
+1771(programming)X
+2239(paradigm)X
+2574(for)X
+2700(maintaining)X
+3114(logical)X
+3364(consistency,)X
+3790(arbitrating)X
+4156(con-)X
+555 2091(current)N
+808(access,)X
+1059(and)X
+1200(managing)X
+1540(recovery.)X
+1886(In)X
+1977(traditional)X
+2330(UNIX)X
+2555(systems,)X
+2852(the)X
+2974(only)X
+3140(easy)X
+3307(way)X
+3465(of)X
+3556(using)X
+3753(transactions)X
+4160(is)X
+4237(to)X
+555 2181(purchase)N
+876(a)X
+947(database)X
+1258(system.)X
+1554(Such)X
+1748(systems)X
+2035(are)X
+2168(often)X
+2367(slow,)X
+2572(costly,)X
+2817(and)X
+2967(may)X
+3139(not)X
+3275(provide)X
+3554(the)X
+3686(exact)X
+3890(functionality)X
+555 2271(desired.)N
+848(This)X
+1011(paper)X
+1210(presents)X
+1493(the)X
+1611(design,)X
+1860(implementation,)X
+2402(and)X
+2538(performance)X
+2965(of)X
+3052(LIBTP,)X
+3314(a)X
+3370(simple,)X
+3623(non-proprietary)X
+4147(tran-)X
+555 2361(saction)N
+809(library)X
+1050(using)X
+1249(the)X
+1373(4.4BSD)X
+1654(database)X
+1957(access)X
+2189(routines)X
+2473(\()X
+3 f
+2500(db)X
+1 f
+2588(\(3\)\).)X
+2775(On)X
+2899(a)X
+2961(conventional)X
+3401(transaction)X
+3779(processing)X
+4148(style)X
+555 2451(benchmark,)N
+959(its)X
+1061(performance)X
+1495(is)X
+1575(approximately)X
+2065(85%)X
+2239(that)X
+2386(of)X
+2480(the)X
+2604(database)X
+2907(access)X
+3139(routines)X
+3423(without)X
+3693(transaction)X
+4071(protec-)X
+555 2541(tion,)N
+725(200%)X
+938(that)X
+1084(of)X
+1177(using)X
+3 f
+1376(fsync)X
+1 f
+1554(\(2\))X
+1674(to)X
+1761(commit)X
+2030(modi\256cations)X
+2490(to)X
+2577(disk,)X
+2755(and)X
+2896(125%)X
+3108(that)X
+3253(of)X
+3345(a)X
+3406(commercial)X
+3810(relational)X
+4138(data-)X
+555 2631(base)N
+718(system.)X
+3 f
+555 2817(1.)N
+655(Introduction)X
+1 f
+755 2940(Transactions)N
+1186(are)X
+1306(used)X
+1474(in)X
+1557(database)X
+1855(systems)X
+2129(to)X
+2212(enable)X
+2443(concurrent)X
+2807(users)X
+2992(to)X
+3074(apply)X
+3272(multi-operation)X
+3790(updates)X
+4055(without)X
+555 3030(violating)N
+863(the)X
+985(integrity)X
+1280(of)X
+1371(the)X
+1493(database.)X
+1814(They)X
+2003(provide)X
+2271(the)X
+2392(properties)X
+2736(of)X
+2826(atomicity,)X
+3171(consistency,)X
+3588(isolation,)X
+3906(and)X
+4045(durabil-)X
+555 3120(ity.)N
+701(By)X
+816(atomicity,)X
+1160(we)X
+1276(mean)X
+1472(that)X
+1614(the)X
+1734(set)X
+1845(of)X
+1934(updates)X
+2200(comprising)X
+2581(a)X
+2638(transaction)X
+3011(must)X
+3187(be)X
+3284(applied)X
+3541(as)X
+3629(a)X
+3686(single)X
+3898(unit;)X
+4085(that)X
+4226(is,)X
+555 3210(they)N
+714(must)X
+890(either)X
+1094(all)X
+1195(be)X
+1292(applied)X
+1549(to)X
+1632(the)X
+1751(database)X
+2049(or)X
+2137(all)X
+2238(be)X
+2335(absent.)X
+2601(Consistency)X
+3013(requires)X
+3293(that)X
+3434(a)X
+3491(transaction)X
+3864(take)X
+4019(the)X
+4138(data-)X
+555 3300(base)N
+725(from)X
+908(one)X
+1051(logically)X
+1358(consistent)X
+1704(state)X
+1877(to)X
+1965(another.)X
+2272(The)X
+2423(property)X
+2721(of)X
+2814(isolation)X
+3115(requires)X
+3400(that)X
+3546(concurrent)X
+3916(transactions)X
+555 3390(yield)N
+750(results)X
+994(which)X
+1225(are)X
+1358(indistinguishable)X
+1938(from)X
+2128(the)X
+2260(results)X
+2503(which)X
+2733(would)X
+2967(be)X
+3077(obtained)X
+3387(by)X
+3501(running)X
+3784(the)X
+3916(transactions)X
+555 3480(sequentially.)N
+1002(Finally,)X
+1268(durability)X
+1599(requires)X
+1878(that)X
+2018(once)X
+2190(transactions)X
+2593(have)X
+2765(been)X
+2937(committed,)X
+3319(their)X
+3486(results)X
+3715(must)X
+3890(be)X
+3986(preserved)X
+555 3570(across)N
+776(system)X
+1018(failures)X
+1279([TPCB90].)X
+755 3693(Although)N
+1080(these)X
+1268(properties)X
+1612(are)X
+1734(most)X
+1912(frequently)X
+2265(discussed)X
+2595(in)X
+2680(the)X
+2801(context)X
+3060(of)X
+3150(databases,)X
+3501(they)X
+3661(are)X
+3782(useful)X
+4000(program-)X
+555 3783(ming)N
+750(paradigms)X
+1114(for)X
+1238(more)X
+1433(general)X
+1700(purpose)X
+1984(applications.)X
+2441(There)X
+2659(are)X
+2788(several)X
+3046(different)X
+3353(situations)X
+3689(where)X
+3916(transactions)X
+555 3873(can)N
+687(be)X
+783(used)X
+950(to)X
+1032(replace)X
+1285(current)X
+1533(ad-hoc)X
+1772(mechanisms.)X
+755 3996(One)N
+910(situation)X
+1206(is)X
+1280(when)X
+1475(multiple)X
+1762(\256les)X
+1916(or)X
+2004(parts)X
+2181(of)X
+2269(\256les)X
+2422(need)X
+2594(to)X
+2676(be)X
+2772(updated)X
+3046(in)X
+3128(an)X
+3224(atomic)X
+3462(fashion.)X
+3758(For)X
+3889(example,)X
+4201(the)X
+555 4086(traditional)N
+907(UNIX)X
+1131(\256le)X
+1256(system)X
+1501(uses)X
+1661(ordering)X
+1955(constraints)X
+2324(to)X
+2408(achieve)X
+2676(recoverability)X
+3144(in)X
+3228(the)X
+3348(face)X
+3505(of)X
+3594(crashes.)X
+3893(When)X
+4107(a)X
+4165(new)X
+555 4176(\256le)N
+678(is)X
+752(created,)X
+1026(its)X
+1122(inode)X
+1321(is)X
+1395(written)X
+1642(to)X
+1724(disk)X
+1877(before)X
+2103(the)X
+2221(new)X
+2375(\256le)X
+2497(is)X
+2570(added)X
+2782(to)X
+2864(the)X
+2982(directory)X
+3292(structure.)X
+3633(This)X
+3795(guarantees)X
+4159(that,)X
+555 4266(if)N
+627(the)X
+748(system)X
+993(crashes)X
+1253(between)X
+1544(the)X
+1665(two)X
+1808(I/O's,)X
+2016(the)X
+2137(directory)X
+2450(does)X
+2620(not)X
+2744(contain)X
+3002(a)X
+3060 0.4531(reference)AX
+3383(to)X
+3467(an)X
+3565(invalid)X
+3809(inode.)X
+4049(In)X
+4138(actu-)X
+555 4356(ality,)N
+741(the)X
+863(desired)X
+1119(effect)X
+1326(is)X
+1402(that)X
+1545(these)X
+1733(two)X
+1876(updates)X
+2144(have)X
+2319(the)X
+2440(transactional)X
+2873(property)X
+3168(of)X
+3258(atomicity)X
+3583(\(either)X
+3816(both)X
+3981(writes)X
+4200(are)X
+555 4446(visible)N
+790(or)X
+879(neither)X
+1124(is\).)X
+1266(Rather)X
+1501(than)X
+1660(building)X
+1947(special)X
+2191(purpose)X
+2466(recovery)X
+2769(mechanisms)X
+3186(into)X
+3331(the)X
+3450(\256le)X
+3573(system)X
+3816(or)X
+3904(related)X
+4144(tools)X
+555 4536(\()N
+2 f
+582(e.g.)X
+3 f
+726(fsck)X
+1 f
+864(\(8\)\),)X
+1033(one)X
+1177(could)X
+1383(use)X
+1518(general)X
+1783(purpose)X
+2064(transaction)X
+2443(recovery)X
+2752(protocols)X
+3077(after)X
+3252(system)X
+3501(failure.)X
+3778(Any)X
+3943(application)X
+555 4626(that)N
+705(needs)X
+918(to)X
+1010(keep)X
+1192(multiple,)X
+1508(related)X
+1757(\256les)X
+1920(\(or)X
+2044(directories\))X
+2440(consistent)X
+2790(should)X
+3032(do)X
+3141(so)X
+3241(using)X
+3443(transactions.)X
+3895(Source)X
+4147(code)X
+555 4716(control)N
+805(systems,)X
+1101(such)X
+1271(as)X
+1361(RCS)X
+1534(and)X
+1673(SCCS,)X
+1910(should)X
+2146(use)X
+2276(transaction)X
+2651(semantics)X
+2990(to)X
+3075(allow)X
+3276(the)X
+3397(``checking)X
+3764(in'')X
+3903(of)X
+3992(groups)X
+4232(of)X
+555 4806(related)N
+801(\256les.)X
+1001(In)X
+1095(this)X
+1237(way,)X
+1418(if)X
+1493(the)X
+1617 0.2841(``check-in'')AX
+2028(fails,)X
+2212(the)X
+2336(transaction)X
+2714(may)X
+2878(be)X
+2980(aborted,)X
+3267(backing)X
+3547(out)X
+3675(the)X
+3799(partial)X
+4030(``check-)X
+555 4896(in'')N
+691(leaving)X
+947(the)X
+1065(source)X
+1295(repository)X
+1640(in)X
+1722(a)X
+1778(consistent)X
+2118(state.)X
+755 5019(A)N
+842(second)X
+1094(situation)X
+1398(where)X
+1624(transactions)X
+2036(can)X
+2177(be)X
+2282(used)X
+2458(to)X
+2549(replace)X
+2811(current)X
+3068(ad-hoc)X
+3316(mechanisms)X
+3741(is)X
+3822(in)X
+3912(applications)X
+555 5109(where)N
+776(concurrent)X
+1144(updates)X
+1413(to)X
+1499(a)X
+1559(shared)X
+1793(\256le)X
+1919(are)X
+2042(desired,)X
+2318(but)X
+2444(there)X
+2629(is)X
+2706(logical)X
+2948(consistency)X
+3345(of)X
+3435(the)X
+3556(data)X
+3713(which)X
+3932(needs)X
+4138(to)X
+4223(be)X
+555 5199(preserved.)N
+928(For)X
+1059(example,)X
+1371(when)X
+1565(the)X
+1683(password)X
+2006(\256le)X
+2128(is)X
+2201(updated,)X
+2495(\256le)X
+2617(locking)X
+2877(is)X
+2950(used)X
+3117(to)X
+3199(disallow)X
+3490(concurrent)X
+3854(access.)X
+4120(Tran-)X
+555 5289(saction)N
+804(semantics)X
+1142(on)X
+1244(the)X
+1364(password)X
+1689(\256les)X
+1844(would)X
+2066(allow)X
+2266(concurrent)X
+2632(updates,)X
+2919(while)X
+3119(preserving)X
+3479(the)X
+3598(logical)X
+3837(consistency)X
+4232(of)X
+555 5379(the)N
+681(password)X
+1012(database.)X
+1357(Similarly,)X
+1702(UNIX)X
+1930(utilities)X
+2196(which)X
+2419(rewrite)X
+2674(\256les)X
+2834(face)X
+2996(a)X
+3059(potential)X
+3366(race)X
+3528(condition)X
+3857(between)X
+4152(their)X
+555 5469(rewriting)N
+871(a)X
+929(\256le)X
+1053(and)X
+1191(another)X
+1453(process)X
+1715(reading)X
+1977(the)X
+2096(\256le.)X
+2259(For)X
+2391(example,)X
+2704(the)X
+2823(compiler)X
+3129(\(more)X
+3342(precisely,)X
+3673(the)X
+3792(assembler\))X
+4161(may)X
+8 s
+10 f
+555 5541(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)N
+5 s
+1 f
+727 5619(1)N
+8 s
+763 5644(To)N
+850(appear)X
+1035(in)X
+1101(the)X
+2 f
+1195(Proceedings)X
+1530(of)X
+1596(the)X
+1690(1992)X
+1834(Winter)X
+2024(Usenix)X
+1 f
+2201(,)X
+2233(San)X
+2345(Francisco,)X
+2625(CA,)X
+2746(January)X
+2960(1992.)X
+
+2 p
+%%Page: 2 2
+8 s 8 xH 0 xS 1 f
+10 s
+3 f
+1 f
+555 630(have)N
+737(to)X
+829(rewrite)X
+1087(a)X
+1152(\256le)X
+1283(to)X
+1374(which)X
+1599(it)X
+1672(has)X
+1808(write)X
+2002(permission)X
+2382(in)X
+2473(a)X
+2538(directory)X
+2857(to)X
+2948(which)X
+3173(it)X
+3246(does)X
+3422(not)X
+3553(have)X
+3734(write)X
+3928(permission.)X
+555 720(While)N
+779(the)X
+904(``.o'')X
+1099(\256le)X
+1228(is)X
+1308(being)X
+1513(written,)X
+1787(another)X
+2055(utility)X
+2272(such)X
+2446(as)X
+3 f
+2540(nm)X
+1 f
+2651(\(1\))X
+2772(or)X
+3 f
+2866(ar)X
+1 f
+2942(\(1\))X
+3063(may)X
+3228(read)X
+3394(the)X
+3519(\256le)X
+3648(and)X
+3791(produce)X
+4077(invalid)X
+555 810(results)N
+790(since)X
+981(the)X
+1105(\256le)X
+1233(has)X
+1366(not)X
+1494(been)X
+1672(completely)X
+2054(written.)X
+2347(Currently,)X
+2700(some)X
+2895(utilities)X
+3160(use)X
+3293(special)X
+3542(purpose)X
+3821(code)X
+3998(to)X
+4085(handle)X
+555 900(such)N
+722(cases)X
+912(while)X
+1110(others)X
+1326(ignore)X
+1551(the)X
+1669(problem)X
+1956(and)X
+2092(force)X
+2278(users)X
+2463(to)X
+2545(live)X
+2685(with)X
+2847(the)X
+2965(consequences.)X
+755 1023(In)N
+845(this)X
+983(paper,)X
+1205(we)X
+1322(present)X
+1577(a)X
+1635(simple)X
+1870(library)X
+2106(which)X
+2324(provides)X
+2622(transaction)X
+2996(semantics)X
+3334(\(atomicity,)X
+3705(consistency,)X
+4121(isola-)X
+555 1113(tion,)N
+720(and)X
+857(durability\).)X
+1236(The)X
+1382(4.4BSD)X
+1658(database)X
+1956(access)X
+2182(methods)X
+2473(have)X
+2645(been)X
+2817(modi\256ed)X
+3121(to)X
+3203(use)X
+3330(this)X
+3465(library,)X
+3719(optionally)X
+4063(provid-)X
+555 1203(ing)N
+682(shared)X
+917(buffer)X
+1139(management)X
+1574(between)X
+1867(applications,)X
+2298(locking,)X
+2582(and)X
+2722(transaction)X
+3098(semantics.)X
+3478(Any)X
+3640(UNIX)X
+3865(program)X
+4161(may)X
+555 1293(transaction)N
+930(protect)X
+1176(its)X
+1274(data)X
+1430(by)X
+1532(requesting)X
+1888(transaction)X
+2262(protection)X
+2609(with)X
+2773(the)X
+3 f
+2893(db)X
+1 f
+2981(\(3\))X
+3097(library)X
+3333(or)X
+3422(by)X
+3524(adding)X
+3764(appropriate)X
+4152(calls)X
+555 1383(to)N
+646(the)X
+773(transaction)X
+1154(manager,)X
+1480(buffer)X
+1706(manager,)X
+2032(lock)X
+2199(manager,)X
+2525(and)X
+2670(log)X
+2801(manager.)X
+3147(The)X
+3301(library)X
+3543(routines)X
+3829(may)X
+3995(be)X
+4099(linked)X
+555 1473(into)N
+708(the)X
+834(host)X
+995(application)X
+1379(and)X
+1523(called)X
+1743(by)X
+1851(subroutine)X
+2217(interface,)X
+2547(or)X
+2642(they)X
+2808(may)X
+2974(reside)X
+3194(in)X
+3284(a)X
+3348(separate)X
+3640(server)X
+3865(process.)X
+4174(The)X
+555 1563(server)N
+772(architecture)X
+1172(provides)X
+1468(for)X
+1582(network)X
+1865(access)X
+2091(and)X
+2227(better)X
+2430(protection)X
+2775(mechanisms.)X
+3 f
+555 1749(2.)N
+655(Related)X
+938(Work)X
+1 f
+755 1872(There)N
+1000(has)X
+1164(been)X
+1373(much)X
+1608(discussion)X
+1998(in)X
+2117(recent)X
+2371(years)X
+2597(about)X
+2831(new)X
+3021(transaction)X
+3429(models)X
+3716(and)X
+3888(architectures)X
+555 1962 0.1172([SPEC88][NODI90][CHEN91][MOHA91].)AN
+2009(Much)X
+2220(of)X
+2310(this)X
+2448(work)X
+2636(focuses)X
+2900(on)X
+3003(new)X
+3160(ways)X
+3348(to)X
+3433(model)X
+3656(transactions)X
+4062(and)X
+4201(the)X
+555 2052(interactions)N
+953(between)X
+1245(them,)X
+1449(while)X
+1651(the)X
+1772(work)X
+1960(presented)X
+2291(here)X
+2453(focuses)X
+2717(on)X
+2820(the)X
+2941(implementation)X
+3466(and)X
+3605(performance)X
+4035(of)X
+4125(tradi-)X
+555 2142(tional)N
+757(transaction)X
+1129(techniques)X
+1492(\(write-ahead)X
+1919(logging)X
+2183(and)X
+2319(two-phase)X
+2669(locking\))X
+2956(on)X
+3056(a)X
+3112(standard)X
+3404(operating)X
+3727(system)X
+3969(\(UNIX\).)X
+755 2265(Such)N
+947(traditional)X
+1308(operating)X
+1643(systems)X
+1928(are)X
+2059(often)X
+2256(criticized)X
+2587(for)X
+2713(their)X
+2892(inability)X
+3190(to)X
+3283(perform)X
+3573(transaction)X
+3956(processing)X
+555 2355(adequately.)N
+971([STON81])X
+1342(cites)X
+1517(three)X
+1706(main)X
+1894(areas)X
+2088(of)X
+2183(inadequate)X
+2559(support:)X
+2849(buffer)X
+3074(management,)X
+3532(the)X
+3658(\256le)X
+3788(system,)X
+4058(and)X
+4201(the)X
+555 2445(process)N
+823(structure.)X
+1191(These)X
+1410(arguments)X
+1771(are)X
+1897(summarized)X
+2316(in)X
+2405(table)X
+2587(one.)X
+2769(Fortunately,)X
+3184(much)X
+3388(has)X
+3521(changed)X
+3815(since)X
+4006(1981.)X
+4232(In)X
+555 2535(the)N
+683(area)X
+848(of)X
+945(buffer)X
+1172(management,)X
+1632(most)X
+1817(UNIX)X
+2048(systems)X
+2331(provide)X
+2606(the)X
+2734(ability)X
+2968(to)X
+3060(memory)X
+3357(map)X
+3525(\256les,)X
+3708(thus)X
+3870(obviating)X
+4201(the)X
+555 2625(need)N
+734(for)X
+855(a)X
+918(copy)X
+1101(between)X
+1396(kernel)X
+1624(and)X
+1766(user)X
+1926(space.)X
+2171(If)X
+2251(a)X
+2313(database)X
+2616(system)X
+2864(is)X
+2943(going)X
+3151(to)X
+3239(use)X
+3372(the)X
+3496(\256le)X
+3624(system)X
+3872(buffer)X
+4095(cache,)X
+555 2715(then)N
+719(a)X
+781(system)X
+1029(call)X
+1171(is)X
+1250(required.)X
+1584(However,)X
+1924(if)X
+1998(buffering)X
+2322(is)X
+2400(provided)X
+2710(at)X
+2793(user)X
+2952(level)X
+3133(using)X
+3331(shared)X
+3566(memory,)X
+3878(as)X
+3970(in)X
+4057(LIBTP,)X
+555 2805(buffer)N
+776(management)X
+1210(is)X
+1287(only)X
+1452(as)X
+1542(slow)X
+1716(as)X
+1806(access)X
+2035(to)X
+2120(shared)X
+2353(memory)X
+2643(and)X
+2782(any)X
+2921(replacement)X
+3337(algorithm)X
+3671(may)X
+3832(be)X
+3931(used.)X
+4121(Since)X
+555 2895(multiple)N
+849(processes)X
+1185(can)X
+1325(access)X
+1559(the)X
+1685(shared)X
+1923(data,)X
+2105(prefetching)X
+2499(may)X
+2665(be)X
+2769(accomplished)X
+3238(by)X
+3346(separate)X
+3638(processes)X
+3973(or)X
+4067(threads)X
+555 2985(whose)N
+782(sole)X
+932(purpose)X
+1207(is)X
+1281(to)X
+1364(prefetch)X
+1649(pages)X
+1853(and)X
+1990(wait)X
+2149(on)X
+2250(them.)X
+2471(There)X
+2680(is)X
+2754(still)X
+2894(no)X
+2995(way)X
+3150(to)X
+3233(enforce)X
+3496(write)X
+3682(ordering)X
+3975(other)X
+4161(than)X
+555 3075(keeping)N
+829(pages)X
+1032(in)X
+1114(user)X
+1268(memory)X
+1555(and)X
+1691(using)X
+1884(the)X
+3 f
+2002(fsync)X
+1 f
+2180(\(3\))X
+2294(system)X
+2536(call)X
+2672(to)X
+2754(perform)X
+3033(synchronous)X
+3458(writes.)X
+755 3198(In)N
+845(the)X
+966(area)X
+1124(of)X
+1214(\256le)X
+1339(systems,)X
+1635(the)X
+1756(fast)X
+1895(\256le)X
+2020(system)X
+2265(\(FFS\))X
+2474([MCKU84])X
+2871(allows)X
+3103(allocation)X
+3442(in)X
+3527(units)X
+3704(up)X
+3806(to)X
+3890(64KBytes)X
+4232(as)X
+555 3288(opposed)N
+846(to)X
+932(the)X
+1054(4KByte)X
+1327(and)X
+1466(8KByte)X
+1738(\256gures)X
+1979(quoted)X
+2220(in)X
+2305([STON81].)X
+2711(The)X
+2859(measurements)X
+3341(in)X
+3426(this)X
+3564(paper)X
+3766(were)X
+3946(taken)X
+4143(from)X
+555 3378(an)N
+655(8KByte)X
+928(FFS,)X
+1104(but)X
+1230(as)X
+1320(LIBTP)X
+1565(runs)X
+1726(exclusively)X
+2114(in)X
+2199(user)X
+2356(space,)X
+2578(there)X
+2762(is)X
+2838(nothing)X
+3105(to)X
+3190(prevent)X
+3454(it)X
+3521(from)X
+3700(being)X
+3901(run)X
+4031(on)X
+4134(other)X
+555 3468(UNIX)N
+776(compatible)X
+1152(\256le)X
+1274(systems)X
+1547(\(e.g.)X
+1710(log-structured)X
+2180([ROSE91],)X
+2558(extent-based,)X
+3004(or)X
+3091(multi-block)X
+3484([SELT91]\).)X
+755 3591(Finally,)N
+1029(with)X
+1199(regard)X
+1433(to)X
+1523(the)X
+1648(process)X
+1916(structure,)X
+2244(neither)X
+2494(context)X
+2757(switch)X
+2993(time)X
+3162(nor)X
+3296(scheduling)X
+3670(around)X
+3920(semaphores)X
+555 3681(seems)N
+785(to)X
+881(affect)X
+1099(the)X
+1231(system)X
+1487(performance.)X
+1968(However,)X
+2317(the)X
+2449(implementation)X
+2984(of)X
+3084(semaphores)X
+3496(can)X
+3641(impact)X
+3892(performance)X
+555 3771(tremendously.)N
+1051(This)X
+1213(is)X
+1286(discussed)X
+1613(in)X
+1695(more)X
+1880(detail)X
+2078(in)X
+2160(section)X
+2407(4.3.)X
+755 3894(The)N
+908(Tuxedo)X
+1181(system)X
+1431(from)X
+1615(AT&T)X
+1861(is)X
+1941(a)X
+2004(transaction)X
+2383(manager)X
+2687(which)X
+2910(coordinates)X
+3307(distributed)X
+3676(transaction)X
+4055(commit)X
+555 3984(from)N
+738(a)X
+801(variety)X
+1051(of)X
+1145(different)X
+1449(local)X
+1632(transaction)X
+2011(managers.)X
+2386(At)X
+2493(this)X
+2634(time,)X
+2822(LIBTP)X
+3070(does)X
+3243(not)X
+3371(have)X
+3549(its)X
+3650(own)X
+3814(mechanism)X
+4205(for)X
+555 4074(distributed)N
+942(commit)X
+1231(processing,)X
+1639(but)X
+1786(could)X
+2009(be)X
+2130(used)X
+2322(as)X
+2434(a)X
+2515(local)X
+2716(transaction)X
+3113(agent)X
+3331(by)X
+3455(systems)X
+3752(such)X
+3943(as)X
+4054(Tuxedo)X
+555 4164([ANDR89].)N
Table One: Shortcomings of UNIX transaction support cited in [STON81].

Buffer Management:
  - Data must be copied between kernel space and user space.
  - Buffer pool access is too slow.
  - There is no way to request prefetch.
  - Replacement is usually LRU which may be suboptimal for databases.
  - There is no way to guarantee write ordering.

File System:
  - Allocation is done in small blocks (usually 4K or 8K).
  - Logical organization of files is redundantly expressed.

Process Structure:
  - Context switching and message passing are too slow.
  - A process may be descheduled while holding a semaphore.
+
The transaction architecture presented in [YOUN91] is very similar to that implemented in the LIBTP. While [YOUN91] presents a model for providing transaction services, this paper focuses on the implementation and performance of a particular system. In addition, we provide detailed comparisons with alternative solutions: traditional UNIX services and commercial database management systems.
3. Architecture

The library is designed to provide well defined interfaces to the services required for transaction processing. These services are recovery, concurrency control, and the management of shared data. First we will discuss the design tradeoffs in the selection of recovery, concurrency control, and buffer management implementations, and then we will present the overall library architecture and module descriptions.
3.1. Design Tradeoffs

3.1.1. Crash Recovery

The recovery protocol is responsible for providing the transaction semantics discussed earlier. There are a wide range of recovery protocols available [HAER83], but we can crudely divide them into two main categories. The first category records all modifications to the database in a separate file, and uses this file (log) to back out or reapply these modifications if a transaction aborts or the system crashes. We call this set the logging protocols. The second category avoids the use of a log by carefully controlling when data are written to disk. We call this set the non-logging protocols.

Non-logging protocols hold dirty buffers in main memory or temporary files until commit and then force these pages to disk at transaction commit. While we can use temporary files to hold dirty pages that may need to be evicted from memory during a long-running transaction, the only user-level mechanism to force pages to disk is the fsync(2) system call. Unfortunately, fsync(2) is an expensive system call in that it forces all pages of a file to disk, and transactions that manage more than one file must issue one call per file.

In addition, fsync(2) provides no way to control the order in which dirty pages are written to disk. Since non-logging protocols must sometimes order writes carefully [SULL92], they are difficult to implement on Unix systems. As a result, we have chosen to implement a logging protocol.
Logging protocols may be categorized based on how information is logged (physically or logically) and how much is logged (before images, after images, or both). In physical logging, images of complete physical units (pages or buffers) are recorded, while in logical logging a description of the operation is recorded. Therefore, while we may record entire pages in a physical log, we need only record the records being modified in a logical log. In fact, physical logging can be thought of as a special case of logical logging, since the "records" that we log in logical logging might be physical pages. Since logical logging is both more space-efficient and more general, we have chosen it for our logging protocol.

In before-image logging, we log a copy of the data before the update, while in after-image logging, we log a copy of the data after the update. If we log only before-images, then there is sufficient information in the log to allow us to undo the transaction (go back to the state represented by the before-image). However, if the system crashes and a committed transaction's changes have not reached the disk, we have no means to redo the transaction (reapply the updates). Therefore, logging only before-images necessitates forcing dirty pages at commit time. As mentioned above, forcing pages at commit is considered too costly.

If we log only after-images, then there is sufficient information in the log to allow us to redo the transaction (go forward to the state represented by the after-image), but we do not have the information required to undo transactions which aborted after dirty pages were written to disk. Therefore, logging only after-images necessitates holding all dirty buffers in main memory until commit or writing them to a temporary file.
Since neither constraint (forcing pages on commit or buffering pages until commit) was feasible, we chose to log both before and after images. The only remaining consideration is when changes get written to disk. Changes affect both data pages and the log. If the changed data page is written before the log page, and the system crashes before the log page is written, the log will contain insufficient information to undo the change. This violates transaction semantics, since some changed data pages may not have been written, and the database cannot be restored to its pre-transaction state.
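As a rough illustration of logging both images, a single log record can carry the before image and the after image of the modified region, so that the same record supports undo and redo. This is a minimal sketch; the structure layout and field names below are assumptions for exposition, not LIBTP's actual record format.

/*
 * Illustrative sketch only: a log record carrying both the before and
 * after images of a modified region.  Field names and layout are
 * assumptions, not the format LIBTP actually uses.
 */
#include <sys/types.h>

struct undo_redo_rec {
	u_int32_t txnid;	/* transaction that made the change */
	u_int32_t fileid;	/* file containing the modified page */
	u_int32_t pageno;	/* page number within the file */
	u_int32_t offset;	/* byte offset of the change on the page */
	u_int32_t len;		/* length of the modified region */
	/* len bytes of before image followed by len bytes of after image */
	char	  images[1];
};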
The log record describing an update must be written to stable storage before the modified page. This is write-ahead logging. If log records are safely written to disk, data pages may be written at any time afterwards. This means that the only file that ever needs to be forced to disk is the log. Since the log is append-only, modified pages always appear at the end and may be written to disk efficiently in any file system that favors sequential ordering (e.g., FFS, log-structured file system, or an extent-based system).
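A sketch of how the write-ahead rule can be enforced at page-write time: before a data page is written, the log is forced at least up to that page's most recent log sequence number. All function names here (lsn_of_page, log_flushed_lsn, log_flush, disk_write) are assumed for illustration and are not LIBTP's actual interfaces.

/*
 * Sketch of write-ahead logging enforcement.  All names are assumed.
 */
typedef unsigned long lsn_t;

extern lsn_t lsn_of_page(void *page);	/* LSN of last change to the page */
extern lsn_t log_flushed_lsn(void);	/* highest LSN already on disk */
extern int   log_flush(lsn_t lsn);	/* force the log through lsn */
extern int   disk_write(void *page);	/* write the data page itself */

int
write_page_wal(void *page)
{
	lsn_t lsn = lsn_of_page(page);

	/*
	 * The log record describing the update must reach stable storage
	 * before the data page may be written.
	 */
	if (lsn > log_flushed_lsn() && log_flush(lsn) != 0)
		return (-1);
	return (disk_write(page));
}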
3.1.2. Concurrency Control

The concurrency control protocol is responsible for maintaining consistency in the presence of multiple accesses. There are several alternative solutions such as locking, optimistic concurrency control [KUNG81], and timestamp ordering [BERN80]. Since optimistic methods and timestamp ordering are generally more complex and restrict concurrency without eliminating starvation or deadlocks, we chose two-phase locking (2PL). Strict 2PL is suboptimal for certain data structures such as B-trees because it can limit concurrency, so we use a special locking protocol based on one described in [LEHM81].
The B-tree locking protocol we implemented releases locks at internal nodes in the tree as it descends. A lock on an internal page is always released before a lock on its child is obtained (that is, locks are not coupled [BAY77] during descent). When a leaf (or internal) page is split, a write lock is acquired on the parent before the lock on the just-split page is released (locks are coupled during ascent). Write locks on internal pages are released immediately after the page is updated, but locks on leaf pages are held until the end of the transaction.
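The descent rule can be sketched as follows: the lock on an internal page is dropped before the lock on its child is taken, and only the leaf lock is retained. The type and function names (PAGE, btree_root, child_for_key, rlock_page, runlock_page, is_leaf) are illustrative assumptions, not LIBTP's interfaces.

/*
 * Sketch of non-coupled descent: release the parent lock before
 * acquiring the child lock.  All names are assumed for illustration.
 */
typedef struct page PAGE;

extern PAGE *btree_root(void);
extern PAGE *child_for_key(PAGE *parent, const void *key);
extern int   is_leaf(PAGE *p);
extern void  rlock_page(PAGE *p);
extern void  runlock_page(PAGE *p);

PAGE *
descend_to_leaf(const void *key)
{
	PAGE *p, *child;

	p = btree_root();
	rlock_page(p);
	while (!is_leaf(p)) {
		child = child_for_key(p, key);
		runlock_page(p);	/* release the parent first ... */
		rlock_page(child);	/* ... then lock the child */
		p = child;
	}
	return (p);			/* leaf lock held until transaction end */
}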
Since locks are released during descent, the structure of the tree may change above a node being used by some process. If that process must later ascend the tree because of a page split, any such change must not cause confusion. We use the technique described in [LEHM81] which exploits the ordering of data on a B-tree page to guarantee that no process ever gets lost as a result of internal page updates made by other processes.

If a transaction that updates a B-tree aborts, the user-visible changes to the tree must be rolled back. However, changes to the internal nodes of the tree need not be rolled back, since these pages contain no user-visible data. When rolling back a transaction, we roll back all leaf page updates, but no internal insertions or page splits. In the worst case, this will leave a leaf page less than half full. This may cause poor space utilization, but does not lose user data.

Holding locks on leaf pages until transaction commit guarantees that no other process can insert or delete data that has been touched by this process. Rolling back insertions and deletions on leaf pages guarantees that no aborted updates are ever visible to other transactions. Leaving page splits intact permits us to release internal write locks early. Thus transaction semantics are preserved, and locks are held for shorter periods.

The extra complexity introduced by this locking protocol appears substantial, but it is important for multi-user execution. The benefits of non-two-phase locking on B-trees are well established in the database literature [BAY77], [LEHM81]. If a process held locks until it committed, then a long-running update could lock out all other transactions by preventing any other process from locking the root page of the tree. The B-tree locking protocol described above guarantees that locks on internal pages are held for extremely short periods, thereby increasing concurrency.
3.1.3. Management of Shared Data

Database systems permit many users to examine and update the same data concurrently. In order to provide this concurrent access and enforce the write-ahead logging protocol described in section 3.1.1, we use a shared memory buffer manager. Not only does this provide the guarantees we require, but a user-level buffer manager is frequently faster than using the file system buffer cache. Reads or writes involving the file system buffer cache often require copying data between user and kernel space while a user-level buffer manager can return pointers to data pages directly. Additionally, if more than one process uses the same page, then fewer copies may be required.
3.2. Module Architecture

The preceding sections described modules for managing the transaction log, locks, and a cache of shared buffers. In addition, we need to provide functionality for transaction begin, commit, and abort processing, necessitating a transaction manager. In order to arbitrate concurrent access to locks and buffers, we include a process management module which manages a collection of semaphores used to block and release processes. Finally, in order to provide a simple, standard interface we have modified the database access routines (db(3)). For the purposes of this paper we call the modified package the Record Manager. Figure one shows the main interfaces and architecture of LIBTP.
[Figure 1: Library module interfaces. The Txn Manager and Record Manager sit above the Lock Manager, Log Manager, and Buffer Manager, which in turn call into the Process Manager; the interfaces labeled in the figure include lock, unlock, unlock_all, log, log_commit, log_unroll, buf_get, buf_pin, buf_unpin, sleep_on, and wake.]
3.2.1. The Log Manager

The Log Manager enforces the write-ahead logging protocol. Its primitive operations are log, log_commit, log_read, log_roll and log_unroll. The log call performs a buffered write of the specified log record and returns a unique log sequence number (LSN). This LSN may then be used to retrieve a record from the log using the log_read call. The log interface knows very little about the internal format of the log records it receives. Rather, all log records are referenced by a header structure, a log record type, and a character buffer containing the data to be logged. The log record type is used to call the appropriate redo and undo routines during abort and commit processing. While we have used the Log Manager to provide before and after image logging, it may also be used for any of the logging algorithms discussed.
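A sketch of how a caller might use these primitives. The operation names (log, log_read) come from the text above; the exact C signatures, the hdr_t type, and the MY_RECTYPE constant are assumptions for illustration only.

/*
 * Sketch of Log Manager usage.  Signatures and types are assumed.
 */
typedef unsigned long lsn_t;
typedef struct { int len; } hdr_t;	/* assumed header structure */

extern lsn_t log(hdr_t *hdr, int rectype, char *buf, int nbytes);
extern lsn_t log_read(lsn_t lsn, hdr_t *hdr, char *buf, int bufsz);

#define MY_RECTYPE 42			/* assumed record type */

void
log_example(char *data, int nbytes)
{
	hdr_t hdr;
	lsn_t lsn;
	char copy[512];

	hdr.len = nbytes;
	/*
	 * Buffered write of an application-defined record; the returned
	 * LSN can later be handed to log_read to fetch the record back.
	 */
	lsn = log(&hdr, MY_RECTYPE, data, nbytes);
	(void)log_read(lsn, &hdr, copy, (int)sizeof(copy));
}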
The log_commit operation behaves exactly like the log operation but guarantees that the log has been forced to disk before returning. A discussion of our commit strategy appears in the implementation section (section 4.2). Log_unroll reads log records from the log, following backward transaction pointers and calling the appropriate undo routines to implement transaction abort. In a similar manner, log_roll reads log records sequentially forward, calling the appropriate redo routines to recover committed transactions after a system crash.
3.2.2. The Buffer Manager

The Buffer Manager uses a pool of shared memory to provide a least-recently-used (LRU) block cache. Although the current library provides an LRU cache, it would be simple to add alternate replacement policies as suggested by [CHOU85] or to provide multiple buffer pools with different policies. Transactions request pages from the buffer manager and keep them pinned to ensure that they are not written to disk while they are in a logically inconsistent state. When page replacement is necessary, the Buffer Manager finds an unpinned page and then checks with the Log Manager to ensure that the write-ahead protocol is enforced.
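The pin/unpin discipline can be sketched as follows. The names buf_get and buf_unpin appear as interface labels in Figure 1; the signatures and the modify_page helper are assumptions for illustration.

/*
 * Sketch of the pin/unpin discipline.  Signatures are assumed.
 */
extern void *buf_get(int fileid, int pageno);	/* returns a pinned page */
extern void  buf_unpin(void *page);
extern void  modify_page(void *page);		/* assumed helper */

void
update_page(int fileid, int pageno)
{
	void *page = buf_get(fileid, pageno);	/* pinned: cannot be evicted */

	modify_page(page);	/* page may be logically inconsistent here */
	buf_unpin(page);	/* now eligible to be written to disk */
}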
3.2.3. The Lock Manager

The Lock Manager supports general purpose locking (single writer, multiple readers) which is currently used to provide two-phase locking and high concurrency B-tree locking. However, the general purpose nature of the lock manager provides the ability to support a variety of locking protocols. Currently, all locks are issued at the granularity of a page (the size of a buffer in the buffer pool) which is identified by two 4-byte integers (a file id and page number). This provides the necessary information to extend the Lock Manager to perform hierarchical locking [GRAY76]. The current implementation does not support locks at other granularities and does not promote locks; these are obvious future additions to the system.
If an incoming lock request cannot be granted, the requesting process is queued for the lock and descheduled. When a lock is released, the wait queue is traversed and any newly compatible locks are granted. Locks are located via a file and page hash table and are chained both by object and by transaction, facilitating rapid traversal of the lock table during transaction commit and abort.
The primary interfaces to the lock manager are lock, unlock, and lock_unlock_all. Lock obtains a new lock for a specific object. There are also two variants of the lock request, lock_upgrade and lock_downgrade, which allow the caller to atomically trade a lock of one type for a lock of another. Unlock releases a specific mode of lock on a specific object. Lock_unlock_all releases all the locks associated with a specific transaction.
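A usage sketch of these calls at the page granularity (file id and page number) described above. The operation names come from the text; the exact signatures and the LOCK_READ/LOCK_WRITE mode constants are assumptions for illustration.

/*
 * Sketch of lock manager usage.  Signatures and mode values are assumed.
 */
#define LOCK_READ  1
#define LOCK_WRITE 2

extern int lock(int txnid, int fileid, int pageno, int mode);
extern int lock_upgrade(int txnid, int fileid, int pageno, int newmode);
extern int unlock(int txnid, int fileid, int pageno, int mode);
extern int lock_unlock_all(int txnid);

void
lock_example(int txnid)
{
	/* Read-lock a page, then atomically trade it for a write lock. */
	(void)lock(txnid, 1, 17, LOCK_READ);
	(void)lock_upgrade(txnid, 1, 17, LOCK_WRITE);
	/* ... modify the page ... */
	(void)lock_unlock_all(txnid);	/* release everything at commit */
}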
3.2.4. The Process Manager

The Process Manager acts as a user-level scheduler to make processes wait on unavailable locks and pending buffer cache I/O. For each process, a semaphore is maintained upon which that process waits when it needs to be descheduled. When a process needs to be run, its semaphore is cleared, and the operating system reschedules it. No sophisticated scheduling algorithm is applied; if the lock for which a process was waiting becomes available, the process is made runnable. It would have been possible to change the kernel's process scheduler to interact more efficiently with the lock manager, but doing so would have compromised our commitment to a user-level package.
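The sleep_on/wake discipline (the names appear in Figure 1) amounts to each process blocking on its own semaphore and being posted by a peer. The sketch below uses POSIX process-shared semaphores purely for exposition; the paper does not say which semaphore primitive LIBTP actually uses.

/*
 * Illustration of per-process sleep_on/wake using POSIX semaphores.
 * The choice of primitive is an assumption, not LIBTP's implementation.
 */
#include <semaphore.h>

struct proc_slot {
	sem_t sem;		/* one per process, kept in shared memory */
};

void
init_slot(struct proc_slot *p)
{
	sem_init(&p->sem, 1 /* shared between processes */, 0);
}

void
sleep_on(struct proc_slot *me)
{
	sem_wait(&me->sem);	/* block until another process wakes us */
}

void
wake(struct proc_slot *p)
{
	sem_post(&p->sem);	/* make the waiting process runnable again */
}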
3.2.5. The Transaction Manager

The Transaction Manager provides the standard interface of txn_begin, txn_commit, and txn_abort. It keeps track of all active transactions, assigns unique transaction identifiers, and directs the abort and commit processing. When a txn_begin is issued, the Transaction Manager assigns the next available transaction identifier, allocates a per-process transaction structure in shared memory, increments the count of active transactions, and returns the new transaction identifier to the calling process. The in-memory transaction structure contains a pointer into the lock table for locks held by this transaction, the last log sequence number, a transaction state (idle, running, aborting, or committing), an error code, and a semaphore identifier.
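A sketch of the per-process transaction structure just described. Only the list of contents (lock-table pointer, last LSN, state, error code, semaphore identifier) comes from the text; the field names and types are assumptions.

/*
 * Sketch of the in-memory transaction structure.  Field names and
 * types are assumed for illustration.
 */
#include <stddef.h>
#include <sys/types.h>

typedef unsigned long lsn_t;

enum txn_state { TXN_IDLE, TXN_RUNNING, TXN_ABORTING, TXN_COMMITTING };

struct txn {
	u_int32_t	txnid;		/* unique transaction identifier */
	size_t		lock_off;	/* this txn's chain in the lock table */
	lsn_t		last_lsn;	/* last log sequence number written */
	enum txn_state	state;		/* idle, running, aborting, committing */
	int		error;		/* error code */
	int		semid;		/* semaphore used to block/wake this process */
};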
At commit, the Transaction Manager calls log_commit to record the end of transaction and to flush the log. Then it directs the Lock Manager to release all locks associated with the given transaction. If a transaction aborts, the Transaction Manager calls on log_unroll to read the transaction's log records and undo any modifications to the database. As in the commit case, it then calls lock_unlock_all to release the transaction's locks.
3.2.6. The Record Manager

The Record Manager supports the abstraction of reading and writing records to a database. We have modified the database access routines db(3) [BSD91] to call the log, lock, and buffer managers. In order to provide functionality to perform undo and redo, the Record Manager defines a collection of log record types and the associated undo and redo routines. The Log Manager performs a table lookup on the record type to call the appropriate routines. For example, the B-tree access method requires two log record types: insert and delete. A replace operation is implemented as a delete followed by an insert and is logged accordingly.
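The type-indexed dispatch can be sketched as a small table mapping each log record type to its undo and redo routines. Only the idea of a type-indexed lookup and the insert/delete record types come from the text; the table layout and routine names below are assumptions.

/*
 * Sketch of record-type dispatch to undo/redo routines.  Names are assumed.
 */
struct logrec;				/* opaque log record */

typedef int (*recfunc_t)(struct logrec *);

extern int btree_insert_undo(struct logrec *);
extern int btree_insert_redo(struct logrec *);
extern int btree_delete_undo(struct logrec *);
extern int btree_delete_redo(struct logrec *);

enum { REC_BTREE_INSERT, REC_BTREE_DELETE, REC_NTYPES };

static const struct {
	recfunc_t undo;
	recfunc_t redo;
} rec_dispatch[REC_NTYPES] = {
	{ btree_insert_undo, btree_insert_redo },	/* REC_BTREE_INSERT */
	{ btree_delete_undo, btree_delete_redo },	/* REC_BTREE_DELETE */
};

int
undo_record(int rectype, struct logrec *r)
{
	return (rec_dispatch[rectype].undo(r));
}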
3.3. Application Architectures

The structure of LIBTP allows application designers to trade off performance and protection. Since a large portion of LIBTP's functionality is provided by managing structures in shared memory, its structures are subject to corruption by applications when the library is linked directly with the application. For this reason, LIBTP is designed to allow compilation into a separate server process which may be accessed via a socket interface. In this way LIBTP's data structures are protected from application code, but communication overhead is increased. When applications are trusted, LIBTP may be compiled directly into the application providing improved performance. Figures two and three show the two alternate application architectures.
[Figure 2: Server Architecture. In this configuration, the library is loaded into a server process which is accessed via a socket interface.]

[Figure 3: Single Process Architecture. In this configuration, the library routines are loaded as part of the application and accessed via a subroutine interface.]

There are potentially two modes in which one might use LIBTP in a server based architecture. In the first, the server would provide the capability to respond to requests to each of the low level modules (lock, log, buffer, and transaction managers). Unfortunately, the performance of such a system is likely to be blindingly slow since modifying a piece of data would require three or possibly four separate communications: one to lock the data, one to obtain the data, one to log the modification, and possibly one to transmit the modified data. Figure four shows the relative performance for retrieving a single record using the record level call versus using the lower level buffer management and locking calls. The 2:1 ratio observed in the single process case reflects the additional overhead of parsing eight commands rather than one while the 3:1 ratio observed in the client/server architecture reflects both the parsing and the communication overhead. Although there may be applications which could tolerate such performance, it seems far more feasible to support a higher level interface, such as that provided by a query language (e.g. SQL [SQL86]).
Although LIBTP does not have an SQL parser, we have built a server application using the toolkit command language (TCL) [OUST90]. The server supports a command line interface similar to the subroutine interface defined in db(3). Since it is based on TCL, it provides control structures as well.
4. Implementation

4.1. Locking and Deadlock Detection
LIBTP uses two-phase locking for user data. Strictly speaking, the two phases in two-phase locking are a grow phase, during which locks are acquired, and a shrink phase, during which locks are released. No lock may ever be acquired during the shrink phase. The grow phase lasts until the first release, which marks the start of the shrink phase. In practice, the grow phase lasts for the duration of a transaction in LIBTP and in commercial database systems. The shrink phase takes place during transaction commit or abort. This means that locks are acquired on demand during the lifetime of a transaction, and held until commit time, at which point all locks are released.
If multiple transactions are active concurrently, deadlocks can occur and must be detected and resolved. The lock table can be thought of as a representation of a directed graph. The nodes in the graph are transactions. Edges represent the waits-for relation between transactions; if transaction A is waiting for a lock held by transaction B, then a directed edge exists from A to B in the graph. A deadlock exists if a cycle appears in the graph. By convention, no transaction ever waits for a lock it already holds, so reflexive edges are impossible.
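Cycle detection in such a waits-for graph can be sketched as a depth-first search that marks the transactions on the current path; reaching an already-marked transaction means a cycle, i.e. a deadlock. The adjacency-array representation below is an assumption for illustration; LIBTP derives the edges from its lock table rather than storing them this way.

/*
 * Sketch of waits-for cycle detection.  Representation is assumed.
 */
#define MAXTXN 64

static int nedges[MAXTXN];		/* number of txns that txn i waits for */
static int waits_for[MAXTXN][MAXTXN];	/* waits_for[i][j]: j-th such txn */
static int on_path[MAXTXN];		/* txn is on the current DFS path */

static int
dfs(int t)
{
	int i;

	if (on_path[t])
		return (1);		/* back edge: a cycle exists */
	on_path[t] = 1;
	for (i = 0; i < nedges[t]; i++)
		if (dfs(waits_for[t][i]))
			return (1);
	on_path[t] = 0;
	return (0);
}

int
deadlock_exists(int ntxn)
{
	int t;

	for (t = 0; t < ntxn; t++)
		if (dfs(t))
			return (1);
	return (0);
}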
A distinguished process monitors the lock table, searching for cycles. The frequency with which this process runs is user-settable; for the multi-user tests discussed in section 5.1.2, it has been set to wake up every second, but more sophisticated schedules are certainly possible. When a cycle is detected, one of the transactions in the cycle is nominated and aborted. When the transaction aborts, it rolls back its changes and releases its locks, thereby breaking the cycle in the graph.
[Figure 4 (bar chart): Comparison of High and Low Level Interfaces. Elapsed time in seconds to perform a single record retrieval from a command line (rather than a procedural interface) is shown on the y axis (0.1 to 0.6 seconds), with "components" and "record" bars for the Single Process and Client/Server configurations. The "component" numbers reflect the timings when the record is retrieved by separate calls to the lock manager and buffer manager while the "record" timings were obtained by using a single call to the record manager. The 2:1 ratio observed for the single process case is a reflection of the parsing overhead for executing eight separate commands rather than one. The additional factor of one reflected in the 3:1 ratio for the client/server architecture is due to the communication overhead. The true ratios are actually worse since the component timings do not reflect the search times within each page or the time required to transmit the page between the two processes.]
4.2. Group Commit

Since the log must be flushed to disk at commit time, disk bandwidth fundamentally limits the rate at which transactions complete. Since most transactions write only a few small records to the log, the last page of the log will be flushed once by every transaction which writes to it. In the naive implementation, these flushes would happen serially.
+
+LIBTP uses group commit [DEWI84] in order to amortize the cost of one
+synchronous disk write across multiple transactions.  Group commit provides a
+way for a group of transactions to commit simultaneously.  The first several
+transactions to commit write their changes to the in-memory log page, then
+sleep on a distinguished semaphore.  Later, a committing transaction flushes
+the page to disk, and wakes up all its sleeping peers.  The point at which
+changes are actually written is determined by three thresholds.  The first is
+the group threshold and defines the minimum number of transactions which must
+be active in the system before transactions are forced to participate in a
+group commit.  The second is the wait threshold which is expressed as the
+percentage of active transactions waiting to be committed.  The last is the
+logdelay threshold which indicates how much unflushed log should be allowed
+to accumulate before a waiting transaction's commit record is flushed.
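+
+The following fragment sketches how a committing transaction might apply the
+three thresholds.  It is only an illustration of the policy described above;
+the structure, its fields, and the primitives (write_commit_record,
+log_on_disk, log_flush, sleep_on, wakeup_all) are assumptions for the sketch,
+not LIBTP's actual interface.
+
+    /* Hypothetical sketch of the group commit policy; not LIBTP code. */
+    struct logmeta {
+            int     active;         /* transactions active in the system */
+            int     waiting;        /* transactions waiting on the group semaphore */
+            int     unflushed;      /* bytes of log not yet on disk */
+            int     group_thresh;   /* minimum active transactions before grouping */
+            int     wait_thresh;    /* percentage of active transactions waiting */
+            int     delay_thresh;   /* unflushed log allowed before forcing a flush */
+    };
+
+    extern int      write_commit_record(struct logmeta *); /* returns the record's LSN */
+    extern int      log_on_disk(struct logmeta *, int lsn);
+    extern void     log_flush(struct logmeta *);           /* one synchronous write */
+    extern void     sleep_on(struct logmeta *);            /* distinguished semaphore */
+    extern void     wakeup_all(struct logmeta *);
+
+    void
+    txn_commit(struct logmeta *lp)
+    {
+            int     lsn;
+
+            lsn = write_commit_record(lp);  /* append to the in-memory log page */
+
+            if (lp->active >= lp->group_thresh &&
+                100 * (lp->waiting + 1) < lp->wait_thresh * lp->active &&
+                lp->unflushed < lp->delay_thresh) {
+                    /* Join the forming group and let a later committer flush. */
+                    lp->waiting++;
+                    while (!log_on_disk(lp, lsn))
+                            sleep_on(lp);
+                    lp->waiting--;
+                    return;
+            }
+
+            /* Close the group: a single synchronous write covers every waiter. */
+            log_flush(lp);
+            wakeup_all(lp);
+    }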
+
+Group commit can substantially improve performance for high-concurrency
+environments.  If only a few transactions are running, it is unlikely to
+improve things at all.  The crossover point is the point at which the
+transaction commit rate is limited by the bandwidth of the device on which
+the log resides.  If processes are trying to flush the log faster than the
+log disk can accept data, then group commit will increase the commit rate.
+
+4.3. Kernel Intervention for Synchronization
+
+Since LIBTP uses data in shared memory (e.g., the lock table and buffer pool)
+it must be possible for a process to acquire exclusive access to shared data
+in order to prevent corruption.  In addition, the process manager must put
+processes to sleep when the lock or buffer they request is in use by some
+other process.  In the LIBTP implementation under Ultrix 4.0 [2], we use
+System V semaphores to provide this synchronization.  Semaphores implemented
+in this fashion turn out to be an expensive choice for synchronization,
+because each access traps to the kernel and executes atomically there.
+
+[2] Ultrix and DEC are trademarks of Digital Equipment Corporation.
+
+On architectures that support atomic test-and-set, a much better choice would
+be to attempt to obtain a spinlock with a test-and-set, and issue a system
+call only if the spinlock is unavailable.  Since virtually all semaphores in
+LIBTP are uncontested and are held for very short periods of time, this would
+improve performance.  For example, processes must acquire exclusive access to
+buffer pool metadata in order to find and pin a buffer in shared memory.
+This semaphore is requested most frequently in LIBTP.  However, once it is
+acquired, only a few instructions must be executed before it is released.  On
+one architecture for which we were able to gather detailed profiling
+information, the cost of the semaphore calls accounted for 25% of the total
+time spent updating the metadata.  This was fairly consistent across most of
+the critical sections.
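+
+A minimal sketch of the spinlock-with-fallback idea is shown below.  The
+primitives test_and_set(), sem_wait_kernel(), and sem_signal_kernel() are
+assumed for illustration and are not part of LIBTP or Ultrix; a production
+version would also have to protect the waiter count and guard against lost
+wakeups.
+
+    struct latch {
+            volatile int    locked;         /* user-level test-and-set word */
+            int             waiters;        /* processes blocked in the kernel */
+            int             semid;          /* kernel semaphore for the slow path */
+    };
+
+    extern int      test_and_set(volatile int *);  /* atomic; returns old value */
+    extern void     sem_wait_kernel(int);
+    extern void     sem_signal_kernel(int);
+
+    #define SPINS   100             /* brief spin before trapping to the kernel */
+
+    void
+    latch_acquire(struct latch *lp)
+    {
+            int     i;
+
+            for (;;) {
+                    /* Uncontested case: no kernel involvement at all. */
+                    for (i = 0; i < SPINS; i++)
+                            if (test_and_set(&lp->locked) == 0)
+                                    return;
+                    /* Contested (rare): block in the kernel until released. */
+                    lp->waiters++;
+                    sem_wait_kernel(lp->semid);
+                    lp->waiters--;
+            }
+    }
+
+    void
+    latch_release(struct latch *lp)
+    {
+            lp->locked = 0;
+            if (lp->waiters > 0)            /* wake a blocked process, if any */
+                    sem_signal_kernel(lp->semid);
+    }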
+
+In an attempt to quantify the overhead of kernel synchronization, we ran
+tests on a version of 4.3BSD-Reno which had been modified to support binary
+semaphore facilities similar to those described in [POSIX91].  The hardware
+platform consisted of an HP300 (33MHz MC68030) workstation with 16MBytes of
+main memory, and a 600MByte HP7959 SCSI disk (17 ms average seek time).  We
+ran three sets of comparisons which are summarized in figure five.  In each
+comparison we ran two tests, one using hardware spinlocks and the other using
+kernel call synchronization.  Since the test was run single-user, none of the
+locks were contested.  In the first two sets of tests, we ran the full
+transaction processing benchmark described in section 5.1.  In one case we
+ran with both the database and log on the same disk (1 Disk) and in the
+second, we ran with the database and log on separate disks (2 Disk).  In the
+last test, we wanted to create a CPU bound environment, so we used a database
+small enough to fit completely in the cache and issued read-only
+transactions.  The results in figure five express the kernel call
+synchronization performance as a percentage of the spinlock performance.  For
+example, in the 1 disk case, the kernel call implementation achieved 4.4 TPS
+(transactions per second) while the semaphore implementation achieved 4.6
+TPS, and the relative performance of the kernel synchronization is 96% that
+of the spinlock (100 * 4.4 / 4.6).  There are two striking observations from
+these results:
+
+  - even when the system is disk bound, the CPU cost of synchronization is
+    noticeable, and
+
+  - when we are CPU bound, the difference is dramatic (67%).
+
+[Figure 5 bar chart: throughput as a percentage of spinlock throughput
+(20-100) for the 1 Disk, 2 Disks, and Read-only configurations.]
+
+Figure 5: Kernel Overhead for System Call Synchronization.  The performance
+of the kernel call synchronization is expressed as a percentage of the
+spinlock synchronization performance.  In disk bound cases (1 Disk and 2
+Disks), we see that 4-6% of the performance is lost due to kernel calls while
+in the CPU bound case, we have lost 67% of the performance due to kernel
+calls.
+
+4.4. Transaction Protected Access Methods
+
+The B-tree and fixed length recno (record number) access methods have been
+modified to provide transaction protection.  Whereas the previously published
+interface to the access routines had separate open calls for each of the
+access methods, we now have an integrated open call with the following
+calling conventions:
+
+    DB *dbopen (const char *file, int flags, int mode, DBTYPE type,
+                int dbflags, const void *openinfo)
+
+where file is the name of the file being opened, flags and mode are the
+standard arguments to open(2), type is one of the access method types,
+dbflags indicates the mode of the buffer pool and transaction protection, and
+openinfo is the access method specific information.  Currently, the possible
+values for dbflags are DB_SHARED and DB_TP indicating that buffers should be
+kept in a shared buffer pool and that the file should be transaction
+protected.
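+
+For example, a B-tree file could be opened with shared buffering and
+transaction protection along the following lines.  This is a sketch based on
+the calling convention above; the path, the open(2) flags, and the NULL
+openinfo argument are arbitrary choices, and error handling is reduced to a
+return code.
+
+    #include <sys/types.h>
+    #include <fcntl.h>
+    #include <db.h>
+
+    int
+    open_account_file(void)
+    {
+            DB      *db;
+
+            /* Shared buffer pool plus transaction protection for a B-tree. */
+            db = dbopen("/usr/tmp/account.db", O_CREAT | O_RDWR, 0664,
+                DB_BTREE, DB_SHARED | DB_TP, NULL);
+            if (db == NULL)
+                    return (-1);    /* handle the error */
+            return (0);
+    }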
+
+The modifications required to add transaction protection to an access method
+are quite simple and localized.
+
+1. Replace file open with buf_open.
+2. Replace file read and write calls with buffer manager calls (buf_get,
+   buf_unpin).
+3. Precede buffer manager calls with an appropriate (read or write) lock
+   call.
+4. Before updates, issue a logging operation.
+5. After data have been accessed, release the buffer manager pin.
+6. Provide undo/redo code for each type of log record defined.
+
+The following code fragments show how to transaction protect several updates
+to a B-tree [3].  In the unprotected case, an open call is followed by a read
+call to obtain the meta-data for the B-tree.  Instead, we issue an open to
+the buffer manager to obtain a file id and a buffer request to obtain the
+meta-data as shown below.
+
+    char *path;
+    int fid, flags, len, mode;
+
+    /* Obtain a file id with which to access the buffer pool */
+    fid = buf_open(path, flags, mode);
+
+    /* Read the meta data (page 0) for the B-tree */
+    if (tp_lock(fid, 0, READ_LOCK))
+            return error;
+    meta_data_ptr = buf_get(fid, 0, BF_PIN, &len);
+
+The BF_PIN argument to buf_get indicates that we wish to leave this page
+pinned in memory so that it is not swapped out while we are accessing it.
+The last argument to buf_get returns the number of bytes on the page that
+were valid so that the access method may initialize the page if necessary.
+
+Next, consider inserting a record on a particular page of a B-tree.  In the
+unprotected case, we read the page, call _bt_insertat, and write the page.
+Instead, we lock the page, request the buffer, log the change, modify the
+page, and release the buffer.
+
+    int fid, len, pageno;       /* Identifies the buffer */
+    int index;                  /* Location at which to insert the new pair */
+    DBT *keyp, *datap;          /* Key/Data pair to be inserted */
+    DATUM *d;                   /* Key/data structure to insert */
+
+    /* Lock and request the buffer */
+    if (tp_lock(fid, pageno, WRITE_LOCK))
+            return error;
+    buffer_ptr = buf_get(fid, pageno, BF_PIN, &len);
+
+    /* Log and perform the update */
+    log_insdel(BTREE_INSERT, fid, pageno, keyp, datap);
+    _bt_insertat(buffer_ptr, d, index);
+    buf_unpin(buffer_ptr);
+
+Succinctly, the algorithm for turning unprotected code into protected code is
+to replace read operations with lock and buf_get operations and write
+operations with log and buf_unpin operations.
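+
+Restated compactly, the two halves of that transform could be wrapped as
+follows.  This sketch reuses only the calls shown in the fragments above; the
+wrapper names protected_get and protected_insert are hypothetical and are not
+part of the interface.
+
+    /* Hypothetical wrappers restating the transform above. */
+    void *
+    protected_get(int fid, int pageno, int *lenp)
+    {
+            /* read  ==>  lock + buf_get */
+            if (tp_lock(fid, pageno, READ_LOCK))
+                    return (NULL);
+            return (buf_get(fid, pageno, BF_PIN, lenp));
+    }
+
+    int
+    protected_insert(int fid, int pageno, DBT *keyp, DBT *datap, DATUM *d, int index)
+    {
+            void    *buffer_ptr;
+            int      len;
+
+            /* write  ==>  lock + buf_get + log + modify + buf_unpin */
+            if (tp_lock(fid, pageno, WRITE_LOCK))
+                    return (-1);
+            buffer_ptr = buf_get(fid, pageno, BF_PIN, &len);
+            log_insdel(BTREE_INSERT, fid, pageno, keyp, datap);
+            _bt_insertat(buffer_ptr, d, index);
+            buf_unpin(buffer_ptr);
+            return (0);
+    }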
+
+[3] The following code fragments are examples, but do not define the final
+interface.  The final interface will be determined after LIBTP has been fully
+integrated with the most recent db(3) release from the Computer Systems
+Research Group at University of California, Berkeley.
+
+5. Performance
+
+In this section, we present the results of two very different benchmarks.
+The first is an online transaction processing benchmark, similar to the
+standard TPCB but adapted to run in a desktop environment.  The second
+emulates a computer-aided design environment and provides more complex query
+processing.
+
+5.1. Transaction Processing Benchmark
+
+For this section, all performance numbers shown except for the commercial
+database system were obtained on a DECstation 5000/200 with 32MBytes of
+memory running Ultrix V4.0, accessing a DEC RZ57 1GByte disk drive.  The
+commercial relational database system tests were run on a comparable machine,
+a Sparcstation 1+ with 32MBytes memory and a 1GByte external disk drive.  The
+database, binaries and log resided on the same device.  Reported times are
+the means of five tests and have standard deviations within two percent of
+the mean.
+
+The test database was configured according to the TPCB scaling rules for a 10
+transaction per second (TPS) system with 1,000,000 account records, 100
+teller records, and 10 branch records.  Where TPS numbers are reported, we
+are running a modified version of the industry standard transaction
+processing benchmark, TPCB.  The TPCB benchmark simulates a withdrawal
+performed by a hypothetical teller at a hypothetical bank.  The database
+consists of relations (files) for accounts, branches, tellers, and history.
+For each transaction, the account, teller, and branch balances must be
+updated to reflect the withdrawal and a history record is written which
+contains the account id, branch id, teller id, and the amount of the
+withdrawal [TPCB90].
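+
+Sketched in terms of LIBTP-style calls, one withdrawal might look roughly
+like the fragment below.  The transaction entry points and record helpers
+(txn_begin, txn_commit, txn_abort, update_balance, append_history) and the
+file-id constants are placeholders for illustration, not the benchmark's
+actual code.
+
+    #define ACCOUNT_FILE    0       /* hypothetical file ids */
+    #define TELLER_FILE     1
+    #define BRANCH_FILE     2
+
+    extern void     *txn_begin(void);
+    extern int       txn_commit(void *);
+    extern void      txn_abort(void *);
+    extern int       update_balance(void *txn, int file, int id, long delta);
+    extern int       append_history(void *txn, int acct, int teller, int branch,
+                        long delta);
+
+    int
+    tpcb_withdraw(int acct, int teller, int branch, long delta)
+    {
+            void    *txn = txn_begin();
+
+            /* Update the account, teller, and branch balances, then append
+             * a history record carrying the three ids and the amount. */
+            if (update_balance(txn, ACCOUNT_FILE, acct, delta) ||
+                update_balance(txn, TELLER_FILE, teller, delta) ||
+                update_balance(txn, BRANCH_FILE, branch, delta) ||
+                append_history(txn, acct, teller, branch, delta)) {
+                    txn_abort(txn);
+                    return (-1);
+            }
+            return (txn_commit(txn));   /* log flush (possibly grouped) happens here */
+    }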
+
+Our implementation of the benchmark differs from the specification in several
+aspects.  The specification requires that the database keep redundant logs on
+different devices, but we use a single log.  Furthermore, all tests were run
+on a single, centralized system so there is no notion of remote accesses.
+Finally, we calculated throughput by dividing the total elapsed time by the
+number of transactions processed rather than by computing the response time
+for each transaction.
+
+The performance comparisons focus on traditional Unix techniques
+(unprotected, using flock(2) and using fsync(2)) and a commercial relational
+database system.  Well-behaved applications using flock(2) are guaranteed
+that concurrent processes' updates do not interact with one another, but no
+guarantees about atomicity are made.  That is, if the system crashes in
+mid-transaction, only parts of that transaction will be reflected in the
+after-crash state of the database.  The use of fsync(2) at transaction commit
+time provides guarantees of durability after system failure.  However, there
+is no mechanism to perform transaction abort.
+
+5.1.1. Single-User Tests
+
+These tests compare LIBTP in a variety of configurations to traditional UNIX
+solutions and a commercial relational database system (RDBMS).  To
+demonstrate the server architecture we built a front end test process that
+uses TCL [OUST90] to parse database access commands and call the database
+access routines.  In one case (SERVER), frontend and backend processes were
+created which communicated via an IP socket.  In the second case (TCL), a
+single process read queries from standard input, parsed them, and called the
+database access routines.  The performance difference between the TCL and
+SERVER tests quantifies the communication overhead of the socket.  The RDBMS
+implementation used embedded SQL in C with stored database procedures.
+Therefore, its configuration is a hybrid of the single process architecture
+and the server architecture.  The graph in figure six shows a comparison of
+the following six configurations:
+
+    LIBTP     Uses the LIBTP library in a single application.
+    TCL       Uses the LIBTP library in a single application, requires query
+              parsing.
+    SERVER    Uses the LIBTP library in a server configuration, requires
+              query parsing.
+    NOTP      Uses no locking, logging, or concurrency control.
+    FLOCK     Uses flock(2) for concurrency control and nothing for
+              durability.
+    FSYNC     Uses fsync(2) for durability and nothing for concurrency
+              control.
+    RDBMS     Uses a commercial relational database system.
+
+The results show that LIBTP, both in the procedural and parsed environments,
+is competitive with a commercial system (comparing LIBTP, TCL, and RDBMS).
+Compared to existing UNIX solutions, LIBTP is approximately 15% slower than
+using flock(2) or no protection but over 80% better than using fsync(2)
+(comparing LIBTP, FLOCK, NOTP, and FSYNC).
+
+[Figure 6 bar chart: single-user throughput in TPS (0-10) for the LIBTP, TCL,
+SERVER, NOTP, FLOCK, FSYNC, and RDBMS configurations.]
+
+Figure 6: Single-User Performance Comparison.
+
+5.1.2. Multi-User Tests
+
+While the single-user tests form a basis for comparing LIBTP to other
+systems, our goal in multi-user testing was to analyze its scalability.  To
+this end, we have run the benchmark in three modes, the normal disk bound
+configuration (figure seven), a CPU bound configuration (figure eight,
+READ-ONLY), and lock contention bound (figure eight, NO_FSYNC).  Since the
+normal configuration is completely disk bound (each transaction requires a
+random read, a random write, and a sequential write [4]) we expect to see
+little performance improvement as the multiprogramming level increases.  In
+fact, figure seven reveals that we are able to overlap CPU and disk
+utilization slightly producing approximately a 10% performance improvement
+with two processes.  After that point, performance drops off, and at a
+multiprogramming level of 4, we are performing worse than in the single
+process case.
+
+Similar behavior was reported on the commercial relational database system
+using the same configuration.  The important conclusion to draw from this is
+that you cannot attain good multi-user scaling on a badly balanced system.
+If multi-user performance on applications of this sort is important, one must
+have a separate logging device and horizontally partition the database to
+allow a sufficiently high degree of multiprogramming that group commit can
+amortize the cost of log flushing.
+
+By using a very small database (one that can be entirely cached in main
+memory) and read-only transactions, we generated a CPU bound environment.  By
+using the same small database, the complete TPCB transaction, and no fsync(2)
+on the log at commit, we created a lock contention bound environment.  The
+small database used an account file containing only 1000 records rather than
+the full 1,000,000 records and ran enough transactions to read the entire
+database into the buffer pool (2000) before beginning measurements.  The
+read-only transaction consisted of three database reads (from the 1000 record
+account file, the 100 record teller file, and the 10 record branch file).
+Since no data were modified and no history records were written, no log
+records were written.  For the contention bound configuration, we used the
+normal TPCB transaction (against the small database) and disabled the log
+flush.  Figure eight shows both of these results.
+
+The read-only test indicates that we barely scale at all in the CPU bound
+case.  The explanation for that is that even with a single process, we are
+able to drive the CPU utilization to 96%.  As a result, that gives us very
+little room for improvement, and it takes a multiprogramming level of four to
+approach 100% CPU saturation.  In the case where we do perform writes, we are
+interested in detecting when lock contention becomes a dominant performance
+factor.  Contention will cause two phenomena; we will see transactions
+queueing behind frequently accessed data, and we will see transaction abort
+rates increasing due to deadlock.  Given that the branch file contains only
+ten records, we expect contention to become a factor quickly and the NO-FSYNC
+line in figure eight demonstrates this dramatically.  Each additional process
+causes both more waiting and more deadlocking.  Figure nine shows that in the
+small database case (SMALL), waiting is the dominant cause of declining
+performance (the number of aborts increases less steeply than the performance
+drops off in figure eight), while in the large database case (LARGE),
+deadlocking contributes more to the declining performance.
+
+[4] Although the log is written sequentially, we do not get the benefit of
+sequentiality since the log and database reside on the same disk.
+
+[Figure 7 line plot: throughput in TPS (3-10) versus multiprogramming level
+(0-10) for LIBTP.]
+
+Figure 7: Multi-user Performance.  Since the configuration is completely disk
+bound, we see only a small improvement by adding a second process.  Adding
+any more concurrent processes causes performance degradation.
+
+[Figure 8 line plot: throughput in TPS (40-200) versus multiprogramming level
+(0-10) for the READ-ONLY and NO-FSYNC cases.]
+
+Figure 8: Multi-user Performance on a small database.  With one process, we
+are driving the CPU at 96% utilization leaving little room for improvement as
+the multiprogramming level increases.  In the NO-FSYNC case, lock contention
+degrades performance as soon as a second process is added.
+
+[Figure 9 line plot: aborts per 500 transactions (25-125) versus
+multiprogramming level (0-10) for the SMALL and LARGE databases.]
+
+Figure 9: Abort rates on the TPCB Benchmark.  The abort rate climbs more
+quickly for the large database test since processes are descheduled more
+frequently, allowing more processes to vie for the same locks.
+
+Deadlocks are more likely to occur in the LARGE test than in the SMALL test
+because there are more opportunities to wait.  In the SMALL case, processes
+never do I/O and are less likely to be descheduled during a transaction.  In
+the LARGE case, processes will frequently be descheduled since they have to
+perform I/O.  This provides a window where a second process can request locks
+on already locked pages, thus increasing the likelihood of building up long
+chains of waiting processes.  Eventually, this leads to deadlock.
+
+5.2. The OO1 Benchmark
+
+The TPCB benchmark described in the previous section measures performance
+under a conventional transaction processing workload.  Other application
+domains, such as computer-aided design, have substantially different access
+patterns.  In order to measure the performance of LIBTP under workloads of
+this type, we implemented the OO1 benchmark described in [CATT91].
+
+The database models a set of electronics components with connections among
+them.  One table stores parts and another stores connections.  There are
+three connections originating at any given part.  Ninety percent of these
+connections are to nearby parts (those with nearby ids) to model the spatial
+locality often exhibited in CAD applications.  Ten percent of the connections
+are randomly distributed among all other parts in the database.  Every part
+appears exactly three times in the from field of a connection record, and
+zero or more times in the to field.  Parts have x and y locations set
+randomly in an appropriate range.
+
+The intent of OO1 is to measure the overall cost of a query mix
+characteristic of engineering database applications.  There are three tests:
+
+  - Lookup generates 1,000 random part ids, fetches the corresponding parts
+    from the database, and calls a null procedure in the host programming
+    language with the parts' x and y positions.
+
+  - Traverse retrieves a random part from the database and follows
+    connections from it to other parts.  Each of those parts is retrieved,
+    and all connections from it followed.  This procedure is repeated
+    depth-first for seven hops from the original part, for a total of 3280
+    parts.  Backward traversal also exists, and follows all connections into
+    a given part to their origin.  (A sketch of the forward traversal appears
+    after this list.)
+
+  - Insert adds 100 new parts and their connections.
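+
+A rough sketch of the forward traversal is shown below; the PART structure
+and the helpers lookup_part(), connections_from(), and null_procedure() are
+assumptions standing in for lookups against the parts table and the "from"
+index described in section 5.2.1, not the benchmark's actual code.
+
+    #define FANOUT  3       /* three connections originate at every part */
+    #define MAXHOPS 7       /* seven hops from the original part: 3280 parts */
+
+    typedef struct {
+            int     id;
+            double  x, y;
+    } PART;
+
+    extern int      lookup_part(int id, PART *p);
+    extern void     connections_from(int id, int to_ids[FANOUT]);
+    extern void     null_procedure(double x, double y); /* host-language callback */
+
+    void
+    traverse(int part_id, int hops)
+    {
+            PART    p;
+            int     i, to_ids[FANOUT];
+
+            if (lookup_part(part_id, &p))   /* fetch the part itself */
+                    return;
+            null_procedure(p.x, p.y);
+
+            if (hops == MAXHOPS)
+                    return;
+            connections_from(part_id, to_ids);  /* lookup on the "from" index */
+            for (i = 0; i < FANOUT; i++)
+                    traverse(to_ids[i], hops + 1);
+    }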
+
+The benchmark is single-user, but multi-user access controls (locking and
+transaction protection) must be enforced.  It is designed to be run on a
+database with 20,000 parts, and on one with 200,000 parts.  Because we have
+insufficient disk space for the larger database, we report results only for
+the 20,000 part database.
+555 1938(5.2.1.)N
+775(Implementation)X
+1 f
+755 2061(The)N
+920(LIBTP)X
+1182(implementation)X
+1724(of)X
+1831(OO1)X
+2027(uses)X
+2205(the)X
+2342(TCL)X
+2532([OUST90])X
+2914(interface)X
+3235(described)X
+3582(earlier.)X
+3867(The)X
+4031(backend)X
+555 2151(accepts)N
+813(commands)X
+1181(over)X
+1345(an)X
+1442(IP)X
+1534(socket)X
+1760(and)X
+1897(performs)X
+2208(the)X
+2327(requested)X
+2656(database)X
+2954(actions.)X
+3242(The)X
+3387(frontend)X
+3679(opens)X
+3886(and)X
+4022(executes)X
+555 2241(a)N
+618(TCL)X
+796(script.)X
+1041(This)X
+1210(script)X
+1415(contains)X
+1709(database)X
+2013(accesses)X
+2313(interleaved)X
+2697(with)X
+2866(ordinary)X
+3165(program)X
+3463(control)X
+3716(statements.)X
+4120(Data-)X
+555 2331(base)N
+718(commands)X
+1085(are)X
+1204(submitted)X
+1539(to)X
+1621(the)X
+1739(backend)X
+2027(and)X
+2163(results)X
+2392(are)X
+2511(bound)X
+2731(to)X
+2813(program)X
+3105(variables.)X
+755 2454(The)N
+903(parts)X
+1082(table)X
+1261(was)X
+1409(stored)X
+1628(as)X
+1718(a)X
+1776(B-tree)X
+1999(indexed)X
+2275(by)X
+2 f
+2377(id)X
+1 f
+2439(.)X
+2501(The)X
+2648(connection)X
+3022(table)X
+3200(was)X
+3347(stored)X
+3565(as)X
+3654(a)X
+3712(set)X
+3823(of)X
+3912(\256xed-length)X
+555 2544(records)N
+824(using)X
+1029(the)X
+1159(4.4BSD)X
+1446(recno)X
+1657(access)X
+1895(method.)X
+2207(In)X
+2306(addition,)X
+2620(two)X
+2771(B-tree)X
+3003(indices)X
+3261(were)X
+3449(maintained)X
+3836(on)X
+3947(connection)X
+555 2634(table)N
+732(entries.)X
+1007(One)X
+1162(index)X
+1360(mapped)X
+1634(the)X
+2 f
+1752(from)X
+1 f
+1923(\256eld)X
+2085(to)X
+2167(a)X
+2223(connection)X
+2595(record)X
+2821(number,)X
+3106(and)X
+3242(the)X
+3360(other)X
+3545(mapped)X
+3819(the)X
+2 f
+3937(to)X
+1 f
+4019(\256eld)X
+4181(to)X
+4263(a)X
+555 2724(connection)N
+932(record)X
+1163(number.)X
+1473(These)X
+1690(indices)X
+1941(support)X
+2205(fast)X
+2345(lookups)X
+2622(on)X
+2726(connections)X
+3133(in)X
+3219(both)X
+3385(directions.)X
+3765(For)X
+3900(the)X
+4022(traversal)X
+555 2814(tests,)N
+743(the)X
+867(frontend)X
+1165(does)X
+1338(an)X
+1439(index)X
+1642(lookup)X
+1889(to)X
+1976(discover)X
+2273(the)X
+2396(connected)X
+2747(part's)X
+2 f
+2955(id)X
+1 f
+3017(,)X
+3062(and)X
+3203(then)X
+3366(does)X
+3538(another)X
+3804(lookup)X
+4051(to)X
+4138(fetch)X
+555 2904(the)N
+673(part)X
+818(itself.)X
+3 f
+555 3090(5.2.2.)N
+775(Performance)X
+1242(Measurements)X
+1766(for)X
+1889(OO1)X
+1 f
+755 3213(We)N
+888(compare)X
+1186(LIBTP's)X
+1487(OO1)X
+1664(performance)X
+2092(to)X
+2174(that)X
+2314(reported)X
+2602(in)X
+2684([CATT91].)X
+3087(Those)X
+3303(results)X
+3532(were)X
+3709(collected)X
+4019(on)X
+4119(a)X
+4175(Sun)X
+555 3303(3/280)N
+759(\(25)X
+888(MHz)X
+1075(MC68020\))X
+1448(with)X
+1612(16)X
+1714(MBytes)X
+1989(of)X
+2078(memory)X
+2367(and)X
+2505(two)X
+2647(Hitachi)X
+2904(892MByte)X
+3267(disks)X
+3452(\(15)X
+3580(ms)X
+3694(average)X
+3966(seek)X
+4130(time\))X
+555 3393(behind)N
+793(an)X
+889(SMD-4)X
+1149(controller.)X
+1521(Frontends)X
+1861(ran)X
+1984(on)X
+2084(an)X
+2180(8MByte)X
+2462(Sun)X
+2606(3/260.)X
+755 3516(In)N
+844(order)X
+1036(to)X
+1120(measure)X
+1410(performance)X
+1839(on)X
+1941(a)X
+1999(machine)X
+2293(of)X
+2382(roughly)X
+2653(equivalent)X
+3009(processor)X
+3339(power,)X
+3582(we)X
+3698(ran)X
+3822(one)X
+3959(set)X
+4069(of)X
+4157(tests)X
+555 3606(on)N
+666(a)X
+733(standalone)X
+1107(MC68030-based)X
+1671(HP300)X
+1923(\(33MHz)X
+2225(MC68030\).)X
+2646(The)X
+2801(database)X
+3108(was)X
+3263(stored)X
+3489(on)X
+3599(a)X
+3665(300MByte)X
+4037(HP7959)X
+555 3696(SCSI)N
+744(disk)X
+898(\(17)X
+1026(ms)X
+1139(average)X
+1410(seek)X
+1573(time\).)X
+1802(Since)X
+2000(this)X
+2135(machine)X
+2427(is)X
+2500(not)X
+2622(connected)X
+2968(to)X
+3050(a)X
+3106(network,)X
+3409(we)X
+3523(ran)X
+3646(local)X
+3822(tests)X
+3984(where)X
+4201(the)X
+555 3786(frontend)N
+855(and)X
+999(backend)X
+1295(run)X
+1430(on)X
+1538(the)X
+1664(same)X
+1856(machine.)X
+2195(We)X
+2334(compare)X
+2638(these)X
+2830(measurements)X
+3316(with)X
+3485(Cattell's)X
+3783(local)X
+3966(Sun)X
+4117(3/280)X
+555 3876(numbers.)N
+755 3999(Because)N
+1051(the)X
+1177(benchmark)X
+1562(requires)X
+1849(remote)X
+2100(access,)X
+2354(we)X
+2476(ran)X
+2607(another)X
+2876(set)X
+2993(of)X
+3088(tests)X
+3258(on)X
+3365(a)X
+3428(DECstation)X
+3828(5000/200)X
+4157(with)X
+555 4089(32M)N
+732(of)X
+825(memory)X
+1118(running)X
+1393(Ultrix)X
+1610(V4.0)X
+1794(and)X
+1936(a)X
+1998(DEC)X
+2184(1GByte)X
+2459(RZ57)X
+2666(SCSI)X
+2859(disk.)X
+3057(We)X
+3194(compare)X
+3496(the)X
+3619(local)X
+3800(performance)X
+4232(of)X
+555 4179(OO1)N
+734(on)X
+837(the)X
+958(DECstation)X
+1354(to)X
+1439(its)X
+1536(remote)X
+1781(performance.)X
+2250(For)X
+2383(the)X
+2503(remote)X
+2748(case,)X
+2929(we)X
+3045(ran)X
+3170(the)X
+3290(frontend)X
+3584(on)X
+3686(a)X
+3744(DECstation)X
+4139(3100)X
+555 4269(with)N
+717(16)X
+817(MBytes)X
+1090(of)X
+1177(main)X
+1357(memory.)X
+755 4392(The)N
+900(databases)X
+1228(tested)X
+1435(in)X
+1517([CATT91])X
+1880(are)X
+10 f
+635 4515(g)N
+1 f
+755(INDEX,)X
+1045(a)X
+1101(highly-optimized)X
+1672(access)X
+1898(method)X
+2158(package)X
+2442(developed)X
+2792(at)X
+2870(Sun)X
+3014(Microsystems.)X
+10 f
+635 4638(g)N
+1 f
+755(OODBMS,)X
+1137(a)X
+1193(beta)X
+1347(release)X
+1591(of)X
+1678(a)X
+1734(commercial)X
+2133(object-oriented)X
+2639(database)X
+2936(management)X
+3366(system.)X
+10 f
+635 4761(g)N
+1 f
+755(RDBMS,)X
+1076(a)X
+1133(UNIX-based)X
+1565(commercial)X
+1965(relational)X
+2289(data)X
+2444(manager)X
+2742(at)X
+2821(production)X
+3189(release.)X
+3474(The)X
+3620(OO1)X
+3797(implementation)X
+755 4851(used)N
+922(embedded)X
+1272(SQL)X
+1443(in)X
+1525(C.)X
+1638(Stored)X
+1867(procedures)X
+2240(were)X
+2417(de\256ned)X
+2673(to)X
+2755(reduce)X
+2990(client-server)X
+3412(traf\256c.)X
+755 4974(Table)N
+974(two)X
+1130(shows)X
+1366(the)X
+1500(measurements)X
+1995(from)X
+2187([CATT91])X
+2566(and)X
+2718(LIBTP)X
+2976(for)X
+3106(a)X
+3178(local)X
+3370(test)X
+3517(on)X
+3632(the)X
+3765(MC680x0-based)X
+555 5064(hardware.)N
+915(All)X
+1037(caches)X
+1272(are)X
+1391(cleared)X
+1644(before)X
+1870(each)X
+2038(test.)X
+2209(All)X
+2331(times)X
+2524(are)X
+2643(in)X
+2725(seconds.)X
+755 5187(Table)N
+960(two)X
+1102(shows)X
+1324(that)X
+1466(LIBTP)X
+1710(outperforms)X
+2123(the)X
+2242(commercial)X
+2642(relational)X
+2966(system,)X
+3229(but)X
+3352(is)X
+3426(slower)X
+3661(than)X
+3820(OODBMS)X
+4183(and)X
+555 5277(INDEX.)N
+872(Since)X
+1077(the)X
+1202(caches)X
+1444(were)X
+1628(cleared)X
+1888(at)X
+1973(the)X
+2098(start)X
+2263(of)X
+2356(each)X
+2530(test,)X
+2687(disk)X
+2846(throughput)X
+3223(is)X
+3302(critical)X
+3551(in)X
+3639(this)X
+3780(test.)X
+3957(The)X
+4108(single)X
+555 5367(SCSI)N
+749(HP)X
+877(drive)X
+1068(used)X
+1241(by)X
+1347(LIBTP)X
+1595(is)X
+1674(approximately)X
+2163(13%)X
+2336(slower)X
+2576(than)X
+2739(the)X
+2862(disks)X
+3051(used)X
+3223(in)X
+3310([CATT91])X
+3678(which)X
+3899(accounts)X
+4205(for)X
+555 5457(part)N
+700(of)X
+787(the)X
+905(difference.)X
+755 5580(OODBMS)N
+1118(and)X
+1255(INDEX)X
+1525(outperform)X
+1906(LIBTP)X
+2148(most)X
+2323(dramatically)X
+2744(on)X
+2844(traversal.)X
+3181(This)X
+3343(is)X
+3416(because)X
+3691(we)X
+3805(use)X
+3932(index)X
+4130(look-)X
+555 5670(ups)N
+689(to)X
+774(\256nd)X
+921(connections,)X
+1347(whereas)X
+1634(the)X
+1755(other)X
+1942(two)X
+2084(systems)X
+2359(use)X
+2488(a)X
+2546(link)X
+2692(access)X
+2920(method.)X
+3222(The)X
+3369(index)X
+3569(requires)X
+3850(us)X
+3943(to)X
+4027(examine)X
+
+15 p
+%%Page: 15 15
+10 s 10 xH 0 xS 1 f
+3 f
+1 f
+10 f
+555 679(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)N
+2 f
+606 769(Measure)N
+1 f
+1019(INDEX)X
+1389(OODBMS)X
+1851(RDBMS)X
+2250(LIBTP)X
+10 f
+555 771(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)N
+555 787(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)N
+1 f
+595 869(Lookup)N
+1114(5.4)X
+1490(12.9)X
+1950(27)X
+2291(27.2)X
+595 959(Traversal)N
+1074(13)X
+1530(9.8)X
+1950(90)X
+2291(47.3)X
+595 1049(Insert)N
+1114(7.4)X
+1530(1.5)X
+1950(22)X
+2331(9.7)X
+10 f
+555 1059(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)N
+555(c)X
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+959 1059(c)N
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+1329 1059(c)N
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+1791 1059(c)N
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+2190 1059(c)N
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+2512 1059(c)N
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+2618 679(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2 f
+2829 769(Measure)N
+3401(Cache)X
+3726(Local)X
+4028(Remote)X
+1 f
+10 f
+2618 771(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2618 787(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2658 869(Lookup)N
+3401(cold)X
+3747(15.7)X
+4078(20.6)X
+3401 959(warm)N
+3787(7.8)X
+4078(12.4)X
+10 f
+2618 969(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2658 1059(Forward)N
+2950(traversal)X
+3401(cold)X
+3747(28.4)X
+4078(52.6)X
+3401 1149(warm)N
+3747(23.5)X
+4078(47.4)X
+10 f
+2618 1159(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2658 1249(Backward)N
+3004(traversal)X
+3401(cold)X
+3747(24.2)X
+4078(47.4)X
+3401 1339(warm)N
+3747(24.3)X
+4078(47.6)X
+10 f
+2618 1349(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+1 f
+2658 1439(Insert)N
+3401(cold)X
+3787(7.5)X
+4078(10.3)X
+3401 1529(warm)N
+3787(6.7)X
+4078(10.9)X
+10 f
+2618 1539(i)N
+2629(iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii)X
+2618(c)X
+1479(c)Y
+1399(c)Y
+1319(c)Y
+1239(c)Y
+1159(c)Y
+1079(c)Y
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+3341 1539(c)N
+1479(c)Y
+1399(c)Y
+1319(c)Y
+1239(c)Y
+1159(c)Y
+1079(c)Y
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+3666 1539(c)N
+1479(c)Y
+1399(c)Y
+1319(c)Y
+1239(c)Y
+1159(c)Y
+1079(c)Y
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+3968 1539(c)N
+1479(c)Y
+1399(c)Y
+1319(c)Y
+1239(c)Y
+1159(c)Y
+1079(c)Y
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+4309 1539(c)N
+1479(c)Y
+1399(c)Y
+1319(c)Y
+1239(c)Y
+1159(c)Y
+1079(c)Y
+999(c)Y
+919(c)Y
+839(c)Y
+759(c)Y
+3 f
+587 1785(Table)N
+823(2:)X
+931(Local)X
+1163(MC680x0)X
+1538(Performance)X
+2026(of)X
+2133(Several)X
+587 1875(Systems)N
+883(on)X
+987(OO1.)X
+2667 1785(Table)N
+2909(3:)X
+3023(Local)X
+3260(vs.)X
+3397(Remote)X
+3707(Performance)X
+4200(of)X
+2667 1875(LIBTP)N
+2926(on)X
+3030(OO1.)X
+1 f
+10 f
+555 1998(h)N
+579(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)X
+1 f
+555 2274(two)N
+696(disk)X
+850(pages,)X
+1074(but)X
+1197(the)X
+1316(links)X
+1492(require)X
+1741(only)X
+1904(one,)X
+2061(regardless)X
+2408(of)X
+2496(database)X
+2794(size.)X
+2980(Cattell)X
+3214(reports)X
+3458(that)X
+3599(lookups)X
+3873(using)X
+4067(B-trees)X
+555 2364(instead)N
+808(of)X
+901(links)X
+1082(makes)X
+1313(traversal)X
+1616(take)X
+1776(twice)X
+1976(as)X
+2069(long)X
+2237(in)X
+2325(INDEX.)X
+2641(Adding)X
+2907(a)X
+2969(link)X
+3119(access)X
+3351(method)X
+3617(to)X
+3 f
+3704(db)X
+1 f
+3792(\(3\))X
+3911(or)X
+4003(using)X
+4201(the)X
+555 2454(existing)N
+828(hash)X
+995(method)X
+1255(would)X
+1475(apparently)X
+1834(be)X
+1930(a)X
+1986(good)X
+2166(idea.)X
+755 2577(Both)N
+936(OODBMS)X
+1304(and)X
+1446(INDEX)X
+1722(issue)X
+1908 0.1944(coarser-granularity)AX
+2545(locks)X
+2739(than)X
+2902(LIBTP.)X
+3189(This)X
+3356(limits)X
+3562(concurrency)X
+3985(for)X
+4104(multi-)X
+555 2667(user)N
+711(applications,)X
+1140(but)X
+1264(helps)X
+1455(single-user)X
+1829(applications.)X
+2278(In)X
+2367(addition,)X
+2671(the)X
+2791(fact)X
+2934(that)X
+3076(LIBTP)X
+3319(releases)X
+3595(B-tree)X
+3817(locks)X
+4007(early)X
+4189(is)X
+4263(a)X
+555 2757(drawback)N
+896(in)X
+986(OO1.)X
+1210(Since)X
+1416(there)X
+1605(is)X
+1686(no)X
+1793(concurrency)X
+2218(in)X
+2307(the)X
+2432(benchmark,)X
+2836(high-concurrency)X
+3430(strategies)X
+3760(only)X
+3929(show)X
+4125(up)X
+4232(as)X
+555 2847(increased)N
+882(locking)X
+1145(overhead.)X
+1503(Finally,)X
+1772(the)X
+1892(architecture)X
+2294(of)X
+2383(the)X
+2503(LIBTP)X
+2747(implementation)X
+3271(was)X
+3418(substantially)X
+3844(different)X
+4143(from)X
+555 2937(that)N
+702(of)X
+796(either)X
+1006(OODBMS)X
+1375(or)X
+1469(INDEX.)X
+1786(Both)X
+1968(of)X
+2062(those)X
+2258(systems)X
+2538(do)X
+2645(the)X
+2770(searches)X
+3070(in)X
+3159(the)X
+3284(user's)X
+3503(address)X
+3771(space,)X
+3997(and)X
+4139(issue)X
+555 3027(requests)N
+844(for)X
+964(pages)X
+1173(to)X
+1260(the)X
+1383(server)X
+1605(process.)X
+1911(Pages)X
+2123(are)X
+2247(cached)X
+2496(in)X
+2583(the)X
+2706(client,)X
+2929(and)X
+3070(many)X
+3273(queries)X
+3530(can)X
+3667(be)X
+3768(satis\256ed)X
+4055(without)X
+555 3117(contacting)N
+910(the)X
+1029(server)X
+1247(at)X
+1326(all.)X
+1467(LIBTP)X
+1710(submits)X
+1979(all)X
+2080(the)X
+2199(queries)X
+2452(to)X
+2535(the)X
+2653(server)X
+2870(process,)X
+3151(and)X
+3287(receives)X
+3571(database)X
+3868(records)X
+4125(back;)X
+555 3207(it)N
+619(does)X
+786(no)X
+886(client)X
+1084(caching.)X
+755 3330(The)N
+911(RDBMS)X
+1221(architecture)X
+1632(is)X
+1716(much)X
+1925(closer)X
+2148(to)X
+2241(that)X
+2392(of)X
+2490(LIBTP.)X
+2783(A)X
+2872(server)X
+3100(process)X
+3372(receives)X
+3667(queries)X
+3930(and)X
+4076(returns)X
+555 3420(results)N
+786(to)X
+870(a)X
+928(client.)X
+1168(The)X
+1315(timing)X
+1545(results)X
+1776(in)X
+1860(table)X
+2038(two)X
+2180(clearly)X
+2421(show)X
+2612(that)X
+2754(the)X
+2874(conventional)X
+3309(database)X
+3607(client/server)X
+4025(model)X
+4246(is)X
+555 3510(expensive.)N
+941(LIBTP)X
+1188(outperforms)X
+1605(the)X
+1728(RDBMS)X
+2032(on)X
+2136(traversal)X
+2437(and)X
+2577(insertion.)X
+2921(We)X
+3057(speculate)X
+3380(that)X
+3524(this)X
+3663(is)X
+3740(due)X
+3880(in)X
+3966(part)X
+4115(to)X
+4201(the)X
+555 3600(overhead)N
+870(of)X
+957(query)X
+1160(parsing,)X
+1436(optimization,)X
+1880(and)X
+2016(repeated)X
+2309(interpretation)X
+2761(of)X
+2848(the)X
+2966(plan)X
+3124(tree)X
+3265(in)X
+3347(the)X
+3465(RDBMS')X
+3791(query)X
+3994(executor.)X
+755 3723(Table)N
+962(three)X
+1147(shows)X
+1371(the)X
+1492(differences)X
+1873(between)X
+2164(local)X
+2343(and)X
+2482(remote)X
+2728(execution)X
+3063(of)X
+3153(LIBTP's)X
+3456(OO1)X
+3635(implementation)X
+4160(on)X
+4263(a)X
+555 3813(DECstation.)N
+989(We)X
+1122(measured)X
+1451(performance)X
+1879(with)X
+2042(a)X
+2099(populated)X
+2436(\(warm\))X
+2694(cache)X
+2899(and)X
+3036(an)X
+3133(empty)X
+3354(\(cold\))X
+3567(cache.)X
+3812(Reported)X
+4126(times)X
+555 3903(are)N
+681(the)X
+806(means)X
+1037(of)X
+1130(twenty)X
+1374(tests,)X
+1562(and)X
+1704(are)X
+1829(in)X
+1917(seconds.)X
+2237(Standard)X
+2548(deviations)X
+2903(were)X
+3086(within)X
+3316(seven)X
+3525(percent)X
+3788(of)X
+3881(the)X
+4005(mean)X
+4205(for)X
+555 3993(remote,)N
+818(and)X
+954(two)X
+1094(percent)X
+1351(of)X
+1438(the)X
+1556(mean)X
+1750(for)X
+1864(local.)X
+755 4116(The)N
+914(20ms)X
+1121(overhead)X
+1450(of)X
+1551(TCP/IP)X
+1824(on)X
+1938(an)X
+2048(Ethernet)X
+2354(entirely)X
+2633(accounts)X
+2948(for)X
+3076(the)X
+3207(difference)X
+3567(in)X
+3662(speed.)X
+3918(The)X
+4076(remote)X
+555 4206(traversal)N
+857(times)X
+1055(are)X
+1179(nearly)X
+1405(double)X
+1648(the)X
+1771(local)X
+1952(times)X
+2150(because)X
+2430(we)X
+2549(do)X
+2653(index)X
+2855(lookups)X
+3132(and)X
+3272(part)X
+3421(fetches)X
+3673(in)X
+3759(separate)X
+4047(queries.)X
+555 4296(It)N
+629(would)X
+854(make)X
+1053(sense)X
+1252(to)X
+1339(do)X
+1444(indexed)X
+1723(searches)X
+2021(on)X
+2126(the)X
+2248(server,)X
+2489(but)X
+2615(we)X
+2733(were)X
+2914(unwilling)X
+3244(to)X
+3330(hard-code)X
+3676(knowledge)X
+4052(of)X
+4143(OO1)X
+555 4386(indices)N
+803(into)X
+948(our)X
+1075(LIBTP)X
+1317(TCL)X
+1488(server.)X
+1745(Cold)X
+1920(and)X
+2056(warm)X
+2259(insertion)X
+2559(times)X
+2752(are)X
+2871(identical)X
+3167(since)X
+3352(insertions)X
+3683(do)X
+3783(not)X
+3905(bene\256t)X
+4143(from)X
+555 4476(caching.)N
+755 4599(One)N
+915(interesting)X
+1279(difference)X
+1632(shown)X
+1867(by)X
+1973(table)X
+2155(three)X
+2342(is)X
+2421(the)X
+2545(cost)X
+2700(of)X
+2793(forward)X
+3074(versus)X
+3305(backward)X
+3644(traversal.)X
+3987(When)X
+4205(we)X
+555 4689(built)N
+725(the)X
+847(database,)X
+1168(we)X
+1285(inserted)X
+1562(parts)X
+1741(in)X
+1826(part)X
+2 f
+1974(id)X
+1 f
+2059(order.)X
+2292(We)X
+2427(built)X
+2596(the)X
+2717(indices)X
+2967(at)X
+3048(the)X
+3169(same)X
+3357(time.)X
+3562(Therefore,)X
+3923(the)X
+4044(forward)X
+555 4779(index)N
+757(had)X
+897(keys)X
+1068(inserted)X
+1346(in)X
+1432(order,)X
+1646(while)X
+1848(the)X
+1970(backward)X
+2307(index)X
+2509(had)X
+2649(keys)X
+2820(inserted)X
+3098(more)X
+3286(randomly.)X
+3656(In-order)X
+3943(insertion)X
+4246(is)X
+555 4885(pessimal)N
+858(for)X
+975(B-tree)X
+1199(indices,)X
+1469(so)X
+1563(the)X
+1684(forward)X
+1962(index)X
+2163(is)X
+2239(much)X
+2440(larger)X
+2651(than)X
+2812(the)X
+2933(backward)X
+3269(one)X
+7 s
+3385 4853(5)N
+10 s
+4885(.)Y
+3476(This)X
+3640(larger)X
+3850(size)X
+3997(shows)X
+4219(up)X
+555 4975(as)N
+642(extra)X
+823(disk)X
+976(reads)X
+1166(in)X
+1248(the)X
+1366(cold)X
+1524(benchmark.)X
+3 f
+555 5161(6.)N
+655(Conclusions)X
+1 f
+755 5284(LIBTP)N
+1006(provides)X
+1311(the)X
+1438(basic)X
+1632(building)X
+1927(blocks)X
+2165(to)X
+2256(support)X
+2525(transaction)X
+2906(protection.)X
+3300(In)X
+3396(comparison)X
+3799(with)X
+3970(traditional)X
+555 5374(Unix)N
+746(libraries)X
+1040(and)X
+1187(commercial)X
+1597(systems,)X
+1900(it)X
+1974(offers)X
+2192(a)X
+2258(variety)X
+2511(of)X
+2608(tradeoffs.)X
+2964(Using)X
+3185(complete)X
+3509(transaction)X
+3891(protection)X
+4246(is)X
+555 5464(more)N
+747(complicated)X
+1166(than)X
+1331(simply)X
+1575(adding)X
+3 f
+1820(fsync)X
+1 f
+1998(\(2\))X
+2119(and)X
+3 f
+2262(\257ock)X
+1 f
+2426(\(2\))X
+2547(calls)X
+2721(to)X
+2810(code,)X
+3008(but)X
+3136(it)X
+3206(is)X
+3285(faster)X
+3490(in)X
+3578(some)X
+3773(cases)X
+3969(and)X
+4111(offers)X
+8 s
+10 f
+555 5536(hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)N
+5 s
+1 f
+727 5614(5)N
+8 s
+763 5639(The)N
+878(next)X
+1004(release)X
+1196(of)X
+1265(the)X
+1359(4.4BSD)X
+1580(access)X
+1758(method)X
+1966(will)X
+2082(automatically)X
+2446(detect)X
+2614(and)X
+2722(compensate)X
+3039(for)X
+3129(in-order)X
+3350(insertion,)X
+3606(eliminating)X
+3914(this)X
+4023(problem.)X
+
+16 p
+%%Page: 16 16
+8 s 8 xH 0 xS 1 f
+10 s
+3 f
+1 f
+555 630(stricter)N
+801(guarantees)X
+1168(\(atomicity,)X
+1540(consistency,)X
+1957(isolation,)X
+2275(and)X
+2414(durability\).)X
+2815(If)X
+2892(the)X
+3013(data)X
+3170(to)X
+3255(be)X
+3354(protected)X
+3676(are)X
+3798(already)X
+4058(format-)X
+555 720(ted)N
+675(\()X
+2 f
+702(i.e.)X
+1 f
+821(use)X
+949(one)X
+1086(of)X
+1174(the)X
+1293(database)X
+1591(access)X
+1818(methods\),)X
+2157(then)X
+2316(adding)X
+2555(transaction)X
+2928(protection)X
+3274(requires)X
+3554(no)X
+3655(additional)X
+3996(complex-)X
+555 810(ity,)N
+679(but)X
+801(incurs)X
+1017(a)X
+1073(performance)X
+1500(penalty)X
+1756(of)X
+1843(approximately)X
+2326(15%.)X
+755 933(In)N
+844(comparison)X
+1240(with)X
+1404(commercial)X
+1805(database)X
+2104(systems,)X
+2399(the)X
+2519(tradeoffs)X
+2827(are)X
+2948(more)X
+3135(complex.)X
+3473(LIBTP)X
+3717(does)X
+3886(not)X
+4009(currently)X
+555 1023(support)N
+825(a)X
+891(standard)X
+1193(query)X
+1406(language.)X
+1766(The)X
+1921(TCL-based)X
+2312(server)X
+2539(process)X
+2810(allows)X
+3049(a)X
+3115(certain)X
+3364(ease)X
+3533(of)X
+3630(use)X
+3767(which)X
+3993(would)X
+4223(be)X
+555 1113(enhanced)N
+882(with)X
+1047(a)X
+1106(more)X
+1294(user-friendly)X
+1732(interface)X
+2037(\()X
+2 f
+2064(e.g.)X
+1 f
+2203(a)X
+2261(windows)X
+2572(based)X
+2777(query-by-form)X
+3272(application\),)X
+3697(for)X
+3813(which)X
+4031(we)X
+4147(have)X
+555 1203(a)N
+620(working)X
+916(prototype.)X
+1292(When)X
+1513(accesses)X
+1815(do)X
+1924(not)X
+2055(require)X
+2312(sophisticated)X
+2758(query)X
+2969(processing,)X
+3360(the)X
+3486(TCL)X
+3665(interface)X
+3975(is)X
+4056(an)X
+4160(ade-)X
+555 1293(quate)N
+756(solution.)X
+1080(What)X
+1281(LIBTP)X
+1529(fails)X
+1693(to)X
+1781(provide)X
+2052(in)X
+2140(functionality,)X
+2595(it)X
+2665(makes)X
+2896(up)X
+3002(for)X
+3122(in)X
+3210(performance)X
+3643(and)X
+3785(\257exibility.)X
+4161(Any)X
+555 1383(application)N
+931(may)X
+1089(make)X
+1283(use)X
+1410(of)X
+1497(its)X
+1592(record)X
+1818(interface)X
+2120(or)X
+2207(the)X
+2325(more)X
+2510(primitive)X
+2823(log,)X
+2965(lock,)X
+3143(and)X
+3279(buffer)X
+3496(calls.)X
+755 1506(Future)N
+987(work)X
+1175(will)X
+1322(focus)X
+1519(on)X
+1621(overcoming)X
+2026(some)X
+2217(of)X
+2306(the)X
+2426(areas)X
+2614(in)X
+2698(which)X
+2916(LIBTP)X
+3160(is)X
+3235(currently)X
+3547(de\256cient)X
+3845(and)X
+3983(extending)X
+555 1596(its)N
+652(transaction)X
+1026(model.)X
+1288(The)X
+1435(addition)X
+1719(of)X
+1808(an)X
+1905(SQL)X
+2077(parser)X
+2295(and)X
+2432(forms)X
+2640(front)X
+2817(end)X
+2954(will)X
+3099(improve)X
+3387(the)X
+3506(system's)X
+3807(ease)X
+3967(of)X
+4055(use)X
+4183(and)X
+555 1686(make)N
+750(it)X
+815(more)X
+1001(competitive)X
+1400(with)X
+1563(commercial)X
+1963(systems.)X
+2277(In)X
+2365(the)X
+2484(long)X
+2647(term,)X
+2835(we)X
+2950(would)X
+3170(like)X
+3310(to)X
+3392(add)X
+3528(generalized)X
+3919(hierarchical)X
+555 1776(locking,)N
+836(nested)X
+1062(transactions,)X
+1486(parallel)X
+1748(transactions,)X
+2171(passing)X
+2431(of)X
+2518(transactions)X
+2921(between)X
+3209(processes,)X
+3557(and)X
+3693(distributed)X
+4055(commit)X
+555 1866(handling.)N
+900(In)X
+992(the)X
+1115(short)X
+1300(term,)X
+1492(the)X
+1614(next)X
+1776(step)X
+1929(is)X
+2006(to)X
+2092(integrate)X
+2397(LIBTP)X
+2643(with)X
+2809(the)X
+2931(most)X
+3110(recent)X
+3331(release)X
+3579(of)X
+3670(the)X
+3792(database)X
+4093(access)X
+555 1956(routines)N
+833(and)X
+969(make)X
+1163(it)X
+1227(freely)X
+1435(available)X
+1745(via)X
+1863(anonymous)X
+2252(ftp.)X
+3 f
+555 2142(7.)N
+655(Acknowledgements)X
+1 f
+755 2265(We)N
+888(would)X
+1109(like)X
+1250(to)X
+1332(thank)X
+1530(John)X
+1701(Wilkes)X
+1948(and)X
+2084(Carl)X
+2242(Staelin)X
+2484(of)X
+2571(Hewlett-Packard)X
+3131(Laboratories)X
+3557(and)X
+3693(Jon)X
+3824(Krueger.)X
+4148(John)X
+555 2355(and)N
+694(Carl)X
+855(provided)X
+1162(us)X
+1255(with)X
+1419(an)X
+1517(extra)X
+1700(disk)X
+1855(for)X
+1971(the)X
+2091(HP)X
+2215(testbed)X
+2464(less)X
+2606(than)X
+2766(24)X
+2868(hours)X
+3068(after)X
+3238(we)X
+3354(requested)X
+3684(it.)X
+3770(Jon)X
+3903(spent)X
+4094(count-)X
+555 2445(less)N
+699(hours)X
+901(helping)X
+1164(us)X
+1258(understand)X
+1633(the)X
+1754(intricacies)X
+2107(of)X
+2197(commercial)X
+2599(database)X
+2899(products)X
+3198(and)X
+3337(their)X
+3507(behavior)X
+3811(under)X
+4017(a)X
+4076(variety)X
+555 2535(of)N
+642(system)X
+884(con\256gurations.)X
+3 f
+555 2721(8.)N
+655(References)X
+1 f
+555 2901([ANDR89])N
+942(Andrade,)X
+1265(J.,)X
+1361(Carges,)X
+1629(M.,)X
+1765(Kovach,)X
+2060(K.,)X
+2183(``Building)X
+2541(an)X
+2642(On-Line)X
+2939(Transaction)X
+3343(Processing)X
+3715(System)X
+3975(On)X
+4098(UNIX)X
+727 2991(System)N
+982(V'',)X
+2 f
+1134(CommUNIXations)X
+1 f
+1725(,)X
+1765 0.2188(November/December)AX
+2477(1989.)X
+555 3171([BAY77])N
+878(Bayer,)X
+1110(R.,)X
+1223(Schkolnick,)X
+1623(M.,)X
+1754(``Concurrency)X
+2243(of)X
+2330(Operations)X
+2702(on)X
+2802(B-Trees'',)X
+2 f
+3155(Acta)X
+3322(Informatica)X
+1 f
+3700(,)X
+3740(1977.)X
+555 3351([BERN80])N
+936(Bernstein,)X
+1297(P.,)X
+1415(Goodman,)X
+1785(N.,)X
+1917(``Timestamp)X
+2365(Based)X
+2595(Algorithms)X
+2992(for)X
+3119(Concurrency)X
+3567(Control)X
+3844(in)X
+3939(Distributed)X
+727 3441(Database)N
+1042(Systems'',)X
+2 f
+1402(Proceedings)X
+1823(6th)X
+1945(International)X
+2387(Conference)X
+2777(on)X
+2877(Very)X
+3049(Large)X
+3260(Data)X
+3440(Bases)X
+1 f
+3627(,)X
+3667(October)X
+3946(1980.)X
+555 3621([BSD91])N
+864(DB\(3\),)X
+2 f
+1109(4.4BSD)X
+1376(Unix)X
+1552(Programmer's)X
+2044(Manual)X
+2313(Reference)X
+2655(Guide)X
+1 f
+2851(,)X
+2891(University)X
+3249(of)X
+3336(California,)X
+3701(Berkeley,)X
+4031(1991.)X
+555 3801([CATT91])N
+923(Cattell,)X
+1181(R.G.G.,)X
+1455(``An)X
+1632(Engineering)X
+2049(Database)X
+2369(Benchmark'',)X
+2 f
+2838(The)X
+2983(Benchmark)X
+3373(Handbook)X
+3731(for)X
+3848(Database)X
+4179(and)X
+727 3891(Transaction)N
+1133(Processing)X
+1509(Systems)X
+1 f
+1763(,)X
+1803(J.)X
+1874(Gray,)X
+2075(editor,)X
+2302(Morgan)X
+2576(Kaufman)X
+2895(1991.)X
+555 4071([CHEN91])N
+929(Cheng,)X
+1180(E.,)X
+1291(Chang,)X
+1542(E.,)X
+1653(Klein,)X
+1872(J.,)X
+1964(Lee,)X
+2126(D.,)X
+2245(Lu,)X
+2375(E.,)X
+2485(Lutgardo,)X
+2820(A.,)X
+2939(Obermarck,)X
+3342(R.,)X
+3456(``An)X
+3629(Open)X
+3824(and)X
+3961(Extensible)X
+727 4161(Event-Based)N
+1157(Transaction)X
+1556(Manager'',)X
+2 f
+1936(Proceedings)X
+2357(1991)X
+2537(Summer)X
+2820(Usenix)X
+1 f
+3043(,)X
+3083(Nashville,)X
+3430(TN,)X
+3577(June)X
+3744(1991.)X
+555 4341([CHOU85])N
+943(Chou,)X
+1163(H.,)X
+1288(DeWitt,)X
+1570(D.,)X
+1694(``An)X
+1872(Evaluation)X
+2245(of)X
+2338(Buffer)X
+2574(Management)X
+3019(Strategies)X
+3361(for)X
+3481(Relational)X
+3836(Database)X
+4157(Sys-)X
+727 4431(tems'',)N
+2 f
+972(Proceedings)X
+1393(of)X
+1475(the)X
+1593(11th)X
+1755(International)X
+2197(Conference)X
+2587(on)X
+2687(Very)X
+2859(Large)X
+3070(Databases)X
+1 f
+3408(,)X
+3448(1985.)X
+555 4611([DEWI84])N
+925(DeWitt,)X
+1207(D.,)X
+1331(Katz,)X
+1529(R.,)X
+1648(Olken,)X
+1890(F.,)X
+2000(Shapiro,)X
+2295(L.,)X
+2410(Stonebraker,)X
+2843(M.,)X
+2979(Wood,)X
+3220(D.,)X
+3343(``Implementation)X
+3929(Techniques)X
+727 4701(for)N
+841(Main)X
+1030(Memory)X
+1326(Database)X
+1641(Systems'',)X
+2 f
+2001(Proceedings)X
+2422(of)X
+2504(SIGMOD)X
+1 f
+2812(,)X
+2852(pp.)X
+2972(1-8,)X
+3119(June)X
+3286(1984.)X
+555 4881([GRAY76])N
+944(Gray,)X
+1153(J.,)X
+1252(Lorie,)X
+1474(R.,)X
+1595(Putzolu,)X
+1887(F.,)X
+1999(and)X
+2143(Traiger,)X
+2428(I.,)X
+2522(``Granularity)X
+2973(of)X
+3067(locks)X
+3263(and)X
+3406(degrees)X
+3679(of)X
+3773(consistency)X
+4174(in)X
+4263(a)X
+727 4971(large)N
+909(shared)X
+1140(data)X
+1295(base'',)X
+2 f
+1533(Modeling)X
+1861(in)X
+1944(Data)X
+2125(Base)X
+2301(Management)X
+2740(Systems)X
+1 f
+2994(,)X
+3034(Elsevier)X
+3317(North)X
+3524(Holland,)X
+3822(New)X
+3994(York,)X
+4199(pp.)X
+727 5061(365-394.)N
+555 5241([HAER83])N
+931(Haerder,)X
+1235(T.)X
+1348(Reuter,)X
+1606(A.)X
+1728(``Principles)X
+2126(of)X
+2217(Transaction-Oriented)X
+2928(Database)X
+3246(Recovery'',)X
+2 f
+3651(Computing)X
+4029(Surveys)X
+1 f
+4279(,)X
+727 5331(15\(4\);)N
+943(237-318,)X
+1250(1983.)X
+555 5511([KUNG81])N
+943(Kung,)X
+1162(H.)X
+1261(T.,)X
+1371(Richardson,)X
+1777(J.,)X
+1869(``On)X
+2042(Optimistic)X
+2400(Methods)X
+2701(for)X
+2816(Concurrency)X
+3252(Control'',)X
+2 f
+3591(ACM)X
+3781(Transactions)X
+4219(on)X
+727 5601(Database)N
+1054(Systems)X
+1 f
+1328(6\(2\);)X
+1504(213-226,)X
+1811(1981.)X
+
+17 p
+%%Page: 17 17
+10 s 10 xH 0 xS 1 f
+3 f
+1 f
+555 630([LEHM81])N
+939(Lehman,)X
+1245(P.,)X
+1352(Yao,)X
+1529(S.,)X
+1636(``Ef\256cient)X
+1989(Locking)X
+2279(for)X
+2396(Concurrent)X
+2780(Operations)X
+3155(on)X
+3258(B-trees'',)X
+2 f
+3587(ACM)X
+3779(Transactions)X
+4219(on)X
+727 720(Database)N
+1054(Systems)X
+1 f
+1308(,)X
+1348(6\(4\),)X
+1522(December)X
+1873(1981.)X
+555 900([MOHA91])N
+964(Mohan,)X
+1241(C.,)X
+1364(Pirahesh,)X
+1690(H.,)X
+1818(``ARIES-RRH:)X
+2366(Restricted)X
+2721(Repeating)X
+3076(of)X
+3173(History)X
+3442(in)X
+3533(the)X
+3660(ARIES)X
+3920(Transaction)X
+727 990(Recovery)N
+1055(Method'',)X
+2 f
+1398(Proceedings)X
+1819(7th)X
+1941(International)X
+2383(Conference)X
+2773(on)X
+2873(Data)X
+3053(Engineering)X
+1 f
+3449(,)X
+3489(Kobe,)X
+3703(Japan,)X
+3926(April)X
+4115(1991.)X
+555 1170([NODI90])N
+914(Nodine,)X
+1194(M.,)X
+1328(Zdonik,)X
+1602(S.,)X
+1709(``Cooperative)X
+2178(Transaction)X
+2580(Hierarchies:)X
+2996(A)X
+3077(Transaction)X
+3479(Model)X
+3711(to)X
+3796(Support)X
+4072(Design)X
+727 1260(Applications'',)N
+2 f
+1242(Proceedings)X
+1675(16th)X
+1849(International)X
+2303(Conference)X
+2704(on)X
+2815(Very)X
+2998(Large)X
+3220(Data)X
+3411(Bases)X
+1 f
+3598(,)X
+3649(Brisbane,)X
+3985(Australia,)X
+727 1350(August)N
+978(1990.)X
+555 1530([OUST90])N
+923(Ousterhout,)X
+1324(J.,)X
+1420(``Tcl:)X
+1648(An)X
+1771(Embeddable)X
+2197(Command)X
+2555(Language'',)X
+2 f
+2971(Proceedings)X
+3396(1990)X
+3580(Winter)X
+3822(Usenix)X
+1 f
+4045(,)X
+4089(Wash-)X
+727 1620(ington,)N
+971(D.C.,)X
+1162(January)X
+1432(1990.)X
+555 1800([POSIX91])N
+955(``Unapproved)X
+1441(Draft)X
+1645(for)X
+1773(Realtime)X
+2096(Extension)X
+2450(for)X
+2578(Portable)X
+2879(Operating)X
+3234(Systems'',)X
+3608(Draft)X
+3812(11,)X
+3946(October)X
+4239(7,)X
+727 1890(1991,)N
+927(IEEE)X
+1121(Computer)X
+1461(Society.)X
+555 2070([ROSE91])N
+925(Rosenblum,)X
+1341(M.,)X
+1484(Ousterhout,)X
+1892(J.,)X
+1995(``The)X
+2206(Design)X
+2464(and)X
+2611(Implementation)X
+3149(of)X
+3247(a)X
+3314(Log-Structured)X
+3835(File)X
+3990(System'',)X
+2 f
+727 2160(Proceedings)N
+1148(of)X
+1230(the)X
+1348(13th)X
+1510(Symposium)X
+1895(on)X
+1995(Operating)X
+2344(Systems)X
+2618(Principles)X
+1 f
+2947(,)X
+2987(1991.)X
+555 2340([SELT91])N
+904(Seltzer,)X
+1171(M.,)X
+1306(Stonebraker,)X
+1738(M.,)X
+1873(``Read)X
+2116(Optimized)X
+2478(File)X
+2626(Systems:)X
+2938(A)X
+3020(Performance)X
+3454(Evaluation'',)X
+2 f
+3898(Proceedings)X
+727 2430(7th)N
+849(Annual)X
+1100(International)X
+1542(Conference)X
+1932(on)X
+2032(Data)X
+2212(Engineering)X
+1 f
+2608(,)X
+2648(Kobe,)X
+2862(Japan,)X
+3085(April)X
+3274(1991.)X
+555 2610([SPEC88])N
+907(Spector,)X
+1200(Rausch,)X
+1484(Bruell,)X
+1732(``Camelot:)X
+2107(A)X
+2192(Flexible,)X
+2501(Distributed)X
+2888(Transaction)X
+3294(Processing)X
+3668(System'',)X
+2 f
+4004(Proceed-)X
+727 2700(ings)N
+880(of)X
+962(Spring)X
+1195(COMPCON)X
+1606(1988)X
+1 f
+(,)S
+1806(February)X
+2116(1988.)X
+555 2880([SQL86])N
+862(American)X
+1201(National)X
+1499(Standards)X
+1836(Institute,)X
+2139(``Database)X
+2509(Language)X
+2847(SQL'',)X
+3093(ANSI)X
+3301(X3.135-1986)X
+3747(\(ISO)X
+3924(9075\),)X
+4152(May)X
+727 2970(1986.)N
+555 3150([STON81])N
+919(Stonebraker,)X
+1348(M.,)X
+1480(``Operating)X
+1876(System)X
+2132(Support)X
+2406(for)X
+2520(Database)X
+2835(Management'',)X
+2 f
+3348(Communications)X
+3910(of)X
+3992(the)X
+4110(ACM)X
+1 f
+4279(,)X
+727 3240(1981.)N
+555 3420([SULL92])N
+925(Sullivan,)X
+1247(M.,)X
+1394(Olson,)X
+1641(M.,)X
+1788(``An)X
+1976(Index)X
+2195(Implementation)X
+2737(Supporting)X
+3127(Fast)X
+3295(Recovery)X
+3638(for)X
+3767(the)X
+3900(POSTGRES)X
+727 3510(Storage)N
+1014(System'',)X
+1365(to)X
+1469(appear)X
+1726(in)X
+2 f
+1830(Proceedings)X
+2272(8th)X
+2415(Annual)X
+2687(International)X
+3150(Conference)X
+3561(on)X
+3682(Data)X
+3883(Engineering)X
+1 f
+4279(,)X
+727 3600(Tempe,)N
+990(Arizona,)X
+1289(February)X
+1599(1992.)X
+555 3780([TPCB90])N
+914(Transaction)X
+1319(Processing)X
+1692(Performance)X
+2129(Council,)X
+2428(``TPC)X
+2653(Benchmark)X
+3048(B'',)X
+3200(Standard)X
+3510(Speci\256cation,)X
+3973(Waterside)X
+727 3870(Associates,)N
+1110(Fremont,)X
+1421(CA.,)X
+1592(1990.)X
+555 4050([YOUN91])N
+947(Young,)X
+1211(M.)X
+1328(W.,)X
+1470(Thompson,)X
+1858(D.)X
+1962(S.,)X
+2072(Jaffe,)X
+2274(E.,)X
+2388(``A)X
+2525(Modular)X
+2826(Architecture)X
+3253(for)X
+3372(Distributed)X
+3757(Transaction)X
+4161(Pro-)X
+727 4140(cessing'',)N
+2 f
+1057(Proceedings)X
+1478(1991)X
+1658(Winter)X
+1896(Usenix)X
+1 f
+2119(,)X
+2159(Dallas,)X
+2404(TX,)X
+2551(January)X
+2821(1991.)X
+3 f
+755 4263(Margo)N
+1008(I.)X
+1080(Seltzer)X
+1 f
+1338(is)X
+1411(a)X
+1467(Ph.D.)X
+1669(student)X
+1920(in)X
+2002(the)X
+2120(Department)X
+2519(of)X
+2606(Electrical)X
+2934(Engineering)X
+3346(and)X
+3482(Computer)X
+3822(Sciences)X
+4123(at)X
+4201(the)X
+555 4353(University)N
+919(of)X
+1012(California,)X
+1383(Berkeley.)X
+1739(Her)X
+1886(research)X
+2181(interests)X
+2474(include)X
+2735(\256le)X
+2862(systems,)X
+3160(databases,)X
+3513(and)X
+3654(transaction)X
+4031(process-)X
+555 4443(ing)N
+686(systems.)X
+1008(She)X
+1157(spent)X
+1355(several)X
+1612(years)X
+1811(working)X
+2107(at)X
+2194(startup)X
+2441(companies)X
+2813(designing)X
+3153(and)X
+3298(implementing)X
+3771(\256le)X
+3902(systems)X
+4183(and)X
+555 4533(transaction)N
+929(processing)X
+1294(software)X
+1592(and)X
+1729(designing)X
+2061(microprocessors.)X
+2648(Ms.)X
+2791(Seltzer)X
+3035(received)X
+3329(her)X
+3453(AB)X
+3585(in)X
+3668(Applied)X
+3947(Mathemat-)X
+555 4623(ics)N
+664(from)X
+840 0.1953(Harvard/Radcliffe)AX
+1445(College)X
+1714(in)X
+1796(1983.)X
+755 4746(In)N
+845(her)X
+971(spare)X
+1163(time,)X
+1347(Margo)X
+1583(can)X
+1717(usually)X
+1970(be)X
+2068(found)X
+2277(preparing)X
+2607(massive)X
+2887(quantities)X
+3220(of)X
+3309(food)X
+3478(for)X
+3594(hungry)X
+3843(hordes,)X
+4099(study-)X
+555 4836(ing)N
+677(Japanese,)X
+1003(or)X
+1090(playing)X
+1350(soccer)X
+1576(with)X
+1738(an)X
+1834(exciting)X
+2112(Bay)X
+2261(Area)X
+2438(Women's)X
+2770(Soccer)X
+3009(team,)X
+3205(the)X
+3323(Berkeley)X
+3633(Bruisers.)X
+3 f
+755 5049(Michael)N
+1056(A.)X
+1159(Olson)X
+1 f
+1383(is)X
+1461(a)X
+1522(Master's)X
+1828(student)X
+2084(in)X
+2170(the)X
+2292(Department)X
+2695(of)X
+2786(Electrical)X
+3118(Engineering)X
+3534(and)X
+3674(Computer)X
+4018(Sciences)X
+555 5139(at)N
+645(the)X
+774(University)X
+1143(of)X
+1241(California,)X
+1617(Berkeley.)X
+1978(His)X
+2120(primary)X
+2405(interests)X
+2703(are)X
+2833(database)X
+3141(systems)X
+3425(and)X
+3572(mass)X
+3763(storage)X
+4026(systems.)X
+555 5229(Mike)N
+759(spent)X
+963(two)X
+1118(years)X
+1323(working)X
+1625(for)X
+1754(a)X
+1825(commercial)X
+2239(database)X
+2551(system)X
+2808(vendor)X
+3066(before)X
+3307(joining)X
+3567(the)X
+3699(Postgres)X
+4004(Research)X
+555 5319(Group)N
+780(at)X
+858(Berkeley)X
+1168(in)X
+1250(1988.)X
+1470(He)X
+1584(received)X
+1877(his)X
+1990(B.A.)X
+2161(in)X
+2243(Computer)X
+2583(Science)X
+2853(from)X
+3029(Berkeley)X
+3339(in)X
+3421(May)X
+3588(1991.)X
+755 5442(Mike)N
+945(only)X
+1108(recently)X
+1388(transferred)X
+1758(into)X
+1903(Sin)X
+2030(City,)X
+2208(but)X
+2330(is)X
+2403(rapidly)X
+2650(adopting)X
+2950(local)X
+3126(customs)X
+3408(and)X
+3544(coloration.)X
+3929(In)X
+4016(his)X
+4129(spare)X
+555 5532(time,)N
+742(he)X
+843(organizes)X
+1176(informal)X
+1477(Friday)X
+1711(afternoon)X
+2043(study)X
+2240(groups)X
+2482(to)X
+2568(discuss)X
+2823(recent)X
+3044(technical)X
+3358(and)X
+3498(economic)X
+3834(developments.)X
+555 5622(Among)N
+815(his)X
+928(hobbies)X
+1197(are)X
+1316(Charles)X
+1581(Dickens,)X
+1884(Red)X
+2033(Rock,)X
+2242(and)X
+2378(speaking)X
+2683(Dutch)X
+2899(to)X
+2981(anyone)X
+3233(who)X
+3391(will)X
+3535(permit)X
+3764(it.)X
+
+17 p
+%%Trailer
+xt
+
+xs
+
diff --git a/libdb/docs/ref/refs/refs.html b/libdb/docs/ref/refs/refs.html
new file mode 100644
index 0000000..b72666e
--- /dev/null
+++ b/libdb/docs/ref/refs/refs.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Additional references</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Additional References</dl></h3></td>
+<td align=right><a href="../../ref/distrib/layout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Additional references</h1>
+<p>For more information on Berkeley DB or on database systems theory in general,
+we recommend the following sources:
+<h3>Technical Papers on Berkeley DB</h3>
+<p>These papers have appeared in refereed conference proceedings, and are
+subject to copyrights held by the conference organizers and the authors
+of the papers. Sleepycat Software makes them available here as a courtesy
+with the permission of the copyright holders.
+<p><dl compact>
+<p><dt><i>Berkeley DB</i> (<a href="bdb_usenix.html">HTML</a>, <a href="bdb_usenix.ps">Postscript</a>)<dd>Michael Olson, Keith Bostic, and Margo Seltzer, Proceedings of the 1999
+Summer Usenix Technical Conference, Monterey, California, June 1999. This
+paper describes recent commercial releases of Berkeley DB, its most important
+features, the history of the software, and Sleepycat's Open Source
+licensing policies.
+<p><dt><i>Challenges in Embedded Database System Administration</i>
+(<a href="embedded.html">HTML</a>)<dd>Margo Seltzer and Michael Olson, First Workshop on Embedded Systems,
+Cambridge, Massachusetts, March 1999. This paper describes the challenges
+that face embedded systems developers, and how Berkeley DB has been designed to
+address them.
+<p><dt><i>LIBTP: Portable Modular Transactions for UNIX</i>
+(<a href="libtp_usenix.ps">Postscript</a>)<dd>Margo Seltzer and Michael Olson, USENIX Conference Proceedings, Winter
+1992. This paper describes an early prototype of the transactional system
+for Berkeley DB.
+<p><dt><i>A New Hashing Package for UNIX</i>
+(<a href="hash_usenix.ps">Postscript</a>)<dd>Margo Seltzer and Oz Yigit, USENIX Conference Proceedings, Winter 1991.
+This paper describes the Extended Linear Hashing techniques used by Berkeley DB.
+</dl>
+<h3>Background on Berkeley DB Features</h3>
+<p>These papers, although not specific to Berkeley DB, give a good overview of the
+way different Berkeley DB features were implemented.
+<p><dl compact>
+<p><dt><i>Operating System Support for Database Management</i><dd>Michael Stonebraker, Communications of the ACM 24(7), 1981, pp. 412-418.
+<p><dt><i>Dynamic Hash Tables</i><dd>Per-Ake Larson, Communications of the ACM, April 1988.
+<p><dt><i>Linear Hashing: A New Tool for File and Table Addressing</i><dd><a href="witold.html">Witold Litwin</a>, Proceedings of the 6th International
+Conference on Very Large Databases (VLDB), 1980.
+<p><dt><i>The Ubiquitous B-tree</i><dd>Douglas Comer, ACM Comput. Surv. 11, 2 (June 1979), pp. 121-138.
+<p><dt><i>Prefix B-trees</i><dd>Bayer and Unterauer, ACM Transactions on Database Systems, Vol. 2, 1
+(March 1977), pp. 11-26.
+<p><dt><i>The Art of Computer Programming Vol. 3: Sorting and Searching</i><dd>D.E. Knuth, 1968, pp. 471-480.
+<p><dt><i>Document Processing in a Relational Database System</i><dd>Michael Stonebraker, Heidi Stettner, Joseph Kalash, Antonin Guttman,
+Nadene Lynn, Memorandum No. UCB/ERL M82/32, May 1982.
+</dl>
+<h3>Database Systems Theory</h3>
+<p>These publications are standard reference works on the design and
+implementation of database systems. Berkeley DB uses many of the ideas they
+describe.
+<p><dl compact>
+<p><dt><i>Transaction Processing Concepts and Techniques</i><dd>by Jim Gray and Andreas Reuter, Morgan Kaufmann Publishers.
+We recommend chapters 1, 4 (skip 4.6, 4.7, 4.9, 4.10 and 4.11),
+7, 9, 10.3, and 10.4.
+<p><dt><i>An Introduction to Database Systems, Volume 1</i><dd>by C.J. Date, Addison Wesley Longman Publishers.
+In the 5th Edition, we recommend chapters 1, 2, 3, 16 and 17.
+<p><dt><i>Concurrency Control and Recovery in Database Systems</i><dd>by Bernstein, Goodman, Hadzilacos. Currently out of print, but available
+from <a href="http://research.microsoft.com/pubs/ccontrol/">http://research.microsoft.com/pubs/ccontrol/</a>.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/distrib/layout.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/refs/witold.html b/libdb/docs/ref/refs/witold.html
new file mode 100644
index 0000000..9b72c76
--- /dev/null
+++ b/libdb/docs/ref/refs/witold.html
@@ -0,0 +1,24 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Witold Litwin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Additional References</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Witold Litwin</h1>
+<p>Witold is a hell of a guy to take you on a late-night high-speed car
+chase up the mountains of Austria in search of very green wine.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/app.html b/libdb/docs/ref/rep/app.html
new file mode 100644
index 0000000..1397a71
--- /dev/null
+++ b/libdb/docs/ref/rep/app.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building replicated applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/pri.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building replicated applications</h1>
+<p>The simplest way to build a replicated Berkeley DB application is to first
+build (and debug!) the transactional version of the same application.
+Then, add a thin replication layer to the application. All highly available
+applications use the following four additional Berkeley DB methods:
+<a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>, <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>, <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, and
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a>; they may also use the configuration method
+<a href="../../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a>:
+<p><dl compact>
+<p><dt><a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a><dd>The <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method configures the replication system's
+communications infrastructure.
+<p><dt><a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a><dd>The <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method configures (or reconfigures) an existing database
+environment to be a replication master or client.
+<p><dt><a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a><dd>The <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method is used to process incoming messages from other
+environments in the replication group. For clients, it is responsible
+for accepting log records and updating the local databases based on
+messages from the master. For both the master and the clients, it is
+responsible for handling administrative functions (for example, the
+protocol for dealing with lost messages), and permitting new clients to
+join an active replication group.
+<p><dt><a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a><dd>The <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method causes the replication group to elect a new
+master; it is called whenever contact with the master is lost.
+<p><dt><a href="../../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a><dd>The <a href="../../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a> imposes an upper bound on the amount of data
+that will be sent in response to a single call to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.
+</dl>
+<p>To add replication to a Berkeley DB application, application initialization
+must be changed and the application's communications infrastructure must
+be written. The application initialization changes are relatively
+simple, but the communications infrastructure code can be complex.
+<p>During application initialization, the application performs two
+additional tasks: first, it must provide Berkeley DB information about its
+communications infrastructure, and second, it must start the Berkeley DB
+replication system. Generally, a replicated application will do normal
+Berkeley DB recovery and configuration, exactly like any other transactional
+application. Then, once the database environment has been opened, it
+will call the <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method to configure Berkeley DB for replication,
+and then will call the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method to join or create the
+replication group.
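+<p>As an illustration only, the following C fragment sketches that
+initialization sequence.  It assumes <b>env_home</b>, <b>self_eid</b>
+and the <b>send_msg</b> callback are supplied by the application and
+that the site joins the group as a client; error handling is minimal,
+and the exact method prototypes (in particular the <b>send</b>
+callback) vary between Berkeley DB releases, so they should be checked
+against the db.h and api_c pages shipped with this tree.
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Sketch: open an ordinary transactional environment, describe the
+ * application's communications layer to Berkeley DB, then join the
+ * replication group as a client.
+ */
+int
+start_replication(DB_ENV **dbenvp, const char *env_home, int self_eid,
+    int (*send_msg)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t))
+{
+        DB_ENV *dbenv;
+        u_int32_t flags;
+        int ret;
+
+        if ((ret = db_env_create(&dbenv, 0)) != 0)
+                return (ret);
+
+        /* Normal transactional recovery and configuration. */
+        flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+            DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD;
+        if ((ret = dbenv->open(dbenv, env_home, flags, 0)) != 0)
+                goto err;
+
+        /* Describe the communications infrastructure to Berkeley DB. */
+        if ((ret = dbenv->set_rep_transport(dbenv, self_eid, send_msg)) != 0)
+                goto err;
+
+        /* Join the replication group as a client. */
+        if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
+                goto err;
+
+        *dbenvp = dbenv;
+        return (0);
+
+err:    (void)dbenv->close(dbenv, 0);
+        return (ret);
+}</pre></blockquote>
+<p>A callback matching <b>send_msg</b> is sketched on the next page,
+which describes the communications infrastructure.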
+<p>When calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, the application has two choices:
+specifically configure the master for the replication group, or,
+alternatively, configure all group members as clients and then call an
+election, letting the clients select the master from among themselves.
+Either is correct, and the choice is entirely up to the application.
+The result of calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> is usually the discovery of a
+master, or the declaration of the local environment as the master. If
+a master has not been discovered after a reasonable amount of time, the
+application should call <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> to call for an election.
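+<p>A fragment illustrating the second choice (start every site as a
+client, then call for an election) follows; it builds on the
+initialization sketch above.  <b>master_known</b> and the values of
+<b>nsites</b>, <b>priority</b> and <b>timeout</b> are
+application-supplied, and the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> argument
+list shown is an assumption that must be verified against the
+<a href="../../api_c/rep_elect.html">rep_elect</a> page in this tree.
+<p><blockquote><pre>/*
+ * Sketch: join as a client, give an existing master a short time to
+ * announce itself, then call for an election if none was found.
+ * master_known() is a hypothetical application routine recording
+ * whether a new-master indication has been seen.
+ */
+int
+join_group(DB_ENV *dbenv, int nsites, int priority, u_int32_t timeout)
+{
+        int master_eid, ret;
+
+        if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
+                return (ret);
+        sleep(5);               /* Application-chosen wait. */
+        if (!master_known())
+                ret = dbenv->rep_elect(dbenv,
+                    nsites, priority, timeout, &master_eid);
+        return (ret);
+}</pre></blockquote>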
+<p>In the case of multiple processes accessing a replicated environment,
+all of the threads of control expecting to modify databases in the
+environment or process replication messages must call the
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method. Note that not all processes running in replicated
+environments need to call <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> or <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>.
+Read-only processes running in a master environment do not need to be
+configured for replication in any way. Processes running in a client
+environment are read-only by definition, and so do not need to be
+configured for replication either (although, in the case of clients that
+may become masters, it is usually simplest to configure for replication
+on process startup rather than trying to reconfigure when the client
+becomes a master). Obviously, at least one thread of control on each
+client must be configured for replication as messages must be passed
+between the master and the client.
+<p>Databases are generally opened read-write on both clients and masters
+in order to simplify upgrading replication clients to be masters. (If
+databases are opened read-only on clients, and the client is then
+upgraded to be the master, the client would have to close and reopen
+all of its databases in order to support database update queries.)
+However, even though the database is opened read-write on the client,
+any attempt to update it will result in an error until the client is
+reconfigured as a master. No databases can be opened on clients before
+calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, and attempting to do so will result in an
+error.
+<p>There are no additional interface calls required to shut down a database
+environment participating in a replication group. The application
+should shut down the environment in the usual manner, by calling the
+<a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> method.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/pri.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/comm.html b/libdb/docs/ref/rep/comm.html
new file mode 100644
index 0000000..dd3864c
--- /dev/null
+++ b/libdb/docs/ref/rep/comm.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building the communications infrastructure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/newsite.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building the communications infrastructure</h1>
+<p>The replication support in an application is typically written with one
+or more threads of control looping on one or more communication
+channels, receiving and sending messages. These threads accept messages
+from remote environments for the local database environment, and accept
+messages from the local environment for remote environments. Messages
+from remote environments are passed to the local database environment
+using the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method. Messages from the local environment
+are passed to the application for transmission using the callback
+interface specified to the <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method.
+<p>Processes establish communication channels by calling the
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method, regardless of whether they are running in client
+or server environments. This method specifies the <b>send</b>
+interface, a callback interface used by Berkeley DB for sending messages to
+other database environments in the replication group. The <b>send</b>
+interface takes an environment ID and two opaque data objects. It is
+the responsibility of the <b>send</b> interface to transmit the
+information in the two data objects to the database environment
+corresponding to the ID, with the receiving application then calling
+the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method to process the message.
+<p>The details of the transport mechanism are left entirely to the
+application; the only requirement is that the data buffer and size of
+each of the control and rec <a href="../../api_c/dbt_class.html">DBT</a>s passed to the <b>send</b>
+function on the sending site be faithfully copied and delivered to the
+receiving site by means of a call to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> with
+corresponding arguments. The <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method is free-threaded; it
+is safe to deliver any number of messages simultaneously, and from any
+arbitrary thread or process in the Berkeley DB environment.
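+<p>As a sketch, a <b>send</b> callback can be quite small;
+<b>write_to_site</b> below is a hypothetical application routine that
+moves bytes to the environment identified by <b>eid</b> over whatever
+transport the application uses, and the callback prototype shown is an
+assumption that should be checked against the db.h shipped with this
+tree, since other Berkeley DB releases extend it.
+<p><blockquote><pre>/* write_to_site() is application-supplied (sockets, queues, ...). */
+extern int write_to_site(int, const void *, u_int32_t,
+    const void *, u_int32_t);
+
+/*
+ * Sketch of a send callback registered with DB_ENV->set_rep_transport.
+ * Both DBTs must arrive intact at the destination, which hands them
+ * to DB_ENV->rep_process_message.
+ */
+int
+send_msg(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+        return (write_to_site(eid,
+            control->data, control->size, rec->data, rec->size));
+}</pre></blockquote>
+<p>On the receiving side, each incoming message is handed to
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>; a sketch of that dispatch appears
+after the list of return values below.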
+<p>There are a number of informational returns from the
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method:
+<p><dl compact>
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_DUPMASTER">DB_REP_DUPMASTER</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_DUPMASTER">DB_REP_DUPMASTER</a>, it means that
+another database environment in the replication group also believes
+itself to be the master. The application should complete all active
+transactions, close all open database handles, reconfigure itself as
+a client using the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method, and then call for an election by
+calling the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a>, it means
+that another database environment in the replication group has called
+for an election. The application should call the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a>, it means that
+a new master has been elected. The call will also return the local
+environment's ID for that master. If the ID of the master has changed,
+the application may need to reconfigure itself (for example, to redirect
+update queries to the new master rather than the old one). If the new
+master is the local environment, then the application must call the
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method, and reconfigure the supporting Berkeley DB library as a
+replication master.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>, it means that
+a message from a previously unknown member of the replication group has
+been received. The application should reconfigure itself as necessary
+so it is able to send messages to this site.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_OUTDATED">DB_REP_OUTDATED</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_OUTDATED">DB_REP_OUTDATED</a>, it means that
+the environment has been partitioned from the master for too long a
+time, and the master no longer has the necessary log files to update
+the local client. The application should shut down, and the client
+should be reinitialized (see <a href="../../ref/rep/init.html">Initializing a new site</a> for more information).
+</dl>
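+<p>A receive-side handler acting on these returns might look roughly
+like the following sketch.  <b>self_eid</b>, <b>hold_election</b>,
+<b>close_all_handles</b>, <b>note_new_site</b> and <b>reinit_client</b>
+are hypothetical application names, and, as with the other fragments
+in this chapter, the exact Berkeley DB prototypes should be checked
+against the headers in this tree.
+<p><blockquote><pre>extern int self_eid;                    /* This environment's ID. */
+extern void close_all_handles(void);    /* Application-supplied. */
+extern void hold_election(DB_ENV *);    /* Wraps DB_ENV->rep_elect. */
+extern void note_new_site(void);        /* Application bookkeeping. */
+extern void reinit_client(DB_ENV *);    /* See "Initializing a new site". */
+
+/*
+ * Sketch: pass one incoming message to the replication system and
+ * dispatch on the informational return values described above.
+ */
+int
+handle_rep_message(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
+{
+        int ret;
+
+        switch (ret = dbenv->rep_process_message(dbenv, control, rec, &eid)) {
+        case 0:
+                break;
+        case DB_REP_DUPMASTER:
+                /* Two masters: step down to client, then elect. */
+                close_all_handles();
+                (void)dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT);
+                hold_election(dbenv);
+                break;
+        case DB_REP_HOLDELECTION:
+                hold_election(dbenv);
+                break;
+        case DB_REP_NEWMASTER:
+                /* eid now names the new master's environment ID. */
+                if (eid == self_eid)
+                        ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+                break;
+        case DB_REP_NEWSITE:
+                /* Make sure messages can reach the new site. */
+                note_new_site();
+                break;
+        case DB_REP_OUTDATED:
+                /* Too far behind the master; reinitialize this client. */
+                reinit_client(dbenv);
+                break;
+        default:
+                dbenv->err(dbenv, ret, "rep_process_message");
+                break;
+        }
+        return (ret);
+}</pre></blockquote>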
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/newsite.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/elect.html b/libdb/docs/ref/rep/elect.html
new file mode 100644
index 0000000..7237c97
--- /dev/null
+++ b/libdb/docs/ref/rep/elect.html
@@ -0,0 +1,97 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Elections</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/init.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/logonly.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Elections</h1>
+<p>Berkeley DB never initiates elections; that is the responsibility of the
+application. It is not dangerous to hold an election, as the Berkeley DB
+election process ensures there is never more than a single master
+environment. Clients should initiate an election whenever they lose
+contact with the master environment, whenever they see a return of
+<a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a> from the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method, or when, for
+whatever reason, they do not know who the master is. It is not
+necessary for applications to immediately hold elections when they
+start, as any existing master will be quickly discovered after calling
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>. If no master has been found after a short wait
+period, then the application should call for an election.
+<p>For a client to become the master, the client must win an election. To
+win an election, the replication group must currently have no master,
+the client must have the highest priority of the database environments
+participating in the election, and at least (N / 2 + 1) of the members
+of the replication group must participate in the election. In the case
+of multiple database environments with equal priorities, the environment
+with the most recent log records will win.
+<p>It is dangerous to configure more than one master environment using the
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method, and applications should be careful not to do so.
+Applications should only configure themselves as the master environment
+if they are the only possible master, or if they have won an election.
+An application can only know it has won an election if the
+<a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method returns success and the local database environment's
+ID as the new master environment ID, or if the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method
+returns <a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a> and the local database environment's
+ID as the new master environment ID.
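+<p>For illustration, the first of these checks might be coded as follows
+(a sketch only; nsites, priority, timeout, and SELF_EID stand in for values
+the application maintains):
+<p><blockquote><pre>int newmaster;
+
+if ((ret = dbenv-&gt;rep_elect(dbenv,
+    nsites, priority, timeout, &newmaster)) == 0 &&
+    newmaster == SELF_EID)
+    /* This environment won; reconfigure the library as master. */
+    ret = dbenv-&gt;rep_start(dbenv, NULL, DB_REP_MASTER);</pre></blockquote>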
+<p>To add a database environment to the replication group with the intent
+of it becoming the master, first add it as a client. Since it may be
+out-of-date with respect to the current master, allow it to update
+itself from the current master. Then, shut the current master down.
+Presumably, the added client will win the subsequent election. If the
+client does not win the election, it is likely that it was not given
+sufficient time to update itself with respect to the current master.
+<p>If a client is unable to find a master or win an election, it means that
+the network has been partitioned and there are not enough environments
+participating in the election for one of the participants to win. In
+this case, the application should repeatedly call <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> and
+<a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>, alternating between attempting to discover an
+existing master, and holding an election to declare a new one. In
+desperate circumstances, an application could simply declare itself the
+master by calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, or by reducing the number of
+participants required to win an election until the election is won.
+Neither of these solutions is recommended: in the case of a network
+partition, either of these choices can result in there being two masters
+in one replication group, and the databases in the environment might
+irretrievably diverge as they are modified in different ways by the
+masters.
+<p>It is possible for a less-preferred database environment to win an
+election if a number of systems crash at the same time. Because an
+election winner is declared as soon as enough environments participate
+in the election, the environment on a slow booting but well-connected
+machine might lose to an environment on a badly connected but faster
+booting machine. In the case of a number of environments crashing at
+the same time (for example, a set of replicated servers in a single
+machine room), applications should bring the database environments on
+line as clients initially (which will allow them to process read queries
+immediately), and then hold an election after sufficient time has passed
+for the slower booting machines to catch up.
+<p>If, for any reason, a less-preferred database environment becomes the
+master, it is possible to switch masters in a replicated environment,
+although it is not a simple operation. For example, the preferred
+master crashes, and one of the replication group clients becomes the
+group master. In order to restore the preferred master to master
+status, take the following steps:
+<p><ol>
+<p><li>The preferred master should reboot and re-join the replication group
+as a client.
+<li>Once the preferred master has caught up with the replication group, the
+application on the current master should complete all active
+transactions, close all open database handles, and reconfigure itself
+as a client using the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method.
+<li>Then, the current or preferred master should call for an election using
+the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method.
+</ol>
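+<p>Steps 2 and 3 might be expressed on the current master roughly as
+follows. This is a sketch only: committing active transactions and closing
+database handles are elided, and nsites, priority, and timeout are
+placeholders for application-chosen values.
+<p><blockquote><pre>/* Step 2: rejoin the group as a client. */
+if ((ret = dbenv-&gt;rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
+    goto err;
+
+/*
+ * Step 3: call for an election; the preferred master, having the
+ * higher priority, should win it.
+ */
+ret = dbenv-&gt;rep_elect(dbenv, nsites, priority, timeout, &newmaster);</pre></blockquote>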
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/init.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/logonly.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/ex.html b/libdb/docs/ref/rep/ex.html
new file mode 100644
index 0000000..6236744
--- /dev/null
+++ b/libdb/docs/ref/rep/ex.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Ex_repquote: a replication example</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex_comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Ex_repquote: a replication example</h1>
+<p>Ex_repquote, found in the <b>examples_c/ex_repquote</b> subdirectory
+of the Berkeley DB distribution, is a simple but complete demonstration of a
+replicated application. The application is a mock stock ticker. The
+master accepts a stock symbol and an integer value as input and stores this
+information in a replicated database; the clients display the contents
+of the database every few seconds.
+<p>The ex_repquote application's communication infrastructure is based on
+TCP/IP sockets, and uses POSIX 1003.1 style networking/socket support.
+As a result, it is not as portable as the Berkeley DB library itself. The
+Makefile created by the standard UNIX configuration will build the
+ex_repquote application on most platforms. Enter "make ex_repquote" to
+attempt to build it.
+<p>The synopsis for ex_repquote is as follows:
+<pre>ex_repquote [<b>-MC</b>] [<b>-h home</b>] [<b>-m host:port</b>] [<b>-o host:port</b>] [<b>-n sites</b>] [<b>-p priority</b>]</pre>
+<p>The options to ex_repquote are as follows:
+<p><dl compact>
+<p><dt><b>-M</b><dd>Configure this process as a master.
+<p><dt><b>-C</b><dd>Configure this process as a client.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-m</b><dd>Listen on port "port" of host "host" for incoming connections.
+<p><dt><b>-o</b><dd>Attempt to connect to another member of the replication group which is
+listening on host "host" at port "port". Members of a replication group
+should be able to find all other members of a replication group so long
+as they are in contact with at least one other member of the replication
+group.
+<p><dt><b>-n</b><dd>Specify the total number of sites in the replication group.
+<p><dt><b>-p</b><dd>Set the election priority. See <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> for more
+information.
+</dl>
+<p>A typical ex_repquote session begins with a command such as the
+following, to start a master:
+<p><blockquote><pre>ex_repquote -M -p 100 -n 4 -h DIR1 -m localhost:5000</pre></blockquote>
+<p>and several clients:
+<p><blockquote><pre>ex_repquote -C -p 50 -n 4 -h DIR2 -m localhost:5001 -o localhost:5000
+ex_repquote -C -p 10 -n 4 -h DIR3 -m localhost:5002 -o localhost:5000
+ex_repquote -C -p 0 -n 4 -h DIR4 -m localhost:5003 -o localhost:5000</pre></blockquote>
+<p>In this example, the client with home directory DIR4 can never become
+a master (its priority is 0). Both of the other clients can become
+masters, but the one with home directory DIR2 is preferred. Priorities
+are assigned by the application and should reflect the desirability of
+having particular clients take over as master in the case that the
+master fails.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex_comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/ex_comm.html b/libdb/docs/ref/rep/ex_comm.html
new file mode 100644
index 0000000..4a87c6e
--- /dev/null
+++ b/libdb/docs/ref/rep/ex_comm.html
@@ -0,0 +1,177 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Ex_repquote: a TCP/IP based communication infrastructure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/ex.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex_rq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Ex_repquote: a TCP/IP based communication infrastructure</h1>
+<p>All Berkeley DB replication applications must implement a communication
+infrastructure. The communication infrastructure consists of three
+parts: a way to map environment IDs to particular sites, the functions
+to get and receive messages, and the application architecture that
+supports the particular communication infrastructure used (for example,
+individual threads per communicating site, a shared message handler for
+all sites, a hybrid solution). The communication infrastructure is
+implemented in the file <b>ex_repquote/ex_rq_net.c</b>, and each part
+of that infrastructure is described as follows.
+<p>Ex_repquote maintains a table of environment ID to TCP/IP port mappings.
+This table is stored in the app_private field of the <a href="../../api_c/env_class.html">DB_ENV</a>
+object so it can be accessed by any function that has the database
+environment handle. The table is represented by a machtab_t structure
+which contains a reference to a linked list of member_t's, both of which
+are defined in <b>ex_repquote/ex_rq_net.c</b>. Each member_t contains
+the host and port identification, the environment ID, and a file
+descriptor. The table is maintained by the following interfaces:
+<p><blockquote><pre>int machtab_add(machtab_t *machtab, int fd, u_int32_t hostaddr, int port, int *eidp);
+int machtab_init(machtab_t **machtabp, int priority, int nsites);
+int machtab_getinfo(machtab_t *machtab, int eid, u_int32_t *hostp, int *portp);
+void machtab_parm(machtab_t *machtab, int *nump, int *priorityp, u_int32_t *timeoutp);
+int machtab_rem(machtab_t *machtab, int eid, int lock);
+</pre></blockquote>
+<p>These interfaces are particular to this application and communication
+infrastructure, but provide an indication of the sort of functionality
+that is needed to maintain the application-specific state for a
+TCP/IP-based infrastructure. The goal of the table and its interfaces
+is threefold: First, it must guarantee that given an environment ID,
+the send function can send a message to the appropriate place. Second,
+when given the special environment ID <a href="../../api_c/rep_transport.html#DB_EID_BROADCAST">DB_EID_BROADCAST</a>, the send
+function can send messages to all the machines in the group. Third,
+upon receipt of an incoming message, the receive function can correctly
+identify the sender and pass the appropriate environment ID to the
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method.
+<p>Mapping a particular environment ID to a specific port is accomplished
+by looping through the linked list until the desired environment ID is
+found. Broadcast communication is implemented by looping through the
+linked list and sending to each member found. Since each port
+communicates with only a single other environment, receipt of a message
+on a particular port precisely identifies the sender.
+<p>The example provided is merely one way to satisfy these requirements,
+and there are alternative implementations as well. For instance,
+instead of associating separate socket connections with each remote
+environment, an application might instead label each message with a
+sender identifier; instead of looping through a table and sending a
+copy of a message to each member of the replication group, the
+application could send a single message using a broadcast protocol.
+<p>In ex_repquote's case, the send function (slightly simplified) is as
+follows:
+<pre><p><blockquote>int
+quote_send(dbenv, control, rec, eid, flags)
+ DB_ENV *dbenv;
+ const DBT *control, *rec;
+ int eid;
+ u_int32_t flags;
+{
+ int fd, n, ret;
+ machtab_t *machtab;
+ member_t *m;
+<p>
+ machtab = (machtab_t *)dbenv-&gt;app_private;
+<p>
+ /*
+ * If this is a broadcast, call a separate function to
+ * iterate through the table of environment (a/k/a
+ * machine) IDs and call quote_send_one on each.
+ * (This function is not reproduced here, but can be
+ * seen in ex_rq_net.c.)
+ */
+ if (eid == DB_EID_BROADCAST) {
+ n = quote_send_broadcast(machtab, rec, control, flags);
+ if (n &lt; 0)
+ return (DB_REP_UNAVAIL);
+ return (0);
+ }
+<p>
+	/* Find the file descriptor, fd, associated with this EID. */
+ fd = 0;
+ if ((ret = pthread_mutex_lock(&machtab-&gt;mtmutex)) != 0)
+ return (0);
+ for (m = LIST_FIRST(&machtab-&gt;machlist); m != NULL;
+ m = LIST_NEXT(m, links)) {
+ if (m-&gt;eid == eid) {
+ fd = m-&gt;fd;
+ break;
+ }
+ }
+ if (pthread_mutex_unlock(&machtab-&gt;mtmutex) != 0)
+ return (-1);
+<p>
+ if (fd == 0)
+ return (DB_REP_UNAVAIL);
+<p>
+ /* We have a file descriptor; write the data over it. */
+ ret = quote_send_one(rec, control, fd, flags);
+<p>
+ return (ret);
+}
+<p>
+int
+quote_send_broadcast(machtab, rec, control, flags)
+ machtab_t *machtab;
+ const DBT *rec, *control;
+ u_int32_t flags;
+{
+ int ret, sent;
+ member_t *m, *next;
+ if ((ret = pthread_mutex_lock(&machtab-&gt;mtmutex)) != 0)
+ return (0);
+ sent = 0;
+ for (m = LIST_FIRST(&machtab-&gt;machlist); m != NULL; m = next) {
+ next = LIST_NEXT(m, links);
+ if ((ret = quote_send_one(rec, control, m-&gt;fd, flags)) != 0) {
+ (void)machtab_rem(machtab, m-&gt;eid, 0);
+ } else
+ sent++;
+ }
+ if (pthread_mutex_unlock(&machtab-&gt;mtmutex) != 0)
+ return (-1);
+ return (sent);
+}</blockquote></pre>
+<p>The quote_send_one function has been omitted as it simply writes the
+data requested over the file descriptor that it is passed. It contains
+nothing specific to Berkeley DB or this communication infrastructure. The
+complete code can be found in <b>ex_repquote/ex_rq_net.c</b>.
+<p>The quote_send function is passed as the callback to <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a>;
+Berkeley DB automatically sends messages as needed for replication. The
+receive function is a mirror to the quote_send_one function. It is not
+a callback function (the application is responsible for collecting
+messages and calling <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> on them as is convenient). In
+the sample application, all messages transmitted are Berkeley DB messages that
+get handled by <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>; however, this is not always going
+to be the case. The application may want to pass its own messages
+across the same channels, distinguish between its own messages and those
+of Berkeley DB, and then pass only the Berkeley DB ones to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.
+<p>The final component of the communication infrastructure is the process
+model used to communicate with all the sites in the replication group.
+Each site creates a thread of control that listens on its designated
+socket (as specified by the <b>-m</b> command line argument) and
+then creates a new channel for each site that contacts it. In addition,
+each site explicitly connects to the sites specified in the
+<b>-o</b> command line argument. This is a fairly standard TCP/IP
+process architecture and is implemented by the following functions (all
+in <b>ex_repquote/ex_rq_net.c</b>).
+<p><blockquote><pre>int get_connected_socket(machtab_t *machtab, char *progname, char *remotehost,
+int port, int *is_open, int *eidp): Connect to the specified host/port, add the
+site to the machtab, and return a file descriptor for communication with this
+site.
+<p>
+int listen_socket_init(char *progname, int port): Initialize a socket for
+listening on a particular port.
+<p>
+int listen_socket_accept(machtab_t *machtab, char *progname, int socket,
+int *eidp): Accept a connection on a socket and add it to the machtab.
+int listen_socket_connect</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/ex.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex_rq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/ex_rq.html b/libdb/docs/ref/rep/ex_rq.html
new file mode 100644
index 0000000..80b7cd2
--- /dev/null
+++ b/libdb/docs/ref/rep/ex_rq.html
@@ -0,0 +1,236 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Ex_repquote: putting it all together</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/ex_comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Ex_repquote: putting it all together</h1>
+<p>A replicated application must initialize a replicated environment, set
+up its communication infrastructure, and then make sure that incoming
+messages are received and processed.
+<p>To initialize replication, ex_repquote creates a Berkeley DB environment and
+calls <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> to establish a send function. The following
+code fragment (from the env_init function, found in
+<b>ex_repquote/ex_rq_main.c</b>) demonstrates this. Prior to calling
+this function, the application has called machtab_init to initialize
+its environment ID to port mapping structure and passed this structure
+into env_init.
+<pre><p><blockquote>if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: env create failed: %s\n",
+ progname, db_strerror(ret));
+ return (ret);
+}
+dbenv-&gt;set_errfile(dbenv, stderr);
+dbenv-&gt;set_errpfx(dbenv, prefix);
+(void)dbenv-&gt;set_cachesize(dbenv, 0, CACHESIZE, 0);
+<p>
+dbenv-&gt;app_private = machtab;
+(void)dbenv-&gt;set_rep_transport(dbenv, SELF_EID, quote_send);
+<p>
+flags = DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+<p>
+ret = dbenv-&gt;open(dbenv, home, flags, 0);</blockquote></pre>
+<p>ex_repquote opens a listening socket for incoming connections and opens
+an outgoing connection to every machine that it knows about (that is,
+all the sites listed in the <b>-o</b> command line argument).
+Applications can structure the details of this in different ways, but
+ex_repquote creates a user-level thread to listen on its socket, plus
+a thread to loop and handle messages on each socket, in addition to the
+threads needed to manage the user interface, update the database on the
+master, and read from the database on the client (in other words, in
+addition to the normal functionality of any database application).
+<p>Once the initial threads have all been started and the communications
+infrastructure is initialized, the application signals that it is ready
+for replication and joins a replication group by calling
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>:
+<pre><p><blockquote>if (whoami == MASTER) {
+ if ((ret = dbenv-&gt;rep_start(dbenv, NULL, DB_REP_MASTER)) != 0) {
+ /* Complain and exit on error. */
+ }
+ /* Go run the master application code. */
+} else {
+ memset(&local, 0, sizeof(local));
+ local.data = myaddr;
+ local.size = strlen(myaddr) + 1;
+ if ((ret =
+ dbenv-&gt;rep_start(dbenv, &local, DB_REP_CLIENT)) != 0) {
+ /* Complain and exit on error. */
+ }
+ /* Sleep to give ourselves a minute to find a master. */
+ sleep(5);
+ /* Go run the client application code. */
+}</blockquote></pre>
+<p>Note the use of the optional second argument to <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> in
+the client initialization code. The argument "myaddr" is a piece of
+data, opaque to Berkeley DB, that will be broadcast to each member of a
+replication group; it allows new clients to join a replication group,
+without knowing the location of all its members; the new client will
+be contacted by the members it does not know about, who will receive
+the new client's contact information that was specified in "myaddr."
+See <a href="../../ref/rep/newsite.html">Connecting to a new site</a> for more
+information.
+<p>The final piece of a replicated application is the code that loops,
+receives, and processes messages from a given remote environment.
+ex_repquote runs one of these loops in a parallel thread for each socket
+connection; other applications may want to queue messages somehow and
+process them asynchronously, or select() on a number of sockets and
+either look up the correct environment ID for each or encapsulate the
+ID in the communications protocol. The details may thus vary from
+application to application, but in ex_repquote the message-handling loop
+is as follows (code fragment from the hm_loop function, found in
+<b>ex_repquote/ex_rq_util.c</b>):
+<pre><p><blockquote>DB_ENV *dbenv;
+DBT rec, control; /* Structures encapsulating a received message. */
+elect_args *ea; /* Parameters to the elect thread. */
+machtab_t *tab; /* The environment ID to fd mapping table. */
+pthread_t elect_thr; /* Election thread spawned. */
+repsite_t self; /* My host and port identification. */
+int eid; /* Environment from whom I am receiving messages. */
+int fd; /* FD on which I am receiving messages. */
+int master_eid; /* Global indicating the current master eid. */
+int n; /* Number of sites; obtained from machtab_parm. */
+int newm; /* New master EID. */
+int open; /* Boolean indicating if connection already exists. */
+int pri; /* My priority. */
+int r, ret; /* Return values. */
+int timeout; /* My election timeout value. */
+int tmpid; /* Used to call dbenv-&gt;rep_process_message. */
+char *c; /* Temp used in parsing host:port names. */
+char *myaddr; /* My host/port address. */
+char *progname; /* Program name for error messages. */
+void *status; /* Pthread return status. */
+for (ret = 0; ret == 0;) {
+ if ((ret = get_next_message(fd, &rec, &control)) != 0) {
+ /*
+ * There was some sort of network error; close this
+ * connection and remove it from the table of
+ * environment IDs.
+ */
+ close(fd);
+ if ((ret = machtab_rem(tab, eid, 1)) != 0)
+ break;
+<p>
+ /*
+ * If I'm the master, I just lost a client and this
+ * thread is done.
+ */
+ if (master_eid == SELF_EID)
+ break;
+<p>
+ /*
+ * If I was talking with the master and the master
+ * went away, I need to call an election; else I'm
+ * done.
+ */
+ if (master_eid != eid)
+ break;
+<p>
+ master_eid = DB_EID_INVALID;
+ /*
+ * In ex_repquote, the environment ID table stores
+ * election parameters.
+ */
+ machtab_parm(tab, &n, &pri, &timeout);
+ if ((ret = dbenv-&gt;rep_elect(dbenv,
+ n, pri, timeout, &newm)) != 0)
+ continue;
+<p>
+ /*
+ * If I won the election, become the master.
+ * Otherwise, just exit.
+ */
+ if (newm == SELF_EID && (ret =
+ dbenv-&gt;rep_start(dbenv, NULL, DB_REP_MASTER)) == 0)
+ ret = domaster(dbenv, progname);
+ break;
+ }
+<p>
+ /* If we get here, we have a message to process. */
+<p>
+ tmpid = eid;
+ switch(r = dbenv-&gt;rep_process_message(dbenv,
+ &control, &rec, &tmpid)) {
+ case DB_REP_NEWSITE:
+ /*
+ * Check if we got sent connect information and if we
+ * did, if this is me or if we already have a
+ * connection to this new site. If we don't,
+ * establish a new one.
+ */
+<p>
+ /* No connect info. */
+ if (rec.size == 0)
+ break;
+<p>
+ /* It's me, do nothing. */
+ if (strncmp(myaddr, rec.data, rec.size) == 0)
+ break;
+<p>
+ self.host = (char *)rec.data;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ dbenv-&gt;errx(dbenv, "Bad host specification");
+ goto err;
+ }
+ self.port = atoi(c);
+<p>
+ /*
+ * We try to connect to the new site. If we can't,
+ * we treat it as an error since we know that the site
+ * should be up if we got a message from it (even
+ * indirectly).
+ */
+ if ((ret = connect_site(dbenv,
+ tab, progname, &self, &open, &eid)) != 0)
+ goto err;
+ break;
+ case DB_REP_HOLDELECTION:
+ if (master_eid == SELF_EID)
+ break;
+ /* Make sure that previous election has finished. */
+ if (ea != NULL) {
+ (void)pthread_join(elect_thr, &status);
+ ea = NULL;
+ }
+ if ((ea = calloc(sizeof(elect_args), 1)) == NULL) {
+ ret = errno;
+ goto err;
+ }
+ ea-&gt;dbenv = dbenv;
+ ea-&gt;machtab = tab;
+ ret = pthread_create(&elect_thr,
+ NULL, elect_thread, (void *)ea);
+ break;
+ case DB_REP_NEWMASTER:
+ /* Check if it's us. */
+ master_eid = tmpid;
+ if (tmpid == SELF_EID) {
+ if ((ret = dbenv-&gt;rep_start(dbenv,
+ NULL, DB_REP_MASTER)) != 0)
+ goto err;
+ ret = domaster(dbenv, progname);
+ }
+ break;
+ case 0:
+ break;
+ default:
+ dbenv-&gt;err(dbenv, r, "DBENV-&gt;rep_process_message");
+ break;
+ }
+}</blockquote></pre>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/ex_comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/faq.html b/libdb/docs/ref/rep/faq.html
new file mode 100644
index 0000000..cb29415
--- /dev/null
+++ b/libdb/docs/ref/rep/faq.html
@@ -0,0 +1,105 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/partition.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication FAQ</h1>
+<p><ol>
+<p><li><b>Does Berkeley DB provide support for forwarding write queries from
+clients to masters?</b>
+<p>No, it does not. The Berkeley DB RPC server code could be modified to support
+this functionality, but in general this protocol is left entirely to
+the application. Note that there is no reason not to use the communications
+channels the application establishes for replication support to forward
+database update messages to the master; Berkeley DB does not require that
+those channels be used exclusively for replication messages.
+<p><li><b>Can I use replication to partition my environment across
+multiple sites?</b>
+<p>No, this is not possible. All replicated databases must be equally
+shared by all environments in the replication group.
+<p><li><b>How can I distinguish Berkeley DB messages from application messages?</b>
+<p>There is no way to distinguish Berkeley DB messages from application-specific
+messages, nor does Berkeley DB offer any way to wrap application messages
+inside of Berkeley DB messages. Distributed applications exchanging their
+own messages should either enclose Berkeley DB messages in their own wrappers,
+or use separate network connections to send and receive Berkeley DB messages.
+The one exception to this rule is connection information for new sites;
+Berkeley DB offers a simple method for sites joining replication groups to
+send connection information to the other database environments in the
+group (see <a href="../../ref/rep/newsite.html">Connecting to a new site</a>
+for more information).
+<p><li><b>How should I build my <b>send</b> function?</b>
+<p>This depends on the specifics of the application. One common way is to
+write the <b>rec</b> and <b>control</b> arguments' sizes and data to
+a socket connected to each remote site. On a fast, local area net, the
+simplest method is likely to be to construct broadcast messages. Each
+Berkeley DB message would be encapsulated inside an application specific
+message, with header information specifying the intended recipient(s)
+for the message. This will likely require a global numbering scheme,
+however, as the Berkeley DB library has to be able to send specific log
+records to clients apart from the general broadcast of new log records
+intended for all members of a replication group. (A minimal sketch of such
+a send function appears after this list.)
+<p><li><b>Does every one of my threads of control on the master have to
+set up its own connection to every client? And, does every one of my
+threads of control on the client have to set up its own connection to
+every master?</b>
+<p>This is not always necessary. In the Berkeley DB replication model, any
+thread of control which modifies a database in the master environment
+must be prepared to send a message to the client environments, and any
+thread of control which delivers a message to a client environment must
+be prepared to send a message to the master. There are many ways in
+which these requirements can be satisfied.
+<p>The simplest case is probably a single, multithreaded process running
+on the master and clients. The process running on the master would
+require a single write connection to each client and a single read
+connection from each client. A process running on each client would
+require a single read connection from the master and a single write
+connection to the master. Threads running in these processes on the
+master and clients would use the same network connections to pass
+messages back and forth.
+<p>A common complication is when there are multiple processes running on
+the master and clients. A straightforward solution is to increase the
+number of connections on the master -- each process running on the
+master has its own write connection to each client. However, this
+requires only one additional connection for each possible client in the
+master process. The master environment still requires only a single
+read connection from each client (this can be done by allocating a
+separate thread of control which does nothing other than receive client
+messages and forward them into the database). Similarly, each client
+still only requires a single thread of control that receives master
+messages and forwards them into the database, and which also takes
+database messages and forwards them back to the master. This model
+requires the networking infrastructure support many-to-one
+writers-to-readers, of course.
+<p>If the number of network connections is a problem in the multiprocess
+model, and inter-process communication on the system is inexpensive
+enough, an alternative is to have a single process which communicates
+between the master and each client, and whenever a process'
+<b>send</b> function is called, the process passes the message to the
+communications process which is responsible for forwarding the message
+to the appropriate client. Alternatively, a broadcast mechanism will
+simplify the entire networking infrastructure, as processes will likely
+no longer have to maintain their own specific network connections.
+<p><li><b>Can I use replication to replicate just the database
+environment's log files?</b>
+<p>Yes. If the <a href="../../api_c/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a> flag is specified to
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, the client site acts as a repository for logfiles
+(see <a href="../../ref/rep/logonly.html">Log file only clients</a> for more
+information).
+</ol>
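+<p>As a concrete illustration of the <b>send</b> function question above,
+the following sketch writes each DBT's size followed by its data to a
+socket already connected to the destination site. It is not a complete
+implementation: lookup_fd is a hypothetical helper mapping an environment
+ID to a connected descriptor, and byte ordering and partial writes are not
+handled.
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+
+static int
+app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+    u_int32_t sz;
+    int fd;
+
+    /* Map the environment ID to a connected socket (placeholder). */
+    if ((fd = lookup_fd(dbenv, eid)) == -1)
+        return (DB_REP_UNAVAIL);
+
+    /* Write the control DBT: its size, then its bytes. */
+    sz = control-&gt;size;
+    if (write(fd, &sz, sizeof(sz)) != sizeof(sz) ||
+        (sz != 0 && write(fd, control-&gt;data, sz) != (ssize_t)sz))
+        return (DB_REP_UNAVAIL);
+
+    /* Write the rec DBT the same way; it may be empty. */
+    sz = rec == NULL ? 0 : rec-&gt;size;
+    if (write(fd, &sz, sizeof(sz)) != sizeof(sz) ||
+        (sz != 0 && write(fd, rec-&gt;data, sz) != (ssize_t)sz))
+        return (DB_REP_UNAVAIL);
+
+    return (0);
+}</pre></blockquote>
+<p>A receive loop at the other end of the connection would read the two
+sizes and buffers back into DBTs and pass them to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.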
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/partition.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/ex.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/id.html b/libdb/docs/ref/rep/id.html
new file mode 100644
index 0000000..eb4beed
--- /dev/null
+++ b/libdb/docs/ref/rep/id.html
@@ -0,0 +1,45 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication environment IDs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/pri.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication environment IDs</h1>
+<p>Each database environment included in a replication group must have a
+unique identifier for itself and for the other members of the
+replication group. The identifiers do not need to be global, that is,
+each database environment can assign local identifiers to members of
+the replication group as it encounters them. For example, given three
+sites: A, B and C, site A might assign the identifiers 1 and 2 to sites
+B and C respectively, while site B might assign the identifiers 301 and
+302 to sites A and C respectively. Note that it is not wrong to have
+global identifiers, it is just not a requirement.
+<p>It is the responsibility of the application to label each incoming
+replication message passed to the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method with the appropriate
+identifier. Subsequently, Berkeley DB will label outgoing messages to the
+<b>send</b> interface with those same identifiers.
+<p>Negative identifiers are reserved for use by Berkeley DB, and should never be
+assigned to environments by the application. Two of these reserved
+identifiers are intended for application use, as follows:
+<p><dl compact>
+<p><dt><a href="../../api_c/rep_transport.html#DB_EID_BROADCAST">DB_EID_BROADCAST</a><dd>The <a href="../../api_c/rep_transport.html#DB_EID_BROADCAST">DB_EID_BROADCAST</a> identifier indicates a message should be
+broadcast to all members of a replication group.
+<p><dt><a href="../../api_c/rep_transport.html#DB_EID_INVALID">DB_EID_INVALID</a><dd>The <a href="../../api_c/rep_transport.html#DB_EID_INVALID">DB_EID_INVALID</a> identifier is an invalid environment ID, and
+may be used to initialize environment ID variables that are subsequently
+checked for validity.
+</dl>
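+<p>For example (a sketch only; send_to_all and send_to_one are hypothetical
+application helpers), a <b>send</b> function typically branches on the
+broadcast identifier, and a variable tracking the current master can start
+out as the invalid identifier:
+<p><blockquote><pre>int master_eid = DB_EID_INVALID;    /* No master known yet. */
+
+static int
+app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+    if (eid == DB_EID_BROADCAST)    /* Send to every known site. */
+        return (send_to_all(dbenv, control, rec));
+    return (send_to_one(dbenv, eid, control, rec));
+}</pre></blockquote>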
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/pri.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/init.html b/libdb/docs/ref/rep/init.html
new file mode 100644
index 0000000..63028a0
--- /dev/null
+++ b/libdb/docs/ref/rep/init.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Initializing a new site</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/newsite.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/elect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Initializing a new site</h1>
+<p>Perform the following steps to add a new site to the replication
+group:
+<p><ol>
+<p><li>Do an archival backup of the master's environment, as described in
+<a href="../../ref/transapp/archival.html">Database and log file
+archival</a>. The backup can either be a conventional backup or a hot
+backup.
+<p><li>Copy the archival backup into a clean environment directory on the
+client.
+<p><li>Run catastrophic recovery on the client's new environment, as described
+in <a href="../../ref/transapp/recovery.html">Recovery procedures</a>.
+<p><li>Reconfigure and reopen the environment as a client member of the
+replication group.
+</ol>
+<p>If copying the backup to the client takes a long time relative to the
+frequency with which log files are reclaimed using the
+<a href="../../utility/db_archive.html">db_archive</a> utility or the <a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a> method, it may be
+necessary to suppress log reclamation until the newly restarted client
+has "caught up" and applied all log records generated during its
+downtime.
+<p>As with any Berkeley DB application, the database environment must be in a
+consistent state at application startup. This is most easily assured
+by running recovery at startup time in one thread or process; it is
+harmless to do this on both clients and masters even when not strictly
+necessary.
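+<p>Steps 3 and 4 above might be expressed in code roughly as follows. This
+is a sketch only: the flag combination mirrors the other examples in this
+chapter, catastrophic recovery is requested by passing the DB_RECOVER_FATAL
+flag when opening the environment, and home and myaddr are placeholders for
+the client's environment directory and contact address.
+<p><blockquote><pre>u_int32_t flags;
+DBT local;
+
+/* Step 3: open the copied environment, running catastrophic recovery. */
+flags = DB_CREATE | DB_RECOVER_FATAL | DB_THREAD |
+    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+if ((ret = dbenv-&gt;open(dbenv, home, flags, 0)) != 0)
+    goto err;
+
+/* Step 4: rejoin the replication group as a client. */
+memset(&local, 0, sizeof(local));
+local.data = myaddr;
+local.size = strlen(myaddr) + 1;
+ret = dbenv-&gt;rep_start(dbenv, &local, DB_REP_CLIENT);</pre></blockquote>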
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/newsite.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/elect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/intro.html b/libdb/docs/ref/rep/intro.html
new file mode 100644
index 0000000..6715441
--- /dev/null
+++ b/libdb/docs/ref/rep/intro.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/transapp/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/id.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB includes support for building highly available applications based
+on replication. Berkeley DB replication groups consist of some number of
+independently configured database environments. There is a single
+<i>master</i> database environment and one or more <i>client</i>
+database environments. Master environments support both database reads
+and writes; client environments support only database reads. If the
+master environment fails, applications may upgrade a client to be the
+new master. The database environments might be on separate computers,
+on separate hardware partitions in a non-uniform memory access (NUMA)
+system, or on separate disks in a single server. As always with Berkeley DB
+environments, any number of concurrent processes or threads may access
+a database environment. In the case of a master environment, any number
+of threads of control may read and write the environment, and in the
+case of a client environment, any number of threads of control may read
+the environment.
+<p>Applications may be written to provide various degrees of consistency
+between the master and clients. The system can be run synchronously
+such that replicas are guaranteed to be up-to-date with all committed
+transactions, but doing so may incur a significant performance penalty.
+Higher performance solutions sacrifice total consistency, allowing the
+clients to be out of date for an application-controlled amount of time.
+<p>While Berkeley DB includes the database infrastructure necessary to construct
+highly available database environments, applications must still provide
+some critical components:
+<p><ol>
+<p><li>The application is responsible for providing the communication
+infrastructure. Applications may use whatever wire protocol is
+appropriate for their application (for example, RPC, TCP/IP, UDP, VI or
+message-passing over the backplane).
+<p><li>The application is responsible for naming. Berkeley DB refers to the members
+of a replication group using an application-provided ID, and
+applications must map that ID to a particular database environment or
+communication channel.
+<p><li>The application is responsible for monitoring the status of the master
+and clients, and identifying any unavailable database environments.
+<p><li>The application must provide whatever security policies are needed.
+For example, the application may choose to encrypt data, use a secure
+socket layer, or do nothing at all. The level of security is left to
+the sole discretion of the application.
+</ol>
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Replication and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><a href="../../api_c/rep_limit.html">DB_ENV-&gt;set_rep_limit</a></td><td>Limit data sent in response to a single message</td></tr>
+<tr><td><a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><a href="../../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a></td><td>Replication statistics</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/id.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/logonly.html b/libdb/docs/ref/rep/logonly.html
new file mode 100644
index 0000000..45e06eb
--- /dev/null
+++ b/libdb/docs/ref/rep/logonly.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file only clients</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/elect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/trans.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file only clients</h1>
+<p>Applications wanting to use replication to support recovery after
+catastrophic failure of the master may want to configure a site as a
+log-file-only replica. Such clients cannot respond to read (or write)
+queries, but they still receive a complete copy of the log files, so that in
+the event of master failure, a copy of the logs is available.
+<p>Log file only clients are configured like other client sites, except
+they should specify the <a href="../../api_c/rep_start.html#DB_REP_LOGSONLY">DB_REP_LOGSONLY</a> flag to the
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method and should specify a priority of 0 to the
+<a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method.
+<p>There are two ways to recover using a log-file-only replica.
+The simplest way is to copy the log files from the log-file-only
+replica onto another site (either master or replica) and run catastrophic
+recovery there. If that is not an option, then
+recovery must be run on the log-file-only replica, using the log files
+that have accumulated there.
+If the log files are entirely
+self-contained, that is, they start with log file number 1, then a log
+replica can simply run catastrophic recovery. Obviously, if there are
+a large number of log files in this case, recovery may take a long time.
+If the log files are not self-contained, an archival copy of the
+databases must first be restored onto the replica before running
+catastrophic recovery. In the latter case (that is, running recovery
+on the log-file-only replica), once the site returns to being a log-file-only
+replica, the database files on the log-file-only replica should be removed,
+and if the log files do not begin with log file number 1,
+a new set of archival databases should be created from
+the current master.
+<p>More specifically, the log files accumulating on the log-file-only
+replica can take the place of the log files described in the
+<i>catastrophic recovery</i> section of
+<a href="../../ref/transapp/recovery.html">Recovery procedures</a> in the Berkeley DB
+Reference Guide.
+<p>In all other ways, a log-file-only site behaves as other replication
+clients do. It should have a thread or process receiving messages and
+passing them to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> and must respond to all returns
+described for that interface.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/elect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/trans.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/newsite.html b/libdb/docs/ref/rep/newsite.html
new file mode 100644
index 0000000..d616458
--- /dev/null
+++ b/libdb/docs/ref/rep/newsite.html
@@ -0,0 +1,44 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Connecting to a new site</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/init.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Connecting to a new site</h1>
+<p>Connecting to a new site in the replication group happens whenever the
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>. The application
+should assign the new site a local environment ID number, and all future
+messages from the site passed to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> should include that
+environment ID number. It is possible, of course, for the application
+to be aware of a new site before the return of <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> (for
+example, applications using connection-oriented protocols are likely to
+detect new sites immediately, while applications using broadcast
+protocols may not).
+<p>Regardless, in applications supporting the dynamic addition of database
+environments to replication groups, environments joining an existing
+replication group may need to provide contact information. (For
+example, in an application using TCP/IP sockets, a DNS name or IP
+address might be a reasonable value to provide.) This can be done using
+the <b>cdata</b> parameter to the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> method. The information
+referenced by <b>cdata</b> is wrapped in the initial contact message
+sent by the new environment, and is provided to the existing members of
+the group using the <b>rec</b> parameter returned by <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.
+If no additional information was provided for Berkeley DB to forward to the
+existing members of the group, the <b>data</b> field of the <b>rec</b>
+parameter passed to the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> method will be NULL after
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/init.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/partition.html b/libdb/docs/ref/rep/partition.html
new file mode 100644
index 0000000..a89f5c1
--- /dev/null
+++ b/libdb/docs/ref/rep/partition.html
@@ -0,0 +1,90 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Network partitions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/trans.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Network partitions</h1>
+<p>The Berkeley DB replication implementation can be affected by network
+partitioning problems.
+<p>For example, consider a replication group with N members. The network
+partitions with the master on one side and more than N/2 of the sites
+on the other side. The sites on the side with the master will continue
+forward, and the master will continue to accept write queries for the
+databases. Unfortunately, the sites on the other side of the partition,
+realizing they no longer have a master, will hold an election. The
+election will succeed as there are more than N/2 of the total sites
+participating, and there will then be two masters for the replication
+group. Since both masters are potentially accepting write queries, the
+databases could diverge in incompatible ways.
+<p>If multiple masters are ever found to exist in a replication group, a
+master detecting the problem will return <a href="../../api_c/rep_message.html#DB_REP_DUPMASTER">DB_REP_DUPMASTER</a>. If
+the application sees this return, it should reconfigure itself as a
+client (by calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>), and then call for an election
+(by calling <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>). The site that wins the election may be
+one of the two previous masters, or it may be another site entirely.
+Regardless, the winning system will bring all of the other systems into
+conformance.
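+<p>For example, an application might handle this return with a helper
+along the following lines (a sketch only; the <b>nsites</b>, priority,
+and timeout values are illustrative and should be chosen by the
+application):
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+int
+handle_dup_master(DB_ENV *dbenv, int nsites)
+{
+	int eid, ret;
+<p>
+	/* Step down to client status... */
+	if ((ret = dbenv-&gt;rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
+		return (ret);
+<p>
+	/* ...and then call for an election. */
+	return (dbenv-&gt;rep_elect(dbenv, nsites, 100, 5000000, &eid));
+}</pre></blockquote>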
+<p>As another example, consider a replication group with a master
+environment and two clients A and B, where client A may upgrade to
+master status and client B cannot. Then, assume client A is partitioned
+from the other two database environments, and it becomes out-of-date
+with respect to the master. Then, assume the master crashes and does
+not come back on-line. Subsequently, the network partition is restored,
+and clients A and B hold an election. As client B cannot win the
+election, client A will win by default, and in order to get back into
+sync with client A, possibly committed transactions on client B will be
+unrolled until the two sites can once again move forward together.
+<p>In both of these examples, there is a phase where a newly elected master
+brings the members of a replication group into conformance with itself
+so that it can start sending new information to them. This can result
+in the loss of information as previously committed transactions are
+unrolled.
+<p>In architectures where network partitions are an issue, applications
+may want to implement a heart-beat protocol to minimize the consequences
+of a bad network partition. As long as a master is able to contact at
+least half of the sites in the replication group, it is impossible for
+there to be two masters. If the master can no longer contact a
+sufficient number of systems, it should reconfigure itself as a client,
+and hold an election.
+<p>There is another tool applications can use to minimize the damage in
+the case of a network partition. By specifying a <b>nsites</b>
+argument to <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> that is larger than the actual number of
+database environments in the replication group, applications can keep
+systems from declaring themselves the master unless they can talk to
+a large percentage of the sites in the system. For example, if there
+are 20 database environments in the replication group, and an argument
+of 30 is specified to the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> method, then a system will have
+to be able to talk to at least 16 of the sites to declare itself the
+master.
+<p>Specifying a <b>nsites</b> argument to <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> that is
+smaller than the actual number of database environments in the
+replication group has its uses as well. For example, consider a
+replication group with 2 environments. If they are partitioned from
+each other, neither of the sites could ever get enough votes to become
+the master. A reasonable alternative would be to specify a
+<b>nsites</b> argument of 2 to one of the systems and a <b>nsites</b>
+argument of 1 to the other. That way, one of the systems could win
+elections even when partitioned, while the other one could not. This
+would allow one of the systems to continue accepting write queries
+after the partition.
+<p>These scenarios stress the importance of good network infrastructure in
+Berkeley DB replicated environments. When replicating database environments
+over sufficiently lossy networking, the best solution may well be to
+pick a single master, and only hold elections when human intervention
+has determined the selected master is unable to recover at all.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/trans.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/pri.html b/libdb/docs/ref/rep/pri.html
new file mode 100644
index 0000000..c40d143
--- /dev/null
+++ b/libdb/docs/ref/rep/pri.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication environment priorities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/id.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication environment priorities</h1>
+<p>Each database environment included in a replication group must have a
+priority, which specifies a relative ordering among the different
+environments in a replication group. This ordering determines which
+environment will be selected as a new master in case the existing master
+fails.
+<p>Priorities must be non-negative integers, but need not be unique
+throughout the replication group. A priority of 0 means the system can
+never become a master, regardless of the state of the other sites. Otherwise, larger valued priorities
+indicate a more desirable master. For example, if a replication group
+consists of three database environments, two of which are connected by
+an OC3 and the third of which is connected by a T1, the third database
+environment should be assigned a priority value which is lower than
+either of the other two.
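+<p>For example (a sketch; the <b>nsites</b> and timeout values are
+illustrative), the third environment in the example above might call
+for elections with a lower priority than its two better-connected
+peers:
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+int
+call_election(DB_ENV *dbenv, int well_connected)
+{
+	int eid;
+<p>
+	/* Priority 100 for the OC3-connected sites, 10 for the T1 site. */
+	return (dbenv-&gt;rep_elect(dbenv,
+	    3, well_connected ? 100 : 10, 5000000, &eid));
+}</pre></blockquote>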
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/id.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rep/trans.html b/libdb/docs/ref/rep/trans.html
new file mode 100644
index 0000000..f59d57f
--- /dev/null
+++ b/libdb/docs/ref/rep/trans.html
@@ -0,0 +1,104 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transactional guarantees</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/logonly.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/partition.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transactional guarantees</h1>
+<p>It is important to consider replication in the context of the overall
+database environment's transactional guarantees. To briefly review,
+transactional guarantees in a non-replicated application are based on
+the writing of log file records to "stable storage", usually a disk
+drive. If the application or system then fails, the Berkeley DB logging
+information is reviewed during recovery, and the databases are updated
+so that all changes made as part of committed transactions appear, and
+all changes made as part of uncommitted transactions do not appear. In
+this case, no information will have been lost.
+<p>If a database environment does not require that the log be flushed to
+stable storage on transaction commit (using the <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a>
+flag to increase performance at the cost of sacrificing transactional
+durability), Berkeley DB recovery will only be able to restore the system to
+the state of the last commit found on stable storage. In this case,
+information may have been lost (for example, the changes made by some
+committed transactions may not appear in the databases after recovery).
+<p>Further, if there is database or log file loss or corruption (for
+example, if a disk drive fails), then catastrophic recovery is
+necessary, and Berkeley DB recovery will only be able to restore the system
+to the state of the last archived log file. In this case, information
+may also have been lost.
+<p>Replicating the database environment extends this model, by adding a
+new component to "stable storage": the client's replicated information.
+If a database environment is replicated, there is no lost information
+in the case of database or log file loss, because the replicated system
+can be configured to contain a complete set of databases and log records
+up to the point of failure. A database environment that loses a disk
+drive can have the drive replaced, and it can rejoin the replication
+group as a client.
+<p>Because of this new component of stable storage, specifying
+<a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> in a replicated environment no longer sacrifices
+durability, as long as one or more clients have acknowledged receipt of
+the messages sent by the master. Since network connections are often
+faster than local disk writes, replication becomes a way for
+applications to significantly improve their performance as well as their
+reliability.
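+<p>As a point of reference, this behavior is configured per-environment
+with <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a>; a minimal sketch:
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+/* Trade commit durability for performance in this environment. */
+int
+relax_durability(DB_ENV *dbenv)
+{
+	return (dbenv-&gt;set_flags(dbenv, DB_TXN_NOSYNC, 1));
+}</pre></blockquote>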
+<p>The return status from the <b>send</b> interface specified to the
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> method must be set by the application to ensure the
+transactional guarantees the application wants to provide. The effect
+of the <b>send</b> interface returning failure is to flush the local
+database environment's log as necessary to ensure that any information
+critical to database integrity is not lost. Because this flush is an
+expensive operation in terms of database performance, applications will
+want to avoid returning an error from the <b>send</b> interface, if at
+all possible:
+<p>First, there is no reason for the <b>send</b> interface to ever return
+failure unless the <a href="../../api_c/rep_transport.html#DB_REP_PERMANENT">DB_REP_PERMANENT</a> flag is specified. Messages
+without that flag do not make visible changes to databases, and
+therefore the application's <b>send</b> interface can return success
+to Berkeley DB for such messages as soon as the message has been sent or even
+just copied to local memory.
+<p>Further, unless the master's database environment has been configured
+to not synchronously flush the log on transaction commit, there is no
+reason for the <b>send</b> interface to ever return failure, as any
+information critical to database integrity has already been flushed to
+the local log before <b>send</b> was called. Again, the <b>send</b>
+interface should return success to Berkeley DB as soon as possible. However,
+in this case, in order to avoid potential loss of information after the
+master database environment fails, the master database environment
+should be recovered before holding an election, as only the master
+database environment is guaranteed to have the most up-to-date logs.
+<p>To sum up, the only reason for the <b>send</b> interface to return
+failure is when the master database environment has been configured to
+not synchronously flush the log on transaction commit, the
+<a href="../../api_c/rep_transport.html#DB_REP_PERMANENT">DB_REP_PERMANENT</a> flag is specified for the message, and the
+<b>send</b> interface was unable to determine that some number of
+clients have received the current message (and all messages preceding
+the current message). How many clients should receive the message
+before the <b>send</b> interface can return success is an application
+choice (and may depend less on a specific number of clients reporting
+success than on having one or more geographically distributed clients do so).
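+<p>To illustrate, a <b>send</b> callback consistent with these rules
+might look like the following sketch, where <b>my_broadcast</b> and
+<b>my_wait_for_acks</b> stand in for the application's own networking
+code and are not part of Berkeley DB (see <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> for
+the exact callback signature expected by this release):
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+extern int my_broadcast(int eid, const DBT *control, const DBT *rec);
+extern int my_wait_for_acks(void);	/* Returns number of acks received. */
+<p>
+int
+my_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+	if (my_broadcast(eid, control, rec) != 0)
+		/* Failure only matters for permanent messages. */
+		return ((flags & DB_REP_PERMANENT) ? 1 : 0);
+<p>
+	if (flags & DB_REP_PERMANENT)
+		/* Return non-zero if no client acknowledged receipt. */
+		return (my_wait_for_acks() &gt; 0 ? 0 : 1);
+<p>
+	return (0);
+}</pre></blockquote>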
+<p>Of course, it is important to ensure that the replicated master and
+client environments are truly independent of each other. For example,
+it does not help matters that a client has acknowledged receipt of a
+message if both master and clients are on the same power supply, as the
+failure of the power supply will still potentially lose information.
+<p>Finally, the Berkeley DB replication implementation has one other additional
+feature to increase application reliability. Replication in Berkeley DB is
+implemented to perform database updates using a different code path than
+the standard ones. This means operations which manage to crash the
+replication master due to a software bug will not necessarily also crash
+replication clients.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/logonly.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/partition.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rpc/client.html b/libdb/docs/ref/rpc/client.html
new file mode 100644
index 0000000..fed24a0
--- /dev/null
+++ b/libdb/docs/ref/rpc/client.html
@@ -0,0 +1,84 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Client program</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td align=right><a href="../../ref/rpc/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/server.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Client program</h1>
+<p>Changing a Berkeley DB application to remotely call a server program requires
+only a few changes on the client side:
+<p><ol>
+<p><li>The client application must create and use a Berkeley DB environment; that
+is, it cannot simply call the <a href="../../api_c/db_create.html">db_create</a> interface, but must
+first call the <a href="../../api_c/env_create.html">db_env_create</a> interface to create an environment
+in which the database will live.
+<p><li>The client application must call <a href="../../api_c/env_create.html">db_env_create</a> using the
+<a href="../../api_c/env_create.html#DB_CLIENT">DB_CLIENT</a> flag.
+<p><li>The client application must call the additional <a href="../../api_c/env_class.html">DB_ENV</a> method
+<a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> to specify the database server. This call must
+be made before opening the environment with the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>
+call.
+</ol>
+<p>The client application provides a few pieces of information to Berkeley DB as
+part of the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> call:
+<p><ol>
+<p><li>A client structure. Applications wishing to control their own client
+structures can pass one in, and Berkeley DB will use it to connect to the
+server. Most applications will not make use of this argument and should pass
+in NULL. If this argument is used, the hostname and client timeout
+arguments are ignored. Applications using this mechanism must
+create their client structures using DB_RPC_SERVERPROG as the program
+number and DB_RPC_SERVERVERS as the version number.
+<p><li>The hostname of the server. The hostname format is not specified by
+Berkeley DB, but must be in a format acceptable to the local network support
+-- specifically, the RPC clnt_create interface.
+<p><li>The client
+timeout. This is the number of seconds the client will wait for the
+server to respond to its requests. A default is used if this value is
+zero.
+<p><li>The server timeout. This is the number of seconds the server will allow
+client resources to remain idle before releasing those resources. The
+resources this applies to are transactions and cursors because those
+objects hold locks; and if a client dies, the server needs to release
+those resources in a timely manner. This value is really a hint to the
+server because the server may choose to override this value with its
+own.
+</ol>
+<p>The only other item of interest to the client is the home directory
+that is given to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> call.
+The server is started with a list of allowed home directories.
+The client must use one of those names (where a name is the last
+component of the home directory). This allows the pathname structure
+on the server to change without client applications needing to be
+aware of it.
+<p>Once the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> call has been made, the client is
+connected to the server, and all subsequent Berkeley DB
+operations will be forwarded to the server. The client does not need to
+be otherwise aware that it is using a database server rather than
+accessing the database locally.
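+<p>Putting these steps together, client setup might look something
+like the following sketch (the host name, environment name, timeout
+values, and open flags are illustrative and will vary by application):
+<p><blockquote><pre>#include &lt;db.h&gt;
+<p>
+int
+open_rpc_client(DB_ENV **dbenvp)
+{
+	DB_ENV *dbenv;
+	int ret;
+<p>
+	/* Create the environment handle in client mode. */
+	if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0)
+		return (ret);
+<p>
+	/* Name the server before opening the environment. */
+	if ((ret = dbenv-&gt;set_rpc_server(dbenv,
+	    NULL, "db.example.com", 60, 300, 0)) != 0)
+		goto err;
+<p>
+	/* "demo" must match a home directory name given to the server. */
+	if ((ret = dbenv-&gt;open(dbenv, "demo",
+	    DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+		goto err;
+<p>
+	*dbenvp = dbenv;
+	return (0);
+<p>
+err:	(void)dbenv-&gt;close(dbenv, 0);
+	return (ret);
+}</pre></blockquote>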
+<p>It is important to realize that the client portion of the Berkeley DB library
+acts as a simple conduit, forwarding Berkeley DB interface arguments to the
+server without interpretation. This has two important implications.
+First, all pathnames must be specified relative to the server. For
+example, the home directory and other configuration information passed
+by the application when creating its environment or databases must be
+pathnames for the server, not the client system. Second, because
+there is no logical bundling of operations at the server, performance
+is usually significantly less than when Berkeley DB is embedded within the
+client's address space, even if the RPC is to a local address.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rpc/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/server.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rpc/faq.html b/libdb/docs/ref/rpc/faq.html
new file mode 100644
index 0000000..75f3267
--- /dev/null
+++ b/libdb/docs/ref/rpc/faq.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: RPC FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td align=right><a href="../../ref/rpc/server.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>RPC FAQ</h1>
+<p><ol>
+<p><li><b>I get the <a href="../../api_c/env_set_rpc_server.html#DB_NOSERVER">DB_NOSERVER</a> error back from a
+<a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> call that is using the default client
+timeout value.</b>
+<p>Some systems have a default RPC client timeout value that is too small,
+and the client times out the request before the server has a chance to
+process and reply. If you get this error, try explicitly setting the
+client timeout value.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rpc/server.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/java/conf.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rpc/intro.html b/libdb/docs/ref/rpc/intro.html
new file mode 100644
index 0000000..d6460ba
--- /dev/null
+++ b/libdb/docs/ref/rpc/intro.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td align=right><a href="../../ref/txn/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/client.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB includes an implementation of a client-server protocol using Sun
+Microsystem's Remote Procedure Call Protocol. RPC support is
+available only for UNIX systems, and is not included in the Berkeley DB library by
+default, but must be enabled during configuration. See
+<a href="../../ref/build_unix/conf.html">Configuring Berkeley DB</a> for more
+information. For more information on RPC itself, see your UNIX system
+documentation or <i>RPC: Remote Procedure Call Protocol
+Specification, RFC1831, Sun Microsystems, Inc., USC-ISI</i>.
+<p>Only a subset of the complete Berkeley DB functionality is available when using RPC.
+The following functionality is available:
+<p><ol>
+<li>The <a href="../../api_c/env_create.html">db_env_create</a> interface and the <a href="../../api_c/env_class.html">DB_ENV</a> handle
+methods.
+<li>The <a href="../../api_c/db_create.html">db_create</a> interface and the <a href="../../api_c/db_class.html">DB</a> handle methods.
+<li>The <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>, and <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>
+interfaces.
+</ol>
+<p>The <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> and <a href="../../api_c/db_open.html">DB-&gt;open</a> methods
+may not be specified, that is, <a href="../../api_c/env_class.html">DB_ENV</a> and <a href="../../api_c/db_class.html">DB</a> handles used
+in RPC clients are not free-threaded.
+<p>The RPC client/server code does not support any of the user-defined
+comparison or allocation functions; for example, an application using
+the RPC support may not specify its own Btree comparison function. If
+your application requires only those portions of Berkeley DB, then using RPC
+is fairly simple. If your application requires other Berkeley DB
+functionality, such as direct access to locking, logging, or shared
+memory buffer pools, then your application cannot use the RPC
+support.
+<p>Although a client cannot specify a callback, Berkeley DB does support the
+<a href="../../api_c/db_associate.html">DB-&gt;associate</a> call in a limited manner. A client program
+wishing to use a secondary index may do so only in a read-only fashion.
+The primary and secondary databases must have been created locally on
+the server machine. Client programs must then open both the primary
+and secondary databases with the <a href="../../api_c/db_open.html#DB_RDONLY">DB_RDONLY</a> flag set. The client
+must also specify a NULL callback for the <a href="../../api_c/db_associate.html">DB-&gt;associate</a> call.
+<p>The Berkeley DB RPC code requires that the client and server programs be
+running the exact same version numbers.
+The Berkeley DB RPC protocol version number is tied to the Berkeley DB major
+and minor release numbers.
+As such, the server program will reject requests from clients
+using a different version number.
+<p><b>The Berkeley DB RPC support does not provide any security or authentication of
+any kind.</b> Sites needing any kind of data security measures must modify
+the client and server code to provide whatever level of security they
+require.
+<p>One particularly interesting use of the RPC support is for debugging Berkeley DB
+applications. The seamless nature of the interface means that with very
+minor application code changes, an application can run outside of the
+Berkeley DB address space, making it far easier to track down many types of
+errors -- such as memory misuse.
+<p>Using the RPC mechanisms in Berkeley DB involves two basic steps:
+<p><ol>
+<p><li>Modify your Berkeley DB application to act as a client and call the RPC
+server.
+<li>Run the <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> server program on the system where the
+database resides.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/txn/limits.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/client.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/rpc/server.html b/libdb/docs/ref/rpc/server.html
new file mode 100644
index 0000000..f73a48d
--- /dev/null
+++ b/libdb/docs/ref/rpc/server.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Server program</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>RPC Client/Server</dl></h3></td>
+<td align=right><a href="../../ref/rpc/client.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Server program</h1>
+<p>The Berkeley DB server utility, <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a>, handles all the
+client application requests.
+<p>Currently, the <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> utility is single-threaded,
+limiting the number of requests that it can handle. Modifying the
+server implementation to run in multithread or multiprocess mode
+requires modification of the server code automatically generated by the
+rpcgen program.
+<p>There are two different types of timeouts used by <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a>.
+The first timeout (which can be modified within some constraints by the
+client application) is the resource timeout. When clients use
+transactions or cursors, those resources hold locks in Berkeley DB across calls
+to the server. If a client application dies or loses its connection to
+the server while holding those resources, it prevents any other client
+from acquiring them. Therefore, it is important to detect that a client
+has not used a resource for some period of time and release it. In the
+case of transactions, the server aborts the transaction. In the case of
+cursors, the server closes the cursor.
+<p>The second timeout is an idle timeout. A client application may remain
+idle with an open handle to an environment and a database. Doing so
+simply consumes some memory; it does not hold locks. However, the Berkeley DB
+server may want to eventually reclaim resources if a client dies or
+remains disconnected for a long period of time, so there is a separate
+idle timeout for open Berkeley DB handles.
+<p>The home directories specified to <a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> are
+the only ones client applications are allowed to use. When
+<a href="../../utility/berkeley_db_svc.html">berkeley_db_svc</a> is started, it is given a list of pathnames.
+Clients are expected to specify the name of the home directory (defined
+as the last component in the directory pathname) as the database
+environment they are opening. In this manner, clients need to know only
+the name of their home environment, not its full pathname on the server
+machine. This means, of course, that only one environment of a
+particular name is allowed on the server at any given time.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rpc/client.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/sendmail/intro.html b/libdb/docs/ref/sendmail/intro.html
new file mode 100644
index 0000000..9aaacac
--- /dev/null
+++ b/libdb/docs/ref/sendmail/intro.html
@@ -0,0 +1,52 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Sendmail</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Sendmail</dl></h3></td>
+<td align=right><a href="../../ref/tcl/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/utility.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Sendmail</h1>
+<p>If you are attempting to use Berkeley DB with Sendmail 8.8.X, you must use
+Berkeley DB version 1.85 (see the Sleepycat Software web site's
+<a href="http://www.sleepycat.com/historic.html">historic releases</a>
+of Berkeley DB page for more information).
+<p>Berkeley DB versions 2.0 and later are only supported by Sendmail versions 8.9.X
+and later.
+<p>Berkeley DB versions 3.0 and later are only supported by Sendmail versions
+8.10.X and later.
+<p>We strongly recommend that you not use Berkeley DB version 1.85. It is no longer
+maintained or supported and has known bugs that can cause Sendmail to
+fail. Instead, please upgrade to Sendmail version 8.9.X or later and use
+a later version of Berkeley DB. For more information on using Berkeley DB with
+Sendmail, please review the README and src/README files in the Sendmail
+distribution.
+<p>To link Sendmail against Berkeley DB, add the following lines to
+BuildTools/Site/site.config.m4:
+<p><blockquote><pre>APPENDDEF(`confINCDIRS', `-I/usr/local/BerkeleyDB.4.1/include')
+APPENDDEF(`confLIBDIRS', `-L/usr/local/BerkeleyDB.4.1/lib')</pre></blockquote>
+<p>where those are the paths to #include &lt;db.h&gt; and libdb.a respectively.
+Then, run "Build -c" from the src directory.
+<p>Note that this Build script will use -DNEWDB on the compiles
+and -L/path/to/libdb/directory -ldb on the link if it can find libdb.a;
+the search path is $LIBDIRS:/lib:/usr/lib:/usr/shlib. $LIBDIRS is
+NULL by default for most systems, but some set it in BuildTools/OS/foo.
+Anyone can append to it as above (confLIBDIRS is the m4 variable name;
+LIBDIRS is the shell-script variable name).
+<p>To download Sendmail, or to obtain more information on Sendmail, see the
+<a href="http://www.sendmail.org">Sendmail home page</a>, which includes
+FAQ pages and problem addresses.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/tcl/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/dumpload/utility.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/close.html b/libdb/docs/ref/simple_tut/close.html
new file mode 100644
index 0000000..dc0bf44
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/close.html
@@ -0,0 +1,102 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Closing a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/del.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Closing a database</h1>
+<p>The only other operation that we need for our simple example is closing
+the database, and cleaning up the DB handle.
+<p>It is necessary that the database be closed. The most important reason
+for this is that Berkeley DB runs on top of an underlying buffer cache. If
+the modified database pages are never explicitly flushed to disk and
+the database is never closed, changes made to the database may never
+make it out to disk, because they are held in the Berkeley DB cache. As the
+default behavior of the close function is to flush the Berkeley DB cache,
+closing the database will update the on-disk information.
+<p>The <a href="../../api_c/db_close.html">DB-&gt;close</a> interface takes two arguments:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_close.html">DB-&gt;close</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_close.html">DB-&gt;close</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ <b>int ret, t_ret;</b>
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;del");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+<p><b>err: if ((t_ret = dbp-&gt;close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret; </b>
+<p>
+ exit(ret);
+}</pre></blockquote>
+<p>Note that we do not necessarily overwrite the <b>ret</b> variable, as it
+may contain error return information from a previous Berkeley DB call.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/del.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/am_conf/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/del.html b/libdb/docs/ref/simple_tut/del.html
new file mode 100644
index 0000000..e21b23b
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/del.html
@@ -0,0 +1,94 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Removing elements from a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Removing elements from a database</h1>
+<p>The simplest way to remove elements from a database is the <a href="../../api_c/db_del.html">DB-&gt;del</a>
+interface.
+<p>The <a href="../../api_c/db_del.html">DB-&gt;del</a> interface takes four of the same five arguments that
+the <a href="../../api_c/db_get.html">DB-&gt;get</a> and <a href="../../api_c/db_put.html">DB-&gt;put</a> interfaces take. The difference
+is that there is no need to specify a data item, as the delete operation
+is only interested in the key that you want to remove.
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction ID.
+In our simple case, we aren't expecting to recover the database after
+application or system crash, so we aren't using transactions, and will
+leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to delete from the
+database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_del.html">DB-&gt;del</a>
+interface. There are currently no available flags for this interface,
+so the flags argument should always be set to 0.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_del.html">DB-&gt;del</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p>
+ if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+<p><b> if ((ret = dbp-&gt;del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;del");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>After the <a href="../../api_c/db_del.html">DB-&gt;del</a> call returns, the entry to which the key
+<b>fruit</b> refers has been removed from the database.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/get.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/errors.html b/libdb/docs/ref/simple_tut/errors.html
new file mode 100644
index 0000000..1ff0c94
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/errors.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Error returns</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/handles.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Error returns</h1>
+<p>The Berkeley DB interfaces always return a value of 0 on success. If the
+operation does not succeed for any reason, the return value will be
+non-zero.
+<p>If a system error occurred (for example, Berkeley DB ran out of disk space,
+or permission to access a file was denied, or an illegal argument was
+specified to one of the interfaces), Berkeley DB returns an <b>errno</b>
+value. All of the possible values of <b>errno</b> are greater than
+0.
+<p>If the operation didn't fail due to a system error, but wasn't
+successful either, Berkeley DB returns a special error value. For example,
+if you tried to retrieve the data item associated with the key
+<b>fruit</b>, and there was no such key/data pair in the database,
+Berkeley DB would return <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>, a special error value that means
+the requested key does not appear in the database. All of the possible
+special error values are less than 0.
+<p>Berkeley DB also offers programmatic support for displaying error return values.
+First, the <a href="../../api_c/env_strerror.html">db_strerror</a> interface returns a pointer to the error
+message corresponding to any Berkeley DB error return, similar to the ANSI C
+strerror interface, but is able to handle both system error returns and
+Berkeley DB-specific return values.
+<p>Second, there are two error functions, <a href="../../api_c/db_err.html">DB-&gt;err</a> and <a href="../../api_c/db_err.html">DB-&gt;errx</a>.
+These functions work like the ANSI C printf interface, taking a
+printf-style format string and argument list, and optionally appending
+the standard error string to a message constructed from the format string
+and other arguments.
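+<p>For example, a small reporting routine might combine these
+interfaces as follows (a sketch; the message text is illustrative):
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+void
+report(DB *dbp, int ret)
+{
+	if (ret == DB_NOTFOUND)
+		/* A special Berkeley DB return value, not a system error. */
+		printf("key not found: %s\n", db_strerror(ret));
+	else if (ret != 0)
+		/* Prefix the standard error string with our own message. */
+		dbp-&gt;err(dbp, ret, "get of key %s", "fruit");
+}</pre></blockquote>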
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/handles.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/example.cs b/libdb/docs/ref/simple_tut/example.cs
new file mode 100644
index 0000000..598ba15
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/example.cs
@@ -0,0 +1,73 @@
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+
+#define DATABASE "access.db"
+
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret, t_ret;
+
+ /* Create the database handle and open the underlying database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+
+ /* Initialize key/data structures. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+
+ /* Store a key/data pair. */
+ if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp->err(dbp, ret, "DB->put");
+ goto err;
+ }
+
+ /* Retrieve a key/data pair. */
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp->err(dbp, ret, "DB->get");
+ goto err;
+ }
+
+ /* Delete a key/data pair. */
+ if ((ret = dbp->del(dbp, NULL, &key, 0)) == 0)
+ printf("db: %s: key was deleted.\n", (char *)key.data);
+ else {
+ dbp->err(dbp, ret, "DB->del");
+ goto err;
+ }
+
+ /* Retrieve a key/data pair. */
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else
+ dbp->err(dbp, ret, "DB->get");
+
+err: if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ exit(ret);
+}
diff --git a/libdb/docs/ref/simple_tut/get.html b/libdb/docs/ref/simple_tut/get.html
new file mode 100644
index 0000000..1c80885
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/get.html
@@ -0,0 +1,98 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Retrieving elements from a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/del.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Retrieving elements from a database</h1>
+<p>The simplest way to retrieve elements from a database is the
+<a href="../../api_c/db_get.html">DB-&gt;get</a> interface.
+<p>The <a href="../../api_c/db_get.html">DB-&gt;get</a> interface takes the same five arguments that the
+<a href="../../api_c/db_put.html">DB-&gt;put</a> interface takes:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction ID. In our simple case, we aren't expecting to recover
+the database after application or system crash, so we aren't using
+transactions, and will leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to retrieve from the
+database.
+<p><dt>data<dd>The data item for the key/data pair that we want to retrieve from the
+database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_get.html">DB-&gt;get</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_get.html">DB-&gt;get</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+<p><b> if ((ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key retrieved: data was %s.\n",
+ (char *)key.data, (char *)data.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;get");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>It is not usually necessary to clear the <a href="../../api_c/dbt_class.html">DBT</a> structures passed
+to the Berkeley DB functions between calls. This is not always true, however:
+when some of the less commonly used flags for <a href="../../api_c/dbt_class.html">DBT</a> structures are
+used, clearing may be required. The <a href="../../api_c/dbt_class.html">DBT</a> manual page specifies the details of those cases.
+<p>It is possible, of course, to distinguish between system errors and the
+key/data pair simply not existing in the database. There are three
+standard returns from <a href="../../api_c/db_get.html">DB-&gt;get</a>:
+<p><ol>
+<p><li>The call might be successful and the key found, in which case the return
+value will be 0.
+<li>The call might be successful, but the key not found, in which case the
+return value will be <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<li>The call might not be successful, in which case the return value will
+be a system error.
+</ol>
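+<p>These three cases might be distinguished with a switch on the return
+value, for example (a sketch reusing the <b>dbp</b>, <b>key</b> and
+<b>data</b> variables from the example above):
+<p><blockquote><pre>	switch (ret = dbp-&gt;get(dbp, NULL, &key, &data, 0)) {
+	case 0:
+		printf("key found: data was %s.\n", (char *)data.data);
+		break;
+	case DB_NOTFOUND:
+		printf("key not present in the database.\n");
+		break;
+	default:
+		dbp-&gt;err(dbp, ret, "DB-&gt;get");
+		break;
+	}</pre></blockquote>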
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/del.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/handles.html b/libdb/docs/ref/simple_tut/handles.html
new file mode 100644
index 0000000..c243a4a
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/handles.html
@@ -0,0 +1,30 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Object handles</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/keydata.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/errors.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Object handles</h1>
+<p>With a few minor exceptions, Berkeley DB functionality is accessed by creating
+a structure and then calling functions that are fields in that structure.
+This is, of course, similar to object-oriented concepts of instances and
+methods on them. For simplicity, we will often refer to these structure
+fields as methods of the handle.
+<p>The manual pages will show these methods as C structure references. For
+example, the open-a-database method for a database handle is represented
+as <a href="../../api_c/db_open.html">DB-&gt;open</a>.
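+<p>For example, creating a database handle and then calling one of its
+methods looks like this (a fragment; <a href="../../api_c/db_open.html">DB-&gt;open</a> is described later in
+this tutorial):
+<p><blockquote><pre>	DB *dbp;
+	int ret;
+<p>
+	/* db_create fills in dbp, whose fields include the handle methods. */
+	if ((ret = db_create(&dbp, NULL, 0)) == 0)
+		ret = dbp-&gt;open(dbp,
+		    NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664);</pre></blockquote>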
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/keydata.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/errors.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/intro.html b/libdb/docs/ref/simple_tut/intro.html
new file mode 100644
index 0000000..6b91aa2
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/intro.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/intro/products.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/keydata.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>As an introduction to Berkeley DB, we will present a few Berkeley DB programming
+concepts, and then a simple database application.
+<p>The programming concepts are:
+<p><ul type=disc>
+<li><a href="keydata.html">Key/data pairs</a>
+<li><a href="handles.html">Object handles</a>
+<li><a href="errors.html">Error returns</a>
+</ul>
+<p>This database application will:
+<p><ul type=disc>
+<li><a href="open.html">Create a simple database</a>
+<li><a href="put.html">Store items</a>
+<li><a href="get.html">Retrieve items</a>
+<li><a href="del.html">Remove items</a>
+<li><a href="close.html">Close the database</a>
+</ul>
+<p>The introduction will be presented using the programming language C. The
+<a href="example.cs">complete source</a> of the final version of the
+example program is included in the Berkeley DB distribution.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/intro/products.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/keydata.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/keydata.html b/libdb/docs/ref/simple_tut/keydata.html
new file mode 100644
index 0000000..b7eb005
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/keydata.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Key/data pairs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/handles.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Key/data pairs</h1>
+<p>Berkeley DB uses key/data pairs to identify elements in the database.
+That is, in the general case, whenever you call a Berkeley DB interface,
+you present a key to identify the key/data pair on which you intend
+to operate.
+<p>For example, you might store some key/data pairs as follows:
+<p><table border=1>
+<tr><th>Key:</th><th>Data:</th></tr>
+<tr><td>fruit</td><td>apple</td></tr>
+<tr><td>sport</td><td>cricket</td></tr>
+<tr><td>drink</td><td>water</td></tr>
+</table>
+<p>In each case, the first element of the pair is the key, and the second is
+the data. To store the first of these key/data pairs into the database,
+you would call the Berkeley DB interface to store items, with <b>fruit</b> as
+the key, and <b>apple</b> as the data. At some future time, you could
+then retrieve the data item associated with <b>fruit</b>, and the Berkeley DB
+retrieval interface would return <b>apple</b> to you. While there are
+many variations and some subtleties, all accesses to data in Berkeley DB come
+down to key/data pairs.
+<p>Both key and data items are stored in simple structures (called
+<a href="../../api_c/dbt_class.html">DBT</a>s) that contain a reference to memory and a length, counted
+in bytes. (The name <a href="../../api_c/dbt_class.html">DBT</a> is an acronym for <i>database
+thang</i>, chosen because nobody could think of a sensible name that wasn't
+already in use somewhere else.) Key and data items can be arbitrary
+binary data of practically any length, including 0 bytes. There is a
+single data item for each key item, by default, but databases can be
+configured to support multiple data items for each key item.
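+<p>As an illustrative sketch only (the <b>struct score</b> type and its
+values are invented for this example and are not part of Berkeley DB), the
+following fragment shows how a key/data pair might be described by
+<a href="../../api_c/dbt_class.html">DBT</a> structures; the key is a C string and the data item is a small
+binary structure:
+<p><blockquote><pre>#include &lt;string.h&gt;
+#include &lt;db.h&gt;
+
+struct score {			/* Arbitrary application data. */
+	int wins;
+	int losses;
+};
+
+int
+main()
+{
+	DBT key, data;
+	struct score s = { 10, 2 };
+
+	/* Clear the DBT structures before use. */
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+
+	/* A DBT is a reference to memory plus a length, counted in bytes. */
+	key.data = "sport";
+	key.size = sizeof("sport");	/* Includes the trailing nul byte. */
+	data.data = &s;
+	data.size = sizeof(s);
+
+	return (0);
+}</pre></blockquote>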
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/handles.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/open.html b/libdb/docs/ref/simple_tut/open.html
new file mode 100644
index 0000000..bc598bd
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/open.html
@@ -0,0 +1,91 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/errors.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening a database</h1>
+<p>Opening a database is done in two steps: first, a <a href="../../api_c/db_class.html">DB</a> handle is
+created using the Berkeley DB <a href="../../api_c/db_create.html">db_create</a> interface, and then the
+actual database is opened using the <a href="../../api_c/db_open.html">DB-&gt;open</a> function.
+<p>The <a href="../../api_c/db_create.html">db_create</a> interface takes three arguments:
+<p><dl compact>
+<p><dt>dbp<dd>A location to store a reference to the created structure.
+<p><dt>environment<dd>A location to specify an enclosing Berkeley DB environment, not used in our
+example.
+<p><dt>flags<dd>A placeholder for flags, not used in our example.
+</dl>
+<p>The <a href="../../api_c/db_open.html">DB-&gt;open</a> interface takes five arguments:
+<p><dl compact>
+<p><dt>file<dd>The name of the database file to be opened.
+<p><dt>database<dd>The optional database name, not used in this example.
+<p><dt>type<dd>The type of database to open. This value will be one of the four access
+methods Berkeley DB supports: DB_BTREE, DB_HASH, DB_QUEUE or DB_RECNO, or the
+special value DB_UNKNOWN, which allows you to open an existing file
+without knowing its type.
+<p><dt>flags<dd>Various flags that modify the behavior of <a href="../../api_c/db_open.html">DB-&gt;open</a>. In our
+simple case, the only interesting flag is <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a>. This flag
+behaves similarly to the IEEE/ANSI Std 1003.1 (POSIX) O_CREAT flag to the open system
+call, causing Berkeley DB to create the underlying database if it does not
+yet exist.
+<p><dt>mode<dd>The file mode of any underlying files that <a href="../../api_c/db_open.html">DB-&gt;open</a> will create.
+The mode behaves as does the IEEE/ANSI Std 1003.1 (POSIX) mode argument to the open
+system call, and specifies file read, write and execute permissions.
+Of course, only the read and write permissions are relevant to Berkeley DB.
+</dl>
+<p>Here's what the code to create the handle and then call <a href="../../api_c/db_open.html">DB-&gt;open</a>
+looks like:
+<p><blockquote><pre><b>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }</b>
+</pre></blockquote>
+<p>If the call to <a href="../../api_c/db_create.html">db_create</a> is successful, the variable <b>dbp</b>
+will contain a database handle that will be used to configure and access
+an underlying database.
+<p>As you see, the program opens a database named <b>access.db</b>. The
+underlying database is a Btree. Because the <a href="../../api_c/env_open.html#DB_CREATE">DB_CREATE</a> flag was
+specified, the file will be created if it does not already exist. The
+mode of any created files will be 0664 (that is, readable and writable by
+the owner and the group, and readable by everyone else).
+<p>One additional function call is used in this code sample, <a href="../../api_c/db_err.html">DB-&gt;err</a>.
+This method works like the ANSI C printf interface. The second argument
+is the error return from a Berkeley DB function, and the rest of the arguments
+are a printf-style format string and argument list. The error message
+associated with the error return will be appended to a message constructed
+from the format string and other arguments. In the above code, if the
+<a href="../../api_c/db_open.html">DB-&gt;open</a> call were to fail, the message it would display would be
+something like
+<p><blockquote><pre>access.db: Operation not permitted</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/errors.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/simple_tut/put.html b/libdb/docs/ref/simple_tut/put.html
new file mode 100644
index 0000000..9d35e82
--- /dev/null
+++ b/libdb/docs/ref/simple_tut/put.html
@@ -0,0 +1,128 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Adding elements to a database</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Simple Tutorial</dl></h3></td>
+<td align=right><a href="../../ref/simple_tut/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Adding elements to a database</h1>
+<p>The simplest way to add elements to a database is the <a href="../../api_c/db_put.html">DB-&gt;put</a>
+interface.
+<p>The <a href="../../api_c/db_put.html">DB-&gt;put</a> interface takes five arguments:
+<p><dl compact>
+<p><dt>db<dd>The database handle returned by <a href="../../api_c/db_create.html">db_create</a>.
+<p><dt>txnid<dd>A transaction handle. In our simple case, we aren't expecting to
+recover the database after application or system crash, so we aren't
+using transactions, and will leave this argument NULL.
+<p><dt>key<dd>The key item for the key/data pair that we want to add to the database.
+<p><dt>data<dd>The data item for the key/data pair that we want to add to the database.
+<p><dt>flags<dd>Optional flags modifying the underlying behavior of the <a href="../../api_c/db_put.html">DB-&gt;put</a>
+interface.
+</dl>
+<p>Here's what the code to call <a href="../../api_c/db_put.html">DB-&gt;put</a> looks like:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;db.h&gt;
+<p>
+#define DATABASE "access.db"
+<p>
+int
+main()
+{
+ DB *dbp;
+ <b>DBT key, data;</b>
+ int ret;
+<p>
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr, "db_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+ if ((ret = dbp-&gt;open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp-&gt;err(dbp, ret, "%s", DATABASE);
+ goto err;
+ }
+<p><b> memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = "fruit";
+ key.size = sizeof("fruit");
+ data.data = "apple";
+ data.size = sizeof("apple");
+<p>
+ if ((ret = dbp-&gt;put(dbp, NULL, &key, &data, 0)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+ else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+ }
+</b></pre></blockquote>
+<p>The first thing to notice about this new code is that we clear the
+<a href="../../api_c/dbt_class.html">DBT</a> structures that we're about to pass as arguments to Berkeley DB
+functions. This is very important, and being careful to do so will
+result in fewer errors in your programs. All Berkeley DB structures
+instantiated in the application and handed to Berkeley DB should be cleared
+before use, without exception. This is necessary so that future
+versions of Berkeley DB may add additional fields to the structures. If
+applications clear the structures before use, it will be possible for
+Berkeley DB to change those structures without requiring that the applications
+be rewritten to be aware of the changes.
+<p>Notice also that we're storing the trailing nul byte found in the C
+strings <b>"fruit"</b> and <b>"apple"</b> in both the key and data
+items, that is, the trailing nul byte is part of the stored key, and
+therefore has to be specified in order to access the data item. There is
+no requirement to store the trailing nul byte; doing so simply makes it
+easier for us to display the stored strings in programming languages that
+use nul bytes to terminate strings.
+<p>In many applications, it is important not to overwrite existing data.
+For example, we might not want to store the key/data pair
+<b>fruit/apple</b> if a pair with that key already existed; for instance,
+if the key/data pair <b>fruit/cherry</b> had previously been stored in the
+database.
+<p>This is easily accomplished by adding the <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a> flag to
+the <a href="../../api_c/db_put.html">DB-&gt;put</a> call:
+<p><blockquote><pre><b>if ((ret =
+ dbp-&gt;put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) == 0)
+ printf("db: %s: key stored.\n", (char *)key.data);
+else {
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+}</b></pre></blockquote>
+<p>This flag causes the underlying database functions to not overwrite any
+previously existing key/data pair. (Note that the value of the previously
+existing data doesn't matter in this case. The only question is whether a
+key/data pair already exists whose key matches the key that we are
+trying to store.)
+<p>Specifying <a href="../../api_c/db_put.html#DB_NOOVERWRITE">DB_NOOVERWRITE</a> opens up the possibility of a new
+Berkeley DB return value from the <a href="../../api_c/db_put.html">DB-&gt;put</a> function, <a href="../../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a>,
+which means we were unable to add the key/data pair to the database
+because the key already existed in the database. The above sample
+code simply displays a message in this case:
+<p><blockquote><pre>DB-&gt;put: DB_KEYEXIST: Key/data pair already exists</pre></blockquote>
+<p>The following code shows an explicit check for this possibility:
+<p><blockquote><pre><b>switch (ret =
+ dbp-&gt;put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+case 0:
+ printf("db: %s: key stored.\n", (char *)key.data);
+ break;
+case DB_KEYEXIST:
+ printf("db: %s: key previously stored.\n",
+ (char *)key.data);
+ break;
+default:
+ dbp-&gt;err(dbp, ret, "DB-&gt;put");
+ goto err;
+}</b></pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/simple_tut/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/simple_tut/get.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/tcl/error.html b/libdb/docs/ref/tcl/error.html
new file mode 100644
index 0000000..3193b9f
--- /dev/null
+++ b/libdb/docs/ref/tcl/error.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Tcl error handling</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td align=right><a href="../../ref/tcl/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Tcl error handling</h1>
+<p>The Tcl interfaces to Berkeley DB generally return TCL_OK on success and throw
+a Tcl error on failure, using the appropriate Tcl interfaces to provide
+the user with an informative error message. There are some "expected"
+failures, however, for which no Tcl error will be thrown and for which
+Tcl commands will return TCL_OK. These failures include times when a
+searched-for key is not found, a requested key/data pair was previously
+deleted, or a key/data pair cannot be written because the key already
+exists.
+<p>These failures can be detected by searching the Berkeley DB error message that
+is returned. For example, use the following to detect that an attempt
+to put a record into the database failed because the key already
+existed:
+<p><blockquote><pre>% berkdb open -create -btree a.db
+db0
+% db0 put dog cat
+0
+% set ret [db0 put -nooverwrite dog newcat]
+DB_KEYEXIST: Key/data pair already exists
+% if { [string first DB_KEYEXIST $ret] != -1 } {
+ puts "This was an error; the key existed"
+}
+This was an error; the key existed
+% db0 close
+0
+% exit</pre></blockquote>
+<p>To simplify parsing, it is recommended that the initial Berkeley DB error name
+be checked; for example, <a href="../../api_c/dbc_put.html#DB_KEYEXIST">DB_KEYEXIST</a> in the previous example.
+To ensure that Tcl scripts are not broken by upgrading to new releases
+of Berkeley DB, these values will not change in future releases of Berkeley DB.
+There are currently only three such "expected" error returns:
+<p><blockquote><pre>DB_NOTFOUND: No matching key/data pair found
+DB_KEYEMPTY: Nonexistent key/data pair
+DB_KEYEXIST: Key/data pair already exists</pre></blockquote>
+<p>Finally, sometimes Berkeley DB will output additional error information when
+a Berkeley DB error occurs. By default, all Berkeley DB error messages will be
+prefixed with the created command in whose context the error occurred
+(for example, "env0", "db2", and so on). There are several ways to
+capture and access this information.
+<p>First, if Berkeley DB invokes the error callback function, the additional
+information will be placed in the error result returned from the command
+and in the errorInfo backtrace variable in Tcl.
+<p>Also, the two calls to open an environment and open a database take an
+option, <b>-errfile filename</b>, which sets an output file to which
+these additional error messages should be written.
+<p>Additionally, the two calls to open an environment and open a database
+take an option, <b>-errpfx string</b>, which sets the error prefix to
+the given string. This option may be useful in circumstances where a
+more descriptive prefix, or a constant prefix indicating an error, is
+desired.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/tcl/program.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/tcl/faq.html b/libdb/docs/ref/tcl/faq.html
new file mode 100644
index 0000000..58db960
--- /dev/null
+++ b/libdb/docs/ref/tcl/faq.html
@@ -0,0 +1,58 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Tcl FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td align=right><a href="../../ref/tcl/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/sendmail/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Tcl FAQ</h1>
+<p><ol>
+<p><li><b>I have several versions of Tcl installed. How do I configure
+Berkeley DB to use a particular version?</b>
+<p>To compile the Tcl interface with a particular version of Tcl, use the
+--with-tcl option to specify the Tcl installation directory that
+contains the tclConfig.sh file. See
+<a href="../../ref/build_unix/flags.html">Changing compile or load
+options</a> for more information.
+<hr size=1 noshade>
+<p><li><b>Berkeley DB was configured using --enable-tcl or --with-tcl and fails
+to build.</b>
+<p>The Berkeley DB Tcl interface requires Tcl version 8.1 or greater.
+<hr size=1 noshade>
+<p><li><b>Berkeley DB was configured using --enable-tcl or --with-tcl and fails
+to build.</b>
+<p>If the Tcl installation was moved after it was configured and installed,
+try reconfiguring and reinstalling Tcl.
+<p>Also, some systems do not search for shared libraries by default, or do
+not search for shared libraries named the way the Tcl installation names
+them, or are searching for a different kind of library than those in
+your Tcl installation. For example, Linux systems often require linking
+"libtcl.a" to "libtcl#.#.a", whereas AIX systems often require adding the
+"-brtl" flag to the linker. A simpler solution that almost always works
+on all systems is to create a link from "libtcl.#.#.a" or "libtcl.so"
+(or whatever you happen to have) to "libtcl.a" and reconfigure.
+<hr size=1 noshade>
+<p><li><b>Loading the Berkeley DB library into Tcl on AIX causes a core dump.</b>
+<p>In some versions of Tcl, the "tclConfig.sh" autoconfiguration script
+created by the Tcl installation does not work properly under AIX, and
+you may have to modify values in the tclConfig.sh file in order to
+load the Berkeley DB library into Tcl. Specifically, the TCL_LIB_SPEC
+variable should contain sufficient linker flags to find and link against
+the installed libtcl library. In some circumstances, the tclConfig.sh
+file built by Tcl does not.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/tcl/error.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/sendmail/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/tcl/intro.html b/libdb/docs/ref/tcl/intro.html
new file mode 100644
index 0000000..528964b
--- /dev/null
+++ b/libdb/docs/ref/tcl/intro.html
@@ -0,0 +1,71 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Loading Berkeley DB with Tcl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td align=right><a href="../../ref/perl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/using.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Loading Berkeley DB with Tcl</h1>
+<p>Berkeley DB includes a dynamically loadable Tcl API, which requires that
+Tcl/Tk 8.1 or later already be installed on your system. We recommend
+that you install later releases of Tcl/Tk than 8.1 if possible,
+especially on Windows platforms, because we found that we had to make
+local fixes to the 8.1 release in a few cases. You can download a copy
+of Tcl from the <a href="http://dev.scriptics.com/">Tcl Developer
+Xchange</a> Web site.
+<p>This document assumes that you already configured Berkeley DB for Tcl
+support, and you have built and installed everything where you want it
+to be. If you have not done so, see
+<a href="../../ref/build_unix/conf.html">Configuring Berkeley DB</a> or
+<a href="../../ref/build_win/intro.html">Building for Win32</a> for more
+information.
+<h3>Installing as a Tcl Package</h3>
+<p>Once enabled, the Berkeley DB shared library for Tcl is automatically installed
+as part of the standard installation process. However, if you want to be
+able to dynamically load it as a Tcl package into your script, there are
+several steps that must be performed:
+<p><ol>
+<p><li>Run the Tcl shell in the install directory.
+<li>Append this directory to your auto_path variable.
+<li>Run the pkg_mkIndex proc, giving the name of the Berkeley DB Tcl library.
+</ol>
+<p>For example:
+<p><blockquote><pre># tclsh8.3
+% lappend auto_path /usr/local/BerkeleyDB.4.1/lib
+% pkg_mkIndex /usr/local/BerkeleyDB.4.1/lib libdb_tcl-4.1.so</pre></blockquote>
+<p>Note that your Tcl and Berkeley DB version numbers may differ from the
+example, and so your tclsh and library names may be different.
+<h3>Loading Berkeley DB with Tcl</h3>
+<p>The Berkeley DB package may be loaded into the user's interactive Tcl script
+(or wish session) via the <b>load</b> command. For example:
+<p><blockquote><pre>load /usr/local/BerkeleyDB.4.1/lib/libdb_tcl-4.1.so</pre></blockquote>
+<p>Note that your Berkeley DB version numbers may differ from the example, and so
+the library name may be different.
+<p>If you installed your library to run as a Tcl package, Tcl application
+scripts should use the <b>package</b> command to indicate to the Tcl
+interpreter that it needs the Berkeley DB package and where to find it. For
+example:
+<p><blockquote><pre>lappend auto_path "/usr/local/BerkeleyDB.4.1/lib"
+package require Db_tcl</pre></blockquote>
+<p>No matter which way the library gets loaded, it creates a command named
+<b>berkdb</b>. All the Berkeley DB functionality is accessed via this
+command and additional commands it creates on behalf of the application.
+A simple test to determine whether everything is loaded and ready is to
+display the library version, as follows:
+<p><blockquote><pre>berkdb version -string</pre></blockquote>
+<p>This should return the Berkeley DB version as a string.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/perl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/using.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/tcl/program.html b/libdb/docs/ref/tcl/program.html
new file mode 100644
index 0000000..7b2764a
--- /dev/null
+++ b/libdb/docs/ref/tcl/program.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Tcl API programming notes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td align=right><a href="../../ref/tcl/using.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Tcl API programming notes</h1>
+<p>The Tcl API closely parallels the Berkeley DB programmatic interfaces. If you
+are already familiar with one of those interfaces, there will not be many
+surprises in the Tcl API.
+<p>Several pieces of Berkeley DB functionality are not available in the Tcl API.
+Any of the functions that require a user-provided function are not
+supported via the Tcl API. For example, there is no equivalent to the
+<a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> or the <a href="../../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>
+methods.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/tcl/using.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/error.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/tcl/using.html b/libdb/docs/ref/tcl/using.html
new file mode 100644
index 0000000..d509b93
--- /dev/null
+++ b/libdb/docs/ref/tcl/using.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Using Berkeley DB with Tcl</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Tcl API</dl></h3></td>
+<td align=right><a href="../../ref/tcl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Using Berkeley DB with Tcl</h1>
+<p>All commands in the Berkeley DB Tcl interface are in the following form:
+<p><blockquote><pre>command_handle operation options</pre></blockquote>
+<p>The <i>command handle</i> is <b>berkdb</b> or one of the additional
+commands that may be created. The <i>operation</i> is what you want
+to do to that handle, and the <i>options</i> apply to the operation.
+Commands that get created on behalf of the application have their own sets
+of operations. Generally, any calls in DB that result in new object
+handles will translate into a new command handle in Tcl. Then, the user
+can access the operations of the handle via the new Tcl command handle.
+<p>Newly created commands are named with an abbreviated form of their
+objects, followed by a number. Some created commands are subcommands of
+other created commands; their names consist of the parent command, followed
+by a period (.), followed by the new subcommand. For example,
+suppose that you have a database already existing called my_data.db.
+The following example shows the commands created when you open the
+database and when you open a cursor:
+<p><blockquote><pre># First open the database and get a database command handle
+% berkdb open my_data.db
+db0
+#Get some data from that database
+% db0 get my_key
+{{my_key my_data0}{my_key my_data1}}
+#Open a cursor in this database, get a new cursor handle
+% db0 cursor
+db0.c0
+#Get the first data from the cursor
+% db0.c0 get -first
+{{first_key first_data}}</pre></blockquote>
+<p>All commands in the library support a special option <b>-?</b> that will
+list the correct operations for a command or the correct options.
+<p>A list of commands and operations can be found in the
+<a href="../../api_tcl/tcl_index.html">Tcl Interface</a> documentation.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/tcl/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/tcl/program.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/test/faq.html b/libdb/docs/ref/test/faq.html
new file mode 100644
index 0000000..eed831e
--- /dev/null
+++ b/libdb/docs/ref/test/faq.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Test suite FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Test Suite</dl></h3></td>
+<td align=right><a href="../../ref/test/run.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/port.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Test suite FAQ</h1>
+<p><ol>
+<p><li><b>The test suite has been running for over a day. What's wrong?</b>
+<p>The test suite can take anywhere from several hours to several
+days to run, depending on your hardware configuration. As long as the
+run is making forward progress and new lines are being written to the
+<b>ALL.OUT</b> file, everything is probably fine.
+<p><li><b>The test suite hangs.</b>
+<p>The test suite requires Tcl 8.1 or greater, preferably at least Tcl 8.3.
+If you are using an earlier version of Tcl, the test suite may simply
+hang at some point.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/test/run.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/distrib/port.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/test/run.html b/libdb/docs/ref/test/run.html
new file mode 100644
index 0000000..5b81129
--- /dev/null
+++ b/libdb/docs/ref/test/run.html
@@ -0,0 +1,80 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Running the test suite</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Test Suite</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Running the test suite</h1>
+<p>Once you have started tclsh and have loaded the test.tcl source file (see
+<a href="../../ref/build_unix/test.html">Running the test suite under UNIX</a>
+and <a href="../../ref/build_win/test.html">Running the test suite under
+Windows</a> for more information), you are ready to run the test suite. At
+the tclsh prompt, to run the entire test suite, enter the following:
+<p><blockquote><pre>% run_std</pre></blockquote>
+<p>Running all the tests can take from several hours to a few days to
+complete, depending on your hardware. For this reason, the output from
+this command is redirected to a file in the current directory named
+<b>ALL.OUT</b>. Periodically, a line will be written to the standard
+output, indicating what test is being run. When the test suite has
+finished, a single message indicating that the test suite completed
+successfully or that it failed will be written. If the run failed, you
+should review the file <b>ALL.OUT</b> to determine which tests failed.
+Errors will appear in that file as output lines, beginning with the
+string "FAIL".
+<p>It is also possible to run specific tests or tests for a particular
+subsystem:
+<p><blockquote><pre>% r archive
+% r btree
+% r env
+% r frecno
+% r hash
+% r join
+% r lock
+% r log
+% r mpool
+% r mutex
+% r queue
+% r rbtree
+% r recno
+% r rrecno
+% r subdb
+% r txn</pre></blockquote>
+<p>Or to run a single, individual test:
+<p><blockquote><pre>% test001 btree</pre></blockquote>
+<p>It is also possible to modify the test run based on arguments on the
+command line. For example, the following command will run a greatly
+abbreviated form of test001, doing 10 operations instead of 10,000:
+<p><blockquote><pre>% test001 btree 10</pre></blockquote>
+<p>In all cases, when not running the entire test suite as described
+previously, a successful test run will return you to the tclsh prompt.
+On failure, a message is displayed indicating what failed.
+<p>Tests are run, by default, in the directory <b>TESTDIR</b>. However,
+the test files are often very large. To use a different directory for
+the test directory, edit the file include.tcl in your build directory,
+and change the following line to a more appropriate value for your
+system:
+<p><blockquote><pre>set testdir ./TESTDIR</pre></blockquote>
+<p>For example, you might change it to the following:
+<p><blockquote><pre>set testdir /var/tmp/db.test</pre></blockquote>
+<p>Alternatively, you can create a symbolic link named TESTDIR in your
+build directory to an appropriate location for running the tests.
+Regardless of where you run the tests, the TESTDIR directory should be
+on a local filesystem; using a remote filesystem (for example, NFS) will
+almost certainly cause spurious test failures.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/admin.html b/libdb/docs/ref/transapp/admin.html
new file mode 100644
index 0000000..70a18d9
--- /dev/null
+++ b/libdb/docs/ref/transapp/admin.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Environment infrastructure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/nested.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Environment infrastructure</h1>
+<p>When building transactional applications, it is usually necessary to
+build an administrative infrastructure around the database environment.
+There are five components to this infrastructure, and each is
+supported by the Berkeley DB package in two different ways: a standalone
+utility and one or more library interfaces.
+<p><ul type=disc>
+<li>Deadlock detection: <a href="../../utility/db_deadlock.html">db_deadlock</a>,
+<a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a>, <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a>
+<li>Checkpoints: <a href="../../utility/db_checkpoint.html">db_checkpoint</a>, <a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a>
+<li>Database and log file archival:
+<a href="../../utility/db_archive.html">db_archive</a>, <a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>
+<li>Log file removal: <a href="../../utility/db_archive.html">db_archive</a>, <a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>
+<li>Recovery procedures: <a href="../../utility/db_recover.html">db_recover</a>, <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>
+</ul>
+<p>When writing multithreaded server applications and/or applications
+intended for download from the Web, it is usually simpler to create
+local threads that are responsible for administration of the database
+environment, because scheduling is often simpler in a single-process model
+and only a single binary need be installed and run. However, the
+supplied utilities can be generally useful tools even when the
+application is responsible for doing its own administration because
+applications rarely offer external interfaces to database
+administration. The utilities are required when programming to a Berkeley DB
+scripting interface because the scripting APIs do not always offer
+interfaces to the administrative functionality.
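+<p>For example, the following fragment is a sketch only (the environment
+handle, the one-minute interval, and the choice to run both tasks in a single
+loop are assumptions of this example, not requirements); it shows a local
+administrative thread using the library interfaces listed above to checkpoint
+the environment and to run the deadlock detector:
+<p><blockquote><pre>#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+/* Periodically checkpoint the environment and detect deadlocks. */
+void
+admin_loop(DB_ENV *dbenv)
+{
+	int ret;
+
+	for (;;) {
+		/* Library equivalent of the db_checkpoint utility. */
+		if ((ret = dbenv-&gt;txn_checkpoint(dbenv, 0, 0, 0)) != 0)
+			dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;txn_checkpoint");
+
+		/* Library equivalent of the db_deadlock utility. */
+		if ((ret = dbenv-&gt;lock_detect(dbenv,
+		    0, DB_LOCK_DEFAULT, NULL)) != 0)
+			dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;lock_detect");
+
+		sleep(60);
+	}
+}</pre></blockquote>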
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/nested.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/app.html b/libdb/docs/ref/transapp/app.html
new file mode 100644
index 0000000..a5051e2
--- /dev/null
+++ b/libdb/docs/ref/transapp/app.html
@@ -0,0 +1,130 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Application structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/term.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/env_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Application structure</h1>
+<p>When building transactionally protected applications, there are some
+special issues that must be considered. The most important one is that
+if any thread of control exits for any reason while holding Berkeley DB
+resources, recovery must be performed to do the following:
+<p><ul type=disc>
+<li>Recover the Berkeley DB resources.
+<li>Release any locks or mutexes that may have been held to avoid starvation
+as the remaining threads of control convoy behind the failed thread's
+locks.
+<li>Clean up any partially completed operations that may have left a
+database in an inconsistent or corrupted state.
+</ul>
+<p>Complicating this problem is the fact that the Berkeley DB library itself
+cannot determine whether recovery is required; the application itself
+<b>must</b> make that decision. A further complication is that
+recovery must be single-threaded; that is, one thread of control or
+process must perform recovery before any other thread of control or
+processes attempts to create or join the Berkeley DB environment.
+<p>There are two approaches to handling this problem:
+<p><dl compact>
+<p><dt>The hard way:<dd>An application can track its own state carefully enough that it knows
+when recovery needs to be performed. Specifically, the rule to use is
+that recovery must be performed before using a Berkeley DB environment any
+time the threads of control previously using the Berkeley DB environment did
+not shut the environment down cleanly before exiting the environment
+for any reason (including application or system failure).
+<p>Requirements for shutting down the environment cleanly differ, depending
+on the type of environment created. If the environment is public and
+persistent (that is, the <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag was not specified to
+the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method), recovery must be performed if any transaction
+was not committed or aborted, or the <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> method was not called
+for any open <a href="../../api_c/env_class.html">DB_ENV</a> handle.
+<p>If the environment is private and temporary (that is, the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag was specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method),
+recovery must be performed if any transaction was not committed or
+aborted, or the <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> method was not called for any open
+<a href="../../api_c/env_class.html">DB_ENV</a> handle. In addition, at least one transaction checkpoint
+must be performed after all existing transactions have been committed
+or aborted.
+<p><dt>The easy way:<dd>It greatly simplifies matters that recovery may be performed regardless
+of whether recovery strictly needs to be performed; that is, it is not
+an error to run recovery on a database for which no recovery is
+necessary. Because of this fact, it is almost invariably simpler to
+ignore the previous rules about shutting an application down cleanly,
+and simply run recovery each time a thread of control accessing a
+database environment fails for any reason, as well as before accessing
+any database environment after system reboot.
+</dl>
+<p>There are two common ways to build transactionally protected Berkeley DB
+applications. The most common way is as a single, usually
+multithreaded, process. This architecture is simplest because it
+requires no monitoring of other threads of control. When the
+application starts, it opens and potentially creates the environment,
+runs recovery (whether it was needed or not), and then opens its
+databases. From then on, the application can create new threads of
+control as it chooses. All threads of control share the open Berkeley DB
+<a href="../../api_c/env_class.html">DB_ENV</a> and <a href="../../api_c/db_class.html">DB</a> handles. In this model, databases are
+rarely opened or closed when more than a single thread of control is
+running; that is, they are opened when only a single thread is running,
+and closed after all threads but one have exited. The last thread of
+control to exit closes the databases and the environment.
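+<p>As a sketch only (the environment home directory <b>/var/myapp/data</b>,
+the database name <b>mydb.db</b>, and the exact flag combination are
+assumptions of this example, not requirements), startup for such a
+single-process application might create the environment, run recovery whether
+it is needed or not, and then open its database before creating any
+additional threads of control:
+<p><blockquote><pre>#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;db.h&gt;
+
+int
+main()
+{
+	DB_ENV *dbenv;
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
+		return (1);
+	}
+
+	/* Open the environment, always running recovery first. */
+	if ((ret = dbenv-&gt;open(dbenv, "/var/myapp/data",
+	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+	    DB_INIT_TXN | DB_RECOVER | DB_THREAD, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "environment open");
+		return (1);
+	}
+
+	/* Open the database; worker threads are created only after this. */
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0 ||
+	    (ret = dbp-&gt;open(dbp, NULL,
+	    "mydb.db", NULL, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0664)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "database open");
+		return (1);
+	}
+
+	/* ... create worker threads and do the application's work ... */
+
+	(void)dbp-&gt;close(dbp, 0);
+	return (dbenv-&gt;close(dbenv, 0));
+}</pre></blockquote>
+<p>Opening the environment with the DB_RECOVER flag in this way follows the
+"easy way" described above: recovery is run at every startup, whether or not
+it is strictly needed.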
+<p>An alternative way to build Berkeley DB applications is as a set of
+cooperating processes, which may or may not be multithreaded. This
+architecture is more complicated.
+<p>First, this architecture requires that the order in which threads of
+control are created and subsequently access the Berkeley DB environment be
+controlled because recovery must be single-threaded. The first thread
+of control to access the environment must run recovery, and no other
+thread should attempt to access the environment until recovery is
+complete. (Note that this ordering requirement does not apply to
+environment creation without recovery. If multiple threads attempt to
+create a Berkeley DB environment, only one will perform the creation and the
+others will join the already existing environment.)
+<p>Second, this architecture requires that threads of control be monitored.
+If any thread of control that owns Berkeley DB resources exits without first
+cleanly discarding those resources, recovery is usually necessary.
+Before running recovery, all threads using the Berkeley DB environment must
+relinquish all of their Berkeley DB resources (it does not matter if they do
+so gracefully or because they are forced to exit). Then, recovery can
+be run and the threads of control continued or restarted.
+<p>We have found that the safest way to structure groups of cooperating
+processes is to first create a single process (often a shell script)
+that opens/creates the Berkeley DB environment and runs recovery, and that
+then creates the processes or threads that will actually perform work.
+The initial thread has no further responsibilities other than to monitor
+the threads of control it has created, to ensure that none of them
+unexpectedly exits. If one exits, the initial process then forces all
+of the threads of control using the Berkeley DB environment to exit, runs
+recovery, and restarts the working threads of control.
+<p>If it is not practical to have a single parent for the processes sharing
+a Berkeley DB environment, each process sharing the environment should log
+its connection to and exit from the environment in a way that allows
+a monitoring process to detect whether a thread of control might have
+acquired Berkeley DB resources and never released them. In this model, an
+initial "watcher" process opens/creates the Berkeley DB environment and runs
+recovery, and then creates a sentinel file. Any other process wanting
+to use the Berkeley DB environment checks for the sentinel file; if the
+sentinel file exists, the other process registers its process ID with
+the watcher and joins the database environment. When the other process
+finishes with the environment, it unregisters its process ID with the
+watcher. The watcher periodically checks to ensure that no process has
+failed while using the environment. If a process does fail while using
+the environment, the watcher removes the sentinel file, kills all
+processes currently using the environment, runs recovery, and re-creates
+the sentinel file.
+<p>Obviously, it is important that the monitoring process in either case
+be as simple and well-tested as possible because there is no recourse
+if it fails.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/term.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/env_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/archival.html b/libdb/docs/ref/transapp/archival.html
new file mode 100644
index 0000000..fa4f93b
--- /dev/null
+++ b/libdb/docs/ref/transapp/archival.html
@@ -0,0 +1,154 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Database and log file archival</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/logfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Database and log file archival</h1>
+<p>The third component of the administrative infrastructure, archival for
+catastrophic recovery, concerns the recoverability of the database in
+the face of catastrophic failure. Recovery after catastrophic failure
+is intended to minimize data loss when physical hardware has been
+destroyed -- for example, loss of a disk that contains databases or log
+files. Although the application may still experience data loss in this
+case, it is possible to minimize it.
+<p>First, you may want to periodically create snapshots (that is, backups)
+of your databases to make it possible to recover from catastrophic
+failure. These snapshots are either a standard backup, which creates a
+consistent picture of the databases as of a single instant in time; or
+an on-line backup (also known as a <i>hot</i> backup), which creates
+a consistent picture of the databases as of an unspecified instant
+during the period of time when the snapshot was made. The advantage of
+a hot backup is that applications may continue to read and write the
+databases while the snapshot is being taken. The disadvantage of a hot
+backup is that more information must be archived, and recovery based on
+a hot backup is to an unspecified time between the start of the backup
+and when the backup is completed.
+<p>Second, after taking a snapshot, you should periodically archive the
+log files being created in the environment. It is often helpful to
+think of database archival in terms of full and incremental filesystem
+backups. A snapshot is a full backup, whereas the periodic archival of
+the current log files is an incremental backup. For example, it might
+be reasonable to take a full snapshot of a database environment weekly
+or monthly, and archive additional log files daily. Using both the
+snapshot and the log files, a catastrophic crash at any time can be
+recovered to the time of the most recent log archival; a time long after
+the original snapshot.
+<p>To create a standard backup of your database that can be used to recover
+from catastrophic failure, take the following steps:
+<p><ol>
+<p><li>Commit or abort all ongoing transactions.
+<p><li>Stop writing your databases until the backup has completed. Read-only
+operations are permitted, but no write operations and no filesystem
+operations may be performed (for example, the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> and
+<a href="../../api_c/db_open.html">DB-&gt;open</a> methods may not be called).
+<p><li>Force an environment checkpoint (see <a href="../../utility/db_checkpoint.html">db_checkpoint</a> for more
+information).
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> to identify all the database data
+files, and copy them to a backup device such as CD-ROM, alternate disk,
+or tape.
+<p>If the database files are stored in a separate directory from the other
+Berkeley DB files, it may be simpler to archive the directory itself instead
+of the individual files (see <a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> for additional
+information). <b>Note: if any of the database files did not have
+an open <a href="../../api_c/db_class.html">DB</a> handle during the lifetime of the current log files,
+<a href="../../utility/db_archive.html">db_archive</a> will not list them in its output!</b> This is another
+reason it may be simpler to use a separate database file directory and
+archive the entire directory instead of archiving only the files listed
+by <a href="../../utility/db_archive.html">db_archive</a>.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-l</b> to identify all the log files,
+and copy the last one (that is, the one with the highest number) to a
+backup device such as CD-ROM, alternate disk, or tape.
+</ol>
+<a name="4"><!--meow--></a>
+<p>To create a <i>hot</i> backup of your database that can be used to
+recover from catastrophic failure, take the following steps:
+<p><ol>
+<p><li>Archive your databases, as described in step #4 of the standard backup procedure above.
+You do not have to halt ongoing transactions or force a
+checkpoint. In the case of a hot backup, the utility you use to copy
+the databases must read database pages atomically (as described by
+<a href="../../ref/transapp/reclimit.html">Berkeley DB recoverability</a>).
+<p><li>When performing a hot backup, you must additionally archive all of the
+log files. Note that the order of these two operations is required,
+and the database files must be archived before the log files. This
+means that if the database files and log files are in the same
+directory, you cannot simply archive the directory; you must make sure
+that the correct order of archival is maintained.
+<p>To archive your log files, run the <a href="../../utility/db_archive.html">db_archive</a> utility using
+the <b>-l</b> option to identify all the database log files, and
+copy them to your backup media. If the database log files are stored
+in a separate directory from the other database files, it may be simpler
+to archive the directory itself instead of the individual files (see
+the <a href="../../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a> method for more information).
+</ol>
+<p>Once these steps are completed, your database can be recovered from
+catastrophic failure (see <a href="recovery.html">Recovery procedures</a> for
+more information).
+<p>To update your snapshot so that recovery from catastrophic failure is
+possible up to a new point in time, repeat step 2 under the hot backup
+instructions -- copying all existing log files to a backup device. This
+is applicable to both standard and hot backups; that is, you can update
+snapshots made either way. Each time both the database and log files
+are copied to backup media, you may discard all previous database
+snapshots and saved log files. Archiving additional log files does not
+allow you to discard either previous database snapshots or log files.
+<p>The time to restore from catastrophic failure is a function of the
+number of log records that have been written since the snapshot was
+originally created. Perhaps more importantly, the more separate pieces
+of backup media you use, the more likely it is that you will have a
+problem reading from one of them. For these reasons, it is often best
+to make snapshots on a regular basis.
+<p><b>Obviously, the reliability of your archive media will affect the safety
+of your data. For archival safety, ensure that you have multiple copies
+of your database backups, verify that your archival media is error-free
+and readable, and confirm that copies of your backups are stored offsite!</b>
+<p>The functionality provided by the <a href="../../utility/db_archive.html">db_archive</a> utility is also
+available directly from the Berkeley DB library. The following code fragment
+prints out a list of log and database files that need to be archived:
+<p><blockquote><pre>void
+log_archlist(DB_ENV *dbenv)
+{
+ int ret;
+ char **begin, **list;
+<p>
+ /* Get the list of database files. */
+ if ((ret = dbenv-&gt;log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_DATA)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;log_archive: DB_ARCH_DATA");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("database file: %s\n", *list);
+ free (begin);
+ }
+<p>
+ /* Get the list of log files. */
+ if ((ret = dbenv-&gt;log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_LOG)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;log_archive: DB_ARCH_LOG");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("log file: %s\n", *list);
+ free (begin);
+ }
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/logfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/atomicity.html b/libdb/docs/ref/transapp/atomicity.html
new file mode 100644
index 0000000..c8fbf20
--- /dev/null
+++ b/libdb/docs/ref/transapp/atomicity.html
@@ -0,0 +1,63 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Atomicity</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/inc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Atomicity</h1>
+<p>The second reason listed for using transactions was <i>atomicity</i>.
+Atomicity means that multiple operations can be grouped into a single
+logical entity, that is, other threads of control accessing the database
+will either see all of the changes or none of the changes. Atomicity
+is important for applications wanting to update two related databases
+(for example, a primary database and secondary index) in a single
+logical action. Or, for an application wanting to update multiple
+records in one database in a single logical action.
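+<p>As an illustrative sketch only (the update_pair helper below is not part
+of the example program used elsewhere in this chapter, and the database
+handles -- opened for transactional use -- together with the key and data
+items are assumed to have been set up by the caller), the following
+fragment groups a primary database update and a secondary index update
+into one transaction, so that other threads of control see both changes
+or neither:
+<p><blockquote><pre>int
+update_pair(DB_ENV *dbenv,
+    DB *db_primary, DB *db_index, DBT *key, DBT *data, DBT *skey)
+{
+	DB_TXN *tid;
+	int ret;
+<p>
+	/* Begin the transaction covering both writes. */
+	if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+		dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;txn_begin");
+		return (ret);
+	}
+<p>
+	/* Update the primary database and the secondary index. */
+	if ((ret = db_primary-&gt;put(db_primary, tid, key, data, 0)) != 0 ||
+	    (ret = db_index-&gt;put(db_index, tid, skey, key, 0)) != 0) {
+		/* Neither change becomes visible to other threads. */
+		(void)tid-&gt;abort(tid);
+		return (ret);
+	}
+<p>
+	/* Both changes become visible atomically at commit. */
+	return (tid-&gt;commit(tid, 0));
+}</pre></blockquote>
+<p>If either write returns <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, the abort releases
+all locks held by the transaction and the caller may simply retry the
+whole operation.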
+<p>Any number of operations on any number of databases can be included in
+a single transaction to ensure the atomicity of the operations. There
+is, however, a trade-off between the number of operations included in
+a single transaction and both throughput and the possibility of
+deadlock. The reason for this is because transactions acquire locks
+throughout their lifetime and do not release the locks until commit or
+abort time. So, the more operations included in a transaction, the more
+likely it is that a transaction will block other operations and that
+deadlock will occur. However, each transaction commit requires a
+synchronous disk I/O, so grouping multiple operations into a transaction
+can increase overall throughput. (There is one exception to this: the
+<a href="../../api_c/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a> and <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flags cause
+transactions to exhibit the ACI (atomicity, consistency and isolation)
+properties, but not D (durability); avoiding the write and/or
+synchronous disk I/O on transaction commit greatly increases transaction
+throughput for some applications.)
+<p>When applications do create complex transactions, they often avoid
+having more than one complex transaction at a time because simple
+operations like a single <a href="../../api_c/db_put.html">DB-&gt;put</a> are unlikely to deadlock with
+each other or the complex transaction; while multiple complex
+transactions are likely to deadlock with each other because they will
+both acquire many locks over their lifetime. Alternatively, complex
+transactions can be broken up into smaller sets of operations, and each
+of those sets may be encapsulated in a nested transaction. Because
+nested transactions may be individually aborted and retried without
+causing the entire transaction to be aborted, this allows complex
+transactions to proceed even in the face of heavy contention, repeatedly
+trying the suboperations until they succeed.
+<p>It is also helpful to order operations within a transaction; that is,
+access the databases and items within the databases in the same order,
+to the extent possible, in all transactions. Accessing databases and
+items in different orders greatly increases the likelihood of operations
+being blocked and failing due to deadlocks.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/inc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/checkpoint.html b/libdb/docs/ref/transapp/checkpoint.html
new file mode 100644
index 0000000..9001538
--- /dev/null
+++ b/libdb/docs/ref/transapp/checkpoint.html
@@ -0,0 +1,124 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Checkpoints</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/archival.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Checkpoints</h1>
+<p>The second component of the infrastructure is performing checkpoints of
+the log files. As transactions commit, change records are written into
+the log files, but the actual changes to the database are not
+necessarily written to disk. When a checkpoint is performed, the
+changes to the database that are part of committed transactions are
+written into the backing database file.
+<p>Performing checkpoints is necessary for two reasons. First, you can
+remove the Berkeley DB log files from your system only after a checkpoint.
+Second, the frequency of your checkpoints is inversely proportional to
+the amount of time it takes to run database recovery after a system or
+application failure.
+<p>Once the database pages are written, log files can be archived and removed
+from the system because they will never be needed for anything other than
+catastrophic failure. In addition, recovery after system or application
+failure has to redo or undo changes only since the last checkpoint, because
+changes before the checkpoint have all been flushed to the filesystem.
+<p>Berkeley DB provides a separate utility, <a href="../../utility/db_checkpoint.html">db_checkpoint</a>, which can be
+used to perform checkpoints. Alternatively, applications can write
+their own checkpoint utility using the underlying <a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a>
+function. The following code fragment checkpoints the database
+environment every 60 seconds:
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+<b> /* Start a checkpoint thread. */
+ if ((errno = pthread_create(
+ &ptid, NULL, checkpoint_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning checkpoint thread: %s\n",
+ strerror(errno));
+ exit (1);
+ }</b>
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+<p>
+ add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);
+<p>
+ return (0);
+}
+<p>
+<b>void *
+checkpoint_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ dbenv = arg;
+ dbenv-&gt;errx(dbenv, "Checkpoint thread: %lu", (u_long)pthread_self());
+<p>
+ /* Checkpoint once a minute. */
+ for (;; sleep(60))
+ if ((ret = dbenv-&gt;txn_checkpoint(dbenv, 0, 0, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "checkpoint thread");
+ exit (1);
+ }
+<p>
+ /* NOTREACHED */
+}</b></pre></blockquote>
+<p>Because checkpoints can be quite expensive, choosing how often to
+perform a checkpoint is a common tuning parameter for Berkeley DB
+applications.
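+<p>One way to reduce that cost is to pass non-zero <b>kbyte</b> and
+<b>min</b> arguments to <a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a>, so that a
+checkpoint is skipped when little has happened since the previous one.
+The following variant of the checkpoint thread is a sketch only; the 1MB
+and 10 minute thresholds are arbitrary values chosen for illustration,
+not recommendations:
+<p><blockquote><pre>void *
+checkpoint_thread_tuned(void *arg)
+{
+	DB_ENV *dbenv;
+	int ret;
+<p>
+	dbenv = arg;
+<p>
+	/*
+	 * Ask for a checkpoint every 60 seconds, but let Berkeley DB skip it
+	 * if less than 1MB of log has been written and less than 10 minutes
+	 * have passed since the last checkpoint.
+	 */
+	for (;; sleep(60))
+		if ((ret = dbenv-&gt;txn_checkpoint(dbenv, 1024, 10, 0)) != 0) {
+			dbenv-&gt;err(dbenv, ret, "checkpoint thread");
+			exit (1);
+		}
+<p>
+	/* NOTREACHED */
+}</pre></blockquote>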
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/archival.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/cursor.html b/libdb/docs/ref/transapp/cursor.html
new file mode 100644
index 0000000..6a323cc
--- /dev/null
+++ b/libdb/docs/ref/transapp/cursor.html
@@ -0,0 +1,171 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transactional cursors</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/read.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/nested.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transactional cursors</h1>
+<p>Berkeley DB cursors may be used inside a transaction, exactly as any other
+<a href="../../api_c/db_class.html">DB</a> method. The enclosing transaction ID must be specified when
+the cursor is created, but it does not then need to be further specified
+on operations performed using the cursor. One important point to
+remember is that a cursor <b>must be closed</b> before the enclosing
+transaction is committed or aborted.
+<p>The following code fragment uses a cursor to store a new key in the cats
+database with four associated data items. The key is a name. The data
+items are a company name, an address, and a list of the breeds of cat
+owned. Each of the data entries is stored as a duplicate data item.
+In this example, transactions are necessary to ensure that either all or none
+of the data items appear in case of system or application failure.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+<p>
+<b> add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);</b>
+<p>
+ return (0);
+}
+<p>
+<b>int
+add_cat(DB_ENV *dbenv, DB *db, char *name, ...)
+{
+ va_list ap;
+ DBC *dbc;
+ DBT key, data;
+ DB_TXN *tid;
+ int fail, ret, t_ret;
+ char *s;
+<p>
+ /* Initialization. */
+ fail = 0;
+<p>
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = name;
+ key.size = strlen(name);
+<p>
+retry: /* Begin the transaction. */
+ if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;txn_begin");
+ exit (1);
+ }
+<p>
+ /* Delete any previously existing item. */
+ switch (ret = db-&gt;del(db, tid, &key, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ break;
+ case DB_LOCK_DEADLOCK:
+ default:
+ /* Retry the operation. */
+ if ((t_ret = tid-&gt;abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+ exit (1);
+ }
+ if (++fail == MAXIMUM_RETRY)
+ return (ret);
+ goto retry;
+ }
+<p>
+ /* Create a cursor. */
+ if ((ret = db-&gt;cursor(db, tid, &dbc, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db-&gt;cursor");
+ exit (1);
+ }
+<p>
+ /* Append the items, in order. */
+ va_start(ap, name);
+ while ((s = va_arg(ap, char *)) != NULL) {
+ data.data = s;
+ data.size = strlen(s);
+ switch (ret = dbc-&gt;c_put(dbc, &key, &data, DB_KEYLAST)) {
+ case 0:
+ break;
+ case DB_LOCK_DEADLOCK:
+ default:
+ va_end(ap);
+<p>
+ /* Retry the operation. */
+ if ((t_ret = dbc-&gt;c_close(dbc)) != 0) {
+ dbenv-&gt;err(
+ dbenv, t_ret, "dbc-&gt;c_close");
+ exit (1);
+ }
+ if ((t_ret = tid-&gt;abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+ exit (1);
+ }
+ if (++fail == MAXIMUM_RETRY)
+ return (ret);
+ goto retry;
+ }
+ }
+ va_end(ap);
+<p>
+ /* Success: commit the change. */
+ if ((ret = dbc-&gt;c_close(dbc)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "dbc-&gt;c_close");
+ exit (1);
+ }
+ if ((ret = tid-&gt;commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_TXN-&gt;commit");
+ exit (1);
+ }
+ return (0);
+}</b></pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/read.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/nested.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/data_open.html b/libdb/docs/ref/transapp/data_open.html
new file mode 100644
index 0000000..ff22d28
--- /dev/null
+++ b/libdb/docs/ref/transapp/data_open.html
@@ -0,0 +1,143 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening the databases</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/env_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening the databases</h1>
+<p>Next, we open three databases ("color" and "fruit" and "cats"), in the
+database environment. Again, our <a href="../../api_c/db_class.html">DB</a> database handles are
+declared to be free-threaded using the <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag, and so
+may be used by any number of threads we subsequently create.
+<p><blockquote><pre>int
+main(int argc, char *argv)
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+<b> /* Open database: Key is fruit class; Data is specific type. */
+ if (db_open(dbenv, &db_fruit, "fruit", 0))
+ return (1);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ if (db_open(dbenv, &db_color, "color", 0))
+ return (1);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, cat breeds.
+ */
+ if (db_open(dbenv, &db_cats, "cats", 1))
+ return (1);</b>
+<p>
+ return (0);
+}
+<p>
+<b>int
+db_open(DB_ENV *dbenv, DB **dbp, char *name, int dups)
+{
+ DB *db;
+ int ret;
+<p>
+ /* Create the database handle. */
+ if ((ret = db_create(&db, dbenv, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "db_create");
+ return (1);
+ }
+<p>
+ /* Optionally, turn on duplicate data items. */
+ if (dups && (ret = db-&gt;set_flags(db, DB_DUP)) != 0) {
+ (void)db-&gt;close(db, 0);
+ dbenv-&gt;err(dbenv, ret, "db-&gt;set_flags: DB_DUP");
+ return (1);
+ }
+<p>
+ /*
+ * Open a database in the environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * read/write owner only
+ */
+ if ((ret = db-&gt;open(db, NULL, name, NULL, DB_BTREE,
+ DB_CREATE | DB_THREAD | DB_AUTO_COMMIT, S_IRUSR | S_IWUSR)) != 0) {
+ (void)db-&gt;close(db, 0);
+ dbenv-&gt;err(dbenv, ret, "db-&gt;open: %s", name);
+ return (1);
+ }
+<p>
+ *dbp = db;
+ return (0);
+}</b></pre></blockquote>
+<p>After opening the database, we can use the <a href="../../utility/db_stat.html">db_stat</a> utility to
+display information about a database we have created:
+<p><blockquote><pre>prompt&gt; db_stat -h TXNAPP -d color
+53162 Btree magic number.
+8 Btree version number.
+Flags:
+2 Minimum keys per-page.
+8192 Underlying database page size.
+1 Number of levels in the tree.
+0 Number of unique keys in the tree.
+0 Number of data items in the tree.
+0 Number of tree internal pages.
+0 Number of bytes free in tree internal pages (0% ff).
+1 Number of tree leaf pages.
+8166 Number of bytes free in tree leaf pages (0.% ff).
+0 Number of tree duplicate pages.
+0 Number of bytes free in tree duplicate pages (0% ff).
+0 Number of tree overflow pages.
+0 Number of bytes free in tree overflow pages (0% ff).
+0 Number of pages on the free list.</pre></blockquote>
+<p>The database open must be enclosed within a transaction in order to be
+recoverable. The transaction will ensure that created files are
+re-created in recovered environments (or do not appear at all).
+Additional database operations or operations on other databases can be
+included in the same transaction, of course. In the simple case, where
+the open is the only operation in the transaction, an application can
+set the <a href="../../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag instead of creating and managing
+its own transaction handle. The <a href="../../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag will
+internally wrap the operation in a transaction, simplifying application
+code.
+<p>The previous example is the simplest case of transaction protection for
+database open. Obviously, additional database operations can be done
+in the scope of the same transaction. For example, an application
+maintaining a list of the databases in a database environment in a
+well-known file might include an update of the list in the same
+transaction in which the database is created. Or, an application might
+create both a primary and secondary database in a single transaction.
+<p><a href="../../api_c/db_class.html">DB</a> handles that will later be used for transactionally protected
+operations must be opened within a transaction. Specifying a
+transaction handle to operations using handles not opened within a
+transaction will return an error. Similarly, not specifying a
+transaction handle to operations using handles that were opened within
+a transaction will also return an error.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/env_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/deadlock.html b/libdb/docs/ref/transapp/deadlock.html
new file mode 100644
index 0000000..f188a89
--- /dev/null
+++ b/libdb/docs/ref/transapp/deadlock.html
@@ -0,0 +1,109 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock detection</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/admin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock detection</h1>
+<p>The first component of the infrastructure, <i>deadlock
+detection</i>, is not so much a requirement specific to
+transaction-protected applications, but instead is necessary for almost
+all applications in which more than a single thread of control will be
+accessing the database at one time. Even when Berkeley DB automatically
+handles database locking, it is normally possible for deadlock to occur.
+Because the underlying database access methods may update multiple pages
+during a single Berkeley DB API call, deadlock is possible even when threads
+of control are making only single update calls into the database. The
+exception to this rule is when all the threads of control accessing the
+database are read-only or when the Berkeley DB Concurrent Data Store product is used; the Berkeley DB Concurrent Data Store
+product guarantees deadlock-free operation at the expense of reduced
+concurrency.
+<p>When a deadlock occurs, two (or more) threads of control each request
+additional locks that can never be granted because another waiting thread
+of control holds the requested resource. For example, consider
+two processes: A and B. Let's say that A obtains a write lock on item
+X, and B obtains a write lock on item Y. Then, A requests a lock on Y,
+and B requests a lock on X. A will wait until resource Y becomes
+available and B will wait until resource X becomes available.
+Unfortunately, because both A and B are waiting, neither will release
+the locks they hold and neither will ever obtain the resource on which
+it is waiting. For another example, consider two transactions, A and
+B, each of which may want to modify item X. Assume that transaction A
+obtains a read lock on X and confirms that a modification is needed.
+Then it is descheduled and the thread containing transaction B runs.
+At that time, transaction B obtains a read lock on X and confirms that
+it also wants to make a modification. Both transactions A and B will
+block when they attempt to upgrade their read locks to write locks
+because the other already holds a read lock. This is a deadlock.
+Transaction A cannot make forward progress until Transaction B releases
+its read lock on X, but Transaction B cannot make forward progress until
+Transaction A releases its read lock on X.
+<p>In order to detect that deadlock has happened, a separate process or
+thread must review the locks currently held in the database. If
+deadlock has occurred, a victim must be selected, and that victim will
+then return the error <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> from whatever Berkeley DB call
+it was making. Berkeley DB provides a separate UNIX-style utility that can
+be used to perform this deadlock detection, named <a href="../../utility/db_deadlock.html">db_deadlock</a>.
+Alternatively, applications can create their own deadlock utility or
+thread using the underlying <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> function, or specify
+that Berkeley DB run the deadlock detector internally whenever there is a
+conflict over a lock (see <a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a> for more
+information). The following code fragment does the latter:
+<p><blockquote><pre>void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+<p>
+ /* Set up error handling. */
+ dbenv-&gt;set_errpfx(dbenv, "txnapp");
+<p>
+<b> /* Do deadlock detection internally. */
+ if ((ret = dbenv-&gt;set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "set_lk_detect: DB_LOCK_DEFAULT");
+ exit (1);
+ }</b>
+<p>
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv-&gt;open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "dbenv-&gt;open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+<p>
+ *dbenvp = dbenv;
+}</pre></blockquote>
+<p>Deciding how often to run the deadlock detector and which of the
+deadlocked transactions will be forced to abort when the deadlock is
+detected is a common tuning parameter for Berkeley DB applications.
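+<p>For applications that would rather run detection on a schedule than on
+every lock conflict, a dedicated thread can call <a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a>
+directly. The following sketch is modeled on the checkpoint thread shown
+later in this chapter; the one-second interval is an arbitrary value
+chosen for illustration:
+<p><blockquote><pre>void *
+deadlock_thread(void *arg)
+{
+	DB_ENV *dbenv;
+	int rejected, ret;
+<p>
+	dbenv = arg;
+<p>
+	/* Walk the lock table roughly once a second. */
+	for (;; sleep(1))
+		if ((ret = dbenv-&gt;lock_detect(dbenv,
+		    0, DB_LOCK_DEFAULT, &rejected)) != 0) {
+			dbenv-&gt;err(dbenv, ret, "deadlock detection thread");
+			exit (1);
+		}
+<p>
+	/* NOTREACHED */
+}</pre></blockquote>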
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/admin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/env_open.html b/libdb/docs/ref/transapp/env_open.html
new file mode 100644
index 0000000..ef2ad0d
--- /dev/null
+++ b/libdb/docs/ref/transapp/env_open.html
@@ -0,0 +1,175 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Opening the environment</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/data_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Opening the environment</h1>
+<p>Creating transaction-protected applications using the Berkeley DB library is
+quite easy. Applications first use <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> to initialize
+the database environment. Transaction-protected applications normally
+require all four Berkeley DB subsystems, so the <a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>,
+<a href="../../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>, and <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flags
+should be specified.
+<p>Once the application has called <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, it opens its
+databases within the environment. Once the databases are opened, the
+application makes changes to the databases inside of transactions. Each
+set of changes that entails a unit of work should be surrounded by the
+appropriate <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>, and <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>
+calls. The Berkeley DB access methods will make the appropriate calls into
+the Lock, Log and Memory Pool subsystems in order to guarantee
+transaction semantics. When the application is ready to exit, all
+outstanding transactions should have been committed or aborted.
+<p>Databases accessed by a transaction must not be closed during the
+transaction. Once all outstanding transactions are finished, all open
+Berkeley DB files should be closed. When the Berkeley DB database files have been
+closed, the environment should be closed by calling
+<a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>.
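+<p>As a minimal sketch of that shutdown ordering (the env_close helper name
+is illustrative and not part of the example program that follows), the
+environment is closed only after every database handle has been closed
+and every transaction resolved:
+<p><blockquote><pre>void
+env_close(DB_ENV *dbenv)
+{
+	int ret;
+<p>
+	/*
+	 * All transactions must already be committed or aborted, and all
+	 * DB handles opened in this environment must already be closed.
+	 */
+	if ((ret = dbenv-&gt;close(dbenv, 0)) != 0) {
+		fprintf(stderr,
+		    "txnapp: dbenv-&gt;close: %s\n", db_strerror(ret));
+		exit (1);
+	}
+}</pre></blockquote>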
+<p>The following code fragment creates the database environment directory
+then opens the environment, running recovery. Our <a href="../../api_c/env_class.html">DB_ENV</a>
+database environment handle is declared to be free-threaded using the
+<a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a> flag, and so may be used by any number of threads that
+we may subsequently create.
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;sys/stat.h&gt;
+<p>
+#include &lt;errno.h&gt;
+#include &lt;stdarg.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+<p>
+#include &lt;db.h&gt;
+<p>
+#define ENV_DIRECTORY "TXNAPP"
+<p>
+void env_dir_create(void);
+void env_open(DB_ENV **);
+<p>
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ return (0);
+}
+<p>
+void
+env_dir_create()
+{
+ struct stat sb;
+<p>
+ /*
+ * If the directory exists, we're done. We do not further check
+ * the type of the file, DB will fail appropriately if it's the
+ * wrong type.
+ */
+ if (stat(ENV_DIRECTORY, &sb) == 0)
+ return;
+<p>
+ /* Create the directory, read/write/access owner only. */
+ if (mkdir(ENV_DIRECTORY, S_IRWXU) != 0) {
+ fprintf(stderr,
+ "txnapp: mkdir: %s: %s\n", ENV_DIRECTORY, strerror(errno));
+ exit (1);
+ }
+}
+<p>
+void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+<p>
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+<p>
+ /* Set up error handling. */
+ dbenv-&gt;set_errpfx(dbenv, "txnapp");
+<p>
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv-&gt;open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ (void)dbenv-&gt;close(dbenv, 0);
+ dbenv-&gt;err(dbenv, ret, "dbenv-&gt;open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+<p>
+ *dbenvp = dbenv;
+}</pre></blockquote>
+<p>After running this initial program, we can use the <a href="../../utility/db_stat.html">db_stat</a>
+utility to display the contents of the environment directory:
+<p><blockquote><pre>prompt&gt; db_stat -e -h TXNAPP
+3.2.1 Environment version.
+120897 Magic number.
+0 Panic value.
+1 References.
+6 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Mpool Region: 4.
+264KB Size (270336 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Log Region: 3.
+96KB Size (98304 bytes).
+-1 Segment ID.
+3 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Lock Region: 2.
+240KB Size (245760 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Txn Region: 5.
+8KB Size (8192 bytes).
+-1 Segment ID.
+1 Locks granted without waiting.
+0 Locks granted after waiting.</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/data_open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/faq.html b/libdb/docs/ref/transapp/faq.html
new file mode 100644
index 0000000..fbb74dd
--- /dev/null
+++ b/libdb/docs/ref/transapp/faq.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/throughput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction FAQ</h1>
+<p><ol>
+<p><li><b>What should a transactional program do when an error occurs?</b>
+<p>Any time an error occurs, such that a transactionally protected set of
+operations cannot complete successfully, the transaction must be
+aborted. While deadlock is by far the most common of these errors,
+there are other possibilities; for example, running out of disk space
+for the filesystem. In Berkeley DB transactional applications, there are
+three classes of error returns: "expected" errors, "unexpected but
+recoverable" errors, and a single "unrecoverable" error. Expected
+errors are errors like <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>, which indicates that a
+searched-for key item is not present in the database. Applications may
+want to explicitly test for and handle this error, or, in the case where
+the absence of a key implies the enclosing transaction should fail,
+simply call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>. Unexpected but recoverable errors are
+errors like <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, which indicates that an operation
+has been selected to resolve a deadlock, or a system error such as EIO,
+which likely indicates that the filesystem has no available disk space.
+Applications must immediately call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> when these returns
+occur, as it is not possible to proceed otherwise. The only
+unrecoverable error is <a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, which indicates that the
+system must stop and recovery must be run (a minimal error-handling sketch follows this list).
+<p><li><b>How can hot backups work? Can't you get an inconsistent picture
+of the database when you copy it?</b>
+<p>First, Berkeley DB is based on the technique of "write-ahead logging", which
+means that before any change is made to a database, a log record is
+written that describes the change. Further, Berkeley DB guarantees that the
+log record that describes the change will always be written to stable
+storage (that is, disk) before the database page where the change was
+made is written to stable storage. Because of this guarantee, we know
+that any change made to a database will appear either in just a log
+file, or both the database and a log file, but never in just the
+database.
+<p>Second, you can always create a consistent and correct database based
+on the log files and the databases from a database environment. So,
+during a hot backup, we first make a copy of the databases and then a
+copy of the log files. The tricky part is that there may be pages in
+the database that are related for which we won't get a consistent
+picture during this copy. For example, let's say that we copy pages
+1-4 of the database, and then are swapped out. For whatever reason
+(perhaps because we needed to flush pages from the cache, or because of
+a checkpoint), the database pages 1 and 5 are written. Then, the hot
+backup process is re-scheduled, and it copies page 5. Obviously, we
+have an inconsistent database snapshot, because we have a copy of page
+1 from before it was written by the other thread of control, and a copy
+of page 5 after it was written by the other thread. What makes this
+work is the order of operations in a hot backup. Because of the
+write-ahead logging guarantees, we know that any page written to the
+database will first be referenced in the log. If we copy the database
+first, then we can also know that any inconsistency in the database will
+be described in the log files, and so we know that we can fix everything
+up during recovery.
+<p><li><b>How can I move a database from one transactional environment
+into another?</b>
+<p>Because database pages contain references to log records, databases
+cannot be simply moved into different database environments. To move
+a database into a different environment, dump and reload the database
+before moving it. If the database is too large to dump and reload, the
+database may be prepared in place by setting the first eight bytes of
+each database page in the file to 0.
+<p><li><b>I'm seeing the error "log_flush: LSN past current end-of-log",
+what does that mean?</b>
+<p>The most common cause of this error is that a system administrator has
+removed all of the log files from a database environment. You should
+shut down your database environment as gracefully as possible, first
+flushing the database environment cache to disk, if that's possible.
+Then, dump and reload your databases. If your databases are too large
+to dump and reload, the database may be repaired in place by setting
+the first eight bytes of each database page in the file to 0, but if
+you do that, you must verify your databases before using them again.
+(It is possible for the databases to be corrupted when this happens,
+and the longer the application runs, the worse it can get.)
+</ol>
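+<p>Referring to the first question above, the following sketch shows one
+way an application might classify Berkeley DB returns and resolve the
+enclosing transaction. The handle_db_error helper and its calling
+convention are illustrative assumptions, not part of the Berkeley DB API:
+<p><blockquote><pre>int
+handle_db_error(DB_ENV *dbenv, DB_TXN *tid, int ret)
+{
+	switch (ret) {
+	case 0:
+	case DB_NOTFOUND:
+		/* Expected returns: the application decides what to do. */
+		return (ret);
+	case DB_RUNRECOVERY:
+		/* Unrecoverable: stop and run recovery. */
+		dbenv-&gt;err(dbenv, ret, "unrecoverable error");
+		exit (1);
+		/* NOTREACHED */
+	case DB_LOCK_DEADLOCK:
+	default:
+		/* Unexpected but recoverable: abort, then have the caller retry. */
+		(void)tid-&gt;abort(tid);
+		return (ret);
+	}
+}</pre></blockquote>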
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/throughput.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/filesys.html b/libdb/docs/ref/transapp/filesys.html
new file mode 100644
index 0000000..419781e
--- /dev/null
+++ b/libdb/docs/ref/transapp/filesys.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recovery and filesystem operations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/hotfail.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recovery and filesystem operations</h1>
+<p>The Berkeley DB API supports creating, removing and renaming files. Creating
+files is supported by the <a href="../../api_c/db_open.html">DB-&gt;open</a> method. Removing files is
+supported by the <a href="../../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a> and <a href="../../api_c/db_remove.html">DB-&gt;remove</a> methods.
+Renaming files is supported by the <a href="../../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a> and
+<a href="../../api_c/db_rename.html">DB-&gt;rename</a> methods. (There are two methods for removing and renaming
+files because one of the methods is transactionally protected and one
+is not.)
+<p>Berkeley DB does not permit specifying the <a href="../../api_c/db_open.html#DB_TRUNCATE">DB_TRUNCATE</a> flag when
+opening a file in a transaction-protected environment. This is an
+implicit file deletion, but one that does not always require the same
+operating system file permissions as deleting and creating a file do.
+<p>If you have changed the name of a file or deleted it outside of the
+Berkeley DB library (for example, you explicitly removed a file using your
+normal operating system utilities), then it is possible that recovery
+will not be able to find a database to which the log refers. In this
+case, <a href="../../utility/db_recover.html">db_recover</a> will produce a warning message, saying it was
+unable to locate a file it expected to find. This message is only a
+warning because the file may have been subsequently deleted as part of
+normal database operations before the failure occurred, so is not
+necessarily a problem.
+<p>Generally, any filesystem operations that are performed outside the
+Berkeley DB interface should be performed at the same time as making a
+snapshot of the database. To perform filesystem operations correctly,
+do the following:
+<p><ol>
+<p><li>Cleanly shut down database operations.
+<p>To shut down database operations cleanly, all applications accessing
+the database environment must be shut down and a transaction checkpoint
+must be taken. If the applications are not implemented so they can be
+shut down gracefully (that is, closing all references to the database
+environment), recovery must be performed after all applications have
+been killed to ensure that the underlying databases are consistent on
+disk.
+<p><li>Perform the filesystem operations; for example, remove or rename one or
+more files.
+<p><li>Make an archival snapshot of the database.
+<p>Although this step is not strictly necessary, it is strongly
+recommended. If this step is not performed, recovery from catastrophic
+failure will require that recovery first be performed up to the time of
+the filesystem operations, the filesystem operations be redone, and then
+recovery be performed from the filesystem operations forward.
+<p><li>Restart the database applications.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/hotfail.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/reclimit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/hotfail.html b/libdb/docs/ref/transapp/hotfail.html
new file mode 100644
index 0000000..e6d84a5
--- /dev/null
+++ b/libdb/docs/ref/transapp/hotfail.html
@@ -0,0 +1,83 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Hot failover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Hot failover</h1>
+<p>For some applications, it may be useful to periodically snapshot the
+database environment for use as a hot failover should the primary system
+fail. The following steps can be taken to keep a backup environment in
+close synchrony with an active environment. The active environment is
+entirely unaffected by these procedures, and both read and write
+operations are allowed during all steps described here.
+<p><ol>
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> in the active environment to
+identify all of the active environment's database files, and copy them
+to the backup directory.
+<p>If the database files are stored in a separate directory from the other
+Berkeley DB files, it may be simpler to copy the directory itself instead of
+the individual files (see <a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> for additional
+information). <b>Note: if any of the database files did not have
+an open <a href="../../api_c/db_class.html">DB</a> handle during the lifetime of the current log files,
+<a href="../../utility/db_archive.html">db_archive</a> will not list them in its output!</b> This is another
+reason it may be simpler to use a separate database file directory and
+copy the entire directory instead of archiving only the files listed by
+<a href="../../utility/db_archive.html">db_archive</a>.
+<p><li>Remove all existing log files from the backup directory.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-l</b> in the active environment to
+identify all of the active environment's log files, and copy them to
+the backup directory.
+<p><li>Run <a href="../../utility/db_recover.html">db_recover</a> <b>-c</b> in the backup directory to
+catastrophically recover the copied environment.
+</ol>
+<p>Steps 2, 3 and 4 may be repeated as often as you like. If Step 1 (the
+initial copy of the database files) is repeated, then Steps 2, 3 and 4
+<b>must</b> be performed at least once in order to ensure a consistent
+database environment snapshot.
+<p>These procedures must be integrated with your other archival procedures,
+of course. If you are periodically removing log files from your active
+environment, you must be sure to copy them to the backup directory
+before removing them from the active directory. Not copying a log file
+to the backup directory before removing it, and subsequently running
+recovery in the backup without it, may leave the backup snapshot of the
+environment corrupted. A
+simple way to ensure this never happens is to archive the log files in
+Step 2 as you remove them from the backup directory, and move inactive
+log files from your active environment into your backup directory
+(rather than copying them), in Step 3. The following steps describe
+this procedure in more detail:
+<p><ol>
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> in the active environment to
+identify all of the active environment's database files, and copy them
+to the backup directory.
+<p><li>Archive all existing log files from the backup directory, moving them
+to a backup device such as CD-ROM, alternate disk, or tape.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> (without any option) in the active environment
+to identify all of the log files in the active environment that are no
+longer in use, and <b>move</b> them to the backup directory.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-l</b> in the active environment to
+identify all of the remaining log files in the active environment, and
+<b>copy</b> the log files to the backup directory.
+<p><li>Run <a href="../../utility/db_recover.html">db_recover</a> <b>-c</b> in the backup directory to
+catastrophically recover the copied environment.
+</ol>
+<p>As before, steps 2, 3, 4 and 5 may be repeated as often as you like.
+If Step 1 (the initial copy of the database files) is repeated, then
+Steps 2 through 5 <b>must</b> be performed at least once in order to
+ensure a consistent database environment snapshot.
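+<p>The following fragment is a minimal sketch, in the style of this chapter's
+examples, of how an application might build the same two file lists itself by
+calling the DB_ENV-&gt;log_archive method instead of running the
+<a href="../../utility/db_archive.html">db_archive</a> utility. The
+copy_file() helper and the BACKUP_DIR constant are hypothetical placeholders
+for whatever copy mechanism your platform provides:
+<p><blockquote><pre>int
+snapshot_file_lists(DB_ENV *dbenv)
+{
+    char **list, **p;
+    int ret;
+<p>
+    /* Equivalent of "db_archive -s": the database files. */
+    if ((ret = dbenv-&gt;log_archive(dbenv, &list, DB_ARCH_DATA)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;log_archive: DB_ARCH_DATA");
+        return (ret);
+    }
+    if (list != NULL) {
+        for (p = list; *p != NULL; ++p)
+            copy_file(*p, BACKUP_DIR);    /* hypothetical helper */
+        free(list);
+    }
+<p>
+    /* Equivalent of "db_archive -l": the log files. */
+    if ((ret = dbenv-&gt;log_archive(dbenv, &list, DB_ARCH_LOG)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;log_archive: DB_ARCH_LOG");
+        return (ret);
+    }
+    if (list != NULL) {
+        for (p = list; *p != NULL; ++p)
+            copy_file(*p, BACKUP_DIR);    /* hypothetical helper */
+        free(list);
+    }
+    return (0);
+}</pre></blockquote>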
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/inc.html b/libdb/docs/ref/transapp/inc.html
new file mode 100644
index 0000000..d22887c
--- /dev/null
+++ b/libdb/docs/ref/transapp/inc.html
@@ -0,0 +1,166 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Isolation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/atomicity.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/read.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Isolation</h1>
+<p>The third reason listed for using transactions was <i>isolation</i>.
+Consider an application suite in which multiple threads of control
+(multiple processes or threads in one or more processes) are changing
+the values associated with a key in one or more databases. Specifically,
+they are taking the current value, incrementing it, and then storing it
+back into the database.
+<p>Such an application requires isolation. Because we want to change a value
+in the database, we must make sure that after we read it, no other thread
+of control modifies it. For example, assume that both thread #1 and
+thread #2 are doing similar operations in the database, where thread #1
+is incrementing records by 3, and thread #2 is incrementing records by
+5. We want to increment the record by a total of 8. If the operations
+interleave in the right (well, wrong) order, that is not what will
+happen:
+<p><blockquote><pre>thread #1 <b>read</b> record: the value is 2
+thread #2 <b>read</b> record: the value is 2
+thread #2 <b>write</b> record + 5 back into the database (new value 7)
+thread #1 <b>write</b> record + 3 back into the database (new value 5)</pre></blockquote>
+<p>As you can see, instead of incrementing the record by a total of 8,
+we've incremented it only by 3 because thread #1 overwrote thread #2's
+change. By wrapping the operations in transactions, we ensure that this
+cannot happen. In a transaction, when the first thread reads the
+record, locks are acquired that will not be released until the
+transaction finishes, guaranteeing that all other readers and writers
+will block, waiting for the first thread's transaction to complete (or
+to be aborted).
+<p>Here is an example function that does transaction-protected increments
+on database records to ensure isolation:
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+<p>
+<b> add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);</b>
+<p>
+ return (0);
+}
+<p>
+<b>int
+add_color(DB_ENV *dbenv, DB *dbp, char *color, int increment)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int fail, original, ret, t_ret;
+ char buf[64];
+<p>
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ key.data = color;
+ key.size = strlen(color);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_MALLOC;
+<p>
+ for (fail = 0;;) {
+ /* Begin the transaction. */
+ if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;txn_begin");
+ exit (1);
+ }
+<p>
+ /*
+ * Get the key. If it exists, we increment the value. If it
+ * doesn't exist, we create it.
+ */
+ switch (ret = dbp-&gt;get(dbp, tid, &key, &data, 0)) {
+ case 0:
+ original = atoi(data.data);
+ break;
+ case DB_LOCK_DEADLOCK:
+ default:
+ /* Retry the operation. */
+ if ((t_ret = tid-&gt;abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+ exit (1);
+ }
+ if (++fail == MAXIMUM_RETRY)
+ return (ret);
+ continue;
+ case DB_NOTFOUND:
+ original = 0;
+ break;
+ }
+ if (data.data != NULL)
+ free(data.data);
+<p>
+ /* Create the new data item. */
+ (void)snprintf(buf, sizeof(buf), "%d", original + increment);
+ data.data = buf;
+ data.size = strlen(buf) + 1;
+<p>
+ /* Store the new value. */
+ switch (ret = dbp-&gt;put(dbp, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = tid-&gt;commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_TXN-&gt;commit");
+ exit (1);
+ }
+ return (0);
+ case DB_LOCK_DEADLOCK:
+ default:
+ /* Retry the operation. */
+ if ((t_ret = tid-&gt;abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+ exit (1);
+ }
+ if (++fail == MAXIMUM_RETRY)
+ return (ret);
+ break;
+ }
+ }
+}</b></pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/atomicity.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/read.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/intro.html b/libdb/docs/ref/transapp/intro.html
new file mode 100644
index 0000000..2faa48e
--- /dev/null
+++ b/libdb/docs/ref/transapp/intro.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB Transactional Data Store applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/cam/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/why.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB Transactional Data Store applications</h1>
+<p>It is difficult to write a useful transactional tutorial and still keep
+within reasonable bounds of documentation; that is, without writing a
+book on transactional programming. We have two goals in this section:
+to familiarize readers with the transactional interfaces of Berkeley DB and
+to provide code building blocks that will be useful for creating
+applications.
+<p>We have not attempted to present this information using a real-world
+application. First, transactional applications are often complex and
+time-consuming to explain. Also, one of our goals is to give you an
+understanding of the wide variety of tools Berkeley DB makes available to you,
+and no single application would use most of the interfaces included in
+the Berkeley DB library. For these reasons, we have chosen to simply present
+the Berkeley DB data structures and programming solutions, using examples that
+differ from page to page. All the examples are included in a standalone
+program you can examine, modify, and run; and from which you will be able
+to extract code blocks for your own applications. Fragments of the
+program will be presented throughout this chapter, and the complete text
+of the <a href="transapp.cs">example program</a> for IEEE/ANSI Std 1003.1 (POSIX)
+standard systems is included in the Berkeley DB distribution.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/cam/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/why.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/logfile.html b/libdb/docs/ref/transapp/logfile.html
new file mode 100644
index 0000000..9ae85f9
--- /dev/null
+++ b/libdb/docs/ref/transapp/logfile.html
@@ -0,0 +1,100 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file removal</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/archival.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/recovery.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file removal</h1>
+<p>The fourth component of the infrastructure, log file removal, concerns
+the ongoing disk consumption of the database log files. Depending on
+the rate at which the application writes to the databases and the
+available disk space, the number of log files may increase quickly
+enough so that disk space will be a resource problem. For this reason,
+you will periodically want to remove log files in order to conserve disk
+space. This procedure is distinct from database and log file archival
+for catastrophic recovery, and you cannot remove the current log files
+simply because you have created a database snapshot or copied log files
+to archival media.
+<p>Log files may be removed at any time, as long as:
+<p><ul type=disc>
+<li>the log file is not involved in an active transaction.
+<li>a checkpoint has been written subsequent to the log file's
+creation.
+<li>the log file is not the only log file in the environment.
+</ul>
+<p>Obviously, if you are preparing for catastrophic failure, you will want
+to copy the log files to archival media before you remove them.
+<p>To remove log files, take the following steps:
+<p><ol>
+<p><li>If you are concerned with catastrophic failure, first copy the log files
+to backup media as described in
+<a href="archival.html">Archival for catastrophic recovery</a>.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> without options to identify all the log files
+that are no longer in use (for example, those no longer involved in an
+active transaction).
+<p><li>Remove those log files from the system.
+</ol>
+<p>The functionality provided by the <a href="../../utility/db_archive.html">db_archive</a> utility is also
+available directly from the Berkeley DB library. The following code fragment
+removes log files no longer needed by the database environment:
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+<b> /* Start a logfile removal thread. */
+ if ((ret = pthread_create(
+ &ptid, NULL, logfile_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning log file removal thread: %s\n",
+ strerror(ret));
+ exit (1);
+ }</b>
+}
+<p>
+<b>void *
+logfile_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+ char **begin, **list;
+<p>
+ dbenv = arg;
+ dbenv-&gt;errx(dbenv,
+ "Log file removal thread: %lu", (u_long)pthread_self());
+<p>
+ /* Check once every 5 minutes. */
+ for (;; sleep(300)) {
+ /* Get the list of log files. */
+ if ((ret = dbenv-&gt;log_archive(dbenv, &list, DB_ARCH_ABS)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;log_archive");
+ exit (1);
+ }
+<p>
+ /* Remove the log files. */
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ if ((ret = remove(*list)) != 0) {
+ dbenv-&gt;err(dbenv,
+ ret, "remove %s", *list);
+ exit (1);
+ }
+ free (begin);
+ }
+ }
+ /* NOTREACHED */
+}</b></pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/archival.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/recovery.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/nested.html b/libdb/docs/ref/transapp/nested.html
new file mode 100644
index 0000000..4d5e3ad
--- /dev/null
+++ b/libdb/docs/ref/transapp/nested.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Nested transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/admin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Nested transactions</h1>
+<p>Berkeley DB provides support for nested transactions. Nested transactions
+allow an application to decompose a large or long-running transaction
+into smaller units that may be independently aborted.
+<p>Normally, when beginning a transaction, the application will pass a NULL
+value for the parent argument to <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>. If, however, the
+parent argument is a <a href="../../api_c/txn_class.html">DB_TXN</a> handle, the newly created transaction
+will be treated as a nested transaction within the parent. Transactions
+may nest arbitrarily deeply. For the purposes of this discussion,
+transactions created with a parent identifier will be called
+<i>child transactions</i>.
+<p>Once a transaction becomes a parent, as long as any of its child
+transactions are unresolved (that is, they have neither committed nor
+aborted), the parent may not issue any Berkeley DB calls except to begin more
+child transactions, or to commit or abort. For example, it may not
+issue any access method or cursor calls. After all of a parent's
+children have committed or aborted, the parent may again request
+operations on its own behalf.
+<p>The semantics of nested transactions are as follows. When a child
+transaction is begun, it inherits all the locks of its parent. This
+means that the child will never block waiting on a lock held by its
+parent. Further, locks held by two children of the same parent will
+also conflict. To make this concrete, consider the following set of
+transactions and lock acquisitions.
+<p>Transaction T1 is the parent transaction. It acquires a write lock on
+item A and then begins two child transactions: C1 and C2. C1 also
+wishes to acquire a write lock on A; this succeeds. If C2 attempts to
+acquire a write lock on A, it will block until C1 releases the lock, at
+which point it will succeed. Now, let's say that C1 acquires a write
+lock on B. If C2 now attempts to obtain a lock on B, it will block.
+However, let's now assume that C1 commits. Its locks are
+anti-inherited, which means they are given to T1, so T1 will now hold
+a lock on B. At this point, C2 would be unblocked and would then
+acquire a lock on B.
+<p>Child transactions are entirely subservient to their parent transaction.
+They may abort, undoing their operations regardless of the eventual fate
+of the parent. However, even if a child transaction commits, if its
+parent transaction is eventually aborted, the child's changes are undone
+and the child's transaction is effectively aborted. Any child
+transactions that are not yet resolved when the parent commits or aborts
+are resolved based on the parent's resolution -- committing if the
+parent commits and aborting if the parent aborts. Any child
+transactions that are not yet resolved when the parent prepares are also
+prepared.
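+<p>The following fragment is a minimal, hypothetical sketch (it is not part of
+this chapter's example program) of the mechanics described above: a child
+transaction is created by passing the parent's <a href="../../api_c/txn_class.html">DB_TXN</a>
+handle to <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, and the
+child may be committed or aborted independently before the parent resolves:
+<p><blockquote><pre>int
+store_two(DB_ENV *dbenv, DB *dbp,
+    DBT *key1, DBT *data1, DBT *key2, DBT *data2)
+{
+    DB_TXN *parent, *child;
+    int ret;
+<p>
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &parent, 0)) != 0)
+        return (ret);
+<p>
+    /* Begin a child transaction nested within the parent. */
+    if ((ret = dbenv-&gt;txn_begin(dbenv, parent, &child, 0)) != 0)
+        goto err;
+<p>
+    /* The first update is performed within the child. */
+    if ((ret = dbp-&gt;put(dbp, child, key1, data1, 0)) != 0) {
+        /* Abort the child; its changes are undone. */
+        (void)child-&gt;abort(child);
+        goto err;
+    }
+    if ((ret = child-&gt;commit(child, 0)) != 0)
+        goto err;
+<p>
+    /* With the child resolved, the parent may do its own work. */
+    if ((ret = dbp-&gt;put(dbp, parent, key2, data2, 0)) != 0)
+        goto err;
+<p>
+    return (parent-&gt;commit(parent, 0));
+<p>
+err:    (void)parent-&gt;abort(parent);
+    return (ret);
+}</pre></blockquote>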
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/cursor.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/admin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/put.html b/libdb/docs/ref/transapp/put.html
new file mode 100644
index 0000000..621cc47
--- /dev/null
+++ b/libdb/docs/ref/transapp/put.html
@@ -0,0 +1,205 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recoverability and deadlock handling</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/data_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/atomicity.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recoverability and deadlock handling</h1>
+<p>The first reason listed for using transactions was recoverability. Any
+logical change to a database may require multiple changes to underlying
+data structures. For example, modifying a record in a Btree may require
+leaf and internal pages to split, so a single <a href="../../api_c/db_put.html">DB-&gt;put</a> method
+call can potentially require that multiple physical database pages be
+written. If only some of those pages are written and then the system
+or application fails, the database is left inconsistent and cannot be
+used until it has been recovered; that is, until the partially completed
+changes have been undone.
+<p><i>Write-ahead logging</i> is the term that describes the underlying
+implementation that Berkeley DB uses to ensure recoverability. What it means
+is that before any change is made to a database, information about the
+change is written to a database log. During recovery, the log is read,
+and databases are checked to ensure that changes described in the log
+for committed transactions appear in the database. Changes that appear
+in the database but are related to aborted or unfinished transactions
+in the log are undone from the database.
+<p>For recoverability after application or system failure, operations that
+modify the database must be protected by transactions. More
+specifically, operations are not recoverable unless a transaction is
+begun and each operation is associated with the transaction via the
+Berkeley DB interfaces, and then the transaction successfully committed. This
+is true even if logging is turned on in the database environment.
+<p>Here is an example function that updates a record in a database in a
+transactionally protected manner. The function takes a key and data
+items as arguments and then attempts to store them into the database.
+<p><blockquote><pre>int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch;
+<p>
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+<p>
+ env_dir_create();
+ env_open(&dbenv);
+<p>
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+<p>
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+<p>
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+<p>
+<b> add_fruit(dbenv, db_fruit, "apple", "yellow delicious");</b>
+<p>
+ return (0);
+}
+<p>
+<b>int
+add_fruit(DB_ENV *dbenv, DB *db, char *fruit, char *name)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int fail, ret, t_ret;
+<p>
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = fruit;
+ key.size = strlen(fruit);
+ data.data = name;
+ data.size = strlen(name);
+<p>
+ for (fail = 0;;) {
+ /* Begin the transaction. */
+ if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;txn_begin");
+ exit (1);
+ }
+<p>
+ /* Store the value. */
+ switch (ret = db-&gt;put(db, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = tid-&gt;commit(tid, 0)) != 0) {
+ dbenv-&gt;err(dbenv, ret, "DB_TXN-&gt;commit");
+ exit (1);
+ }
+ return (0);
+ case DB_LOCK_DEADLOCK:
+ default:
+ /* Retry the operation. */
+ if ((t_ret = tid-&gt;abort(tid)) != 0) {
+ dbenv-&gt;err(dbenv, t_ret, "DB_TXN-&gt;abort");
+ exit (1);
+ }
+ if (++fail == MAXIMUM_RETRY)
+ return (ret);
+ break;
+ }
+ }
+}</b></pre></blockquote>
+<p>Berkeley DB also uses transactions to recover from deadlock. Database
+operations (that is, any call to a function underlying the handles
+returned by <a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>) are usually
+performed on behalf of a unique locker. Transactions can be used to
+perform multiple calls on behalf of the same locker within a single
+thread of control. For example, consider the case in which a cursor
+scan locates a record and then accesses some other item in the database,
+based on that record. If these operations are done using the handle's
+default locker IDs, they may conflict. If the locks are obtained on
+behalf of a transaction, using the transaction's locker ID instead of
+the handle's locker ID, the operations will not conflict.
+<p>There is a new error return in this function that you may not have seen
+before. In transactional (not Concurrent Data Store) applications
+supporting both readers and writers, or just multiple writers, Berkeley DB
+functions have an additional possible error return:
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>. This means that two thread of controls
+deadlocked, and the thread receiving the <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> error
+return has been selected to discard its locks in order to resolve the
+problem. When an application receives a <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>
+return, the correct action is to close any cursors involved in the
+operation and abort any enclosing transaction. In the sample code, any
+time the <a href="../../api_c/db_put.html">DB-&gt;put</a> method returns <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>,
+<a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> is called (which releases the transaction's Berkeley DB
+resources and undoes any partial changes to the databases), and then
+the transaction is retried from the beginning.
+<p>There is no requirement that the transaction be attempted again, but
+that is a common course of action for applications. Applications may
+want to set an upper bound on the number of times an operation will be
+retried because some operations on some data sets may simply be unable
+to succeed. For example, updating all of the pages on a large Web site
+during prime business hours may simply be impossible because of the high
+access rate to the database.
+<p>The <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> method is called in error cases other than deadlock.
+Any time an error occurs, such that a transactionally protected set of
+operations cannot complete successfully, the transaction must be
+aborted. While deadlock is by far the most common of these errors,
+there are other possibilities; for example, running out of disk space
+for the filesystem. In Berkeley DB transactional applications, there are
+three classes of error returns: "expected" errors, "unexpected but
+recoverable" errors, and a single "unrecoverable" error. Expected
+errors are errors like <a href="../../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>, which indicates that a
+searched-for key item is not present in the database. Applications may
+want to explicitly test for and handle this error, or, in the case where
+the absence of a key implies the enclosing transaction should fail,
+simply call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a>. Unexpected but recoverable errors are
+errors like <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>, which indicates that an operation
+has been selected to resolve a deadlock, or a system error such as EIO,
+which likely indicates that the filesystem has no available disk space.
+Applications must immediately call <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> when these returns
+occur, as it is not possible to proceed otherwise. The only
+unrecoverable error is <a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, which indicates that the
+system must stop and recovery must be run.
+<p>It is possible to simplify the above code in the case of a transaction
+consisting entirely of a single database put or delete operation. The
+<a href="../../api_c/db_put.html">DB-&gt;put</a> and <a href="../../api_c/db_del.html">DB-&gt;del</a> method (and other) calls support the
+<a href="../../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag that allows applications to implicitly wrap
+the operation in a transaction. For example, with the
+<a href="../../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag, the above code could be more simply written
+as:
+<p><blockquote><pre><b> for (fail = 0; fail++ &lt;= MAXIMUM_RETRY && (ret =
+ db-&gt;put(db, NULL, &key, &data, DB_AUTO_COMMIT)) == DB_LOCK_DEADLOCK;)
+ ;
+ return (ret == 0 ? 0 : 1);</b></pre></blockquote>
+<p>Programmers should not attempt to enumerate all possible error returns
+in their software. Instead, they should explicitly handle expected
+returns and default to aborting the transaction for the rest. It is
+entirely the choice of the programmer whether to check for
+<a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> explicitly or not -- attempting new Berkeley DB
+operations after <a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> is returned does not worsen the
+situation. Alternatively, using the <a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a> method to
+handle an unrecoverable error and simply doing some number of
+abort-and-retry cycles for any unexpected Berkeley DB or system error in the
+mainline code often results in the simplest and cleanest application
+code.
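+<p>As a minimal, hypothetical sketch of the latter approach, an application
+might install a callback with the <a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a>
+method during environment setup so that <a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>
+is handled in one place; the policy shown here (report the error and exit) is
+only one possible choice:
+<p><blockquote><pre>void
+panic_callback(DB_ENV *dbenv, int errval)
+{
+    /* The environment is unusable until recovery is run. */
+    dbenv-&gt;errx(dbenv, "panic: %s", db_strerror(errval));
+    exit (1);
+}
+<p>
+    /* During environment setup, before the workload starts. */
+    if ((ret = dbenv-&gt;set_paniccall(dbenv, panic_callback)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_paniccall");
+        exit (1);
+    }</pre></blockquote>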
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/data_open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/atomicity.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/read.html b/libdb/docs/ref/transapp/read.html
new file mode 100644
index 0000000..ccd5224
--- /dev/null
+++ b/libdb/docs/ref/transapp/read.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Degrees of isolation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/inc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Degrees of isolation</h1>
+<a name="2"><!--meow--></a>
+<p>Transactions can be isolated from each other to different degrees.
+<i>Repeatable reads</i> provide the most isolation, and mean that,
+for the life of the transaction, every time a thread of control reads
+a data item, it will be unchanged from its previous value (assuming, of
+course, the thread of control does not itself modify the item). Berkeley DB
+enforces repeatable reads whenever database reads are wrapped in
+transactions.
+<p>Most applications do not need to enclose reads in transactions, and when
+possible, transactionally protected reads should be avoided as they can
+cause performance problems. For example, a transactionally protected
+cursor sequentially reading each key/data pair in a database will
+acquire a read lock on most of the pages in the database and so will
+gradually block all write operations on the databases until the
+transaction commits or aborts. Note, however, that if there are update
+transactions present in the application, the read operations must still
+use locking, and must be prepared to repeat any operation (possibly
+closing and reopening a cursor) that fails with a return value of
+<a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>. Applications that need repeatable reads are
+ones that require the ability to repeatedly access a data item knowing
+that it will not have changed (for example, an operation modifying a
+data item based on its existing value).
+<a name="3"><!--meow--></a>
+<a name="4"><!--meow--></a>
+<p>Berkeley DB optionally supports reading uncommitted data; that is, read
+operations may request data which has been modified but not yet
+committed by another transaction. This is done by first specifying the
+<a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag when opening the underlying database, and
+then specifying the <a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag when beginning a
+transaction, opening a cursor, or performing a read operation. The
+advantage of using <a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> is that read operations will
+not block when another transaction holds a write lock on the requested
+data; the disadvantage is that read operations may return data that will
+disappear should the transaction holding the write lock abort.
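+<p>As a minimal sketch in the style of this chapter's examples, and assuming
+the underlying database was opened with the
+<a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag, a
+single read that is willing to see uncommitted data might look as follows;
+the get call will not block on another transaction's write lock:
+<p><blockquote><pre>int
+read_color_dirty(DB *dbp, char *color)
+{
+    DBT key, data;
+    int ret;
+<p>
+    memset(&key, 0, sizeof(key));
+    key.data = color;
+    key.size = strlen(color);
+    memset(&data, 0, sizeof(data));
+    data.flags = DB_DBT_MALLOC;
+<p>
+    /* Request dirty-read semantics for this single get operation. */
+    switch (ret = dbp-&gt;get(dbp, NULL, &key, &data, DB_DIRTY_READ)) {
+    case 0:
+        printf("%s: %s\n", color, (char *)data.data);
+        free(data.data);
+        break;
+    case DB_NOTFOUND:
+        break;
+    default:
+        dbp-&gt;err(dbp, ret, "DB-&gt;get");
+        break;
+    }
+    return (ret);
+}</pre></blockquote>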
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/inc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/cursor.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/reclimit.html b/libdb/docs/ref/transapp/reclimit.html
new file mode 100644
index 0000000..24e6fd6
--- /dev/null
+++ b/libdb/docs/ref/transapp/reclimit.html
@@ -0,0 +1,148 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB recoverability</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/tune.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB recoverability</h1>
+<p>Berkeley DB recovery is based on write-ahead logging. This means that
+when a change is made to a database page, a description of the change is
+written into a log file. This description in the log file is guaranteed
+to be written to stable storage before the database pages that were
+changed are written to stable storage. This is the fundamental feature
+of the logging system that makes durability and rollback work.
+<p>If the application or system crashes, the log is reviewed during
+recovery. Any database changes described in the log that were part of
+committed transactions and that were never written to the actual
+database itself are written to the database as part of recovery. Any
+database changes described in the log that were never committed and that
+were written to the actual database itself are backed-out of the
+database as part of recovery. This design allows the database to be
+written lazily, and only blocks from the log file have to be forced to
+disk as part of transaction commit.
+<p>There are two interfaces that are a concern when considering Berkeley DB
+recoverability:
+<p><ol>
+<p><li>The interface between Berkeley DB and the operating system/filesystem.
+<li>The interface between the operating system/filesystem and the
+underlying stable storage hardware.
+</ol>
+<p>Berkeley DB uses the operating system interfaces and its underlying filesystem
+when writing its files. This means that Berkeley DB can fail if the underlying
+filesystem fails in some unrecoverable way. Otherwise, the interface
+requirements here are simple: The system call that Berkeley DB uses to flush
+data to disk (normally <b>fsync</b>(2)), must guarantee that all the
+information necessary for a file's recoverability has been written to
+stable storage before it returns to Berkeley DB, and that no possible
+application or system crash can cause that file to be unrecoverable.
+<p>In addition, Berkeley DB implicitly uses the interface between the operating
+system and the underlying hardware. The interface requirements here are
+not as simple.
+<p>First, it is necessary to consider the underlying page size of the Berkeley DB
+databases. The Berkeley DB library performs all database writes using the
+page size specified by the application, and Berkeley DB assumes pages are written
+atomically. This means that if the operating system performs filesystem
+I/O in blocks of different sizes than the database page size, it may
+increase the possibility for database corruption. For example, assume
+that Berkeley DB is writing 32KB pages for a database, and the operating
+system does filesystem I/O in 16KB blocks. If the operating system
+writes the first 16KB of the database page successfully, but crashes
+before being able to write the second 16KB of the database, the database
+has been corrupted and this corruption may or may not be detected during
+recovery. For this reason, it may be important to select database page
+sizes that will be written as single block transfers by the underlying
+operating system. If you do not select a page size that the underlying
+operating system will write as a single block, you may want to configure
+the database to use checksums (see the <a href="../../api_c/db_set_flags.html#DB_CHKSUM_SHA1">DB_CHKSUM_SHA1</a> flag for
+more information). By configuring checksums, you guarantee this kind
+of corruption will be detected at the expense of the CPU required to
+generate the checksums. When such an error is detected, the only
+course of recovery is to perform catastrophic recovery to restore the
+database.
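+<p>The following fragment is a minimal sketch of both configuration points:
+the 8KB page size is only an example and should be chosen to match the block
+size the underlying filesystem writes atomically, and both calls must be made
+before the database is opened:
+<p><blockquote><pre>int
+configure_db(DB *dbp)
+{
+    int ret;
+<p>
+    /* Match the database page size to the filesystem block size. */
+    if ((ret = dbp-&gt;set_pagesize(dbp, 8 * 1024)) != 0) {
+        dbp-&gt;err(dbp, ret, "DB-&gt;set_pagesize");
+        return (ret);
+    }
+<p>
+    /* Checksum database pages so partial writes are detected. */
+    if ((ret = dbp-&gt;set_flags(dbp, DB_CHKSUM_SHA1)) != 0) {
+        dbp-&gt;err(dbp, ret, "DB-&gt;set_flags: DB_CHKSUM_SHA1");
+        return (ret);
+    }
+    return (0);
+}</pre></blockquote>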
+<p>Second, if you are copying database files (either as part of doing a
+hot backup or creation of a hot failover area), there is an additional
+question related to the page size of the Berkeley DB databases. You must copy
+databases atomically, in units of the database page size. In other
+words, the reads made by the copy program must not be interleaved with
+writes by other threads of control, and the copy program must read the
+databases in chunks that are a multiple of the underlying database page
+size. Generally, this is not a problem, as operating systems already
+make this guarantee and system utilities normally read in power-of-2
+sized chunks, which are larger than the largest possible Berkeley DB database
+page size.
+<p>However, we have seen one problem in this area because some releases of
+Solaris implemented the cp utility using the mmap system call rather
+than the read system call. Because the Solaris mmap system call did
+not make the same guarantee of read atomicity as the read system call,
+using the cp utility could create corrupted copies of the databases.
+Using the dd utility instead of the cp utility (and specifying an
+appropriate block size), fixed the problem. If you plan to use a system
+utility to copy database files, you may want to use a system call trace
+utility (for example, ktrace or truss) to check for an I/O size smaller
+or not a multiple of the database page size and system calls other than
+read.
+<p>Third, it is necessary to consider the behavior of the system's
+underlying stable storage hardware. For example, consider a SCSI
+controller that has been configured to cache data and return to the
+operating system that the data has been written to stable storage, when,
+in fact, it has only been written into the controller RAM cache. If
+power is lost before the controller is able to flush its cache to disk,
+and the controller cache is not stable (that is, the writes will not be
+flushed to disk when power returns), the writes will be lost. If the
+writes include database blocks, there is no loss because recovery will
+correctly update the database. If the writes include log file blocks,
+it is possible that transactions that were already committed may not
+appear in the recovered database, although the recovered database will
+be coherent after a crash.
+<p>If the underlying hardware can fail in any way so that only part of the
+block was written, the failure conditions are the same as those
+described previously for an operating system failure that writes only
+part of a logical database block. In such cases, configuring the
+database for checksums will ensure the corruption is detected.
+<p>For these reasons, it may be important to select hardware that does not
+do partial writes and does not cache data writes (or does not return
+that the data has been written to stable storage until it has either
+been written to stable storage or the actual writing of all of the data
+is guaranteed, barring catastrophic hardware failure -- that is, your
+disk drive exploding).
+<p>If the disk drive on which you are storing your databases explodes, you
+can perform normal Berkeley DB catastrophic recovery, because it requires only
+a snapshot of your databases plus the log files you have archived since
+those snapshots were taken. In this case, you should lose no database
+changes at all.
+<p>If the disk drive on which you are storing your log files explodes, you
+can also perform catastrophic recovery, but you will lose any database
+changes made as part of transactions committed since your last archival
+of the log files. Alternatively, if your database environment and
+databases are still available after you lose the log file disk, you
+should be able to dump your databases. However, you may see an
+inconsistent snapshot of your data after doing the dump, because
+changes that were part of transactions that were not yet committed
+may appear in the database dump. Depending on the value of the data,
+a reasonable alternative may be to perform both the database dump and
+the catastrophic recovery and then compare the databases created by
+the two methods.
+<p>Regardless, for these reasons, storing your databases and log files on
+different disks should be considered a safety measure as well as a
+performance enhancement.
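+<p>As a minimal sketch (the pathnames are only illustrative), the database
+and log file locations can be configured on the environment handle before
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> is called:
+<p><blockquote><pre>    /* Place database files and log files on different disks. */
+    if ((ret = dbenv-&gt;set_data_dir(dbenv, "/disk1/myapp/data")) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_data_dir");
+        exit (1);
+    }
+    if ((ret = dbenv-&gt;set_lg_dir(dbenv, "/disk2/myapp/logs")) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_lg_dir");
+        exit (1);
+    }</pre></blockquote>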
+<p>Finally, you should be aware that Berkeley DB does not protect against all
+cases of stable storage hardware failure, nor does it protect against
+simple hardware misbehavior (for example, a disk controller writing
+incorrect data to the disk). However, configuring the database for
+checksums will ensure that any such corruption is detected.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/filesys.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/tune.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/recovery.html b/libdb/docs/ref/transapp/recovery.html
new file mode 100644
index 0000000..b2f61d2
--- /dev/null
+++ b/libdb/docs/ref/transapp/recovery.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Recovery procedures</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/logfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/hotfail.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Recovery procedures</h1>
+<p>The fifth component of the infrastructure, recovery procedures, concerns
+the recoverability of the database. After any application or system
+failure, there are two possible approaches to database recovery:
+<p><ol>
+<p><li>There is no need for recoverability, and all databases can be re-created
+from scratch. Although these applications may still need transaction
+protection for other reasons, recovery usually consists of removing the
+Berkeley DB environment home directory and all files it contains, and then
+restarting the application.
+<p><li>It is necessary to recover information after system or application
+failure. In this case, recovery processing must be performed on any
+database environments that were active at the time of the failure.
+Recovery processing involves running the <a href="../../utility/db_recover.html">db_recover</a> utility or
+calling the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method with the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> or
+<a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a> flags.
+<p>During recovery processing, all database changes made by aborted or
+unfinished transactions are undone, and all database changes made by
+committed transactions are redone, as necessary. Database applications
+must not be restarted until recovery completes. After recovery
+finishes, the environment is properly initialized so that applications
+may be restarted.
+</ol>
+<p>If performing recovery, there are two types of recovery processing:
+<i>normal</i> and <i>catastrophic</i>. Which you choose depends
+on the source for the database and log files you are using to recover.
+<p>If up-to-the-minute database and log files are accessible on a stable
+filesystem, normal recovery is usually sufficient. Run the
+<a href="../../utility/db_recover.html">db_recover</a> utility or call the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method specifying
+the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag. Note this case never includes recovery
+using archival snapshots of the database environment. For example, you
+cannot archive databases and log files, restore the backup and then run
+normal recovery -- you must always run catastrophic recovery when using
+archived files.
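+<p>As a minimal sketch (the environment home pathname is only an example),
+running normal recovery as part of opening the environment looks as follows;
+specify <a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a>
+instead of <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> to
+perform catastrophic recovery:
+<p><blockquote><pre>    if ((ret = dbenv-&gt;open(dbenv, "/var/myapp/dbenv",
+        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
+        dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;open");
+        exit (1);
+    }</pre></blockquote>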
+<p>If the database or log files have been destroyed or corrupted, or normal
+recovery fails, catastrophic recovery is required. For example,
+catastrophic failure includes the case where the disk drive on which
+the database or log files are stored has been physically destroyed, or
+when the underlying filesystem is corrupted and the operating system's
+normal filesystem checking procedures cannot bring that filesystem to
+a consistent state. This is often difficult to detect, and a common
+sign of the need for catastrophic recovery is when normal Berkeley DB recovery
+procedures fail, or when checksum errors are displayed during normal
+database procedures. When catastrophic recovery is necessary, take the
+following steps:
+<p><ol>
+<p><li>Restore the most recent snapshots of the database and log files from
+the backup media into the directory where recovery will be performed.
+<p><li>If any log files were archived since the last snapshot was made, they
+should be restored into the directory where recovery will be performed.
+<p>If any log files are available from the database environment that failed
+(for example, the disk holding the database files crashed, but the disk
+holding the log files is fine), those log files should be copied into
+the directory where recovery will be performed.
+<p>Be sure to restore all log files in the order they were written. The
+order is important because it's possible the same log file appears on
+multiple backups, and you want to run recovery using the most recent
+version of each log file.
+<p><li>Run the <a href="../../utility/db_recover.html">db_recover</a> utility, specifying its <b>-c</b> option;
+or call the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method, specifying the <a href="../../api_c/env_open.html#DB_RECOVER_FATAL">DB_RECOVER_FATAL</a>
+flag. The catastrophic recovery process will review the logs and
+database files to bring the environment databases to a consistent state
+as of the time of the last uncorrupted log file that is found. It is
+important to realize that only transactions committed before that date
+will appear in the databases.
+<p>It is possible to re-create the database in a location different from
+the original by specifying appropriate pathnames to the <b>-h</b>
+option of the <a href="../../utility/db_recover.html">db_recover</a> utility. In order for this to work
+properly, it is important that your application refer to files by names
+relative to the database home directory or the pathname(s) specified in
+calls to <a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a>, instead of using full
+pathnames.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/logfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/hotfail.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/term.html b/libdb/docs/ref/transapp/term.html
new file mode 100644
index 0000000..c95f52e
--- /dev/null
+++ b/libdb/docs/ref/transapp/term.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Terminology</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/why.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Terminology</h1>
+<p>Here are some definitions that will be helpful in understanding
+transactions:
+<p><dl compact>
+<p><dt>Thread of control<dd>Berkeley DB is indifferent to the type or style of threads being used by the
+application; or, for that matter, if threads are being used at all --
+because Berkeley DB supports multiprocess access. In the Berkeley DB documentation,
+any time we refer to a <i>thread of control</i>, it can be read as
+a true thread (one of many in an application's address space) or a
+process.
+<p><dt>Free-threaded<dd>A Berkeley DB handle that can be used by multiple threads simultaneously
+without any application-level synchronization is called
+<i>free-threaded</i>.
+<p><dt>Transaction<dd>A <i>transaction</i> is one or more operations on one or more
+databases that should be treated as a single unit of work. For example,
+changes to a set of databases, in which either all of the changes must be
+applied to the database(s) or none of them should. Applications specify
+when each transaction starts, what database operations are included in
+it, and when it ends.
+<p><dt>Transaction abort/commit<dd>Every transaction ends by <i>committing</i> or <i>aborting</i>.
+If a transaction commits, Berkeley DB guarantees that any database changes
+included in the transaction will never be lost, even after system or
+application failure. If a transaction aborts, or is uncommitted when
+the system or application fails, then the changes involved will never
+appear in the database.
+<p><dt>System or application failure<dd><i>System or application failure</i> is the phrase we use to
+describe something bad happening near your data. It can be an
+application dumping core, being interrupted by a signal, the disk
+filling up, or the entire system crashing. In any case, for whatever
+reason, the application can no longer make forward progress, and its
+databases are left in an unknown state.
+<p><dt>Recovery<dd><i>Recovery</i> is what makes the database consistent after a system
+or application failure. The recovery process includes review of log
+files and databases to ensure that the changes from each committed
+transaction appear in the database, and that no changes from an
+unfinished (or aborted) transaction do. Whenever system or application
+failure occurs, applications must usually run recovery.
+<p><dt>Deadlock<dd><i>Deadlock</i>, in its simplest form, happens when one thread of
+control owns resource A, but needs resource B; while another thread of
+control owns resource B, but needs resource A. Neither thread of
+control can make progress, and so one has to give up and release all
+its resources, at which time the remaining thread of control can make
+forward progress.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/why.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/throughput.html b/libdb/docs/ref/transapp/throughput.html
new file mode 100644
index 0000000..163f4db
--- /dev/null
+++ b/libdb/docs/ref/transapp/throughput.html
@@ -0,0 +1,125 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction throughput</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/tune.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction throughput</h1>
+<p>Generally, the speed of a database system is measured by the
+<i>transaction throughput</i>, expressed as a number of
+transactions per second. The two gating factors for Berkeley DB performance
+in a transactional system are usually the underlying database files and
+the log file. Both are factors because they require disk I/O, which is
+slow relative to other system resources such as CPU.
+<p>In the worst-case scenario:
+<p><ul type=disc>
+<li>Database access is truly random and the database is too large for any
+significant percentage of it to fit into the cache, resulting in a
+single I/O per requested key/data pair.
+<li>Both the database and the log are on a single disk.
+</ul>
+<p>This means that for each transaction, Berkeley DB is potentially performing
+several filesystem operations:
+<p><ul type=disc>
+<li>Disk seek to database file
+<li>Database file read
+<li>Disk seek to log file
+<li>Log file write
+<li>Flush log file information to disk
+<li>Disk seek to update log file metadata (for example, inode information)
+<li>Log metadata write
+<li>Flush log file metadata to disk
+</ul>
+<p>There are a number of ways to increase transactional throughput, all of
+which attempt to decrease the number of filesystem operations per
+transaction. First, the Berkeley DB software includes support for
+<i>group commit</i>. Group commit simply means that when the
+information about one transaction is flushed to disk, the information
+for any other waiting transactions will be flushed to disk at the same
+time, potentially amortizing a single log write over a large number of
+transactions. There are additional tuning parameters which may be
+useful to application writers:
+<p><ul type=disc>
+<li>Tune the size of the database cache. If the Berkeley DB key/data pairs used
+during the transaction are found in the database cache, the seek and read
+from the database are no longer necessary, resulting in two fewer
+filesystem operations per transaction. To determine whether your cache
+size is too small, see <a href="../../ref/am_conf/cachesize.html">Selecting
+a cache size</a>.
+<li>Put the database and the log files on different disks. This allows reads
+and writes to the log files and the database files to be performed
+concurrently.
+<li>Set the filesystem configuration so that file access and modification times
+are not updated. Note that although the file access and modification times
+are not used by Berkeley DB, this may affect other programs -- so be careful.
+<li>Upgrade your hardware. When considering the hardware on which to run your
+application, however, it is important to consider the entire system. The
+controller and bus can have as much to do with the disk performance as
+the disk itself. It is also important to remember that raw disk throughput is
+rarely the limiting factor, and that disk seek times are normally the true
+performance issue for Berkeley DB.
+<li>Turn on the <a href="../../api_c/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a> or <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flags.
+This changes the Berkeley DB behavior so that the log files are not written
+and/or flushed when transactions are committed. Although this change
+will greatly increase your transaction throughput, it means that
+transactions will exhibit the ACI (atomicity, consistency, and
+isolation) properties, but not D (durability). Database integrity will
+be maintained, but it is possible that some number of the most recently
+committed transactions may be undone during recovery instead of being
+redone. (A brief configuration sketch follows this list.)
+</ul>
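+<p>The following fragment is a sketch only; it is not part of the Berkeley DB
+distribution. It shows how an application might relax durability with the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> method, assuming an environment handle named
+<b>dbenv</b> that was returned by <b>db_env_create</b>:
+<p><blockquote><pre>int ret;
+
+/*
+ * Trade durability for throughput: the log is still written at commit,
+ * but it is not synchronously flushed to disk.
+ */
+if ((ret = dbenv->set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1)) != 0) {
+	dbenv->err(dbenv, ret, "set_flags: DB_TXN_WRITE_NOSYNC");
+	exit (1);
+}</pre></blockquote>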
+<p>If you are bottlenecked on logging, the following test will help you
+confirm that the number of transactions per second your application
+performs is reasonable for the hardware on which you're running. Your test
+program should repeatedly perform the following operations:
+<p><ul type=disc>
+<li>Seek to the beginning of a file
+<li>Write to the file
+<li>Flush the file write to disk
+</ul>
+<p>The number of times that you can perform these three operations per
+second is a rough measure of the minimum number of transactions per
+second of which the hardware is capable. This test simulates the
+operations applied to the log file. (As a simplifying assumption in this
+experiment, we assume that the database files are either on a separate
+disk or that they fit, with few exceptions, into the database
+cache.) We do not have to directly simulate updating the log file
+directory information because it will normally be updated and flushed
+to disk as a result of flushing the log file write to disk.
+<p>Running this test program, in which we write 256 bytes for 1000 operations
+on reasonably standard commodity hardware (Pentium II CPU, SCSI disk),
+returned the following results:
+<p><blockquote><pre>% testfile -b256 -o1000
+running: 1000 ops
+Elapsed time: 16.641934 seconds
+1000 ops: 60.09 ops per second</pre></blockquote>
+<p>Note that the number of bytes being written to the log as part of each
+transaction can dramatically affect the transaction throughput. The
+test run used 256 bytes, which is a reasonable log write size. Your log
+writes may be different. To determine your average log write size, use
+the <a href="../../utility/db_stat.html">db_stat</a> utility to display your log statistics.
+<p>As a quick sanity check, the average seek time is 9.4 msec for this
+particular disk, and the average latency is 4.17 msec. That results in
+a minimum requirement for a data transfer to the disk of 13.57 msec, or
+a maximum of 74 transfers per second. This is close enough to the
+previous 60 operations per second (which wasn't done on a quiescent
+disk) that the number is believable.
+<p>An implementation of the previous <a href="writetest.cs">example test
+program</a> for IEEE/ANSI Std 1003.1 (POSIX) standard systems is included in the Berkeley DB
+distribution.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/tune.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/transapp.cs b/libdb/docs/ref/transapp/transapp.cs
new file mode 100644
index 0000000..61fa314
--- /dev/null
+++ b/libdb/docs/ref/transapp/transapp.cs
@@ -0,0 +1,489 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define ENV_DIRECTORY "TXNAPP"
+
+void add_cat(DB_ENV *, DB *, char *, ...);
+void add_color(DB_ENV *, DB *, char *, int);
+void add_fruit(DB_ENV *, DB *, char *, char *);
+void *checkpoint_thread(void *);
+void log_archlist(DB_ENV *);
+void *logfile_thread(void *);
+void db_open(DB_ENV *, DB **, char *, int);
+void env_dir_create(void);
+void env_open(DB_ENV **);
+void usage(void);
+
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ DB *db_cats, *db_color, *db_fruit;
+ DB_ENV *dbenv;
+ pthread_t ptid;
+ int ch, ret;
+
+ while ((ch = getopt(argc, argv, "")) != EOF)
+ switch (ch) {
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ env_dir_create();
+ env_open(&dbenv);
+
+ /* Start a checkpoint thread. */
+ if ((ret = pthread_create(
+ &ptid, NULL, checkpoint_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning checkpoint thread: %s\n",
+ strerror(ret));
+ exit (1);
+ }
+
+ /* Start a logfile removal thread. */
+ if ((ret = pthread_create(
+ &ptid, NULL, logfile_thread, (void *)dbenv)) != 0) {
+ fprintf(stderr,
+ "txnapp: failed spawning log file removal thread: %s\n",
+ strerror(ret));
+ exit (1);
+ }
+
+ /* Open database: Key is fruit class; Data is specific type. */
+ db_open(dbenv, &db_fruit, "fruit", 0);
+
+ /* Open database: Key is a color; Data is an integer. */
+ db_open(dbenv, &db_color, "color", 0);
+
+ /*
+ * Open database:
+ * Key is a name; Data is: company name, address, cat breeds.
+ */
+ db_open(dbenv, &db_cats, "cats", 1);
+
+ add_fruit(dbenv, db_fruit, "apple", "yellow delicious");
+
+ add_color(dbenv, db_color, "blue", 0);
+ add_color(dbenv, db_color, "blue", 3);
+
+ add_cat(dbenv, db_cats,
+ "Amy Adams",
+ "Sleepycat Software",
+ "394 E. Riding Dr., Carlisle, MA 01741, USA",
+ "abyssinian",
+ "bengal",
+ "chartreaux",
+ NULL);
+
+ return (0);
+}
+
+void
+env_dir_create()
+{
+ struct stat sb;
+
+ /*
+ * If the directory exists, we're done. We do not further check
+ * the type of the file, DB will fail appropriately if it's the
+ * wrong type.
+ */
+ if (stat(ENV_DIRECTORY, &sb) == 0)
+ return;
+
+ /* Create the directory, read/write/access owner only. */
+ if (mkdir(ENV_DIRECTORY, S_IRWXU) != 0) {
+ fprintf(stderr,
+ "txnapp: mkdir: %s: %s\n", ENV_DIRECTORY, strerror(errno));
+ exit (1);
+ }
+}
+
+void
+env_open(DB_ENV **dbenvp)
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /* Create the environment handle. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "txnapp: db_env_create: %s\n", db_strerror(ret));
+ exit (1);
+ }
+
+ /* Set up error handling. */
+ dbenv->set_errpfx(dbenv, "txnapp");
+
+ /* Do deadlock detection internally. */
+ if ((ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0) {
+ dbenv->err(dbenv, ret, "set_lk_detect: DB_LOCK_DEFAULT");
+ exit (1);
+ }
+
+ /*
+ * Open a transactional environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * run recovery
+ * read/write owner only
+ */
+ if ((ret = dbenv->open(dbenv, ENV_DIRECTORY,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_THREAD,
+ S_IRUSR | S_IWUSR)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->open: %s", ENV_DIRECTORY);
+ exit (1);
+ }
+
+ *dbenvp = dbenv;
+}
+
+void *
+checkpoint_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = arg;
+ dbenv->errx(dbenv, "Checkpoint thread: %lu", (u_long)pthread_self());
+
+ /* Checkpoint once a minute. */
+ for (;; sleep(60))
+ if ((ret = dbenv->txn_checkpoint(dbenv, 0, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "checkpoint thread");
+ exit (1);
+ }
+
+ /* NOTREACHED */
+}
+
+void *
+logfile_thread(void *arg)
+{
+ DB_ENV *dbenv;
+ int ret;
+ char **begin, **list;
+
+ dbenv = arg;
+ dbenv->errx(dbenv,
+ "Log file removal thread: %lu", (u_long)pthread_self());
+
+ /* Check once every 5 minutes. */
+ for (;; sleep(300)) {
+ /* Get the list of log files. */
+ if ((ret =
+ dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ exit (1);
+ }
+
+ /* Remove the log files. */
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ if ((ret = remove(*list)) != 0) {
+ dbenv->err(dbenv,
+ ret, "remove %s", *list);
+ exit (1);
+ }
+ free (begin);
+ }
+ }
+ /* NOTREACHED */
+}
+
+void
+log_archlist(DB_ENV *dbenv)
+{
+ int ret;
+ char **begin, **list;
+
+ /* Get the list of database files. */
+ if ((ret = dbenv->log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_DATA)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive: DB_ARCH_DATA");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("database file: %s\n", *list);
+ free (begin);
+ }
+
+ /* Get the list of log files. */
+ if ((ret = dbenv->log_archive(dbenv,
+ &list, DB_ARCH_ABS | DB_ARCH_LOG)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive: DB_ARCH_LOG");
+ exit (1);
+ }
+ if (list != NULL) {
+ for (begin = list; *list != NULL; ++list)
+ printf("log file: %s\n", *list);
+ free (begin);
+ }
+}
+
+void
+db_open(DB_ENV *dbenv, DB **dbp, char *name, int dups)
+{
+ DB *db;
+ int ret;
+
+ /* Create the database handle. */
+ if ((ret = db_create(&db, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ exit (1);
+ }
+
+ /* Optionally, turn on duplicate data items. */
+ if (dups && (ret = db->set_flags(db, DB_DUP)) != 0) {
+ dbenv->err(dbenv, ret, "db->set_flags: DB_DUP");
+ exit (1);
+ }
+
+ /*
+ * Open a database in the environment:
+ * create if it doesn't exist
+ * free-threaded handle
+ * read/write owner only
+ */
+ if ((ret = db->open(db, NULL, name, NULL,
+ DB_BTREE, DB_CREATE | DB_THREAD, S_IRUSR | S_IWUSR)) != 0) {
+ dbenv->err(dbenv, ret, "db->open: %s", name);
+ exit (1);
+ }
+
+ *dbp = db;
+}
+
+void
+add_fruit(DB_ENV *dbenv, DB *db, char *fruit, char *name)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = fruit;
+ key.size = strlen(fruit);
+ data.data = name;
+ data.size = strlen(name);
+
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
+ exit (1);
+ }
+
+ /* Store the value. */
+ switch (ret = db->put(db, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = tid->commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->commit");
+ exit (1);
+ }
+ return;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = tid->abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->abort");
+ exit (1);
+ }
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(dbenv, ret, "dbc->put: %s/%s", fruit, name);
+ exit (1);
+ }
+ }
+}
+
+void
+add_color(DB_ENV *dbenv, DB *dbp, char *color, int increment)
+{
+ DBT key, data;
+ DB_TXN *tid;
+ int original, ret;
+ char buf[64];
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ key.data = color;
+ key.size = strlen(color);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_MALLOC;
+
+ for (;;) {
+ /* Begin the transaction. */
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
+ exit (1);
+ }
+
+ /*
+ * Get the key. If it exists, we increment the value. If it
+ * doesn't exist, we create it.
+ */
+ switch (ret = dbp->get(dbp, tid, &key, &data, 0)) {
+ case 0:
+ original = atoi(data.data);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = tid->abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->abort");
+ exit (1);
+ }
+ continue;
+ case DB_NOTFOUND:
+ original = 0;
+ break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(
+ dbenv, ret, "dbc->get: %s/%d", color, increment);
+ exit (1);
+ }
+ if (data.data != NULL)
+ free(data.data);
+
+ /* Create the new data item. */
+ (void)snprintf(buf, sizeof(buf), "%d", original + increment);
+ data.data = buf;
+ data.size = strlen(buf) + 1;
+
+ /* Store the new value. */
+ switch (ret = dbp->put(dbp, tid, &key, &data, 0)) {
+ case 0:
+ /* Success: commit the change. */
+ if ((ret = tid->commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->commit");
+ exit (1);
+ }
+ return;
+		case DB_LOCK_DEADLOCK:
+			/* Deadlock: retry the operation. */
+			if ((ret = tid->abort(tid)) != 0) {
+				dbenv->err(dbenv, ret, "DB_TXN->abort");
+				exit (1);
+			}
+			/*
+			 * Clear the DBT so that a DB_NOTFOUND return from
+			 * the retried get cannot cause the on-stack buffer
+			 * to be passed to free().
+			 */
+			data.data = NULL;
+			break;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(
+ dbenv, ret, "dbc->put: %s/%d", color, increment);
+ exit (1);
+ }
+ }
+}
+
+void
+add_cat(DB_ENV *dbenv, DB *db, char *name, ...)
+{
+ va_list ap;
+ DBC *dbc;
+ DBT key, data;
+ DB_TXN *tid;
+ int ret;
+ char *s;
+
+ /* Initialization. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = name;
+ key.size = strlen(name);
+
+retry: /* Begin the transaction. */
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
+ exit (1);
+ }
+
+ /* Delete any previously existing item. */
+ switch (ret = db->del(db, tid, &key, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* Deadlock: retry the operation. */
+ if ((ret = tid->abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ dbenv->err(dbenv, ret, "db->del: %s", name);
+ exit (1);
+ }
+
+ /* Create a cursor. */
+ if ((ret = db->cursor(db, tid, &dbc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db->cursor");
+ exit (1);
+ }
+
+ /* Append the items, in order. */
+ va_start(ap, name);
+ while ((s = va_arg(ap, char *)) != NULL) {
+ data.data = s;
+ data.size = strlen(s);
+ switch (ret = dbc->c_put(dbc, &key, &data, DB_KEYLAST)) {
+ case 0:
+ break;
+ case DB_LOCK_DEADLOCK:
+ va_end(ap);
+
+ /* Deadlock: retry the operation. */
+ if ((ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(
+ dbenv, ret, "dbc->c_close");
+ exit (1);
+ }
+ if ((ret = tid->abort(tid)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->abort");
+ exit (1);
+ }
+ goto retry;
+ default:
+ /* Error: run recovery. */
+ dbenv->err(dbenv, ret, "dbc->put: %s/%s", name, s);
+ exit (1);
+ }
+ }
+ va_end(ap);
+
+ /* Success: commit the change. */
+ if ((ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "dbc->c_close");
+ exit (1);
+ }
+ if ((ret = tid->commit(tid, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_TXN->commit");
+ exit (1);
+ }
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: txnapp\n");
+ exit(1);
+}
diff --git a/libdb/docs/ref/transapp/tune.html b/libdb/docs/ref/transapp/tune.html
new file mode 100644
index 0000000..6114df6
--- /dev/null
+++ b/libdb/docs/ref/transapp/tune.html
@@ -0,0 +1,110 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction tuning</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/reclimit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/throughput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction tuning</h1>
+<p>There are a few different issues to consider when tuning the performance
+of Berkeley DB transactional applications. First, you should review
+<a href="../../ref/am_misc/tune.html">Access method tuning</a>, as the
+tuning issues for access method applications are applicable to
+transactional applications as well. The following are additional tuning
+issues for Berkeley DB transactional applications:
+<p><dl compact>
+<p><dt>access method<dd>Highly concurrent applications should use the Queue access method, where
+possible, as it provides finer granularity of locking than the other
+access methods. Otherwise, applications usually see better concurrency
+when using the Btree access method than when using either the Hash or
+Recno access methods.
+<p><dt>record numbers<dd>Using record numbers outside of the Queue access method will often slow
+down concurrent applications as they limit the degree of concurrency
+available in the database.
+Using the Recno access method, or the Btree access
+method with retrieval by record number configured, can slow applications
+down.
+<p><dt>Btree database size<dd>When using the Btree access method, applications supporting concurrent
+access may see excessive numbers of deadlocks in small databases. There
+are two different approaches to resolving this problem. First, as the
+Btree access method uses page-level locking, decreasing the database
+page size can result in fewer lock conflicts. Second, in the case of
+databases that are cyclically growing and shrinking, turning off reverse
+splits can leave the database with enough pages that there will be fewer
+lock conflicts.
+<p><dt>transactionally protected read operations<dd>Most applications do not need repeatable reads. Performing all read
+operations outside of transactions can often significantly increase
+application throughput. In addition, limiting the lifetime of
+non-transactional cursors will reduce the length of time locks are
+held, thereby improving concurrency.
+<p><dt><a href="../../api_c/env_set_flags.html#DB_DIRECT_DB">DB_DIRECT_DB</a>, <a href="../../api_c/env_set_flags.html#DB_DIRECT_LOG">DB_DIRECT_LOG</a><dd>Consider using the <a href="../../api_c/env_set_flags.html#DB_DIRECT_DB">DB_DIRECT_DB</a> and <a href="../../api_c/env_set_flags.html#DB_DIRECT_LOG">DB_DIRECT_LOG</a> flags.
+On some systems, avoiding caching in the operating system can improve
+write throughput and allow the creation of larger Berkeley DB caches.
+<p><dt><a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a><dd>Consider using the <a href="../../api_c/db_open.html#DB_DIRTY_READ">DB_DIRTY_READ</a> flag for transactions, cursors
+or individual read operations. This flag allows read operations to
+potentially return data which has been modified but not yet committed,
+and can significantly increase application throughput in applications
+that do not require data be guaranteed to be permanent in the database.
+<p><dt><a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a><dd>Consider using the <a href="../../api_c/dbc_get.html#DB_RMW">DB_RMW</a> flag to immediate acquire write locks
+when reading data items that will subsequently be modified. Although
+this flag may increase contention (because write locks are held longer
+than they would otherwise be), it may decrease the number of deadlocks
+that occur.
+<p><dt><a href="../../api_c/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a>, <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a><dd>By default, transactional commit in Berkeley DB implies durability, that is,
+all committed operations will be present in the database after recovery
+from any application or system failure. For applications not requiring
+that level of certainty, specifying the <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag will
+often provide a significant performance improvement. In this case, the
+database will still be fully recoverable, but some number of committed
+transactions might be lost after application or system failure.
+<p><dt>access databases in order<dd>When modifying multiple databases in a single transaction, always access
+physical files and databases within physical files, in the same order
+where possible. In addition, avoid returning to a physical file or
+database, that is, avoid accessing a database, moving on to another
+database and then returning to the first database. This can
+significantly reduce the chance of deadlock between threads of
+control.
+<p><dt>large key/data items<dd>Transactional protections in Berkeley DB are guaranteed by before- and
+after-image physical logging. This means applications modifying large
+key/data items also write large log records, and, in the case of the
+default transaction commit, threads of control must wait until those
+log records have been flushed to disk. Applications supporting
+concurrent access should try to keep key/data items small wherever
+possible.
+<p><dt><a href="../../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a><dd>By default, the Berkeley DB library will only select the POSIX pthread mutex
+implementation if it supports mutexes shared between multiple processes.
+If your application does not share its database environment between
+processes and your system's POSIX mutex support was not selected because
+it did not support inter-process mutexes, you may be able to increase
+performance and transactional throughput by configuring with the
+<a href="../../ref/build_unix/conf.html#--enable-posixmutexes">--enable-posixmutexes</a> argument.
+<p><dt>log buffer size<dd>Berkeley DB internally maintains a buffer of log writes. The buffer is
+written to disk at transaction commit by default, or whenever it
+is filled. If it is consistently being filled before transaction
+commit, it will be written multiple times per transaction, costing
+application performance. In these cases, increasing the size of the
+log buffer can increase application throughput.
+<p><dt>trickle write<dd>In some applications, the cache is sufficiently active and dirty that
+readers frequently need to write a dirty page in order to have space in
+which to read a new page from the backing database file. You can use
+the <a href="../../utility/db_stat.html">db_stat</a> utility (or the statistics returned by the
+<a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a> method) to see how often this is happening in your
+application's cache. In this case, using a separate thread of control
+and the <a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a> interface to trickle-write pages can often
+increase the overall throughput of the application. (A brief sketch of several of these options follows this list.)
+</dl>
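+<p>As an illustration only (this fragment does not appear in the Berkeley DB
+distribution), the following sketch exercises three of the options above. It
+assumes an open environment handle <b>dbenv</b>, an open database handle
+<b>db</b>, an open cursor <b>dbc</b>, and <b>key</b> and <b>data</b> DBTs
+initialized as in the example program elsewhere in this chapter; the 10
+percent figure is arbitrary:
+<p><blockquote><pre>int nwrote, ret;
+
+/* Read without blocking on writers; may return uncommitted data. */
+ret = db->get(db, NULL, &amp;key, &amp;data, DB_DIRTY_READ);
+
+/* Read an item we intend to modify, acquiring the write lock up front. */
+ret = dbc->c_get(dbc, &amp;key, &amp;data, DB_SET | DB_RMW);
+
+/* From a separate thread of control: keep 10% of the cache pages clean. */
+ret = dbenv->memp_trickle(dbenv, 10, &amp;nwrote);</pre></blockquote>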
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/reclimit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/throughput.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/why.html b/libdb/docs/ref/transapp/why.html
new file mode 100644
index 0000000..f31d79b
--- /dev/null
+++ b/libdb/docs/ref/transapp/why.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Why transactions?</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/term.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Why transactions?</h1>
+<p>Perhaps the first question to answer is "Why transactions?" There are
+a number of reasons to include transactional support in your applications.
+The most common ones are the following:
+<p><dl compact>
+<p><dt>Recoverability<dd>Applications often need to ensure that no matter how the system or
+application fails, previously saved data is available the next time the
+application runs.
+<p><dt>Atomicity<dd>Applications may need to make multiple changes to one or more databases,
+but ensure that either all of the changes happen, or none of them
+happens. Transactions guarantee that a group of changes are atomic;
+that is, if the application or system fails, either all of the changes
+to the databases will appear when the application next runs, or none of
+them.
+<p><dt>Isolation<dd>Applications may need to make changes in isolation, that is, ensure that
+only a single thread of control is modifying a key/data pair at a time.
+Transactions ensure each thread of control sees all records as if all
+other transactions either completed before or after its transaction.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/term.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/transapp/writetest.cs b/libdb/docs/ref/transapp/writetest.cs
new file mode 100644
index 0000000..45cd0d9
--- /dev/null
+++ b/libdb/docs/ref/transapp/writetest.cs
@@ -0,0 +1,104 @@
+/*
+ * writetest --
+ *
+ * $Id$
+ */
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+void usage __P((void));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ struct timeval start_time, end_time;
+ long usecs;
+ int bytes, ch, cnt, fd, ops;
+ char *fname, buf[100 * 1024];
+
+ bytes = 256;
+ fname = "testfile";
+ ops = 1000;
+ while ((ch = getopt(argc, argv, "b:f:o:")) != EOF)
+ switch (ch) {
+ case 'b':
+ if ((bytes = atoi(optarg)) > sizeof(buf)) {
+ fprintf(stderr,
+				    "max -b option %d\n", (int)sizeof(buf));
+ exit (1);
+ }
+ break;
+ case 'f':
+ fname = optarg;
+ break;
+ case 'o':
+ if ((ops = atoi(optarg)) <= 0) {
+ fprintf(stderr, "illegal -o option value\n");
+ exit (1);
+ }
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ (void)unlink(fname);
+ if ((fd = open(fname, O_RDWR | O_CREAT, 0666)) == -1) {
+ perror(fname);
+ exit (1);
+ }
+
+ memset(buf, 0, bytes);
+
+ printf("running: %d ops\n", ops);
+
+ (void)gettimeofday(&start_time, NULL);
+ for (cnt = 0; cnt < ops; ++cnt) {
+ if (write(fd, buf, bytes) != bytes) {
+ fprintf(stderr, "write: %s\n", strerror(errno));
+ exit (1);
+ }
+ if (lseek(fd, (off_t)0, SEEK_SET) == -1) {
+ fprintf(stderr, "lseek: %s\n", strerror(errno));
+ exit (1);
+ }
+ if (fsync(fd) != 0) {
+ fprintf(stderr, "fsync: %s\n", strerror(errno));
+ exit (1);
+ }
+ }
+ (void)gettimeofday(&end_time, NULL);
+
+ if (end_time.tv_sec != start_time.tv_sec) {
+ end_time.tv_usec += 1000000;
+ --end_time.tv_sec;
+ }
+ usecs = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
+ end_time.tv_usec - start_time.tv_usec;
+ printf("Elapsed time: %ld.%06ld seconds\n",
+ usecs / 1000000, usecs % 1000000);
+ printf("%d ops: %7.2f ops per second\n",
+ ops, (float)1000000 * ops/usecs);
+
+ (void)unlink(fname);
+ exit (0);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: testfile [-b bytes] [-f file] [-o ops]\n");
+ exit(1);
+}
diff --git a/libdb/docs/ref/txn/config.html b/libdb/docs/ref/txn/config.html
new file mode 100644
index 0000000..e01f301
--- /dev/null
+++ b/libdb/docs/ref/txn/config.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/txn/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring transactions</h1>
+<p>The application may change the number of simultaneous outstanding
+transactions supported by the Berkeley DB environment by calling the
+<a href="../../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a> method. This will also set the size of the
+underlying transaction subsystem's region. When the maximum number of
+outstanding transactions is reached, additional calls to
+<a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> will fail until some active transactions complete.
+<p>There is an additional parameter used in configuring transactions: the
+<a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag. Passing the <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag to
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> when opening a transaction region changes the
+behavior of transactions to not write or synchronously flush the log
+during transaction commit.
+<p>This change may significantly increase application transactional
+throughput. However, it means that although transactions will continue
+to exhibit the ACI (atomicity, consistency, and isolation) properties,
+they will not have D (durability). Database integrity will be
+maintained, but it is possible that some number of the most recently
+committed transactions may be undone during recovery instead of being
+redone.
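+<p>The fragment below is a sketch only and is not part of the Berkeley DB
+distribution; it configures an environment in this way before opening it.
+The maximum of 200 transactions and the environment home directory are
+arbitrary values chosen for the example:
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+
+if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+	return (ret);
+
+/* Allow up to 200 simultaneously active transactions. */
+if ((ret = dbenv->set_tx_max(dbenv, 200)) != 0)
+	return (ret);
+
+/* Do not synchronously flush the log on transaction commit. */
+if ((ret = dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1)) != 0)
+	return (ret);
+
+if ((ret = dbenv->open(dbenv, "/path/to/home", DB_CREATE |
+    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+	return (ret);</pre></blockquote>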
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/txn/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/limits.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/txn/intro.html b/libdb/docs/ref/txn/intro.html
new file mode 100644
index 0000000..7c59a6b
--- /dev/null
+++ b/libdb/docs/ref/txn/intro.html
@@ -0,0 +1,96 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Berkeley DB and transactions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/mp/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Berkeley DB and transactions</h1>
+<p>The Transaction subsystem makes operations atomic, consistent, isolated,
+and durable in the face of system and application failures. The subsystem
+requires that the data be properly logged and locked in order to attain
+these properties. Berkeley DB contains all the components necessary to
+transaction-protect the Berkeley DB access methods, and other forms of data may
+be protected if they are logged and locked appropriately.
+<p>The Transaction subsystem is created, initialized, and opened by calls to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> with the <a href="../../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a> flag specified. Note
+that enabling transactions automatically enables logging, but does not
+enable locking because a single thread of control that needed atomicity
+and recoverability would not require it.
+<p>The <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> function starts a transaction, returning an opaque
+handle to a transaction. If the parent parameter to <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> is
+non-NULL, the new transaction is a child of the designated parent
+transaction.
+<p>The <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> function ends the designated transaction and causes
+all updates performed by the transaction to be undone. The end result is
+that the database is left in a state identical to the state that existed
+prior to the <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>. If the aborting transaction has any child
+transactions associated with it (even ones that have already been
+committed), they are also aborted. Any transactions that are unresolved
+(neither committed nor aborted) when the application or system fails
+are aborted during recovery.
+<p>The <a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a> function ends the designated transaction and makes
+all the updates performed by the transaction permanent, even in the face
+of application or system failure. If this is a parent transaction
+committing, all child transactions that individually committed or
+had not been resolved are also committed.
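+<p>As a sketch only (not part of the Berkeley DB distribution), the following
+fragment shows a nested transaction. It assumes an open, transactional
+environment handle named <b>dbenv</b>, and uses the same error-handling style
+as the example program elsewhere in this guide:
+<p><blockquote><pre>DB_TXN *child, *parent;
+int ret;
+
+if ((ret = dbenv->txn_begin(dbenv, NULL, &amp;parent, 0)) != 0) {
+	dbenv->err(dbenv, ret, "DB_ENV->txn_begin");
+	exit (1);
+}
+
+/* The child must be resolved before its parent. */
+if ((ret = dbenv->txn_begin(dbenv, parent, &amp;child, 0)) != 0) {
+	dbenv->err(dbenv, ret, "DB_ENV->txn_begin: child");
+	exit (1);
+}
+
+/* ... perform database operations, passing child as the transaction ... */
+
+if ((ret = child->commit(child, 0)) != 0) {
+	dbenv->err(dbenv, ret, "DB_TXN->commit: child");
+	exit (1);
+}
+
+/* The child's changes become permanent only if the parent commits. */
+if ((ret = parent->commit(parent, 0)) != 0) {
+	dbenv->err(dbenv, ret, "DB_TXN->commit");
+	exit (1);
+}</pre></blockquote>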
+<p>Transactions are identified by 32-bit unsigned integers. The ID
+associated with any transaction can be obtained using the <a href="../../api_c/txn_id.html">DB_TXN-&gt;id</a>
+function. If an application is maintaining information outside of Berkeley DB
+that it wishes to transaction-protect, it should use this transaction ID
+as the locking ID.
+<p>The <a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a> function causes a transaction checkpoint. A
+checkpoint is performed relative to a specific log sequence number (LSN),
+referred to as the checkpoint LSN. When a checkpoint completes
+successfully, it means that all data buffers whose updates are described
+by LSNs less than the checkpoint LSN have been written to disk. This, in
+turn, means that the log records less than the checkpoint LSN are no
+longer necessary for normal recovery (although they would be required for
+catastrophic recovery if the database files were lost), and all log files
+containing only records prior to the checkpoint LSN may be safely archived
+and removed.
+<p>The time required to run normal recovery is proportional to the amount
+of work done between checkpoints. If a large number of modifications
+happen between checkpoints, many updates recorded in the log may
+not have been written to disk when failure occurred, and recovery may
+take longer to run. Generally, if the interval between checkpoints is
+short, data may be written to disk more frequently, but the
+recovery time will be shorter. Often, the checkpoint interval is tuned
+for each specific application.
+<p>The <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a> function returns information about the status of
+the transaction subsystem. It is the programmatic interface used by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The transaction system is closed by a call to <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>.
+<p>Finally, the entire transaction system may be removed using the
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> interface.
+<!--$Id$-->
+<p><table border=1 align=center>
+<tr><th>Transaction Subsystem and Related Methods</th><th>Description</th></tr>
+<tr><td><a href="../../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><a href="../../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><a href="../../api_c/txn_discard.html">DB_TXN-&gt;discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><a href="../../api_c/txn_id.html">DB_TXN-&gt;id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><a href="../../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a></td><td>Set transaction timeout</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/mp/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/txn/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/txn/limits.html b/libdb/docs/ref/txn/limits.html
new file mode 100644
index 0000000..bd857da
--- /dev/null
+++ b/libdb/docs/ref/txn/limits.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transaction limits</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Transaction Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/txn/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transaction limits</h1>
+<h3>Transaction IDs</h3>
+<p>Transactions are identified by 32-bit unsigned integers, of which only
+the upper half of the range is used, so there are just over two billion
+unique transaction IDs. When a database
+environment is initially created or recovery is run, the transaction ID
+name space is reset, and new transactions are numbered starting from
+0x80000000 (2,147,483,648). The IDs will wrap if the maximum
+transaction ID is reached, starting again from 0x80000000. The most
+recently allocated transaction ID is the <b>st_last_txnid</b> value in
+the transaction statistics information, and can be displayed by the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<h3>Cursors</h3>
+<p>When using transactions, cursors are localized to a single transaction.
+That is, a cursor may not span transactions, and must be opened and
+closed within a single transaction. In addition, intermingling
+transaction-protected cursor operations and non-transaction-protected
+cursor operations on the same database in a single thread of control is
+practically guaranteed to deadlock because the locks obtained for
+transactional and non-transactional operations can conflict.
+<h3>Multiple Threads of Control</h3>
+<p>Because transactions must hold all their locks until commit, a single
+transaction may accumulate a large number of long-term locks during its
+lifetime. As a result, when two concurrently running transactions
+access the same database, there is strong potential for conflict.
+Although Berkeley DB allows an application to have multiple outstanding
+transactions active within a single thread of control, great care must
+be taken to ensure that the transactions do not block each other (for
+example, attempt to obtain conflicting locks on the same data). If two
+concurrently active transactions in the same thread of control do
+encounter a lock conflict, the thread of control will deadlock, and the
+deadlock detector cannot detect the problem. In this case, there
+is no true deadlock; but because the transaction being waited on belongs
+to the same thread of control, no forward progress can be
+made.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/txn/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rpc/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.2.0/convert.html b/libdb/docs/ref/upgrade.2.0/convert.html
new file mode 100644
index 0000000..f9e67c1
--- /dev/null
+++ b/libdb/docs/ref/upgrade.2.0/convert.html
@@ -0,0 +1,75 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: converting applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: converting applications</h1>
+<p>Mapping the Berkeley DB 1.85 functionality into Berkeley DB version 2 is almost always
+simple. The manual page <a href="../../api_c/db_open.html">DB-&gt;open</a> replaces the Berkeley DB 1.85 manual
+pages <b>dbopen</b>(3), <b>btree</b>(3), <b>hash</b>(3) and
+<b>recno</b>(3). You should be able to convert each 1.85 function
+call into a Berkeley DB version 2 function call using just the <a href="../../api_c/db_open.html">DB-&gt;open</a>
+documentation.
+<p>Some guidelines and things to watch out for:
+<p><ol>
+<p><li>Most access method functions have exactly the same semantics as in Berkeley DB
+1.85, although the arguments to the functions have changed in some cases.
+To get your code to compile, the most common change is to add the
+transaction ID as an argument (NULL, since Berkeley DB 1.85 did not support
+transactions.)
+<p><li>You must always initialize DBT structures to zero before using them with
+any Berkeley DB version 2 function. (They do not normally have to be
+reinitialized each time, only when they are first allocated. Do this by
+declaring the DBT structure external or static, or by calling the C
+library routine <b>bzero</b>(3) or <b>memset</b>(3).)
+<p><li>The error returns are completely different in the two versions. In Berkeley DB
+1.85, &lt; 0 meant an error, and &gt; 0 meant a minor Berkeley DB exception.
+In Berkeley DB 2.0, &gt; 0 means an error (the Berkeley DB version 2 functions
+return <b>errno</b> on error) and &lt; 0 means a Berkeley DB exception.
+See <a href="../../ref/program/errorret.html">Error Returns to Applications</a>
+for more information.
+<p><li>The Berkeley DB 1.85 DB-&gt;seq function has been replaced by cursors in Berkeley DB
+version 2. The semantics are approximately the same, but cursors require
+the creation of an extra object (the DBC object), which is then used to
+access the database.
+<p>Specifically, the partial key match and range search functionality of the
+R_CURSOR flag in DB-&gt;seq has been replaced by the
+<a href="../../api_c/dbc_get.html#DB_SET_RANGE">DB_SET_RANGE</a> flag in <a href="../../api_c/dbc_get.html">DBcursor-&gt;c_get</a>.
+<p><li>In version 2 of the Berkeley DB library, additions or deletions into Recno
+(fixed and variable-length record) databases no longer automatically
+logically renumber all records after the add/delete point, by default.
+The default behavior is that deleting records does not cause subsequent
+records to be renumbered, and it is an error to attempt to add new records
+between records already in the database. Applications wanting the
+historic Recno access method semantics should call the
+<a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a> method with the <a href="../../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag.
+<p><li>Opening a database in Berkeley DB version 2 is a much heavier-weight operation
+than it was in Berkeley DB 1.85. Therefore, if your historic applications were
+written to open a database, perform a single operation, and close the
+database, you may observe performance degradation. In most cases, this
+is due to the expense of creating the environment upon each open. While
+we encourage restructuring your application to avoid repeated opens and
+closes, you can probably recover most of the lost performance by simply
+using a persistent environment across invocations.
+</ol>
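+<p>As an illustration only (this fragment is not part of the Berkeley DB
+distribution), the following sketch shows the cursor-based replacement for
+the 1.85 DB-&gt;seq/R_CURSOR idiom. It assumes an open Berkeley DB version 2
+handle named <b>dbp</b>, no transactions, and an arbitrary search key of
+"app":
+<p><blockquote><pre>DBC *dbc;
+DBT key, data;
+int ret;
+
+memset(&amp;key, 0, sizeof(key));
+memset(&amp;data, 0, sizeof(data));
+key.data = "app";
+key.size = strlen("app");
+
+if ((ret = dbp->cursor(dbp, NULL, &amp;dbc, 0)) != 0)
+	return (ret);
+
+/* Position at the smallest key greater than or equal to "app". */
+if ((ret = dbc->c_get(dbc, &amp;key, &amp;data, DB_SET_RANGE)) == 0)
+	printf("key: %.*s\n", (int)key.size, (char *)key.data);
+
+(void)dbc->c_close(dbc);</pre></blockquote>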
+<p>While simply converting Berkeley DB 1.85 function calls to Berkeley DB version 2
+function calls will work, we recommend that you eventually reconsider your
+application's interface to the Berkeley DB database library in light of the
+additional functionality supplied by Berkeley DB version 2, as it is likely to
+result in enhanced application performance.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.2.0/disk.html b/libdb/docs/ref/upgrade.2.0/disk.html
new file mode 100644
index 0000000..fa163b5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.2.0/disk.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: upgrade requirements</h1>
+<p>You will need to upgrade your on-disk databases, as all access method
+database formats changed in the Berkeley DB 2.0 release. For information on
+converting databases from Berkeley DB 1.85 to Berkeley DB 2.0, see the
+<a href="../../utility/db_dump.html">db_dump185</a> and <a href="../../utility/db_load.html">db_load</a> documentation. As database
+environments did not exist prior to the 2.0 release, there is no
+question of upgrading existing database environments.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.2.0/intro.html b/libdb/docs/ref/upgrade.2.0/intro.html
new file mode 100644
index 0000000..9d304b0
--- /dev/null
+++ b/libdb/docs/ref/upgrade.2.0/intro.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade/process.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 1.85 and 1.86 release interfaces to the Berkeley DB 2.0 release
+interfaces. They do not describe how to upgrade to the current Berkeley DB
+release interfaces.
+<p>It is not difficult to upgrade Berkeley DB 1.85 applications to use the Berkeley DB
+version 2 library. The Berkeley DB version 2 library has a Berkeley DB 1.85
+compatibility API, which you can use by either recompiling your
+application's source code or by relinking its object files against the
+version 2 library. The underlying databases must be converted, however,
+as the Berkeley DB version 2 library has a different underlying database format.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade/process.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/system.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.2.0/system.html b/libdb/docs/ref/upgrade.2.0/system.html
new file mode 100644
index 0000000..3739964
--- /dev/null
+++ b/libdb/docs/ref/upgrade.2.0/system.html
@@ -0,0 +1,85 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 2.0: system integration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 2.0: system integration</h1>
+<p><ol>
+<p><li>It is possible to maintain both the Berkeley DB 1.85 and Berkeley DB version 2
+libraries on your system. However, the <b>db.h</b> include file that
+was distributed with Berkeley DB 1.85 is not compatible with the <b>db.h</b>
+file distributed with Berkeley DB version 2, so you will have to install them
+in different locations. In addition, both the Berkeley DB 1.85 and Berkeley DB
+version 2 libraries are named <b>libdb.a</b>.
+<p>As the Berkeley DB 1.85 library did not have an installation target in the
+Makefile, there's no way to know exactly where it was installed on the
+system. In addition, many vendors included it in the C library instead
+of as a separate library, and so it may actually be part of libc and the
+<b>db.h</b> include file may be installed in <b>/usr/include</b>.
+<p>For these reasons, the simplest way to maintain both libraries is to
+install Berkeley DB version 2 in a completely separate area of your system.
+The Berkeley DB version 2 installation process allows you to install into a
+standalone directory hierarchy on your system. See the
+<a href="../../ref/build_unix/intro.html">Building for UNIX systems</a>
+documentation for more information and instructions on how to install the
+Berkeley DB version 2 library, include files and documentation into specific
+locations.
+<p><li>Alternatively, you can replace Berkeley DB 1.85 on your system with Berkeley DB
+version 2. In this case, you'll probably want to install Berkeley DB version
+2 in the normal place on your system, wherever that may be, and delete
+the Berkeley DB 1.85 include files, manual pages and libraries.
+<p>To replace 1.85 with version 2, you must either convert your 1.85
+applications to use the version 2 API or build the Berkeley DB version 2 library
+to include Berkeley DB 1.85 interface compatibility code. Whether converting
+your applications to use the version 2 interface or using the version 1.85
+compatibility API, you will need to recompile or relink your 1.85
+applications, and you must convert any persistent application databases
+to the Berkeley DB version 2 database formats.
+<p>If you want to recompile your Berkeley DB 1.85 applications, you will have to
+change them to include the file <b>db_185.h</b> instead of
+<b>db.h</b>. (The <b>db_185.h</b> file is automatically installed
+during the Berkeley DB version 2 installation process.) You can then recompile
+the applications, linking them against the Berkeley DB version 2 library.
+<p>For more information on compiling the Berkeley DB 1.85 compatibility code into
+the Berkeley DB version 2 library, see <a href="../../ref/build_unix/intro.html">Building for UNIX platforms</a>.
+<p>For more information on converting databases from the Berkeley DB 1.85 formats
+to the Berkeley DB version 2 formats, see the <a href="../../utility/db_dump.html">db_dump185</a> and
+<a href="../../utility/db_load.html">db_load</a> documentation.
+<p><li>Finally, although we certainly do not recommend it, it is possible to
+load both Berkeley DB 1.85 and Berkeley DB version 2 into the same library.
+Similarly, it is possible to use both Berkeley DB 1.85 and Berkeley DB version 2
+within a single application, although it is not possible to use them from
+within the same file.
+<p>The name space in Berkeley DB version 2 has been changed from that of previous
+Berkeley DB versions, notably version 1.85, for portability and consistency
+reasons. The only name collisions in the two libraries are the names used
+by the historic <a href="../../api_c/dbm.html">dbm</a>, <a href="../../api_c/dbm.html">ndbm</a> and <a href="../../api_c/hsearch.html">hsearch</a> interfaces,
+and the Berkeley DB 1.85 compatibility interfaces in the Berkeley DB version 2
+library.
+<p>If you are loading both Berkeley DB 1.85 and Berkeley DB version 2 into a single
+library, remove the historic interfaces from one of the two library
+builds, and configure the Berkeley DB version 2 build to not include the Berkeley DB
+1.85 compatibility API; otherwise, you could have collisions and undefined
+behavior. This can be done by editing the library Makefiles and
+reconfiguring and rebuilding the Berkeley DB version 2 library. Obviously, if
+you use the historic interfaces, you will get the version in the library
+from which you did not remove them. Similarly, you will not be able to
+access Berkeley DB version 2 files using the Berkeley DB 1.85 compatibility interface,
+since you have removed that from the library as well.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/convert.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.2.0/toc.html b/libdb/docs/ref/upgrade.2.0/toc.html
new file mode 100644
index 0000000..e917e40
--- /dev/null
+++ b/libdb/docs/ref/upgrade.2.0/toc.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 2.0: introduction</a>
+<li><a href="system.html">Release 2.0: system integration</a>
+<li><a href="convert.html">Release 2.0: converting applications</a>
+<li><a href="disk.html">Release 2.0: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/close.html b/libdb/docs/ref/upgrade.3.0/close.html
new file mode 100644
index 0000000..4b2f14b
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/close.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;sync and DB-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;sync and DB-&gt;close</h1>
+<p>In previous Berkeley DB releases, the <a href="../../api_c/db_close.html">DB-&gt;close</a> and <a href="../../api_c/db_sync.html">DB-&gt;sync</a> methods
+discarded any return of DB_INCOMPLETE from the underlying buffer
+pool interfaces, and returned success to their callers. (The
+DB_INCOMPLETE error will be returned if the buffer pool functions
+are unable to flush all of the database's dirty blocks from the pool.
+This often happens if another thread is reading or writing the database's
+pages in the pool.)
+<p>In the 3.X release, <a href="../../api_c/db_sync.html">DB-&gt;sync</a> and <a href="../../api_c/db_close.html">DB-&gt;close</a> will return
+DB_INCOMPLETE to the application. The best solution is to avoid
+calling <a href="../../api_c/db_sync.html">DB-&gt;sync</a> and to specify the <a href="../../api_c/db_close.html#DB_NOSYNC">DB_NOSYNC</a> flag to the
+<a href="../../api_c/db_close.html">DB-&gt;close</a> method when multiple threads are expected to be accessing the
+database. Alternatively, the caller can ignore any error return of
+DB_INCOMPLETE.
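+<p>As an illustration only, here is a minimal sketch of the recommended approach,
+assuming a hypothetical <a href="../../api_c/db_class.html">DB</a> handle named <b>dbp</b> that other threads may
+still be using:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+/* Skip the final flush; other threads may still be using the database. */
+if ((ret = dbp-&gt;close(dbp, DB_NOSYNC)) != 0)
+	return (ret);</pre></blockquote>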
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/cxx.html b/libdb/docs/ref/upgrade.3.0/cxx.html
new file mode 100644
index 0000000..081909f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/cxx.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: additional C++ changes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: additional C++ changes</h1>
+<p>The Db::set_error_model method is gone. The way to change the C++ API to
+return errors rather than throw exceptions is via a flag on the DbEnv or
+Db constructor. For example:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);</pre></blockquote>
+<p>creates an environment that will never throw exceptions, and method
+returns should be checked instead.
+<p>There are a number of smaller changes to the API that bring the C, C++
+and Java APIs much closer in terms of functionality and usage. Please
+refer to the pages for upgrading C applications for further details.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/db.html b/libdb/docs/ref/upgrade.3.0/db.html
new file mode 100644
index 0000000..f50514f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/db.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DB structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DB structure</h1>
+<p>The <a href="../../api_c/db_class.html">DB</a> structure is now opaque for applications in the Berkeley DB 3.0
+release. Accesses to any fields within that structure by the application
+should be replaced with method calls. The following example illustrates
+this using the historic type structure field. In the Berkeley DB 2.X releases,
+applications could find the type of an underlying database using code
+similar to the following:
+<p><blockquote><pre>DB *db;
+DBTYPE type;
+<p>
+ type = db-&gt;type;</pre></blockquote>
+<p>in the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/db_get_type.html">DB-&gt;get_type</a> method, as follows:
+<p><blockquote><pre>DB *db;
+DBTYPE type;
+<p>
+ type = db-&gt;get_type(db);</pre></blockquote>
+<p>The following table lists the <a href="../../api_c/db_class.html">DB</a> fields previously used by
+applications and the methods that should now be used to get or set them.
+<p><table border=1 align=center>
+<tr><th><a href="../../api_c/db_class.html">DB</a> field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>byteswapped</td><td><a href="../../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a></td></tr>
+<tr><td>db_errcall</td><td><a href="../../api_c/db_set_errcall.html">DB-&gt;set_errcall</a></td></tr>
+<tr><td>db_errfile</td><td><a href="../../api_c/db_set_errfile.html">DB-&gt;set_errfile</a></td></tr>
+<tr><td>db_errpfx</td><td><a href="../../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a></td></tr>
+<tr><td>db_paniccall</td><td><a href="../../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a></td></tr>
+<tr><td>type</td><td><a href="../../api_c/db_get_type.html">DB-&gt;get_type</a></td></tr>
+</table>
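+<p>As an illustration only, here is a minimal sketch of replacing direct field
+assignments with the corresponding method calls; the handle name and prefix
+string are hypothetical:
+<p><blockquote><pre>DB *db;
+<p>
+/* Berkeley DB 2.X: db-&gt;db_errpfx = "myprog"; db-&gt;db_errfile = stderr; */
+db-&gt;set_errpfx(db, "myprog");
+db-&gt;set_errfile(db, stderr);</pre></blockquote>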
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/db_cxx.html b/libdb/docs/ref/upgrade.3.0/db_cxx.html
new file mode 100644
index 0000000..0295b66
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/db_cxx.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the Db class for C++ and Java</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the Db class for C++ and Java</h1>
+<p>The static Db::open method and the DbInfo class have been removed in the
+Berkeley DB 3.0 release. The way to open a database file is to use the new Db
+constructor with two arguments, followed by set_XXX methods to configure
+the Db object, and finally a call to the new (nonstatic) Db::open(). In
+comparing the Berkeley DB 3.0 release open method with the 2.X static open
+method, the second argument is new. It is a database name, which can
+be null. The DbEnv argument has been removed, as the environment is now
+specified in the constructor. The open method no longer returns a Db,
+since it operates on one.
+<p>Here's a C++ example opening a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>// Note: by default, errors are thrown as exceptions
+Db *table;
+Db::open("lookup.db", DB_BTREE, DB_CREATE, 0644, dbenv, 0, &table);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>// Note: by default, errors are thrown as exceptions
+Db *table = new Db(dbenv, 0);
+table-&gt;open("lookup.db", NULL, DB_BTREE, DB_CREATE, 0644);</pre></blockquote>
+<p>Here's a Java example opening a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>// Note: errors are thrown as exceptions
+Db table = Db.open("lookup.db", Db.DB_BTREE, Db.DB_CREATE, 0644, dbenv, 0);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>// Note: errors are thrown as exceptions
+Db table = new Db(dbenv, 0);
+table.open("lookup.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);</pre></blockquote>
+<p>Note that if the dbenv argument is null, the database will not exist
+within an environment.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/dbenv.html b/libdb/docs/ref/upgrade.3.0/dbenv.html
new file mode 100644
index 0000000..912753d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/dbenv.html
@@ -0,0 +1,69 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DB_ENV structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DB_ENV structure</h1>
+<p>The <a href="../../api_c/env_class.html">DB_ENV</a> structure is now opaque for applications in the Berkeley DB
+3.0 release. Accesses to any fields within that structure by the
+application should be replaced with method calls. The following example
+illustrates this using the historic errpfx structure field. In the Berkeley DB
+2.X releases, applications set error prefixes using code similar to the
+following:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ dbenv-&gt;errpfx = "my prefix";</pre></blockquote>
+<p>in the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a> method, as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ dbenv-&gt;set_errpfx(dbenv, "my prefix");</pre></blockquote>
+<p>The following table lists the <a href="../../api_c/env_class.html">DB_ENV</a> fields previously used by
+applications and the methods that should now be used to set them.
+<p><table border=1 align=center>
+<tr><th><a href="../../api_c/env_class.html">DB_ENV</a> field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>db_errcall</td><td><a href="../../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a></td></tr>
+<tr><td>db_errfile</td><td><a href="../../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a></td></tr>
+<tr><td>db_errpfx</td><td><a href="../../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a></td></tr>
+<tr><td>db_lorder</td><td>This field was removed from the <a href="../../api_c/env_class.html">DB_ENV</a> structure in the Berkeley DB
+3.0 release as no application should have ever used it. Any code using
+it should be evaluated for potential bugs.</td></tr>
+<tr><td>db_paniccall</td><td><a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a></td></tr>
+<tr><td>db_verbose</td><td><a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a>
+<p>Note: the db_verbose field was a simple boolean toggle; the
+<a href="../../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a> method takes arguments that specify exactly
+which verbose messages are desired.</td></tr>
+<tr><td>lg_max</td><td><a href="../../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a></td></tr>
+<tr><td>lk_conflicts</td><td><a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td>lk_detect</td><td><a href="../../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a></td></tr>
+<tr><td>lk_max</td><td>dbenv-&gt;set_lk_max</td></tr>
+<tr><td>lk_modes</td><td><a href="../../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a></td></tr>
+<tr><td>mp_mmapsize</td><td><a href="../../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a></td></tr>
+<tr><td>mp_size</td><td><a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>
+<p>Note: the <a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a> function takes additional arguments.
+Setting both the second argument (the number of GB in the pool) and the
+last argument (the number of memory pools to create) to 0 will result in
+behavior that is backward-compatible with previous Berkeley DB releases.</td></tr>
+<tr><td>tx_info</td><td>This field was used by applications as an argument to the transaction
+subsystem functions. As those functions take references to a
+<a href="../../api_c/env_class.html">DB_ENV</a> structure as arguments in the Berkeley DB 3.0 release, it should
+no longer be used by any application.</td></tr>
+<tr><td>tx_max</td><td><a href="../../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a></td></tr>
+<tr><td>tx_recover</td><td>dbenv-&gt;set_tx_recover</td></tr>
+</table>
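+<p>As an illustration only, here is a minimal sketch of the backward-compatible
+<a href="../../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a> call described in the note above, assuming a
+hypothetical one-megabyte cache:
+<p><blockquote><pre>DB_ENV *dbenv;
+int ret;
+<p>
+/* Berkeley DB 2.X: dbenv-&gt;mp_size = 1024 * 1024; */
+ret = dbenv-&gt;set_cachesize(dbenv, 0, 1024 * 1024, 0);</pre></blockquote>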
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/dbenv_cxx.html b/libdb/docs/ref/upgrade.3.0/dbenv_cxx.html
new file mode 100644
index 0000000..2dcc189
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/dbenv_cxx.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DbEnv class for C++ and Java</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DbEnv class for C++ and Java</h1>
+<p>The DbEnv::appinit() method and two constructors for the DbEnv class are
+gone. There is now a single way to create and initialize the environment.
+The way to create an environment is to use the new DbEnv constructor with
+one argument. After this call, the DbEnv can be configured with various
+set_XXX methods. Finally, a call to DbEnv::open is made to initialize
+the environment.
+<p>Here's a C++ example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv();
+<p>
+dbenv-&gt;set_error_stream(&cerr);
+dbenv-&gt;set_errpfx("myprog");
+<p>
+if ((dberr = dbenv-&gt;appinit("/database/home",
+ NULL, DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL)) != 0) {
+ cerr &lt;&lt; "failure: " &lt;&lt; strerror(dberr);
+ exit (1);
+}</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>int dberr;
+DbEnv *dbenv = new DbEnv(0);
+<p>
+dbenv-&gt;set_error_stream(&cerr);
+dbenv-&gt;set_errpfx("myprog");
+<p>
+if ((dberr = dbenv-&gt;open("/database/home",
+ NULL, DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0) {
+ cerr &lt;&lt; "failure: " &lt;&lt; dbenv-&gt;strerror(dberr);
+ exit (1);
+}</pre></blockquote>
+<p>Here's a Java example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>int dberr;
+DbEnv dbenv = new DbEnv();
+<p>
+dbenv.set_error_stream(System.err);
+dbenv.set_errpfx("myprog");
+<p>
+dbenv.appinit("/database/home",
+ null, Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>int dberr;
+DbEnv dbenv = new DbEnv(0);
+<p>
+dbenv.set_error_stream(System.err);
+dbenv.set_errpfx("myprog");
+<p>
+dbenv.open("/database/home",
+ null, Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);</pre></blockquote>
+<p>In the Berkeley DB 2.X release, DbEnv had accessors to obtain "managers" of type
+DbLockTab, DbLog, DbMpool, and DbTxnMgr. If you used any of these managers,
+all their methods are now found directly in the DbEnv class.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/dbinfo.html b/libdb/docs/ref/upgrade.3.0/dbinfo.html
new file mode 100644
index 0000000..cf4796d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/dbinfo.html
@@ -0,0 +1,73 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: the DBINFO structure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: the DBINFO structure</h1>
+<p>The DB_INFO structure has been removed from the Berkeley DB 3.0 release.
+Accesses to any fields within that structure by the application should be
+replaced with method calls on the <a href="../../api_c/db_class.html">DB</a> handle. The following
+example illustrates this using the historic db_cachesize structure field.
+In the Berkeley DB 2.X releases, applications could set the size of an
+underlying database cache using code similar to the following:
+<p><blockquote><pre>DB_INFO dbinfo;
+<p>
+ memset(&dbinfo, 0, sizeof(dbinfo));
+ dbinfo.db_cachesize = 1024 * 1024;</pre></blockquote>
+<p>in the Berkeley DB 3.X releases, this should be done using the
+<a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> method, as follows:
+<p><blockquote><pre>DB *db;
+int ret;
+<p>
+ ret = db-&gt;set_cachesize(db, 0, 1024 * 1024, 0);</pre></blockquote>
+<p>The DB_INFO structure is no longer used in any way by the Berkeley DB 3.0
+release, and should be removed from the application.
+<p>The following table lists the DB_INFO fields previously used by
+applications and the methods that should now be used to set
+them. Because these calls provide configuration for the
+database open, they must precede the call to <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+Calling them after the call to <a href="../../api_c/db_open.html">DB-&gt;open</a> will return an
+error.
+<p><table border=1 align=center>
+<tr><th>DB_INFO field</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>bt_compare</td><td><a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a></td></tr>
+<tr><td>bt_minkey</td><td><a href="../../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a></td></tr>
+<tr><td>bt_prefix</td><td><a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a></td></tr>
+<tr><td>db_cachesize</td><td><a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>
+<p>Note: the <a href="../../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a> function takes additional arguments.
+Setting both the second argument (the number of GB in the pool) and the
+last argument (the number of memory pools to create) to 0 will result in
+behavior that is backward-compatible with previous Berkeley DB releases.</td></tr>
+<tr><td>db_lorder</td><td><a href="../../api_c/db_set_lorder.html">DB-&gt;set_lorder</a></td></tr>
+<tr><td>db_malloc</td><td>DB-&gt;set_malloc</td></tr>
+<tr><td>db_pagesize</td><td><a href="../../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a></td></tr>
+<tr><td>dup_compare</td><td><a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a></td></tr>
+<tr><td>flags</td><td><a href="../../api_c/db_set_flags.html">DB-&gt;set_flags</a>
+<p>Note: the DB_DELIMITER, DB_FIXEDLEN and DB_PAD flags no longer need to be
+set, as there are specific methods on the <a href="../../api_c/db_class.html">DB</a> handle that set the
+file delimiter, the length of fixed-length records and the fixed-length
+record pad character. They should simply be discarded from the application.</td></tr>
+<tr><td>h_ffactor</td><td><a href="../../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a></td></tr>
+<tr><td>h_hash</td><td><a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a></td></tr>
+<tr><td>h_nelem</td><td><a href="../../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a></td></tr>
+<tr><td>re_delim</td><td><a href="../../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a></td></tr>
+<tr><td>re_len</td><td><a href="../../api_c/db_set_re_len.html">DB-&gt;set_re_len</a></td></tr>
+<tr><td>re_pad</td><td><a href="../../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a></td></tr>
+<tr><td>re_source</td><td><a href="../../api_c/db_set_re_source.html">DB-&gt;set_re_source</a></td></tr>
+</table>
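+<p>As an illustration only, here is a minimal sketch of the required ordering;
+the page and cache sizes are hypothetical, and error handling is abbreviated:
+<p><blockquote><pre>DB *dbp;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+	return (ret);
+<p>
+/* Configuration must happen here, before the call to DB-&gt;open. */
+if ((ret = dbp-&gt;set_pagesize(dbp, 8 * 1024)) != 0 ||
+    (ret = dbp-&gt;set_cachesize(dbp, 0, 1024 * 1024, 0)) != 0)
+	return (ret);
+<p>
+/* ... now call DB-&gt;open on the configured handle ... */</pre></blockquote>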
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/disk.html b/libdb/docs/ref/upgrade.3.0/disk.html
new file mode 100644
index 0000000..86fefc6
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/disk.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: upgrade requirements</h1>
+<p>Log file formats and the Btree, Recno and Hash Access Method database
+formats changed in the Berkeley DB 3.0 release. (The on-disk Btree/Recno
+format changed from version 6 to version 7. The on-disk Hash format
+changed from version 5 to version 6.) Until the underlying databases
+are upgraded, the <a href="../../api_c/db_open.html">DB-&gt;open</a> method will return a <a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a>
+error.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
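+<p>As an illustration only, a minimal check an application might add around its
+existing <a href="../../api_c/db_open.html">DB-&gt;open</a> call; the error handling shown is hypothetical:
+<p><blockquote><pre>if (ret == DB_OLD_VERSION) {
+	fprintf(stderr, "database file must be upgraded before use\n");
+	return (ret);
+}</pre></blockquote>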
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/eacces.html b/libdb/docs/ref/upgrade.3.0/eacces.html
new file mode 100644
index 0000000..c4edfc9
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/eacces.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: EACCES</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: EACCES</h1>
+<p>There was an error in previous releases of the Berkeley DB documentation that
+said that the lock_put and lock_vec interfaces could return EACCES as
+an error to indicate that a lock could not be released because it was
+held by another locker. The application should be searched for any
+occurrences of EACCES. For each of these, any that are checking for an
+error return from lock_put or lock_vec should have the test and any
+error handling removed.
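+<p>As an illustration only, here is a sketch of the kind of test this page says to
+delete; the variable names are hypothetical:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_LOCK *lock;
+int ret;
+<p>
+ret = lock_put(dbenv, lock);
+if (ret == EACCES)	/* never returned by lock_put; delete this test */
+	ret = 0;</pre></blockquote>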
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/eagain.html b/libdb/docs/ref/upgrade.3.0/eagain.html
new file mode 100644
index 0000000..e33f618
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/eagain.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: EAGAIN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: EAGAIN</h1>
+<p>Historically, the Berkeley DB interfaces have returned the POSIX error value
+EAGAIN to indicate a deadlock. This has been removed from the Berkeley DB 3.0
+release in order to make it possible for applications to distinguish
+between EAGAIN errors returned by the system and returns from Berkeley DB
+indicating deadlock.
+<p>The application should be searched for any occurrences of EAGAIN. For
+each of these, any that are checking for a deadlock return from Berkeley DB
+should be changed to check for the DB_LOCK_DEADLOCK return value.
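+<p>As an illustration only, here is a minimal sketch of such a change around a
+hypothetical DB-&gt;get call:
+<p><blockquote><pre>DB *dbp;
+DBT key, data;
+int ret;
+<p>
+ret = dbp-&gt;get(dbp, NULL, &key, &data, 0);
+if (ret == DB_LOCK_DEADLOCK)	/* formerly: ret == EAGAIN */
+	return (ret);</pre></blockquote>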
+<p>If, for any reason, this is a difficult change for the application to
+make, the <b>include/db.src</b> distribution file should be modified to
+translate all returns of DB_LOCK_DEADLOCK to EAGAIN. Search for the
+string EAGAIN in that file; there is a comment that describes how to make
+the change.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/envopen.html b/libdb/docs/ref/upgrade.3.0/envopen.html
new file mode 100644
index 0000000..8fad811
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/envopen.html
@@ -0,0 +1,156 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: environment open/close/unlink</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: environment open/close/unlink</h1>
+<p>The hardest part of upgrading your application from a 2.X code base to
+the 3.0 release is translating the Berkeley DB environment open, close and
+remove calls.
+<p>There were two logical changes in this part of the Berkeley DB interface.
+First, in Berkeley DB 3.0, there are no longer separate structures that
+represent each subsystem (for example, DB_LOCKTAB or DB_TXNMGR) and an
+overall <a href="../../api_c/env_class.html">DB_ENV</a> environment structure. Instead there is only the
+<a href="../../api_c/env_class.html">DB_ENV</a> structure. This means that <a href="../../api_c/env_class.html">DB_ENV</a> references
+should be passed around by your application instead of passing around
+DB_LOCKTAB or DB_TXNMGR references. This is likely to be a simple
+change for most applications as few applications use the lock_XXX,
+log_XXX, memp_XXX or txn_XXX interfaces to create Berkeley DB environments.
+<p>The second change is that there are no longer separate open, close, and
+unlink interfaces to the Berkeley DB subsystems. For example, in previous
+releases, it was possible to open a lock subsystem either using
+db_appinit or using the lock_open call. In the 3.0 release the XXX_open
+interfaces to the subsystems have been removed, and subsystems must now
+be opened using the 3.0 replacement for the db_appinit call.
+<p>To upgrade your application, first find each place your application opens,
+closes and/or removes a Berkeley DB environment. This will be code of the form:
+<p><blockquote><pre>db_appinit, db_appexit
+lock_open, lock_close, lock_unlink
+log_open, log_close, log_unlink
+memp_open, memp_close, memp_unlink
+txn_open, txn_close, txn_unlink</pre></blockquote>
+<p>Each of these groups of calls should be replaced with calls to:
+<p><blockquote><pre><a href="../../api_c/env_create.html">db_env_create</a>, <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>,
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a></pre></blockquote>
+<p>The <a href="../../api_c/env_create.html">db_env_create</a> call and the call to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>
+method replace the db_appinit, lock_open, log_open, memp_open and txn_open
+calls. The <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> method replaces the db_appexit,
+lock_close, log_close, memp_close and txn_close calls. The
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> call replaces the lock_unlink, log_unlink,
+memp_unlink and txn_unlink calls.
+<p>Here's an example creating a Berkeley DB environment using the 2.X interface:
+<p><blockquote><pre>/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home)
+ char *home;
+{
+ DB_ENV *dbenv;
+<p>
+ if ((dbenv = (DB_ENV *)calloc(sizeof(DB_ENV), 1)) == NULL)
+ return (NULL);
+<p>
+ if ((errno = db_appinit(home, NULL, dbenv,
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
+ DB_USE_ENVIRON)) == 0)
+ return (dbenv);
+<p>
+ free(dbenv);
+ return (NULL);
+}</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home, dbenvp)
+ char *home;
+ DB_ENV **dbenvp;
+{
+ int ret;
+ DB_ENV *dbenv;
+<p>
+ if ((ret = db_env_create(&dbenv, 0)) != 0)
+ return (ret);
+<p>
+ if ((ret = dbenv-&gt;open(dbenv, home, NULL,
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
+ DB_USE_ENVIRON, 0)) == 0) {
+ *dbenvp = dbenv;
+ return (0);
+ }
+<p>
+ (void)dbenv-&gt;close(dbenv, 0);
+ return (ret);
+}</pre></blockquote>
+<p>As you can see, the arguments to db_appinit and to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> are
+largely the same. There is some minor re-organization: the mapping is
+that arguments #1, 2, 3, and 4 to db_appinit become arguments #2, 3, 1
+and 4 to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>. There is one additional argument to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>, argument #5. For backward compatibility with the 2.X
+Berkeley DB releases, simply set that argument to 0.
+<p>It is only slightly more complex to translate calls to XXX_open to the
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method. Here's an example of creating a lock region
+using the 2.X interface:
+<p><blockquote><pre>lock_open(dir, DB_CREATE, 0664, dbenv, &regionp);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>if ((ret = db_env_create(&dbenv, 0)) != 0)
+ return (ret);
+<p>
+if ((ret = dbenv-&gt;open(dbenv,
+ dir, NULL, DB_CREATE | DB_INIT_LOCK, 0664)) == 0) {
+ *dbenvp = dbenv;
+ return (0);
+}</pre></blockquote>
+<p>Note that in this example, you no longer need the DB_LOCKTAB structure
+reference that was required in Berkeley DB 2.X releases.
+<p>The final issue with upgrading the db_appinit call is the DB_MPOOL_PRIVATE
+option previously provided for the db_appinit interface. If your
+application is using this flag, it should almost certainly use the new
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> interface. Regardless,
+you should carefully consider this change before converting to use the
+<a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flag.
+<p>Translating db_appexit or XXX_close calls to <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> is equally
+simple. Instead of taking a reference to a per-subsystem structure such
+as DB_LOCKTAB or DB_TXNMGR, all calls take a reference to a <a href="../../api_c/env_class.html">DB_ENV</a>
+structure. The calling sequence is otherwise unchanged. Note that as
+the application no longer allocates the memory for the DB_ENV structure,
+application code to discard it after the call to db_appexit() is no longer
+needed.
+<p>Translating XXX_unlink calls to <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> is slightly more complex.
+As with <a href="../../api_c/env_close.html">DB_ENV-&gt;close</a>, the call takes a reference to a <a href="../../api_c/env_class.html">DB_ENV</a>
+structure instead of a per-subsystem structure. The calling sequence is
+slightly different, however. Here is an example of removing a lock region
+using the 2.X interface:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ret = lock_unlink(dir, 1, dbenv);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code fragment would be written as:
+<p><blockquote><pre>DB_ENV *dbenv;
+<p>
+ret = dbenv-&gt;remove(dbenv, dir, NULL, DB_FORCE);</pre></blockquote>
+<p>The additional argument to the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> function is a
+configuration argument similar to that previously taken by db_appinit and
+now taken by the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method. For backward compatibility
+this new argument should simply be set to NULL. The force argument to
+XXX_unlink is now a flag value that is set by bitwise inclusively <b>OR</b>'ing it into the
+<a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> flag argument.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/func.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/func.html b/libdb/docs/ref/upgrade.3.0/func.html
new file mode 100644
index 0000000..d8e32c2
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/func.html
@@ -0,0 +1,70 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: function arguments</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: function arguments</h1>
+<p>In Berkeley DB 3.0, there are no longer separate structures that represent
+each subsystem (for example, DB_LOCKTAB or DB_TXNMGR), and an overall
+<a href="../../api_c/env_class.html">DB_ENV</a> environment structure. Instead there is only the
+<a href="../../api_c/env_class.html">DB_ENV</a> structure. This means that <a href="../../api_c/env_class.html">DB_ENV</a> references
+should be passed around by your application instead of passing around
+DB_LOCKTAB or DB_TXNMGR references.
+<p>Each of the following functions:
+<p><blockquote><pre>lock_detect
+lock_get
+lock_id
+lock_put
+lock_stat
+lock_vec</pre></blockquote>
+<p>should have its first argument, a reference to the DB_LOCKTAB structure,
+replaced with a reference to the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> structure. For
+example, the following line of code from a Berkeley DB 2.X application:
+<p><blockquote><pre>DB_LOCKTAB *lt;
+DB_LOCK lock;
+ ret = lock_put(lt, lock);</pre></blockquote>
+<p>should now be written as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_LOCK *lock;
+ ret = lock_put(dbenv, lock);</pre></blockquote>
+<p>Similarly, all of the functions:
+<p><blockquote><pre>log_archive
+log_compare
+log_file
+log_flush
+log_get
+log_put
+log_register
+log_stat
+log_unregister</pre></blockquote>
+<p>should have their DB_LOG argument replaced with a reference to a
+<a href="../../api_c/env_class.html">DB_ENV</a> structure, and the functions:
+<p><blockquote><pre>memp_fopen
+memp_register
+memp_stat
+memp_sync
+memp_trickle</pre></blockquote>
+<p>should have their DB_MPOOL argument replaced with a reference to a
+<a href="../../api_c/env_class.html">DB_ENV</a> structure.
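+<p>As an illustration only, the same kind of change applied to one of the log
+functions; the variable names are hypothetical:
+<p><blockquote><pre>DB_LOG *lg;
+DB_LSN lsn;
+	ret = log_flush(lg, &lsn);</pre></blockquote>
+<p>should now be written as follows:
+<p><blockquote><pre>DB_ENV *dbenv;
+DB_LSN lsn;
+	ret = log_flush(dbenv, &lsn);</pre></blockquote>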
+<p>You should remove all references to DB_LOCKTAB, DB_LOG, DB_MPOOL, and
+DB_TXNMGR structures from your application; they are no longer useful
+in any way. In fact, a simple way to identify all of the places that
+need to be upgraded is to remove every variable declared with one of
+those structure types and then compile: the compiler will complain
+about each remaining use that still needs to be upgraded.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/intro.html b/libdb/docs/ref/upgrade.3.0/intro.html
new file mode 100644
index 0000000..2f33d57
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 2.X release interfaces to the Berkeley DB 3.0 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.2.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/envopen.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/java.html b/libdb/docs/ref/upgrade.3.0/java.html
new file mode 100644
index 0000000..66c8f56
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/java.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: additional Java changes</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: additional Java changes</h1>
+<p>There are several additional types of exceptions thrown in the Berkeley DB 3.0
+Java API.
+<p>DbMemoryException and DbDeadlockException can be caught independently of
+DbException if you want to do special handling for these kinds of errors.
+Since they are subclassed from DbException, a try block that catches
+DbException will catch these also, so existing code is not required to change.
+If you do catch these new exceptions separately, their catch clauses must
+appear before the catch clause for DbException.
+<p>You will need to add a catch clause for java.io.FileNotFoundException,
+since that can be thrown by the <a href="../../api_java/db_open.html">Db.open</a> and <a href="../../api_java/env_open.html">DbEnv.open</a> methods.
+<p>There are a number of smaller changes to the API that bring the C, C++
+and Java APIs much closer in terms of functionality and usage. Please
+refer to the pages for upgrading C applications for further details.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/join.html b/libdb/docs/ref/upgrade.3.0/join.html
new file mode 100644
index 0000000..e2e0eb7
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/join.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;join</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;join</h1>
+<p>Historically, the last two arguments to the Berkeley DB <a href="../../api_c/db_join.html">DB-&gt;join</a>
+interface were a flags value followed by a reference to a memory location
+to store the returned cursor object. In the Berkeley DB 3.0 release, the
+order of those two arguments has been swapped for consistency with other
+Berkeley DB interfaces.
+<p>The application should be searched for any occurrences of <a href="../../api_c/db_join.html">DB-&gt;join</a>.
+For each of these, the order of the last two arguments should be swapped.
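+<p>For example (an illustrative sketch only; the <b>dbp</b>, <b>curslist</b>
+and <b>join_curs</b> names are placeholders, not part of this guide):
+<p><blockquote><pre>DBC *curslist[3];	/* NULL-terminated list of secondary cursors. */
+DBC *join_curs;
+int ret;
+<p>
+/* Berkeley DB 2.X: the flags value precedes the returned cursor. */
+ret = dbp-&gt;join(dbp, curslist, 0, &join_curs);
+<p>
+/* Berkeley DB 3.0: the last two arguments are swapped. */
+ret = dbp-&gt;join(dbp, curslist, &join_curs, 0);</pre></blockquote>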
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/dbinfo.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/jump_set.html b/libdb/docs/ref/upgrade.3.0/jump_set.html
new file mode 100644
index 0000000..3b61bf6
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/jump_set.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_jump_set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_jump_set</h1>
+<p>The db_jump_set interface has been removed from the Berkeley DB 3.0 release,
+replaced by method calls on the <a href="../../api_c/env_class.html">DB_ENV</a> handle.
+<p>The following table lists the db_jump_set arguments previously used by
+applications and the methods that should now be used instead.
+<p><table border=1 align=center>
+<tr><th>db_jump_set argument</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>DB_FUNC_CLOSE</td><td><a href="../../api_c/set_func_close.html">db_env_set_func_close</a></td></tr>
+<tr><td>DB_FUNC_DIRFREE</td><td><a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a></td></tr>
+<tr><td>DB_FUNC_DIRLIST</td><td><a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a></td></tr>
+<tr><td>DB_FUNC_EXISTS</td><td><a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a></td></tr>
+<tr><td>DB_FUNC_FREE</td><td><a href="../../api_c/set_func_free.html">db_env_set_func_free</a></td></tr>
+<tr><td>DB_FUNC_FSYNC</td><td><a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a></td></tr>
+<tr><td>DB_FUNC_IOINFO</td><td><a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a></td></tr>
+<tr><td>DB_FUNC_MALLOC</td><td><a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a></td></tr>
+<tr><td>DB_FUNC_MAP</td><td><a href="../../api_c/set_func_map.html">db_env_set_func_map</a></td></tr>
+<tr><td>DB_FUNC_OPEN</td><td><a href="../../api_c/set_func_open.html">db_env_set_func_open</a></td></tr>
+<tr><td>DB_FUNC_READ</td><td><a href="../../api_c/set_func_read.html">db_env_set_func_read</a></td></tr>
+<tr><td>DB_FUNC_REALLOC</td><td><a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a></td></tr>
+<tr><td>DB_FUNC_RUNLINK</td><td>The DB_FUNC_RUNLINK functionality has been removed from the Berkeley DB
+3.0 release, and should be removed from the application.</td></tr>
+<tr><td>DB_FUNC_SEEK</td><td><a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a></td></tr>
+<tr><td>DB_FUNC_SLEEP</td><td><a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a></td></tr>
+<tr><td>DB_FUNC_UNLINK</td><td><a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a></td></tr>
+<tr><td>DB_FUNC_UNMAP</td><td><a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a></td></tr>
+<tr><td>DB_FUNC_WRITE</td><td><a href="../../api_c/set_func_write.html">db_env_set_func_write</a></td></tr>
+<tr><td>DB_FUNC_YIELD</td><td><a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></td></tr>
+</table>
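+<p>For example, an application that previously passed the DB_FUNC_YIELD
+argument to db_jump_set would now make a call such as the following (an
+illustrative sketch only; <b>my_yield</b> is a placeholder for the
+application's own yield function):
+<p><blockquote><pre>extern int my_yield(void);
+<p>
+/* Berkeley DB 3.0: configure the yield function once for the application. */
+(void)db_env_set_func_yield(my_yield);</pre></blockquote>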
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/eacces.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/value_set.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/lock_detect.html b/libdb/docs/ref/upgrade.3.0/lock_detect.html
new file mode 100644
index 0000000..4f644f3
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/lock_detect.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_detect</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_detect</h1>
+<p>An additional argument has been added to the lock_detect interface.
+<p>The application should be searched for any occurrences of lock_detect.
+For each one, a NULL argument should be appended to the current arguments.
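+<p>A sketch of the change (the handle and flags shown are placeholders; the
+appended NULL is the point of the example):
+<p><blockquote><pre>/* Berkeley DB 2.X. */
+ret = lock_detect(dbenv, 0, DB_LOCK_DEFAULT);
+<p>
+/* Berkeley DB 3.0: a NULL argument is appended. */
+ret = lock_detect(dbenv, 0, DB_LOCK_DEFAULT, NULL);</pre></blockquote>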
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/lock_put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/lock_notheld.html b/libdb/docs/ref/upgrade.3.0/lock_notheld.html
new file mode 100644
index 0000000..99a1345
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/lock_notheld.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB_LOCK_NOTHELD</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB_LOCK_NOTHELD</h1>
+<p>Historically, the Berkeley DB lock_put and lock_vec interfaces could return
+the DB_LOCK_NOTHELD error to indicate that a lock could not be released
+because it was held by another locker. This error can no longer be returned
+under any circumstances. The application should be searched for any
+occurrences of DB_LOCK_NOTHELD. For each of these, the test and any
+error processing should be removed.
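+<p>For example, error handling along the following lines can simply be deleted
+(a sketch; <b>ret</b> holds the return value of lock_put or lock_vec):
+<p><blockquote><pre>if (ret == DB_LOCK_NOTHELD) {
+    /* This case can no longer occur; remove the test and its handling. */
+}</pre></blockquote>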
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/eagain.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/lock_put.html b/libdb/docs/ref/upgrade.3.0/lock_put.html
new file mode 100644
index 0000000..40ab194
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/lock_put.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_put</h1>
+<p>An argument change has been made in the lock_put interface.
+<p>The application should be searched for any occurrences of lock_put.
+For each one, instead of passing a DB_LOCK variable as the last argument
+to the function, the address of the DB_LOCK variable should be passed.
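+<p>A sketch of the change (the <b>dbenv</b> name is a placeholder; the handle
+argument itself is discussed elsewhere in this chapter):
+<p><blockquote><pre>DB_LOCK lock;
+int ret;
+<p>
+/* Berkeley DB 2.X: the DB_LOCK variable itself was passed. */
+ret = lock_put(dbenv, lock);
+<p>
+/* Berkeley DB 3.0: pass the address of the DB_LOCK variable. */
+ret = lock_put(dbenv, &lock);</pre></blockquote>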
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/lock_stat.html b/libdb/docs/ref/upgrade.3.0/lock_stat.html
new file mode 100644
index 0000000..0c06fc4
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/lock_stat.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: lock_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: lock_stat</h1>
+<p>The <b>st_magic</b>, <b>st_version</b>, <b>st_numobjs</b> and
+<b>st_refcnt</b> fields returned from the lock_stat interface
+have been removed, and this information is no longer available.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/lock_detect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/log_register.html b/libdb/docs/ref/upgrade.3.0/log_register.html
new file mode 100644
index 0000000..77f9a40
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/log_register.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: log_register</h1>
+<p>An argument has been removed from the log_register interface. The
+application should be searched for any occurrences of log_register. In
+each of these, the DBTYPE argument (the fourth argument) should be
+removed.
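+<p>A sketch of the change (the arguments other than the DBTYPE value are shown
+only as placeholders):
+<p><blockquote><pre>/* Berkeley DB 2.X: the fourth argument is a DBTYPE. */
+ret = log_register(dbenv, dbp, name, DB_BTREE, &fid);
+<p>
+/* Berkeley DB 3.0: the DBTYPE argument is removed. */
+ret = log_register(dbenv, dbp, name, &fid);</pre></blockquote>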
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/lock_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/log_stat.html b/libdb/docs/ref/upgrade.3.0/log_stat.html
new file mode 100644
index 0000000..fc7f0a4
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/log_stat.html
@@ -0,0 +1,24 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: log_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: log_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the log_stat interface
+has been removed, and this information is no longer available.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/memp_stat.html b/libdb/docs/ref/upgrade.3.0/memp_stat.html
new file mode 100644
index 0000000..5d7acd7
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/memp_stat.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: memp_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: memp_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the memp_stat interface
+has been removed, and this information is no longer available.
+<p>The <b>st_cachesize</b> field returned from the memp_stat interface
+has been replaced with two new fields, <b>st_gbytes</b> and
+<b>st_bytes</b>.
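+<p>Where an application previously read the total cache size from the single
+<b>st_cachesize</b> field, the value can be reconstructed from the two new
+fields (a sketch; <b>sp</b> is a placeholder for the returned statistics
+structure, and <b>st_gbytes</b> is expressed in gigabytes):
+<p><blockquote><pre>DB_MPOOL_STAT *sp;
+unsigned long cache_bytes;
+<p>
+/* st_gbytes holds whole gigabytes, st_bytes the remainder. */
+cache_bytes =
+    (unsigned long)sp-&gt;st_gbytes * 1024 * 1024 * 1024 + sp-&gt;st_bytes;</pre></blockquote>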
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/open.html b/libdb/docs/ref/upgrade.3.0/open.html
new file mode 100644
index 0000000..995942f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/open.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: database open/close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: database open/close</h1>
+<p>Database opens were changed in the Berkeley DB 3.0 release in a similar way to
+environment opens.
+<p>To upgrade your application, first find each place your application opens
+a database, that is, calls the db_open function. Each of these calls
+should be replaced with calls to <a href="../../api_c/db_create.html">db_create</a> and <a href="../../api_c/db_open.html">DB-&gt;open</a>.
+<p>Here's an example creating a Berkeley DB database using the 2.X interface:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_open(DATABASE,
+ DB_BTREE, DB_CREATE, 0664, dbenv, NULL, &dbp)) != 0)
+ return (ret);</pre></blockquote>
+<p>In the Berkeley DB 3.0 release, this code would be written as:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+<p>
+if ((ret = dbp-&gt;open(dbp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ (void)dbp-&gt;close(dbp, 0);
+ return (ret);
+}</pre></blockquote>
+<p>As you can see, the arguments to db_open and to <a href="../../api_c/db_open.html">DB-&gt;open</a> are
+largely the same. They have been reorganized slightly, and the
+enclosing <a href="../../api_c/env_class.html">DB_ENV</a> structure is now specified when the <a href="../../api_c/db_class.html">DB</a> object
+is created using the <a href="../../api_c/db_create.html">db_create</a> interface. <a href="../../api_c/db_open.html">DB-&gt;open</a> also takes one
+additional argument, its third. For backward
+compatibility with the 2.X Berkeley DB releases, simply set that argument to
+NULL.
+<p>There are two additional issues with the db_open call.
+<p>First, it was possible in the 2.X releases for an application to provide,
+as the database environment, an environment that did not contain a shared
+memory buffer pool, and Berkeley DB would create a private one automatically.
+This functionality is no longer available; applications must specify the
+<a href="../../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a> flag if databases are going to be opened in the
+environment.
+<p>The final issue with upgrading the db_open call is that the DB_INFO
+structure is no longer used, having been replaced by individual methods
+on the <a href="../../api_c/db_class.html">DB</a> handle. That change is discussed in detail later in
+this chapter.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/dbenv.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/xa.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/rmw.html b/libdb/docs/ref/upgrade.3.0/rmw.html
new file mode 100644
index 0000000..926e3bb
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/rmw.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB_RMW</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB_RMW</h1>
+<p>The following change applies only to applications using the
+Berkeley DB Concurrent Data Store product. If your application is not using that product,
+you can ignore this change.
+<p>Historically, the Berkeley DB <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> interface took the DB_RMW flag
+to indicate that the created cursor would be used for write operations on
+the database. This flag has been renamed to the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a>
+flag.
+<p>The application should be searched for any occurrences of DB_RMW. For
+each of these, any that are arguments to the <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a> function
+should be changed to pass in the <a href="../../api_c/db_cursor.html#DB_WRITECURSOR">DB_WRITECURSOR</a> flag instead.
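+<p>A sketch of the change (the <b>dbp</b> and <b>dbc</b> names are
+placeholders):
+<p><blockquote><pre>DBC *dbc;
+int ret;
+<p>
+/* Berkeley DB 2.X (Berkeley DB Concurrent Data Store). */
+ret = dbp-&gt;cursor(dbp, NULL, &dbc, DB_RMW);
+<p>
+/* Berkeley DB 3.0. */
+ret = dbp-&gt;cursor(dbp, NULL, &dbc, DB_WRITECURSOR);</pre></blockquote>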
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/lock_notheld.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/stat.html b/libdb/docs/ref/upgrade.3.0/stat.html
new file mode 100644
index 0000000..18f7b90
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/stat.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: DB-&gt;stat</h1>
+<p>The <b>bt_flags</b> field returned from the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface
+for Btree and Recno databases has been removed, and this information is
+no longer available.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/join.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/close.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/toc.html b/libdb/docs/ref/upgrade.3.0/toc.html
new file mode 100644
index 0000000..5942806
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/toc.html
@@ -0,0 +1,55 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 3.0: introduction</a>
+<li><a href="envopen.html">Release 3.0: environment open/close/unlink</a>
+<li><a href="func.html">Release 3.0: function arguments</a>
+<li><a href="dbenv.html">Release 3.0: the DB_ENV structure</a>
+<li><a href="open.html">Release 3.0: database open/close</a>
+<li><a href="xa.html">Release 3.0: db_xa_open</a>
+<li><a href="db.html">Release 3.0: the DB structure</a>
+<li><a href="dbinfo.html">Release 3.0: the DBINFO structure</a>
+<li><a href="join.html">Release 3.0: DB-&gt;join</a>
+<li><a href="stat.html">Release 3.0: DB-&gt;stat</a>
+<li><a href="close.html">Release 3.0: DB-&gt;sync and DB-&gt;close</a>
+<li><a href="lock_put.html">Release 3.0: lock_put</a>
+<li><a href="lock_detect.html">Release 3.0: lock_detect</a>
+<li><a href="lock_stat.html">Release 3.0: lock_stat</a>
+<li><a href="log_register.html">Release 3.0: log_register</a>
+<li><a href="log_stat.html">Release 3.0: log_stat</a>
+<li><a href="memp_stat.html">Release 3.0: memp_stat</a>
+<li><a href="txn_begin.html">Release 3.0: txn_begin</a>
+<li><a href="txn_commit.html">Release 3.0: txn_commit</a>
+<li><a href="txn_stat.html">Release 3.0: txn_stat</a>
+<li><a href="rmw.html">Release 3.0: DB_RMW</a>
+<li><a href="lock_notheld.html">Release 3.0: DB_LOCK_NOTHELD</a>
+<li><a href="eagain.html">Release 3.0: EAGAIN</a>
+<li><a href="eacces.html">Release 3.0: EACCES</a>
+<li><a href="jump_set.html">Release 3.0: db_jump_set</a>
+<li><a href="value_set.html">Release 3.0: db_value_set</a>
+<li><a href="dbenv_cxx.html">Release 3.0: the DbEnv class for C++ and Java</a>
+<li><a href="db_cxx.html">Release 3.0: the Db class for C++ and Java</a>
+<li><a href="cxx.html">Release 3.0: additional C++ changes</a>
+<li><a href="java.html">Release 3.0: additional Java changes</a>
+<li><a href="disk.html">Release 3.0: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/txn_begin.html b/libdb/docs/ref/upgrade.3.0/txn_begin.html
new file mode 100644
index 0000000..a723eca
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/txn_begin.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_begin</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_begin</h1>
+<p>An additional argument has been added to the txn_begin interface.
+<p>The application should be searched for any occurrences of txn_begin.
+For each one, an argument of 0 should be appended to the current
+arguments.
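+<p>A sketch of the change (the <b>dbenv</b> and <b>tid</b> names are
+placeholders):
+<p><blockquote><pre>DB_TXN *tid;
+int ret;
+<p>
+/* Berkeley DB 2.X. */
+ret = txn_begin(dbenv, NULL, &tid);
+<p>
+/* Berkeley DB 3.0: an argument of 0 is appended. */
+ret = txn_begin(dbenv, NULL, &tid, 0);</pre></blockquote>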
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/memp_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/txn_commit.html b/libdb/docs/ref/upgrade.3.0/txn_commit.html
new file mode 100644
index 0000000..47567b5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/txn_commit.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_commit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_commit</h1>
+<p>An additional argument has been added to the txn_commit interface.
+<p>The application should be searched for any occurrences of txn_commit.
+For each one, an argument of 0 should be appended to the current
+arguments.
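+<p>A sketch of the change (the <b>tid</b> name is a placeholder):
+<p><blockquote><pre>/* Berkeley DB 2.X. */
+ret = txn_commit(tid);
+<p>
+/* Berkeley DB 3.0: an argument of 0 is appended. */
+ret = txn_commit(tid, 0);</pre></blockquote>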
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/txn_begin.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/txn_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/txn_stat.html b/libdb/docs/ref/upgrade.3.0/txn_stat.html
new file mode 100644
index 0000000..9e8f98e
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/txn_stat.html
@@ -0,0 +1,24 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: txn_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: txn_stat</h1>
+<p>The <b>st_refcnt</b> field returned from the txn_stat interface
+has been removed, and this information is no longer available.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/txn_commit.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/rmw.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/value_set.html b/libdb/docs/ref/upgrade.3.0/value_set.html
new file mode 100644
index 0000000..d51412e
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/value_set.html
@@ -0,0 +1,42 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_value_set</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_value_set</h1>
+<p>The db_value_set interface has been removed from the Berkeley DB 3.0 release,
+replaced by method calls on the <a href="../../api_c/env_class.html">DB_ENV</a> handle.
+<p>The following table lists the db_value_set arguments previously used by
+applications and the interfaces that should now be used instead.
+<p><table border=1 align=center>
+<tr><th>db_value_set argument</th><th>Berkeley DB 3.X method</th></tr>
+<tr><td>DB_MUTEX_LOCKS</td><td>dbenv_set_mutexlocks</td></tr>
+<tr><td>DB_REGION_ANON</td><td>The DB_REGION_ANON functionality has
+been replaced by the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> and <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flags
+to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> function. A direct translation is not
+available, please review the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> manual page for more
+information.</td></tr>
+<tr><td>DB_REGION_INIT</td><td>dbenv_set_region_init</td></tr>
+<tr><td>DB_REGION_NAME</td><td>The DB_REGION_NAME functionality has
+been replaced by the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> and <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a> flags
+to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> function. A direct translation is not
+available, please review the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> manual page for more
+information.</td></tr>
+<tr><td>DB_TSL_SPINS</td><td>dbenv_set_tas_spins</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/jump_set.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/dbenv_cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.0/xa.html b/libdb/docs/ref/upgrade.3.0/xa.html
new file mode 100644
index 0000000..2756e69
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.0/xa.html
@@ -0,0 +1,34 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.0: db_xa_open</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.0: db_xa_open</h1>
+<p>The following change applies only to applications using Berkeley DB as an XA
+Resource Manager. If your application is not using Berkeley DB in this way,
+you can ignore this change.
+<p>The db_xa_open function has been replaced with the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a>
+flag to the <a href="../../api_c/db_create.html">db_create</a> function. All calls to db_xa_open should
+be replaced with calls to <a href="../../api_c/db_create.html">db_create</a> with the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a>
+flag set, followed by a call to the <a href="../../api_c/db_open.html">DB-&gt;open</a> function.
+<p>A similar change has been made for the C++ API, where the
+<a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag should be specified to the Db constructor. All
+calls to the Db::xa_open method should be replaced by specifying the
+<a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag to the Db constructor, followed by a call to
+the Db::open method.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/open.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.0/db.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/btstat.html b/libdb/docs/ref/upgrade.3.1/btstat.html
new file mode 100644
index 0000000..c077e88
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/btstat.html
@@ -0,0 +1,51 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB-&gt;stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB-&gt;stat</h1>
+<p>For Btree database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>bt_nrecs</b> has been removed, replaced by two fields:
+<b>bt_nkeys</b> and <b>bt_ndata</b>. The <b>bt_nkeys</b> field returns
+a count of the unique keys in the database. The <b>bt_ndata</b> field
+returns a count of the key/data pairs in the database. Neither exactly
+matches the previous value of the <b>bt_nrecs</b> field, which returned
+a count of keys in the database, but, in the case of Btree databases,
+could overcount because it sometimes counted duplicate data items as unique
+keys. The application should be searched for any uses of the
+<b>bt_nrecs</b> field and the field should be changed to be either
+<b>bt_nkeys</b> or <b>bt_ndata</b>, whichever is more appropriate.
+<p>For Hash database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>hash_nrecs</b> has been removed, replaced by two fields:
+<b>hash_nkeys</b> and <b>hash_ndata</b>. The <b>hash_nkeys</b> field
+returns a count of the unique keys in the database. The
+<b>hash_ndata</b> field returns a count of the key/data pairs in the
+database. The new <b>hash_nkeys</b> field exactly matches the previous
+value of the <b>hash_nrecs</b> field. The application should be searched
+for any uses of the <b>hash_nrecs</b> field, and the field should be
+changed to be <b>hash_nkeys</b>.
+<p>For Queue database statistics, the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> interface field
+<b>qs_nrecs</b> has been removed, replaced by two fields:
+<b>qs_nkeys</b> and <b>qs_ndata</b>. The <b>qs_nkeys</b> field returns
+a count of the unique keys in the database. The <b>qs_ndata</b> field
+returns a count of the key/data pairs in the database. The new
+<b>qs_nkeys</b> field exactly matches the previous value of the
+<b>qs_nrecs</b> field. The application should be searched for any uses
+of the <b>qs_nrecs</b> field, and the field should be changed to be
+<b>qs_nkeys</b>.
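+<p>A sketch of the source change for a Btree database (<b>sp</b> is a
+placeholder for the statistics structure returned by <a href="../../api_c/db_stat.html">DB-&gt;stat</a>):
+<p><blockquote><pre>DB_BTREE_STAT *sp;
+<p>
+/* Berkeley DB 3.0. */
+printf("records: %lu\n", (unsigned long)sp-&gt;bt_nrecs);
+<p>
+/* Berkeley DB 3.1: use bt_nkeys or bt_ndata, whichever is appropriate. */
+printf("unique keys: %lu\n", (unsigned long)sp-&gt;bt_nkeys);
+printf("key/data pairs: %lu\n", (unsigned long)sp-&gt;bt_ndata);</pre></blockquote>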
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/config.html b/libdb/docs/ref/upgrade.3.1/config.html
new file mode 100644
index 0000000..756767f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/config.html
@@ -0,0 +1,36 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_ENV-&gt;open, DB_ENV-&gt;remove</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_ENV-&gt;open, DB_ENV-&gt;remove</h1>
+<p>In the Berkeley DB 3.1 release, the <b>config</b> argument to the
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> and <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> methods has been removed,
+replaced by additional methods on the <a href="../../api_c/env_class.html">DB_ENV</a> handle. If your
+application calls <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> or <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> with a NULL
+<b>config</b> argument, find those calls and remove the config
+argument. If your application passes a non-NULL <b>config</b>
+argument, the string values in that argument are replaced with calls to
+<a href="../../api_c/env_class.html">DB_ENV</a> methods as follows:
+<p><table border=1 align=center>
+<tr><th>Previous config string</th><th>Berkeley DB 3.1 version method</th></tr>
+<tr><td>DB_DATA_DIR</td><td><a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a></td></tr>
+<tr><td>DB_LOG_DIR</td><td><a href="../../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a></td></tr>
+<tr><td>DB_TMP_DIR</td><td><a href="../../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a></td></tr>
+</table>
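+<p>For example, an application that previously passed the DB_DATA_DIR string
+through the <b>config</b> argument would now configure the environment before
+opening it (a sketch; the directory name is a placeholder):
+<p><blockquote><pre>if ((ret = dbenv-&gt;set_data_dir(dbenv, "/database/files")) != 0)
+    return (ret);</pre></blockquote>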
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/disk.html b/libdb/docs/ref/upgrade.3.1/disk.html
new file mode 100644
index 0000000..d4c1b34
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/disk.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: upgrade requirements</h1>
+<p>Log file formats and the Btree, Queue, Recno and Hash Access Method
+database formats changed in the Berkeley DB 3.1 release. (The on-disk
+Btree/Recno format changed from version 7 to version 8. The on-disk
+Hash format changed from version 6 to version 7. The on-disk Queue
+format changed from version 1 to version 2.) Until the underlying
+databases are upgraded, the <a href="../../api_c/db_open.html">DB-&gt;open</a> method will return a
+<a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a> error.
+<p>An additional flag, <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a>, has been added to the
+<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a> method for this upgrade. Please review the
+<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a> documentation for further information.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
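+<p>As a sketch, an existing database file can be upgraded in place with a call
+such as the following (the filename is a placeholder; see the
+<a href="../../api_c/db_upgrade.html">DB-&gt;upgrade</a> documentation for when the <a href="../../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag must be
+specified):
+<p><blockquote><pre>if ((ret = dbp-&gt;upgrade(dbp, "database.db", 0)) != 0)
+    return (ret);</pre></blockquote>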
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/dup.html b/libdb/docs/ref/upgrade.3.1/dup.html
new file mode 100644
index 0000000..786364b
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/dup.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: identical duplicate data items</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: identical duplicate data items</h1>
+<p>In previous releases of Berkeley DB, it was not an error to store identical
+duplicate data items, or, for those that just like the way it sounds,
+duplicate duplicates. However, there were implementation bugs where
+storing duplicate duplicates could cause database corruption.
+<p>In this release, applications may store identical duplicate data items
+as long as the data items are unsorted. It is an error to attempt to
+store identical duplicate data items when duplicates are being stored
+in a sorted order. This restriction is expected to be lifted in a future
+release. See <a href="../../ref/am_conf/dup.html">Duplicate data items</a>
+for more information.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/env.html b/libdb/docs/ref/upgrade.3.1/env.html
new file mode 100644
index 0000000..096cc92
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/env.html
@@ -0,0 +1,54 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: environment configuration</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: environment configuration</h1>
+<p>A set of <a href="../../api_c/env_class.html">DB_ENV</a> configuration methods that were not environment
+specific, but instead affected the entire application space, has
+been removed from the <a href="../../api_c/env_class.html">DB_ENV</a> object and replaced by static
+functions. The following table lists the <a href="../../api_c/env_class.html">DB_ENV</a> methods previously
+available to applications and the static functions that should now be used
+instead.
+<p><table border=1 align=center>
+<tr><th><a href="../../api_c/env_class.html">DB_ENV</a> method</th><th>Berkeley DB 3.1 function</th></tr>
+<tr><td>DB_ENV-&gt;set_func_close</td><td><a href="../../api_c/set_func_close.html">db_env_set_func_close</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_dirfree</td><td><a href="../../api_c/set_func_dirfree.html">db_env_set_func_dirfree</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_dirlist</td><td><a href="../../api_c/set_func_dirlist.html">db_env_set_func_dirlist</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_exists</td><td><a href="../../api_c/set_func_exists.html">db_env_set_func_exists</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_free</td><td><a href="../../api_c/set_func_free.html">db_env_set_func_free</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_fsync</td><td><a href="../../api_c/set_func_fsync.html">db_env_set_func_fsync</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_ioinfo</td><td><a href="../../api_c/set_func_ioinfo.html">db_env_set_func_ioinfo</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_malloc</td><td><a href="../../api_c/set_func_malloc.html">db_env_set_func_malloc</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_map</td><td><a href="../../api_c/set_func_map.html">db_env_set_func_map</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_open</td><td><a href="../../api_c/set_func_open.html">db_env_set_func_open</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_read</td><td><a href="../../api_c/set_func_read.html">db_env_set_func_read</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_realloc</td><td><a href="../../api_c/set_func_realloc.html">db_env_set_func_realloc</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_rename</td><td><a href="../../api_c/set_func_rename.html">db_env_set_func_rename</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_seek</td><td><a href="../../api_c/set_func_seek.html">db_env_set_func_seek</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_sleep</td><td><a href="../../api_c/set_func_sleep.html">db_env_set_func_sleep</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_unlink</td><td><a href="../../api_c/set_func_unlink.html">db_env_set_func_unlink</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_unmap</td><td><a href="../../api_c/set_func_unmap.html">db_env_set_func_unmap</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_write</td><td><a href="../../api_c/set_func_write.html">db_env_set_func_write</a></td></tr>
+<tr><td>DB_ENV-&gt;set_func_yield</td><td><a href="../../api_c/set_func_yield.html">db_env_set_func_yield</a></td></tr>
+<tr><td>DB_ENV-&gt;set_pageyield</td><td>dbenv_set_pageyield</td></tr>
+<tr><td>DB_ENV-&gt;set_region_init</td><td>dbenv_set_region_init</td></tr>
+<tr><td>DB_ENV-&gt;set_mutexlocks</td><td>dbenv_set_mutexlocks</td></tr>
+<tr><td>DB_ENV-&gt;set_tas_spins</td><td>dbenv_set_tas_spins</td></tr>
+</table>
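+<p>For example, an application that previously configured a private memory
+allocator through the environment handle might make the following change.
+This is only a sketch: the allocator name <b>app_malloc</b> is hypothetical,
+and the same pattern applies to the other functions listed above.
+<p><blockquote><pre>void *app_malloc(size_t);
+<p>
+/* Berkeley DB 3.0: configured through the environment handle. */
+dbenv-&gt;set_func_malloc(dbenv, app_malloc);
+<p>
+/* Berkeley DB 3.1: configured once for the entire application. */
+(void)db_env_set_func_malloc(app_malloc);</pre></blockquote>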
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/intro.html b/libdb/docs/ref/upgrade.3.1/intro.html
new file mode 100644
index 0000000..8206afc
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.0 release interfaces to the Berkeley DB 3.1 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/log_register.html b/libdb/docs/ref/upgrade.3.1/log_register.html
new file mode 100644
index 0000000..1b31998
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/log_register.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: log_register</h1>
+<p>The arguments to the log_register and log_unregister interfaces have
+changed. Instead of returning (and passing in) a logging file ID, a
+reference to the <a href="../../api_c/db_class.html">DB</a> structure being registered (or unregistered)
+is passed. The application should be searched for any occurrences of
+log_register and log_unregister. For each one, change the arguments to
+be a reference to the <a href="../../api_c/db_class.html">DB</a> structure being registered or
+unregistered.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/sysmem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/logalloc.html b/libdb/docs/ref/upgrade.3.1/logalloc.html
new file mode 100644
index 0000000..9f54e86
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/logalloc.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: log file pre-allocation</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: log file pre-allocation</h1>
+<p>This change only affects Win/32 applications.
+<p>On Win/32 platforms Berkeley DB no longer pre-allocates log files. The problem
+was a noticeable performance spike as each log file was created. To turn
+this feature back on, search for the flag DB_OSO_LOG in the source file
+<b>log/log_put.c</b> and make the change described there, or contact
+Sleepycat Software for assistance.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/memp_register.html b/libdb/docs/ref/upgrade.3.1/memp_register.html
new file mode 100644
index 0000000..b0ce440
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/memp_register.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: memp_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: memp_register</h1>
+<p>An additional argument has been added to the <b>pgin</b> and
+<b>pgout</b> functions provided to the memp_register interface.
+The application should be searched for any occurrences of
+memp_register. For each one, if <b>pgin</b> or <b>pgout</b>
+functions are specified, the <b>pgin</b> and <b>pgout</b> functions
+should be modified to take an initial argument of a <b>DB_ENV *</b>.
+This argument is intended to support better error reporting for
+applications, and may be entirely ignored by the <b>pgin</b> and
+<b>pgout</b> functions themselves.
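+<p>As a minimal sketch of the change (the function name <b>my_pgin</b> is
+hypothetical; the remaining arguments are unchanged from previous
+releases):
+<p><blockquote><pre>/* Berkeley DB 3.0 pgin/pgout prototype. */
+int my_pgin(db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+<p>
+/* Berkeley DB 3.1 pgin/pgout prototype: a DB_ENV * has been added. */
+int my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);</pre></blockquote>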
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/txn_check.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/put.html b/libdb/docs/ref/upgrade.3.1/put.html
new file mode 100644
index 0000000..f0d5831
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/put.html
@@ -0,0 +1,65 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB-&gt;put</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB-&gt;put</h1>
+<p>For the Queue and Recno access methods, when the <a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a> flag
+is specified to the <a href="../../api_c/db_put.html">DB-&gt;put</a> interface, the allocated record
+number is returned to the application in the <b>key</b> <a href="../../api_c/dbt_class.html">DBT</a>
+argument. In previous releases of Berkeley DB, this <a href="../../api_c/dbt_class.html">DBT</a> structure
+did not follow the usual <a href="../../api_c/dbt_class.html">DBT</a> conventions. For example, it was
+not possible to cause Berkeley DB to allocate space for the returned record
+number. Rather, it was always assumed that the <b>data</b> field of
+the <b>key</b> structure referred to memory that could be used as
+storage for a db_recno_t type.
+<p>As of the Berkeley DB 3.1.0 release, the <b>key</b> structure behaves as
+described in the <a href="../../api_c/dbt_class.html">DBT</a> C++/Java class or C structure documentation.
+<p>Applications which are using the <a href="../../api_c/db_put.html#DB_APPEND">DB_APPEND</a> flag for Queue and
+Recno access method databases will require a change to upgrade to the
+Berkeley DB 3.1 releases. The simplest change is likely to be to add the
+<a href="../../api_c/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flag to the <b>key</b> structure. For example,
+code that appears as follows:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+key.data = &recno;
+key.size = sizeof(recno);
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+key.data = &recno;
+key.ulen = sizeof(recno);
+key.flags = DB_DBT_USERMEM;
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<p>Note that the <b>ulen</b> field is now set as well as the flag value.
+An alternative change would be:
+<p><blockquote><pre>DBT key;
+db_recno_t recno;
+<p>
+memset(&key, 0, sizeof(DBT));
+DB-&gt;put(DB, NULL, &key, &data, DB_APPEND);
+recno = *(db_recno_t *)key.data;
+printf("new record number is %lu\n", (u_long)recno);</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/dup.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/set_feedback.html b/libdb/docs/ref/upgrade.3.1/set_feedback.html
new file mode 100644
index 0000000..ceb8d75
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/set_feedback.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_ENV-&gt;set_feedback, DB-&gt;set_feedback</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_ENV-&gt;set_feedback, DB-&gt;set_feedback</h1>
+<p>Starting with the 3.1 release of Berkeley DB, the <a href="../../api_c/env_set_feedback.html">DB_ENV-&gt;set_feedback</a>
+and <a href="../../api_c/db_set_feedback.html">DB-&gt;set_feedback</a> methods may return an error value, that is, they
+are no longer declared as returning no value, instead they return an int
+or throw an exception as appropriate when an error occurs.
+<p>If your application calls these functions, you may want to check for a
+possible error on return.
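+<p>For example, a C application might now check the return value as follows
+(a minimal sketch; the callback name <b>my_feedback</b> is hypothetical):
+<p><blockquote><pre>void my_feedback(DB_ENV *, int, int);
+int ret;
+<p>
+if ((ret = dbenv-&gt;set_feedback(dbenv, my_feedback)) != 0) {
+	dbenv-&gt;err(dbenv, ret, "DB_ENV-&gt;set_feedback");
+	return (ret);
+}</pre></blockquote>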
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/set_tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_paniccall.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/set_paniccall.html b/libdb/docs/ref/upgrade.3.1/set_paniccall.html
new file mode 100644
index 0000000..fff271d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/set_paniccall.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_ENV-&gt;set_paniccall, DB-&gt;set_paniccall</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_ENV-&gt;set_paniccall, DB-&gt;set_paniccall</h1>
+<p>Starting with the 3.1 release of Berkeley DB, the <a href="../../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a>
+and <a href="../../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a> methods may return an error value, that is, they
+are no longer declared as returning no value, instead they return an int
+or throw an exception as appropriate when an error occurs.
+<p>If your application calls these functions, you may want to check for a
+possible error on return.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/put.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/set_tx_recover.html b/libdb/docs/ref/upgrade.3.1/set_tx_recover.html
new file mode 100644
index 0000000..b230807
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/set_tx_recover.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_ENV-&gt;set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_ENV-&gt;set_tx_recover</h1>
+<p>The redo parameter of the function passed to DB_ENV-&gt;set_tx_recover
+used to be an integer set to any one of a number of #defined values. In
+the 3.1 release of Berkeley DB, the redo parameter has been replaced by the op
+parameter, which is of the enumerated type db_recops.
+<p>If your application calls DB_ENV-&gt;set_tx_recover, then find the
+function referred to by the call. Replace the flag values in that
+function as follows:
+<p><table border=1 align=center>
+<tr><th>Previous flag</th><th>Berkeley DB 3.1 version flag</th></tr>
+<tr><td>TXN_BACKWARD_ROLL</td><td>DB_TXN_BACKWARD_ROLL</td></tr>
+<tr><td>TXN_FORWARD_ROLL</td><td>DB_TXN_FORWARD_ROLL</td></tr>
+<tr><td>TXN_OPENFILES</td><td>DB_TXN_OPENFILES</td></tr>
+<tr><td>TXN_REDO</td><td>DB_TXN_FORWARD_ROLL</td></tr>
+<tr><td>TXN_UNDO</td><td>DB_TXN_ABORT</td></tr>
+</table>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/set_feedback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/sysmem.html b/libdb/docs/ref/upgrade.3.1/sysmem.html
new file mode 100644
index 0000000..f73eaf1
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/sysmem.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_SYSTEM_MEM</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_SYSTEM_MEM</h1>
+<p>Using the <a href="../../api_c/env_open.html#DB_SYSTEM_MEM">DB_SYSTEM_MEM</a> option on UNIX systems now requires the
+specification of a base system memory segment ID, using the
+<a href="../../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a> method. Any valid segment ID may be specified, for
+example, one returned by the UNIX <b>ftok</b>(3) interface.
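+<p>For example (a minimal sketch; the pathname passed to <b>ftok</b> is
+hypothetical), an application might select a segment ID before calling
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>:
+<p><blockquote><pre>#include &lt;sys/types.h&gt;
+#include &lt;sys/ipc.h&gt;
+<p>
+key_t segid;
+<p>
+segid = ftok("/var/myapp/dbhome", 1);
+(void)dbenv-&gt;set_shm_key(dbenv, (long)segid);</pre></blockquote>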
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/btstat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/tcl.html b/libdb/docs/ref/upgrade.3.1/tcl.html
new file mode 100644
index 0000000..14cda3d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/tcl.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: Tcl API</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: Tcl API</h1>
+<p>The Berkeley DB Tcl API has been modified so that the <b>-mpool</b> option to
+the <b>berkdb env</b> command is now the default behavior. The Tcl API
+has also been modified so that the <b>-txn</b> option to the
+<b>berkdb env</b> command implies the <b>-lock</b> and <b>-log</b>
+options. Tcl scripts should be updated to remove the <b>-mpool</b>,
+<b>-lock</b> and <b>-log</b> options.
+<p>The Berkeley DB Tcl API has been modified to follow the Tcl standard rules
+for integer conversion; for example, if the first two characters of a
+record number are "0x", the record number is expected to be in
+hexadecimal form.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/tmp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/tmp.html b/libdb/docs/ref/upgrade.3.1/tmp.html
new file mode 100644
index 0000000..f183b9b
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/tmp.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: DB_TMP_DIR</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: DB_TMP_DIR</h1>
+<p>This change only affects Win/32 applications that create in-memory
+databases.
+<p>On Win/32 platforms an additional test has been added when searching for
+the appropriate directory in which to create the temporary files that are
+used to back in-memory databases. Berkeley DB now uses any return value from
+the GetTempPath interface as the temporary file directory name before
+resorting to the static list of compiled-in pathnames.
+<p>If the system registry does not return the same directory as Berkeley DB has
+been using previously, this change could cause temporary backing files to
+move to a new directory when applications are upgraded to the 3.1 release.
+In extreme cases, this could create (or fix) security problems if the file
+protection modes for the system registry directory are different from
+those on the directory previously used by Berkeley DB.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/tcl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/logalloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/toc.html b/libdb/docs/ref/upgrade.3.1/toc.html
new file mode 100644
index 0000000..327924b
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/toc.html
@@ -0,0 +1,41 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 3.1: introduction</a>
+<li><a href="config.html">Release 3.1: DB_ENV-&gt;open, DB_ENV-&gt;remove</a>
+<li><a href="set_tx_recover.html">Release 3.1: DB_ENV-&gt;set_tx_recover</a>
+<li><a href="set_feedback.html">Release 3.1: DB_ENV-&gt;set_feedback, DB-&gt;set_feedback</a>
+<li><a href="set_paniccall.html">Release 3.1: DB_ENV-&gt;set_paniccall, DB-&gt;set_paniccall</a>
+<li><a href="put.html">Release 3.1: DB-&gt;put</a>
+<li><a href="dup.html">Release 3.1: identical duplicate data items</a>
+<li><a href="btstat.html">Release 3.1: DB-&gt;stat</a>
+<li><a href="sysmem.html">Release 3.1: DB_SYSTEM_MEM</a>
+<li><a href="log_register.html">Release 3.1: log_register</a>
+<li><a href="memp_register.html">Release 3.1: memp_register</a>
+<li><a href="txn_check.html">Release 3.1: txn_checkpoint</a>
+<li><a href="env.html">Release 3.1: environment configuration</a>
+<li><a href="tcl.html">Release 3.1: Tcl API</a>
+<li><a href="tmp.html">Release 3.1: DB_TMP_DIR</a>
+<li><a href="logalloc.html">Release 3.1: log file pre-allocation</a>
+<li><a href="disk.html">Release 3.1: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.1/txn_check.html b/libdb/docs/ref/upgrade.3.1/txn_check.html
new file mode 100644
index 0000000..de490bf
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.1/txn_check.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.1: txn_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.1: txn_checkpoint</h1>
+<p>An additional argument has been added to the txn_checkpoint interface.
+<p>The application should be searched for any occurrences of
+txn_checkpoint. For each one, an argument of 0 should be appended to
+the current arguments.
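+<p>For example, a call that previously appeared as follows (a minimal
+sketch, assuming <b>kbyte</b> and <b>min</b> arguments of 0):
+<p><blockquote><pre>ret = txn_checkpoint(dbenv, 0, 0);</pre></blockquote>
+<p>would be changed to:
+<p><blockquote><pre>ret = txn_checkpoint(dbenv, 0, 0, 0);</pre></blockquote>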
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/memp_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.1/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/callback.html b/libdb/docs/ref/upgrade.3.2/callback.html
new file mode 100644
index 0000000..35d37f4
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/callback.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB callback functions, app_private field</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB callback functions, app_private field</h1>
+<p>In the Berkeley DB 3.2 release, four application callback functions (the
+callback functions set by <a href="../../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>, <a href="../../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a> and
+<a href="../../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>) were modified to take a reference to a
+<a href="../../api_c/db_class.html">DB</a> object as their first argument. This change allows the Berkeley DB
+Java API to reasonably support these interfaces. There is currently no
+need for the callback functions to do anything with this additional
+argument.
+<p>C and C++ applications that specify their own Btree key comparison,
+Btree prefix comparison, duplicate data item comparison or Hash
+functions should modify these functions to take a reference to a
+<a href="../../api_c/db_class.html">DB</a> structure as their first argument. No further change is
+required.
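+<p>As a minimal sketch (the function name <b>my_bt_compare</b> is
+hypothetical), a Btree key comparison function would change as follows:
+<p><blockquote><pre>/* Berkeley DB 3.1 comparison prototype. */
+int my_bt_compare(const DBT *a, const DBT *b);
+<p>
+/* Berkeley DB 3.2 comparison prototype: a DB reference has been added. */
+int my_bt_compare(DB *dbp, const DBT *a, const DBT *b);</pre></blockquote>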
+<p>The app_private field of the <a href="../../api_c/dbt_class.html">DBT</a> structure (accessible only from
+the Berkeley DB C API) has been removed in the 3.2 release. It was replaced
+with app_private fields in the <a href="../../api_c/env_class.html">DB_ENV</a> and <a href="../../api_c/db_class.html">DB</a> handles.
+Applications using this field will have to convert to using one of the
+replacement fields.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/db_dump.html b/libdb/docs/ref/upgrade.3.2/db_dump.html
new file mode 100644
index 0000000..e4be202
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/db_dump.html
@@ -0,0 +1,30 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: db_dump</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: db_dump</h1>
+<p>In previous releases of Berkeley DB, the <a href="../../utility/db_dump.html">db_dump</a> utility dumped Recno
+access method database keys as numeric strings. For consistency, the
+<a href="../../utility/db_dump.html">db_dump</a> utility has been changed in the 3.2 release to dump
+record numbers as hex pairs when the data items are being dumped as hex
+pairs. (See the <b>-k</b> and <b>-p</b> options to the
+<a href="../../utility/db_dump.html">db_dump</a> utility for more information.) Any applications or
+scripts post-processing the <a href="../../utility/db_dump.html">db_dump</a> output of Recno databases
+under these conditions may require modification.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/disk.html b/libdb/docs/ref/upgrade.3.2/disk.html
new file mode 100644
index 0000000..bd99427
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/disk.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: upgrade requirements</h1>
+<p>Log file formats and the Queue Access Method database formats changed
+in the Berkeley DB 3.2 release. (The on-disk Queue format changed from
+version 2 to version 3.) Until the underlying databases are upgraded,
+the <a href="../../api_c/db_open.html">DB-&gt;open</a> method will return a <a href="../../api_c/db_open.html#DB_OLD_VERSION">DB_OLD_VERSION</a> error.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB
+installations</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/handle.html b/libdb/docs/ref/upgrade.3.2/handle.html
new file mode 100644
index 0000000..8d8a2b5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/handle.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Java and C++ object reuse</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Java and C++ object reuse</h1>
+<p>In previous releases of Berkeley DB, Java <a href="../../api_java/env_class.html">DbEnv</a> and <a href="../../api_java/db_class.html">Db</a>
+objects, and C++ <a href="../../api_cxx/env_class.html">DbEnv</a> and <a href="../../api_cxx/db_class.html">Db</a> objects could be
+reused after they were closed, by calling open on them again. This is
+no longer permitted, and these objects no longer allow any operations
+after a close. Applications reusing these objects should be modified
+to create new objects instead.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/notfound.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/incomplete.html b/libdb/docs/ref/upgrade.3.2/incomplete.html
new file mode 100644
index 0000000..830d314
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/incomplete.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB_INCOMPLETE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB_INCOMPLETE</h1>
+<p>There are a number of functions that flush pages from the Berkeley DB shared
+memory buffer pool to disk. Most of those functions can potentially
+fail because a page that needs to be flushed is not currently available.
+However, this is not a hard failure and is rarely cause for concern.
+In the Berkeley DB 3.2 release, the C++ API (if that API is configured to
+throw exceptions) and the Java API have been changed so that this
+failure does not throw an exception, but rather returns a non-zero error
+code of DB_INCOMPLETE.
+<p>The following C++ methods will return DB_INCOMPLETE rather than throw
+an exception: <a href="../../api_cxx/db_close.html">Db::close</a>, <a href="../../api_cxx/db_sync.html">Db::sync</a>, DbEnv::memp_sync,
+DbEnv::txn_checkpoint, DbMpoolFile::memp_fsync.
+<p>The following Java methods are now declared "public int" rather than
+"public void", and will return Db.DB_INCOMPLETE rather than
+throw an exception: <a href="../../api_java/db_close.html">Db.close</a>, <a href="../../api_java/db_sync.html">Db.sync</a>,
+DbEnv.txn_checkpoint.
+<p>It is likely that the only applications requiring changes will be
+those currently checking for a DB_INCOMPLETE return that had
+been encapsulated in an exception.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/renumber.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/intro.html b/libdb/docs/ref/upgrade.3.2/intro.html
new file mode 100644
index 0000000..52bc236
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.1 release interfaces to the Berkeley DB 3.2 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.1/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/set_flags.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/mutexlock.html b/libdb/docs/ref/upgrade.3.2/mutexlock.html
new file mode 100644
index 0000000..a20c1d5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/mutexlock.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB_ENV-&gt;set_mutexlocks</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB_ENV-&gt;set_mutexlocks</h1>
+<p>Previous Berkeley DB releases included the db_env_set_mutexlocks interface,
+intended for debugging, which allowed applications to always obtain
+requested mutual exclusion mutexes without regard for their
+availability. This interface has been replaced with
+dbenv_set_mutexlocks, which provides the same functionality on a
+per-database environment basis. Applications using the old interface
+should be updated to use the new one.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/tx_recover.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/notfound.html b/libdb/docs/ref/upgrade.3.2/notfound.html
new file mode 100644
index 0000000..085ef06
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/notfound.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Java java.io.FileNotFoundException</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Java java.io.FileNotFoundException</h1>
+<p>The Java <a href="../../api_java/env_remove.html">DbEnv.remove</a>, <a href="../../api_java/db_remove.html">Db.remove</a> and
+<a href="../../api_java/db_rename.html">Db.rename</a> methods now throw java.io.FileNotFoundException
+in the case where the named file does not exist. Applications should
+be modified to catch this exception where appropriate.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/handle.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/db_dump.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/renumber.html b/libdb/docs/ref/upgrade.3.2/renumber.html
new file mode 100644
index 0000000..10bee1e
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/renumber.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: Logically renumbering records</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: Logically renumbering records</h1>
+<p>In the Berkeley DB 3.2 release, cursor adjustment semantics changed for Recno
+databases with mutable record numbers. Before the 3.2 release, cursors
+were adjusted to point to the previous or next record at the time the
+record to which the cursor referred was deleted. This could lead to
+unexpected behaviors. For example, two cursors referring to sequential
+records that were both deleted would lose their relationship to each
+other and would refer to the same position in the database instead of
+their original sequential relationship. There were also command
+sequences that would have unexpected results. For example, DB_AFTER
+and DB_BEFORE cursor put operations, using a cursor previously used to
+delete an item, would perform the put relative to the cursor's adjusted
+position and not its original position.
+<p>In the Berkeley DB 3.2 release, cursors maintain their position in the tree
+regardless of deletion operations using the cursor. Applications that
+perform database operations, using cursors previously used to delete
+entries in Recno databases with mutable record numbers, should be
+evaluated to ensure that the new semantics do not cause application
+failure.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/set_flags.html b/libdb/docs/ref/upgrade.3.2/set_flags.html
new file mode 100644
index 0000000..3844db8
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/set_flags.html
@@ -0,0 +1,36 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB_ENV-&gt;set_flags</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB_ENV-&gt;set_flags</h1>
+<p>A new method has been added to the Berkeley DB environment handle,
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a>. This interface currently takes three flags:
+<a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a>, <a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a> and <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a>. The
+first of these flags, <a href="../../api_c/env_set_flags.html#DB_CDB_ALLDB">DB_CDB_ALLDB</a>, provides new functionality,
+allowing Berkeley DB Concurrent Data Store applications to do locking across multiple databases.
+<p>The other two flags, <a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a> and <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a>, were
+specified to the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method in previous releases. In
+the 3.2 release, they have been moved to the <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> method
+because this allows the settings to be toggled during the life of the
+application, and because it is a more appropriate place for them.
+Applications specifying either the
+<a href="../../api_c/env_set_flags.html#DB_NOMMAP">DB_NOMMAP</a> or <a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flags to the
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method should replace those flags with calls to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> method.
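+<p>For example, assuming an environment handle named <b>dbenv</b> (the
+handle and flag combinations here are illustrative), an application that
+previously requested no-sync transaction commits at open time:
+<p><blockquote><pre>/* Historic style: flag passed to DB_ENV->open. */
+dbenv->open(dbenv, home, DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_TXN_NOSYNC, 0);</pre></blockquote>
+<p>would now configure the flag with a separate call:
+<p><blockquote><pre>/* 3.2 style: flag set (or cleared) through DB_ENV->set_flags. */
+dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1);
+dbenv->open(dbenv, home, DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN, 0);</pre></blockquote>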
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/callback.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/toc.html b/libdb/docs/ref/upgrade.3.2/toc.html
new file mode 100644
index 0000000..e94a43c
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/toc.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 3.2: introduction</a>
+<li><a href="set_flags.html">Release 3.2: DB_ENV-&gt;set_flags</a>
+<li><a href="callback.html">Release 3.2: DB callback functions, app_private field</a>
+<li><a href="renumber.html">Release 3.2: logically renumbering records</a>
+<li><a href="incomplete.html">Release 3.2: DB_INCOMPLETE</a>
+<li><a href="tx_recover.html">Release 3.2: DB_ENV-&gt;set_tx_recover</a>
+<li><a href="mutexlock.html">Release 3.2: DB_ENV-&gt;set_mutexlocks</a>
+<li><a href="handle.html">Release 3.2: Java and C++ object reuse</a>
+<li><a href="notfound.html">Release 3.2: Java java.io.FileNotFoundException</a>
+<li><a href="db_dump.html">Release 3.2: db_dump</a>
+<li><a href="disk.html">Release 3.2: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.2/tx_recover.html b/libdb/docs/ref/upgrade.3.2/tx_recover.html
new file mode 100644
index 0000000..e29d0dc
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.2/tx_recover.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.2: DB_ENV-&gt;set_tx_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.2: DB_ENV-&gt;set_tx_recover</h1>
+<p>The <b>info</b> parameter of the function passed to
+DB_ENV-&gt;set_tx_recover is no longer needed. If your application
+calls DB_ENV-&gt;set_tx_recover, find the callback function referred
+to by that call and remove the <b>info</b> parameter.
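+<p>For example, a recovery callback that was previously declared along
+the following lines (the function name is illustrative, and the historic
+signature is shown here as an assumption about pre-3.2 code):
+<p><blockquote><pre>int app_recover(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op, void *info);</pre></blockquote>
+<p>would now be declared without the final parameter:
+<p><blockquote><pre>int app_recover(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op);</pre></blockquote>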
+<p>In addition, the called function no longer needs to handle Berkeley DB log
+records; Berkeley DB will handle them internally as well as call the
+application-specified function. Any handling of Berkeley DB log records in the
+application's callback function may be removed.
+<p>In addition, the callback function will no longer be called with the
+<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> flag specified unless the transaction
+enclosing the operation successfully committed.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.2/mutexlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/alloc.html b/libdb/docs/ref/upgrade.3.3/alloc.html
new file mode 100644
index 0000000..470ee79
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/alloc.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: DB-&gt;set_malloc, DB-&gt;set_realloc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/getswap.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/conflict.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: DB-&gt;set_malloc, DB-&gt;set_realloc</h1>
+<p>There are two new interfaces in the Berkeley DB 3.3 release:
+<a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a> and <a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a>. These functions
+allow applications to specify a set of allocation functions for the
+Berkeley DB library to use when allocating memory to be owned by the
+application and when freeing memory that was originally allocated by
+the application.
+<p>The new interfaces affect or replace the following historic
+interfaces:
+<p><dl compact>
+<p><dt>DB-&gt;set_malloc<dd>The DB-&gt;set_malloc interface has been replaced in its entirety.
+Applications using this interface should replace the call with a call
+to <a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a>.
+<p><dt>DB-&gt;set_realloc<dd>The DB-&gt;set_realloc interface has been replaced in its entirety.
+Applications using this interface should replace the call with a call
+to <a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a>.
+<p><dt><a href="../../api_c/db_stat.html">DB-&gt;stat</a><dd>The historic <b>db_malloc</b> argument to the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> method has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If non-NULL,
+it should be replaced with a call to <a href="../../api_c/db_set_alloc.html">DB-&gt;set_alloc</a>.
+<p><dt>lock_stat<dd>The historic <b>db_malloc</b> argument to the lock_stat interface has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If
+non-NULL, it should be replaced with a call to <a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>.
+<p><dt>log_archive<dd>The historic <b>db_malloc</b> argument to the log_archive interface has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If non-NULL,
+it should be replaced with a call to <a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>.
+<p><dt>log_stat<dd>The historic <b>db_malloc</b> argument to the log_stat interface has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If non-NULL,
+it should be replaced with a call to <a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>.
+<p><dt>memp_stat<dd>The historic <b>db_malloc</b> argument to the memp_stat function has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If non-NULL,
+it should be replaced with a call to <a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>.
+<p><dt>txn_stat<dd>The historic <b>db_malloc</b> argument to the txn_stat function has
+been replaced. Applications using this interface should do as follows:
+if the argument is NULL, it should simply be removed. If non-NULL,
+it should be replaced with a call to <a href="../../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>.
+</dl>
+<p>One potential incompatibility for historic applications is that the
+allocation functions for a database environment must now be set before
+the environment is opened. Historically, Berkeley DB applications could open
+the environment first, and subsequently call the DB-&gt;set_malloc
+and DB-&gt;set_realloc interfaces; that use is no longer supported.
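+<p>For example, a database handle that was previously configured with
+application-specific allocators (the handle and function names below are
+illustrative):
+<p><blockquote><pre>/* Historic calls, made separately for malloc and realloc. */
+db->set_malloc(db, app_malloc);
+db->set_realloc(db, app_realloc);</pre></blockquote>
+<p>would now be configured with a single call that also names the
+application's free function:
+<p><blockquote><pre>db->set_alloc(db, app_malloc, app_realloc, app_free);</pre></blockquote>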
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/getswap.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/conflict.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/bigfile.html b/libdb/docs/ref/upgrade.3.3/bigfile.html
new file mode 100644
index 0000000..c0daefd
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/bigfile.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: --disable-bigfile</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/shared.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: --disable-bigfile</h1>
+<p>In previous releases, the Berkeley DB UNIX configuration used the --disable-bigfile
+option for systems that could not, for whatever reason, include large
+file support in a particular Berkeley DB configuration. However, large file
+support has been integrated into the autoconf configuration tool as of
+version 2.50. For that reason, Berkeley DB configuration no longer supports
+--disable-bigfile; the autoconf standard --disable-largefile should be
+used instead.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/shared.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/conflict.html b/libdb/docs/ref/upgrade.3.3/conflict.html
new file mode 100644
index 0000000..da8d122
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/conflict.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: DB_LOCK_CONFLICT</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/alloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/memp_fget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: DB_LOCK_CONFLICT</h1>
+<p>The DB_LOCK_CONFLICT flag has been removed from the lock_detect interface.
+Applications specifying the DB_LOCK_CONFLICT flag should simply replace
+it with a flags argument of 0.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/alloc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/memp_fget.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/disk.html b/libdb/docs/ref/upgrade.3.3/disk.html
new file mode 100644
index 0000000..828a716
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/disk.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/bigfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: upgrade requirements</h1>
+<p>No database formats or log file formats changed in the Berkeley DB 3.3 release.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/bigfile.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/getswap.html b/libdb/docs/ref/upgrade.3.3/getswap.html
new file mode 100644
index 0000000..9c7bd21
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/getswap.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: DB-&gt;get_byteswapped</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/gettype.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/alloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: DB-&gt;get_byteswapped</h1>
+<p>The <a href="../../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a> method can return an error in the Berkeley DB
+3.3 release, and so requires an interface change. C and C++
+applications calling <a href="../../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a> should be changed to
+treat the method's return as an error code, and to pass an additional
+second argument of type <b>int *</b> to the method. The additional
+argument is used as a memory location in which to store the requested
+information.
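+<p>For example, assuming an open database handle named <b>db</b>, code
+of the historic form:
+<p><blockquote><pre>int isswapped;
+isswapped = db->get_byteswapped(db);</pre></blockquote>
+<p>would be rewritten along these lines (error handling elided):
+<p><blockquote><pre>int isswapped, ret;
+ret = db->get_byteswapped(db, &isswapped);</pre></blockquote>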
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/gettype.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/alloc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/gettype.html b/libdb/docs/ref/upgrade.3.3/gettype.html
new file mode 100644
index 0000000..c48c918
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/gettype.html
@@ -0,0 +1,28 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: DB-&gt;get_type</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/getswap.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: DB-&gt;get_type</h1>
+<p>The <a href="../../api_c/db_get_type.html">DB-&gt;get_type</a> method can return an error in the Berkeley DB 3.3
+release, and so requires an interface change. C and C++ applications
+calling <a href="../../api_c/db_get_type.html">DB-&gt;get_type</a> should be changed to treat the method's
+return as an error code, and to pass an additional second argument of
+type <b>DBTYPE *</b> to the method. The additional argument is used
+as a memory location in which to store the requested information.
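+<p>For example, assuming an open database handle named <b>db</b>, code
+of the historic form:
+<p><blockquote><pre>DBTYPE type;
+type = db->get_type(db);</pre></blockquote>
+<p>would be rewritten along these lines (error handling elided):
+<p><blockquote><pre>DBTYPE type;
+int ret;
+ret = db->get_type(db, &type);</pre></blockquote>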
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/getswap.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/intro.html b/libdb/docs/ref/upgrade.3.3/intro.html
new file mode 100644
index 0000000..be348d7
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.2 release interfaces to the Berkeley DB 3.3 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.2/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/memp_fget.html b/libdb/docs/ref/upgrade.3.3/memp_fget.html
new file mode 100644
index 0000000..1e1dca0
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/memp_fget.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: memp_fget, EIO</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/conflict.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/txn_prepare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: memp_fget, EIO</h1>
+<p>Previous releases of Berkeley DB returned the system error EIO when the
+memp_fget interface was called to retrieve a page, the page did not
+exist, and the <a href="../../api_c/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a> flag was not set. In the 3.3
+release, the error <a href="../../api_c/memp_fget.html#DB_PAGE_NOTFOUND">DB_PAGE_NOTFOUND</a> is returned instead, to
+allow applications to distinguish between recoverable and
+non-recoverable errors. Applications calling the memp_fget interface
+and checking for a return of EIO should check for
+<a href="../../api_c/memp_fget.html#DB_PAGE_NOTFOUND">DB_PAGE_NOTFOUND</a> instead.
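+<p>For example, an error check of the historic form (assuming the
+memp_fget return value is stored in <b>ret</b>):
+<p><blockquote><pre>if (ret == EIO) {
+	/* The page does not exist; handle that case here. */
+}</pre></blockquote>
+<p>should now test for the Berkeley DB error instead:
+<p><blockquote><pre>if (ret == DB_PAGE_NOTFOUND) {
+	/* The page does not exist; handle that case here. */
+}</pre></blockquote>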
+<p>Previous releases of Berkeley DB treated filesystem I/O failure (the most
+common of which is the filesystem running out of space) as a fatal error,
+returning <a href="../../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>. When a filesystem failure happens in
+the 3.3 release, Berkeley DB returns the underlying system error (usually EIO),
+but can continue to run. Applications should abort any enclosing
+transaction when a recoverable system error occurs in order to recover
+from the error.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/conflict.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/txn_prepare.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/rpc.html b/libdb/docs/ref/upgrade.3.3/rpc.html
new file mode 100644
index 0000000..e2eb15e
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/rpc.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: DB_ENV-&gt;set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/gettype.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: DB_ENV-&gt;set_server</h1>
+<p>The DB_ENV-&gt;set_server interface has been deprecated and replaced
+with the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> method. The DB_ENV-&gt;set_server
+interface will be removed in a future release, and so applications using
+it should convert. The DB_ENV-&gt;set_server interface can be easily
+converted to the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> method by changing the name
+and specifying NULL for the added argument, which is second in the
+argument list.
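+<p>For example, a call of the historic form (the argument names are
+illustrative):
+<p><blockquote><pre>dbenv->set_server(dbenv, host, cl_timeout, sv_timeout, 0);</pre></blockquote>
+<p>would become:
+<p><blockquote><pre>dbenv->set_rpc_server(dbenv, NULL, host, cl_timeout, sv_timeout, 0);</pre></blockquote>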
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/gettype.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/shared.html b/libdb/docs/ref/upgrade.3.3/shared.html
new file mode 100644
index 0000000..036e896
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/shared.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: --enable-dynamic, --enable-shared</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/txn_prepare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/bigfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: --enable-dynamic, --enable-shared</h1>
+<p>In previous releases, Berkeley DB required separate configuration and builds
+to create both static and shared libraries. This has changed in the
+3.3 release, and Berkeley DB now builds and installs both shared and static
+versions of the Berkeley DB libraries by default. This change was based on
+Berkeley DB upgrading to release 1.4 of the GNU Project's Libtool distribution.
+For this reason, Berkeley DB no longer supports the previous --enable-dynamic
+and --enable-shared configuration options. Instead, as Berkeley DB now builds
+both static and shared libraries by default, the useful options are
+Libtool's --disable-shared and --disable-static.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/txn_prepare.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/bigfile.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/toc.html b/libdb/docs/ref/upgrade.3.3/toc.html
new file mode 100644
index 0000000..56fa10b
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/toc.html
@@ -0,0 +1,35 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 3.2.X applications to Berkeley DB 3.3</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 3.2.X applications to Berkeley DB 3.3</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 3.3: introduction</a>
+<li><a href="rpc.html">Release 3.3: DB_ENV-&gt;set_server</a>
+<li><a href="gettype.html">Release 3.3: DB-&gt;get_type</a>
+<li><a href="getswap.html">Release 3.3: DB-&gt;get_byteswapped</a>
+<li><a href="alloc.html">Release 3.3: DB-&gt;set_malloc, DB-&gt;set_realloc</a>
+<li><a href="conflict.html">Release 3.3: DB_LOCK_CONFLICT</a>
+<li><a href="memp_fget.html">Release 3.3: memp_fget, EIO</a>
+<li><a href="txn_prepare.html">Release 3.3: txn_prepare</a>
+<li><a href="shared.html">Release 3.3: --enable-dynamic, --enable-shared</a>
+<li><a href="bigfile.html">Release 3.3: --disable-bigfile</a>
+<li><a href="disk.html">Release 3.3: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.3.3/txn_prepare.html b/libdb/docs/ref/upgrade.3.3/txn_prepare.html
new file mode 100644
index 0000000..f437f1a
--- /dev/null
+++ b/libdb/docs/ref/upgrade.3.3/txn_prepare.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 3.3: txn_prepare</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/memp_fget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/shared.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 3.3: txn_prepare</h1>
+<p>An additional argument has been added to the txn_prepare interface. If
+your application calls txn_prepare (that is, is performing two-phase
+commit using Berkeley DB as a local resource manager), see
+<a href="../../ref/xa/intro.html">Distributed Transactions</a> for more
+information.
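+<p>As a sketch of the change (the variable names are illustrative, and
+the details are described in the page referenced above): in the 3.3
+release the added, second argument is a global transaction ID, an array
+of DB_XIDDATASIZE bytes.
+<p><blockquote><pre>u_int8_t gid[DB_XIDDATASIZE];
+/* gid must be filled in with the distributed transaction's global ID. */
+ret = txn_prepare(tid, gid);</pre></blockquote>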
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/memp_fget.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.3.3/shared.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/asr.html b/libdb/docs/ref/upgrade.4.0/asr.html
new file mode 100644
index 0000000..8fc2025
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/asr.html
@@ -0,0 +1,40 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: application-specific recovery</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: application-specific recovery</h1>
+<p>If you have created your own logging and recovery routines, you may need
+to upgrade them to the Berkeley DB 4.0 release.
+<p>First, you should regenerate your logging, print, read, and other
+automatically generated routines using the dist/gen_rec.awk tool
+included in the Berkeley DB distribution.
+<p>Next, compare the template file code generated by the gen_rec.awk tool
+against the code generated by the last release in which you built a
+template file. Any changes in the templates should be incorporated into
+the recovery routines you have written.
+<p>Third, if your recovery functions refer to <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a>
+(that is, your code checks for that particular operation code), you
+should replace it with DB_REDO(op) which compares the operation code to
+both <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_FORWARD_ROLL">DB_TXN_FORWARD_ROLL</a> and <a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a>.
+(<a href="../../api_c/env_set_app_dispatch.html#DB_TXN_APPLY">DB_TXN_APPLY</a> is a potential value for the operation code as of
+the 4.0 release.)
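+<p>For example, a test in a recovery function of the form:
+<p><blockquote><pre>if (op == DB_TXN_FORWARD_ROLL) {
+	/* Redo the operation. */
+}</pre></blockquote>
+<p>would become:
+<p><blockquote><pre>if (DB_REDO(op)) {
+	/* Redo the operation. */
+}</pre></blockquote>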
+<p>Finally, if you have created your own logging and recovery routines, we
+recommend that you contact Sleepycat support and ask us to review those
+routines for you.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/cxx.html b/libdb/docs/ref/upgrade.4.0/cxx.html
new file mode 100644
index 0000000..2c0163d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/cxx.html
@@ -0,0 +1,49 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: C++ ostream objects</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/asr.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: C++ ostream objects</h1>
+<p>In the 4.0 release, the Berkeley DB C++ API has been changed to use the ISO
+standard C++ API in preference to the older, less portable interfaces,
+where available. This means the Berkeley DB methods that used to take an
+ostream object as a parameter now expect a std::ostream. Specifically,
+the following methods have changed:
+<p><blockquote><pre>DbEnv::set_error_stream
+Db::set_error_stream
+Db::verify</pre></blockquote>
+<p>On many platforms, the old and the new C++ styles are interchangeable;
+on some platforms (notably Win32), they are incompatible. If your code
+uses these methods and you have trouble with the 4.0 release, you should
+update code that looks like this:
+<p><blockquote><pre>#include &lt;iostream.h&gt;
+#include &lt;db_cxx.h&gt;
+<p>
+void foo(Db db) {
+ db.set_error_stream(&cerr);
+}</pre></blockquote>
+<p>to look like this:
+<p><blockquote><pre>#include &lt;iostream&gt;
+#include &lt;db_cxx.h&gt;
+<p>
+using std::cerr;
+<p>
+void foo(Db db) {
+ db.set_error_stream(&cerr);
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/asr.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/deadlock.html b/libdb/docs/ref/upgrade.4.0/deadlock.html
new file mode 100644
index 0000000..3226da9
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/deadlock.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: db_deadlock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: db_deadlock</h1>
+<p>The <b>-w</b> option to the <a href="../../utility/db_deadlock.html">db_deadlock</a> utility has been
+deprecated. Applications can get the functionality of the <b>-w</b>
+option by using the <b>-t</b> option with an argument of
+<b>.100000</b>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/disk.html b/libdb/docs/ref/upgrade.4.0/disk.html
new file mode 100644
index 0000000..d824aef
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/disk.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/asr.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: upgrade requirements</h1>
+<p>The log file format changed in the Berkeley DB 4.0 release. No database
+formats changed in the Berkeley DB 4.0 release.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/asr.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/env.html b/libdb/docs/ref/upgrade.4.0/env.html
new file mode 100644
index 0000000..74a118f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/env.html
@@ -0,0 +1,82 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: db_env_set_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: db_env_set_XXX</h1>
+<p>The db_env_set_region_init interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. This is an interface change:
+historically, the db_env_set_region_init interface operated on the
+entire Berkeley DB library, not a single environment. The new interface only
+operates on a single <a href="../../api_c/env_class.html">DB_ENV</a> handle (and any handles created in
+the scope of that handle). Applications calling the
+db_env_set_region_init interface should update their calls: calls to
+the historic routine with an argument of 1 (0) are equivalent to calling
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a> flag and an
+argument of 1 (0).
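+<p>For example, assuming an environment handle named <b>dbenv</b>, the
+historic call:
+<p><blockquote><pre>db_env_set_region_init(1);</pre></blockquote>
+<p>would be replaced, for each environment handle, with:
+<p><blockquote><pre>dbenv->set_flags(dbenv, DB_REGION_INIT, 1);</pre></blockquote>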
+<p>The db_env_set_tas_spins interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a> method. This is an interface
+change: historically, the db_env_set_tas_spins interface operated on
+the entire Berkeley DB library, not a single environment. The new interface
+only operates on a single <a href="../../api_c/env_class.html">DB_ENV</a> handle (and any handles created
+in the scope of that handle). Applications calling the
+db_env_set_tas_spins interface should update their calls: calls to the
+historic routine are equivalent to calling <a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a> method
+with the same argument. In addition, for consistent behavior, all
+<a href="../../api_c/env_class.html">DB_ENV</a> handles opened by the application should make the same
+configuration call, or the value will need to be entered into the
+environment's <b>DB_CONFIG</b> file.
+<p>Also, three of the standard Berkeley DB debugging interfaces changed in the
+4.0 release. It is quite unlikely that Berkeley DB applications use these
+interfaces.
+<p>The DB_ENV-&gt;set_mutexlocks interface was removed in the 4.0 release
+and replaced with the <a href="../../api_c/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. Applications calling the
+DB_ENV-&gt;set_mutexlocks interface should update their calls: calls
+to the historic routine with an argument of 1 (0) are equivalent to
+calling <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a> flag and
+an argument of 1 (0).
+<p>The db_env_set_pageyield interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. This is an interface change:
+historically, the db_env_set_pageyield interface operated on the entire
+Berkeley DB library, not a single environment. The new interface only
+operates on a single <a href="../../api_c/env_class.html">DB_ENV</a> handle (and any handles created in
+the scope of that handle). Applications calling the
+db_env_set_pageyield interface should update their calls: calls to the
+historic routine with an argument of 1 (0) are equivalent to calling
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag and an
+argument of 1 (0). In addition, all <a href="../../api_c/env_class.html">DB_ENV</a> handles opened by
+the application will need to make the same call, or the
+<a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag will need to be entered into the environment's
+<b>DB_CONFIG</b> file.
+<p>The db_env_set_panicstate interface was removed in the 4.0 release,
+replaced with the <a href="../../api_c/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a> and <a href="../../api_c/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a>
+flags to the <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. (The
+<a href="../../api_c/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a> flag will cause an environment to panic,
+affecting all threads of control using that environment. The
+<a href="../../api_c/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a> flag will cause a single <a href="../../api_c/env_class.html">DB_ENV</a> handle to
+ignore the current panic state of the environment.) This is an
+interface change: historically, the db_env_set_panicstate interface
+operated on the entire Berkeley DB library, not a single environment.
+Applications calling the db_env_set_panicstate interface should update
+their calls, replacing the historic call with a call to
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> and the appropriate flag, depending on their
+usage of the historic interface.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/intro.html b/libdb/docs/ref/upgrade.4.0/intro.html
new file mode 100644
index 0000000..e0dbaa6
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.3 release interfaces to the Berkeley DB 4.0 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/java.html b/libdb/docs/ref/upgrade.4.0/java.html
new file mode 100644
index 0000000..9f8753d
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/java.html
@@ -0,0 +1,33 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: Java CLASSPATH environment variable</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: Java CLASSPATH environment variable</h1>
+<p>The Berkeley DB Java class files are now packaged as jar files. In the 4.0
+release, the <b>CLASSPATH</b> environment variable must change to
+include at least the <b>db.jar</b> file. It can optionally include
+the <b>dbexamples.jar</b> file if you want to run the examples. For
+example, on UNIX:
+<p><blockquote><pre>export CLASSPATH="/usr/local/BerkeleyDB.4.1/lib/db.jar:/usr/local/BerkeleyDB.4.1/lib/dbexamples.jar"</pre></blockquote>
+<p>For example, on Windows:
+<p><blockquote><pre>set CLASSPATH="D:\db\build_win32\Release\db.jar;D:\db\build_win32\Release\dbexamples.jar"</pre></blockquote>
+<p>For more information on Java configuration, please see
+<a href="../../ref/java/conf.html">Java configuration</a> and
+<a href="../../ref/build_win/intro.html">Building for Win32</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/lock.html b/libdb/docs/ref/upgrade.4.0/lock.html
new file mode 100644
index 0000000..cfafb20
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/lock.html
@@ -0,0 +1,46 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: lock_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: lock_XXX</h1>
+<p>The C API for the Berkeley DB Locking subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>lock_detect</td><td><a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td></tr>
+<tr><td>lock_get</td><td><a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td></tr>
+<tr><td>lock_id</td><td><a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td></tr>
+<tr><td>lock_put</td><td><a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td></tr>
+<tr><td>lock_stat</td><td><a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td></tr>
+<tr><td>lock_vec</td><td><a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> handle's method (easily done as the
+first argument to the existing call is the correct handle to use).
+<p>In addition, the <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>.
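+<p>For example (a minimal sketch; variable names are illustrative and
+error handling is omitted), historic calls of the form:
+<p><blockquote><pre>DB_LOCK_STAT *statp;
+<p>
+lock_detect(dbenv, /* ... */);
+lock_stat(dbenv, &statp);</pre></blockquote>
+<p>become method calls on the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> handle, with the final
+flags argument of 0 added to the statistics call:
+<p><blockquote><pre>dbenv-&gt;lock_detect(dbenv, /* ... */);
+dbenv-&gt;lock_stat(dbenv, &statp, 0);</pre></blockquote>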
+<p>The C++ and Java APIs for the DbLock::put (DbLock.put) method were
+reworked in the 4.0 release to make the lock put interface a method of
+the <a href="../../api_c/env_class.html">DB_ENV</a> handle rather than the DbLock handle. Applications
+calling the DbLock::put or DbLock.put method should update their calls
+to use the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> handle's method (easily done as the
+first argument to the existing call is the correct handle to use).
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/lock_id_free.html b/libdb/docs/ref/upgrade.4.0/lock_id_free.html
new file mode 100644
index 0000000..c145545
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/lock_id_free.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;lock_id_free</h1>
+<p>A new locker-ID-related API, the <a href="../../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a> method, was added in
+the Berkeley DB 4.0 release. Applications using the <a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a> method to allocate
+locker IDs may want to update their applications to free the locker ID
+when it is no longer needed.
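+<p>For example (a minimal sketch; the variable name is illustrative and
+error handling is omitted):
+<p><blockquote><pre>u_int32_t locker;	/* Illustrative locker ID variable. */
+<p>
+dbenv-&gt;lock_id(dbenv, &locker);
+/* ... acquire and release locks using the locker ID ... */
+dbenv-&gt;lock_id_free(dbenv, locker);</pre></blockquote>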
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/log.html b/libdb/docs/ref/upgrade.4.0/log.html
new file mode 100644
index 0000000..6e05097
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/log.html
@@ -0,0 +1,56 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: log_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: log_XXX</h1>
+<p>The C API for the Berkeley DB Logging subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>log_archive</td><td><a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td></tr>
+<tr><td>log_file</td><td><a href="../../api_c/log_file.html">DB_ENV-&gt;log_file</a></td></tr>
+<tr><td>log_flush</td><td><a href="../../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td></tr>
+<tr><td>log_get</td><td><a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a>, <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a>, <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a></td></tr>
+<tr><td>log_put</td><td><a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a></td></tr>
+<tr><td>log_register</td><td>DB_ENV-&gt;log_register</td></tr>
+<tr><td>log_stat</td><td><a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td></tr>
+<tr><td>log_unregister</td><td>DB_ENV-&gt;log_unregister</td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> handle's method (in all cases other
+than the log_get call, this is easily done as the first argument to the
+existing call is the correct handle to use).
+<p>Application calls to the historic log_get interface must be replaced
+with the creation of a log file cursor (a <a href="../../api_c/logc_class.html">DB_LOGC</a> object), using
+the <a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> method, calls to the <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a> method to retrieve log
+records, and calls to the <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a> method to destroy the cursor. It
+may also be possible to simplify some applications. In previous
+releases of Berkeley DB, the DB_CURRENT, DB_NEXT, and DB_PREV flags to the
+log_get function could not be used by a free-threaded <a href="../../api_c/env_class.html">DB_ENV</a>
+handle. If their <a href="../../api_c/env_class.html">DB_ENV</a> handle was free-threaded, applications
+had to create an additional, unique environment handle by separately
+calling <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> without specifying <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>. This
+is no longer an issue in the log cursor interface, and applications may
+be able to remove the now unnecessary creation of the additional
+<a href="../../api_c/env_class.html">DB_ENV</a> object.
+<p>Finally, the <a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a> call has been changed in the 4.0 release
+to take a flags argument. To leave their historic behavior unchanged,
+applications should add a final argument of 0 to any calls made to
+<a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/mp.html b/libdb/docs/ref/upgrade.4.0/mp.html
new file mode 100644
index 0000000..6b1a0fe
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/mp.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: memp_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: memp_XXX</h1>
+<p>The C API for the Berkeley DB Memory Pool subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>memp_register</td><td><a href="../../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td></tr>
+<tr><td>memp_stat</td><td><a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td></tr>
+<tr><td>memp_sync</td><td><a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td></tr>
+<tr><td>memp_trickle</td><td><a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td></tr>
+<tr><td>memp_fopen</td><td><a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: ftype</td><td><a href="../../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: pgcookie</td><td><a href="../../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: fileid</td><td><a href="../../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: lsn_offset</td><td><a href="../../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: clear_len</td><td><a href="../../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a></td></tr>
+<tr><td>memp_fopen</td><td><a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a></td></tr>
+<tr><td>memp_fclose</td><td><a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a></td></tr>
+<tr><td>memp_fput</td><td><a href="../../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a></td></tr>
+<tr><td>memp_fset</td><td><a href="../../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a></td></tr>
+<tr><td>memp_fsync</td><td><a href="../../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a></td></tr>
+</table>
+<p>Applications calling any of the memp_register, memp_stat, memp_sync or
+memp_trickle interfaces should update those calls to use the enclosing
+<a href="../../api_c/env_class.html">DB_ENV</a> handle's method (easily done as the first argument to the
+existing call is the correct <a href="../../api_c/env_class.html">DB_ENV</a> handle).
+<p>In addition, the <a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>.
+<p>Applications calling the memp_fopen interface should update those calls
+as follows: First, acquire a <a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle using the
+<a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a> method. Second, if the DB_MPOOL_FINFO structure
+reference passed to the memp_fopen interface was non-NULL, call the
+<a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a> method corresponding to each initialized field in
+the DB_MPOOL_FINFO structure. Third, call the <a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> method
+to open the underlying file. If the <a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> method call
+fails, the <a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method must be called to destroy the allocated
+handle.
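+<p>For example (a minimal sketch; the file name, mode, and page size are
+illustrative and error handling is abbreviated), the new sequence of
+calls might look as follows:
+<p><blockquote><pre>DB_MPOOLFILE *mpf;
+size_t pagesize = 1024;		/* Illustrative page size. */
+<p>
+dbenv-&gt;memp_fcreate(dbenv, &mpf, 0);
+/* Call the DB_MPOOLFILE methods corresponding to any DB_MPOOL_FINFO
+   fields the application previously initialized, for example
+   mpf-&gt;set_ftype(). */
+if (mpf-&gt;open(mpf, "file", 0, 0660, pagesize) != 0)
+	(void)mpf-&gt;close(mpf, 0);</pre></blockquote>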
+<p>Applications calling the memp_fopen, memp_fclose, memp_fput, memp_fset,
+or memp_fsync interfaces should update those calls to use the enclosing
+<a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a> handle's method. Again, this is easily done as the
+first argument to the existing call is the correct <a href="../../api_c/mempfile_class.html">DB_MPOOLFILE</a>
+handle. With one exception, the calling conventions of the old and new
+interfaces are identical; the one exception is the <a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> method,
+which requires an additional flag parameter that should be set to 0.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/rpc.html b/libdb/docs/ref/upgrade.4.0/rpc.html
new file mode 100644
index 0000000..2755245
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/rpc.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;set_server</h1>
+<p>The DB_ENV-&gt;set_server interface has been replaced with the
+<a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> method. The DB_ENV-&gt;set_server interface
+can be easily converted to the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> method by changing
+the name, and specifying a NULL for the added argument, second in
+the argument list.
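+<p>For example (a minimal sketch; the host name and timeout values are
+illustrative), a call such as:
+<p><blockquote><pre>/* Host name and timeouts are illustrative. */
+dbenv-&gt;set_server(dbenv, "server.example.com", 0, 0, 0);</pre></blockquote>
+<p>becomes:
+<p><blockquote><pre>dbenv-&gt;set_rpc_server(dbenv, NULL, "server.example.com", 0, 0, 0);</pre></blockquote>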
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/set_lk_max.html b/libdb/docs/ref/upgrade.4.0/set_lk_max.html
new file mode 100644
index 0000000..1ba5bf0
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/set_lk_max.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;set_lk_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;set_lk_max</h1>
+<p>The DB_ENV-&gt;set_lk_max interface has been deprecated in favor of
+the <a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a>, <a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a>,
+and <a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a> methods. The DB_ENV-&gt;set_lk_max
+interface continues to be available, but is no longer documented and
+is expected to be removed in a future release.
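+<p>For example (a minimal sketch; the limit value is illustrative), a call
+such as:
+<p><blockquote><pre>dbenv-&gt;set_lk_max(dbenv, 5000);	/* 5000 is an illustrative limit. */</pre></blockquote>
+<p>might be replaced with separate calls setting each limit:
+<p><blockquote><pre>dbenv-&gt;set_lk_max_locks(dbenv, 5000);
+dbenv-&gt;set_lk_max_lockers(dbenv, 5000);
+dbenv-&gt;set_lk_max_objects(dbenv, 5000);</pre></blockquote>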
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/toc.html b/libdb/docs/ref/upgrade.4.0/toc.html
new file mode 100644
index 0000000..108d7f2
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/toc.html
@@ -0,0 +1,38 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 4.0: introduction</a>
+<li><a href="deadlock.html">Release 4.0: db_deadlock</a>
+<li><a href="lock.html">Release 4.0: lock_XXX</a>
+<li><a href="log.html">Release 4.0: log_XXX</a>
+<li><a href="mp.html">Release 4.0: memp_XXX</a>
+<li><a href="txn.html">Release 4.0: txn_XXX</a>
+<li><a href="env.html">Release 4.0: db_env_set_XXX</a>
+<li><a href="rpc.html">Release 4.0: DB_ENV-&gt;set_server</a>
+<li><a href="set_lk_max.html">Release 4.0: DB_ENV-&gt;set_lk_max</a>
+<li><a href="lock_id_free.html">Release 4.0: DB_ENV-&gt;lock_id_free</a>
+<li><a href="java.html">Release 4.0: Java CLASSPATH environment variable</a>
+<li><a href="cxx.html">Release 4.0: C++ ostream objects</a>
+<li><a href="asr.html">Release 4.0: application-specific recovery</a>
+<li><a href="disk.html">Release 4.0: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.0/txn.html b/libdb/docs/ref/upgrade.4.0/txn.html
new file mode 100644
index 0000000..536867f
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.0/txn.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: txn_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: txn_XXX</h1>
+<p>The C API for the Berkeley DB Transaction subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>txn_abort</td><td><a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a></td></tr>
+<tr><td>txn_begin</td><td><a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td></tr>
+<tr><td>txn_checkpoint</td><td><a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td></tr>
+<tr><td>txn_commit</td><td><a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a></td></tr>
+<tr><td>txn_discard</td><td><a href="../../api_c/txn_discard.html">DB_TXN-&gt;discard</a></td></tr>
+<tr><td>txn_id</td><td><a href="../../api_c/txn_id.html">DB_TXN-&gt;id</a></td></tr>
+<tr><td>txn_prepare</td><td><a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a></td></tr>
+<tr><td>txn_recover</td><td><a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td></tr>
+<tr><td>txn_stat</td><td><a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_class.html">DB_ENV</a> or <a href="../../api_c/txn_class.html">DB_TXN</a> handle's method
+(easily done as the first argument to the existing call is the correct
+handle to use).
+<p>As a special case, since applications might potentially have many calls
+to the txn_abort, txn_begin and txn_commit functions, those interfaces
+continue to work unchanged in the Berkeley DB 4.0 release.
+<p>In addition, the <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>.
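+<p>For example (a minimal sketch; the variable name is illustrative and
+error handling is omitted), a historic call of the form:
+<p><blockquote><pre>DB_TXN_STAT *statp;
+<p>
+txn_stat(dbenv, &statp);</pre></blockquote>
+<p>becomes:
+<p><blockquote><pre>dbenv-&gt;txn_stat(dbenv, &statp, 0);</pre></blockquote>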
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/app_dispatch.html b/libdb/docs/ref/upgrade.4.1/app_dispatch.html
new file mode 100644
index 0000000..9f10810
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/app_dispatch.html
@@ -0,0 +1,32 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: Application-specific logging and recovery</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: Application-specific logging and recovery</h1>
+<p>The application-specific logging and recovery tools and interfaces have
+been reworked in the 4.1 release to make it simpler for applications to
+use Berkeley DB to support their own logging and recovery of non-Berkeley DB
+objects. Specifically, the DB_ENV-&gt;set_recovery_init and
+DB_ENV-&gt;set_tx_recover interfaces have been removed, replaced by
+<a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a>. Applications using either of the
+removed interfaces should be updated to call
+<a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a>. For more information see
+<a href="../../ref/apprec/intro.html">"Application-specific logging and
+recovery"</a> and the <a href="../../api_c/env_set_app_dispatch.html">DB_ENV-&gt;set_app_dispatch</a> documentation.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/cxx.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/checkpoint.html b/libdb/docs/ref/upgrade.4.1/checkpoint.html
new file mode 100644
index 0000000..d0a9a26
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/checkpoint.html
@@ -0,0 +1,30 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB_CHECKPOINT, DB_CURLSN</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB_CHECKPOINT, DB_CURLSN</h1>
+<p>The DB_CHECKPOINT flag has been removed from the <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a> and
+<a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a> methods. It is very unlikely application programs used this
+flag. If your application used this flag, please contact Sleepycat
+Software support for help in upgrading.
+<p>The DB_CURLSN flag has been removed from the <a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a> method. It is
+very unlikely application programs used this flag. If your application
+used this flag, please contact Sleepycat Software support for help in
+upgrading.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/log_stat.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/incomplete.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/cxx.html b/libdb/docs/ref/upgrade.4.1/cxx.html
new file mode 100644
index 0000000..a90735a
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/cxx.html
@@ -0,0 +1,48 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: C++ exceptions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/app_dispatch.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: C++ exceptions</h1>
+<p>With default flags, the C++ <a href="../../api_cxx/env_class.html">DbEnv</a> and <a href="../../api_cxx/db_class.html">Db</a> classes
+can throw exceptions from their constructors. For example, this can
+happen if invalid parameters are passed in or the underlying C
+structures could not be created. If the objects are created in an
+environment that is not configured for exceptions (that is, the
+<a href="../../api_c/env_class.html#DB_CXX_NO_EXCEPTIONS">DB_CXX_NO_EXCEPTIONS</a> flag is specified), errors from the
+constructor will be returned when the handle's open method is called.
+<p>In addition, the behavior of the <a href="../../api_cxx/env_class.html">DbEnv</a> and <a href="../../api_cxx/db_class.html">Db</a>
+destructors has changed to simplify exception handling in applications.
+The destructors will now close the handle if the handle's close method
+was not called prior to the object being destroyed. The return value
+of the call is discarded, and no exceptions will be thrown.
+Applications should call the close method in normal situations so any
+errors while closing can be handled by the application.
+<p>This change allows applications to be structured as follows:
+<p><blockquote><pre>try {
+ DbEnv env(0);
+ env.open(/* ... */);
+ Db db(&env, 0);
+ db.open(/* ... */);
+ /* ... */
+ db.close(0);
+ env.close(0);
+} catch (DbException &dbe) {
+ // Handle the exception, the handles have already been closed.
+}</pre></blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/app_dispatch.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/disk.html b/libdb/docs/ref/upgrade.4.1/disk.html
new file mode 100644
index 0000000..e216f13
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/disk.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/app_dispatch.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: upgrade requirements</h1>
+<p>All of the access method database formats changed in the Berkeley DB 4.1
+release (Btree/Recno: version 8 to version 9, Hash: version 7 to version
+8, and Queue: version 3 to version 4). <b>The format changes are
+entirely backward-compatible, and no database upgrades are needed.</b>
+Note, however, that databases created using the 4.1 release may not be
+usable with earlier Berkeley DB releases.
+<p>The log file format changed in the Berkeley DB 4.1 release.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/app_dispatch.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/excl.html b/libdb/docs/ref/upgrade.4.1/excl.html
new file mode 100644
index 0000000..302a926
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/excl.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB_EXCL</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/fop.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB_EXCL</h1>
+<p>The <a href="../../api_c/db_open.html#DB_EXCL">DB_EXCL</a> flag to the <a href="../../api_c/db_open.html">DB-&gt;open</a> method now works for
+subdatabases as well as physical files, and it is now possible to use
+the <a href="../../api_c/db_open.html#DB_EXCL">DB_EXCL</a> flag to check for the previous existence of
+subdatabases.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/fop.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/fop.html b/libdb/docs/ref/upgrade.4.1/fop.html
new file mode 100644
index 0000000..ed7d566
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/fop.html
@@ -0,0 +1,128 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB-&gt;associate, DB-&gt;open, DB-&gt;remove, DB-&gt;rename</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/excl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB-&gt;associate, DB-&gt;open, DB-&gt;remove, DB-&gt;rename</h1>
+<p>Historic releases of Berkeley DB transaction-protected the <a href="../../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../../api_c/db_remove.html">DB-&gt;remove</a> and <a href="../../api_c/db_rename.html">DB-&gt;rename</a> methods, but did it in an implicit
+way, that is, applications did not specify the <a href="../../api_c/txn_class.html">DB_TXN</a> handles
+associated with the operations. This approach had a number of problems,
+the most significant of which was there was no way to group operations
+that included database creation, removal or rename. For example,
+applications wanting to maintain a list of the databases in an
+environment in a well-known database had no way to update the well-known
+database and create a database within a single transaction, and so there
+was no way to guarantee the list of databases was correct for the
+environment after system or application failure. Another example might
+be the creation of both a primary database and a database intended to
+serve as a secondary index, where again there was no way to group the
+creation of both databases in a single atomic operation.
+<p>In the 4.1 release of Berkeley DB, this is no longer the case. The
+<a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_associate.html">DB-&gt;associate</a> methods now take a <a href="../../api_c/txn_class.html">DB_TXN</a>
+handle returned by <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a> as an optional argument. New
+<a href="../../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a> and <a href="../../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a> methods taking a
+<a href="../../api_c/txn_class.html">DB_TXN</a> handle as an optional argument have been added.
+<p>To upgrade, applications must add a <a href="../../api_c/txn_class.html">DB_TXN</a> parameter in the
+appropriate location for the <a href="../../api_c/db_open.html">DB-&gt;open</a> method calls, and the
+<a href="../../api_c/db_associate.html">DB-&gt;associate</a> method calls (in both cases, the second argument for
+the C API, the first for the C++ or Java APIs).
+<p>Applications wanting to transaction-protect their <a href="../../api_c/db_open.html">DB-&gt;open</a> and
+<a href="../../api_c/db_associate.html">DB-&gt;associate</a> method calls can add a NULL <a href="../../api_c/txn_class.html">DB_TXN</a>
+argument and specify the <a href="../../api_c/env_set_flags.html#DB_AUTO_COMMIT">DB_AUTO_COMMIT</a> flag to the two calls,
+which wraps the operation in an internal Berkeley DB transaction.
+Applications wanting to transaction-protect the remove and rename
+operations must rewrite their calls to the <a href="../../api_c/db_remove.html">DB-&gt;remove</a> and
+<a href="../../api_c/db_rename.html">DB-&gt;rename</a> methods to be, instead, calls to the new
+<a href="../../api_c/env_dbremove.html">DB_ENV-&gt;dbremove</a> and <a href="../../api_c/env_dbrename.html">DB_ENV-&gt;dbrename</a> methods. Applications not
+wanting to transaction-protect any of the operations can add a NULL
+argument to their <a href="../../api_c/db_open.html">DB-&gt;open</a> and <a href="../../api_c/db_associate.html">DB-&gt;associate</a> method calls and
+require no further changes.
+<p>For example, an application currently opening and closing a database as
+follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err_handler;
+<p>
+if ((ret = dbp-&gt;open(dbp, "file", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ (void)dbp-&gt;close(dbp);
+ goto err_handler;
+}</pre></blockquote>
+<p>could transaction-protect the <a href="../../api_c/db_open.html">DB-&gt;open</a> call as follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err_handler;
+<p>
+if ((ret = dbp-&gt;open(dbp,
+ NULL, "file", NULL, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0664)) != 0) {
+ (void)dbp-&gt;close(dbp);
+ goto err_handler;
+}</pre></blockquote>
+<p>An application currently removing a database as follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err_handler;
+<p>
+if ((ret = dbp-&gt;remove(dbp, "file", NULL, 0)) != 0)
+ goto err_handler;</pre></blockquote>
+<p>could transaction-protect the database removal as follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret =
+ dbenv-&gt;dbremove(dbenv, NULL, "file", NULL, DB_AUTO_COMMIT)) != 0)
+ goto err_handler;</pre></blockquote>
+<p>An application currently renaming a database as follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err_handler;
+<p>
+if ((ret = dbp-&gt;rename(dbp, "file", NULL, "newname", 0)) != 0)
+ goto err_handler;</pre></blockquote>
+<p>could transaction-protect the database renaming as follows:
+<p><blockquote><pre>DB *dbp;
+DB_ENV *dbenv;
+int ret;
+<p>
+if ((ret = dbenv-&gt;dbrename(
+ dbenv, NULL, "file", NULL, "newname", DB_AUTO_COMMIT)) != 0)
+ goto err_handler;</pre></blockquote>
+<p>These examples are the simplest possible translation, and will result in
+behavior matching that of previous releases. For further discussion on
+how to transaction-protect <a href="../../api_c/db_open.html">DB-&gt;open</a> method calls, see
+<a href="../../ref/transapp/data_open.html">Opening the databases</a>.
+<p><a href="../../api_c/db_class.html">DB</a> handles that will later be used for transaction-protected
+operations must be opened within a transaction. Specifying a
+transaction handle to operations using handles not opened within a
+transaction will return an error. Similarly, not specifying a
+transaction handle to operations using handles that were opened within
+a transaction will also return an error.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/excl.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/log_register.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/hash_nelem.html b/libdb/docs/ref/upgrade.4.1/hash_nelem.html
new file mode 100644
index 0000000..ea14711
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/hash_nelem.html
@@ -0,0 +1,25 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB-&gt;stat.hash_nelem</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/memp_sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB-&gt;stat.hash_nelem</h1>
+<p>The <b>hash_nelem</b> field of the <a href="../../api_c/db_stat.html">DB-&gt;stat</a> method for Hash
+databases has been removed from the 4.1 release; this information is no
+longer available to applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/memp_sync.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/incomplete.html b/libdb/docs/ref/upgrade.4.1/incomplete.html
new file mode 100644
index 0000000..07520e3
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/incomplete.html
@@ -0,0 +1,30 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB_INCOMPLETE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/memp_sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB_INCOMPLETE</h1>
+<p>The DB_INCOMPLETE error has been removed from the 4.1 release, and is
+no longer returned by the Berkeley DB library. Applications no longer need
+to check for this error return, as the underlying Berkeley DB interfaces that
+could historically fail to checkpoint or flush the cache and return this
+error can no longer fail for that reason. Applications should remove
+all uses of DB_INCOMPLETE.
+<p>Additionally, the <a href="../../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a> and <a href="../../api_java/db_sync.html">Db.sync</a>
+methods have been changed from returning int to returning void.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/checkpoint.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/memp_sync.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/intro.html b/libdb/docs/ref/upgrade.4.1/intro.html
new file mode 100644
index 0000000..f9d9f31
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/intro.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/excl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 4.0 release interfaces to the Berkeley DB 4.1 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/excl.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/java.html b/libdb/docs/ref/upgrade.4.1/java.html
new file mode 100644
index 0000000..6a6647a
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/java.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: Java exceptions</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/hash_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: Java exceptions</h1>
+<p>The Java <a href="../../api_java/env_class.html">DbEnv</a> constructor is now marked with "throws
+DbException". This means applications must construct <a href="../../api_java/env_class.html">DbEnv</a>
+objects in a context where <a href="../../api_java/except_class.html">DbException</a> throwables are
+handled (either in a try/catch block or in a method that propagates the
+exception up the stack). Note that previous versions of the Berkeley DB Java
+API could throw this exception from the constructor but it was not
+marked.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/hash_nelem.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/cxx.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/log_register.html b/libdb/docs/ref/upgrade.4.1/log_register.html
new file mode 100644
index 0000000..81137d8
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/log_register.html
@@ -0,0 +1,27 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB_ENV-&gt;log_register</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/fop.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB_ENV-&gt;log_register</h1>
+<p>The DB_ENV-&gt;log_register and DB_ENV-&gt;log_unregister interfaces
+were removed in the Berkeley DB 4.1 release. It is very unlikely that
+application programs used these interfaces. If your application used
+these interfaces, please contact Sleepycat Software support for help in
+upgrading.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/fop.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/log_stat.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/log_stat.html b/libdb/docs/ref/upgrade.4.1/log_stat.html
new file mode 100644
index 0000000..7b7057e
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/log_stat.html
@@ -0,0 +1,26 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: st_flushcommit</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: st_flushcommit</h1>
+<p>The DB_ENV-&gt;log_stat "st_flushcommits" statistic has been removed
+from Berkeley DB, as it is now the same as the "st_scount" statistic. Any
+application using the "st_flushcommits" statistic should remove it or
+replace it with the "st_scount" statistic.
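+<p>As a minimal illustrative sketch, an application that previously
+reported the "st_flushcommits" value might now report "st_scount"
+instead (the output formatting is illustrative only):
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Print the log flush count using the 4.1 statistic. */
+int
+print_log_flushes(DB_ENV *dbenv)
+{
+    DB_LOG_STAT *sp;
+    int ret;
+
+    if ((ret = dbenv-&gt;log_stat(dbenv, &amp;sp, 0)) != 0)
+        return (ret);
+    printf("log flushes: %lu\n", (unsigned long)sp-&gt;st_scount);
+    free(sp);    /* the statistics buffer is allocated for the caller */
+    return (0);
+}</pre></blockquote>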
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/log_register.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/checkpoint.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/memp_sync.html b/libdb/docs/ref/upgrade.4.1/memp_sync.html
new file mode 100644
index 0000000..40b0cc5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/memp_sync.html
@@ -0,0 +1,31 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.1: DB_ENV-&gt;memp_sync</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.1/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/hash_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.1: DB_ENV-&gt;memp_sync</h1>
+<p>Historical documentation for the <a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> interface stated:
+<p><blockquote><pre>In addition, if <a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> returns success, the value of
+<b>lsn</b> will be overwritten with the largest log sequence number
+from any page that was written by <a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> to satisfy this
+request.</pre></blockquote>
+<p>This functionality was never correctly implemented, and has been removed
+in the Berkeley DB 4.1 release. It is very unlikely that application programs
+used this information. If your application used this information, please
+contact Sleepycat Software support for help in upgrading.
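+<p>As a minimal illustrative sketch, a 4.1 application should treat the
+LSN argument as input-only; passing NULL, as below, simply flushes all
+modified pages in the cache:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/* Flush the entire cache; no LSN information is returned in 4.1. */
+int
+flush_cache(DB_ENV *dbenv)
+{
+    return (dbenv-&gt;memp_sync(dbenv, NULL));
+}</pre></blockquote>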
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.1/incomplete.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.1/hash_nelem.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade.4.1/toc.html b/libdb/docs/ref/upgrade.4.1/toc.html
new file mode 100644
index 0000000..de2a0d5
--- /dev/null
+++ b/libdb/docs/ref/upgrade.4.1/toc.html
@@ -0,0 +1,37 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 4.0.X applications to Berkeley DB 4.1</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 4.0.X applications to Berkeley DB 4.1</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 4.1: introduction</a>
+<li><a href="excl.html">Release 4.1: DB_EXCL</a>
+<li><a href="fop.html">Release 4.1: DB-&gt;associate, DB-&gt;open, DB-&gt;remove, DB-&gt;rename</a>
+<li><a href="log_register.html">Release 4.1: DB_ENV-&gt;log_register</a>
+<li><a href="log_stat.html">Release 4.1: st_flushcommit</a>
+<li><a href="checkpoint.html">Release 4.1: DB_CHECKPOINT, DB_CURLSN</a>
+<li><a href="incomplete.html">Release 4.1: DB_INCOMPLETE</a>
+<li><a href="memp_sync.html">Release 4.1: DB_ENV-&gt;memp_sync</a>
+<li><a href="hash_nelem.html">Release 4.1: DB-&gt;stat.hash_nelem</a>
+<li><a href="java.html">Release 4.1: Java exceptions</a>
+<li><a href="cxx.html">Release 4.1: C++ exceptions</a>
+<li><a href="app_dispatch.html">Release 4.1: Application-specific logging and recovery</a>
+<li><a href="disk.html">Release 4.1: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade/process.html b/libdb/docs/ref/upgrade/process.html
new file mode 100644
index 0000000..a4d20cf
--- /dev/null
+++ b/libdb/docs/ref/upgrade/process.html
@@ -0,0 +1,131 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB installations</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade/version.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB installations</h1>
+<p>The following information describes the general process of upgrading
+Berkeley DB installations. There are four areas to be considered when
+upgrading Berkeley DB applications and database environments: the application
+API, the database environment's region files, the underlying database
+formats, and, in the case of transactional database environments, the
+log files. The upgrade procedures required depend on whether the
+release is a major or minor release (in which either the major or
+minor number of the version changed) or a patch release (in which only
+the patch number of the version changed). Berkeley DB major and minor
+releases may include changes in all four areas; that is, the
+application API, region files, database formats, and log files may not
+be backward-compatible with previous releases.
+<p>Each Berkeley DB major or minor release has information in this chapter of
+the Reference Guide, describing how to upgrade to the new release. The
+section describes any API changes made in the release. Application
+maintainers should review the API changes and update their applications
+as necessary before recompiling with the new release. In addition, each
+section includes a page specifying whether the log file format or
+database formats changed in non-backward-compatible ways as part of the
+release. Because there are several underlying Berkeley DB database formats,
+and they do not all necessarily change in the same release, changes to
+a database format in a release may not affect any particular
+application. Further, database and log file formats may have changed
+but be entirely backward-compatible, in which case no upgrade will be
+necessary.
+<p>A Berkeley DB patch release will never modify the API, regions, log files, or
+database formats in incompatible ways, and so applications need only be
+relinked (or, in the case of a shared library, pointed at the new
+version of the shared library) to upgrade to a new release. Note that
+internal Berkeley DB interfaces may change at any time and in any release
+(including patch releases) without warning. This means the library must
+be entirely recompiled and reinstalled when upgrading to new releases
+of the library because there is no guarantee that modules from one
+version of the library will interact correctly with modules from another
+release.
+<p>If the release is a patch release, do the following:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<li>Install the new version of the application by relinking or installing
+a new version of the Berkeley DB shared library.
+<li>Restart the application.
+</ol>
+<p>Otherwise, if the application <b>does not</b> have a Berkeley DB
+transactional environment, the application may be installed in the field
+using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<li>Remove any Berkeley DB environment using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method or an
+appropriate system utility.
+<li>Recompile and install the new version of the application.
+<li>If necessary, upgrade the application's databases. See
+<a href="../../ref/am/upgrade.html">Upgrading databases</a> for more
+information.
+<li>Restart the application.
+</ol>
+<p>Otherwise, if the application has a Berkeley DB transactional environment,
+but neither the log file format nor the database formats need upgrading, the
+application may be installed in the field using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<li>Run recovery on the database environment using the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<li>Remove any Berkeley DB environment using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method or an
+appropriate system utility.
+<li>Recompile and install the new version of the application.
+<li>Restart the application.
+</ol>
+<p>If the application has a Berkeley DB transactional environment, and the log
+files need upgrading but the databases do not, the application may be
+installed in the field using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<li>Run recovery on the database environment using the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<li>Remove any Berkeley DB environment using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method or an
+appropriate system utility.
+<li>Archive the database environment for catastrophic recovery. See
+<a href="../../ref/transapp/archival.html">Archival procedures</a> for more
+information.
+<li>Recompile and install the new version of the application.
+<li>Restart the application.
+</ol>
+<p>Otherwise, if the application has a Berkeley DB transactional environment and
+the databases need upgrading, the application may be installed in the
+field using the following steps:
+<p><ol>
+<p><li>Shut down the old version of the application.
+<li>Run recovery on the database environment using the <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> method
+or the <a href="../../utility/db_recover.html">db_recover</a> utility.
+<li>Remove any Berkeley DB environment using the <a href="../../api_c/env_remove.html">DB_ENV-&gt;remove</a> method or an
+appropriate system utility.
+<li>Archive the database environment for catastrophic recovery. See
+<a href="../../ref/transapp/archival.html">Archival procedures</a> for more
+information.
+<li>Recompile and install the new version of the application.
+<li>Upgrade the application's databases. See
+<a href="../../ref/am/upgrade.html">Upgrading databases</a> for more
+information.
+<li>Archive the database for catastrophic recovery again (using different
+media than before, of course). Note: This archival is not strictly
+necessary. However, if you have to perform catastrophic recovery after
+restarting the application, that recovery must be done based on the
+last archive you have made. If you make this second archive, you can
+use it as the basis of that catastrophic recovery. If you do not make
+this second archive, you have to use the archive you made in step 4 as
+the basis of your recovery, and you have to do a full upgrade on it
+before you can apply log files created after the upgrade to it.
+<li>Restart the application.
+</ol>
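+<p>The recovery and environment-removal steps common to the previous
+procedures can also be performed programmatically. The following is a
+minimal sketch, assuming an application-chosen environment home
+directory and omitting any application-specific environment configuration:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+/*
+ * Run conventional recovery on the environment, then remove its
+ * region files so they are re-created by the new release.
+ */
+int
+recover_and_remove(const char *home)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    if ((ret = dbenv-&gt;open(dbenv, home, DB_CREATE |
+        DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+        DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
+        (void)dbenv-&gt;close(dbenv, 0);
+        return (ret);
+    }
+    if ((ret = dbenv-&gt;close(dbenv, 0)) != 0)
+        return (ret);
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+    return (dbenv-&gt;remove(dbenv, home, 0));
+}</pre></blockquote>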
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade/version.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.2.0/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/upgrade/version.html b/libdb/docs/ref/upgrade/version.html
new file mode 100644
index 0000000..26a64ae
--- /dev/null
+++ b/libdb/docs/ref/upgrade/version.html
@@ -0,0 +1,47 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Library version information</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/build_vxworks/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/process.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Library version information</h1>
+<p>Each release of the Berkeley DB library has a major version number, a minor
+version number, and a patch number.
+<p>The major version number changes only when major portions of the Berkeley DB
+functionality have been changed. In this case, it may be necessary to
+significantly modify applications in order to upgrade them to use the new
+version of the library.
+<p>The minor version number changes when Berkeley DB interfaces have changed,
+and the new release is not entirely backward-compatible with previous
+releases. To upgrade applications to the new version, they must be
+recompiled and, potentially, minor modifications must be made (for
+example, the order of arguments to a function might have changed).
+<p>The patch number changes on each release. If only the patch number has
+changed in a release, applications do not need to be recompiled, and
+they can be upgraded to the new version by installing the new version
+of a shared library or by relinking the application to the new version
+of a static library.
+<p>Internal Berkeley DB interfaces may change at any time and during any release,
+without warning. This means that the library must be entirely recompiled
+and reinstalled when upgrading to new releases of the library because
+there is no guarantee that modules from the current version of the
+library will interact correctly with modules from a previous release.
+<p>To retrieve the Berkeley DB version information, applications should use the
+<a href="../../api_c/env_version.html">db_version</a> interface. In addition to the previous
+information, the <a href="../../api_c/env_version.html">db_version</a> interface returns a string
+encapsulating the version information, suitable for display to a user.
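+<p>The following minimal sketch illustrates the call (the output
+formatting is illustrative only):
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;stdio.h&gt;
+
+int
+main()
+{
+    int major, minor, patch;
+    char *version;
+
+    /* db_version fills in the numbers and returns a display string. */
+    version = db_version(&amp;major, &amp;minor, &amp;patch);
+    printf("%s (%d.%d.%d)\n", version, major, minor, patch);
+    return (0);
+}</pre></blockquote>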
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_vxworks/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade/process.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/xa/build.html b/libdb/docs/ref/xa/build.html
new file mode 100644
index 0000000..6c90cd7
--- /dev/null
+++ b/libdb/docs/ref/xa/build.html
@@ -0,0 +1,178 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building a Global Transaction Manager</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distributed Transactions</dl></h3></td>
+<td align=right><a href="../../ref/xa/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/xa_intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building a Global Transaction Manager</h1>
+<p>Managing distributed transactions and using the two-phase commit
+protocol of Berkeley DB from an application requires that the application
+provide the functionality of a global transaction manager (GTM). The GTM is
+responsible for the following:
+<p><ul type=disc>
+<li>Communicating with the multiple environments (potentially on separate
+systems).
+<li>Managing the global transaction ID name space.
+<li>Maintaining state information about each distributed transaction.
+<li>Recovering from failures of individual environments.
+<li>Recovering the global transaction state after failure of the global
+transaction manager.
+</ul>
+<h3>Communicating with multiple Berkeley DB environments</h3>
+<p>Two-phase commit is required if an application wants to
+transaction-protect Berkeley DB calls across multiple environments. If the environments
+reside on the same machine, the application can communicate with each
+environment through its own address space with no additional complexity.
+If the environments reside on separate machines, the application can
+either use the Berkeley DB RPC server to manage the remote environments or it
+may use its own messaging capability, translating messages on the remote
+machine into calls into the Berkeley DB library (including the recovery
+calls). For some applications, it might be sufficient to use Tcl's
+remote invocation facilities to drive remote copies of the tclsh utility
+into which the Berkeley DB library has been dynamically loaded.
+<h3>Managing the Global Transaction ID (GID) name space</h3>
+<p>A global transaction is a transaction that spans multiple environments.
+Each global transaction must have a unique transaction ID. This unique
+ID is the global transaction ID (GID). In Berkeley DB, global transaction
+IDs must fit within a <a href="../../api_c/txn_prepare.html#DB_XIDDATASIZE">DB_XIDDATASIZE</a>-sized
+(currently 128 bytes) array. It is the responsibility of the
+global transaction manager to assign GIDs, guarantee their uniqueness,
+and manage the mapping of local transactions to GIDs. That is, for each
+GID, the GTM should know which local transaction managers participated.
+The Berkeley DB logging system or a Berkeley DB table could be used to record this
+information.
+<h3>Maintaining state for each distributed transaction</h3>
+<p>In addition to knowing which local environments participate in each
+global transaction, the GTM must also know the state of each active
+global transaction. As soon as a transaction becomes distributed (that
+is, a second environment participates), the GTM must record the
+existence of the global transaction and all participants (whether this
+must reside on stable storage or not depends on the exact configuration
+of the system). As new environments participate, the GTM must keep this
+information up to date.
+<p>When the GTM is ready to begin commit processing, it should issue
+<a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a> calls to each participating environment, indicating
+the GID of the global transaction. Once all the participants have
+successfully prepared, then the GTM must record that the global
+transaction will be committed. This record should go to stable
+storage. Once written to stable storage, the GTM can send
+<a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a> requests to each participating environment. Once
+all environments have successfully completed the commit, the GTM can
+either record the successful commit or can somehow "forget" the global
+transaction.
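+<p>The following is a minimal sketch of this commit sequence for two
+participating environments; the GTM's stable-storage record keeping is
+indicated only by comments, and error handling is simplified:
+<p><blockquote><pre>#include &lt;db.h&gt;
+
+int
+gtm_commit(DB_TXN *txn1, DB_TXN *txn2, u_int8_t gid[DB_XIDDATASIZE])
+{
+    int ret;
+
+    /* Phase 1: prepare each participant under the same GID. */
+    if ((ret = txn1-&gt;prepare(txn1, gid)) != 0 ||
+        (ret = txn2-&gt;prepare(txn2, gid)) != 0) {
+        (void)txn1-&gt;abort(txn1);
+        (void)txn2-&gt;abort(txn2);
+        /* Record the abort decision on stable storage here. */
+        return (ret);
+    }
+
+    /* Record the commit decision on stable storage before phase 2. */
+
+    /* Phase 2: commit in each participating environment. */
+    if ((ret = txn1-&gt;commit(txn1, 0)) != 0)
+        return (ret);
+    return (txn2-&gt;commit(txn2, 0));
+}</pre></blockquote>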
+<p>If nested transactions are used (that is, the <b>parent</b> parameter
+is specified to <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>), no <a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a> call should
+be made on behalf of any child transaction. Only the ultimate parent
+should ever issue a <a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a>.
+<p>Should any participant fail to prepare, then the GTM must abort the
+global transaction. The fact that the transaction is going to be
+aborted should be written to stable storage. Once written, the GTM can
+then issue <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> requests to each environment. When all
+aborts have returned successfully, the GTM can either record the
+successful abort or "forget" the global transaction.
+<p>In summary, for each transaction, the GTM must maintain the following:
+<p><ul type=disc>
+<li>A list of participating environments
+<li>The current state of each transaction (pre-prepare, preparing,
+committing, aborting, done)
+</ul>
+<h3>Recovering from the failure of a single environment</h3>
+<p>If a single environment fails, there is no need to bring down or recover
+other environments (the only exception to this is if all environments
+are managed in the same application address space and there is a risk
+that the failure of the environment corrupted other environments).
+Instead, once the failing environment comes back up, it should be
+recovered (that is, conventional recovery should be run, either via
+<a href="../../utility/db_recover.html">db_recover</a> or by
+specifying the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>).
+If the <a href="../../utility/db_recover.html">db_recover</a> utility is used, then the <b>-e</b> option must be
+specified. In this case, the application will almost certainly want
+to specify environmental parameters via a DB_CONFIG file in the
+environment's home directory, so that <a href="../../utility/db_recover.html">db_recover</a> can create
+an appropriately configured environment.
+If the <a href="../../utility/db_recover.html">db_recover</a> utility is not used, then <a href="../../api_c/env_open.html#DB_PRIVATE">DB_PRIVATE</a>
+should not be specified unless all processing (including recovery, calls
+to <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>, and calls to finish prepared but not yet
+completed transactions) takes place using the same database environment
+handle.
+The GTM should then issue a <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> call to the environment.
+This call will return a list of prepared, but not yet committed or
+aborted transactions. For each transaction, the GTM should look up the
+GID in its local store to determine if the transaction should commit or
+abort.
+If the GTM is running in a system with multiple GTMs, it is possible that
+some of the transactions returned via <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> do not belong
+to the current GTM. The GTM should detect this and call
+<a href="../../api_c/txn_discard.html">DB_TXN-&gt;discard</a> on each such transaction handle.
+Furthermore, it is important to note that the environment does not
+retain information about which GTM has issued <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>
+operations. Therefore, each GTM should issue all its <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>
+calls, before another GTM issues its calls. If the calls are interleaved,
+each GTM may not get a complete and consistent set of transactions.
+The simplest way to enforce this is for each GTM to make sure it can
+receive all its outstanding transactions in a single <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>
+call. The maximum number of possible outstanding transactions is bounded
+by the maximum number of active transactions in the environment. This
+number can be obtained by using the <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a> interface or the
+<a href="../../utility/db_stat.html">db_stat</a> utility.
+<p>The newly recovered environment will forbid any new transactions from
+being started until the prepared but not yet committed/aborted
+transactions have been resolved. In the multiple GTM case, this means
+that all GTMs must recover before any GTM can begin issuing new transactions.
+<p>Because Berkeley DB flushes both commit and abort records to disk for
+two-phase transactions, once the global transaction has either committed
+or aborted, no action will be necessary in any environment. If local
+environments are running with the <a href="../../api_c/env_set_flags.html#DB_TXN_WRITE_NOSYNC">DB_TXN_WRITE_NOSYNC</a> or
+<a href="../../api_c/env_set_flags.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> options (that is, is not writing and/or flushing
+the log synchronously at commit time), then it is possible that a commit
+or abort operation may not have been written in the environment. In
+this case, the GTM must always have a record of completed transactions
+to determine if prepared transactions should be committed or aborted.
+<h3>Recovering from GTM failure</h3>
+<p>If the GTM fails, it must first recover its local state. Assuming the
+GTM uses Berkeley DB tables to maintain state, it should run
+<a href="../../utility/db_recover.html">db_recover</a> (or the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> option to
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>) upon startup. Once the GTM is back up and running,
+it needs to review all its outstanding global transactions; that is, all
+transactions that are recorded but not yet committed or aborted.
+<p>Any global transactions which have not yet reached the prepare phase
+should be aborted. If these transactions were on remote systems, the
+remote systems should eventually time them out and abort them. If these
+transactions are on the local system, we assume they crashed and were
+aborted as part of GTM startup.
+<p>The GTM must then identify all environments which need to have their
+<a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> interface called. This includes all environments
+that participate in any transaction that is in the preparing, aborting,
+or committing state. For each environment, the GTM should issue a
+<a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> call. Once each environment has responded, the GTM
+can determine the fate of each transaction. The correct behavior
+depends on the state of the global transaction, as described below.
+<p><dl compact>
+<p><dt>preparing<dd>if all participating environments return the transaction in the prepared
+but not yet committed/aborted state, then the GTM should commit the
+transaction. If any participating environment fails to return it, then
+the GTM should issue an abort to all environments that did return it.
+<p><dt>committing<dd>the GTM should send a commit to any environment that returned this
+transaction in its list of prepared but not yet committed/aborted
+transactions.
+<p><dt>aborting<dd>the GTM should send an abort to any environment that returned this
+transaction in its list of prepared but not yet committed/aborted
+transactions.
+</dl>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/xa/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/xa_intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/xa/faq.html b/libdb/docs/ref/xa/faq.html
new file mode 100644
index 0000000..d383cfc
--- /dev/null
+++ b/libdb/docs/ref/xa/faq.html
@@ -0,0 +1,66 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: XA FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distributed Transactions</dl></h3></td>
+<td align=right><a href="../../ref/xa/xa_config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>XA FAQ</h1>
+<p><ol>
+<p><li><b>Is it possible to mix XA and non-XA transactions?</b>
+<p>Yes. It is also possible for XA and non-XA transactions to coexist in
+the same Berkeley DB environment. To do this, specify the same environment
+to the non-XA <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> calls as was specified in the Tuxedo
+configuration file.
+<hr size=1 noshade>
+<p><li><b>Does converting an application to run within XA change any of
+its existing C/C++ API calls?</b>
+<p>When converting an application to run under XA, the application's Berkeley DB
+calls are unchanged, with three exceptions:
+<p><ol>
+<p><li>The application must specify the <a href="../../api_c/db_create.html#DB_XA_CREATE">DB_XA_CREATE</a> flag to
+the <a href="../../api_c/db_create.html">db_create</a> interface.
+<p><li>Unless the application is performing an operation for a non-XA
+transaction, the application should never explicitly call
+<a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a>, <a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a> or <a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>, and those
+calls should be replaced by calls into the Tuxedo transaction manager.
+<p><li>Unless the application is performing an operation for a non-XA
+transaction, the application should specify a transaction argument of NULL
+to Berkeley DB methods taking transaction arguments (for example, <a href="../../api_c/db_put.html">DB-&gt;put</a>
+or <a href="../../api_c/db_cursor.html">DB-&gt;cursor</a>).
+</ol>
+<p>Otherwise, the application should be unchanged.
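+<p>A minimal sketch of the three changes listed above (the database name
+and key/data values are illustrative only):
+<p><blockquote><pre>#include &lt;db.h&gt;
+#include &lt;string.h&gt;
+
+int
+xa_put_example(DB **dbpp)
+{
+    DB *dbp;
+    DBT key, data;
+    int ret;
+
+    /* 1. Create the handle with DB_XA_CREATE (no local DB_ENV). */
+    if ((ret = db_create(&amp;dbp, NULL, DB_XA_CREATE)) != 0)
+        return (ret);
+    if ((ret = dbp-&gt;open(dbp, NULL,
+        "accounts.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+        (void)dbp-&gt;close(dbp, 0);
+        return (ret);
+    }
+    *dbpp = dbp;
+
+    memset(&amp;key, 0, sizeof(key));
+    memset(&amp;data, 0, sizeof(data));
+    key.data = "key";
+    key.size = sizeof("key");
+    data.data = "value";
+    data.size = sizeof("value");
+
+    /* 2, 3. No DB_ENV-&gt;txn_begin or DB_TXN-&gt;commit calls, and a NULL
+     * transaction argument: the Tuxedo transaction manager supplies
+     * and resolves the XA transaction. */
+    return (dbp-&gt;put(dbp, NULL, &amp;key, &amp;data, 0));
+}</pre></blockquote>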
+<hr size=1 noshade>
+<p><li><b>How does Berkeley DB recovery interact with recovery by the Tuxedo
+transaction manager?</b>
+<p>Recovery is completed in two steps. First, each resource manager should
+recover its environment(s). This can be done via a program that calls
+<a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> or by calling the <a href="../../utility/db_recover.html">db_recover</a> utility. If
+using the <a href="../../utility/db_recover.html">db_recover</a> utility, then the <b>-e</b> option
+should be specified so that the regions that are recovered persist after
+the utility exits. Any transactions that were prepared, but neither
+completed nor aborted, are restored to their prepared state so that they
+may be aborted or committed via the Tuxedo recovery mechanisms. After
+each resource manager has recovered, then Tuxedo recovery may begin.
+Tuxedo will interact with each resource manager via the __db_xa_recover
+function, which returns the list of prepared but not yet completed
+transactions. It should issue a commit or abort for each one, and only
+after having completed each transaction will normal processing resume.
+<p>Finally, standard log file archival and catastrophic recovery procedures
+should occur independently of XA operation.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/xa/xa_config.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/apprec/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/xa/intro.html b/libdb/docs/ref/xa/intro.html
new file mode 100644
index 0000000..49021d0
--- /dev/null
+++ b/libdb/docs/ref/xa/intro.html
@@ -0,0 +1,53 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distributed Transactions</dl></h3></td>
+<td align=right><a href="../../ref/rep/ex_rq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/build.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB provides support for distributed transactions using a two-phase
+commit protocol via its <a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a> and <a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>
+interfaces. The <a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a> method performs the first phase of a
+two-phase commit, flushing the log to disk, and associating a global
+transaction ID with the underlying Berkeley DB transaction. This global
+transaction ID should be used by the global transaction manager to
+identify the Berkeley DB transaction, and will be returned by the
+<a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a> method when it is called during recovery.
+<p>Distributed transactions are necessary whenever an application wants to
+transaction-protect data in multiple Berkeley DB environments, even if those
+environments are on the same machine. However, Berkeley DB does not perform
+distributed deadlock detection; it is therefore the responsibility of
+the application to ensure that accesses in different environments cannot
+deadlock. This can be accomplished through careful ordering of
+operations to the multiple environments, or by using the
+<a href="../../api_c/lock_vec.html#DB_LOCK_NOWAIT">DB_LOCK_NOWAIT</a> option and eventually aborting transactions that
+have been waiting "too long" (where "too long" is determined by an
+application-specific timeout).
+<p>In order to use the two-phase commit feature of Berkeley DB, an application
+must either implement its own global transaction manager or use an
+XA-compliant transaction manager (as Berkeley DB can act as an XA-compliant
+resource manager).
+<p>When using distributed transactions, there is no way to perform
+hot backups of multiple environments and guarantee that the backups
+are global-transaction-consistent across these multiple environments.
+If backups are desired, then all write transactions should be suspended;
+that is, active write transactions must be allowed to complete and no
+new write transactions should be started. Once there are no active write
+transactions, the logs may be copied for backup purposes and the backup
+will be consistent across the multiple environments.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/ex_rq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/build.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/xa/xa_config.html b/libdb/docs/ref/xa/xa_config.html
new file mode 100644
index 0000000..a94d4fc
--- /dev/null
+++ b/libdb/docs/ref/xa/xa_config.html
@@ -0,0 +1,81 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Configuring Berkeley DB with the Tuxedo System</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distributed Transactions</dl></h3></td>
+<td align=right><a href="../../ref/xa/xa_intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Configuring Berkeley DB with the Tuxedo System</h1>
+<p>This information assumes that you have already installed the Berkeley DB
+library.
+<p>First, you must update the resource manager file in Tuxedo. For the
+purposes of this discussion, assume that the Tuxedo home directory is in
+<p><blockquote><pre>/home/tuxedo</pre></blockquote>
+In that case, the resource manager file will be located in
+<p><blockquote><pre>/home/tuxedo/udataobj/RM</pre></blockquote>
+Edit the resource manager file, adding the line
+<p><blockquote><pre>BERKELEY-DB:db_xa_switch:-L${DB_INSTALL}/lib -ldb \
+ -lsocket -ldl -lm</pre></blockquote>
+<p>where ${DB_INSTALL} is the directory into which you installed the Berkeley DB
+library.
+<p><b>Note that the previous load options are for a Sun Microsystems
+Solaris 5.6 Sparc installation of Tuxedo, and may not be correct for
+your system.</b>
+<p>Next, you must build the transaction manager server. To do this, use the
+Tuxedo <b>buildtms</b>(1) utility. The buildtms utility will create
+the Berkeley-DB resource manager in the directory from which it was run.
+The parameters to buildtms should be
+<p><blockquote><pre>buildtms -v -o DBRM -r BERKELEY-DB</pre></blockquote>
+<p>This will create an executable transaction manager server, DBRM, which is
+called by Tuxedo to process begins, commits, and aborts.
+<p>Finally, you must make sure that your TUXCONFIG environment variable
+identifies an ubbconfig file that properly identifies your resource
+managers. In the GROUPS section of the ubb file, you should identify the
+group's LMID and GRPNO, as well as the transaction manager server name
+"TMSNAME=DBRM." You must also specify the OPENINFO parameter, setting it
+equal to the string
+<p><blockquote><pre>rm_name:dir</pre></blockquote>
+<p>where rm_name is the resource name specified in the RM file (that is,
+BERKELEY-DB) and dir is the directory for the Berkeley DB home environment
+(see <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> for a discussion of Berkeley DB environments).
+<p>Because Tuxedo resource manager startup accepts only a single string
+for configuration, any environment customization that might have been
+done via the config parameter to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> must instead be done
+by placing a <a href="../../ref/env/db_config.html#DB_CONFIG">DB_CONFIG</a> file in the Berkeley DB environment directory.
+See <a href="../../ref/env/naming.html">Berkeley DB File Naming</a> for further
+information.
+<p>Consider the following configuration. We have built a transaction
+manager server, as described previously. We want the Berkeley DB environment
+to be <b>/home/dbhome</b>, our database files to be maintained in
+<b>/home/datafiles</b>, our log files to be maintained in
+<b>/home/log</b>, and we want a duplexed server.
+<p>The GROUPS section of the ubb file might look like the following:
+<p><blockquote><pre>group_tm LMID=myname GRPNO=1 TMSNAME=DBRM TMSCOUNT=2 \
+ OPENINFO="BERKELEY-DB:/home/dbhome"</pre></blockquote>
+<p>There would be a <a href="../../ref/env/db_config.html#DB_CONFIG">DB_CONFIG</a> configuration file in the directory
+<b>/home/dbhome</b> that contained the following two lines:
+<p><blockquote><pre>set_data_dir /home/datafiles
+set_log_dir /home/log
+</pre></blockquote>
+<p>Finally, the ubb file must be translated into a binary version using
+Tuxedo's <b>tmloadcf</b>(1) utility, and then the pathname of that
+binary file must be specified as your TUXCONFIG environment variable.
+<p>At this point, your system is properly initialized to use the Berkeley DB
+resource manager.
+<p>See <a href="../../api_c/db_create.html">db_create</a> for further information on accessing data files
+using XA.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/xa/xa_intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/ref/xa/xa_intro.html b/libdb/docs/ref/xa/xa_intro.html
new file mode 100644
index 0000000..829abf1
--- /dev/null
+++ b/libdb/docs/ref/xa/xa_intro.html
@@ -0,0 +1,62 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: XA Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Distributed Transactions</dl></h3></td>
+<td align=right><a href="../../ref/xa/build.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/xa_config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>XA Introduction</h1>
+<p>Berkeley DB can be used as an XA-compliant resource manager. The XA
+implementation is known to work with the Tuxedo(tm) transaction
+manager.
+<p>The XA support is encapsulated in the resource manager switch
+db_xa_switch, which defines the following functions:
+<p><blockquote><pre>__db_xa_close Close the resource manager.
+__db_xa_commit Commit the specified transaction.
+__db_xa_complete Wait for asynchronous operations to
+ complete.
+__db_xa_end Disassociate the application from a
+ transaction.
+__db_xa_forget Forget about a transaction that was heuristically
+ completed. (Berkeley DB does not support heuristic
+ completion.)
+__db_xa_open Open the resource manager.
+__db_xa_prepare Prepare the specified transaction.
+__db_xa_recover Return a list of prepared, but not yet
+ committed transactions.
+__db_xa_rollback Abort the specified transaction.
+__db_xa_start Associate the application with a
+ transaction.
+</pre></blockquote>
+<p>The Berkeley DB resource manager does not support the following optional
+XA features:
+<p><ul type=disc>
+<li>Asynchronous operations
+<li>Transaction migration
+</ul>
+<p>The Tuxedo System is available from <a href="http://www.beasys.com">BEA Systems, Inc.</a>
+<p>For additional information on Tuxedo, see
+<p><blockquote><i>Building Client/Server Applications Using Tuxedo</i>,
+by Hall (John Wiley &amp; Sons, Inc.).</blockquote>
+<p>For additional information on XA Resource Managers, see
+<p><blockquote>X/Open CAE Specification
+<i>Distributed Transaction Processing: The XA Specification</i>,
+X/Open Document Number: XO/CAE/91/300.</blockquote>
+<p>For additional information on The Tuxedo System, see
+<p><blockquote><i>The Tuxedo System</i>,
+by Andrade, Carges, Dwyer and Felts (Addison Wesley Longman).</blockquote>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/xa/build.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/xa_config.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/reftoc.html b/libdb/docs/reftoc.html
new file mode 100644
index 0000000..d871051
--- /dev/null
+++ b/libdb/docs/reftoc.html
@@ -0,0 +1,372 @@
+<!--$Id$-->
+<html>
+<head>
+<title>Berkeley DB Tutorial and Reference Guide (Version: 4.1.25)</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+
+<center>
+<h1><b>Berkeley DB Tutorial and Reference Guide, Version 4.1.25</b></h1>
+</center>
+
+<ol>
+<font size="+1"><li><a name="intro">Introduction</a></font>
+ <ol>
+ <li><a href="ref/intro/data.html">An introduction to data management</a>
+ <li><a href="ref/intro/terrain.html">Mapping the terrain: theory and practice</a>
+ <li><a href="ref/intro/dbis.html">What is Berkeley DB?</a>
+ <li><a href="ref/intro/dbisnot.html">What Berkeley DB is not</a>
+ <li><a href="ref/intro/need.html">Do you need Berkeley DB?</a>
+ <li><a href="ref/intro/what.html">What other services does Berkeley DB provide?</a>
+ <li><a href="ref/intro/distrib.html">What does the Berkeley DB distribution include?</a>
+ <li><a href="ref/intro/where.html">Where does Berkeley DB run?</a>
+ <li><a href="ref/intro/products.html">Sleepycat Software's Berkeley DB products</a>
+ </ol>
+<font size="+1"><li><a name="simple_tut">A Simple Access Method Tutorial</a></font>
+ <ol>
+ <li><a href="ref/simple_tut/intro.html">Introduction</a>
+ <li><a href="ref/simple_tut/keydata.html">Key/data pairs</a>
+ <li><a href="ref/simple_tut/handles.html">Object handles</a>
+ <li><a href="ref/simple_tut/errors.html">Error returns</a>
+ <li><a href="ref/simple_tut/open.html">Opening a database</a>
+ <li><a href="ref/simple_tut/put.html">Adding elements to a database</a>
+ <li><a href="ref/simple_tut/get.html">Retrieving elements from a database</a>
+ <li><a href="ref/simple_tut/del.html">Removing elements from a database</a>
+ <li><a href="ref/simple_tut/close.html">Closing a database</a>
+ </ol>
+<font size="+1"><li><a name="am_conf">Access Method Configuration</a></font>
+ <ol>
+ <li><a href="ref/am_conf/intro.html">What are the available access methods?</a>
+ <li><a href="ref/am_conf/select.html">Selecting an access method</a>
+ <li><a href="ref/am_conf/logrec.html">Logical record numbers</a>
+ <li>General access method configuration
+ <ol>
+ <li><a href="ref/am_conf/pagesize.html">Selecting a page size</a>
+ <li><a href="ref/am_conf/cachesize.html">Selecting a cache size</a>
+ <li><a href="ref/am_conf/byteorder.html">Selecting a byte order</a>
+ <li><a href="ref/am_conf/dup.html">Duplicate data items</a>
+ <li><a href="ref/am_conf/malloc.html">Non-local memory allocation</a>
+ </ol>
+ <li>Btree access method specific configuration
+ <ol>
+ <li><a href="ref/am_conf/bt_compare.html">Btree comparison</a>
+ <li><a href="ref/am_conf/bt_prefix.html">Btree prefix comparison</a>
+ <li><a href="ref/am_conf/bt_minkey.html">Minimum keys per page</a>
+ <li><a href="ref/am_conf/bt_recnum.html">
+ Retrieving Btree records by logical record number</a>
+ </ol>
+ <li>Hash access method specific configuration
+ <ol>
+ <li><a href="ref/am_conf/h_ffactor.html">Page fill factor</a>
+ <li><a href="ref/am_conf/h_hash.html">Specifying a database hash</a>
+ <li><a href="ref/am_conf/h_nelem.html">Hash table size</a>
+ </ol>
+ <li>Queue and Recno access method specific configuration
+ <ol>
+ <li><a href="ref/am_conf/recno.html">Managing record-based databases</a>
+ <li><a href="ref/am_conf/extentsize.html">Selecting a Queue extent size</a>
+ <li><a href="ref/am_conf/re_source.html">Flat-text backing files</a>
+ <li><a href="ref/am_conf/renumber.html">Logically renumbering records</a>
+ </ol>
+ </ol>
+<font size="+1"><li><a name="am">Access Method Operations</a></font>
+ <ol>
+ <li><a href="ref/am/ops.html">Access method operations</a>
+ <li><a href="ref/am/open.html">Database open</a>
+ <li><a href="ref/am/opensub.html">Opening multiple databases in a single file</a>
+ <li><a href="ref/am/get.html">Retrieving records</a>
+ <li><a href="ref/am/put.html">Storing records</a>
+ <li><a href="ref/am/delete.html">Deleting records</a>
+ <li><a href="ref/am/stat.html">Database statistics</a>
+ <li><a href="ref/am/truncate.html">Database truncation</a>
+ <li><a href="ref/am/upgrade.html">Database upgrade</a>
+ <li><a href="ref/am/verify.html">Database verification and salvage</a>
+ <li><a href="ref/am/sync.html">Flushing the database cache</a>
+ <li><a href="ref/am/close.html">Database close</a>
+ <li><a href="ref/am/second.html">Secondary indices</a>
+ <li><a href="ref/am/cursor.html">Cursor operations</a>
+ <ol>
+ <li><a href="ref/am/curget.html">Retrieving records with a cursor</a>
+ <li><a href="ref/am/curput.html">Storing records with a cursor</a>
+ <li><a href="ref/am/curdel.html">Deleting records with a cursor</a>
+ <li><a href="ref/am/curdup.html">Duplicating a cursor</a>
+ <li><a href="ref/am/join.html">Equality join</a>
+ <li><a href="ref/am/count.html">Data item count</a>
+ <li><a href="ref/am/curclose.html">Cursor close</a>
+ </ol>
+ </ol>
+<font size="+1"><li><a name="am_misc">Access Method Wrapup</a></font>
+ <ol>
+ <li><a href="ref/am_misc/align.html">Data alignment</a>
+ <li><a href="ref/am_misc/get_bulk.html">Retrieving records in bulk</a>
+ <li><a href="ref/am_misc/partial.html">Partial record storage and retrieval</a>
+ <li><a href="ref/am_misc/struct.html">Storing C/C++ structures/objects</a>
+ <li><a href="ref/am_misc/perm.html">Retrieved key/data permanence for C/C++</a>
+ <li><a href="ref/am_misc/error.html">Error support</a>
+ <li><a href="ref/am_misc/stability.html">Cursor stability</a>
+ <li><a href="ref/am_misc/dbsizes.html">Database limits</a>
+ <li><a href="ref/am_misc/diskspace.html">Disk space requirements</a>
+ <li><a href="ref/am_misc/tune.html">Access method tuning</a>
+ <li><a href="ref/am_misc/faq.html">Access method FAQ</a>
+ </ol>
+<font size="+1"><li><a name="arch">Berkeley DB Architecture</a></font>
+ <ol>
+ <li><a href="ref/arch/bigpic.html">The big picture</a>
+ <li><a href="ref/arch/progmodel.html">Programming model</a>
+ <li><a href="ref/arch/apis.html">Programmatic APIs</a>
+ <li><a href="ref/arch/script.html">Scripting languages</a>
+ <li><a href="ref/arch/utilities.html">Supporting utilities</a>
+ </ol>
+<font size="+1"><li><a name="env">The Berkeley DB Environment</a></font>
+ <ol>
+ <li><a href="ref/env/intro.html">Introduction</a>
+ <li><a href="ref/env/create.html">Creating a database environment</a>
+ <li><a href="ref/env/open.html">Opening databases within the environment</a>
+ <li><a href="ref/env/error.html">Error support</a>
+ <li><a href="ref/env/db_config.html">DB_CONFIG configuration file</a>
+ <li><a href="ref/env/naming.html">File naming</a>
+ <li><a href="ref/env/region.html">Shared memory regions</a>
+ <li><a href="ref/env/security.html">Security</a>
+ <li><a href="ref/env/encrypt.html">Encryption</a>
+ <li><a href="ref/env/remote.html">Remote filesystems</a>
+ <li><a href="ref/env/faq.html">Environment FAQ</a>
+ </ol>
+<font size="+1"><li><a name="cam">Berkeley DB Concurrent Data Store Applications</a></font>
+ <ol>
+ <li><a href="ref/cam/intro.html">Berkeley DB Concurrent Data Store applications</a>
+ </ol>
+<font size="+1"><li><a name="transapp">Berkeley DB Transactional Data Store Applications</a></font>
+ <ol>
+ <li><a href="ref/transapp/intro.html">Berkeley DB Transactional Data Store applications</a>
+ <li><a href="ref/transapp/why.html">Why transactions?</a>
+ <li><a href="ref/transapp/term.html">Terminology</a>
+ <li><a href="ref/transapp/app.html">Application structure</a>
+ <li><a href="ref/transapp/env_open.html">Opening the environment</a>
+ <li><a href="ref/transapp/data_open.html">Opening the databases</a>
+ <li><a href="ref/transapp/put.html">Recoverability and deadlock handling</a>
+ <li><a href="ref/transapp/atomicity.html">Atomicity</a>
+ <li><a href="ref/transapp/inc.html">Isolation</a>
+ <li><a href="ref/transapp/read.html">Degrees of isolation</a>
+ <li><a href="ref/transapp/cursor.html">Transactional cursors</a>
+ <li><a href="ref/transapp/nested.html">Nested transactions</a>
+ <li><a href="ref/transapp/admin.html">Environment infrastructure</a>
+ <li><a href="ref/transapp/deadlock.html">Deadlock detection</a>
+ <li><a href="ref/transapp/checkpoint.html">Checkpoints</a>
+ <li><a href="ref/transapp/archival.html">Database and log file archival</a>
+ <li><a href="ref/transapp/logfile.html">Log file removal</a>
+ <li><a href="ref/transapp/recovery.html">Recovery procedures</a>
+ <li><a href="ref/transapp/hotfail.html">Hot failover</a>
+ <li><a href="ref/transapp/filesys.html">Recovery and filesystem operations</a>
+ <li><a href="ref/transapp/reclimit.html">Berkeley DB recoverability</a>
+ <li><a href="ref/transapp/tune.html">Transaction tuning</a>
+ <li><a href="ref/transapp/throughput.html">Transaction throughput</a>
+ <li><a href="ref/transapp/faq.html">Transaction FAQ</a>
+ </ol>
+<font size="+1"><li><a name="transapp">Berkeley DB Replication</a></font>
+ <ol>
+ <li><a href="ref/rep/intro.html">Introduction</a>
+ <li><a href="ref/rep/id.html">Replication environment IDs</a>
+ <li><a href="ref/rep/pri.html">Replication environment priorities</a>
+ <li><a href="ref/rep/app.html">Building replicated applications</a>
+ <li><a href="ref/rep/comm.html">Building the communications infrastructure</a>
+ <li><a href="ref/rep/newsite.html">Connecting to a new site</a>
+ <li><a href="ref/rep/init.html">Initializing a new site</a>
+ <li><a href="ref/rep/elect.html">Elections</a>
+ <li><a href="ref/rep/logonly.html">Log file only clients</a>
+ <li><a href="ref/rep/trans.html">Transactional guarantees</a>
+ <li><a href="ref/rep/partition.html">Network partitions</a>
+ <li><a href="ref/rep/faq.html">Replication FAQ</a>
+ <li><a href="ref/rep/ex.html">Ex_repquote: a replication example</a>
+ <li><a href="ref/rep/ex_comm.html">Ex_repquote: a TCP/IP based communication infrastructure</a>
+ <li><a href="ref/rep/ex_rq.html">Ex_repquote: putting it all together</a>
+ </ol>
+<font size="+1"><li><a name="xa">Distributed Transactions</a></font>
+ <ol>
+ <li><a href="ref/xa/intro.html">Introduction</a>
+ <li><a href="ref/xa/build.html">Building a Global Transaction Manager</a>
+ <li><a href="ref/xa/xa_intro.html">XA Introduction</a>
+ <li><a href="ref/xa/xa_config.html">Configuring Berkeley DB with the Tuxedo System</a>
+ <li><a href="ref/xa/faq.html">Frequently Asked Questions</a>
+ </ol>
+<font size="+1"><li><a name="apprec">Application Specific Logging and Recovery</a></font>
+ <ol>
+ <li><a href="ref/apprec/intro.html">Introduction</a>
+ <li><a href="ref/apprec/def.html">Defining application-specific log records</a>
+ <li><a href="ref/apprec/auto.html">Automatically generated functions</a>
+ <li><a href="ref/apprec/config.html">Application configuration</a>
+ </ol>
+<font size="+1"><li><a name="program">Programmer Notes</a></font>
+ <ol>
+ <li><a href="ref/program/appsignals.html">Signal handling</a>
+ <li><a href="ref/program/errorret.html">Error returns to applications</a>
+ <li><a href="ref/program/environ.html">Environmental variables</a>
+ <li><a href="ref/program/mt.html">Multithreaded applications</a>
+ <li><a href="ref/program/scope.html">Berkeley DB handles</a>
+ <li><a href="ref/program/namespace.html">Name spaces</a>
+ <li><a href="ref/program/cache.html">Disk drive caches</a>
+ <li><a href="ref/program/copy.html">Copying databases</a>
+ <li><a href="ref/program/compatible.html">Compatibility with historic UNIX interfaces</a>
+ <li><a href="ref/program/runtime.html">Run-time configuration</a>
+ <li><a href="ref/program/faq.html">Programmer notes FAQ</a>
+ </ol>
+<font size="+1"><li><a name="lock">The Locking Subsystem</a></font>
+ <ol>
+ <li><a href="ref/lock/intro.html">Berkeley DB and locking</a>
+ <li><a href="ref/lock/config.html">Configuring locking</a>
+ <li><a href="ref/lock/max.html">Configuring locking: sizing the system</a>
+ <li><a href="ref/lock/stdmode.html">Standard lock modes</a>
+ <li><a href="ref/lock/dead.html">Deadlock detection</a>
+ <li><a href="ref/lock/timeout.html">Deadlock detection using timers</a>
+ <li><a href="ref/lock/deaddbg.html">Deadlock debugging</a>
+ <li><a href="ref/lock/page.html">Locking granularity</a>
+ <li><a href="ref/lock/notxn.html">Locking without transactions</a>
+ <li><a href="ref/lock/twopl.html">Locking with transactions: two-phase locking</a>
+ <li><a href="ref/lock/cam_conv.html">Berkeley DB Concurrent Data Store locking conventions</a>
+ <li><a href="ref/lock/am_conv.html">Berkeley DB Transactional Data Store locking conventions</a>
+ <li><a href="ref/lock/nondb.html">Locking and non-Berkeley DB applications</a>
+ </ol>
+<font size="+1"><li><a name="log">The Logging Subsystem</a></font>
+ <ol>
+ <li><a href="ref/log/intro.html">Berkeley DB and logging</a>
+ <li><a href="ref/log/config.html">Configuring logging</a>
+ <li><a href="ref/log/limits.html">Log file limits</a>
+ </ol>
+<font size="+1"><li><a name="mp">The Memory Pool Subsystem</a></font>
+ <ol>
+ <li><a href="ref/mp/intro.html">Berkeley DB and the memory pool</a>
+ <li><a href="ref/mp/config.html">Configuring the memory pool</a>
+ </ol>
+<font size="+1"><li><a name="txn">The Transaction Subsystem</a></font>
+ <ol>
+ <li><a href="ref/txn/intro.html">Berkeley DB and transactions</a>
+ <li><a href="ref/txn/config.html">Configuring transactions</a>
+ <li><a href="ref/txn/limits.html">Transaction limits</a>
+ </ol>
+<font size="+1"><li><a name="rpc">RPC Client/Server</a></font>
+ <ol>
+ <li><a href="ref/rpc/intro.html">Introduction</a>
+ <li><a href="ref/rpc/client.html">Client program</a>
+ <li><a href="ref/rpc/server.html">Server program</a>
+ <li><a href="ref/rpc/faq.html">RPC FAQ</a>
+ </ol>
+<font size="+1"><li><a name="java">Java API</a></font>
+ <ol>
+ <li><a href="ref/java/conf.html">Java configuration</a>
+ <li><a href="ref/java/compat.html">Compatibility</a>
+ <li><a href="ref/java/program.html">Java programming notes</a>
+ <li><a href="ref/java/faq.html">Java FAQ</a>
+ </ol>
+<font size="+1"><li><a name="perl">Perl API</a></font>
+ <ol>
+ <li><a href="ref/perl/intro.html">Using Berkeley DB with Perl</a>
+ </ol>
+<font size="+1"><li><a name="tcl">Tcl API</a></font>
+ <ol>
+ <li><a href="ref/tcl/intro.html">Loading Berkeley DB with Tcl</a>
+ <li><a href="ref/tcl/using.html">Using Berkeley DB with Tcl</a>
+ <li><a href="ref/tcl/program.html">Tcl API programming notes</a>
+ <li><a href="ref/tcl/error.html">Tcl error handling</a>
+ <li><a href="ref/tcl/faq.html">Tcl FAQ</a>
+ </ol>
+<font size="+1"><li><a name="sendmail">Sendmail</a></font>
+ <ol>
+ <li><a href="ref/sendmail/intro.html">Using Berkeley DB with Sendmail</a>
+ </ol>
+<font size="+1"><li><a name="dumpload">Dumping and Reloading Databases</a></font>
+ <ol>
+ <li><a href="ref/dumpload/utility.html">The db_dump and db_load utilities</a>
+ <li><a href="ref/dumpload/format.html">Dump output formats</a>
+ <li><a href="ref/dumpload/text.html">Loading text into databases</a>
+ </ol>
+<font size="+1"><li><a name="install">System Installation Notes</a></font>
+ <ol>
+ <li><a href="ref/install/file.html">File utility /etc/magic information</a>
+ <li><a href="ref/install/multiple.html">Building with multiple versions of Berkeley DB</a>
+ <li><a href="ref/install/rpm.html">Building RPM distribution packages</a>
+ </ol>
+<font size="+1"><li><a name="debug">Debugging Applications</a></font>
+ <ol>
+ <li><a href="ref/debug/intro.html">Introduction</a>
+ <li><a href="ref/debug/compile.html">Compile-time configuration</a>
+ <li><a href="ref/debug/runtime.html">Run-time error information</a>
+ <li><a href="ref/debug/printlog.html">Reviewing Berkeley DB log files</a>
+ <li><a href="ref/debug/common.html">Common errors</a>
+ </ol>
+<font size="+1"><li><a name="build_unix">Berkeley DB for UNIX/POSIX systems</a></font>
+ <ol>
+ <li><a href="ref/build_unix/intro.html">Building for UNIX</a>
+ <li><a href="ref/build_unix/conf.html">Configuring Berkeley DB</a>
+ <li><a href="ref/build_unix/flags.html">Changing compile or load options</a>
+ <li><a href="ref/build_unix/install.html">Installing Berkeley DB</a>
+ <li><a href="ref/build_unix/shlib.html">Dynamic shared libraries</a>
+ <li><a href="ref/build_unix/test.html">Running the test suite under UNIX</a>
+ <li><a href="ref/build_unix/notes.html">Architecture independent FAQ</a>
+ <li>Architecture specific FAQs:<br>
+ <a href="ref/build_unix/aix.html">AIX</a>,
+ <a href="ref/build_unix/embedix.html">Embedix</a>,
+ <a href="ref/build_unix/freebsd.html">FreeBSD</a>,
+ <a href="ref/build_unix/hpux.html">HP-UX</a>,
+ <a href="ref/build_unix/irix.html">IRIX</a>,
+ <a href="ref/build_unix/linux.html">Linux</a>,
+ <a href="ref/build_unix/macosx.html">Mac OS X</a>,
+ <a href="ref/build_unix/osf1.html">OSF/1</a>,
+ <a href="ref/build_unix/qnx.html">QNX</a>,
+ <a href="ref/build_unix/sco.html">SCO</a>,
+ <a href="ref/build_unix/solaris.html">Solaris</a>,
+ <a href="ref/build_unix/sunos.html">SunOS</a>,
+ <a href="ref/build_unix/ultrix.html">Ultrix</a>
+ </ol>
+<font size="+1"><li><a name="build_win">Berkeley DB for Win32 platforms</a></font>
+ <ol>
+ <li><a href="ref/build_win/intro.html">Building for Win32</a>
+ <li><a href="ref/build_win/test.html">Running the test suite under Windows</a>
+ <li><a href="ref/build_win/notes.html">Windows notes</a>
+ <li><a href="ref/build_win/faq.html">Windows FAQ</a>
+ </ol>
+<font size="+1"><li><a name="build_vxworks">Berkeley DB for VxWorks systems</a></font>
+ <ol>
+ <li><a href="ref/build_vxworks/intro.html">Building for VxWorks 5.4</a>
+ <li><a href="ref/build_vxworks/introae.html">Building for VxWorks AE</a>
+ <li><a href="ref/build_vxworks/notes.html">VxWorks notes</a>
+ <li><a href="ref/build_vxworks/faq.html">VxWorks FAQ</a>
+ </ol>
+<font size="+1"><li><a name="upgrade">Upgrading Berkeley DB Applications</a></font>
+ <ol>
+ <li><a href="ref/upgrade/version.html">Library version information</a>
+ <li><a href="ref/upgrade/process.html">
+ Upgrading Berkeley DB installations</a>
+ <li><a href="ref/upgrade.2.0/toc.html">
+ Upgrading Berkeley DB 1.XX applications to Berkeley DB 2.0</a>
+ <li><a href="ref/upgrade.3.0/toc.html">
+ Upgrading Berkeley DB 2.X.X applications to Berkeley DB 3.0</a>
+ <li><a href="ref/upgrade.3.1/toc.html">
+ Upgrading Berkeley DB 3.0.X applications to Berkeley DB 3.1</a>
+ <li><a href="ref/upgrade.3.2/toc.html">
+ Upgrading Berkeley DB 3.1.X applications to Berkeley DB 3.2</a>
+ <li><a href="ref/upgrade.3.3/toc.html">
+ Upgrading Berkeley DB 3.2.X applications to Berkeley DB 3.3</a>
+ <li><a href="ref/upgrade.4.0/toc.html">
+ Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0</a>
+ <li><a href="ref/upgrade.4.1/toc.html">
+ Upgrading Berkeley DB 4.0.X applications to Berkeley DB 4.1</a>
+ </ol>
+<font size="+1"><li><a name="test">Test Suite</a></font>
+ <ol>
+ <li><a href="ref/test/run.html">Running the test suite</a>
+ <li><a href="ref/test/faq.html">Test suite FAQ</a>
+ </ol>
+<font size="+1"><li><a name="distrib">Distribution</a></font>
+ <ol>
+ <li><a href="ref/distrib/port.html">Porting Berkeley DB to new architectures</a>
+ <li><a href="ref/distrib/layout.html">Source code layout</a>
+ </ol>
+<font size="+1"><li><a name="refs">Additional References</a></font>
+ <ol>
+ <li><a href="ref/refs/refs.html">Additional references</a>
+ </ol>
+</ol>
+
+</body>
+</html>
diff --git a/libdb/docs/sleepycat/contact.html b/libdb/docs/sleepycat/contact.html
new file mode 100644
index 0000000..dd73089
--- /dev/null
+++ b/libdb/docs/sleepycat/contact.html
@@ -0,0 +1,68 @@
+<!--$Id$-->
+<html>
+<head>
+<title>The Sleepycat Software Contact Page</title>
+<meta name="description" content="DB: A database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+
+<p align=center>
+<img src="../images/sleepycat.gif" alt="Sleepycat Software Inc.">
+
+<table>
+<tr>
+
+<td valign=top>
+<b>
+General:
+</b>
+</td><td>
+<a href="mailto:info@sleepycat.com">info@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Sales and Marketing:
+</b>
+</td><td>
+<a href="mailto:sales@sleepycat.com">sales@sleepycat.com</a><br>
++1-617-876-0858<br>
++1-877-SLEEPYCAT (USA only, toll-free)<br>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Technical Support:
+</b>
+</td><td>
+<a href="mailto:support@sleepycat.com">support@sleepycat.com</a>
+</td>
+
+</tr><tr>
+<td><br></td>
+</tr><tr>
+
+<td valign=top>
+<b>
+Postal Mail:
+</b>
+</td><td bgcolor="#EEEEEE">
+Sleepycat Software Inc.<br>
+118 Tower Rd.<br>
+Lincoln, MA 01773-4403<br>
+USA
+</td>
+</tr>
+</table>
+
+</body>
+</html>
diff --git a/libdb/docs/sleepycat/legal.html b/libdb/docs/sleepycat/legal.html
new file mode 100644
index 0000000..2975a59
--- /dev/null
+++ b/libdb/docs/sleepycat/legal.html
@@ -0,0 +1,67 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Sleepycat Software Legal Notices</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<p align=center>
+<img src="../images/sleepycat.gif" alt="Sleepycat Software Inc."></p>
+<h1 align=center>Sleepycat Software Legal Notices</h1>
+<p>Copyright (c) 1990-2002 Sleepycat Software, Inc., 118 Tower Rd.,
+Lincoln, MA 01773, U.S.A. All Rights Reserved.
+<p>This product and publication is protected by copyright and distributed
+under licenses restricting its use, copying and distribution. Permission
+to use this publication or portions of this publication is granted by
+Sleepycat Software provided that the above copyright notice appears in
+all copies and that use of such publications is for non-commercial use
+only and no modifications of the publication are made.
+<p>RESTRICTED RIGHTS: Use, duplication, or disclosure by the U.S. Government
+is subject to restrictions of FAR 52.227-14(g)(2)(6/87) and FAR
+52.227-19(6/87), or DFAR 252.227-7015(b)(6/95) and DFAR 227.7202-3(a).
+<p>Sleepycat and the names of Sleepycat Software products referenced herein
+are trademarks, registered trademarks or service marks of Sleepycat
+Software, Inc.
+<p>DIGITAL and ULTRIX are trademarks, registered trademarks or service
+marks of Compaq Corporation.
+<p>Embedix and Lineo are trademarks, registered trademarks or service marks
+of Lineo, Inc.
+<p>Hewlett-Packard and HP-UX are trademarks, registered trademarks or
+service marks of Hewlett-Packard Company.
+<p>Microsoft, Windows, Windows NT, Windows 2000 and Windows XP are
+trademarks, registered trademarks or service marks of Microsoft
+Corporation.
+<p>QNX and Neutrino are trademarks, registered trademarks or service marks
+of QNX Software Systems Ltd.
+<p>Sun Microsystems, SunOS and Solaris are trademarks, registered
+trademarks or service marks of Sun Microsystems, Inc.
+<p>TUXEDO is a trademark, registered trademark or service mark of BEA
+Systems, Inc.
+<p>VxWorks and Tornado are trademarks, registered trademarks or service
+marks of Wind River Systems Inc.
+<p>All other brand, company and product names referenced in this publication
+may be trademarks, registered trademarks or service marks of their
+respective holders and are used here for informational purposes only.
+<p>WARNING: There is a non-zero chance that, through a process known as
+"tunneling," this product may spontaneously disappear from its present
+location and reappear at any random place in the universe. Sleepycat
+Software will not be responsible for damages or inconvenience that may
+result.
+<p><b>THIS PRODUCT IS PROVIDED BY SLEEPYCAT SOFTWARE "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT,
+ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.</b>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/sleepycat/license.html b/libdb/docs/sleepycat/license.html
new file mode 100644
index 0000000..f402890
--- /dev/null
+++ b/libdb/docs/sleepycat/license.html
@@ -0,0 +1,113 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Sleepycat Software Product License</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<p align=center>
+<img src="../images/sleepycat.gif" alt="Sleepycat Software Inc."></p>
+<h1 align=center>Sleepycat Software Product License</h1>
+<p>The following is the license that applies to this copy of the Berkeley DB
+software. For a license to use the Berkeley DB software under conditions
+other than those described here, or to purchase support for this
+software, please <a href="contact.html">contact Sleepycat Software</a>.
+<p><blockquote><pre>/*
+ * Copyright (c) 1990-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ * how to obtain complete source code for the DB software and any
+ * accompanying software that uses the DB software. The source code
+ * must either be included in the distribution or be available for no
+ * more than the cost of distribution plus a nominal fee, and must be
+ * freely redistributable under reasonable conditions. For an
+ * executable file, complete source code means the source code for all
+ * modules it contains. It does not include source code for modules or
+ * files that typically accompany the major components of the operating
+ * system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+</pre></blockquote>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/berkeley_db_svc.html b/libdb/docs/utility/berkeley_db_svc.html
new file mode 100644
index 0000000..fb51a0c
--- /dev/null
+++ b/libdb/docs/utility/berkeley_db_svc.html
@@ -0,0 +1,74 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: berkeley_db_svc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>berkeley_db_svc</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>berkeley_db_svc [<b>-Vv</b>] [<b>-h home</b>]
+ [<b>-I seconds</b>] [<b>-L file</b>] [<b>-t seconds</b>] [<b>-T seconds</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The berkeley_db_svc utility is the Berkeley DB RPC server.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Add the specified home directory to the list of allowed home directories
+that can be specified by the client. The home directory should be an
+absolute pathname. The last component of each home directory specified
+must be unique because that is how clients specify which database environment
+they want to join.
+<p>Recovery will be run on each specified environment before the server
+begins accepting requests from clients. For this reason, only one copy
+of the server program should ever be run at any time because recovery
+must always be single-threaded.
+<p><dt><b>-I</b><dd>Set the default idle timeout for client environments to the specified
+number of seconds. The default timeout is 24 hours.
+<p><dt><b>-L</b><dd>Log the execution of the berkeley_db_svc utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>berkeley_db_svc: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the berkeley_db_svc utility exits gracefully.
+<p><dt><b>-t</b><dd>Set the default timeout for client resources (idle transactions and
+cursors) to the specified number of seconds. When the timeout expires,
+if the resource is a transaction, it is aborted; if the resource is a
+cursor, it is closed. The default timeout is 5 minutes.
+<p><dt><b>-T</b><dd>Set the maximum timeout allowed for client resources. The default
+timeout is 20 minutes. If a client application requests a server
+timeout greater than the maximum timeout set for this server, the
+client's timeout will be capped at the maximum timeout value.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode.
+</dl>
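+<p>As a hypothetical illustration (the directory and file names below are
+invented for the example), the server might be started with a single
+allowed home directory, a log file, and a one-hour idle timeout:
+<p><blockquote><pre>berkeley_db_svc -h /var/dbenv/quotes -L /var/run/berkeley_db_svc.log -I 3600</pre></blockquote>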
+<p>The berkeley_db_svc utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, berkeley_db_svc should always be given the chance to
+detach from the environment and exit gracefully. To cause berkeley_db_svc
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The berkeley_db_svc utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_archive.html b/libdb/docs/utility/db_archive.html
new file mode 100644
index 0000000..b4a8dac
--- /dev/null
+++ b/libdb/docs/utility/db_archive.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_archive</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_archive</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_archive [<b>-alsVv</b>] [<b>-h home</b>] [<b>-P password</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_archive utility writes the pathnames of log files that
+are no longer in use (for example, no longer involved in active
+transactions), to the standard output, one pathname per line. These
+log files should be written to backup media to provide for recovery in
+the case of catastrophic failure (which also requires a snapshot of the
+database files), but they may then be deleted from the system to reclaim
+disk space.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-a</b><dd>Write all pathnames as absolute pathnames, instead of relative to the
+database home directories.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-l</b><dd>Write out the pathnames of all the database log files, whether or not
+they are involved in active transactions.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-s</b><dd>Write the pathnames of all the database files that need to be archived
+in order to recover the database from catastrophic failure. If any of
+the database files have not been accessed during the lifetime of the
+current log files, db_archive will not include them in this
+output.
+<p>It is possible that some of the files to which the log refers have since
+been deleted from the system. In this case, db_archive will
+ignore them. When <a href="../utility/db_recover.html">db_recover</a> is run, any files to which the
+log refers that are not present during recovery are assumed to have been
+deleted and will not be recovered.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode, listing the checkpoints in the log files as they
+are reviewed.
+</dl>
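+<p>As a hypothetical illustration (the home directory is invented for the
+example), a backup script might list the database files needed for
+catastrophic recovery and the log files that are no longer in use:
+<p><blockquote><pre># database files to copy to backup media
+db_archive -h /var/dbenv -s -a
+# log files that may be removed once they have been backed up
+db_archive -h /var/dbenv -a</pre></blockquote>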
+<p>Log cursor handles (returned by the <a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> method) may have open
+file descriptors for log files in the database environment. Also, the
+Berkeley DB interfaces to the database environment logging subsystem (for
+example, <a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a> and <a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>) may allocate log cursors
+and have open file descriptors for log files as well. On operating
+systems where filesystem related system calls (for example, rename and
+unlink on Windows/NT) can fail if a process has an open file descriptor
+for the affected file, attempting to move or remove the log files listed
+by db_archive may fail. All Berkeley DB internal use of log cursors
+operates on active log files only and furthermore, is short-lived in
+nature. So, an application seeing such a failure should be restructured
+to close any open log cursors it may have, and otherwise to retry the
+operation until it succeeds. (Although the latter is not likely to be
+necessary; it is hard to imagine a reason to move or rename a log file
+in which transactions are being logged or aborted.)
+<p>The db_archive utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_archive should always be given the chance to
+detach from the environment and exit gracefully. To cause db_archive
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The <a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a> method is the underlying interface used by the db_archive utility.
+See the db_archive utility source code for an example of using <a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The db_archive utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_checkpoint.html b/libdb/docs/utility/db_checkpoint.html
new file mode 100644
index 0000000..a1c229d
--- /dev/null
+++ b/libdb/docs/utility/db_checkpoint.html
@@ -0,0 +1,79 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_checkpoint</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_checkpoint</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_checkpoint [<b>-1Vv</b>] [<b>-h home</b>]
+ [<b>-k kbytes</b>] [<b>-L file</b>] [<b>-P password</b>] [<b>-p min</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_checkpoint utility is a daemon process that monitors the
+database log, and periodically calls <a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a> to checkpoint
+it.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-1</b><dd>Checkpoint the log once, regardless of whether or not there has been
+activity since the last checkpoint and then exit.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-k</b><dd>Checkpoint the database at least as often as every <b>kbytes</b> of log
+file are written.
+<p><dt><b>-L</b><dd>Log the execution of the db_checkpoint utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>db_checkpoint: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the db_checkpoint utility exits gracefully.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-p</b><dd>Checkpoint the database at least every <b>min</b> minutes if there has
+been any activity since the last checkpoint.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-v</b><dd>Write the time of each checkpoint attempt to the standard output.
+</dl>
+<p>At least one of the <b>-1</b>, <b>-k</b>, and <b>-p</b> options
+must be specified.
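+<p>As a hypothetical illustration (the home directory is invented for the
+example), the following command runs the daemon so that a checkpoint is
+written at least every 5 minutes of activity or every 512KB of log:
+<p><blockquote><pre>db_checkpoint -h /var/dbenv -p 5 -k 512</pre></blockquote>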
+<p>The db_checkpoint utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_checkpoint should always be given the chance to
+detach from the environment and exit gracefully. To cause db_checkpoint
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_checkpoint utility does not attempt to create the Berkeley DB
+shared memory regions if they do not already exist. The application
+that creates the region should be started first, and once the region is
+created, the db_checkpoint utility should be started.
+<p>The <a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a> method is the underlying interface used by the db_checkpoint utility.
+See the db_checkpoint utility source code for an example of using <a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a>
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The db_checkpoint utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_deadlock.html b/libdb/docs/utility/db_deadlock.html
new file mode 100644
index 0000000..aa6317b
--- /dev/null
+++ b/libdb/docs/utility/db_deadlock.html
@@ -0,0 +1,89 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_deadlock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_deadlock</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_deadlock [<b>-Vv</b>]
+ [<b>-a e | m | n | o | w | y</b>] [<b>-h home</b>] [<b>-L file</b>] [<b>-t sec.usec</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_deadlock utility traverses the database environment lock
+region, and aborts a lock request each time it detects a deadlock or a
+lock request that has timed out. By default, in the case of a deadlock,
+a random lock request is chosen to be aborted.
+<p>This utility should be run as a background daemon, or the underlying
+Berkeley DB deadlock detection interfaces should be called in some other way,
+whenever there are multiple threads or processes accessing a database
+and at least one of them is modifying it.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-a</b><dd>When a deadlock is detected, abort the locker:
+<p><dl compact>
+<p><dt>m<dd>with the greatest number of locks
+<dt>n<dd>with the fewest number of locks
+<dt>o<dd>with the oldest locker ID
+<dt>w<dd>with the fewest number of write locks
+<dt>y<dd>with the youngest locker ID
+</dl>
+<p>When lock or transaction timeouts have been specified:
+<p><dl compact>
+<p><dt>e<dd>abort any lock request that has timed out
+</dl>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-L</b><dd>Log the execution of the db_deadlock utility to the specified file in the
+following format, where <i>###</i> is the process ID, and the date
+is the time the utility was started.
+<p><blockquote><pre>db_deadlock: ### Wed Jun 15 01:23:45 EDT 1995</pre></blockquote>
+This file will be removed if the db_deadlock utility exits gracefully.
+<p><dt><b>-t</b><dd>Check the database environment every <b>sec</b> seconds plus
+<b>usec</b> microseconds to see if a process has been forced to wait
+for a lock; if one has, review the database environment lock
+structures.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode, generating messages each time the detector runs.
+</dl>
+<p>If the <b>-t</b> option is not specified, db_deadlock will
+run once and exit.
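+<p>As a hypothetical illustration (the home directory is invented for the
+example), the following command checks for deadlocks every three seconds
+and aborts the lock request of the locker with the youngest locker ID:
+<p><blockquote><pre>db_deadlock -h /var/dbenv -t 3.0 -a y</pre></blockquote>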
+<p>The db_deadlock utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_deadlock should always be given the chance to
+detach from the environment and exit gracefully. To cause db_deadlock
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_deadlock utility does not attempt to create the Berkeley DB
+shared memory regions if they do not already exist. The application
+which creates the region should be started first, and then, once the
+region is created, the db_deadlock utility should be started.
+<p>The <a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a> method is the underlying interface used by the db_deadlock utility.
+See the db_deadlock utility source code for an example of using <a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a>
+in an IEEE/ANSI Std 1003.1 (POSIX) environment.
+<p>The db_deadlock utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_dump.html b/libdb/docs/utility/db_dump.html
new file mode 100644
index 0000000..41b6843
--- /dev/null
+++ b/libdb/docs/utility/db_dump.html
@@ -0,0 +1,124 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_dump</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_dump</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_dump [<b>-klNpRrV</b>] [<b>-d ahr</b>]
+ [<b>-f output</b>] [<b>-h home</b>] [<b>-P password</b>] [<b>-s database</b>] file
+db_dump185 [<b>-p</b>] [<b>-f output</b>] file</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_dump utility reads the database file <b>file</b> and
+writes it to the standard output using a portable flat-text format
+understood by the <a href="../utility/db_load.html">db_load</a> utility. The argument <b>file</b>
+must be a file produced using the Berkeley DB library functions.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility is similar to the db_dump utility,
+except that it reads databases in the format used by Berkeley DB versions 1.85
+and 1.86.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-d</b><dd>Dump the specified database in a format helpful for debugging the Berkeley DB
+library routines.
+<p><dl compact>
+<p><dt>a<dd>Display all information.
+<dt>h<dd>Display only page headers.
+<dt>r<dd>Do not display the free-list or pages on the free list. This
+mode is used by the recovery tests.
+</dl>
+<p><b>The output format of the <b>-d</b> option is not standard and may change,
+without notice, between releases of the Berkeley DB library.</b>
+<p><dt><b>-f</b><dd>Write to the specified <b>file</b> instead of to the standard output.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-k</b><dd>Dump record numbers from Queue and Recno databases as keys.
+<p><dt><b>-l</b><dd>List the databases stored in the file.
+<p><dt><b>-N</b><dd>Do not acquire shared region mutexes while running. Other problems,
+such as potentially fatal errors in Berkeley DB, will be ignored as well.
+This option is intended only for debugging errors, and should not be
+used under any other circumstances.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-p</b><dd>If characters in either the key or data items are printing characters (as
+defined by <b>isprint</b>(3)), use printing characters in <b>file</b>
+to represent them. This option permits users to use standard text editors
+and tools to modify the contents of databases.
+<p>Note: different systems may have different notions about what characters
+are considered <i>printing characters</i>, and databases dumped in
+this manner may be less portable to external systems.
+<p><dt><b>-R</b><dd>Aggressively salvage data from a possibly corrupt file. The <b>-R</b>
+flag differs from the <b>-r</b> option in that it will return all
+possible data from the file at the risk of also returning already deleted
+or otherwise nonsensical items. Data dumped in this fashion will almost
+certainly have to be edited by hand or other means before the data is
+ready for reload into another database.
+<p><dt><b>-r</b><dd>Salvage data from a possibly corrupt file. When used on an uncorrupted
+database, this option should return equivalent data to a normal dump, but
+most likely in a different order.
+<p><dt><b>-s</b><dd>Specify a single database to dump. If no database is specified, all
+databases in the database file are dumped.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+</dl>
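+<p>As a hypothetical illustration (the file and database names are invented
+for the example), the following command dumps a single named database to a
+flat-text file, using printing characters where possible, so that it can
+later be reloaded with <a href="../utility/db_load.html">db_load</a>:
+<p><blockquote><pre>db_dump -p -s mydatabase -f mydatabase.txt access.db</pre></blockquote>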
+<p>Dumping and reloading Hash databases that use user-defined hash
+functions will result in new databases that use the default hash
+function. Although using the default hash function may not be optimal
+for the new database, it will continue to work correctly.
+<p>Dumping and reloading Btree databases that use user-defined prefix or
+comparison functions will result in new databases that use the default
+prefix and comparison functions.
+<b>In this case, it is quite likely that the database will be damaged
+beyond repair, permitting neither record storage nor retrieval.</b>
+<p>The only available workaround for either case is to modify the sources
+for the <a href="../utility/db_load.html">db_load</a> utility to load the database using the correct
+hash, prefix, and comparison functions.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility may not be available on your system
+because it is not always built when the Berkeley DB libraries and utilities
+are installed. If you are unable to find it, see your system
+administrator for further information.
+<p>The db_dump and <a href="../utility/db_dump.html">db_dump185</a> utility output formats are
+documented in the <a href="../ref/dumpload/format.html">Dump Output
+Formats</a> section of the Berkeley DB Reference Guide.
+<p>The db_dump utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_dump should always be given the chance to
+detach from the environment and exit gracefully. To cause db_dump
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>Even when using a Berkeley DB database environment, the db_dump
+utility does not use any kind of database locking if it is invoked with
+the <b>-d</b>, <b>-R</b>, or <b>-r</b> arguments. If used with
+one of these arguments, the db_dump utility may only be safely
+run on databases that are not being modified by any other process;
+otherwise, the output may be corrupt.
+<p>The db_dump utility exits 0 on success, and &gt;0 if an error occurs.
+<p>The <a href="../utility/db_dump.html">db_dump185</a> utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_load.html b/libdb/docs/utility/db_load.html
new file mode 100644
index 0000000..4688fa2
--- /dev/null
+++ b/libdb/docs/utility/db_load.html
@@ -0,0 +1,149 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_load</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_load</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_load [<b>-nTV</b>] [<b>-c name=value</b>] [<b>-f file</b>]
+ [<b>-h home</b>] [<b>-P password</b>] [<b>-t btree | hash | queue | recno</b>] file</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_load utility reads from the standard input and loads it
+into the database <b>file</b>. The database <b>file</b> is created if
+it does not already exist.
+<p>The input to db_load must be in the output format specified by the
+<a href="../utility/db_dump.html">db_dump</a> utility, utilities, or as specified for the <b>-T</b>
+below.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-c</b><dd>Specify configuration options ignoring any value they may have based on
+the input. The command-line format is <b>name=value</b>. See the
+Supported Keywords section below for a list of keywords supported by
+the <b>-c</b> option.
+<p><dt><b>-f</b><dd>Read from the specified <b>input</b> file instead of from the standard
+input.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment.
+<p>If a home directory is specified, the database environment is opened
+using the <a href="../api_c/env_open.html#DB_INIT_LOCK">DB_INIT_LOCK</a>, <a href="../api_c/env_open.html#DB_INIT_LOG">DB_INIT_LOG</a>,
+<a href="../api_c/env_open.html#DB_INIT_MPOOL">DB_INIT_MPOOL</a>, <a href="../api_c/env_open.html#DB_INIT_TXN">DB_INIT_TXN</a>, and <a href="../api_c/env_open.html#DB_USE_ENVIRON">DB_USE_ENVIRON</a>
+flags to <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>. (This means that db_load can be
+used to load data into databases while they are in use by other
+processes.) If the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> call fails, or if no home
+directory is specified, the database is still updated, but the
+environment is ignored; for example, no locking is done.
+<p><dt><b>-n</b><dd>Do not overwrite existing keys in the database when loading into an
+already existing database. If a key/data pair cannot be loaded into the
+database for this reason, a warning message is displayed on the standard
+error output, and the key/data pair are skipped.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-T</b><dd>The <b>-T</b> option allows non-Berkeley DB applications to easily load text
+files into databases.
+<p>If the database to be created is of type Btree or Hash, or the keyword
+<b>keys</b> is specified as set, the input must be paired lines of text,
+where the first line of the pair is the key item, and the second line of
+the pair is its corresponding data item. If the database to be created
+is of type Queue or Recno and the keyword <b>keys</b> is not set, the
+input must be lines of text, where each line is a new data item for the
+database.
+<p>A simple escape mechanism, where newline and backslash (\)
+characters are special, is applied to the text input. Newline characters
+are interpreted as record separators. Backslash characters in the text
+will be interpreted in one of two ways: If the backslash character
+precedes another backslash character, the pair will be interpreted as a
+literal backslash. If the backslash character precedes any other
+character, the two characters following the backslash will be interpreted
+as a hexadecimal specification of a single character; for example,
+\0a is a newline character in the ASCII character set.
+<p>For this reason, any backslash or newline characters that naturally
+occur in the text input must be escaped to avoid misinterpretation by
+db_load.
+<p>If the <b>-T</b> option is specified, the underlying access method type
+must be specified using the <b>-t</b> option.
+<p><dt><b>-t</b><dd>Specify the underlying access method. If no <b>-t</b> option is
+specified, the database will be loaded into a database of the same type
+as was dumped; for example, a Hash database will be created if a Hash
+database was dumped.
+<p>Btree and Hash databases may be converted from one to the other. Queue
+and Recno databases may be converted from one to the other. If the
+<b>-k</b> option was specified on the call to <a href="../utility/db_dump.html">db_dump</a>, then Queue
+and Recno databases may be converted to Btree or Hash, with the key being
+the integer record number.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+</dl>
+<p>The db_load utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_load should always be given the chance to
+detach from the environment and exit gracefully. To cause db_load
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_load utility exits 0 on success, 1 if one or more key/data
+pairs were not loaded into the database because the key already existed,
+and &gt;1 if an error occurs.
+<h3>Examples</h3>
+<p>The db_load utility can be used to load text files into databases.
+For example, the following command loads the standard UNIX
+<i>/etc/passwd</i> file into a database, with the login name as the
+key item and the entire password entry as the data item:
+<p><blockquote><pre>awk -F: '{print $1; print $0}' &lt; /etc/passwd |
+ sed 's/\\/\\\\/g' | db_load -T -t hash passwd.db</pre></blockquote>
+<p>Note that backslash characters naturally occurring in the text are escaped
+to avoid interpretation as escape characters by db_load.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+<h3>Supported Keywords</h3>
+The following keywords are supported for the <b>-c</b> command-line
+option to the db_load utility. See <a href="../api_c/db_open.html">DB-&gt;open</a> for further
+discussion of these keywords and what values should be specified.
+<p>The parenthetical listing specifies how the value part of the
+<b>name=value</b> pair is interpreted. Items listed as (boolean)
+expect value to be <b>1</b> (set) or <b>0</b> (unset). Items listed
+as (number) convert value to a number. Items listed as (string) use
+the string value without modification.
+<p><dl compact>
+<dt>bt_minkey (number)<dd>The minimum number of keys per page.
+<dt>chksum (boolean)<dd>Enable page checksums.
+<dt>database (string)<dd>The database to load.
+<dt>db_lorder (number)<dd>The byte order for integers in the stored database metadata.
+<dt>db_pagesize (number)<dd>The size of database pages, in bytes.
+<dt>duplicates (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_DUP">DB_DUP</a> flag.
+<dt>dupsort (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_DUPSORT">DB_DUPSORT</a> flag.
+<dt>extentsize (number)<dd>The size of database extents, in pages, for Queue databases configured
+to use extents.
+<dt>h_ffactor (number)<dd>The density within the Hash database.
+<dt>h_nelem (number)<dd>The size of the Hash database.
+<dt>keys (boolean)<dd>Specify whether keys are present for Queue or Recno databases.
+<dt>re_len (number)<dd>Specify fixed-length records of the specified length.
+<dt>re_pad (string)<dd>Specify the fixed-length record pad character.
+<dt>recnum (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_RECNUM">DB_RECNUM</a> flag.
+<dt>renumber (boolean)<dd>The value of the <a href="../api_c/db_set_flags.html#DB_RENUMBER">DB_RENUMBER</a> flag.
+<dt>subdatabase (string)<dd>The subdatabase to load.
+</dl>
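+<p>As a rough, illustrative parallel (not part of the utility itself), the
+keywords above correspond to configuration calls an application would make
+through the C API before calling <a href="../api_c/db_open.html">DB-&gt;open</a>.
+The following minimal sketch assumes the 4.1-era C API, a hypothetical file
+name <i>example.db</i>, and abbreviated error handling; it configures a Btree
+database the way <b>duplicates=1</b>, <b>db_pagesize=4096</b>, and
+<b>bt_minkey=4</b> would:
+<p><blockquote><pre>#include &lt;db.h>
+
+int
+create_configured_db(void)
+{
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&dbp, NULL, 0)) != 0)
+		return (ret);
+	if ((ret = dbp->set_flags(dbp, DB_DUP)) == 0 &&      /* duplicates=1 */
+	    (ret = dbp->set_pagesize(dbp, 4096)) == 0 &&     /* db_pagesize=4096 */
+	    (ret = dbp->set_bt_minkey(dbp, 4)) == 0)         /* bt_minkey=4 */
+		ret = dbp->open(dbp,
+		    NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0600);
+	(void)dbp->close(dbp, 0);
+	return (ret);
+}</pre></blockquote>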
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_printlog.html b/libdb/docs/utility/db_printlog.html
new file mode 100644
index 0000000..5caac62
--- /dev/null
+++ b/libdb/docs/utility/db_printlog.html
@@ -0,0 +1,64 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_printlog</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_printlog</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_printlog [<b>-NrV</b>] [<b>-h home</b>] [<b>-P password</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_printlog utility is a debugging utility that dumps Berkeley DB
+log files in a human-readable format.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-N</b><dd>Do not acquire shared region mutexes while running. Other problems,
+such as potentially fatal errors in Berkeley DB, will be ignored as well.
+This option is intended only for debugging errors, and should not be
+used under any other circumstances.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-r</b><dd>Read the log files in reverse order.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+</dl>
+<p>For more information on the db_printlog output and using it to
+debug applications, see <a href="../ref/debug/printlog.html">Reviewing
+Berkeley DB log files</a>.
+<p>The db_printlog utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_printlog should always be given the chance to
+detach from the environment and exit gracefully. To cause db_printlog
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_printlog utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_recover.html b/libdb/docs/utility/db_recover.html
new file mode 100644
index 0000000..78fdccb
--- /dev/null
+++ b/libdb/docs/utility/db_recover.html
@@ -0,0 +1,95 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_recover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_recover</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_recover [<b>-ceVv</b>] [<b>-h home</b>] [<b>-P password</b>] [<b>-t [[CC]YY]MMDDhhmm[.SS]]</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_recover utility must be run after an unexpected application,
+Berkeley DB, or system failure to restore the database to a consistent state.
+All committed transactions are guaranteed to appear after db_recover
+has run, and all uncommitted transactions will be completely undone.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-c</b><dd>Perform catastrophic recovery instead of normal recovery.
+<p><dt><b>-e</b><dd>Retain the environment after running recovery. This option
+will rarely be used unless a DB_CONFIG file is present in the home
+directory. If a DB_CONFIG file is not present, then the regions will be
+created with default parameter values.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-t</b><dd>Recover to the time specified rather than to the most current possible
+date. The timestamp argument should be in the form
+[[CC]YY]MMDDhhmm[.SS] where each pair of
+letters represents the following:
+<p><dl compact>
+<p><dt>CC<dd>The first two digits of the year (the century).
+<dt>YY<dd>The second two digits of the year. If "YY" is specified, but "CC" is not,
+a value for "YY" between 69 and 99 results in a "CC" value of 19. Otherwise,
+a "CC" value of 20 is used.
+<dt>MM<dd>The month of the year, from 1 to 12.
+<dt>DD<dd>The day of the month, from 1 to 31.
+<dt>hh<dd>The hour of the day, from 0 to 23.
+<dt>mm<dd>The minute of the hour, from 0 to 59.
+<dt>SS<dd>The second of the minute, from 0 to 61.
+</dl>
+<p>If the "CC" and "YY" letter pairs are not specified, the values default
+to the current year. If the "SS" letter pair is not specified, the value
+defaults to 0.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-v</b><dd>Run in verbose mode.
+</dl>
+<p>In the case of catastrophic recovery, an archival copy -- or
+<i>snapshot</i> -- of all database files must be restored along with
+all of the log files written since the database file snapshot was made.
+(If disk space is a problem, log files may be referenced by symbolic
+links). For further information on creating a database snapshot, see
+<a href="../ref/transapp/archival.html">Archival Procedures</a>. For
+further information on performing recovery, see
+<a href="../ref/transapp/recovery.html">Recovery Procedures</a>.
+<p>If the failure was not catastrophic, the files present on the system at the
+time of failure are sufficient to perform recovery.
+<p>If log files are missing, db_recover will identify the missing
+log file(s) and fail, in which case the missing log files need to be
+restored and recovery performed again.
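+<p>Recovery can also be run from an application by opening the environment
+with the DB_RECOVER flag (or DB_RECOVER_FATAL for catastrophic recovery) to
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a>; the <b>-t</b> option
+roughly corresponds to the DB_ENV-&gt;set_tx_timestamp method. The following
+minimal sketch assumes the 4.1-era C API and a hypothetical home directory
+<i>/var/dbenv</i>, with error handling abbreviated:
+<p><blockquote><pre>#include &lt;db.h>
+
+int
+run_recovery(void)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	if ((ret = db_env_create(&dbenv, 0)) != 0)
+		return (ret);
+	/* Use DB_RECOVER_FATAL here for the equivalent of "db_recover -c". */
+	ret = dbenv->open(dbenv, "/var/dbenv",
+	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+	    DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0);
+	(void)dbenv->close(dbenv, 0);
+	return (ret);
+}</pre></blockquote>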
+<p>The db_recover utility uses a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_recover should always be given the chance to
+detach from the environment and exit gracefully. To cause db_recover
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_recover utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_stat.html b/libdb/docs/utility/db_stat.html
new file mode 100644
index 0000000..8c56c5d
--- /dev/null
+++ b/libdb/docs/utility/db_stat.html
@@ -0,0 +1,103 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_stat</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_stat</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_stat <b>-d</b> <b>file</b> [<b>-fN</b>] [<b>-h home</b>] [<b>-P password</b>] [<b>-s database</b>]
+db_stat [<b>-celmNrtVZ</b>] [<b>-C Aclmop</b>] [<b>-h home</b>] [<b>-M Ahm</b>] [<b>-P password</b>]</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a>
+<p>The db_stat utility displays statistics for Berkeley DB environments.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-C</b><dd>Display internal information about the lock region. (The output from this
+option is often both voluminous and meaningless, and is intended only for
+debugging.)
+<p><dl compact>
+<p><dt>A<dd>Display all information.
+<dt>c<dd>Display lock conflict matrix.
+<dt>l<dd>Display lockers within hash chains.
+<dt>m<dd>Display region memory information.
+<dt>o<dd>Display objects within hash chains.
+<dt>p<dd>Display lock region parameters.
+</dl>
+<p><dt><b>-c</b><dd>Display lock region statistics, as described in <a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>.
+<p><dt><b>-d</b><dd>Display database statistics for the specified file, as described in
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>.
+<p>If the file contains multiple databases and the <b>-s</b> flag is
+not specified, the statistics are for the internal database that describes
+the other databases the file contains, and not for the file as a whole.
+<p><dt><b>-e</b><dd>Display current environment statistics.
+<p><dt><b>-f</b><dd>Display only those database statistics that can be
+acquired without traversing the database.
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-l</b><dd>Display log region statistics, as described in <a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>.
+<p><dt><b>-M</b><dd>Display internal information about the shared memory buffer pool. (The
+output from this option is often both voluminous and meaningless, and is
+intended only for debugging.)
+<p><dl compact>
+<p><dt>A<dd>Display all information.
+<dt>h<dd>Display buffers within hash chains.
+<dt>m<dd>Display region memory information.
+</dl>
+<p><dt><b>-m</b><dd>Display shared memory buffer pool statistics, as described in
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>.
+<p><dt><b>-N</b><dd>Do not acquire shared region mutexes while running. Other problems,
+such as potentially fatal errors in Berkeley DB, will be ignored as well.
+This option is intended only for debugging errors, and should not be
+used under any other circumstances.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-r</b><dd>Display replication statistics, as described in <a href="../api_c/rep_stat.html">DB_ENV-&gt;rep_stat</a>.
+<p><dt><b>-s</b><dd>Display statistics for the specified database contained in the file
+specified with the <b>-d</b> flag.
+<p><dt><b>-t</b><dd>Display transaction region statistics, as described in <a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+<p><dt><b>-Z</b><dd>Reset the statistics after reporting them; valid only with the
+<b>-c</b>, <b>-e</b>, <b>-l</b>, <b>-m</b>, and <b>-t</b>
+options.
+</dl>
+<p>Values normally displayed in quantities of bytes are displayed as a
+combination of gigabytes (GB), megabytes (MB), kilobytes (KB), and bytes
+(B). Otherwise, values smaller than 10 million are displayed without
+any special notation, and values larger than 10 million are displayed
+as a number followed by "M".
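+<p>Applications can read the same counters through the C API statistics
+methods (<a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>,
+<a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>, and so on). The
+following minimal sketch assumes the 4.1-era C API, an existing environment
+in a hypothetical directory <i>/var/dbenv</i>, and abbreviated error
+handling:
+<p><blockquote><pre>#include &lt;stdio.h>
+#include &lt;stdlib.h>
+#include &lt;db.h>
+
+int
+print_some_stats(void)
+{
+	DB_ENV *dbenv;
+	DB_LOCK_STAT *lsp;
+	DB_TXN_STAT *tsp;
+	int ret;
+
+	if ((ret = db_env_create(&dbenv, 0)) != 0)
+		return (ret);
+	if ((ret = dbenv->open(dbenv, "/var/dbenv",
+	    DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
+		goto err;
+
+	/* Roughly what "db_stat -c" and "db_stat -t" summarize. */
+	if ((ret = dbenv->lock_stat(dbenv, &lsp, 0)) == 0) {
+		printf("deadlocks: %lu\n", (unsigned long)lsp->st_ndeadlocks);
+		free(lsp);
+	}
+	if (ret == 0 && (ret = dbenv->txn_stat(dbenv, &tsp, 0)) == 0) {
+		printf("active transactions: %lu\n",
+		    (unsigned long)tsp->st_nactive);
+		free(tsp);
+	}
+
+err:	(void)dbenv->close(dbenv, 0);
+	return (ret);
+}</pre></blockquote>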
+<p>The db_stat utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_stat should always be given the chance to
+detach from the environment and exit gracefully. To cause db_stat
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_stat utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_upgrade.html b/libdb/docs/utility/db_upgrade.html
new file mode 100644
index 0000000..2261eed
--- /dev/null
+++ b/libdb/docs/utility/db_upgrade.html
@@ -0,0 +1,87 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_upgrade</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_upgrade</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_upgrade [<b>-NsV</b>] [<b>-h home</b>] [<b>-P password</b>] file ...</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<p>The db_upgrade utility upgrades the Berkeley DB version of one or more
+files and the databases they contain to the current release version.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-N</b><dd>Do not acquire shared region mutexes while running. Other problems,
+such as potentially fatal errors in Berkeley DB, will be ignored as well.
+This option is intended only for debugging errors, and should not be
+used under any other circumstances.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-s</b><dd>This flag is only meaningful when upgrading databases from releases
+before the Berkeley DB 3.1 release.
+<p>As part of the upgrade from the Berkeley DB 3.0 release to the 3.1 release,
+the on-disk format of duplicate data items changed. To correctly upgrade
+the format requires that applications specify whether duplicate data
+items in the database are sorted or not. Specifying the <b>-s</b>
+flag means that the duplicates are sorted; otherwise, they are assumed
+to be unsorted. Incorrectly specifying the value of this flag may lead
+to database corruption.
+<p>Because the db_upgrade utility upgrades a physical file
+(including all the databases it contains), it is not possible to use
+db_upgrade to upgrade files where some of the databases it
+includes have sorted duplicate data items, and some of the databases it
+includes have unsorted duplicate data items. If the file does not have
+more than a single database, if the databases do not support duplicate
+data items, or if all the databases that support duplicate data items
+support the same style of duplicates (either sorted or unsorted),
+db_upgrade will work correctly as long as the <b>-s</b> flag
+is correctly specified. Otherwise, the file cannot be upgraded using
+db_upgrade, and must be upgraded manually using the
+<a href="../utility/db_dump.html">db_dump</a> and <a href="../utility/db_load.html">db_load</a> utilities.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+</dl>
+<p><b>It is important to realize that Berkeley DB database upgrades are done
+in place, and so are potentially destructive.</b> This means that if the
+system crashes during the upgrade procedure, or if the upgrade procedure
+runs out of disk space, the databases may be left in an inconsistent and
+unrecoverable state. See <a href="../ref/am/upgrade.html">Upgrading
+databases</a> for more information.
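+<p>The same in-place upgrade is available to applications through the
+DB-&gt;upgrade method, where the <b>-s</b> flag corresponds to passing the
+DB_DUPSORT flag. A minimal, illustrative sketch, assuming the 4.1-era C API
+and a hypothetical file name <i>old.db</i>:
+<p><blockquote><pre>#include &lt;db.h>
+
+int
+upgrade_file(void)
+{
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&dbp, NULL, 0)) != 0)
+		return (ret);
+	/* Pass DB_DUPSORT instead of 0 for the equivalent of "-s". */
+	ret = dbp->upgrade(dbp, "old.db", 0);
+	(void)dbp->close(dbp, 0);
+	return (ret);
+}</pre></blockquote>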
+<p>The db_upgrade utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_upgrade should always be given the chance to
+detach from the environment and exit gracefully. To cause db_upgrade
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_upgrade utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/db_verify.html b/libdb/docs/utility/db_verify.html
new file mode 100644
index 0000000..ce1693c
--- /dev/null
+++ b/libdb/docs/utility/db_verify.html
@@ -0,0 +1,76 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: db_verify</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>db_verify</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>db_verify [<b>-NoqV</b>] [<b>-h home</b>] [<b>-P password</b>] file ...</pre></h3>
+<h1>Description</h1>
+<a name="3"><!--meow--></a><a name="4"><!--meow--></a>
+<p>The db_verify utility verifies the structure of one or more
+files and the databases they contain.
+<p>The options are as follows:
+<p><dl compact>
+<p><dt><b>-h</b><dd>Specify a home directory for the database environment; by
+default, the current working directory is used.
+<p><dt><b>-o</b><dd>Skip the database checks for btree and duplicate sort order and for
+hashing.
+<p>If the file being verified contains databases using non-default
+comparison or hashing functions, calling the db_verify
+utility without the <b>-o</b> flag will usually return failure. The
+<b>-o</b> flag causes db_verify to ignore database sort or
+hash ordering and allows db_verify to be used on these files.
+To fully verify these files, verify them explicitly using the
+<a href="../api_c/db_verify.html">DB-&gt;verify</a> method, after setting the correct comparison or hashing
+functions; a minimal sketch of this approach follows the option list below.
+<p><dt><b>-N</b><dd>Do not acquire shared region mutexes while running. Other problems,
+such as potentially fatal errors in Berkeley DB, will be ignored as well.
+This option is intended only for debugging errors, and should not be
+used under any other circumstances.
+<p><dt><b>-P</b><dd>Specify an environment password. Although Berkeley DB utilities overwrite
+password strings as soon as possible, be aware there may be a window of
+vulnerability on systems where unprivileged users can see command-line
+arguments or where utilities are not able to overwrite the memory
+containing the command-line arguments.
+<p><dt><b>-q</b><dd>Suppress the printing of any error descriptions, simply exit success or
+failure.
+<p><dt><b>-V</b><dd>Write the library version number to the standard output, and exit.
+</dl>
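+<p>As noted for the <b>-o</b> option above, databases built with non-default
+comparison or hashing functions are best verified through the
+<a href="../api_c/db_verify.html">DB-&gt;verify</a> method after the correct
+functions have been set. The following minimal sketch assumes the 4.1-era C
+API, a hypothetical file name <i>custom.db</i>, and a hypothetical
+application-supplied comparison function <i>my_compare</i>:
+<p><blockquote><pre>#include &lt;db.h>
+
+/* Hypothetical application-specific Btree comparison function. */
+extern int my_compare(DB *, const DBT *, const DBT *);
+
+int
+verify_custom_db(void)
+{
+	DB *dbp;
+	int ret;
+
+	if ((ret = db_create(&dbp, NULL, 0)) != 0)
+		return (ret);
+	if ((ret = dbp->set_bt_compare(dbp, my_compare)) != 0) {
+		(void)dbp->close(dbp, 0);
+		return (ret);
+	}
+	/* The DB handle may not be used again after DB->verify. */
+	return (dbp->verify(dbp, "custom.db", NULL, NULL, 0));
+}</pre></blockquote>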
+<p><b>The db_verify utility does not perform any locking, even in
+Berkeley DB environments that are configured with a locking subsystem. As
+such, it should only be used on files that are not being modified by
+another thread of control.</b>
+<p>The db_verify utility may be used with a Berkeley DB environment (as described for the
+<b>-h</b> option, the environment variable <b>DB_HOME</b>, or
+because the utility was run in a directory containing a Berkeley DB
+environment). In order to avoid environment corruption when using a
+Berkeley DB environment, db_verify should always be given the chance to
+detach from the environment and exit gracefully. To cause db_verify
+to release all environment resources and exit cleanly, send it an
+interrupt signal (SIGINT).
+<p>The db_verify utility exits 0 on success, and &gt;0 if an error occurs.
+<h1>Environment Variables</h1>
+<p><dl compact>
+<p><dt>DB_HOME<dd>If the <b>-h</b> option is not specified and the environment variable
+DB_HOME is set, it is used as the path of the database home, as described
+in <a href="../api_c/env_open.html">DB_ENV-&gt;open</a>.
+</dl>
+</tt>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/docs/utility/index.html b/libdb/docs/utility/index.html
new file mode 100644
index 0000000..7e1b385
--- /dev/null
+++ b/libdb/docs/utility/index.html
@@ -0,0 +1,29 @@
+<!--$Id$-->
+<!--Copyright 1997-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<!--See the file LICENSE for redistribution information.-->
+<html>
+<head>
+<title>Berkeley DB: Berkeley DB Supporting Utilities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Berkeley DB Supporting Utilities</h1>
+<p><table border=1 align=center>
+<tr><th>Utility</th><th>Description</th></tr>
+<tr><td><a href="../utility/berkeley_db_svc.html">berkeley_db_svc</a></td><td>RPC server utility</td></tr>
+<tr><td><a href="../utility/db_archive.html">db_archive</a></td><td>Archival utility</td></tr>
+<tr><td><a href="../utility/db_checkpoint.html">db_checkpoint</a></td><td>Transaction checkpoint utility</td></tr>
+<tr><td><a href="../utility/db_deadlock.html">db_deadlock</a></td><td>Deadlock detection utility</td></tr>
+<tr><td><a href="../utility/db_dump.html">db_dump</a></td><td>Database dump utility</td></tr>
+<tr><td><a href="../utility/db_load.html">db_load</a></td><td>Database load utility</td></tr>
+<tr><td><a href="../utility/db_printlog.html">db_printlog</a></td><td>Transaction log display utility</td></tr>
+<tr><td><a href="../utility/db_recover.html">db_recover</a></td><td>Recovery utility</td></tr>
+<tr><td><a href="../utility/db_stat.html">db_stat</a></td><td>Statistics utility</td></tr>
+<tr><td><a href="../utility/db_upgrade.html">db_upgrade</a></td><td>Database upgrade utility</td></tr>
+<tr><td><a href="../utility/db_verify.html">db_verify</a></td><td>Verification utility</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/libdb/env/db_salloc.c b/libdb/env/db_salloc.c
new file mode 100644
index 0000000..32499d7
--- /dev/null
+++ b/libdb/env/db_salloc.c
@@ -0,0 +1,338 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Implement shared memory region allocation, using simple first-fit algorithm.
+ * The model is that we take a "chunk" of shared memory store and begin carving
+ * it up into areas, similarly to how malloc works. We do coalescing on free.
+ *
+ * The "len" field in the __data struct contains the length of the free region
+ * (less the size_t bytes that holds the length). We use the address provided
+ * by the caller to find this length, which allows us to free a chunk without
+ * requiring that the caller pass in the length of the chunk they're freeing.
+ */
+SH_LIST_HEAD(__head);
+struct __data {
+ size_t len;
+ SH_LIST_ENTRY links;
+};
+
+/*
+ * __db_shalloc_init --
+ * Initialize the area as one large chunk.
+ *
+ * PUBLIC: void __db_shalloc_init __P((void *, size_t));
+ */
+void
+__db_shalloc_init(area, size)
+ void *area;
+ size_t size;
+{
+ struct __data *elp;
+ struct __head *hp;
+
+ hp = area;
+ SH_LIST_INIT(hp);
+
+ elp = (struct __data *)(hp + 1);
+ elp->len = size - sizeof(struct __head) - sizeof(elp->len);
+ SH_LIST_INSERT_HEAD(hp, elp, links, __data);
+}
+
+/*
+ * __db_shalloc_size --
+ * Return the space needed for an allocation, including alignment.
+ *
+ * PUBLIC: int __db_shalloc_size __P((size_t, size_t));
+ */
+int
+__db_shalloc_size(len, align)
+ size_t len, align;
+{
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ return ((int)(ALIGN(len, align) + sizeof (struct __data)));
+}
+
+/*
+ * __db_shalloc --
+ * Allocate some space from the shared region.
+ *
+ * PUBLIC: int __db_shalloc __P((void *, size_t, size_t, void *));
+ */
+int
+__db_shalloc(p, len, align, retp)
+ void *p, *retp;
+ size_t len, align;
+{
+ struct __data *elp;
+ size_t *sp;
+ void *rp;
+
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ /* Walk the list, looking for a slot. */
+ for (elp = SH_LIST_FIRST((struct __head *)p, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data)) {
+ /*
+ * Calculate the value of the returned pointer if we were to
+ * use this chunk.
+ * + Find the end of the chunk.
+ * + Subtract the memory the user wants.
+ * + Find the closest previous correctly-aligned address.
+ */
+ rp = (u_int8_t *)elp + sizeof(size_t) + elp->len;
+ rp = (u_int8_t *)rp - len;
+ rp = (u_int8_t *)((db_alignp_t)rp & ~(align - 1));
+
+ /*
+ * Rp may now point before elp->links, in which case the chunk
+ * was too small, and we have to try again.
+ */
+ if ((u_int8_t *)rp < (u_int8_t *)&elp->links)
+ continue;
+
+ *(void **)retp = rp;
+#ifdef DIAGNOSTIC
+ /*
+ * At this point, whether or not we still need to split up a
+ * chunk, retp is the address of the region we are returning,
+ * and (u_int8_t *)elp + sizeof(size_t) + elp->len gives us
+ * the address of the first byte after the end of the chunk.
+ * Make the byte immediately before that the guard byte.
+ */
+ *((u_int8_t *)elp + sizeof(size_t) + elp->len - 1) = GUARD_BYTE;
+#endif
+
+#define SHALLOC_FRAGMENT 32
+ /*
+ * If there are at least SHALLOC_FRAGMENT additional bytes of
+ * memory, divide the chunk into two chunks.
+ */
+ if ((u_int8_t *)rp >=
+ (u_int8_t *)&elp->links + SHALLOC_FRAGMENT) {
+ sp = rp;
+ *--sp = elp->len -
+ ((u_int8_t *)rp - (u_int8_t *)&elp->links);
+ elp->len -= *sp + sizeof(size_t);
+ return (0);
+ }
+
+ /*
+ * Otherwise, we return the entire chunk, wasting some amount
+ * of space to keep the list compact. However, because the
+ * address we're returning to the user may not be the address
+ * of the start of the region for alignment reasons, set the
+ * size_t length fields between the returned address and the
+ * "real" length field to a flag value, so that we can find
+ * the real length during free.
+ */
+#define ILLEGAL_SIZE 1
+ SH_LIST_REMOVE(elp, links, __data);
+ for (sp = rp; (u_int8_t *)--sp >= (u_int8_t *)&elp->links;)
+ *sp = ILLEGAL_SIZE;
+ return (0);
+ }
+
+ return (ENOMEM);
+}
+
+/*
+ * __db_shalloc_free --
+ * Free a shared memory allocation.
+ *
+ * PUBLIC: void __db_shalloc_free __P((void *, void *));
+ */
+void
+__db_shalloc_free(regionp, ptr)
+ void *regionp, *ptr;
+{
+ struct __data *elp, *lastp, *newp;
+ struct __head *hp;
+ size_t free_size, *sp;
+ int merged;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+ ptr = sp;
+
+ newp = (struct __data *)((u_int8_t *)ptr - sizeof(size_t));
+ free_size = newp->len;
+
+#ifdef DIAGNOSTIC
+ /*
+ * The "real size" includes the guard byte; it's just the last
+ * byte in the chunk, and the caller never knew it existed.
+ *
+ * Check it to make sure it hasn't been stomped.
+ */
+ if (*((u_int8_t *)ptr + free_size - 1) != GUARD_BYTE) {
+ /*
+ * Eventually, once we push a DB_ENV handle down to these
+ * routines, we should use the standard output channels.
+ */
+ fprintf(stderr,
+ "Guard byte incorrect during shared memory free.\n");
+ abort();
+ /* NOTREACHED */
+ }
+
+ /* Trash the returned memory (including guard byte). */
+ memset(ptr, CLEAR_BYTE, free_size);
+#endif
+
+ /*
+ * Walk the list, looking for where this entry goes.
+ *
+ * We keep the free list sorted by address so that coalescing is
+ * trivial.
+ *
+ * XXX
+ * Probably worth profiling this to see how expensive it is.
+ */
+ hp = (struct __head *)regionp;
+ for (elp = SH_LIST_FIRST(hp, __data), lastp = NULL;
+ elp != NULL && (void *)elp < (void *)ptr;
+ lastp = elp, elp = SH_LIST_NEXT(elp, links, __data))
+ ;
+
+ /*
+ * Elp is either NULL (we reached the end of the list), or the slot
+ * after the one that's being returned. Lastp is either NULL (we're
+ * returning the first element of the list) or the element before the
+ * one being returned.
+ *
+ * Check for coalescing with the next element.
+ */
+ merged = 0;
+ if ((u_int8_t *)ptr + free_size == (u_int8_t *)elp) {
+ newp->len += elp->len + sizeof(size_t);
+ SH_LIST_REMOVE(elp, links, __data);
+ if (lastp != NULL)
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ else
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ merged = 1;
+ }
+
+ /* Check for coalescing with the previous element. */
+ if (lastp != NULL && (u_int8_t *)lastp +
+ lastp->len + sizeof(size_t) == (u_int8_t *)newp) {
+ lastp->len += newp->len + sizeof(size_t);
+
+ /*
+ * If we have already put the new element into the list take
+ * it back off again because it's just been merged with the
+ * previous element.
+ */
+ if (merged)
+ SH_LIST_REMOVE(newp, links, __data);
+ merged = 1;
+ }
+
+ if (!merged) {
+ if (lastp == NULL)
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ else
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ }
+}
+
+/*
+ * __db_shsizeof --
+ * Return the size of a shalloc'd piece of memory.
+ *
+ * !!!
+ * Note that this is from an internal standpoint -- it includes not only
+ * the size of the memory being used, but also the extra alignment bytes
+ * in front and, #ifdef DIAGNOSTIC, the guard byte at the end.
+ *
+ * PUBLIC: size_t __db_shsizeof __P((void *));
+ */
+size_t
+__db_shsizeof(ptr)
+ void *ptr;
+{
+ struct __data *elp;
+ size_t *sp;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+
+ elp = (struct __data *)((u_int8_t *)sp - sizeof(size_t));
+ return (elp->len);
+}
+
+/*
+ * __db_shalloc_dump --
+ *
+ * PUBLIC: void __db_shalloc_dump __P((void *, FILE *));
+ */
+void
+__db_shalloc_dump(addr, fp)
+ void *addr;
+ FILE *fp;
+{
+ struct __data *elp;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ fprintf(fp, "%s\nMemory free list\n", DB_LINE);
+
+ for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data))
+ fprintf(fp, "%#lx: %lu\t", P_TO_ULONG(elp), (u_long)elp->len);
+ fprintf(fp, "\n");
+}
diff --git a/libdb/env/db_shash.c b/libdb/env/db_shash.c
new file mode 100644
index 0000000..93f2b23
--- /dev/null
+++ b/libdb/env/db_shash.c
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Table of good hash values. Up to ~250,000 buckets, we use powers of 2.
+ * After that, we slow the rate of increase by half. For each choice, we
+ * then use a nearby prime number as the hash value.
+ *
+ * If a terabyte is the maximum cache we'll see, and we assume there are
+ * 10 1K buckets on each hash chain, then 107374182 is the maximum number
+ * of buckets we'll ever need.
+ */
+static const struct {
+ u_int32_t power;
+ u_int32_t prime;
+} list[] = {
+ { 32, 37}, /* 2^5 */
+ { 64, 67}, /* 2^6 */
+ { 128, 131}, /* 2^7 */
+ { 256, 257}, /* 2^8 */
+ { 512, 521}, /* 2^9 */
+ { 1024, 1031}, /* 2^10 */
+ { 2048, 2053}, /* 2^11 */
+ { 4096, 4099}, /* 2^12 */
+ { 8192, 8191}, /* 2^13 */
+ { 16384, 16381}, /* 2^14 */
+ { 32768, 32771}, /* 2^15 */
+ { 65536, 65537}, /* 2^16 */
+ { 131072, 131071}, /* 2^17 */
+ { 262144, 262147}, /* 2^18 */
+ { 393216, 393209}, /* 2^18 + 2^18/2 */
+ { 524288, 524287}, /* 2^19 */
+ { 786432, 786431}, /* 2^19 + 2^19/2 */
+ { 1048576, 1048573}, /* 2^20 */
+ { 1572864, 1572869}, /* 2^20 + 2^20/2 */
+ { 2097152, 2097169}, /* 2^21 */
+ { 3145728, 3145721}, /* 2^21 + 2^21/2 */
+ { 4194304, 4194301}, /* 2^22 */
+ { 6291456, 6291449}, /* 2^22 + 2^22/2 */
+ { 8388608, 8388617}, /* 2^23 */
+ { 12582912, 12582917}, /* 2^23 + 2^23/2 */
+ { 16777216, 16777213}, /* 2^24 */
+ { 25165824, 25165813}, /* 2^24 + 2^24/2 */
+ { 33554432, 33554393}, /* 2^25 */
+ { 50331648, 50331653}, /* 2^25 + 2^25/2 */
+ { 67108864, 67108859}, /* 2^26 */
+ { 100663296, 100663291}, /* 2^26 + 2^26/2 */
+ { 134217728, 134217757}, /* 2^27 */
+ { 201326592, 201326611}, /* 2^27 + 2^27/2 */
+ { 268435456, 268435459}, /* 2^28 */
+ { 402653184, 402653189}, /* 2^28 + 2^28/2 */
+ { 536870912, 536870909}, /* 2^29 */
+ { 805306368, 805306357}, /* 2^29 + 2^29/2 */
+ {1073741824, 1073741827}, /* 2^30 */
+ {0, 0}
+};
+
+/*
+ * __db_tablesize --
+ * Choose a size for the hash table.
+ *
+ * PUBLIC: int __db_tablesize __P((u_int32_t));
+ */
+int
+__db_tablesize(n_buckets)
+ u_int32_t n_buckets;
+{
+ int i;
+
+ /*
+ * We try to be clever about how big we make the hash tables. Use a
+ * prime number close to the "suggested" number of elements that will
+ * be in the hash table. Use 32 as the minimum hash table size.
+ *
+ * Ref: Sedgewick, Algorithms in C, "Hash Functions"
+ */
+ if (n_buckets < 32)
+ n_buckets = 32;
+
+ for (i = 0;; ++i) {
+ if (list[i].power == 0) {
+ --i;
+ break;
+ }
+ if (list[i].power >= n_buckets)
+ break;
+ }
+ return (list[i].prime);
+}
+
+/*
+ * __db_hashinit --
+ * Initialize a hash table that resides in shared memory.
+ *
+ * PUBLIC: void __db_hashinit __P((void *, u_int32_t));
+ */
+void
+__db_hashinit(begin, nelements)
+ void *begin;
+ u_int32_t nelements;
+{
+ u_int32_t i;
+ SH_TAILQ_HEAD(hash_head) *headp;
+
+ headp = (struct hash_head *)begin;
+
+ for (i = 0; i < nelements; i++, headp++)
+ SH_TAILQ_INIT(headp);
+}
diff --git a/libdb/env/env_file.c b/libdb/env/env_file.c
new file mode 100644
index 0000000..bc5a080
--- /dev/null
+++ b/libdb/env/env_file.c
@@ -0,0 +1,166 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_overwrite_pass __P((DB_ENV *,
+ const char *, DB_FH *, u_int32_t, u_int32_t, u_int32_t));
+
+/*
+ * __db_fileinit --
+ * Initialize a regular file, optionally zero-filling it as well.
+ *
+ * PUBLIC: int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+ */
+int
+__db_fileinit(dbenv, fhp, size, zerofill)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t size;
+ int zerofill;
+{
+ db_pgno_t pages;
+ size_t i;
+ size_t nw;
+ u_int32_t relative;
+ int ret;
+ char buf[OS_VMPAGESIZE];
+
+ /* Write nuls to the new bytes. */
+ memset(buf, 0, sizeof(buf));
+
+ /*
+ * Extend the region by writing the last page. If the region is >4Gb,
+ * increment may be larger than the maximum possible seek "relative"
+ * argument, as it's an unsigned 32-bit value. Break the offset into
+ * pages of 1MB each so that we don't overflow (2^20 * 2^32 is bigger
+ * than any memory I expect to see for a while).
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
+ return (ret);
+ pages = (db_pgno_t)((size - OS_VMPAGESIZE) / MEGABYTE);
+ relative = (u_int32_t)((size - OS_VMPAGESIZE) % MEGABYTE);
+ if ((ret = __os_seek(dbenv,
+ fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ return (ret);
+
+ /*
+ * We may want to guarantee that there is enough disk space for the
+ * file, so we also write a byte to each page. We write the byte
+ * because reading it is insufficient on systems smart enough not to
+ * instantiate disk pages to satisfy a read (e.g., Solaris).
+ */
+ if (zerofill) {
+ pages = (db_pgno_t)(size / MEGABYTE);
+ relative = (u_int32_t)(size % MEGABYTE);
+ if ((ret = __os_seek(dbenv, fhp,
+ MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
+ return (ret);
+
+ /* Write a byte to each page. */
+ for (i = 0; i < size; i += OS_VMPAGESIZE) {
+ if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv, fhp,
+ 0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __db_overwrite --
+ * Overwrite a file.
+ *
+ * PUBLIC: int __db_overwrite __P((DB_ENV *, const char *));
+ */
+int
+__db_overwrite(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+ DB_FH fh, *fhp;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ fhp = &fh;
+ if ((ret = __os_open(dbenv, path, DB_OSO_REGION, 0, fhp)) == 0 &&
+ (ret = __os_ioinfo(dbenv, path, fhp, &mbytes, &bytes, NULL)) == 0) {
+ /*
+ * !!!
+ * Overwrite a regular file with alternating 0xff, 0x00 and 0xff
+ * byte patterns. Implies a fixed-block filesystem, journaling
+ * or logging filesystems will require operating system support.
+ */
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0x00)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ } else
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+err: if (F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ return (ret);
+}
+
+/*
+ * __db_overwrite_pass --
+ * A single pass over the file, writing the specified byte pattern.
+ */
+static int
+__db_overwrite_pass(dbenv, path, fhp, mbytes, bytes, pattern)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t mbytes, bytes, pattern;
+{
+ size_t len, nw;
+ int i, ret;
+ char buf[8 * 1024];
+
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ memset(buf, pattern, sizeof(buf));
+
+ for (; mbytes > 0; --mbytes)
+ for (i = MEGABYTE / sizeof(buf); i > 0; --i)
+ if ((ret =
+ __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ goto err;
+ for (; bytes > 0; bytes -= (u_int32_t)len) {
+ len = bytes < sizeof(buf) ? bytes : sizeof(buf);
+ if ((ret = __os_write(dbenv, fhp, buf, len, &nw)) != 0)
+ goto err;
+ }
+
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+err: __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+ return (ret);
+}
diff --git a/libdb/env/env_method.c b/libdb/env/env_method.c
new file mode 100644
index 0000000..d98748a
--- /dev/null
+++ b/libdb/env/env_method.c
@@ -0,0 +1,632 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+/*
+ * This is the file that initializes the global array. Do it this way because
+ * people keep changing one without changing the other. Having declaration and
+ * initialization in one file will hopefully fix that.
+ */
+#define DB_INITIALIZE_DB_GLOBALS 1
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
+static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_init __P((DB_ENV *));
+static int __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __dbenv_set_app_dispatch __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
+static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
+static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+static int __dbenv_set_rpc_server_noclnt
+ __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
+static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+
+/*
+ * db_env_create --
+ * DB_ENV constructor.
+ *
+ * EXTERN: int db_env_create __P((DB_ENV **, u_int32_t));
+ */
+int
+db_env_create(dbenvpp, flags)
+ DB_ENV **dbenvpp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * !!!
+ * We can't call the flags-checking routines, we don't have an
+ * environment yet.
+ */
+ if (flags != 0 && flags != DB_CLIENT)
+ return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(*dbenv), &dbenv)) != 0)
+ return (ret);
+
+#ifdef HAVE_RPC
+ if (LF_ISSET(DB_CLIENT))
+ F_SET(dbenv, DB_ENV_RPCCLIENT);
+#endif
+ ret = __dbenv_init(dbenv);
+
+ if (ret != 0) {
+ __os_free(NULL, dbenv);
+ return (ret);
+ }
+
+ *dbenvpp = dbenv;
+ return (0);
+}
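+
+/*
+ * Usage sketch (illustration only, not part of the library; the home
+ * directory below is hypothetical): an application typically creates the
+ * handle, configures it, and then opens it, e.g.:
+ *
+ *	DB_ENV *dbenv;
+ *	if (db_env_create(&dbenv, 0) == 0 &&
+ *	    dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1) == 0)
+ *		(void)dbenv->open(dbenv, "/path/to/home",
+ *		    DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN, 0);
+ */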
+
+/*
+ * __dbenv_init --
+ * Initialize a DB_ENV structure.
+ */
+static int
+__dbenv_init(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * Set up methods that are the same in both normal and RPC environments.
+ */
+ dbenv->err = __dbenv_err;
+ dbenv->errx = __dbenv_errx;
+ dbenv->set_errcall = __dbenv_set_errcall;
+ dbenv->set_errfile = __dbenv_set_errfile;
+ dbenv->set_errpfx = __dbenv_set_errpfx;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->close = __dbcl_env_close;
+ dbenv->dbremove = __dbcl_env_dbremove;
+ dbenv->dbrename = __dbcl_env_dbrename;
+ dbenv->open = __dbcl_env_open_wrap;
+ dbenv->remove = __dbcl_env_remove;
+ dbenv->set_alloc = __dbcl_env_alloc;
+ dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
+ dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_encrypt = __dbcl_env_encrypt;
+ dbenv->set_feedback = __dbcl_env_set_feedback;
+ dbenv->set_flags = __dbcl_env_flags;
+ dbenv->set_paniccall = __dbcl_env_paniccall;
+ dbenv->set_rpc_server = __dbcl_envrpcserver;
+ dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tas_spins = __dbcl_set_tas_spins;
+ dbenv->set_timeout = __dbcl_set_timeout;
+ dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
+ dbenv->set_verbose = __dbcl_set_verbose;
+ } else {
+#endif
+ dbenv->close = __dbenv_close;
+ dbenv->dbremove = __dbenv_dbremove;
+ dbenv->dbrename = __dbenv_dbrename;
+ dbenv->open = __dbenv_open;
+ dbenv->remove = __dbenv_remove;
+ dbenv->set_alloc = __dbenv_set_alloc;
+ dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
+ dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_encrypt = __dbenv_set_encrypt;
+ dbenv->set_feedback = __dbenv_set_feedback;
+ dbenv->set_flags = __dbenv_set_flags;
+ dbenv->set_paniccall = __dbenv_set_paniccall;
+ dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
+ dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tas_spins = __dbenv_set_tas_spins;
+ dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
+ dbenv->set_verbose = __dbenv_set_verbose;
+#ifdef HAVE_RPC
+ }
+#endif
+ dbenv->shm_key = INVALID_REGION_SEGID;
+ dbenv->db_ref = 0;
+
+ __log_dbenv_create(dbenv); /* Subsystem specific. */
+ __lock_dbenv_create(dbenv);
+ __memp_dbenv_create(dbenv);
+ __rep_dbenv_create(dbenv);
+ __txn_dbenv_create(dbenv);
+
+ return (0);
+}
+
+/*
+ * __dbenv_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+#else
+__dbenv_err(dbenv, error, fmt, va_alist)
+ const DB_ENV *dbenv;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbenv_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__dbenv_errx(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 1, fmt);
+}
+
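+/*
+ * __dbenv_set_alloc --
+ * Set the application-specific memory allocation functions.
+ */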
+static int
+__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+ DB_ENV *dbenv;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_alloc");
+
+ dbenv->db_malloc = mal_func;
+ dbenv->db_realloc = real_func;
+ dbenv->db_free = free_func;
+ return (0);
+}
+
+/*
+ * __dbenv_set_app_dispatch --
+ * Set the transaction abort recover function.
+ */
+static int
+__dbenv_set_app_dispatch(dbenv, app_dispatch)
+ DB_ENV *dbenv;
+ int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_app_dispatch");
+
+ dbenv->app_dispatch = app_dispatch;
+ return (0);
+}
+
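+/*
+ * __dbenv_set_encrypt --
+ * Set the environment password and encryption algorithm.
+ */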
+static int
+__dbenv_set_encrypt(dbenv, passwd, flags)
+ DB_ENV *dbenv;
+ const char *passwd;
+ u_int32_t flags;
+{
+#ifdef HAVE_CRYPTO
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_encrypt");
+#define OK_CRYPTO_FLAGS (DB_ENCRYPT_AES)
+
+ if (flags != 0 && LF_ISSET(~OK_CRYPTO_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_encrypt", 0));
+
+ if (passwd == NULL || strlen(passwd) == 0) {
+ __db_err(dbenv, "Empty password specified to set_encrypt");
+ return (EINVAL);
+ }
+ if (!CRYPTO_ON(dbenv)) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_CIPHER), &db_cipher))
+ != 0)
+ goto err;
+ dbenv->crypto_handle = db_cipher;
+ } else
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+
+ if (dbenv->passwd != NULL)
+ __os_free(dbenv, dbenv->passwd);
+ if ((ret = __os_strdup(dbenv, passwd, &dbenv->passwd)) != 0) {
+ __os_free(dbenv, db_cipher);
+ goto err;
+ }
+ /*
+ * We're going to need this often enough to keep around
+ */
+ dbenv->passwd_len = strlen(dbenv->passwd) + 1;
+ /*
+ * The MAC key is for checksumming, and is separate from
+ * the algorithm. So initialize it here, even if they
+ * are using CIPHER_ANY.
+ */
+ __db_derive_mac((u_int8_t *)dbenv->passwd,
+ dbenv->passwd_len, db_cipher->mac_key);
+ switch (flags) {
+ case 0:
+ F_SET(db_cipher, CIPHER_ANY);
+ break;
+ case DB_ENCRYPT_AES:
+ if ((ret = __crypto_algsetup(dbenv, db_cipher, CIPHER_AES, 0))
+ != 0)
+ goto err1;
+ break;
+ }
+ return (0);
+
+err1:
+ __os_free(dbenv, dbenv->passwd);
+ __os_free(dbenv, db_cipher);
+ dbenv->crypto_handle = NULL;
+err:
+ return (ret);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(passwd, NULL);
+ COMPQUIET(flags, 0);
+
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
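+/*
+ * __dbenv_set_flags --
+ * Set or clear boolean environment configuration flags.
+ */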
+static int
+__dbenv_set_flags(dbenv, flags, onoff)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+#define OK_FLAGS \
+ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \
+ DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
+ DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TXN_NOSYNC | \
+ DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
+
+ if (LF_ISSET(~OK_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
+ if (onoff && LF_ISSET(DB_TXN_WRITE_NOSYNC) && LF_ISSET(DB_TXN_NOSYNC))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 1));
+
+ if (LF_ISSET(DB_AUTO_COMMIT)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_AUTO_COMMIT);
+ else
+ F_CLR(dbenv, DB_ENV_AUTO_COMMIT);
+ }
+ if (LF_ISSET(DB_CDB_ALLDB)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_CDB_ALLDB);
+ else
+ F_CLR(dbenv, DB_ENV_CDB_ALLDB);
+ }
+ if (LF_ISSET(DB_DIRECT_DB)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_DB);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_DB);
+ }
+ if (LF_ISSET(DB_DIRECT_LOG)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_LOG);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_LOG);
+ }
+ if (LF_ISSET(DB_NOLOCKING)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ else
+ F_CLR(dbenv, DB_ENV_NOLOCKING);
+ }
+ if (LF_ISSET(DB_NOMMAP)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOMMAP);
+ else
+ F_CLR(dbenv, DB_ENV_NOMMAP);
+ }
+ if (LF_ISSET(DB_NOPANIC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOPANIC);
+ else
+ F_CLR(dbenv, DB_ENV_NOPANIC);
+ }
+ if (LF_ISSET(DB_OVERWRITE)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_OVERWRITE);
+ else
+ F_CLR(dbenv, DB_ENV_OVERWRITE);
+ }
+ if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv,
+ "set_flags: DB_PANIC_ENVIRONMENT");
+ PANIC_SET(dbenv, onoff);
+ }
+ if (LF_ISSET(DB_REGION_INIT)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_REGION_INIT");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_REGION_INIT);
+ else
+ F_CLR(dbenv, DB_ENV_REGION_INIT);
+ }
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ }
+ if (LF_ISSET(DB_YIELDCPU)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_YIELDCPU);
+ else
+ F_CLR(dbenv, DB_ENV_YIELDCPU);
+ }
+ return (0);
+}
+
+static int
+__dbenv_set_data_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ int ret;
+
+#define DATA_INIT_CNT 20 /* Start with 20 data slots. */
+ if (dbenv->db_data_dir == NULL) {
+ if ((ret = __os_calloc(dbenv, DATA_INIT_CNT,
+ sizeof(char **), &dbenv->db_data_dir)) != 0)
+ return (ret);
+ dbenv->data_cnt = DATA_INIT_CNT;
+ } else if (dbenv->data_next == dbenv->data_cnt - 1) {
+ dbenv->data_cnt *= 2;
+ if ((ret = __os_realloc(dbenv,
+ dbenv->data_cnt * sizeof(char **),
+ &dbenv->db_data_dir)) != 0)
+ return (ret);
+ }
+ return (__os_strdup(dbenv,
+ dir, &dbenv->db_data_dir[dbenv->data_next++]));
+}
+
+static void
+__dbenv_set_errcall(dbenv, errcall)
+ DB_ENV *dbenv;
+ void (*errcall) __P((const char *, char *));
+{
+ dbenv->db_errcall = errcall;
+}
+
+static void
+__dbenv_set_errfile(dbenv, errfile)
+ DB_ENV *dbenv;
+ FILE *errfile;
+{
+ dbenv->db_errfile = errfile;
+}
+
+static void
+__dbenv_set_errpfx(dbenv, errpfx)
+ DB_ENV *dbenv;
+ const char *errpfx;
+{
+ dbenv->db_errpfx = errpfx;
+}
+
+static int
+__dbenv_set_feedback(dbenv, feedback)
+ DB_ENV *dbenv;
+ void (*feedback) __P((DB_ENV *, int, int));
+{
+ dbenv->db_feedback = feedback;
+ return (0);
+}
+
+static int
+__dbenv_set_paniccall(dbenv, paniccall)
+ DB_ENV *dbenv;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ dbenv->db_paniccall = paniccall;
+ return (0);
+}
+
+static int
+__dbenv_set_shm_key(dbenv, shm_key)
+ DB_ENV *dbenv;
+ long shm_key; /* !!!: really a key_t. */
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+
+ dbenv->shm_key = shm_key;
+ return (0);
+}
+
+static int
+__dbenv_set_tas_spins(dbenv, tas_spins)
+ DB_ENV *dbenv;
+ u_int32_t tas_spins;
+{
+ dbenv->tas_spins = tas_spins;
+ return (0);
+}
+
+static int
+__dbenv_set_tmp_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
+}
+
+static int
+__dbenv_set_verbose(dbenv, which, onoff)
+ DB_ENV *dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ switch (which) {
+ case DB_VERB_CHKPOINT:
+ case DB_VERB_DEADLOCK:
+ case DB_VERB_RECOVERY:
+ case DB_VERB_REPLICATION:
+ case DB_VERB_WAITSFOR:
+ if (onoff)
+ FLD_SET(dbenv->verbose, which);
+ else
+ FLD_CLR(dbenv->verbose, which);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_mi_env --
+ * Method illegally called with public environment.
+ *
+ * PUBLIC: int __db_mi_env __P((DB_ENV *, const char *));
+ */
+int
+__db_mi_env(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: method not permitted in shared environment", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_mi_open --
+ * Method illegally called after open.
+ *
+ * PUBLIC: int __db_mi_open __P((DB_ENV *, const char *, int));
+ */
+int
+__db_mi_open(dbenv, name, after)
+ DB_ENV *dbenv;
+ const char *name;
+ int after;
+{
+ __db_err(dbenv, "%s: method not permitted %s open",
+ name, after ? "after" : "before");
+ return (EINVAL);
+}
+
+/*
+ * __db_env_config --
+ * Method or function called without required configuration.
+ *
+ * PUBLIC: int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_env_config(dbenv, i, flags)
+ DB_ENV *dbenv;
+ char *i;
+ u_int32_t flags;
+{
+ char *sub;
+
+ switch (flags) {
+ case DB_INIT_LOCK:
+ sub = "locking";
+ break;
+ case DB_INIT_LOG:
+ sub = "logging";
+ break;
+ case DB_INIT_MPOOL:
+ sub = "memory pool";
+ break;
+ case DB_INIT_TXN:
+ sub = "transaction";
+ break;
+ default:
+ sub = "<unspecified>";
+ break;
+ }
+ __db_err(dbenv,
+ "%s interface requires an environment configured for the %s subsystem",
+ i, sub);
+ return (EINVAL);
+}
+
+static int
+__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *cl;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ COMPQUIET(host, NULL);
+ COMPQUIET(cl, NULL);
+ COMPQUIET(tsec, 0);
+ COMPQUIET(ssec, 0);
+ COMPQUIET(flags, 0);
+
+ __db_err(dbenv,
+ "set_rpc_server method not permitted in non-RPC environment");
+ return (__db_eopnotsup(dbenv));
+}
diff --git a/libdb/env/env_open.c b/libdb/env/env_open.c
new file mode 100644
index 0000000..a1c4bbb
--- /dev/null
+++ b/libdb/env/env_open.c
@@ -0,0 +1,1195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/fop.h"
+
+#ifdef _WIN32
+#define strcasecmp _stricmp
+#endif
+
+static int __db_parse __P((DB_ENV *, char *));
+static int __db_tmp_open __P((DB_ENV *, u_int32_t, char *, DB_FH *));
+static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_iremove __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_refresh __P((DB_ENV *, u_int32_t));
+
+/*
+ * db_version --
+ * Return version information.
+ *
+ * EXTERN: char *db_version __P((int *, int *, int *));
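+ *
+ * For example, a caller can retrieve the numbers with:
+ *	int major, minor, patch;
+ *	(void)db_version(&major, &minor, &patch);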
+ */
+char *
+db_version(majverp, minverp, patchp)
+ int *majverp, *minverp, *patchp;
+{
+ if (majverp != NULL)
+ *majverp = DB_VERSION_MAJOR;
+ if (minverp != NULL)
+ *minverp = DB_VERSION_MINOR;
+ if (patchp != NULL)
+ *patchp = DB_VERSION_PATCH;
+ return ((char *)DB_VERSION_STRING);
+}
+
+/*
+ * __dbenv_open --
+ * Initialize an environment.
+ *
+ * PUBLIC: int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbenv_open(dbenv, db_home, flags, mode)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+ int mode;
+{
+ DB_MPOOL *dbmp;
+ int ret;
+ u_int32_t init_flags, orig_flags;
+
+ orig_flags = dbenv->flags;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | \
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_JOINENV | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | DB_SYSTEM_MEM | \
+ DB_THREAD | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+#undef OKFLAGS_CDB
+#define OKFLAGS_CDB \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_SYSTEM_MEM | DB_THREAD | \
+ DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /*
+ * Flags saved in the init_flags field of the environment, representing
+ * flags to DB_ENV->set_flags and DB_ENV->open that need to be set.
+ */
+#define DB_INITENV_CDB 0x0001 /* DB_INIT_CDB */
+#define DB_INITENV_CDB_ALLDB 0x0002 /* DB_INIT_CDB_ALLDB */
+#define DB_INITENV_LOCK 0x0004 /* DB_INIT_LOCK */
+#define DB_INITENV_LOG 0x0008 /* DB_INIT_LOG */
+#define DB_INITENV_MPOOL 0x0010 /* DB_INIT_MPOOL */
+#define DB_INITENV_TXN 0x0020 /* DB_INIT_TXN */
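+	/*
+	 * For example, an environment created with DB_INIT_TXN and
+	 * DB_INIT_MPOOL records DB_INITENV_TXN | DB_INITENV_MPOOL here; a
+	 * later DB_JOINENV open reads the saved value back and converts it
+	 * to the matching DB_INIT_* flags below.
+	 */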
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_INIT_CDB) &&
+ (ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS_CDB)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->open", flags, DB_RECOVER, DB_RECOVER_FATAL)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "DB_ENV->open", flags, DB_JOINENV,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_PRIVATE)) != 0)
+ return (ret);
+
+ /*
+ * Currently we support one kind of mutex that is intra-process only,
+ * POSIX 1003.1 pthreads, because a variety of systems don't support
+ * the full pthreads API, and our only alternative is test-and-set.
+ */
+#ifdef HAVE_MUTEX_THREAD_ONLY
+ if (!LF_ISSET(DB_PRIVATE)) {
+ __db_err(dbenv,
+ "Berkeley DB library configured to support only DB_PRIVATE environments");
+ return (EINVAL);
+ }
+#endif
+
+ /*
+ * If we're doing recovery, destroy the environment so that we create
+ * all the regions from scratch. I'd like to reuse already created
+ * regions, but that's hard. We would have to create the environment
+ * region from scratch, at least, as we have no way of knowing if its
+ * linked lists are corrupted.
+ *
+ * I suppose we could set flags while modifying those links, but that
+ * is going to be difficult to get right. The major concern I have
+ * is if the application stomps the environment with a rogue pointer.
+ * We have no way of detecting that, and we could be forced into a
+ * situation where we start up and then crash, repeatedly.
+ *
+ * Note that we do not check any flags like DB_PRIVATE before calling
+ * remove. We don't care if the current environment was private or
+ * not, we just want to nail any files that are left-over for whatever
+ * reason, from whatever session.
+ */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))
+ if ((ret = __dbenv_iremove(dbenv, db_home, DB_FORCE)) != 0 ||
+ (ret = __dbenv_refresh(dbenv, orig_flags)) != 0)
+ return (ret);
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ goto err;
+
+ /* Convert the DB_ENV->open flags to internal flags. */
+ if (LF_ISSET(DB_CREATE))
+ F_SET(dbenv, DB_ENV_CREATE);
+ if (LF_ISSET(DB_LOCKDOWN))
+ F_SET(dbenv, DB_ENV_LOCKDOWN);
+ if (LF_ISSET(DB_PRIVATE))
+ F_SET(dbenv, DB_ENV_PRIVATE);
+ if (LF_ISSET(DB_RECOVER_FATAL))
+ F_SET(dbenv, DB_ENV_FATAL);
+ if (LF_ISSET(DB_SYSTEM_MEM))
+ F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+ if (LF_ISSET(DB_THREAD))
+ F_SET(dbenv, DB_ENV_THREAD);
+
+ /* Default permissions are read-write for both owner and group. */
+ dbenv->db_mode = mode == 0 ? __db_omode("rwrw--") : mode;
+
+ /*
+	 * Create/join the environment.  We pass in the flags of interest
+	 * to an environment joining later; if we're not the ones doing
+	 * the create, we pull out whatever flags were stored when the
+	 * environment was created.
+ */
+ init_flags = 0;
+ init_flags |= (LF_ISSET(DB_INIT_CDB) ? DB_INITENV_CDB : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOCK) ? DB_INITENV_LOCK : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOG) ? DB_INITENV_LOG : 0);
+ init_flags |= (LF_ISSET(DB_INIT_MPOOL) ? DB_INITENV_MPOOL : 0);
+ init_flags |= (LF_ISSET(DB_INIT_TXN) ? DB_INITENV_TXN : 0);
+ init_flags |=
+ (F_ISSET(dbenv, DB_ENV_CDB_ALLDB) ? DB_INITENV_CDB_ALLDB : 0);
+
+ if ((ret = __db_e_attach(dbenv, &init_flags)) != 0)
+ goto err;
+
+ /*
+ * __db_e_attach will return the saved init_flags field, which
+ * contains the DB_INIT_* flags used when we were created.
+ */
+ if (LF_ISSET(DB_JOINENV)) {
+ LF_CLR(DB_JOINENV);
+
+ LF_SET((init_flags & DB_INITENV_CDB) ? DB_INIT_CDB : 0);
+ LF_SET((init_flags & DB_INITENV_LOCK) ? DB_INIT_LOCK : 0);
+ LF_SET((init_flags & DB_INITENV_LOG) ? DB_INIT_LOG : 0);
+ LF_SET((init_flags & DB_INITENV_MPOOL) ? DB_INIT_MPOOL : 0);
+ LF_SET((init_flags & DB_INITENV_TXN) ? DB_INIT_TXN : 0);
+
+ if (LF_ISSET(DB_INITENV_CDB_ALLDB) &&
+ (ret = dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0)
+ goto err;
+ }
+
+ /* Initialize for CDB product. */
+ if (LF_ISSET(DB_INIT_CDB)) {
+ LF_SET(DB_INIT_LOCK);
+ F_SET(dbenv, DB_ENV_CDB);
+ }
+
+ /*
+ * Initialize the subsystems. Transactions imply logging but do not
+ * imply locking. While almost all applications want both locking
+ * and logging, it would not be unreasonable for a single threaded
+ * process to want transactions for atomicity guarantees, but not
+ * necessarily need concurrency.
+ */
+
+ if (LF_ISSET(DB_INIT_MPOOL))
+ if ((ret = __memp_open(dbenv)) != 0)
+ goto err;
+
+#ifdef HAVE_CRYPTO
+ /*
+ * Initialize the ciphering area prior to any running of recovery so
+ * that we can initialize the keys, etc. before recovery.
+ *
+ * !!!
+ * This must be after the mpool init, but before the log initialization
+ * because log_open may attempt to run log_recover during its open.
+ */
+ if ((ret = __crypto_region_init(dbenv)) != 0)
+ goto err;
+#endif
+
+ if (LF_ISSET(DB_INIT_LOG | DB_INIT_TXN))
+ if ((ret = __log_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_LOCK))
+ if ((ret = __lock_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __txn_open(dbenv)) != 0)
+ goto err;
+
+ /*
+ * If the application is running with transactions, initialize
+ * the function tables.
+ */
+ if ((ret = __bam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __crdel_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __db_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __dbreg_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __fop_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __ham_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __qam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __txn_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+
+ /* Perform recovery for any previous run. */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __db_apprec(dbenv, NULL,
+ LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0)
+ goto err;
+ }
+
+ /* Initialize the replication area just in case. */
+ if ((ret = __rep_region_init(dbenv)) != 0)
+ goto err;
+
+ /*
+ * Initialize the DB list, and its mutex as necessary. If the env
+ * handle isn't free-threaded we don't need a mutex because there
+ * will never be more than a single DB handle on the list. If the
+ * mpool wasn't initialized, then we can't ever open a DB handle.
+ *
+ * We also need to initialize the MT mutex as necessary, so do them
+ * both. If we error, __dbenv_refresh() will clean up.
+ *
+ * !!!
+ * This must come after the __memp_open call above because if we are
+ * recording mutexes for system resources, we will do it in the mpool
+ * region for environments and db handles. So, the mpool region must
+ * already be initialized.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (F_ISSET(dbenv, DB_ENV_THREAD) && LF_ISSET(DB_INIT_MPOOL)) {
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->dblist_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->mt_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ /*
+ * If we've created the regions, are running with transactions, and did
+ * not just run recovery, we need to log the fact that the transaction
+ * IDs got reset.
+ *
+ * If we ran recovery, there may be prepared-but-not-yet-committed
+ * transactions that need to be resolved. Recovery resets the minimum
+ * transaction ID and logs the reset if that's appropriate, so we
+ * don't need to do anything here in the recover case.
+ */
+ if (TXN_ON(dbenv) &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE) &&
+ !LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __txn_reset(dbenv)) != 0)
+ goto err;
+
+ return (0);
+
+err: /* If we fail after creating the regions, remove them. */
+ if (dbenv->reginfo != NULL &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE)) {
+ ret = __db_panic(dbenv, ret);
+
+ (void)__dbenv_refresh(dbenv, orig_flags);
+ (void)__dbenv_iremove(dbenv, db_home, DB_FORCE);
+ }
+ (void)__dbenv_refresh(dbenv, orig_flags);
+
+ return (ret);
+}
+
+/*
+ * __dbenv_remove --
+ * Discard an environment.
+ *
+ * PUBLIC: int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__dbenv_remove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+ ret = __dbenv_iremove(dbenv, db_home, flags);
+
+ if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __dbenv_iremove --
+ * Discard an environment, internal version.
+ */
+static int
+__dbenv_iremove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_FORCE | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->remove", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->remove");
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ return (ret);
+
+ /* Remove the environment. */
+ return (__db_e_remove(dbenv, flags));
+}
+
+/*
+ * __dbenv_config --
+ * Initialize the DB_ENV structure.
+ */
+static int
+__dbenv_config(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ FILE *fp;
+ int ret;
+ char *p, buf[256];
+
+ /*
+ * Set the database home. Do this before calling __db_appname,
+ * it uses the home directory.
+ */
+ if ((ret = __db_home(dbenv, db_home, flags)) != 0)
+ return (ret);
+
+ /* Parse the config file. */
+ if ((ret =
+ __db_appname(dbenv, DB_APP_NONE, "DB_CONFIG", 0, NULL, &p)) != 0)
+ return (ret);
+
+ fp = fopen(p, "r");
+ __os_free(dbenv, p);
+
+ if (fp != NULL) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((p = strchr(buf, '\n')) != NULL)
+ *p = '\0';
+ else if (strlen(buf) + 1 == sizeof(buf)) {
+ __db_err(dbenv, "DB_CONFIG: line too long");
+ (void)fclose(fp);
+ return (EINVAL);
+ }
+ if (buf[0] == '\0' ||
+ buf[0] == '#' || isspace((int)buf[0]))
+ continue;
+
+ if ((ret = __db_parse(dbenv, buf)) != 0) {
+ (void)fclose(fp);
+ return (ret);
+ }
+ }
+ (void)fclose(fp);
+ }
+
+ /*
+ * If no temporary directory path was specified in the config file,
+ * choose one.
+ */
+ if (dbenv->db_tmp_dir == NULL && (ret = __os_tmpdir(dbenv, flags)) != 0)
+ return (ret);
+
+ /*
+ * The locking file descriptor is rarely on. Set the fd to -1, not
+ * because it's ever tested, but to make sure we catch mistakes.
+ */
+ if ((ret = __os_calloc(
+ dbenv, 1, sizeof(*dbenv->lockfhp), &dbenv->lockfhp)) != 0)
+ return (ret);
+ dbenv->lockfhp->fd = -1;
+
+ /* Flag that the DB_ENV structure has been initialized. */
+ F_SET(dbenv, DB_ENV_OPEN_CALLED);
+
+ return (0);
+}
+
+/*
+ * __dbenv_close --
+ * DB_ENV destructor.
+ *
+ * PUBLIC: int __dbenv_close __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbenv_close(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ char **p;
+ int ret, t_ret;
+
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /*
+ * Before checking the reference count, we have to see if we
+ * were in the middle of restoring transactions and need to
+ * close the open files.
+ */
+ if (TXN_ON(dbenv) && (t_ret = __txn_preclose(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->rep_handle != NULL &&
+ (t_ret = __rep_preclose(dbenv, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->db_ref != 0) {
+ __db_err(dbenv,
+ "Database handles open during environment close");
+ if (ret == 0)
+ ret = EINVAL;
+ }
+
+ /*
+ * Detach from the regions and undo the allocations done by
+ * DB_ENV->open.
+ */
+ if ((t_ret = __dbenv_refresh(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Do per-subsystem destruction. */
+ __lock_dbenv_close(dbenv); /* void */
+ if ((t_ret = __rep_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+#ifdef HAVE_CRYPTO
+ if ((t_ret = __crypto_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+#endif
+
+ /* Release any string-based configuration parameters we've copied. */
+ if (dbenv->db_log_dir != NULL)
+ __os_free(dbenv, dbenv->db_log_dir);
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ if (dbenv->db_data_dir != NULL) {
+ for (p = dbenv->db_data_dir; *p != NULL; ++p)
+ __os_free(dbenv, *p);
+ __os_free(dbenv, dbenv->db_data_dir);
+ }
+
+ /* Discard the structure. */
+ memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
+ __os_free(NULL, dbenv);
+
+ return (ret);
+}
+
+/*
+ * __dbenv_refresh --
+ * Refresh the DB_ENV structure, releasing resources allocated by
+ * DB_ENV->open, and returning it to the state it was in just before
+ * open was called. (Note that this means that any state set by
+ * pre-open configuration functions must be preserved.)
+ */
+static int
+__dbenv_refresh(dbenv, orig_flags)
+ DB_ENV *dbenv;
+ u_int32_t orig_flags;
+{
+ DB_MPOOL *dbmp;
+ int ret, t_ret;
+
+ ret = 0;
+
+ /*
+ * Close subsystems, in the reverse order they were opened (txn
+ * must be first, it may want to discard locks and flush the log).
+ *
+ * !!!
+ * Note that these functions, like all of __dbenv_refresh, only undo
+ * the effects of __dbenv_open. Functions that undo work done by
+ * db_env_create or by a configurator function should go in
+ * __dbenv_close.
+ */
+ if (TXN_ON(dbenv) &&
+ (t_ret = __txn_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = __log_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Locking should come after logging, because closing log results
+ * in files closing which may require locks being released.
+ */
+ if (LOCKING_ON(dbenv) &&
+ (t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Discard DB list and its mutex.
+ * Discard the MT mutex.
+ *
+ * !!!
+ * This must be done before we close the mpool region because we
+ * may have allocated the DB handle mutex in the mpool region.
+ * It must be done *after* we close the log region, though, because
+ * we close databases and try to acquire the mutex when we close
+ * log file handles. Ick.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (dbenv->dblist_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->dblist_mutexp);
+ }
+ if (dbenv->mt_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->mt_mutexp);
+ }
+ if (dbenv->mt != NULL) {
+ __os_free(dbenv, dbenv->mt);
+ dbenv->mt = NULL;
+ }
+
+ if (MPOOL_ON(dbenv)) {
+ /*
+ * If it's a private environment, flush the contents to disk.
+ * Recovery would have put everything back together, but it's
+ * faster and cleaner to flush instead.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE) &&
+ (t_ret = dbenv->memp_sync(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __memp_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* Detach from the region. */
+ if (dbenv->reginfo != NULL) {
+ if ((t_ret = __db_e_detach(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * !!!
+ * Don't free dbenv->reginfo or set the reference to NULL,
+ * that was done by __db_e_detach().
+ */
+ }
+
+ /* Undo changes and allocations done by __dbenv_open. */
+ if (dbenv->db_home != NULL) {
+ __os_free(dbenv, dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+
+ dbenv->db_mode = 0;
+
+ if (dbenv->lockfhp != NULL) {
+ __os_free(dbenv, dbenv->lockfhp);
+ dbenv->lockfhp = NULL;
+ }
+
+ if (dbenv->recover_dtab != NULL) {
+ __os_free(dbenv, dbenv->recover_dtab);
+ dbenv->recover_dtab = NULL;
+ dbenv->recover_dtab_size = 0;
+ }
+
+ dbenv->flags = orig_flags;
+
+ return (ret);
+}
+
+#define DB_ADDSTR(add) { \
+ if ((add) != NULL) { \
+ /* If leading slash, start over. */ \
+ if (__os_abspath(add)) { \
+ p = str; \
+ slash = 0; \
+ } \
+ /* Append to the current string. */ \
+ len = strlen(add); \
+ if (slash) \
+ *p++ = PATH_SEPARATOR[0]; \
+ memcpy(p, add, len); \
+ p += len; \
+ slash = strchr(PATH_SEPARATOR, p[-1]) == NULL; \
+ } \
+}
+
+/*
+ * __db_appname --
+ * Given an optional DB environment, directory and file name and type
+ * of call, build a path based on the DB_ENV->open rules, and return
+ * it in allocated space.
+ *
+ * PUBLIC: int __db_appname __P((DB_ENV *, APPNAME,
+ * PUBLIC: const char *, u_int32_t, DB_FH *, char **));
+ */
+int
+__db_appname(dbenv, appname, file, tmp_oflags, fhp, namep)
+ DB_ENV *dbenv;
+ APPNAME appname;
+ const char *file;
+ u_int32_t tmp_oflags;
+ DB_FH *fhp;
+ char **namep;
+{
+ size_t len, str_len;
+ int data_entry, ret, slash, tmp_create;
+ const char *a, *b;
+ char *p, *str;
+
+ a = b = NULL;
+ data_entry = -1;
+ tmp_create = 0;
+
+ /*
+ * We don't return a name when creating temporary files, just a file
+ * handle. Default to an error now.
+ */
+ if (fhp != NULL)
+ F_CLR(fhp, DB_FH_VALID);
+ if (namep != NULL)
+ *namep = NULL;
+
+ /*
+ * Absolute path names are never modified. If the file is an absolute
+ * path, we're done.
+ */
+ if (file != NULL && __os_abspath(file))
+ return (__os_strdup(dbenv, file, namep));
+
+ /* Everything else is relative to the environment home. */
+ if (dbenv != NULL)
+ a = dbenv->db_home;
+
+retry: /*
+ * DB_APP_NONE:
+ * DB_HOME/file
+ * DB_APP_DATA:
+ * DB_HOME/DB_DATA_DIR/file
+ * DB_APP_LOG:
+ * DB_HOME/DB_LOG_DIR/file
+ * DB_APP_TMP:
+ * DB_HOME/DB_TMP_DIR/<create>
+ */
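+	/*
+	 * For example (hypothetical values): with DB_HOME set to /var/db
+	 * and DB_DATA_DIR set to "data", a DB_APP_DATA request for
+	 * "foo.db" builds the path /var/db/data/foo.db.
+	 */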
+ switch (appname) {
+ case DB_APP_NONE:
+ break;
+ case DB_APP_DATA:
+ if (dbenv != NULL && dbenv->db_data_dir != NULL &&
+ (b = dbenv->db_data_dir[++data_entry]) == NULL) {
+ data_entry = -1;
+ b = dbenv->db_data_dir[0];
+ }
+ break;
+ case DB_APP_LOG:
+ if (dbenv != NULL)
+ b = dbenv->db_log_dir;
+ break;
+ case DB_APP_TMP:
+ if (dbenv != NULL)
+ b = dbenv->db_tmp_dir;
+ tmp_create = 1;
+ break;
+ }
+
+ len =
+ (a == NULL ? 0 : strlen(a) + 1) +
+ (b == NULL ? 0 : strlen(b) + 1) +
+ (file == NULL ? 0 : strlen(file) + 1);
+
+ /*
+ * Allocate space to hold the current path information, as well as any
+ * temporary space that we're going to need to create a temporary file
+ * name.
+ */
+#define DB_TRAIL "BDBXXXXXX"
+ str_len = len + sizeof(DB_TRAIL) + 10;
+ if ((ret = __os_malloc(dbenv, str_len, &str)) != 0)
+ return (ret);
+
+ slash = 0;
+ p = str;
+ DB_ADDSTR(a);
+ DB_ADDSTR(b);
+ DB_ADDSTR(file);
+ *p = '\0';
+
+ /*
+ * If we're opening a data file, see if it exists. If it does,
+ * return it, otherwise, try and find another one to open.
+ */
+ if (__os_exists(str, NULL) != 0 && data_entry != -1) {
+ __os_free(dbenv, str);
+ b = NULL;
+ goto retry;
+ }
+
+ /* Create the file if so requested. */
+ if (tmp_create &&
+ (ret = __db_tmp_open(dbenv, tmp_oflags, str, fhp)) != 0) {
+ __os_free(dbenv, str);
+ return (ret);
+ }
+
+ if (namep == NULL)
+ __os_free(dbenv, str);
+ else
+ *namep = str;
+ return (0);
+}
+
+/*
+ * __db_home --
+ * Find the database home.
+ *
+ * PUBLIC: int __db_home __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__db_home(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ const char *p;
+
+ /*
+	 * Use db_home by default; this allows utilities to reasonably
+ * override the environment either explicitly or by using a -h
+ * option. Otherwise, use the environment if it's permitted
+ * and initialized.
+ */
+ if ((p = db_home) == NULL &&
+ (LF_ISSET(DB_USE_ENVIRON) ||
+ (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) &&
+ (p = getenv("DB_HOME")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal DB_HOME environment variable");
+ return (EINVAL);
+ }
+
+ return (p == NULL ? 0 : __os_strdup(dbenv, p, &dbenv->db_home));
+}
+
+#define __DB_OVFL(v, max) \
+ if (v > max) { \
+ __v = v; \
+ __max = max; \
+ goto toobig; \
+ }
+
+/*
+ * __db_parse --
+ * Parse a single NAME VALUE pair.
+ */
+static int
+__db_parse(dbenv, s)
+ DB_ENV *dbenv;
+ char *s;
+{
+ u_long __max, __v, v1, v2, v3;
+ u_int32_t flags;
+ char *name, *p, *value, v4;
+
+ /*
+ * !!!
+ * The value of 40 is hard-coded into format arguments to sscanf
+ * below, it can't be changed here without changing it there, too.
+ */
+ char arg[40];
+
+ /*
+ * Name/value pairs are parsed as two white-space separated strings.
+ * Leading and trailing white-space is trimmed from the value, but
+ * it may contain embedded white-space. Note: we use the isspace(3)
+ * macro because it's more portable, but that means that you can use
+ * characters like form-feed to separate the strings.
+ */
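+	/*
+	 * For example, a DB_CONFIG file might contain lines such as:
+	 *	set_cachesize 0 524288 1
+	 *	set_data_dir db_data
+	 *	set_flags db_txn_nosync
+	 * (illustrative values; each of these names is handled below).
+	 */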
+ name = s;
+ for (p = name; *p != '\0' && !isspace((int)*p); ++p)
+ ;
+ if (*p == '\0' || p == name)
+ goto illegal;
+ *p = '\0';
+ for (++p; isspace((int)*p); ++p)
+ ;
+ if (*p == '\0')
+ goto illegal;
+ value = p;
+ for (++p; *p != '\0'; ++p)
+ ;
+ for (--p; isspace((int)*p); --p)
+ ;
+ ++p;
+ if (p == value) {
+illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
+ return (EINVAL);
+ }
+ *p = '\0';
+
+ if (!strcasecmp(name, "set_cachesize")) {
+ if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ __DB_OVFL(v2, UINT32_T_MAX);
+ __DB_OVFL(v3, 10000);
+ return (dbenv->set_cachesize(
+ dbenv, (u_int32_t)v1, (u_int32_t)v2, (int)v3));
+ }
+
+ if (!strcasecmp(name, "set_data_dir") ||
+ !strcasecmp(name, "db_data_dir")) /* Compatibility. */
+ return (dbenv->set_data_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_flags")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_cdb_alldb"))
+ return (dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1));
+ if (!strcasecmp(value, "db_direct_db"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_DB, 1));
+ if (!strcasecmp(value, "db_direct_log"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_LOG, 1));
+ if (!strcasecmp(value, "db_nolocking"))
+ return (dbenv->set_flags(dbenv, DB_NOLOCKING, 1));
+ if (!strcasecmp(value, "db_nommap"))
+ return (dbenv->set_flags(dbenv, DB_NOMMAP, 1));
+ if (!strcasecmp(value, "db_overwrite"))
+ return (dbenv->set_flags(dbenv, DB_OVERWRITE, 1));
+ if (!strcasecmp(value, "db_nopanic"))
+ return (dbenv->set_flags(dbenv, DB_NOPANIC, 1));
+ if (!strcasecmp(value, "db_region_init"))
+ return (dbenv->set_flags(dbenv, DB_REGION_INIT, 1));
+ if (!strcasecmp(value, "db_txn_nosync"))
+ return (dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1));
+ if (!strcasecmp(value, "db_txn_write_nosync"))
+ return (
+ dbenv->set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1));
+ if (!strcasecmp(value, "db_yieldcpu"))
+ return (dbenv->set_flags(dbenv, DB_YIELDCPU, 1));
+ goto badarg;
+ }
+
+ if (!strcasecmp(name, "set_lg_bsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_bsize(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_regionmax")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_regionmax(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_dir") ||
+ !strcasecmp(name, "db_log_dir")) /* Compatibility. */
+ return (dbenv->set_lg_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_lk_detect")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+ if (!strcasecmp(value, "db_lock_default"))
+ flags = DB_LOCK_DEFAULT;
+ else if (!strcasecmp(value, "db_lock_expire"))
+ flags = DB_LOCK_EXPIRE;
+ else if (!strcasecmp(value, "db_lock_maxlocks"))
+ flags = DB_LOCK_MAXLOCKS;
+ else if (!strcasecmp(value, "db_lock_minlocks"))
+ flags = DB_LOCK_MINLOCKS;
+ else if (!strcasecmp(value, "db_lock_minwrite"))
+ flags = DB_LOCK_MINWRITE;
+ else if (!strcasecmp(value, "db_lock_oldest"))
+ flags = DB_LOCK_OLDEST;
+ else if (!strcasecmp(value, "db_lock_random"))
+ flags = DB_LOCK_RANDOM;
+ else if (!strcasecmp(value, "db_lock_youngest"))
+ flags = DB_LOCK_YOUNGEST;
+ else
+ goto badarg;
+ return (dbenv->set_lk_detect(dbenv, flags));
+ }
+
+ if (!strcasecmp(name, "set_lk_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_locks")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_locks(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_lockers")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_lockers(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_objects")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_objects(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lock_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_LOCK_TIMEOUT));
+ }
+
+ if (!strcasecmp(name, "set_mp_mmapsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_mp_mmapsize(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_region_init")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1 || v1 != 1)
+ goto badarg;
+ return (dbenv->set_flags(
+ dbenv, DB_REGION_INIT, v1 == 0 ? 0 : 1));
+ }
+
+ if (!strcasecmp(name, "set_shm_key")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_shm_key(dbenv, (long)v1));
+ }
+
+ if (!strcasecmp(name, "set_tas_spins")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tas_spins(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_tmp_dir") ||
+ !strcasecmp(name, "db_tmp_dir")) /* Compatibility.*/
+ return (dbenv->set_tmp_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_tx_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tx_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_txn_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_TXN_TIMEOUT));
+ }
+
+ if (!strcasecmp(name, "set_verbose")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_verb_chkpoint"))
+ flags = DB_VERB_CHKPOINT;
+ else if (!strcasecmp(value, "db_verb_deadlock"))
+ flags = DB_VERB_DEADLOCK;
+ else if (!strcasecmp(value, "db_verb_recovery"))
+ flags = DB_VERB_RECOVERY;
+ else if (!strcasecmp(value, "db_verb_waitsfor"))
+ flags = DB_VERB_WAITSFOR;
+ else
+ goto badarg;
+ return (dbenv->set_verbose(dbenv, flags, 1));
+ }
+
+ __db_err(dbenv, "unrecognized name-value pair: %s", s);
+ return (EINVAL);
+
+badarg: __db_err(dbenv, "incorrect arguments for name-value pair: %s", s);
+ return (EINVAL);
+
+toobig: __db_err(dbenv,
+ "%s: %lu larger than maximum value %lu", s, __v, __max);
+ return (EINVAL);
+}
+
+/*
+ * __db_tmp_open --
+ * Create a temporary file.
+ */
+static int
+__db_tmp_open(dbenv, tmp_oflags, path, fhp)
+ DB_ENV *dbenv;
+ u_int32_t tmp_oflags;
+ char *path;
+ DB_FH *fhp;
+{
+ u_int32_t id;
+ int mode, isdir, ret;
+ const char *p;
+ char *trv;
+
+ /*
+ * Check the target directory; if you have six X's and it doesn't
+ * exist, this runs for a *very* long time.
+ */
+ if ((ret = __os_exists(path, &isdir)) != 0) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+ if (!isdir) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(EINVAL));
+ return (EINVAL);
+ }
+
+ /* Build the path. */
+ for (trv = path; *trv != '\0'; ++trv)
+ ;
+ *trv = PATH_SEPARATOR[0];
+ for (p = DB_TRAIL; (*++trv = *p) != '\0'; ++p)
+ ;
+
+ /* Replace the X's with the process ID. */
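+	/*
+	 * For example (hypothetical), process ID 12345 turns the template
+	 * BDBXXXXXX into BDB012345.
+	 */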
+ for (__os_id(&id); *--trv == 'X'; id /= 10)
+ switch (id % 10) {
+ case 0: *trv = '0'; break;
+ case 1: *trv = '1'; break;
+ case 2: *trv = '2'; break;
+ case 3: *trv = '3'; break;
+ case 4: *trv = '4'; break;
+ case 5: *trv = '5'; break;
+ case 6: *trv = '6'; break;
+ case 7: *trv = '7'; break;
+ case 8: *trv = '8'; break;
+ case 9: *trv = '9'; break;
+ }
+ ++trv;
+
+ /* Set up open flags and mode. */
+ mode = __db_omode("rw----");
+
+ /* Loop, trying to open a file. */
+ for (;;) {
+ if ((ret = __os_open(dbenv, path,
+ tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
+ mode, fhp)) == 0)
+ return (0);
+
+ /*
+ * !!!:
+ * If we don't get an EEXIST error, then there's something
+ * seriously wrong. Unfortunately, if the implementation
+ * doesn't return EEXIST for O_CREAT and O_EXCL regardless
+ * of other possible errors, we've lost.
+ */
+ if (ret != EEXIST) {
+ __db_err(dbenv,
+ "tmp_open: %s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Tricky little algorithm for backward compatibility.
+ * Assumes sequential ordering of lower-case characters.
+ */
+ for (;;) {
+ if (*trv == '\0')
+ return (EINVAL);
+ if (*trv == 'z')
+ *trv++ = 'a';
+ else {
+ if (isdigit((int)*trv))
+ *trv = 'a';
+ else
+ ++*trv;
+ break;
+ }
+ }
+ }
+ /* NOTREACHED */
+}
diff --git a/libdb/env/env_recover.c b/libdb/env/env_recover.c
new file mode 100644
index 0000000..afe7353
--- /dev/null
+++ b/libdb/env/env_recover.c
@@ -0,0 +1,790 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id$";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+static int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, DB_LSN *));
+static int __log_earliest __P((DB_ENV *, DB_LOGC *, int32_t *, DB_LSN *));
+static double __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int));
+
+/*
+ * __db_apprec --
+ * Perform recovery. If max_lsn is non-NULL, then we are trying
+ * to synchronize this system up with another system that has a max
+ * LSN of max_lsn, so we need to roll back sufficiently far for that
+ * to work. See __log_backup for details.
+ *
+ * PUBLIC: int __db_apprec __P((DB_ENV *, DB_LSN *, u_int32_t));
+ */
+int
+__db_apprec(dbenv, max_lsn, flags)
+ DB_ENV *dbenv;
+ DB_LSN *max_lsn;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn;
+ DB_TXNREGION *region;
+ __txn_ckp_args *ckp_args;
+ time_t now, tlow;
+ int32_t log_size, low;
+ double nfiles;
+ int have_rec, is_thread, progress, ret, t_ret;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ u_int32_t hi_txn, lockid, txnid;
+ char *p, *pass, t1[60], t2[60];
+ void *txninfo;
+
+ COMPQUIET(nfiles, (double)0);
+
+ logc = NULL;
+ ckp_args = NULL;
+ dtab = NULL;
+ hi_txn = TXN_MAXIMUM;
+ lockid = DB_LOCK_INVALIDID;
+ txninfo = NULL;
+ pass = "initial";
+
+ /*
+ * XXX
+ * Get the log size. No locking required because we're single-threaded
+ * during recovery.
+ */
+ log_size =
+ ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
+
+ /*
+ * Save the state of the thread flag -- we don't need it on at the
+ * moment because we're single-threaded until recovery is complete.
+ */
+ is_thread = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
+ F_CLR(dbenv, DB_ENV_THREAD);
+
+ /* Set in-recovery flags. */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ F_SET(region, TXN_IN_RECOVERY);
+
+ /* Allocate a cursor for the log. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+
+ /*
+ * If the user is specifying recovery to a particular point in time
+ * or to a particular LSN, find the point to start recovery from.
+ */
+ ZERO_LSN(lowlsn);
+ if (max_lsn != NULL) {
+ if ((ret = __log_backup(dbenv, logc, max_lsn, &lowlsn)) != 0)
+ goto err;
+ } else if (dbenv->tx_timestamp != 0) {
+ if ((ret = __log_earliest(dbenv, logc, &low, &lowlsn)) != 0)
+ goto err;
+ if ((int32_t)dbenv->tx_timestamp < low) {
+ (void)snprintf(t1, sizeof(t1),
+ "%s", ctime(&dbenv->tx_timestamp));
+ if ((p = strchr(t1, '\n')) != NULL)
+ *p = '\0';
+ tlow = (time_t)low;
+ (void)snprintf(t2, sizeof(t2), "%s", ctime(&tlow));
+ if ((p = strchr(t2, '\n')) != NULL)
+ *p = '\0';
+ __db_err(dbenv,
+ "Invalid recovery timestamp %s; earliest time is %s",
+ t1, t2);
+ ret = EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+	 * Recovery is done in three passes over the log, preceded by a
+	 * pass (#0) that finds the position to start from:
+ * Pass #0:
+ * We need to find the position from which we will open files.
+ * We need to open files beginning with the earlier of the
+ * most recent checkpoint LSN and a checkpoint LSN before the
+ * recovery timestamp, if specified. We need to be before the
+ * most recent checkpoint LSN because we are going to collect
+ * information about which transactions were begun before we
+ * start rolling forward. Those that were should never be undone
+ * because queue cannot use LSNs to determine what operations can
+	 *	safely be aborted and it cannot roll back operations in
+ * transactions for which there may be records not processed
+ * during recovery. We need to consider earlier points in time
+ * in case we are recovering to a particular timestamp.
+ *
+ * Pass #1:
+ * Read forward through the log from the position found in pass 0
+ * opening and closing files, and recording transactions for which
+ * we've seen their first record (the transaction's prev_lsn is
+ * 0,0). At the end of this pass, we know all transactions for
+ * which we've seen begins and we have the "current" set of files
+ * open.
+ *
+ * Pass #2:
+ * Read backward through the log undoing any uncompleted TXNs.
+ * There are four cases:
+ * 1. If doing catastrophic recovery, we read to the
+ * beginning of the log
+	 *	    2. If we are doing normal recovery, then we have to roll
+ * back to the most recent checkpoint LSN.
+ * 3. If we are recovering to a point in time, then we have
+ * to roll back to the checkpoint whose ckp_lsn is earlier
+ * than the specified time. __log_earliest will figure
+ * this out for us.
+ * 4. If we are recovering back to a particular LSN, then
+ * we have to roll back to the checkpoint whose ckp_lsn
+ * is earlier than the max_lsn. __log_backup will figure
+ * that out for us.
+	 *	In case 2, "uncompleted TXNs" include all those that committed
+ * after the user's specified timestamp.
+ *
+ * Pass #3:
+ * Read forward through the log from the LSN found in pass #2,
+	 *	redoing any committed TXNs (which committed after any user-
+ * specified rollback point). During this pass, checkpoint
+ * file information is ignored, and file openings and closings
+ * are redone.
+ *
+ * ckp_lsn -- lsn of the last checkpoint or the first in the log.
+ * first_lsn -- the lsn where the forward passes begin.
+ * last_lsn -- the last lsn in the log, used for feedback
+ * lowlsn -- the lsn we are rolling back to, if we are recovering
+ * to a point in time.
+ * lsn -- temporary use lsn.
+ * stop_lsn -- the point at which forward roll should stop
+ */
+
+ /*
+ * Find out the last lsn, so that we can estimate how far along we
+ * are in recovery. This will help us determine how much log there
+ * is between the first LSN that we're going to be working with and
+ * the last one. We assume that each of the three phases takes the
+ * same amount of time (a false assumption) and then use the %-age
+ * of the amount of log traversed to figure out how much of the
+ * pass we've accomplished.
+ *
+ * If we can't find any log records, we're kind of done.
+ */
+#ifdef UMRW
+ ZERO_LSN(last_lsn);
+#endif
+ memset(&data, 0, sizeof(data));
+ if ((ret = logc->get(logc, &last_lsn, &data, DB_LAST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "Last log record not found");
+ goto err;
+ }
+
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_PREV)) == 0);
+
+ /*
+ * There are no transactions, so there is nothing to do unless
+ * we're recovering to an LSN. If we are, we need to proceed since
+ * we'll still need to do a vtruncate based on information we haven't
+ * yet collected.
+ */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
+ }
+ if (ret != 0)
+ goto err;
+
+ hi_txn = txnid;
+
+ /*
+ * Pass #0
+ * Find the LSN from which we begin OPENFILES.
+ *
+ * If this is a catastrophic recovery, or if no checkpoint exists
+ * in the log, the LSN is the first LSN in the log.
+ *
+ * Otherwise, it is the minimum of (1) the LSN in the last checkpoint
+ * and (2) the LSN in the checkpoint before any specified recovery
+ * timestamp or max_lsn.
+ */
+ /*
+ * Get the first LSN in the log; it's an initial default
+ * even if this is not a catastrophic recovery.
+ */
+ if ((ret = logc->get(logc, &ckp_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ first_lsn = ckp_lsn;
+ have_rec = 1;
+
+ if (!LF_ISSET(DB_RECOVER_FATAL)) {
+ if ((ret = __txn_getckp(dbenv, &ckp_lsn)) == 0 &&
+ (ret = logc->get(logc, &ckp_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)ckp_lsn.file,
+ (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ have_rec = 0;
+ }
+
+ /*
+ * If LSN (2) exists, use it if it's before LSN (1).
+ * (If LSN (1) doesn't exist, first_lsn is the
+ * beginning of the log, so will "win" this check.)
+ *
+ * XXX
+ * In the recovery-to-a-timestamp case, lowlsn is chosen by
+ * __log_earliest, and is the checkpoint LSN of the
+ * *earliest* checkpoint in the unreclaimed log. I
+ * (krinsky) believe that we could optimize this by looking
+ * instead for the LSN of the *latest* checkpoint before
+ * the timestamp of interest, but I'm not sure that this
+ * is worth doing right now. (We have to look for lowlsn
+ * and low anyway, to make sure the requested timestamp is
+ * somewhere in the logs we have, and all that's required
+ * is that we pick *some* checkpoint after the beginning of
+	 * the logs and before the timestamp.)
+ */
+ if ((dbenv->tx_timestamp != 0 || max_lsn != NULL) &&
+ log_compare(&lowlsn, &first_lsn) < 0) {
+ DB_ASSERT(have_rec == 0);
+ first_lsn = lowlsn;
+ }
+ }
+
+ /* Get the record at first_lsn if we don't have it already. */
+ if (!have_rec &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0) {
+ __db_err(dbenv, "Checkpoint LSN record [%ld][%ld] not found",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+ goto err;
+ }
+
+ if (dbenv->db_feedback != NULL) {
+ if (last_lsn.file == first_lsn.file)
+ nfiles = (double)
+ (last_lsn.offset - first_lsn.offset) / log_size;
+ else
+ nfiles = (double)(last_lsn.file - first_lsn.file) +
+ (double)(log_size - first_lsn.offset +
+ last_lsn.offset) / log_size;
+ /* We are going to divide by nfiles; make sure it isn't 0. */
+ if (nfiles == 0)
+ nfiles = (double)0.001;
+ }
+
+ /* Find a low txnid. */
+ ret = 0;
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0);
+
+ /*
+ * There are no transactions and we're not recovering to an LSN (see
+ * above), so there is nothing to do.
+ */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
+ }
+
+ /* Reset to the first lsn. */
+ if (ret != 0 || (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+
+ /* Initialize the transaction list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, txnid, hi_txn, max_lsn, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Pass #1
+ * Run forward through the log starting at the first relevant lsn.
+ */
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, &last_lsn, nfiles, 1)) != 0)
+ goto err;
+
+ /*
+ * Pass #2.
+ *
+ * We used first_lsn to tell us how far back we need to recover,
+ * use it here.
+ */
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dbenv, "Recovery starting from [%lu][%lu]",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+
+ /*
+ * If we are doing client recovery, then we need to allocate
+ * the page-info lock table.
+ */
+ if (max_lsn != NULL) {
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+ }
+
+ pass = "backward";
+ for (ret = logc->get(logc, &lsn, &data, DB_LAST);
+ ret == 0 && log_compare(&lsn, &first_lsn) >= 0;
+ ret = logc->get(logc, &lsn, &data, DB_PREV)) {
+ if (dbenv->db_feedback != NULL) {
+ progress = 34 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 0) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ if (max_lsn != NULL && (ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, &lsn, NULL, NULL, lockid)) != 0)
+ continue;
+
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_BACKWARD_ROLL, txninfo);
+ if (ret != 0) {
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+ }
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Pass #3. If we are recovering to a timestamp or to an LSN,
+ * we need to make sure that we don't roll-forward beyond that
+ * point because there may be non-transactional operations (e.g.,
+ * closes that would fail). The last_lsn variable is used for
+ * feedback calculations, but use it to set an initial stopping
+ * point for the forward pass, and then reset appropriately to
+ * derive a real stop_lsn that tells how far the forward pass
+ * should go.
+ */
+ pass = "forward";
+ stop_lsn = last_lsn;
+ if (max_lsn != NULL || dbenv->tx_timestamp != 0)
+ stop_lsn = ((DB_TXNHEAD *)txninfo)->maxlsn;
+
+ for (ret = logc->get(logc, &lsn, &data, DB_NEXT);
+ ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
+ /*
+ * If we are recovering to a timestamp or an LSN,
+ * we need to make sure that we don't try to roll
+ * forward beyond the soon-to-be end of log.
+ */
+ if (log_compare(&lsn, &stop_lsn) > 0)
+ break;
+
+ if (dbenv->db_feedback != NULL) {
+ progress = 67 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_FORWARD_ROLL, txninfo);
+ if (ret != 0) {
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+
+ }
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Process any pages that were on the limbo list and move them to
+ * the free list. Do this before checkpointing the database.
+ */
+ if ((ret = __db_do_the_limbo(dbenv, NULL, NULL, txninfo)) != 0)
+ goto err;
+
+ if (max_lsn == NULL)
+ region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid;
+
+ /* Take a checkpoint here to force any dirty data pages to disk. */
+ if (dbenv->tx_timestamp != 0) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+ __log_vtruncate(dbenv, &((DB_TXNHEAD *)txninfo)->maxlsn,
+ &((DB_TXNHEAD *)txninfo)->ckplsn);
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
+ goto err;
+
+ /* Close all the db files that are open. */
+ if ((ret = __dbreg_close_files(dbenv)) != 0)
+ goto err;
+
+ if (max_lsn != NULL) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+
+ /* We are going to truncate, so we'd best close the cursor. */
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ goto err;
+ __log_vtruncate(dbenv,
+ max_lsn, &((DB_TXNHEAD *)txninfo)->ckplsn);
+
+ /*
+ * Now we need to open files that should be open in order for
+ * client processing to continue. However, since we've
+ * truncated the log, we need to recompute from where the
+ * openfiles pass should begin.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ if ((ret = __txn_getckp(dbenv, &first_lsn)) == 0 &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)first_lsn.file,
+ (u_long)first_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ }
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, NULL, nfiles, 1)) != 0)
+ goto err;
+ } else if (region->stat.st_nrestores == 0)
+ /*
+ * If there are no prepared transactions that need resolution,
+ * we need to reset the transaction ID space and log this fact.
+ */
+ if ((ret = __txn_reset(dbenv)) != 0)
+ goto err;
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) {
+ (void)time(&now);
+ __db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
+ __db_err(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction ID",
+ ((DB_TXNHEAD *)txninfo)->maxid,
+ "Recovery checkpoint",
+ (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+msgerr: __db_err(dbenv,
+ "Recovery function for LSN %lu %lu failed on %s pass",
+ (u_long)lsn.file, (u_long)lsn.offset, pass);
+ }
+
+done:
+err: if (lockid != DB_LOCK_INVALIDID) {
+ if ((t_ret = __rep_unlockpages(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
+ if (ckp_args != NULL)
+ __os_free(dbenv, ckp_args);
+
+ dbenv->tx_timestamp = 0;
+
+ /* Restore the state of the thread flag, clear in-recovery flags. */
+ if (is_thread)
+ F_SET(dbenv, DB_ENV_THREAD);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ F_CLR(region, TXN_IN_RECOVERY);
+
+ return (ret);
+}
+
+/*
+ * Figure out how many logfiles we have processed. If we are moving
+ * forward (is_forward != 0), then we're computing current - low. If
+ * we are moving backward, we are computing high - current. max is
+ * the number of bytes per logfile.
+ */
+static double
+__lsn_diff(low, high, current, max, is_forward)
+ DB_LSN *low, *high, *current;
+ u_int32_t max;
+ int is_forward;
+{
+ double nf;
+
+ /*
+ * There are three cases in each direction. If you are in the
+ * same file, then all you need worry about is the difference in
+ * offsets. If you are in different files, then either your offsets
+ * put you either more or less than the integral difference in the
+ * number of files -- we need to handle both of these.
+ */
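+	/*
+	 * For example (hypothetical values): moving forward with a
+	 * 1000000-byte logfile, low [2][300000] and current [4][100000]
+	 * give (4 - 2 - 1) + (1000000 - 300000 + 100000) / 1000000, or
+	 * about 1.8 logfiles processed.
+	 */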
+ if (is_forward) {
+ if (current->file == low->file)
+ nf = (double)(current->offset - low->offset) / max;
+ else if (current->offset < low->offset)
+ nf = (double)(current->file - low->file - 1) +
+ (double)(max - low->offset + current->offset) / max;
+ else
+ nf = (double)(current->file - low->file) +
+ (double)(current->offset - low->offset) / max;
+ } else {
+ if (current->file == high->file)
+ nf = (double)(high->offset - current->offset) / max;
+ else if (current->offset > high->offset)
+ nf = (double)(high->file - current->file - 1) +
+ (double)
+ (max - current->offset + high->offset) / max;
+ else
+ nf = (double)(high->file - current->file) +
+ (double)(high->offset - current->offset) / max;
+ }
+ return (nf);
+}
+
+/*
+ * __log_backup --
+ *
+ * This is used to find the earliest log record to process when a client
+ * is trying to sync up with a master whose max LSN is less than this
+ * client's max lsn; we want to roll back everything after that
+ *
+ * Find the latest checkpoint whose ckp_lsn is less than the max lsn.
+ */
+static int
+__log_backup(dbenv, logc, max_lsn, start_lsn)
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ DB_LSN *max_lsn, *start_lsn;
+{
+ DB_LSN lsn;
+ DBT data;
+ __txn_ckp_args *ckp_args;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+ ckp_args = NULL;
+
+ /*
+ * Follow checkpoints through the log until we find one with
+ * a ckp_lsn less than max_lsn.
+ */
+ if ((ret = __txn_getckp(dbenv, &lsn)) != 0)
+ goto err;
+ while ((ret = logc->get(logc, &lsn, &data, DB_SET)) == 0) {
+ if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0)
+ return (ret);
+ if (log_compare(&ckp_args->ckp_lsn, max_lsn) <= 0) {
+ *start_lsn = ckp_args->ckp_lsn;
+ break;
+ }
+
+ lsn = ckp_args->prev_lsn;
+ if (IS_ZERO_LSN(lsn))
+ break;
+ __os_free(dbenv, ckp_args);
+ }
+
+ if (ckp_args != NULL)
+ __os_free(dbenv, ckp_args);
+err: if (IS_ZERO_LSN(*start_lsn) && (ret == 0 || ret == DB_NOTFOUND))
+ ret = logc->get(logc, start_lsn, &data, DB_FIRST);
+ return (ret);
+}
+
+/*
+ * __log_earliest --
+ *
+ * Return the earliest recovery point for the log files present. The
+ * earliest recovery time is the time stamp of the first checkpoint record
+ * whose checkpoint LSN is greater than the first LSN we process.
+ */
+static int
+__log_earliest(dbenv, logc, lowtime, lowlsn)
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int32_t *lowtime;
+ DB_LSN *lowlsn;
+{
+ DB_LSN first_lsn, lsn;
+ DBT data;
+ __txn_ckp_args *ckpargs;
+ u_int32_t rectype;
+ int cmp, ret;
+
+ memset(&data, 0, sizeof(data));
+ /*
+ * Read forward through the log looking for the first checkpoint
+ * record whose ckp_lsn is greater than first_lsn.
+ */
+
+ for (ret = logc->get(logc, &first_lsn, &data, DB_FIRST);
+ ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+ if (rectype != DB___txn_ckp)
+ continue;
+ if ((ret = __txn_ckp_read(dbenv, data.data, &ckpargs)) == 0) {
+ cmp = log_compare(&ckpargs->ckp_lsn, &first_lsn);
+ *lowlsn = ckpargs->ckp_lsn;
+ *lowtime = ckpargs->timestamp;
+
+ __os_free(dbenv, ckpargs);
+ if (cmp >= 0)
+ break;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * __env_openfiles --
+ * Perform the pass of recovery that opens files. This is used
+ * both during regular recovery and an initial call to txn_recover (since
+ * we need files open in order to abort prepared but not yet committed
+ * transactions).
+ *
+ * See the comments in db_apprec for a detailed description of the
+ * various recovery passes.
+ *
+ * If we are not doing feedback processing (i.e., we are doing txn_recover
+ * processing and in_recovery is zero), then last_lsn can be NULL.
+ *
+ * PUBLIC: int __env_openfiles __P((DB_ENV *, DB_LOGC *,
+ * PUBLIC: void *, DBT *, DB_LSN *, DB_LSN *, double, int));
+ */
+int
+__env_openfiles(dbenv, logc, txninfo,
+ data, open_lsn, last_lsn, nfiles, in_recovery)
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ void *txninfo;
+ DBT *data;
+ DB_LSN *open_lsn, *last_lsn;
+ int in_recovery;
+ double nfiles;
+{
+ DB_LSN lsn;
+ u_int32_t log_size;
+ int progress, ret;
+
+ /*
+ * XXX
+ * Get the log size. No locking required because we're single-threaded
+ * during recovery.
+ */
+ log_size =
+ ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
+
+ lsn = *open_lsn;
+ for (;;) {
+ if (in_recovery && dbenv->db_feedback != NULL) {
+ DB_ASSERT(last_lsn != NULL);
+ progress = (int)(33 * (__lsn_diff(open_lsn,
+ last_lsn, &lsn, log_size, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv,
+ dbenv->recover_dtab, dbenv->recover_dtab_size, data, &lsn,
+ in_recovery ? DB_TXN_OPENFILES : DB_TXN_POPENFILES,
+ txninfo);
+ if (ret != 0 && ret != DB_TXN_CKP) {
+ __db_err(dbenv,
+ "Recovery function for LSN %lu %lu failed",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ break;
+ }
+ if ((ret = logc->get(logc, &lsn, data, DB_NEXT)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ break;
+ }
+ }
+
+ return (ret);
+}
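+
+/*
+ * The progress passed to dbenv->db_feedback above is a percentage, and
+ * this open-files pass accounts for the first 33% of it.  A minimal
+ * sketch of a callback an application might register with
+ * DB_ENV->set_feedback to surface that progress (application code, not
+ * part of this library):
+ *
+ *	void
+ *	recovery_feedback(DB_ENV *dbenv, int opcode, int percent)
+ *	{
+ *		if (opcode == DB_RECOVER)
+ *			fprintf(stderr, "recovery %d%% complete\n", percent);
+ *	}
+ *
+ *	dbenv->set_feedback(dbenv, recovery_feedback);
+ */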
diff --git a/libdb/env/env_region.c b/libdb/env/env_region.c
new file mode 100644
index 0000000..e641e25
--- /dev/null
+++ b/libdb/env/env_region.c
@@ -0,0 +1,1256 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __db_des_destroy __P((DB_ENV *, REGION *));
+static int __db_des_get __P((DB_ENV *, REGINFO *, REGINFO *, REGION **));
+static int __db_e_remfile __P((DB_ENV *));
+static int __db_faultmem __P((DB_ENV *, void *, size_t, int));
+static void __db_region_destroy __P((DB_ENV *, REGINFO *));
+
+/*
+ * __db_e_attach --
+ *	Join/create the environment.
+ *
+ * PUBLIC: int __db_e_attach __P((DB_ENV *, u_int32_t *));
+ */
+int
+__db_e_attach(dbenv, init_flagsp)
+ DB_ENV *dbenv;
+ u_int32_t *init_flagsp;
+{
+ REGENV *renv;
+ REGENV_REF ref;
+ REGINFO *infop;
+ REGION *rp, tregion;
+ size_t size;
+ size_t nrw;
+ u_int32_t mbytes, bytes;
+ int retry_cnt, ret, segid;
+ char buf[sizeof(DB_REGION_FMT) + 20];
+
+#if !defined(HAVE_MUTEX_THREADS)
+ /*
+ * !!!
+ * If we don't have spinlocks, we need a file descriptor for fcntl(2)
+ * locking. We use the file handle from the REGENV file for this
+ * purpose.
+ *
+ * Since we may be using shared memory regions, e.g., shmget(2), and
+ * not a mapped-in regular file, the backing file may be only a few
+ * bytes in length. So, this depends on the ability to call fcntl to
+ * lock file offsets much larger than the actual physical file. I
+ * think that's safe -- besides, very few systems actually need this
+	 * kind of support; SunOS is the only one still in wide use of which
+ * I'm aware.
+ *
+ * The error case is if an application lacks spinlocks and wants to be
+ * threaded. That doesn't work because fcntl may lock the underlying
+ * process, including all its threads.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __db_err(dbenv,
+"architecture lacks fast mutexes: applications cannot be threaded");
+ return (EINVAL);
+ }
+#endif
+
+ /* Initialization */
+ retry_cnt = 0;
+
+ /* Repeated initialization. */
+loop: renv = NULL;
+
+ /* Set up the DB_ENV's REG_INFO structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(REGINFO), &infop)) != 0)
+ return (ret);
+ infop->type = REGION_TYPE_ENV;
+ infop->id = REGION_ID_ENV;
+ infop->mode = dbenv->db_mode;
+ infop->flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(infop, REGION_CREATE_OK);
+
+ /*
+ * We have to single-thread the creation of the REGENV region. Once
+ * it exists, we can do locking using locks in the region, but until
+ * then we have to be the only player in the game.
+ *
+ * If this is a private environment, we are only called once and there
+ * are no possible race conditions.
+ *
+ * If this is a public environment, we use the filesystem to ensure
+ * the creation of the environment file is single-threaded.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if ((ret = __os_strdup(dbenv,
+ "process-private", &infop->name)) != 0)
+ goto err;
+ goto creation;
+ }
+
+ /* Build the region name. */
+ (void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
+ goto err;
+
+ /*
+ * Try to create the file, if we have the authority. We have to ensure
+ * that multiple threads/processes attempting to simultaneously create
+ * the file are properly ordered. Open using the O_CREAT and O_EXCL
+ * flags so that multiple attempts to create the region will return
+ * failure in all but one. POSIX 1003.1 requires that EEXIST be the
+ * errno return value -- I sure hope they're right.
+ */
+ if (F_ISSET(dbenv, DB_ENV_CREATE)) {
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_REGION,
+ dbenv->db_mode, dbenv->lockfhp)) == 0)
+ goto creation;
+ if (ret != EEXIST) {
+ __db_err(dbenv,
+ "%s: %s", infop->name, db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /*
+	 * If we couldn't create the file, try to open it.  (If that fails,
+ * we're done.)
+ */
+ if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION | DB_OSO_DIRECT,
+ dbenv->db_mode, dbenv->lockfhp)) != 0)
+ goto err;
+
+ /*
+ * !!!
+ * The region may be in system memory not backed by the filesystem
+ * (more specifically, not backed by this file), and we're joining
+ * it. In that case, the process that created it will have written
+ * out a REGENV_REF structure as its only contents. We read that
+ * structure before we do anything further, e.g., we can't just map
+ * that file in and then figure out what's going on.
+ *
+ * All of this noise is because some systems don't have a coherent VM
+ * and buffer cache, and what's worse, when you mix operations on the
+ * VM and buffer cache, half the time you hang the system.
+ *
+	 * If the file is the size of a REGENV_REF structure, then we know
+ * the real region is in some other memory. (The only way you get a
+ * file that size is to deliberately write it, as it's smaller than
+ * any possible disk sector created by writing a file or mapping the
+ * file into memory.) In which case, retrieve the structure from the
+ * file and use it to acquire the referenced memory.
+ *
+ * If the structure is larger than a REGENV_REF structure, then this
+ * file is backing the shared memory region, and we just map it into
+ * memory.
+ *
+ * And yes, this makes me want to take somebody and kill them. (I
+ * digress -- but you have no freakin' idea. This is unbelievably
+ * stupid and gross, and I've probably spent six months of my life,
+ * now, trying to make different versions of it work.)
+ */
+ if ((ret = __os_ioinfo(dbenv, infop->name,
+ dbenv->lockfhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * !!!
+ * A size_t is OK -- regions get mapped into memory, and so can't
+ * be larger than a size_t.
+ */
+ size = mbytes * MEGABYTE + bytes;
+
+ /*
+ * If the size is less than the size of a REGENV_REF structure, the
+ * region (or, possibly, the REGENV_REF structure) has not yet been
+ * completely written. Wait awhile and try again.
+ *
+ * Otherwise, if the size is the size of a REGENV_REF structure,
+ * read it into memory and use it as a reference to the real region.
+ */
+ if (size <= sizeof(ref)) {
+ if (size != sizeof(ref))
+ goto retry;
+
+ if ((ret = __os_read(dbenv, dbenv->lockfhp, &ref,
+ sizeof(ref), &nrw)) != 0 || nrw < (size_t)sizeof(ref)) {
+ if (ret == 0)
+ ret = EIO;
+ __db_err(dbenv,
+ "%s: unable to read system-memory information from: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+ size = ref.size;
+ segid = ref.segid;
+
+ F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+ } else if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "%s: existing environment not created in system memory: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ } else
+ segid = INVALID_REGION_SEGID;
+
+ /*
+ * If not doing thread locking, we need to save the file handle for
+ * fcntl(2) locking. Otherwise, discard the handle, we no longer
+ * need it, and the less contact between the buffer cache and the VM,
+ * the better.
+ */
+#ifdef HAVE_MUTEX_THREADS
+ __os_closehandle(dbenv, dbenv->lockfhp);
+#endif
+
+ /* Call the region join routine to acquire the region. */
+ memset(&tregion, 0, sizeof(tregion));
+ tregion.size = (roff_t)size;
+ tregion.segid = segid;
+ if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+ goto err;
+
+ /*
+ * The environment's REGENV structure has to live at offset 0 instead
+ * of the usual shalloc information. Set the primary reference and
+ * correct the "addr" value to reference the shalloc region. Note,
+ * this means that all of our offsets (R_ADDR/R_OFFSET) get shifted
+ * as well, but that should be fine.
+ */
+ infop->primary = R_ADDR(infop, 0);
+ infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+
+ /*
+ * Check if the environment has had a catastrophic failure.
+ *
+ * Check the magic number to ensure the region is initialized. If the
+ * magic number isn't set, the lock may not have been initialized, and
+ * an attempt to use it could lead to random behavior.
+ *
+ * The panic and magic values aren't protected by any lock, so we never
+ * use them in any check that's more complex than set/not-set.
+ *
+ * !!!
+ * I'd rather play permissions games using the underlying file, but I
+ * can't because Windows/NT filesystems won't open files mode 0.
+ */
+ renv = infop->primary;
+ if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
+ ret = __db_panic_msg(dbenv);
+ goto err;
+ }
+ if (renv->magic != DB_REGION_MAGIC)
+ goto retry;
+
+ /* Make sure the region matches our build. */
+ if (renv->majver != DB_VERSION_MAJOR ||
+ renv->minver != DB_VERSION_MINOR ||
+ renv->patch != DB_VERSION_PATCH) {
+ __db_err(dbenv,
+ "Program version %d.%d.%d doesn't match environment version %d.%d.%d",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ renv->majver, renv->minver, renv->patch);
+#ifndef DIAGNOSTIC
+ ret = EINVAL;
+ goto err;
+#endif
+ }
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /*
+ * Finally! We own the environment now. Repeat the panic check, it's
+ * possible that it was set while we waited for the lock.
+ */
+ if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
+ ret = __db_panic_msg(dbenv);
+ goto err_unlock;
+ }
+
+ /*
+ * Get a reference to the underlying REGION information for this
+ * environment.
+ */
+ if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0 || rp == NULL) {
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ goto find_err;
+ }
+ infop->rp = rp;
+
+ /*
+ * There's still a possibility for inconsistent data. When we acquired
+ * the size of the region and attached to it, it might have still been
+ * growing as part of its creation. We can detect this by checking the
+ * size we originally found against the region's current size. (The
+ * region's current size has to be final, the creator finished growing
+ * it before releasing the environment for us to lock.)
+ */
+ if (rp->size != size) {
+err_unlock: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ goto retry;
+ }
+
+ /* Increment the reference count. */
+ ++renv->refcnt;
+
+ /*
+ * If our caller wants them, return the flags this environment was
+ * initialized with.
+ */
+ if (init_flagsp != NULL)
+ *init_flagsp = renv->init_flags;
+
+ /* Discard our lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /*
+ * Fault the pages into memory. Note, do this AFTER releasing the
+ * lock, because we're only reading the pages, not writing them.
+ */
+ (void)__db_faultmem(dbenv, infop->primary, rp->size, 0);
+
+ /* Everything looks good, we're done. */
+ dbenv->reginfo = infop;
+ return (0);
+
+creation:
+ /* Create the environment region. */
+ F_SET(infop, REGION_CREATE);
+
+ /*
+ * Allocate room for 50 REGION structures plus overhead (we're going
+ * to use this space for last-ditch allocation requests), although we
+ * should never need anything close to that.
+ *
+ * Encryption passwds are stored in the env region. Add that in too.
+ */
+ memset(&tregion, 0, sizeof(tregion));
+ tregion.size = (roff_t)(50 * sizeof(REGION) +
+ dbenv->passwd_len + 2048);
+ tregion.segid = INVALID_REGION_SEGID;
+ if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+ goto err;
+
+ /*
+ * Fault the pages into memory. Note, do this BEFORE we initialize
+ * anything, because we're writing the pages, not just reading them.
+ */
+ (void)__db_faultmem(dbenv, infop->addr, tregion.size, 1);
+
+ /*
+ * The first object in the region is the REGENV structure. This is
+	 * different from the other regions, and from everything else in
+	 * this region, where all objects are allocated from the pool, i.e.,
+ * there aren't any fixed locations. The remaining space is made
+ * available for later allocation.
+ *
+ * The allocation space must be size_t aligned, because that's what
+ * the initialization routine is going to store there. To make sure
+ * that happens, the REGENV structure was padded with a final size_t.
+ * No other region needs to worry about it because all of them treat
+ * the entire region as allocation space.
+ *
+ * Set the primary reference and correct the "addr" value to reference
+ * the shalloc region. Note, this requires that we "uncorrect" it at
+ * region detach, and that all of our offsets (R_ADDR/R_OFFSET) will be
+ * shifted as well, but that should be fine.
+ */
+ infop->primary = R_ADDR(infop, 0);
+ infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+ __db_shalloc_init(infop->addr, tregion.size - sizeof(REGENV));
+
+ /*
+ * Initialize the rest of the REGENV structure, except for the magic
+ * number which validates the file/environment.
+ */
+ renv = infop->primary;
+ renv->envpanic = 0;
+ db_version(&renv->majver, &renv->minver, &renv->patch);
+ SH_LIST_INIT(&renv->regionq);
+ renv->refcnt = 1;
+ renv->cipher_off = INVALID_ROFF;
+ renv->rep_off = INVALID_ROFF;
+
+ /*
+ * Initialize init_flags to store the flags that any other environment
+ * handle that uses DB_JOINENV to join this environment will need.
+ */
+ renv->init_flags = (init_flagsp == NULL) ? 0 : *init_flagsp;
+
+ /*
+ * Lock the environment.
+ *
+ * Check the lock call return. This is the first lock we initialize
+ * and acquire, and we have to know if it fails. (It CAN fail, e.g.,
+ * SunOS, when using fcntl(2) for locking and using an in-memory
+ * filesystem as the database home. But you knew that, I'm sure -- it
+ * probably wasn't even worth mentioning.)
+ */
+ if ((ret = __db_mutex_setup(dbenv, infop, &renv->mutex,
+ MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
+ __db_err(dbenv, "%s: unable to initialize environment lock: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ if (!F_ISSET(&renv->mutex, MUTEX_IGNORE) &&
+ (ret = __db_mutex_lock(dbenv, &renv->mutex)) != 0) {
+ __db_err(dbenv, "%s: unable to acquire environment lock: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Get the underlying REGION structure for this environment. Note,
+ * we created the underlying OS region before we acquired the REGION
+ * structure, which is backwards from the normal procedure. Update
+ * the REGION structure.
+ */
+ if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0) {
+find_err: __db_err(dbenv,
+ "%s: unable to find environment", infop->name);
+ if (ret == 0)
+ ret = EINVAL;
+ goto err;
+ }
+ infop->rp = rp;
+ rp->size = tregion.size;
+ rp->segid = tregion.segid;
+
+ /*
+ * !!!
+ * If we create an environment where regions are public and in system
+ * memory, we have to inform processes joining the environment how to
+ * attach to the shared memory segment. So, we write the shared memory
+ * identifier into the file, to be read by those other processes.
+ *
+ * XXX
+ * This is really OS-layer information, but I can't see any easy way
+ * to move it down there without passing down information that it has
+ * no right to know, e.g., that this is the one-and-only REGENV region
+ * and not some other random region.
+ */
+ if (tregion.segid != INVALID_REGION_SEGID) {
+ ref.size = tregion.size;
+ ref.segid = tregion.segid;
+ if ((ret = __os_write(
+ dbenv, dbenv->lockfhp, &ref, sizeof(ref), &nrw)) != 0) {
+ __db_err(dbenv,
+ "%s: unable to write out public environment ID: %s",
+ infop->name, db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /*
+ * If not doing thread locking, we need to save the file handle for
+ * fcntl(2) locking. Otherwise, discard the handle, we no longer
+ * need it, and the less contact between the buffer cache and the VM,
+ * the better.
+ */
+#if defined(HAVE_MUTEX_THREADS)
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ __os_closehandle(dbenv, dbenv->lockfhp);
+#endif
+
+ /* Validate the file. */
+ renv->magic = DB_REGION_MAGIC;
+
+ /* Discard our lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Everything looks good, we're done. */
+ dbenv->reginfo = infop;
+ return (0);
+
+err:
+retry: /* Close any open file handle. */
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, dbenv->lockfhp);
+
+ /*
+ * If we joined or created the region, detach from it. If we created
+ * it, destroy it. Note, there's a path in the above code where we're
+ * using a temporary REGION structure because we haven't yet allocated
+ * the real one. In that case the region address (addr) will be filled
+ * in, but the REGION pointer (rp) won't. Fix it.
+ */
+ if (infop->addr != NULL) {
+ if (infop->rp == NULL)
+ infop->rp = &tregion;
+
+ /* Reset the addr value that we "corrected" above. */
+ infop->addr = infop->primary;
+ (void)__os_r_detach(dbenv,
+ infop, F_ISSET(infop, REGION_CREATE));
+ }
+
+ /* Free the allocated name and/or REGINFO structure. */
+ if (infop->name != NULL)
+ __os_free(dbenv, infop->name);
+ __os_free(dbenv, infop);
+
+ /* If we had a temporary error, wait awhile and try again. */
+ if (ret == 0) {
+ if (++retry_cnt > 3) {
+ __db_err(dbenv, "unable to join the environment");
+ ret = EAGAIN;
+ } else {
+ __os_sleep(dbenv, retry_cnt * 3, 0);
+ goto loop;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_e_detach --
+ * Detach from the environment.
+ *
+ * PUBLIC: int __db_e_detach __P((DB_ENV *, int));
+ */
+int
+__db_e_detach(dbenv, destroy)
+ DB_ENV *dbenv;
+ int destroy;
+{
+ REGENV *renv;
+ REGINFO *infop;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ destroy = 1;
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /* Decrement the reference count. */
+ if (renv->refcnt == 0) {
+ __db_err(dbenv,
+ "region %lu (environment): reference count went negative",
+		    (u_long)infop->rp->id);
+ } else
+ --renv->refcnt;
+
+ /* Release the lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Close the locking file handle. */
+ if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, dbenv->lockfhp);
+
+ /* Reset the addr value that we "corrected" above. */
+ infop->addr = infop->primary;
+
+ /*
+ * If we are destroying the environment, we need to
+ * destroy any system resources backing the mutex, as well
+ * as any system resources that the replication system may have
+ * acquired and put in the main region.
+ *
+ * Do these now before we free the memory in __os_r_detach.
+ */
+ if (destroy) {
+ __rep_region_destroy(dbenv);
+ __db_mutex_destroy(&renv->mutex);
+ __db_mutex_destroy(&infop->rp->mutex);
+ }
+
+ /*
+ * Release the region, and kill our reference.
+ *
+ * We set the DB_ENV->reginfo field to NULL here and discard its memory.
+ * DB_ENV->remove calls __dbenv_remove to do the region remove, and
+ * __dbenv_remove attached and then detaches from the region. We don't
+ * want to return to DB_ENV->remove with a non-NULL DB_ENV->reginfo
+ * field because it will attempt to detach again as part of its cleanup.
+ */
+ (void)__os_r_detach(dbenv, infop, destroy);
+
+ if (infop->name != NULL)
+ __os_free(dbenv, infop->name);
+ __os_free(dbenv, dbenv->reginfo);
+ dbenv->reginfo = NULL;
+
+ return (0);
+}
+
+/*
+ * __db_e_remove --
+ * Discard an environment if it's not in use.
+ *
+ * PUBLIC: int __db_e_remove __P((DB_ENV *, u_int32_t));
+ */
+int
+__db_e_remove(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ REGENV *renv;
+ REGINFO *infop, reginfo;
+ REGION *rp;
+ u_int32_t db_env_reset;
+ int force, ret;
+
+ force = LF_ISSET(DB_FORCE) ? 1 : 0;
+ /*
+ * This routine has to walk a nasty line between not looking into
+ * the environment (which may be corrupted after an app or system
+ * crash), and removing everything that needs removing. What we
+ * do is:
+ * 1. Connect to the environment (so it better be OK).
+ * 2. If the environment is in use (reference count is non-zero),
+ * return EBUSY.
+ * 3. Overwrite the magic number so that any threads of control
+	 *	    attempting to connect will back off and retry.
+ * 4. Walk the list of regions. Connect to each region and then
+ * disconnect with the destroy flag set. This shouldn't cause
+ * any problems, even if the region is corrupted, because we
+ * should never be looking inside the region.
+ * 5. Walk the list of files in the directory, unlinking any
+ * files that match a region name. Unlink the environment
+ * file last.
+ *
+ * If the force flag is set, we do not acquire any locks during this
+ * process.
+ */
+ db_env_reset = F_ISSET(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
+ if (force)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ F_SET(dbenv, DB_ENV_NOPANIC);
+
+ /* Join the environment. */
+ if ((ret = __db_e_attach(dbenv, NULL)) != 0) {
+ /*
+ * If we can't join it, we assume that's because it doesn't
+ * exist. It would be better to know why we failed, but it
+ * probably isn't important.
+ */
+ ret = 0;
+ if (force)
+ goto remfiles;
+ goto done;
+ }
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /*
+ * If it's in use, we're done unless we're forcing the issue or the
+ * environment has panic'd. (Presumably, if the environment panic'd,
+ * the thread holding the reference count may not have cleaned up.)
+ */
+ if (renv->refcnt == 1 || renv->envpanic == 1 || force) {
+ /*
+ * Set the panic flag and overwrite the magic number.
+ *
+ * !!!
+ * From this point on, there's no going back, we pretty
+ * much ignore errors, and just whack on whatever we can.
+ */
+ renv->envpanic = 1;
+ renv->magic = 0;
+
+ /*
+ * Unlock the environment. We should no longer need the lock
+ * because we've poisoned the pool, but we can't continue to
+ * hold it either, because other routines may want it.
+ */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /*
+ * Attach to each sub-region and destroy it.
+ *
+ * !!!
+ * The REGION_CREATE_OK flag is set for Windows/95 -- regions
+ * are zero'd out when the last reference to the region goes
+ * away, in which case the underlying OS region code requires
+ * callers be prepared to create the region in order to join it.
+ */
+ memset(&reginfo, 0, sizeof(reginfo));
+restart: for (rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ if (rp->type == REGION_TYPE_ENV)
+ continue;
+
+ reginfo.id = rp->id;
+ reginfo.flags = REGION_CREATE_OK;
+ if ((ret = __db_r_attach(dbenv, &reginfo, 0)) != 0) {
+ __db_err(dbenv,
+ "region %s attach: %s", db_strerror(ret));
+ continue;
+ }
+ R_UNLOCK(dbenv, &reginfo);
+ if ((ret = __db_r_detach(dbenv, &reginfo, 1)) != 0) {
+ __db_err(dbenv,
+ "region detach: %s", db_strerror(ret));
+ continue;
+ }
+ /*
+ * If we have an error, we continue so we eventually
+ * reach the end of the list. If we succeed, restart
+ * the list because it was relinked when we destroyed
+ * the entry.
+ */
+ goto restart;
+ }
+
+ /* Destroy the environment's region. */
+ (void)__db_e_detach(dbenv, 1);
+
+ /* Discard any remaining physical files. */
+remfiles: (void)__db_e_remfile(dbenv);
+ } else {
+ /* Unlock the environment. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Discard the environment. */
+ (void)__db_e_detach(dbenv, 0);
+
+ ret = EBUSY;
+ }
+
+done: F_CLR(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
+ F_SET(dbenv, db_env_reset);
+
+ return (ret);
+}
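+
+/*
+ * For context, applications reach this code through DB_ENV->remove.  A
+ * minimal sketch (application code, error handling trimmed):
+ *
+ *	DB_ENV *dbenv;
+ *
+ *	if (db_env_create(&dbenv, 0) == 0)
+ *		(void)dbenv->remove(dbenv, "/path/to/env", 0);
+ *
+ * The handle may not be used again after the call, regardless of its
+ * return; pass DB_FORCE to discard the regions even when the reference
+ * count suggests the environment is still in use.
+ */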
+
+/*
+ * __db_e_remfile --
+ * Discard any region files in the filesystem.
+ */
+static int
+__db_e_remfile(dbenv)
+ DB_ENV *dbenv;
+{
+ static char *old_region_names[] = {
+ "__db_lock.share",
+ "__db_log.share",
+ "__db_mpool.share",
+ "__db_txn.share",
+ NULL
+ };
+ int cnt, fcnt, lastrm, ret;
+ u_int8_t saved_byte;
+ const char *dir;
+ char *p, **names, *path, buf[sizeof(DB_REGION_FMT) + 20];
+
+ /* Get the full path of a file in the environment. */
+ (void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+ if ((ret = __db_appname(dbenv, DB_APP_NONE, buf, 0, NULL, &path)) != 0)
+ return (ret);
+
+ /* Get the parent directory for the environment. */
+ if ((p = __db_rpath(path)) == NULL) {
+ p = path;
+ saved_byte = *p;
+
+ dir = PATH_DOT;
+ } else {
+ saved_byte = *p;
+ *p = '\0';
+
+ dir = path;
+ }
+
+ /* Get the list of file names. */
+ if ((ret = __os_dirlist(dbenv, dir, &names, &fcnt)) != 0)
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+
+ /* Restore the path, and free it. */
+ *p = saved_byte;
+ __os_free(dbenv, path);
+
+ if (ret != 0)
+ return (ret);
+
+ /*
+ * Search for valid region names, and remove them. We remove the
+ * environment region last, because it's the key to this whole mess.
+ */
+ for (lastrm = -1, cnt = fcnt; --cnt >= 0;) {
+ if (strlen(names[cnt]) != DB_REGION_NAME_LENGTH ||
+ memcmp(names[cnt], DB_REGION_FMT, DB_REGION_NAME_NUM) != 0)
+ continue;
+ if (strcmp(names[cnt], DB_REGION_ENV) == 0) {
+ lastrm = cnt;
+ continue;
+ }
+ for (p = names[cnt] + DB_REGION_NAME_NUM;
+ *p != '\0' && isdigit((int)*p); ++p)
+ ;
+ if (*p != '\0')
+ continue;
+
+ if (__db_appname(dbenv,
+ DB_APP_NONE, names[cnt], 0, NULL, &path) == 0) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
+ (void)__os_unlink(dbenv, path);
+ __os_free(dbenv, path);
+ }
+ }
+
+ if (lastrm != -1)
+ if (__db_appname(dbenv,
+ DB_APP_NONE, names[lastrm], 0, NULL, &path) == 0) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
+ (void)__os_unlink(dbenv, path);
+ __os_free(dbenv, path);
+ }
+ __os_dirfree(dbenv, names, fcnt);
+
+ /*
+ * !!!
+ * Backward compatibility -- remove region files from releases
+ * before 2.8.XX.
+ */
+ for (names = (char **)old_region_names; *names != NULL; ++names)
+ if (__db_appname(dbenv,
+ DB_APP_NONE, *names, 0, NULL, &path) == 0) {
+ (void)__os_unlink(dbenv, path);
+ __os_free(dbenv, path);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_e_stat --
+ * Statistics for the environment.
+ *
+ * PUBLIC: int __db_e_stat __P((DB_ENV *,
+ * PUBLIC: REGENV *, REGION *, int *, u_int32_t));
+ */
+int
+__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt, flags)
+ DB_ENV *dbenv;
+ REGENV *arg_renv;
+ REGION *arg_regions;
+ int *arg_regions_cnt;
+ u_int32_t flags;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REGION *rp;
+ int n, ret;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ rp = infop->rp;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &rp->mutex);
+
+ *arg_renv = *renv;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ renv->mutex.mutex_set_nowait = 0;
+ renv->mutex.mutex_set_wait = 0;
+ }
+
+ for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ n < *arg_regions_cnt && rp != NULL;
+ ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ arg_regions[n] = *rp;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ rp->mutex.mutex_set_nowait = 0;
+ rp->mutex.mutex_set_wait = 0;
+ }
+ }
+
+ /* Release the lock. */
+ rp = infop->rp;
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ *arg_regions_cnt = n == 0 ? n : n - 1;
+
+ return (0);
+}
+
+/*
+ * __db_r_attach --
+ * Join/create a region.
+ *
+ * PUBLIC: int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+ */
+int
+__db_r_attach(dbenv, infop, size)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ size_t size;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret;
+ char buf[sizeof(DB_REGION_FMT) + 20];
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /*
+ * Find or create a REGION structure for this region. If we create
+ * it, the REGION_CREATE flag will be set in the infop structure.
+ */
+ F_CLR(infop, REGION_CREATE);
+ if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0) {
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+ }
+ infop->rp = rp;
+ infop->type = rp->type;
+ infop->id = rp->id;
+
+ /* If we're creating the region, set the desired size. */
+ if (F_ISSET(infop, REGION_CREATE))
+ rp->size = (roff_t)size;
+
+ /* Join/create the underlying region. */
+ (void)snprintf(buf, sizeof(buf), DB_REGION_FMT, infop->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
+ goto err;
+ if ((ret = __os_r_attach(dbenv, infop, rp)) != 0)
+ goto err;
+
+ /*
+ * Fault the pages into memory. Note, do this BEFORE we initialize
+ * anything because we're writing pages in created regions, not just
+ * reading them.
+ */
+ (void)__db_faultmem(dbenv,
+ infop->addr, rp->size, F_ISSET(infop, REGION_CREATE));
+
+ /*
+ * !!!
+ * The underlying layer may have just decided that we are going
+ * to create the region. There are various system issues that
+ * can result in a useless region that requires re-initialization.
+ *
+ * If we created the region, initialize it for allocation.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ ((REGION *)(infop->addr))->magic = DB_REGION_MAGIC;
+
+ (void)__db_shalloc_init(infop->addr, rp->size);
+ }
+
+ /*
+ * If the underlying REGION isn't the environment, acquire a lock
+ * for it and release our lock on the environment.
+ */
+ if (infop->type != REGION_TYPE_ENV) {
+ MUTEX_LOCK(dbenv, &rp->mutex);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ }
+
+ return (0);
+
+ /* Discard the underlying region. */
+err: if (infop->addr != NULL)
+ (void)__os_r_detach(dbenv,
+ infop, F_ISSET(infop, REGION_CREATE));
+ infop->rp = NULL;
+ infop->id = INVALID_REGION_ID;
+
+ /* Discard the REGION structure if we created it. */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ (void)__db_des_destroy(dbenv, rp);
+ F_CLR(infop, REGION_CREATE);
+ }
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ return (ret);
+}
+
+/*
+ * __db_r_detach --
+ * Detach from a region.
+ *
+ * PUBLIC: int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__db_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret, t_ret;
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+ rp = infop->rp;
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ destroy = 1;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /* Acquire the lock for the REGION. */
+ MUTEX_LOCK(dbenv, &rp->mutex);
+
+ /*
+ * We need to call destroy on per-subsystem info before
+ * we free the memory associated with the region.
+ */
+ if (destroy)
+ __db_region_destroy(dbenv, infop);
+
+ /* Detach from the underlying OS region. */
+ ret = __os_r_detach(dbenv, infop, destroy);
+
+ /* Release the REGION lock. */
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ /* If we destroyed the region, discard the REGION structure. */
+ if (destroy &&
+ ((t_ret = __db_des_destroy(dbenv, rp)) != 0) && ret == 0)
+ ret = t_ret;
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Destroy the structure. */
+ if (infop->name != NULL)
+ __os_free(dbenv, infop->name);
+
+ return (ret);
+}
+
+/*
+ * __db_des_get --
+ * Return a reference to the shared information for a REGION,
+ * optionally creating a new entry.
+ */
+static int
+__db_des_get(dbenv, env_infop, infop, rpp)
+ DB_ENV *dbenv;
+ REGINFO *env_infop, *infop;
+ REGION **rpp;
+{
+ REGENV *renv;
+ REGION *rp, *first_type;
+ u_int32_t maxid;
+ int ret;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ *rpp = NULL;
+ renv = env_infop->primary;
+
+ /*
+ * If the caller wants to join a region, walk through the existing
+ * regions looking for a matching ID (if ID specified) or matching
+	 * type (if type specified).  If we return based on a matching type,
+	 * return the "primary" region, that is, the first region that was
+ * created of this type.
+ *
+	 * Track the maximum region ID so we can allocate a new region;
+	 * note that we have to start at 1 because the primary environment
+ * uses ID == 1.
+ */
+ maxid = REGION_ID_ENV;
+ for (first_type = NULL,
+ rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ if (infop->id != INVALID_REGION_ID) {
+ if (infop->id == rp->id)
+ break;
+ continue;
+ }
+ if (infop->type == rp->type &&
+ F_ISSET(infop, REGION_JOIN_OK) &&
+ (first_type == NULL || first_type->id > rp->id))
+ first_type = rp;
+
+ if (rp->id > maxid)
+ maxid = rp->id;
+ }
+ if (rp == NULL)
+ rp = first_type;
+
+ /*
+ * If we didn't find a region and we can't create the region, fail.
+ * The caller generates any error message.
+ */
+ if (rp == NULL && !F_ISSET(infop, REGION_CREATE_OK))
+ return (ENOENT);
+
+ /*
+ * If we didn't find a region, create and initialize a REGION structure
+ * for the caller. If id was set, use that value, otherwise we use the
+ * next available ID.
+ */
+ if (rp == NULL) {
+ if ((ret = __db_shalloc(env_infop->addr,
+ sizeof(REGION), MUTEX_ALIGN, &rp)) != 0)
+ return (ret);
+
+ /* Initialize the region. */
+ memset(rp, 0, sizeof(*rp));
+ if ((ret = __db_mutex_setup(dbenv, env_infop, &rp->mutex,
+ MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
+ __db_shalloc_free(env_infop->addr, rp);
+ return (ret);
+ }
+ rp->segid = INVALID_REGION_SEGID;
+
+ /*
+ * Set the type and ID; if no region ID was specified,
+ * allocate one.
+ */
+ rp->type = infop->type;
+ rp->id = infop->id == INVALID_REGION_ID ? maxid + 1 : infop->id;
+
+ SH_LIST_INSERT_HEAD(&renv->regionq, rp, q, __db_region);
+ F_SET(infop, REGION_CREATE);
+ }
+
+ *rpp = rp;
+ return (0);
+}
+
+/*
+ * __db_des_destroy --
+ * Destroy a reference to a REGION.
+ */
+static int
+__db_des_destroy(dbenv, rp)
+ DB_ENV *dbenv;
+ REGION *rp;
+{
+ REGINFO *infop;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ infop = dbenv->reginfo;
+
+ SH_LIST_REMOVE(rp, q, __db_region);
+ __db_mutex_destroy(&rp->mutex);
+ __db_shalloc_free(infop->addr, rp);
+
+ return (0);
+}
+
+/*
+ * __db_faultmem --
+ * Fault the region into memory.
+ */
+static int
+__db_faultmem(dbenv, addr, size, created)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t size;
+ int created;
+{
+ int ret;
+ u_int8_t *p, *t;
+
+ /*
+ * It's sometimes significantly faster to page-fault in all of the
+ * region's pages before we run the application, as we see nasty
+ * side-effects when we page-fault while holding various locks, i.e.,
+ * the lock takes a long time to acquire because of the underlying
+ * page fault, and the other threads convoy behind the lock holder.
+ *
+ * If we created the region, we write a non-zero value so that the
+ * system can't cheat. If we're just joining the region, we can
+ * only read the value and try to confuse the compiler sufficiently
+ * that it doesn't figure out that we're never really using it.
+ */
+ ret = 0;
+ if (F_ISSET(dbenv, DB_ENV_REGION_INIT)) {
+ if (created)
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ p[0] = 0xdb;
+ else
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ ret |= p[0];
+ }
+
+ return (ret);
+}
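+
+/*
+ * The DB_ENV_REGION_INIT test above corresponds to the public
+ * DB_REGION_INIT flag.  A sketch of how an application would enable the
+ * pre-faulting (return value unchecked):
+ *
+ *	dbenv->set_flags(dbenv, DB_REGION_INIT, 1);
+ *
+ * set before DB_ENV->open, after which each region the handle creates or
+ * joins is touched one byte per VM page as above.
+ */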
+
+/*
+ * __db_region_destroy --
+ * Destroy per-subsystem region information.
+ * Called with the region already locked.
+ */
+static void
+__db_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ __lock_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_LOG:
+ __log_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_MPOOL:
+ __mpool_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_TXN:
+ __txn_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_ENV:
+ case REGION_TYPE_MUTEX:
+ break;
+ default:
+ DB_ASSERT(0);
+ break;
+ }
+}
diff --git a/libdb/examples_c/README b/libdb/examples_c/README
new file mode 100644
index 0000000..3886151
--- /dev/null
+++ b/libdb/examples_c/README
@@ -0,0 +1,29 @@
+# $Id$
+
+ex_access.c Using just the DB access methods.
+
+ex_apprec Application-specific recovery.
+
+ex_btrec.c Using the BTREE access method with record numbers.
+
+ex_env.c Setting up the DB environment.
+
+ex_lock.c Locking.
+
+ex_mpool.c Shared memory buffer pools.
+
+ex_repquote Replication. This creates a toy stock quote server
+ with DB's single-master, multiple-client replication,
+ with communication over TCP.
+
+ex_tpcb.c TPC/B.
+ Ex_tpcb sets up a framework in which to run a TPC/B test.
+ Database initialization (the -i flag) and running the
+ benchmark (-n flag) must take place separately (i.e.,
+ first create the database, then run 1 or more copies of
+ the benchmark). Furthermore, when running more than one
+ TPCB process, it is necessary to run the deadlock detector
+ (db_deadlock), since it is possible for concurrent tpcb
+ processes to deadlock. For performance measurement, it
+			will also be beneficial to run the db_checkpoint process.
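+
+			As a rough illustration of that split (the flag
+			values shown are arbitrary):
+
+				ex_tpcb -i		# create and populate the database
+				ex_tpcb -n 1000		# run the benchmark for 1000 transactions
+
+			with db_deadlock (and, for timing runs, db_checkpoint)
+			running alongside the second command when several
+			ex_tpcb processes are used.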
diff --git a/libdb/examples_c/bench_001.c b/libdb/examples_c/bench_001.c
new file mode 100644
index 0000000..7ef4442
--- /dev/null
+++ b/libdb/examples_c/bench_001.c
@@ -0,0 +1,382 @@
+/*-
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * bench_001 - time bulk fetch interface.
+ *	Without -R, builds a btree according to the arguments.
+ *	With -R, runs and times bulk fetches.  If -d is specified,
+ *	the DB_MULTIPLE interface is used during reads;
+ *	otherwise the DB_MULTIPLE_KEY interface is used.
+ *
+ * ARGUMENTS:
+ * -c cachesize [1000 * pagesize]
+ * -d number of duplicates [none]
+ * -E don't use environment
+ * -I Just initialize the environment
+ * -i number of read iterations [1000000]
+ * -l length of data item [20]
+ * -n number of keys [1000000]
+ * -p pagesize [65536]
+ * -R perform read test.
+ * -T incorporate transactions.
+ *
+ * COMPILE:
+ * cc -I /usr/local/BerkeleyDB/include \
+ * -o bench_001 -O2 bench_001.c /usr/local/BerkeleyDB/lib/libdb.so
+ */
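+
+/*
+ * RUN (illustrative invocations only; the flag values are arbitrary):
+ *	./bench_001 -n 100000 -l 32		# build the btree
+ *	./bench_001 -R -i 10000			# time bulk reads of it
+ *
+ * With -d N, each key is written with N duplicates and the read pass uses
+ * the DB_MULTIPLE interface rather than DB_MULTIPLE_KEY.
+ */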
+#include <sys/types.h>
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define DATABASE "bench_001.db"
+
+int main(int, char *[]);
+void usage(void);
+
+const char
+ *progname = "bench_001"; /* Program name. */
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home, prefix, cachesize, txn)
+ char *home, *prefix;
+ int cachesize, txn;
+{
+ DB_ENV *dbenv;
+ int flags, ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_env_create");
+ return (NULL);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ (void)dbenv->set_cachesize(dbenv, 0,
+ cachesize == 0 ? 50 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ flags = DB_CREATE | DB_INIT_MPOOL;
+ if (txn)
+ flags |= DB_INIT_TXN | DB_INIT_LOCK;
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+ (void)dbenv->close(dbenv, 0);
+ return (NULL);
+ }
+ return (dbenv);
+}
+
+/*
+ * get -- loop getting batches of records.
+ *
+ */
+int
+get(dbp, txn, datalen, num, dups, iter, countp)
+ DB *dbp;
+ int txn, datalen, num, dups, iter, *countp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DB_TXN *txnp;
+ u_int32_t len, klen;
+ int count, flags, i, j, ret;
+ void *pointer, *dp, *kp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = &j;
+ key.size = sizeof(j);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_USERMEM;
+ data.data = malloc(datalen*1024*1024);
+ data.ulen = data.size = datalen*1024*1024;
+ count = 0;
+ flags = DB_SET;
+ if (!dups)
+ flags |= DB_MULTIPLE_KEY;
+ else
+ flags |= DB_MULTIPLE;
+ for (i = 0; i < iter; i++) {
+ txnp = NULL;
+ if (txn)
+ dbp->dbenv->txn_begin(dbp->dbenv, NULL, &txnp, 0);
+ dbp->cursor(dbp, txnp, &dbcp, 0);
+
+ j = random() % num;
+ switch (ret = dbcp->c_get(dbcp, &key, &data, flags)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbcp->dbp, ret, "DBC->c_get");
+ return (ret);
+ }
+ DB_MULTIPLE_INIT(pointer, &data);
+ if (dups)
+ while (pointer != NULL) {
+ DB_MULTIPLE_NEXT(pointer, &data, dp, len);
+ if (dp != NULL)
+ count++;
+ }
+ else
+ while (pointer != NULL) {
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, kp, klen, dp, len);
+ if (kp != NULL)
+ count++;
+ }
+ dbcp->c_close(dbcp);
+ if (txn)
+ txnp->commit(txnp, 0);
+ }
+
+ *countp = count;
+ return (0);
+}
+
+/*
+ * fill - fill a db
+ *	Since we may have opened/created the db with transactions,
+ *	we need to populate it with transactions as well.  We'll bundle the puts
+ * 10 to a transaction.
+ */
+#define PUTS_PER_TXN 10
+int
+fill(dbenv, dbp, txn, datalen, num, dups)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int txn, datalen, num, dups;
+{
+ DBT key, data;
+ DB_TXN *txnp;
+ struct data {
+ int id;
+ char str[1];
+ } *data_val;
+ int count, i, ret;
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ txnp = NULL;
+ ret = 0;
+ count = 0;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ key.data = &i;
+ key.size = sizeof(i);
+ data.data = data_val = (struct data *) malloc(datalen);
+ memcpy(data_val->str, "0123456789012345678901234567890123456789",
+ datalen - sizeof (data_val->id));
+ data.size = datalen;
+ data.flags = DB_DBT_USERMEM;
+
+ for (i = 0; i < num; i++) {
+ if (txn != 0 && i % PUTS_PER_TXN == 0) {
+ if (txnp != NULL) {
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err;
+ }
+ if ((ret =
+ dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ goto err;
+ }
+ data_val->id = 0;
+ do {
+ switch (ret =
+ dbp->put(dbp, txnp, &key, &data, 0)) {
+ case 0:
+ count++;
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ goto err;
+ }
+ } while (++data_val->id < dups);
+ }
+ if (txnp != NULL)
+ ret = txnp->commit(txnp, 0);
+
+ printf("%d\n", count);
+ return (ret);
+
+err: if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ return (ret);
+}
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_TXN *txnp;
+ struct timeval start_time, end_time;
+ double secs;
+ int cache, ch, count, datalen, dups, env, init, iter, num, pagesize;
+ int ret, rflag, txn;
+
+ txnp = NULL;
+ datalen = 20;
+ iter = num = 1000000;
+ env = 1;
+ dups = init = rflag = txn = 0;
+
+ pagesize = 65536;
+ cache = 1000 * pagesize;
+
+ while ((ch = getopt(argc, argv, "c:d:EIi:l:n:p:RT")) != EOF)
+ switch (ch) {
+ case 'c':
+ cache = atoi(optarg);
+ break;
+ case 'd':
+ dups = atoi(optarg);
+ break;
+ case 'E':
+ env = 0;
+ break;
+ case 'I':
+ init = 1;
+ break;
+ case 'i':
+ iter = atoi(optarg);
+ break;
+ case 'l':
+ datalen = atoi(optarg);
+ break;
+ case 'n':
+ num = atoi(optarg);
+ break;
+ case 'p':
+ pagesize = atoi(optarg);
+ break;
+ case 'R':
+ rflag = 1;
+ break;
+ case 'T':
+ txn = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Remove the previous database. */
+ if (!rflag) {
+ if (env)
+ system("rm -rf BENCH_001; mkdir BENCH_001");
+ else
+ (void)unlink(DATABASE);
+ }
+
+ dbenv = NULL;
+ if (env == 1 &&
+ (dbenv = db_init("BENCH_001", "bench_001", cache, txn)) == NULL)
+ return (-1);
+ if (init)
+ exit(0);
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ exit(EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, pagesize)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if (dups && (ret = dbp->set_flags(dbp, DB_DUP)) != 0) {
+ dbp->err(dbp, ret, "set_flags");
+ goto err1;
+ }
+
+ if (env == 0 && (ret = dbp->set_cachesize(dbp, 0, cache, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+
+ if (txn != 0)
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ goto err1;
+
+ if ((ret = dbp->open(
+ dbp, txnp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ goto err1;
+ }
+
+ if (txnp != NULL)
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err1;
+
+ if (rflag) {
+ /* If no environment, fill the cache. */
+ if (!env && (ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+
+ /* Time the get loop. */
+ gettimeofday(&start_time, NULL);
+ if ((ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+ gettimeofday(&end_time, NULL);
+ secs =
+ (((double)end_time.tv_sec * 1000000 + end_time.tv_usec) -
+ ((double)start_time.tv_sec * 1000000 + start_time.tv_usec))
+ / 1000000;
+ printf("%d records read using %d batches in %.2f seconds: ",
+ count, iter, secs);
+ printf("%.0f records/second\n", (double)count / secs);
+
+ } else if ((ret = fill(dbenv, dbp, txn, datalen, num, dups)) != 0)
+ goto err1;
+
+ /* Close everything down. */
+ if ((ret = dbp->close(dbp, rflag ? DB_NOSYNC : 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (ret);
+
+err1: (void)dbp->close(dbp, 0);
+ return (1);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: %s %s\n\t%s\n",
+ progname, "[-EIRT] [-c cachesize] [-d dups]",
+ "[-i iterations] [-l datalen] [-n keys] [-p pagesize]");
+ exit(EXIT_FAILURE);
+}
diff --git a/libdb/examples_c/ex_access.c b/libdb/examples_c/ex_access.c
new file mode 100644
index 0000000..c993145
--- /dev/null
+++ b/libdb/examples_c/ex_access.c
@@ -0,0 +1,162 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+#define DATABASE "access.db"
+int main __P((int, char *[]));
+int usage __P((void));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind;
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ch, ret, rflag;
+ char *database, *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "ex_access"; /* Program name. */
+
+ rflag = 0;
+ while ((ch = getopt(argc, argv, "r")) != EOF)
+ switch (ch) {
+ case 'r':
+ rflag = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Accept optional database name. */
+ database = *argv == NULL ? DATABASE : argv[0];
+
+ /* Optionally discard the database. */
+ if (rflag)
+ (void)remove(database);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->set_cachesize(dbp, 0, 32 * 1024, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", database);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
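+	/*
+	 * For example (hypothetical input), typing "hello" stores the
+	 * key "hello" with the data "olleh".
+	 */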
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if (strcmp(buf, "exit\n") == 0 || strcmp(buf, "quit\n") == 0)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ }
+ printf("\n");
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /* Initialize the key/data pair so the flags aren't set. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ goto err2;
+ }
+
+ /* Close everything down. */
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "usage: ex_access [-r] [database]\n");
+ return (EXIT_FAILURE);
+}
diff --git a/libdb/examples_c/ex_apprec/auto_rebuild b/libdb/examples_c/ex_apprec/auto_rebuild
new file mode 100644
index 0000000..3425198
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/auto_rebuild
@@ -0,0 +1,9 @@
+# Script to rebuild automatically generated files for ex_apprec.
+
+E=../examples_c/ex_apprec
+
+cd ../../dist
+awk -f gen_rec.awk \
+ -v source_file=$E/ex_apprec_auto.c \
+ -v header_file=$E/ex_apprec_auto.h \
+ -v template_file=$E/ex_apprec_template < $E/ex_apprec.src
diff --git a/libdb/examples_c/ex_apprec/ex_apprec.c b/libdb/examples_c/ex_apprec/ex_apprec.c
new file mode 100644
index 0000000..52dca61
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec.c
@@ -0,0 +1,267 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+
+int apprec_dispatch __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int open_env __P((const char *, FILE *, const char *, DB_ENV **));
+int verify_absence __P((DB_ENV *, const char *));
+int verify_presence __P((DB_ENV *, const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *txn;
+ DBT dirnamedbt;
+	int ch, ret;
+	const char *home;
+	char dirname[256];
+ const char *progname = "ex_apprec"; /* Program name. */
+
+ /* Default home. */
+ home = "TESTDIR";
+
+ while ((ch = getopt(argc, argv, "h:")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ default:
+ fprintf(stderr, "usage: %s [-h home]", progname);
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Set up environment.\n");
+ if ((ret = open_env(home, stderr, progname, &dbenv)) != 0)
+ return (EXIT_FAILURE);
+
+ printf("Create a directory in a transaction.\n");
+ /*
+ * This application's convention is to log the full directory name,
+ * including trailing nul.
+ */
+ memset(&dirnamedbt, 0, sizeof(dirnamedbt));
+ sprintf(dirname, "%s/MYDIRECTORY", home);
+ dirnamedbt.data = dirname;
+ dirnamedbt.size = strlen(dirname) + 1;
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ return (EXIT_FAILURE);
+ }
+
+ /* Remember, always log actions before you execute them! */
+ memset(&lsn, 0, sizeof(lsn));
+ if ((ret =
+ ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirnamedbt)) != 0) {
+ dbenv->err(dbenv, ret, "mkdir_log");
+ return (EXIT_FAILURE);
+ }
+ if (mkdir(dirname, 0755) != 0) {
+ dbenv->err(dbenv, errno, "mkdir");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Now abort the transaction and verify that the directory goes away. */
+ printf("Abort the transaction.\n");
+ if ((ret = txn->abort(txn)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's absence: ");
+ verify_absence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Now do the same thing over again, only with a commit this time. */
+ printf("Create a directory in a transaction.\n");
+ memset(&dirnamedbt, 0, sizeof(dirnamedbt));
+ sprintf(dirname, "%s/MYDIRECTORY", home);
+ dirnamedbt.data = dirname;
+ dirnamedbt.size = strlen(dirname) + 1;
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ return (EXIT_FAILURE);
+ }
+
+ memset(&lsn, 0, sizeof(lsn));
+ if ((ret =
+ ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirnamedbt)) != 0) {
+ dbenv->err(dbenv, ret, "mkdir_log");
+ return (EXIT_FAILURE);
+ }
+ if (mkdir(dirname, 0755) != 0) {
+ dbenv->err(dbenv, errno, "mkdir");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+	/* Now commit the transaction and verify that the directory remains. */
+ printf("Commit the transaction.\n");
+ if ((ret = txn->commit(txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_commit");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ printf("Now remove the directory, then run recovery.\n");
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ if (rmdir(dirname) != 0) {
+ fprintf(stderr,
+ "%s: rmdir failed with error %s", progname,
+ strerror(errno));
+ }
+ verify_absence(dbenv, dirname);
+
+ /* Opening with DB_RECOVER runs recovery. */
+ if ((ret = open_env(home, stderr, progname, &dbenv)) != 0)
+ return (EXIT_FAILURE);
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+
+ return (EXIT_SUCCESS);
+}
+
+int
+open_env(home, errfp, progname, dbenvp)
+ const char *home, *progname;
+ FILE *errfp;
+ DB_ENV **dbenvp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /* Set up our custom recovery dispatch function. */
+ if ((ret = dbenv->set_app_dispatch(dbenv, apprec_dispatch)) != 0) {
+ dbenv->err(dbenv, ret, "set_app_dispatch");
+ return (ret);
+ }
+
+ /*
+ * Open the environment with full transactional support, running
+ * recovery.
+ */
+ if ((ret =
+ dbenv->open(dbenv, home, DB_CREATE | DB_RECOVER | DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ return (ret);
+ }
+
+ *dbenvp = dbenv;
+ return (0);
+}
+
+/*
+ * Sample application dispatch function to handle user-specified log record
+ * types.
+ */
+int
+apprec_dispatch(dbenv, dbt, lsn, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsn;
+ db_recops op;
+{
+ u_int32_t rectype;
+
+ /* Pull the record type out of the log record. */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ switch (rectype) {
+ case DB_ex_apprec_mkdir:
+ return (ex_apprec_mkdir_recover(dbenv, dbt, lsn, op, NULL));
+ default:
+ /*
+ * We've hit an unexpected, allegedly user-defined record
+ * type.
+ */
+ dbenv->errx(dbenv, "Unexpected log record type encountered");
+ return (EINVAL);
+ }
+}
+
+int
+verify_absence(dbenv, dirname)
+ DB_ENV *dbenv;
+ const char *dirname;
+{
+
+ if (access(dirname, F_OK) == 0) {
+ dbenv->errx(dbenv, "Error--directory present!");
+ exit(EXIT_FAILURE);
+ }
+
+ return (0);
+}
+
+int
+verify_presence(dbenv, dirname)
+ DB_ENV *dbenv;
+ const char *dirname;
+{
+
+ if (access(dirname, F_OK) != 0) {
+ dbenv->errx(dbenv, "Error--directory not present!");
+ exit(EXIT_FAILURE);
+ }
+
+ return (0);
+}
diff --git a/libdb/examples_c/ex_apprec/ex_apprec.h b/libdb/examples_c/ex_apprec/ex_apprec.h
new file mode 100644
index 0000000..852107a
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec.h
@@ -0,0 +1,24 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _EX_APPREC_H_
+#define _EX_APPREC_H_
+
+#include "ex_apprec_auto.h"
+
+int ex_apprec_mkdir_log
+ __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *));
+int ex_apprec_mkdir_print
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int ex_apprec_mkdir_read
+ __P((DB_ENV *, void *, ex_apprec_mkdir_args **));
+int ex_apprec_mkdir_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+
+#endif /* !_EX_APPREC_H_ */
diff --git a/libdb/examples_c/ex_apprec/ex_apprec.src b/libdb/examples_c/ex_apprec/ex_apprec.src
new file mode 100644
index 0000000..5f78a27
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec.src
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX ex_apprec
+
+/*
+ * This is the source file used to generate the application-specific recovery
+ * functions used by the ex_apprec example. It should be turned into usable
+ * source code (including a template for the recovery function itself) by
+ * changing to the dist directory of the DB distribution and
+ * running the gen_rec.awk script there as follows:
+ *
+ * awk -f ./gen_rec.awk \
+ * -v source_file=../examples_c/ex_apprec/ex_apprec_auto.c \
+ * -v header_file=../examples_c/ex_apprec/ex_apprec_auto.h \
+ * -v template_file=../examples_c/ex_apprec/ex_apprec_template \
+ * < ../examples_c/ex_apprec/ex_apprec.src
+ */
+
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <stdlib.h>
+INCLUDE #include <string.h>
+INCLUDE
+INCLUDE #include <db.h>
+INCLUDE
+INCLUDE #include "ex_apprec.h"
+
+/*
+ * mkdir: used to create a directory
+ *
+ * dirname: relative or absolute pathname of the directory to be created
+ */
+BEGIN mkdir 10000
+DBT dirname DBT s
+END
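+
+/*
+ * Running gen_rec.awk over this descriptor produces the DB_ex_apprec_mkdir
+ * record-type constant and the ex_apprec_mkdir_args structure (type, txnid,
+ * prev_lsn, dirname) in ex_apprec_auto.h, the _log, _read and _print
+ * marshalling routines in ex_apprec_auto.c, and a recovery-function
+ * skeleton in ex_apprec_template.
+ */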
diff --git a/libdb/examples_c/ex_apprec/ex_apprec_auto.c b/libdb/examples_c/ex_apprec/ex_apprec_auto.c
new file mode 100644
index 0000000..d8c27e7
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec_auto.c
@@ -0,0 +1,188 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+/*
+ * PUBLIC: int ex_apprec_mkdir_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *));
+ */
+int
+ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags,
+ dirname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *dirname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ex_apprec_mkdir;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (dirname == NULL ? 0 : dirname->size);
+ if ((logrec.data = malloc(logrec.size)) == NULL)
+ return (ENOMEM);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
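+	/*
+	 * Marshalled record layout: record type, then transaction id, then
+	 * the previous LSN, then the dirname DBT as a length followed by
+	 * its bytes (a zero length stands for a NULL DBT).
+	 */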
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (dirname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dirname->size, sizeof(dirname->size));
+ bp += sizeof(dirname->size);
+ memcpy(bp, dirname->data, dirname->size);
+ bp += dirname->size;
+ }
+
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)ex_apprec_mkdir_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ free(logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int ex_apprec_mkdir_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+ex_apprec_mkdir_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ ex_apprec_mkdir_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = ex_apprec_mkdir_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]ex_apprec_mkdir: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tdirname: ");
+ for (i = 0; i < argp->dirname.size; i++) {
+ ch = ((u_int8_t *)argp->dirname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ free(argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int ex_apprec_mkdir_read __P((DB_ENV *, void *,
+ * PUBLIC: ex_apprec_mkdir_args **));
+ */
+int
+ex_apprec_mkdir_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ ex_apprec_mkdir_args **argpp;
+{
+ ex_apprec_mkdir_args *argp;
+ u_int8_t *bp;
+
+	/* Keep the compiler quiet. */
+	dbenv = NULL;
+ if ((argp = malloc(sizeof(ex_apprec_mkdir_args) + sizeof(DB_TXN))) == NULL)
+ return (ENOMEM);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->dirname, 0, sizeof(argp->dirname));
+ memcpy(&argp->dirname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dirname.data = bp;
+ bp += argp->dirname.size;
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int ex_apprec_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+ex_apprec_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int __db_add_recovery __P((DB_ENV *,
+ int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ size_t *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ ex_apprec_mkdir_print, DB_ex_apprec_mkdir)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/libdb/examples_c/ex_apprec/ex_apprec_auto.h b/libdb/examples_c/ex_apprec/ex_apprec_auto.h
new file mode 100644
index 0000000..358b1a9
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec_auto.h
@@ -0,0 +1,13 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef ex_apprec_AUTO_H
+#define ex_apprec_AUTO_H
+#define DB_ex_apprec_mkdir 10000
+typedef struct _ex_apprec_mkdir_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT dirname;
+} ex_apprec_mkdir_args;
+
+#endif
diff --git a/libdb/examples_c/ex_apprec/ex_apprec_rec.c b/libdb/examples_c/ex_apprec/ex_apprec_rec.c
new file mode 100644
index 0000000..983ecbe
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec_rec.c
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * This file is based on the template file ex_apprec_template. Note that
+ * because ex_apprec_mkdir, like most application-specific recovery functions,
+ * does not make use of DB-private structures, it has actually been simplified
+ * significantly.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+
+/*
+ * ex_apprec_mkdir_recover --
+ * Recovery function for mkdir.
+ *
+ * PUBLIC: int ex_apprec_mkdir_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+ex_apprec_mkdir_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ ex_apprec_mkdir_args *argp;
+ int ret;
+
+ argp = NULL;
+
+ /*
+ * Shut up the compiler--"info" is used for the recovery functions
+ * belonging to transaction meta-operations such as txn_create, and
+ * need not concern us here either.
+ */
+ info = NULL;
+
+ if ((ret = ex_apprec_mkdir_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ switch (op) {
+ case DB_TXN_ABORT:
+ case DB_TXN_BACKWARD_ROLL:
+ /*
+ * If we're aborting, we need to remove the directory if it
+ * exists. We log the trailing zero in pathnames, so we can
+ * simply pass the data part of the DBT into rmdir as a string.
+ * (Note that we don't have any alignment guarantees, but for
+ * a char * this doesn't matter.)
+ *
+ * Ignore all errors other than ENOENT; DB may attempt to undo
+ * or redo operations without knowing whether they have already
+ * been done or undone, so we should never assume in a recovery
+ * function that the task definitely needs doing or undoing.
+ */
+ ret = rmdir(argp->dirname.data);
+ if (ret != 0 && errno != ENOENT)
+ dbenv->err(dbenv, ret, "Error in abort of mkdir");
+ else
+ ret = 0;
+ break;
+ case DB_TXN_FORWARD_ROLL:
+ /*
+ * The forward direction is just the opposite; here, we ignore
+ * EEXIST, because the directory may already exist.
+ */
+ ret = mkdir(argp->dirname.data, 0755);
+ if (ret != 0 && errno != EEXIST)
+ dbenv->err(dbenv,
+ ret, "Error in roll-forward of mkdir");
+ else
+ ret = 0;
+ break;
+ default:
+ /*
+ * We might want to handle DB_TXN_PRINT or DB_TXN_APPLY here,
+ * too, but we don't try to print the log records and aren't
+ * using replication, so there's no need to in this example.
+ */
+ dbenv->errx(dbenv, "Unexpected operation type\n");
+ return (EINVAL);
+ }
+
+ /*
+ * The recovery function is responsible for returning the LSN of the
+ * previous log record in this transaction, so that transaction aborts
+ * can follow the chain backwards.
+ *
+ * (If we'd wanted the LSN of this record earlier, we could have
+ * read it from lsnp, as well--but because we weren't working with
+ * pages or other objects that store their LSN and base recovery
+ * decisions on it, we didn't need to.)
+ */
+ *lsnp = argp->prev_lsn;
+
+out: if (argp != NULL)
+ free(argp);
+ return (ret);
+}
diff --git a/libdb/examples_c/ex_apprec/ex_apprec_template b/libdb/examples_c/ex_apprec/ex_apprec_template
new file mode 100644
index 0000000..e67ccb6
--- /dev/null
+++ b/libdb/examples_c/ex_apprec/ex_apprec_template
@@ -0,0 +1,75 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/ex_apprec.h"
+#include "dbinc/log.h"
+
+/*
+ * ex_apprec_mkdir_recover --
+ * Recovery function for mkdir.
+ *
+ * PUBLIC: int ex_apprec_mkdir_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+ex_apprec_mkdir_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ ex_apprec_mkdir_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(ex_apprec_mkdir_print);
+ REC_INTRO(ex_apprec_mkdir_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/libdb/examples_c/ex_btrec.c b/libdb/examples_c/ex_btrec.c
new file mode 100644
index 0000000..c003aa3
--- /dev/null
+++ b/libdb/examples_c/ex_btrec.c
@@ -0,0 +1,203 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+#define DATABASE "access.db"
+#define WORDLIST "../test/wordlist"
+int main __P((void));
+
+int ex_btrec __P((void));
+void show __P((const char *, DBT *, DBT *));
+
+int
+main()
+{
+ return (ex_btrec() == 1 ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+int
+ex_btrec()
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ DB_BTREE_STAT *statp;
+ FILE *fp;
+ db_recno_t recno;
+ u_int32_t len;
+ int cnt, ret;
+ char *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "ex_btrec"; /* Program name. */
+
+ /* Open the word database. */
+ if ((fp = fopen(WORDLIST, "r")) == NULL) {
+ fprintf(stderr, "%s: open %s: %s\n",
+ progname, WORDLIST, db_strerror(errno));
+ return (1);
+ }
+
+ /* Remove the previous database. */
+ (void)remove(DATABASE);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ dbp->set_errfile(dbp, stderr);
+	dbp->set_errpfx(dbp, progname);
+	/* 1K page sizes. */
+	if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+		dbp->err(dbp, ret, "set_pagesize");
+		return (1);
+	}
+	/* Record numbers. */
+ if ((ret = dbp->set_flags(dbp, DB_RECNUM)) != 0) {
+ dbp->err(dbp, ret, "set_flags: DB_RECNUM");
+ return (1);
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "open: %s", DATABASE);
+ return (1);
+ }
+
+ /*
+ * Insert records into the database, where the key is the word
+ * preceded by its record number, and the data is the same, but
+ * in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (cnt = 1; cnt <= 1000; ++cnt) {
+ (void)sprintf(buf, "%04d_", cnt);
+ if (fgets(buf + 4, sizeof(buf) - 4, fp) == NULL)
+ break;
+ len = strlen(buf);
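+		/* Reverse the word (minus the trailing newline) into rbuf. */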
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ if ((ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) != 0) {
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ }
+ }
+
+ /* Close the word database. */
+ (void)fclose(fp);
+
+ /* Print out the number of records in the database. */
+ if ((ret = dbp->stat(dbp, &statp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err1;
+ }
+ printf("%s: database contains %lu records\n",
+ progname, (u_long)statp->bt_ndata);
+ free(statp);
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /*
+ * Prompt the user for a record number, then retrieve and display
+ * that record.
+ */
+ for (;;) {
+ /* Get a record number. */
+ printf("recno #> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ recno = atoi(buf);
+
+ /*
+		 * Reset the key each time; the dbp->c_get() routine returns
+		 * the key and data pair, not just the key!
+ */
+ key.data = &recno;
+ key.size = sizeof(recno);
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_SET_RECNO)) != 0)
+ goto get_err;
+
+ /* Display the key and data. */
+ show("k/d\t", &key, &data);
+
+ /* Move the cursor a record forward. */
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) != 0)
+ goto get_err;
+
+ /* Display the key and data. */
+ show("next\t", &key, &data);
+
+ /*
+ * Retrieve the record number for the following record into
+ * local memory.
+ */
+ data.data = &recno;
+ data.size = sizeof(recno);
+ data.ulen = sizeof(recno);
+ data.flags |= DB_DBT_USERMEM;
+ if ((ret = dbcp->c_get(dbcp, &key, &data, DB_GET_RECNO)) != 0) {
+get_err: dbp->err(dbp, ret, "DBcursor->get");
+ if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto err2;
+ } else
+ printf("retrieved recno: %lu\n", (u_long)recno);
+
+ /* Reset the data DBT. */
+ memset(&data, 0, sizeof(data));
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+
+ return (0);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (ret);
+
+}
+
+/*
+ * show --
+ * Display a key/data pair.
+ */
+void
+show(msg, key, data)
+ const char *msg;
+ DBT *key, *data;
+{
+ printf("%s%.*s : %.*s\n", msg,
+ (int)key->size, (char *)key->data,
+ (int)data->size, (char *)data->data);
+}
diff --git a/libdb/examples_c/ex_dbclient.c b/libdb/examples_c/ex_dbclient.c
new file mode 100644
index 0000000..5f199b9
--- /dev/null
+++ b/libdb/examples_c/ex_dbclient.c
@@ -0,0 +1,226 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define DATABASE_HOME "database"
+
+#define DATABASE "access.db"
+
+int db_clientrun __P((DB_ENV *, const char *));
+int ex_dbclient_run __P((const char *, FILE *, const char *, const char *));
+int main __P((int, char *[]));
+
+/*
+ * An example of a client program that uses a Berkeley DB RPC
+ * (client/server) environment.
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ const char *home;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s hostname\n", argv[0]);
+ return (EXIT_FAILURE);
+ }
+
+ /*
+	 * All of the shared database files live in DATABASE_HOME.
+ */
+ home = DATABASE_HOME;
+ return (ex_dbclient_run(home,
+ stderr, argv[1], argv[0]) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+ex_dbclient(host)
+ const char *host;
+{
+ const char *home;
+ const char *progname = "ex_dbclient"; /* Program name. */
+ int ret;
+
+ /*
+	 * All of the shared database files live in DATABASE_HOME.
+ */
+ home = DATABASE_HOME;
+
+ if ((ret = ex_dbclient_run(home, stderr, host, progname)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+int
+ex_dbclient_run(home, errfp, host, progname)
+ const char *home, *host, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret, retry;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ retry = 0;
+retry:
+ while (retry < 5) {
+ /*
+ * Set the server host we are talking to.
+ */
+ if ((ret = dbenv->set_rpc_server(dbenv, NULL, host, 10000,
+ 10000, 0)) != 0) {
+ fprintf(stderr, "Try %d: DB_ENV->set_rpc_server: %s\n",
+ retry, db_strerror(ret));
+ retry++;
+ sleep(15);
+ } else
+ break;
+ }
+
+ if (retry >= 5) {
+ fprintf(stderr,
+ "DB_ENV->set_rpc_server: %s\n", db_strerror(ret));
+ dbenv->close(dbenv, 0);
+ return (1);
+ }
+ /*
+ * We want to specify the shared memory buffer pool cachesize,
+ * but everything else is the default.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ dbenv->close(dbenv, 0);
+ return (1);
+ }
+ /*
+ * We have multiple processes reading/writing these files, so
+ * we need concurrency control and a shared buffer pool, but
+ * not logging or transactions.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ if (ret == DB_NOSERVER)
+ goto retry;
+ return (1);
+ }
+
+ ret = db_clientrun(dbenv, progname);
+ printf("db_clientrun returned %d\n", ret);
+ if (ret == DB_NOSERVER)
+ goto retry;
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (1);
+ }
+ return (0);
+}
+
+int
+db_clientrun(dbenv, progname)
+ DB_ENV *dbenv;
+ const char *progname;
+{
+ DB *dbp;
+ DBT key, data;
+ u_int32_t len;
+ int ret;
+ char *p, *t, buf[1024], rbuf[1024];
+
+ /* Remove the previous database. */
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (ret);
+ }
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
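+		/* Reverse the input line (minus the newline) into rbuf. */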
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ memset(&data, 0, sizeof(DBT));
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->get");
+ break;
+ }
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (0);
+
+err1: (void)dbp->close(dbp, 0);
+ return (ret);
+}
diff --git a/libdb/examples_c/ex_env.c b/libdb/examples_c/ex_env.c
new file mode 100644
index 0000000..68e4e3b
--- /dev/null
+++ b/libdb/examples_c/ex_env.c
@@ -0,0 +1,135 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <db.h>
+
+#ifdef macintosh
+#define DATABASE_HOME ":database"
+#define CONFIG_DATA_DIR ":database"
+#else
+#ifdef DB_WIN32
+#define DATABASE_HOME "\\tmp\\database"
+#define CONFIG_DATA_DIR "\\database\\files"
+#else
+#define DATABASE_HOME "/tmp/database"
+#define CONFIG_DATA_DIR "/database/files"
+#endif
+#endif
+
+int db_setup __P((const char *, const char *, FILE *, const char *));
+int db_teardown __P((const char *, const char *, FILE *, const char *));
+int main __P((void));
+
+/*
+ * An example of a program creating/configuring a Berkeley DB environment.
+ */
+int
+main()
+{
+ const char *data_dir, *home;
+ const char *progname = "ex_env"; /* Program name. */
+
+ /*
+ * All of the shared database files live in DATABASE_HOME, but
+ * data files will live in CONFIG_DATA_DIR.
+ */
+ home = DATABASE_HOME;
+ data_dir = CONFIG_DATA_DIR;
+
+ printf("Setup env\n");
+ if (db_setup(home, data_dir, stderr, progname) != 0)
+ return (EXIT_FAILURE);
+
+ printf("Teardown env\n");
+ if (db_teardown(home, data_dir, stderr, progname) != 0)
+ return (EXIT_FAILURE);
+
+ return (EXIT_SUCCESS);
+}
+
+int
+db_setup(home, data_dir, errfp, progname)
+ const char *home, *data_dir, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /*
+ * We want to specify the shared memory buffer pool cachesize,
+ * but everything else is the default.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ dbenv->close(dbenv, 0);
+ return (1);
+ }
+
+ /* Databases are in a subdirectory. */
+ (void)dbenv->set_data_dir(dbenv, data_dir);
+
+ /* Open the environment with full transactional support. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
+ 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ return (1);
+ }
+
+ /* Do something interesting... */
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (1);
+ }
+ return (0);
+}
+
+int
+db_teardown(home, data_dir, errfp, progname)
+ const char *home, *data_dir, *progname;
+ FILE *errfp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /* Remove the shared database regions. */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+
+ (void)dbenv->set_data_dir(dbenv, data_dir);
+ if ((ret = dbenv->remove(dbenv, home, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->remove: %s\n", db_strerror(ret));
+ return (1);
+ }
+ return (0);
+}
diff --git a/libdb/examples_c/ex_lock.c b/libdb/examples_c/ex_lock.c
new file mode 100644
index 0000000..fe70fe0
--- /dev/null
+++ b/libdb/examples_c/ex_lock.c
@@ -0,0 +1,239 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+int db_init __P((const char *, u_int32_t, int));
+int main __P((int, char *[]));
+int usage __P((void));
+
+DB_ENV *dbenv;
+const char
+ *progname = "ex_lock"; /* Program name. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBT lock_dbt;
+ DB_LOCK lock;
+ DB_LOCK *locks;
+ db_lockmode_t lock_type;
+ long held;
+ u_int32_t len, locker, maxlocks;
+ int ch, do_unlink, did_get, i, lockid, lockcount, ret;
+ const char *home;
+ char opbuf[16], objbuf[1024], lockbuf[16];
+
+ home = "TESTDIR";
+ maxlocks = 0;
+ do_unlink = 0;
+ while ((ch = getopt(argc, argv, "h:m:u")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'm':
+ if ((i = atoi(optarg)) <= 0)
+ return (usage());
+ maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
+ break;
+ case 'u':
+ do_unlink = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Initialize the database environment. */
+ if ((ret = db_init(home, maxlocks, do_unlink)) != 0)
+ return (ret);
+
+ locks = 0;
+ lockcount = 0;
+
+ /*
+ * Accept lock requests.
+ */
+ if ((ret = dbenv->lock_id(dbenv, &locker)) != 0) {
+ dbenv->err(dbenv, ret, "unable to get locker id");
+ (void)dbenv->close(dbenv, 0);
+ return (EXIT_FAILURE);
+ }
+ lockid = -1;
+
+ memset(&lock_dbt, 0, sizeof(lock_dbt));
+ for (held = 0, did_get = 0;;) {
+ printf("Operation get/release [get]> ");
+ fflush(stdout);
+ if (fgets(opbuf, sizeof(opbuf), stdin) == NULL)
+ break;
+ if ((len = strlen(opbuf)) <= 1 || strcmp(opbuf, "get\n") == 0) {
+ /* Acquire a lock. */
+ printf("input object (text string) to lock> ");
+ fflush(stdout);
+ if (fgets(objbuf, sizeof(objbuf), stdin) == NULL)
+ break;
+ if ((len = strlen(objbuf)) <= 1)
+ continue;
+
+ do {
+ printf("lock type read/write [read]> ");
+ fflush(stdout);
+ if (fgets(lockbuf,
+ sizeof(lockbuf), stdin) == NULL)
+ break;
+ len = strlen(lockbuf);
+ } while (len > 1 &&
+ strcmp(lockbuf, "read\n") != 0 &&
+ strcmp(lockbuf, "write\n") != 0);
+ if (len == 1 || strcmp(lockbuf, "read\n") == 0)
+ lock_type = DB_LOCK_READ;
+ else
+ lock_type = DB_LOCK_WRITE;
+
+ lock_dbt.data = objbuf;
+ lock_dbt.size = strlen(objbuf);
+ ret = dbenv->lock_get(dbenv, locker,
+ DB_LOCK_NOWAIT, &lock_dbt, lock_type, &lock);
+ if (ret == 0) {
+ did_get = 1;
+ lockid = lockcount++;
+ if (locks == NULL)
+ locks =
+ (DB_LOCK *)malloc(sizeof(DB_LOCK));
+ else
+ locks = (DB_LOCK *)realloc(locks,
+ lockcount * sizeof(DB_LOCK));
+ locks[lockid] = lock;
+ }
+ } else {
+ /* Release a lock. */
+ do {
+ printf("input lock to release> ");
+ fflush(stdout);
+ if (fgets(objbuf,
+ sizeof(objbuf), stdin) == NULL)
+ break;
+ } while ((len = strlen(objbuf)) <= 1);
+ lockid = strtol(objbuf, NULL, 16);
+ if (lockid < 0 || lockid >= lockcount) {
+ printf("Lock #%d out of range\n", lockid);
+ continue;
+ }
+ lock = locks[lockid];
+ ret = dbenv->lock_put(dbenv, &lock);
+ did_get = 0;
+ }
+ switch (ret) {
+ case 0:
+ printf("Lock #%d %s\n", lockid,
+ did_get ? "granted" : "released");
+ held += did_get ? 1 : -1;
+ break;
+ case DB_LOCK_NOTGRANTED:
+ dbenv->err(dbenv, ret, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbenv->err(dbenv, ret,
+ "lock_%s", did_get ? "get" : "put");
+ break;
+ default:
+ dbenv->err(dbenv, ret,
+ "lock_%s", did_get ? "get" : "put");
+ (void)dbenv->close(dbenv, 0);
+ return (EXIT_FAILURE);
+ }
+ }
+
+	printf("\nClosing lock region, %ld locks held\n", held);
+
+ if (locks != NULL)
+ free(locks);
+
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home, maxlocks, do_unlink)
+ const char *home;
+ u_int32_t maxlocks;
+ int do_unlink;
+{
+ int ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+
+ if (do_unlink) {
+ if ((ret = dbenv->remove(dbenv, home, DB_FORCE)) != 0) {
+ fprintf(stderr, "%s: dbenv->remove: %s\n",
+ progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ }
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (maxlocks != 0)
+ dbenv->set_lk_max_locks(dbenv, maxlocks);
+
+ if ((ret =
+ dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK, 0)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ (void)dbenv->close(dbenv, 0);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: %s [-u] [-h home] [-m maxlocks]\n", progname);
+ return (EXIT_FAILURE);
+}
diff --git a/libdb/examples_c/ex_mpool.c b/libdb/examples_c/ex_mpool.c
new file mode 100644
index 0000000..4ce6969
--- /dev/null
+++ b/libdb/examples_c/ex_mpool.c
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+int init __P((const char *, int, int, const char *));
+int run __P((int, int, int, int, const char *));
+int run_mpool __P((int, int, int, int, const char *));
+int main __P((int, char *[]));
+int usage __P((const char *));
+#define MPOOL "mpool" /* File. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ int cachesize, ch, hits, npages, pagesize;
+ char *progname;
+
+ cachesize = 20 * 1024;
+ hits = 1000;
+ npages = 50;
+ pagesize = 1024;
+ progname = argv[0];
+ while ((ch = getopt(argc, argv, "c:h:n:p:")) != EOF)
+ switch (ch) {
+ case 'c':
+ if ((cachesize = atoi(optarg)) < 20 * 1024)
+ return (usage(progname));
+ break;
+ case 'h':
+ if ((hits = atoi(optarg)) <= 0)
+ return (usage(progname));
+ break;
+ case 'n':
+ if ((npages = atoi(optarg)) <= 0)
+ return (usage(progname));
+ break;
+ case 'p':
+ if ((pagesize = atoi(optarg)) <= 0)
+ return (usage(progname));
+ break;
+ case '?':
+ default:
+ return (usage(progname));
+ }
+ argc -= optind;
+ argv += optind;
+
+ return (run_mpool(pagesize, cachesize,
+ hits, npages, progname) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage(progname)
+ const char *progname;
+{
+ (void)fprintf(stderr,
+ "usage: %s [-c cachesize] [-h hits] [-n npages] [-p pagesize]\n",
+ progname);
+ return (EXIT_FAILURE);
+}
+
+int
+run_mpool(pagesize, cachesize, hits, npages, progname)
+ int pagesize, cachesize, hits, npages;
+ const char *progname;
+{
+ int ret;
+
+ /* Initialize the file. */
+ if ((ret = init(MPOOL, pagesize, npages, progname)) != 0)
+ return (ret);
+
+ /* Get the pages. */
+ if ((ret = run(hits, cachesize, pagesize, npages, progname)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * init --
+ * Create a backing file.
+ */
+int
+init(file, pagesize, npages, progname)
+ const char *file, *progname;
+ int pagesize, npages;
+{
+ FILE *fp;
+ int cnt;
+ char *p;
+
+ /*
+ * Create a file with the right number of pages, and store a page
+ * number on each page.
+ */
+ if ((fp = fopen(file, "wb")) == NULL) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, file, strerror(errno));
+ return (1);
+ }
+ if ((p = (char *)malloc(pagesize)) == NULL) {
+ fprintf(stderr, "%s: %s\n", progname, strerror(ENOMEM));
+ return (1);
+ }
+
+ /* The pages are numbered from 0. */
+ for (cnt = 0; cnt <= npages; ++cnt) {
+ *(int *)p = cnt;
+ if (fwrite(p, pagesize, 1, fp) != 1) {
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, file, strerror(errno));
+ return (1);
+ }
+ }
+
+ (void)fclose(fp);
+ free(p);
+ return (0);
+}
+
+/*
+ * run --
+ * Get a set of pages.
+ */
+int
+run(hits, cachesize, pagesize, npages, progname)
+ int hits, cachesize, pagesize, npages;
+ const char *progname;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mfp;
+ db_pgno_t pageno;
+ int cnt, ret;
+ void *p;
+
+ dbenv = NULL;
+ mfp = NULL;
+
+ printf("%s: cachesize: %d; pagesize: %d; N pages: %d\n",
+ progname, cachesize, pagesize, npages);
+
+ /*
+ * Open a memory pool, specify a cachesize, output error messages
+ * to stderr.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+#ifdef HAVE_VXWORKS
+ if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
+ dbenv->err(dbenv, ret, "set_shm_key");
+ return (1);
+ }
+#endif
+
+ /* Set the cachesize. */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cachesize, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto err;
+ }
+
+ /* Open the environment. */
+ if ((ret = dbenv->open(
+ dbenv, NULL, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto err;
+ }
+
+ /* Open the file in the environment. */
+ if ((ret = dbenv->memp_fcreate(dbenv, &mfp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->memp_fcreate: %s", MPOOL);
+ goto err;
+ }
+ if ((ret = mfp->open(mfp, MPOOL, 0, 0, pagesize)) != 0) {
+ dbenv->err(dbenv, ret, "DB_MPOOLFILE->open: %s", MPOOL);
+ goto err;
+ }
+
+ printf("retrieve %d random pages... ", hits);
+
+ srand((u_int)time(NULL));
+ for (cnt = 0; cnt < hits; ++cnt) {
+ pageno = (rand() % npages) + 1;
+ if ((ret = mfp->get(mfp, &pageno, 0, &p)) != 0) {
+ dbenv->err(dbenv, ret,
+ "unable to retrieve page %lu", (u_long)pageno);
+ goto err;
+ }
+ if (*(db_pgno_t *)p != pageno) {
+ dbenv->errx(dbenv,
+ "wrong page retrieved (%lu != %d)",
+ (u_long)pageno, *(int *)p);
+ goto err;
+ }
+ if ((ret = mfp->put(mfp, p, 0)) != 0) {
+ dbenv->err(dbenv, ret,
+ "unable to return page %lu", (u_long)pageno);
+ goto err;
+ }
+ }
+
+ printf("successful.\n");
+
+ /* Close the file. */
+ if ((ret = mfp->close(mfp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_MPOOLFILE->close");
+ goto err;
+ }
+
+ /* Close the pool. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr,
+		    "%s: DB_ENV->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (0);
+
+err: if (mfp != NULL)
+ (void)mfp->close(mfp, 0);
+ if (dbenv != NULL)
+ (void)dbenv->close(dbenv, 0);
+ return (1);
+}
diff --git a/libdb/examples_c/ex_repquote/ex_repquote.h b/libdb/examples_c/ex_repquote/ex_repquote.h
new file mode 100644
index 0000000..5dbf6c9
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_repquote.h
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _EX_REPQUOTE_H_
+#define _EX_REPQUOTE_H_
+
+#define SELF_EID 1
+
+typedef struct {
+ char *host; /* Host name. */
+ u_int32_t port; /* Port on which to connect to this site. */
+} repsite_t;
+
+/* Globals */
+extern int master_eid;
+extern char *myaddr;
+
+struct __member; typedef struct __member member_t;
+struct __machtab; typedef struct __machtab machtab_t;
+
+/* Arguments for the connect_all thread. */
+typedef struct {
+ DB_ENV *dbenv;
+ const char *progname;
+ const char *home;
+ machtab_t *machtab;
+ repsite_t *sites;
+ int nsites;
+} all_args;
+
+/* Arguments for the connect_loop thread. */
+typedef struct {
+ DB_ENV *dbenv;
+	const char *home;
+	const char *progname;
+ machtab_t *machtab;
+ int port;
+} connect_args;
+
+#define CACHESIZE (10 * 1024 * 1024)
+#define DATABASE "quote.db"
+#define SLEEPTIME 3
+
+void *connect_all __P((void *args));
+void *connect_thread __P((void *args));
+int doclient __P((DB_ENV *, const char *, machtab_t *));
+int domaster __P((DB_ENV *, const char *));
+int get_accepted_socket __P((const char *, int));
+int get_connected_socket __P((machtab_t *, const char *, const char *, int, int *, int *));
+int get_next_message __P((int, DBT *, DBT *));
+int listen_socket_init __P((const char *, int));
+int listen_socket_accept __P((machtab_t *, const char *, int, int *));
+int machtab_getinfo __P((machtab_t *, int, u_int32_t *, int *));
+int machtab_init __P((machtab_t **, int, int));
+void machtab_parm __P((machtab_t *, int *, int *, u_int32_t *));
+int machtab_rem __P((machtab_t *, int, int));
+int quote_send __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+
+#ifndef COMPQUIET
+#define COMPQUIET(x,y) x = (y)
+#endif
+
+#endif /* !_EX_REPQUOTE_H_ */
diff --git a/libdb/examples_c/ex_repquote/ex_rq_client.c b/libdb/examples_c/ex_repquote/ex_rq_client.c
new file mode 100644
index 0000000..6662872
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_rq_client.c
@@ -0,0 +1,250 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *check_loop __P((void *));
+static void *display_loop __P((void *));
+static int print_stocks __P((DBC *));
+
+typedef struct {
+ const char *progname;
+ DB_ENV *dbenv;
+} disploop_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} checkloop_args;
+
+int
+doclient(dbenv, progname, machtab)
+ DB_ENV *dbenv;
+ const char *progname;
+ machtab_t *machtab;
+{
+ checkloop_args cargs;
+ disploop_args dargs;
+ pthread_t check_thr, disp_thr;
+ void *cstatus, *dstatus;
+ int rval, s;
+
+ rval = EXIT_SUCCESS;
+ s = -1;
+
+ memset(&dargs, 0, sizeof(dargs));
+ dstatus = (void *)EXIT_FAILURE;
+
+ dargs.progname = progname;
+ dargs.dbenv = dbenv;
+ if (pthread_create(&disp_thr, NULL, display_loop, (void *)&dargs)) {
+ dbenv->err(dbenv, errno, "display_loop pthread_create failed");
+ goto err;
+ }
+
+ cargs.dbenv = dbenv;
+ cargs.machtab = machtab;
+ if (pthread_create(&check_thr, NULL, check_loop, (void *)&cargs)) {
+ dbenv->err(dbenv, errno, "check_thread pthread_create failed");
+ goto err;
+ }
+ if (pthread_join(disp_thr, &dstatus) ||
+ pthread_join(check_thr, &cstatus)) {
+ dbenv->err(dbenv, errno, "pthread_join failed");
+ goto err;
+ }
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * Our only job is to check that the master is valid; if it has not been
+ * valid for an extended period, we trigger an election.  We do this in
+ * two phases.  If we do not have a master, we first send out a request
+ * for a master to identify itself (that would be a call to rep_start).
+ * If that fails, we trigger an election.
+ */
+static void *
+check_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT dbt;
+ checkloop_args *cargs;
+ int count, n, pri;
+ machtab_t *machtab;
+ u_int32_t timeout;
+
+ cargs = (checkloop_args *)args;
+ dbenv = cargs->dbenv;
+ machtab = cargs->machtab;
+
+#define IDLE_INTERVAL 1
+
+ count = 0;
+ while (master_eid == DB_EID_INVALID) {
+ /*
+		 * Call either rep_start or rep_elect, depending on whether
+ * count is 0 or 1.
+ */
+
+ if (count == 0) {
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = myaddr;
+ dbt.size = strlen(myaddr) + 1;
+ (void)dbenv->rep_start(dbenv, &dbt, DB_REP_CLIENT);
+ count = 1;
+ } else {
+ machtab_parm(machtab, &n, &pri, &timeout);
+ (void)dbenv->rep_elect(dbenv,
+ n, pri, timeout, &master_eid);
+ count = 0;
+ }
+ sleep(IDLE_INTERVAL);
+ }
+
+ return ((void *)EXIT_SUCCESS);
+}
+
+static void *
+display_loop(args)
+ void *args;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ const char *progname;
+ disploop_args *dargs;
+ int ret, rval;
+
+ dargs = (disploop_args *)args;
+ progname = dargs->progname;
+ dbenv = dargs->dbenv;
+
+ dbc = NULL;
+ dbp = NULL;
+
+ for (;;) {
+ /* If we become master, shut this loop off. */
+ if (master_eid == SELF_EID)
+ break;
+
+ if (dbp == NULL) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if ((ret = dbp->open(dbp, NULL,
+ DATABASE, NULL, DB_BTREE, DB_RDONLY, 0)) != 0) {
+ if (ret == ENOENT) {
+ printf(
+ "No stock database yet available.\n");
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->close");
+ goto err;
+ }
+ dbp = NULL;
+ sleep(SLEEPTIME);
+ continue;
+ }
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ if ((ret = print_stocks(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "database traversal failed");
+ goto err;
+ }
+
+ if ((ret = dbc->c_close(dbc)) != 0) {
+			dbenv->err(dbenv, ret, "DBcursor->close");
+ goto err;
+ }
+
+ dbc = NULL;
+
+ sleep(SLEEPTIME);
+ }
+
+ rval = EXIT_SUCCESS;
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0) {
+		dbenv->err(dbenv, ret, "DBcursor->close");
+ rval = EXIT_FAILURE;
+ }
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ return ((void *)rval);
+}
+
+static int
+print_stocks(dbc)
+ DBC *dbc;
+{
+ DBT key, data;
+#define MAXKEYSIZE 10
+#define MAXDATASIZE 20
+ char keybuf[MAXKEYSIZE + 1], databuf[MAXDATASIZE + 1];
+ int ret;
+ u_int32_t keysize, datasize;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ printf("\tSymbol\tPrice\n");
+ printf("\t======\t=====\n");
+
+ for (ret = dbc->c_get(dbc, &key, &data, DB_FIRST);
+ ret == 0;
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) {
+ keysize = key.size > MAXKEYSIZE ? MAXKEYSIZE : key.size;
+ memcpy(keybuf, key.data, keysize);
+ keybuf[keysize] = '\0';
+
+ datasize = data.size >= MAXDATASIZE ? MAXDATASIZE : data.size;
+ memcpy(databuf, data.data, datasize);
+ databuf[datasize] = '\0';
+
+ printf("\t%s\t%s\n", keybuf, databuf);
+ }
+ printf("\n");
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
diff --git a/libdb/examples_c/ex_repquote/ex_rq_main.c b/libdb/examples_c/ex_repquote/ex_rq_main.c
new file mode 100644
index 0000000..3980361
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_rq_main.c
@@ -0,0 +1,303 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <pthread.h>
+
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+/*
+ * Process globals (we could put these in the machtab, I suppose).
+ */
+int master_eid;
+char *myaddr;
+
+static int env_init __P((const char *, const char *, DB_ENV **, machtab_t *,
+ u_int32_t));
+static void usage __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ DBT local;
+ enum { MASTER, CLIENT, UNKNOWN } whoami;
+ all_args aa;
+ connect_args ca;
+ machtab_t *machtab;
+ pthread_t all_thr, conn_thr;
+ repsite_t site, *sitep, self, *selfp;
+ struct sigaction sigact;
+	int ch, maxsites, nsites, ret, priority, totalsites;
+	char *c;
+ const char *home, *progname;
+ void *astatus, *cstatus;
+
+ master_eid = DB_EID_INVALID;
+
+ dbenv = NULL;
+ whoami = UNKNOWN;
+ machtab = NULL;
+ selfp = sitep = NULL;
+ maxsites = nsites = ret = totalsites = 0;
+ priority = 100;
+ home = "TESTDIR";
+ progname = "ex_repquote";
+
+ while ((ch = getopt(argc, argv, "Ch:Mm:n:o:p:")) != EOF)
+ switch (ch) {
+ case 'M':
+ whoami = MASTER;
+ master_eid = SELF_EID;
+ break;
+ case 'C':
+ whoami = CLIENT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'm':
+ if ((myaddr = strdup(optarg)) == NULL) {
+ fprintf(stderr,
+ "System error %s\n", strerror(errno));
+ goto err;
+ }
+ self.host = optarg;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ self.port = atoi(c);
+ selfp = &self;
+ break;
+ case 'n':
+ totalsites = atoi(optarg);
+ break;
+ case 'o':
+ site.host = optarg;
+ site.host = strtok(site.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ site.port = atoi(c);
+ if (sitep == NULL || nsites >= maxsites) {
+ maxsites = maxsites == 0 ? 10 : 2 * maxsites;
+ if ((sitep = realloc(sitep,
+ maxsites * sizeof(repsite_t))) == NULL) {
+ fprintf(stderr, "System error %s\n",
+ strerror(errno));
+ goto err;
+ }
+ }
+ sitep[nsites++] = site;
+ break;
+ case 'p':
+ priority = atoi(optarg);
+ break;
+ case '?':
+ default:
+ usage(progname);
+ }
+
+ /* Error check command line. */
+ if (whoami == UNKNOWN) {
+ fprintf(stderr, "Must specify -M or -C.\n");
+ goto err;
+ }
+
+ if (selfp == NULL)
+ usage(progname);
+
+ if (home == NULL)
+ usage(progname);
+
+ /*
+ * Turn off SIGPIPE so that we don't kill processes when they
+ * happen to lose a connection at the wrong time.
+ */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = SIG_IGN;
+ if ((ret = sigaction(SIGPIPE, &sigact, NULL)) != 0) {
+ fprintf(stderr,
+ "Unable to turn off SIGPIPE: %s\n", strerror(ret));
+ goto err;
+ }
+
+ /*
+	 * We are hardcoding the priorities here: all clients have the same
+	 * priority, except for a designated master, which gets a higher
+	 * priority.
+ */
+ if ((ret =
+ machtab_init(&machtab, priority, totalsites)) != 0)
+ goto err;
+
+ /*
+	 * We can now open our environment, although we're not ready to
+ * begin replicating. However, we want to have a dbenv around
+ * so that we can send it into any of our message handlers.
+ */
+ if ((ret = env_init(progname, home, &dbenv, machtab, DB_RECOVER)) != 0)
+ goto err;
+
+ /*
+	 * Now set up the communication infrastructure.  There are two phases.  First,
+ * we open our port for listening for incoming connections. Then
+ * we attempt to connect to every host we know about.
+ */
+
+ ca.dbenv = dbenv;
+ ca.home = home;
+ ca.progname = progname;
+ ca.machtab = machtab;
+ ca.port = selfp->port;
+ if ((ret = pthread_create(&conn_thr, NULL, connect_thread, &ca)) != 0)
+ goto err;
+
+ aa.dbenv = dbenv;
+ aa.progname = progname;
+ aa.home = home;
+ aa.machtab = machtab;
+ aa.sites = sitep;
+ aa.nsites = nsites;
+ if ((ret = pthread_create(&all_thr, NULL, connect_all, &aa)) != 0)
+ goto err;
+
+ /*
+	 * We now have the entire communication infrastructure set up.
+ * It's time to declare ourselves to be a client or master.
+ */
+ if (whoami == MASTER) {
+ if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->rep_start failed");
+ goto err;
+ }
+ if ((ret = domaster(dbenv, progname)) != 0) {
+ dbenv->err(dbenv, ret, "Master failed");
+ goto err;
+ }
+ } else {
+ memset(&local, 0, sizeof(local));
+ local.data = myaddr;
+ local.size = strlen(myaddr) + 1;
+ if ((ret =
+ dbenv->rep_start(dbenv, &local, DB_REP_CLIENT)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->rep_start failed");
+ goto err;
+ }
+ /* Sleep to give ourselves a minute to find a master. */
+ sleep(5);
+ if ((ret = doclient(dbenv, progname, machtab)) != 0) {
+ dbenv->err(dbenv, ret, "Client failed");
+ goto err;
+ }
+
+ }
+
+ /* Wait on the connection threads. */
+ if (pthread_join(all_thr, &astatus) || pthread_join(conn_thr, &cstatus))
+ ret = errno;
+ if (ret == 0 &&
+ ((int)astatus != EXIT_SUCCESS || (int)cstatus != EXIT_SUCCESS))
+ ret = -1;
+
+err: if (machtab != NULL)
+ free(machtab);
+ if (dbenv != NULL)
+ (void)dbenv->close(dbenv, 0);
+ return (ret);
+}
+
+/*
+ * In this application, we specify all communication via the command line.
+ * In a real application, we would expect that information about the other
+ * sites in the system would be maintained in some sort of configuration
+ * file. The critical part of this interface is that we assume at startup
+ * that we can find out 1) what host/port we wish to listen on for connections,
+ * 2) a (possibly empty) list of other sites we should attempt to connect to,
+ * 3) whether we are a master or client (if we don't know, we should come up
+ * as a client and see if there is a master out there) and 4) what our
+ * Berkeley DB home environment is.
+ *
+ * These pieces of information are expressed by the following flags.
+ * -m host:port (required; m stands for me)
+ * -o host:port (optional; o stands for other; any number of these may be
+ * specified)
+ * -[MC] M for master/C for client
+ * -h home directory
+ * -n nsites (optional; number of sites in replication group; defaults to 0
+ * in which case we try to dynamically compute the number of sites in
+ * the replication group.)
+ * -p priority (optional: defaults to 100)
+ */
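+
+/*
+ * For example (the host names, ports and priorities below are purely
+ * illustrative), a two-site group might be started as:
+ *
+ *	ex_repquote -M -h TESTDIR1 -m sitea:6000 -o siteb:6001 -n 2 -p 150
+ *	ex_repquote -C -h TESTDIR2 -m siteb:6001 -o sitea:6000 -n 2
+ */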
+static void
+usage(progname)
+ const char *progname;
+{
+ fprintf(stderr, "usage: %s ", progname);
+ fprintf(stderr, "[-CM][-h home][-o host:port][-m host:port]%s",
+ "[-n nsites][-p priority]\n");
+ exit(EXIT_FAILURE);
+}
+
+/* Open and configure an environment. */
+int
+env_init(progname, home, dbenvp, machtab, flags)
+ const char *progname, *home;
+ DB_ENV **dbenvp;
+ machtab_t *machtab;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *prefix;
+
+ if ((prefix = malloc(strlen(progname) + 2)) == NULL) {
+ fprintf(stderr,
+ "%s: System error: %s\n", progname, strerror(errno));
+ return (errno);
+ }
+ sprintf(prefix, "%s:", progname);
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: env create failed: %s\n",
+ progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ /* (void)dbenv->set_verbose(dbenv, DB_VERB_REPLICATION, 1); */
+ (void)dbenv->set_cachesize(dbenv, 0, CACHESIZE, 0);
+ /* (void)dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1); */
+
+ dbenv->app_private = machtab;
+ (void)dbenv->set_rep_transport(dbenv, SELF_EID, quote_send);
+
+ flags |= DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+
+ ret = dbenv->open(dbenv, home, flags, 0);
+
+ *dbenvp = dbenv;
+ return (ret);
+}
diff --git a/libdb/examples_c/ex_repquote/ex_rq_master.c b/libdb/examples_c/ex_repquote/ex_rq_master.c
new file mode 100644
index 0000000..080e372
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_rq_master.c
@@ -0,0 +1,165 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *master_loop __P((void *));
+
+#define BUFSIZE 1024
+
+int
+domaster(dbenv, progname)
+ DB_ENV *dbenv;
+ const char *progname;
+{
+ int ret, t_ret;
+ pthread_t interface_thr;
+ pthread_attr_t attr;
+
+ COMPQUIET(progname, NULL);
+
+ /* Spawn off a thread to handle the basic master interface. */
+	if ((ret = pthread_attr_init(&attr)) != 0 ||
+ (ret = pthread_attr_setdetachstate(&attr,
+ PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ if ((ret = pthread_create(&interface_thr,
+ &attr, master_loop, (void *)dbenv)) != 0)
+ goto err;
+
+err: if ((t_ret = pthread_attr_destroy(&attr)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static void *
+master_loop(dbenvv)
+ void *dbenvv;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DBT key, data;
+ char buf[BUFSIZE], *rbuf;
+ int ret;
+
+ dbp = NULL;
+ txn = NULL;
+
+ dbenv = (DB_ENV *)dbenvv;
+ /*
+ * Check if the database exists and if it verifies cleanly.
+ * If it does, run with it; else recreate it and go. Note
+ * that we have to verify outside of the environment.
+ */
+#ifdef NOTDEF
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->verify(dbp, DATABASE, NULL, NULL, 0)) != 0) {
+ if ((ret = dbp->remove(dbp, DATABASE, NULL, 0)) != 0 &&
+ ret != DB_NOTFOUND && ret != ENOENT)
+ return (ret);
+#endif
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return ((void *)ret);
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+ if ((ret = dbp->open(dbp, txn, DATABASE,
+ NULL, DB_BTREE, DB_CREATE /* | DB_THREAD */, 0)) != 0)
+ goto err;
+ ret = txn->commit(txn, 0);
+ txn = NULL;
+ if (ret != 0) {
+ dbp = NULL;
+ goto err;
+ }
+
+#ifdef NOTDEF
+ } else {
+ /* Reopen in the environment. */
+ if ((ret = dbp->close(dbp, 0)) != 0)
+ return (ret);
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_UNKNOWN, DB_THREAD, 0)) != 0)
+ goto err;
+ }
+#endif
+ /*
+ * XXX
+ * It would probably be kind of cool to do this in Tcl and
+ * have a nice GUI. It would also be cool to be independently
+ * wealthy.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ for (;;) {
+ printf("QUOTESERVER> ");
+ fflush(stdout);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ (void)strtok(&buf[0], " \t\n");
+ rbuf = strtok(NULL, " \t\n");
+ if (rbuf == NULL || rbuf[0] == '\0') {
+ if (strncmp(buf, "exit", 4) == 0 ||
+ strncmp(buf, "quit", 4) == 0)
+ break;
+ dbenv->errx(dbenv, "Format: TICKER VALUE");
+ continue;
+ }
+
+ key.data = buf;
+ key.size = strlen(buf);
+
+ data.data = rbuf;
+ data.size = strlen(rbuf);
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+ switch (ret =
+ dbp->put(dbp, txn, &key, &data, 0)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err;
+ break;
+ }
+ ret = txn->commit(txn, 0);
+ txn = NULL;
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (txn != NULL)
+ (void)txn->abort(txn);
+
+ if (dbp != NULL)
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return ((void *)ret);
+}
diff --git a/libdb/examples_c/ex_repquote/ex_rq_net.c b/libdb/examples_c/ex_repquote/ex_rq_net.c
new file mode 100644
index 0000000..bfadf0c
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_rq_net.c
@@ -0,0 +1,692 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <netdb.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+#include <dbinc/queue.h> /* !!!: for the LIST_XXX macros. */
+
+#include "ex_repquote.h"
+
+int machtab_add __P((machtab_t *, int, u_int32_t, int, int *));
+ssize_t readn __P((int, void *, size_t));
+
+/*
+ * This file defines the communication infrastructure for the ex_repquote
+ * sample application.
+ *
+ * This application uses TCP/IP for its communication. In an N-site
+ * replication group, this means that there are N * N communication
+ * channels so that every site can communicate with every other site
+ * (this allows elections to be held when the master fails). We do
+ * not require that anyone know about all sites when the application
+ * starts up.  To communicate at all, however, the application must know
+ * about at least one other site; otherwise it has no way to join the group.
+ *
+ * Communication is handled via a number of different threads. These
+ * thread functions are implemented in rep_util.c.  In this file, we
+ * define the data structures that maintain the state describing the
+ * comm infrastructure, the functions that manipulate this state, and
+ * the routines used to actually send and receive data over the
+ * sockets.
+ */
+
+/*
+ * The communication infrastructure is represented by a machine table,
+ * machtab_t, which is essentially a mutex-protected linked list of members
+ * of the group. The machtab also contains the parameters that are needed
+ * to call for an election. We hardwire values for these parameters in the
+ * init function, but these could be set via some configuration setup in a
+ * real application. We reserve the machine-id 1 to refer to ourselves and
+ * make the machine-id 0 be invalid.
+ */
+
+#define MACHID_INVALID 0
+#define MACHID_SELF 1
+
+struct __machtab {
+ LIST_HEAD(__machlist, __member) machlist;
+ int nextid;
+ pthread_mutex_t mtmutex;
+ u_int32_t timeout_time;
+ int current;
+ int max;
+ int nsites;
+ int priority;
+};
+
+/* Data structure that describes each entry in the machtab. */
+struct __member {
+ u_int32_t hostaddr; /* Host IP address. */
+ int port; /* Port number. */
+ int eid; /* Application-specific machine id. */
+ int fd; /* File descriptor for the socket. */
+ LIST_ENTRY(__member) links;
+ /* For linked list of all members we know of. */
+};
+
+static int quote_send_broadcast __P((machtab_t *,
+ const DBT *, const DBT *, u_int32_t));
+static int quote_send_one __P((const DBT *, const DBT *, int, u_int32_t));
+
+/*
+ * machtab_init --
+ * Initialize the machine ID table.
+ * XXX Right now we treat the number of sites as the maximum
+ * number we've ever had on the list at one time. We probably
+ * want to make that smarter.
+ */
+int
+machtab_init(machtabp, pri, nsites)
+ machtab_t **machtabp;
+ int pri, nsites;
+{
+ int ret;
+ machtab_t *machtab;
+
+ if ((machtab = malloc(sizeof(machtab_t))) == NULL)
+ return (ENOMEM);
+
+ LIST_INIT(&machtab->machlist);
+
+ /* Reserve eid's 0 and 1. */
+ machtab->nextid = 2;
+ machtab->timeout_time = 2 * 1000000; /* 2 seconds. */
+ machtab->current = machtab->max = 0;
+ machtab->priority = pri;
+ machtab->nsites = nsites;
+
+ ret = pthread_mutex_init(&machtab->mtmutex, NULL);
+
+ *machtabp = machtab;
+
+ return (ret);
+}
+
+/*
+ * machtab_add --
+ * Add a file descriptor to the table of machines, returning
+ * a new machine ID.
+ */
+int
+machtab_add(machtab, fd, hostaddr, port, idp)
+ machtab_t *machtab;
+ int fd;
+ u_int32_t hostaddr;
+ int port, *idp;
+{
+ int ret;
+ member_t *m, *member;
+
+ if ((member = malloc(sizeof(member_t))) == NULL)
+ return (ENOMEM);
+
+ member->fd = fd;
+ member->hostaddr = hostaddr;
+ member->port = port;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (m = LIST_FIRST(&machtab->machlist);
+ m != NULL; m = LIST_NEXT(m, links))
+ if (m->hostaddr == hostaddr && m->port == port)
+ break;
+
+ if (m == NULL) {
+ member->eid = machtab->nextid++;
+ LIST_INSERT_HEAD(&machtab->machlist, member, links);
+ } else
+ member->eid = m->eid;
+
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ if (idp != NULL)
+ *idp = member->eid;
+
+ if (m == NULL) {
+ if (++machtab->current > machtab->max)
+ machtab->max = machtab->current;
+ } else {
+ free(member);
+ ret = EEXIST;
+ }
+ return (ret);
+}
+
+/*
+ * machtab_getinfo --
+ * Return host and port information for a particular machine id.
+ */
+int
+machtab_getinfo(machtab, eid, hostp, portp)
+ machtab_t *machtab;
+ int eid;
+ u_int32_t *hostp;
+ int *portp;
+{
+ int ret;
+ member_t *member;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+ *hostp = member->hostaddr;
+ *portp = member->port;
+ break;
+ }
+
+ if ((ret = pthread_mutex_unlock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ return (member != NULL ? 0 : EINVAL);
+}
+
+/*
+ * machtab_rem --
+ * Remove a mapping from the table of machines. Lock indicates
+ * whether we need to lock the machtab or not (0 indicates we do not
+ * need to lock; non-zero indicates that we do need to lock).
+ */
+int
+machtab_rem(machtab, eid, lock)
+ machtab_t *machtab;
+ int eid;
+ int lock;
+{
+ int found, ret;
+ member_t *member;
+
+ ret = 0;
+ if (lock && (ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (found = 0, member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+ found = 1;
+ LIST_REMOVE(member, links);
+ (void)close(member->fd);
+ free(member);
+ machtab->current--;
+ break;
+ }
+
+ if (LIST_FIRST(&machtab->machlist) == NULL)
+ machtab->nextid = 2;
+
+ if (lock)
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ return (ret);
+}
+
+void
+machtab_parm(machtab, nump, prip, timeoutp)
+ machtab_t *machtab;
+ int *nump, *prip;
+ u_int32_t *timeoutp;
+{
+ if (machtab->nsites == 0)
+ *nump = machtab->max;
+ else
+ *nump = machtab->nsites;
+ *prip = machtab->priority;
+ *timeoutp = machtab->timeout_time;
+}
+
+/*
+ * listen_socket_init --
+ * Initialize a socket for listening on the specified port. Returns
+ * a file descriptor for the socket, ready for an accept() call
+ * in a thread that we're happy to let block.
+ */
+int
+listen_socket_init(progname, port)
+ const char *progname;
+ int port;
+{
+ int s;
+ struct protoent *proto;
+ struct sockaddr_in si;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ return (s);
+
+err: fprintf(stderr, "%s: %s", progname, strerror(errno));
+ close (s);
+ return (-1);
+}
+
+/*
+ * listen_socket_accept --
+ * Accept a connection on a socket. This is essentially just a wrapper
+ * for accept(3).
+ */
+int
+listen_socket_accept(machtab, progname, s, eidp)
+ machtab_t *machtab;
+ const char *progname;
+ int s, *eidp;
+{
+ struct sockaddr_in si;
+ int si_len;
+ int host, ns, port, ret;
+
+ COMPQUIET(progname, NULL);
+
+wait: memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+ host = ntohl(si.sin_addr.s_addr);
+ port = ntohs(si.sin_port);
+ ret = machtab_add(machtab, ns, host, port, eidp);
+ if (ret == EEXIST) {
+ close(ns);
+ goto wait;
+ } else if (ret != 0)
+ goto err;
+
+ return (ns);
+
+err: close(ns);
+ return (-1);
+}
+
+/*
+ * get_accepted_socket --
+ * Listen on the specified port, and return a file descriptor
+ * when we have accepted a connection on it.
+ */
+int
+get_accepted_socket(progname, port)
+ const char *progname;
+ int port;
+{
+ struct protoent *proto;
+ struct sockaddr_in si;
+ int si_len;
+ int s, ns;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+
+ return (ns);
+
+err: fprintf(stderr, "%s: %s", progname, strerror(errno));
+ close (s);
+ return (-1);
+}
+
+/*
+ * get_connected_socket --
+ * Connect to the specified port of the specified remote machine,
+ * and return a file descriptor once the connection has been established.
+ * Add this connection to the machtab. If we already have a connection
+ * open to this machine, then don't create another one, return the eid
+ * of the connection (in *eidp) and set is_open to 1. Return 0.
+ */
+int
+get_connected_socket(machtab, progname, remotehost, port, is_open, eidp)
+ machtab_t *machtab;
+ const char *progname, *remotehost;
+ int port, *is_open, *eidp;
+{
+ int ret, s;
+ struct hostent *hp;
+ struct protoent *proto;
+ struct sockaddr_in si;
+ u_int32_t addr;
+
+ *is_open = 0;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((hp = gethostbyname(remotehost)) == NULL) {
+ fprintf(stderr, "%s: host not found: %s\n", progname,
+ strerror(errno));
+ return (-1);
+ }
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+ memset(&si, 0, sizeof(si));
+ memcpy((char *)&si.sin_addr, hp->h_addr, hp->h_length);
+ addr = ntohl(si.sin_addr.s_addr);
+ ret = machtab_add(machtab, s, addr, port, eidp);
+ if (ret == EEXIST) {
+ *is_open = 1;
+ close(s);
+ return (0);
+ } else if (ret != 0) {
+ close (s);
+ return (-1);
+ }
+
+ si.sin_family = AF_INET;
+ si.sin_port = htons(port);
+ if (connect(s, (struct sockaddr *)&si, sizeof(si)) < 0) {
+ fprintf(stderr, "%s: connection failed: %s",
+ progname, strerror(errno));
+ (void)machtab_rem(machtab, *eidp, 1);
+ return (-1);
+ }
+
+ return (s);
+}
+
+/*
+ * get_next_message --
+ * Read a single message from the specified file descriptor, and
+ * return it in the format used by rep functions (two DBTs and a type).
+ *
+ * This function is called in a loop by both clients and masters, and
+ * the resulting DBTs are manually dispatched to DB_ENV->rep_process_message().
+ */
+int
+get_next_message(fd, rec, control)
+ int fd;
+ DBT *rec, *control;
+{
+ size_t nr;
+ u_int32_t rsize, csize;
+ u_int8_t *recbuf, *controlbuf;
+
+ /*
+ * The protocol we use on the wire is dead simple:
+ *
+ * 4 bytes - rec->size
+ * (# read above) - rec->data
+ * 4 bytes - control->size
+ * (# read above) - control->data
+ */
+
+ /* Read rec->size. */
+ nr = readn(fd, &rsize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the record itself. */
+ if (rsize > 0) {
+ if (rec->size < rsize)
+ rec->data = realloc(rec->data, rsize);
+ recbuf = rec->data;
+ nr = readn(fd, recbuf, rsize);
+ } else {
+ if (rec->data != NULL)
+ free(rec->data);
+ rec->data = NULL;
+ }
+ rec->size = rsize;
+
+ /* Read control->size. */
+ nr = readn(fd, &csize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the control struct itself. */
+ if (csize > 0) {
+ controlbuf = control->data;
+ if (control->size < csize)
+ controlbuf = realloc(controlbuf, csize);
+ nr = readn(fd, controlbuf, csize);
+ if (nr != csize)
+ return (1);
+ } else {
+ if (control->data != NULL)
+ free(control->data);
+ controlbuf = NULL;
+ }
+ control->data = controlbuf;
+ control->size = csize;
+
+ return (0);
+}
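+
+/*
+ * For illustration only (not part of the original example): a record
+ * containing "abc" (size 3) followed by a 4-byte control structure
+ * would appear on the wire of a little-endian host as:
+ *
+ *	03 00 00 00  61 62 63  04 00 00 00  <4 control bytes>
+ *
+ * The sizes are written in host byte order, so this simple protocol
+ * assumes that all sites in the replication group share a byte order.
+ */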
+
+/*
+ * readn --
+ * Read a full n characters from a file descriptor, unless we get an error
+ * or EOF.
+ */
+ssize_t
+readn(fd, vptr, n)
+ int fd;
+ void *vptr;
+ size_t n;
+{
+ size_t nleft;
+ ssize_t nread;
+ char *ptr;
+
+ ptr = vptr;
+ nleft = n;
+ while (nleft > 0) {
+ if ( (nread = read(fd, ptr, nleft)) < 0) {
+ /*
+ * Call read() again on interrupted system call;
+ * on other errors, bail.
+ */
+ if (errno == EINTR)
+ nread = 0;
+ else
+ return (-1);
+ } else if (nread == 0)
+ break; /* EOF */
+
+ nleft -= nread;
+ ptr += nread;
+ }
+
+ return (n - nleft);
+}
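+
+#ifdef NOTDEF
+/*
+ * writen --
+ *	Illustrative counterpart to readn(), guarded out because the
+ * example does not use it: write a full n bytes to a file descriptor,
+ * retrying short writes and interrupted calls.  quote_send_one() below
+ * retries short writes inline instead.
+ */
+static ssize_t
+writen(fd, vptr, n)
+	int fd;
+	const void *vptr;
+	size_t n;
+{
+	size_t nleft;
+	ssize_t nwritten;
+	const char *ptr;
+
+	ptr = vptr;
+	nleft = n;
+	while (nleft > 0) {
+		if ((nwritten = write(fd, ptr, nleft)) <= 0) {
+			/* Retry on interrupted system call; else bail. */
+			if (nwritten < 0 && errno == EINTR)
+				nwritten = 0;
+			else
+				return (-1);
+		}
+		nleft -= nwritten;
+		ptr += nwritten;
+	}
+	return (n);
+}
+#endif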
+
+/*
+ * quote_send --
+ * The f_send function for DB_ENV->set_rep_transport.
+ */
+int
+quote_send(dbenv, control, rec, eid, flags)
+ DB_ENV *dbenv;
+ const DBT *control, *rec;
+ int eid;
+ u_int32_t flags;
+{
+ int fd, n, ret, t_ret;
+ machtab_t *machtab;
+ member_t *m;
+
+ machtab = (machtab_t *)dbenv->app_private;
+
+ if (eid == DB_EID_BROADCAST) {
+ /*
+ * Right now, we do not require successful transmission.
+		 * I'd like to move this to requiring at least one successful
+		 * transmission for PERMANENT requests.
+ */
+ n = quote_send_broadcast(machtab, rec, control, flags);
+ if (n < 0 /*|| (n == 0 && LF_ISSET(DB_REP_PERMANENT))*/)
+ return (DB_REP_UNAVAIL);
+ return (0);
+ }
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ fd = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL;
+ m = LIST_NEXT(m, links)) {
+ if (m->eid == eid) {
+ fd = m->fd;
+ break;
+ }
+ }
+
+	if (fd == 0) {
+		dbenv->err(dbenv, DB_REP_UNAVAIL,
+		    "quote_send: cannot find machine ID %d", eid);
+		(void)pthread_mutex_unlock(&machtab->mtmutex);
+		return (DB_REP_UNAVAIL);
+	}
+
+ ret = quote_send_one(rec, control, fd, flags);
+
+ if ((t_ret = (pthread_mutex_unlock(&machtab->mtmutex))) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * quote_send_broadcast --
+ * Send a message to everybody.
+ * Returns the number of sites to which this message was successfully
+ * communicated. A -1 indicates a fatal error.
+ */
+static int
+quote_send_broadcast(machtab, rec, control, flags)
+ machtab_t *machtab;
+ const DBT *rec, *control;
+ u_int32_t flags;
+{
+ int ret, sent;
+ member_t *m, *next;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (0);
+
+ sent = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL; m = next) {
+ next = LIST_NEXT(m, links);
+ if ((ret = quote_send_one(rec, control, m->fd, flags)) != 0) {
+ (void)machtab_rem(machtab, m->eid, 0);
+ } else
+ sent++;
+ }
+
+ if (pthread_mutex_unlock(&machtab->mtmutex) != 0)
+ return (-1);
+
+ return (sent);
+}
+
+/*
+ * quote_send_one --
+ * Send a message to a single machine, given that machine's file
+ * descriptor.
+ *
+ * !!!
+ * Note that the machtab mutex should be held through this call.
+ * It doubles as a synchronizer to make sure that two threads don't
+ * intersperse writes that are part of two separate messages.
+ */
+static int
+quote_send_one(rec, control, fd, flags)
+ const DBT *rec, *control;
+ int fd;
+ u_int32_t flags;
+
+{
+ int retry;
+ ssize_t bytes_left, nw;
+ u_int8_t *wp;
+
+ COMPQUIET(flags, 0);
+
+ /*
+ * The protocol is simply: write rec->size, write rec->data,
+ * write control->size, write control->data.
+ */
+ nw = write(fd, &rec->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+
+ if (rec->size > 0) {
+ nw = write(fd, rec->data, rec->size);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ if (nw != (ssize_t)rec->size) {
+ /* Try a couple of times to finish the write. */
+ wp = (u_int8_t *)rec->data + nw;
+ bytes_left = rec->size - nw;
+ for (retry = 0; bytes_left > 0 && retry < 3; retry++) {
+ nw = write(fd, wp, bytes_left);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ bytes_left -= nw;
+ wp += nw;
+ }
+ if (bytes_left > 0)
+ return (DB_REP_UNAVAIL);
+ }
+ }
+
+ nw = write(fd, &control->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+ if (control->size > 0) {
+ nw = write(fd, control->data, control->size);
+ if (nw != (ssize_t)control->size)
+ return (DB_REP_UNAVAIL);
+ }
+ return (0);
+}
diff --git a/libdb/examples_c/ex_repquote/ex_rq_util.c b/libdb/examples_c/ex_repquote/ex_rq_util.c
new file mode 100644
index 0000000..40ef235
--- /dev/null
+++ b/libdb/examples_c/ex_repquote/ex_rq_util.c
@@ -0,0 +1,412 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static int connect_site __P((DB_ENV *, machtab_t *, const char *,
+ repsite_t *, int *, int *));
+void * elect_thread __P((void *));
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} elect_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ const char *progname;
+ const char *home;
+ int fd;
+ u_int32_t eid;
+ machtab_t *tab;
+} hm_loop_args;
+
+/*
+ * This is a generic message handling loop that is used both by the
+ * master to accept messages from a client and by clients
+ * to communicate with other clients.
+ */
+void *
+hm_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT rec, control;
+ const char *c, *home, *progname;
+ int fd, eid, n, newm;
+ int open, pri, r, ret, t_ret, tmpid;
+ elect_args *ea;
+ hm_loop_args *ha;
+ machtab_t *tab;
+ pthread_t elect_thr;
+ repsite_t self;
+ u_int32_t timeout;
+ void *status;
+
+ ea = NULL;
+
+ ha = (hm_loop_args *)args;
+ dbenv = ha->dbenv;
+ fd = ha->fd;
+ home = ha->home;
+ eid = ha->eid;
+ progname = ha->progname;
+ tab = ha->tab;
+ free(ha);
+
+ memset(&rec, 0, sizeof(DBT));
+ memset(&control, 0, sizeof(DBT));
+
+ for (ret = 0; ret == 0;) {
+ if ((ret = get_next_message(fd, &rec, &control)) != 0) {
+ /*
+			 * Close this connection; if it was the master,
+			 * call for an election.
+ */
+ close(fd);
+ if ((ret = machtab_rem(tab, eid, 1)) != 0)
+ break;
+
+ /*
+ * If I'm the master, I just lost a client and this
+ * thread is done.
+ */
+ if (master_eid == SELF_EID)
+ break;
+
+ /*
+ * If I was talking with the master and the master
+ * went away, I need to call an election; else I'm
+ * done.
+ */
+ if (master_eid != eid)
+ break;
+
+ master_eid = DB_EID_INVALID;
+ machtab_parm(tab, &n, &pri, &timeout);
+ if ((ret = dbenv->rep_elect(dbenv,
+ n, pri, timeout, &newm)) != 0)
+ continue;
+
+ /*
+ * Regardless of the results, the site I was talking
+ * to is gone, so I have nothing to do but exit.
+ */
+ if (newm == SELF_EID && (ret =
+ dbenv->rep_start(dbenv, NULL, DB_REP_MASTER)) == 0)
+ ret = domaster(dbenv, progname);
+ break;
+ }
+
+ tmpid = eid;
+ switch(r = dbenv->rep_process_message(dbenv,
+ &control, &rec, &tmpid)) {
+ case DB_REP_NEWSITE:
+ /*
+			 * Check whether we were sent connect information and,
+			 * if we were, whether it refers to us or to a site we
+			 * already have a connection to.  If neither, establish
+			 * a new connection.
+ */
+
+ /* No connect info. */
+ if (rec.size == 0)
+ break;
+
+ /* It's me, do nothing. */
+ if (strncmp(myaddr, rec.data, rec.size) == 0)
+ break;
+
+ self.host = (char *)rec.data;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ dbenv->errx(dbenv, "Bad host specification");
+ goto out;
+ }
+ self.port = atoi(c);
+
+ /*
+ * We try to connect to the new site. If we can't,
+ * we treat it as an error since we know that the site
+ * should be up if we got a message from it (even
+ * indirectly).
+ */
+ if ((ret = connect_site(dbenv,
+ tab, progname, &self, &open, &eid)) != 0)
+ goto out;
+ break;
+ case DB_REP_HOLDELECTION:
+ if (master_eid == SELF_EID)
+ break;
+ /* Make sure that previous election has finished. */
+ if (ea != NULL) {
+ (void)pthread_join(elect_thr, &status);
+ ea = NULL;
+ }
+ if ((ea = calloc(sizeof(elect_args), 1)) == NULL) {
+ ret = errno;
+ goto out;
+ }
+ ea->dbenv = dbenv;
+ ea->machtab = tab;
+ ret = pthread_create(&elect_thr,
+ NULL, elect_thread, (void *)ea);
+ break;
+ case DB_REP_NEWMASTER:
+ /* Check if it's us. */
+ master_eid = tmpid;
+ if (tmpid == SELF_EID) {
+ if ((ret = dbenv->rep_start(dbenv,
+ NULL, DB_REP_MASTER)) != 0)
+ goto out;
+ ret = domaster(dbenv, progname);
+ }
+ break;
+ case 0:
+ break;
+ default:
+ dbenv->err(dbenv, r, "DB_ENV->rep_process_message");
+ break;
+ }
+ }
+
+out: if ((t_ret = machtab_rem(tab, eid, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't close the environment before any children exit. */
+ if (ea != NULL)
+ (void)pthread_join(elect_thr, &status);
+
+ return ((void *)ret);
+}
+
+/*
+ * This is a generic thread function that listens for connections on a
+ * socket and spawns off child threads to handle each new
+ * connection.
+ */
+void *
+connect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ const char *home, *progname;
+ int fd, i, eid, ns, port, ret;
+ hm_loop_args *ha;
+ connect_args *cargs;
+ machtab_t *machtab;
+#define MAX_THREADS 25
+ pthread_t hm_thrs[MAX_THREADS];
+ pthread_attr_t attr;
+
+ ha = NULL;
+ cargs = (connect_args *)args;
+ dbenv = cargs->dbenv;
+ home = cargs->home;
+ progname = cargs->progname;
+ machtab = cargs->machtab;
+ port = cargs->port;
+
+ if ((ret = pthread_attr_init(&attr)) != 0)
+ return ((void *)EXIT_FAILURE);
+
+ if ((ret =
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ /*
+ * Loop forever, accepting connections from new machines,
+ * and forking off a thread to handle each.
+ */
+ if ((fd = listen_socket_init(progname, port)) < 0) {
+ ret = errno;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ if ((ns = listen_socket_accept(machtab,
+ progname, fd, &eid)) < 0) {
+ ret = errno;
+ goto err;
+ }
+ if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL)
+ goto err;
+ ha->progname = progname;
+ ha->home = home;
+ ha->fd = ns;
+ ha->eid = eid;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+		if ((ret = pthread_create(&hm_thrs[i], &attr,
+ hm_loop, (void *)ha)) != 0)
+ goto err;
+ ha = NULL;
+ }
+
+ /* If we fell out, we ended up with too many threads. */
+ dbenv->errx(dbenv, "Too many threads");
+ ret = ENOMEM;
+
+err: pthread_attr_destroy(&attr);
+ return (ret == 0 ? (void *)EXIT_SUCCESS : (void *)EXIT_FAILURE);
+}
+
+/*
+ * Open a connection to everyone that we've been told about. If we
+ * cannot open some connections, keep trying.
+ */
+void *
+connect_all(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ all_args *aa;
+ const char *home, *progname;
+ hm_loop_args *ha;
+ int failed, i, eid, nsites, open, ret, *success;
+ machtab_t *machtab;
+ repsite_t *sites;
+
+ ha = NULL;
+ aa = (all_args *)args;
+ dbenv = aa->dbenv;
+ progname = aa->progname;
+ home = aa->home;
+ machtab = aa->machtab;
+ nsites = aa->nsites;
+ sites = aa->sites;
+
+ ret = 0;
+
+ /* Some implementations of calloc are sad about alloc'ing 0 things. */
+ if ((success = calloc(nsites > 0 ? nsites : 1, sizeof(int))) == NULL) {
+ dbenv->err(dbenv, errno, "connect_all");
+ ret = 1;
+ goto err;
+ }
+
+ for (failed = nsites; failed > 0;) {
+ for (i = 0; i < nsites; i++) {
+ if (success[i])
+ continue;
+
+ ret = connect_site(dbenv, machtab,
+ progname, &sites[i], &open, &eid);
+
+ /*
+ * If we couldn't make the connection, this isn't
+ * fatal to the loop, but we have nothing further
+ * to do on this machine at the moment.
+ */
+ if (ret == DB_REP_UNAVAIL)
+ continue;
+
+ if (ret != 0)
+ goto err;
+
+ failed--;
+ success[i] = 1;
+
+ /* If the connection is already open, we're done. */
+ if (ret == 0 && open == 1)
+ continue;
+
+ }
+ sleep(1);
+ }
+
+err: free(success);
+ return (ret ? (void *)EXIT_FAILURE : (void *)EXIT_SUCCESS);
+}
+
+int
+connect_site(dbenv, machtab, progname, site, is_open, eidp)
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+ const char *progname;
+ repsite_t *site;
+ int *is_open;
+ int *eidp;
+{
+ int ret, s;
+ hm_loop_args *ha;
+ pthread_t hm_thr;
+
+ if ((s = get_connected_socket(machtab, progname,
+ site->host, site->port, is_open, eidp)) < 0)
+ return (DB_REP_UNAVAIL);
+
+ if (*is_open)
+ return (0);
+
+ if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL) {
+ ret = errno;
+ goto err;
+ }
+
+ ha->progname = progname;
+ ha->fd = s;
+ ha->eid = *eidp;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+
+ if ((ret = pthread_create(&hm_thr, NULL,
+ hm_loop, (void *)ha)) != 0) {
+ dbenv->err(dbenv, ret, "connect site");
+ goto err1;
+ }
+
+ return (0);
+
+err1: free(ha);
+err:
+ return (ret);
+}
+
+/*
+ * We need to spawn off a new thread in which to hold an election in
+ * case we are the only thread listening for messages.
+ */
+void *
+elect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ elect_args *eargs;
+ int n, ret, pri;
+ machtab_t *machtab;
+ u_int32_t timeout;
+
+ eargs = (elect_args *)args;
+ dbenv = eargs->dbenv;
+ machtab = eargs->machtab;
+ free(eargs);
+
+ machtab_parm(machtab, &n, &pri, &timeout);
+ while ((ret =
+ dbenv->rep_elect(dbenv, n, pri, timeout, &master_eid)) != 0)
+ sleep(2);
+
+ /* Check if it's us. */
+ if (master_eid == SELF_EID)
+ ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+
+ return ((void *)(ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE));
+}
diff --git a/libdb/examples_c/ex_thread.c b/libdb/examples_c/ex_thread.c
new file mode 100644
index 0000000..191d593
--- /dev/null
+++ b/libdb/examples_c/ex_thread.c
@@ -0,0 +1,629 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+/*
+ * NB: This application is written using POSIX 1003.1b-1993 pthreads
+ * interfaces, which may not be portable to your system.
+ */
+extern int sched_yield __P((void)); /* Pthread yield function. */
+
+int db_init __P((const char *));
+void *deadlock __P((void *));
+void fatal __P((const char *, int, int));
+void onint __P((int));
+int main __P((int, char *[]));
+int reader __P((int));
+void stats __P((void));
+void *trickle __P((void *));
+void *tstart __P((void *));
+int usage __P((void));
+void word __P((void));
+int writer __P((int));
+
+int quit; /* Interrupt handling flag. */
+
+struct _statistics {
+ int aborted; /* Write. */
+ int aborts; /* Read/write. */
+ int adds; /* Write. */
+ int deletes; /* Write. */
+ int txns; /* Write. */
+ int found; /* Read. */
+ int notfound; /* Read. */
+} *perf;
+
+const char
+ *progname = "ex_thread"; /* Program name. */
+
+#define DATABASE "access.db" /* Database name. */
+#define WORDLIST "../test/wordlist" /* Dictionary. */
+
+/*
+ * We can seriously increase the number of collisions and transaction
+ * aborts by yielding the scheduler after every DB call. Specify the
+ * -p option to do this.
+ */
+int punish; /* -p */
+int nlist; /* -n */
+int nreaders; /* -r */
+int verbose; /* -v */
+int nwriters; /* -w */
+
+DB *dbp; /* Database handle. */
+DB_ENV *dbenv; /* Database environment. */
+int nthreads; /* Total threads. */
+char **list; /* Word list. */
+
+/*
+ * ex_thread --
+ *	Run a simple threaded application with some number of readers and
+ * writers competing for a set of words.
+ *
+ * Example UNIX shell script to run this program:
+ * % rm -rf TESTDIR
+ * % mkdir TESTDIR
+ * % ex_thread -h TESTDIR
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int errno, optind;
+ DB_TXN *txnp;
+ pthread_t *tids;
+ int ch, i, ret;
+ const char *home;
+ void *retp;
+
+ txnp = NULL;
+ nlist = 1000;
+ nreaders = nwriters = 4;
+ home = "TESTDIR";
+ while ((ch = getopt(argc, argv, "h:pn:r:vw:")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'p':
+ punish = 1;
+ break;
+ case 'n':
+ nlist = atoi(optarg);
+ break;
+ case 'r':
+ nreaders = atoi(optarg);
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w':
+ nwriters = atoi(optarg);
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Initialize the random number generator. */
+ srand(getpid() | time(NULL));
+
+ /* Register the signal handler. */
+ (void)signal(SIGINT, onint);
+
+ /* Build the key list. */
+ word();
+
+ /* Remove the previous database. */
+ (void)remove(DATABASE);
+
+ /* Initialize the database environment. */
+ if ((ret = db_init(home)) != 0)
+ return (ret);
+
+ /* Initialize the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ (void)dbenv->close(dbenv, 0);
+ return (EXIT_FAILURE);
+ }
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err;
+ }
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ fatal("txn_begin", ret, 1);
+ if ((ret = dbp->open(dbp, txnp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE | DB_THREAD, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err;
+ } else {
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err;
+ }
+
+ nthreads = nreaders + nwriters + 2;
+ printf("Running: readers %d, writers %d\n", nreaders, nwriters);
+ fflush(stdout);
+
+ /* Create statistics structures, offset by 1. */
+ if ((perf = calloc(nreaders + nwriters + 1, sizeof(*perf))) == NULL)
+ fatal(NULL, errno, 1);
+
+ /* Create thread ID structures. */
+ if ((tids = malloc(nthreads * sizeof(pthread_t))) == NULL)
+ fatal(NULL, errno, 1);
+
+ /* Create reader/writer threads. */
+ for (i = 0; i < nreaders + nwriters; ++i)
+ if ((ret =
+ pthread_create(&tids[i], NULL, tstart, (void *)i)) != 0)
+ fatal("pthread_create", ret > 0 ? ret : errno, 1);
+
+ /* Create buffer pool trickle thread. */
+ if (pthread_create(&tids[i], NULL, trickle, &i))
+ fatal("pthread_create", errno, 1);
+ ++i;
+
+ /* Create deadlock detector thread. */
+ if (pthread_create(&tids[i], NULL, deadlock, &i))
+ fatal("pthread_create", errno, 1);
+
+ /* Wait for the threads. */
+ for (i = 0; i < nthreads; ++i)
+ (void)pthread_join(tids[i], &retp);
+
+ printf("Exiting\n");
+ stats();
+
+err: if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ (void)dbp->close(dbp, 0);
+ (void)dbenv->close(dbenv, 0);
+
+ return (EXIT_SUCCESS);
+}
+
+int
+reader(id)
+ int id;
+{
+ DBT key, data;
+ int n, ret;
+ char buf[64];
+
+ /*
+ * DBT's must use local memory or malloc'd memory if the DB handle
+ * is accessed in a threaded fashion.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ data.flags = DB_DBT_MALLOC;
+
+ /*
+ * Read-only threads do not require transaction protection, unless
+ * there's a need for repeatable reads.
+ */
+ while (!quit) {
+ /* Pick a key at random, and look it up. */
+ n = rand() % nlist;
+ key.data = list[n];
+ key.size = strlen(key.data);
+
+ if (verbose) {
+ sprintf(buf, "reader: %d: list entry %d\n", id, n);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK: /* Deadlock. */
+ ++perf[id].aborts;
+ break;
+ case 0: /* Success. */
+ ++perf[id].found;
+ free(data.data);
+ break;
+ case DB_NOTFOUND: /* Not found. */
+ ++perf[id].notfound;
+ break;
+ default:
+ sprintf(buf,
+ "reader %d: dbp->get: %s", id, (char *)key.data);
+ fatal(buf, ret, 0);
+ }
+ }
+ return (0);
+}
+
+int
+writer(id)
+ int id;
+{
+ DBT key, data;
+ DB_TXN *tid;
+ time_t now, then;
+ int n, ret;
+ char buf[256], dbuf[10000];
+
+ time(&now);
+ then = now;
+
+ /*
+ * DBT's must use local memory or malloc'd memory if the DB handle
+ * is accessed in a threaded fashion.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ data.data = dbuf;
+ data.ulen = sizeof(dbuf);
+ data.flags = DB_DBT_USERMEM;
+
+ while (!quit) {
+ /* Pick a random key. */
+ n = rand() % nlist;
+ key.data = list[n];
+ key.size = strlen(key.data);
+
+ if (verbose) {
+ sprintf(buf, "writer: %d: list entry %d\n", id, n);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ /* Abort and retry. */
+ if (0) {
+retry: if ((ret = tid->abort(tid)) != 0)
+ fatal("DB_TXN->abort", ret, 1);
+ ++perf[id].aborts;
+ ++perf[id].aborted;
+ }
+
+ /* Thread #1 prints out the stats every 20 seconds. */
+ if (id == 1) {
+ time(&now);
+ if (now - then >= 20) {
+ stats();
+ then = now;
+ }
+ }
+
+ /* Begin the transaction. */
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0)
+ fatal("txn_begin", ret, 1);
+
+ /*
+ * Get the key. If it doesn't exist, add it. If it does
+ * exist, delete it.
+ */
+ switch (ret = dbp->get(dbp, tid, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ goto delete;
+ case DB_NOTFOUND:
+ goto add;
+ }
+
+ sprintf(buf, "writer: %d: dbp->get", id);
+ fatal(buf, ret, 1);
+ /* NOTREACHED */
+
+delete: /* Delete the key. */
+ switch (ret = dbp->del(dbp, tid, &key, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ ++perf[id].deletes;
+ goto commit;
+ }
+
+ sprintf(buf, "writer: %d: dbp->del", id);
+ fatal(buf, ret, 1);
+ /* NOTREACHED */
+
+add: /* Add the key. 1 data item in 30 is an overflow item. */
+ data.size = 20 + rand() % 128;
+ if (rand() % 30 == 0)
+ data.size += 8192;
+
+ switch (ret = dbp->put(dbp, tid, &key, &data, 0)) {
+ case DB_LOCK_DEADLOCK:
+ goto retry;
+ case 0:
+ ++perf[id].adds;
+ goto commit;
+ default:
+ sprintf(buf, "writer: %d: dbp->put", id);
+ fatal(buf, ret, 1);
+ }
+
+commit: /* The transaction finished, commit it. */
+ if ((ret = tid->commit(tid, 0)) != 0)
+ fatal("DB_TXN->commit", ret, 1);
+
+ /*
+ * Every time the thread completes 20 transactions, show
+ * our progress.
+ */
+ if (++perf[id].txns % 20 == 0) {
+ sprintf(buf,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d\n",
+ id, perf[id].adds, perf[id].deletes,
+ perf[id].aborts, perf[id].txns);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+
+ /*
+ * If this thread was aborted more than 5 times before
+ * the transaction finished, complain.
+ */
+ if (perf[id].aborted > 5) {
+ sprintf(buf,
+"writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d: ABORTED: %2d\n",
+ id, perf[id].adds, perf[id].deletes,
+ perf[id].aborts, perf[id].txns, perf[id].aborted);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+ perf[id].aborted = 0;
+ }
+ return (0);
+}
+
+/*
+ * stats --
+ * Display reader/writer thread statistics. To display the statistics
+ * for the mpool trickle or deadlock threads, use db_stat(1).
+ */
+void
+stats()
+{
+ int id;
+ char *p, buf[8192];
+
+ p = buf + sprintf(buf, "-------------\n");
+ for (id = 0; id < nreaders + nwriters;)
+ if (id++ < nwriters)
+ p += sprintf(p,
+ "writer: %2d: adds: %4d: deletes: %4d: aborts: %4d: txns: %4d\n",
+ id, perf[id].adds,
+ perf[id].deletes, perf[id].aborts, perf[id].txns);
+ else
+ p += sprintf(p,
+ "reader: %2d: found: %5d: notfound: %5d: aborts: %4d\n",
+ id, perf[id].found,
+ perf[id].notfound, perf[id].aborts);
+ p += sprintf(p, "-------------\n");
+
+ write(STDOUT_FILENO, buf, p - buf);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_init(home)
+ const char *home;
+{
+ int ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ if (punish) {
+ (void)dbenv->set_flags(dbenv, DB_YIELDCPU, 1);
+ (void)db_env_set_func_yield(sched_yield);
+ }
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ (void)dbenv->set_cachesize(dbenv, 0, 100 * 1024, 0);
+ (void)dbenv->set_lg_max(dbenv, 200000);
+
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD, 0)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ (void)dbenv->close(dbenv, 0);
+ return (EXIT_FAILURE);
+ }
+
+ return (0);
+}
+
+/*
+ * tstart --
+ * Thread start function for readers and writers.
+ */
+void *
+tstart(arg)
+ void *arg;
+{
+ pthread_t tid;
+ u_int id;
+
+ id = (u_int)arg + 1;
+
+ tid = pthread_self();
+
+ if (id <= (u_int)nwriters) {
+ printf("write thread %d starting: tid: %lu\n", id, (u_long)tid);
+ fflush(stdout);
+ writer(id);
+ } else {
+ printf("read thread %d starting: tid: %lu\n", id, (u_long)tid);
+ fflush(stdout);
+ reader(id);
+ }
+
+ /* NOTREACHED */
+ return (NULL);
+}
+
+/*
+ * deadlock --
+ * Thread start function for DB_ENV->lock_detect.
+ */
+void *
+deadlock(arg)
+ void *arg;
+{
+ struct timeval t;
+ pthread_t tid;
+
+ arg = arg; /* XXX: shut the compiler up. */
+ tid = pthread_self();
+
+ printf("deadlock thread starting: tid: %lu\n", (u_long)tid);
+ fflush(stdout);
+
+ t.tv_sec = 0;
+ t.tv_usec = 100000;
+ while (!quit) {
+ (void)dbenv->lock_detect(dbenv, 0, DB_LOCK_YOUNGEST, NULL);
+
+ /* Check every 100ms. */
+ (void)select(0, NULL, NULL, NULL, &t);
+ }
+
+ return (NULL);
+}
+
+/*
+ * trickle --
+ * Thread start function for memp_trickle.
+ */
+void *
+trickle(arg)
+ void *arg;
+{
+ pthread_t tid;
+ int wrote;
+ char buf[64];
+
+ arg = arg; /* XXX: shut the compiler up. */
+ tid = pthread_self();
+
+ printf("trickle thread starting: tid: %lu\n", (u_long)tid);
+ fflush(stdout);
+
+ while (!quit) {
+ (void)dbenv->memp_trickle(dbenv, 10, &wrote);
+ if (verbose) {
+ sprintf(buf, "trickle: wrote %d\n", wrote);
+ write(STDOUT_FILENO, buf, strlen(buf));
+ }
+ if (wrote == 0) {
+ sleep(1);
+ sched_yield();
+ }
+ }
+
+ return (NULL);
+}
+
+/*
+ * word --
+ * Build the dictionary word list.
+ */
+void
+word()
+{
+ FILE *fp;
+ int cnt;
+ char buf[256];
+
+ if ((fp = fopen(WORDLIST, "r")) == NULL)
+ fatal(WORDLIST, errno, 1);
+
+ if ((list = malloc(nlist * sizeof(char *))) == NULL)
+ fatal(NULL, errno, 1);
+
+ for (cnt = 0; cnt < nlist; ++cnt) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ break;
+ if ((list[cnt] = strdup(buf)) == NULL)
+ fatal(NULL, errno, 1);
+ }
+ nlist = cnt; /* In case nlist was larger than possible. */
+}
+
+/*
+ * fatal --
+ * Report a fatal error and quit.
+ */
+void
+fatal(msg, err, syserr)
+ const char *msg;
+ int err, syserr;
+{
+ fprintf(stderr, "%s: ", progname);
+ if (msg != NULL) {
+ fprintf(stderr, "%s", msg);
+ if (syserr)
+ fprintf(stderr, ": ");
+ }
+ if (syserr)
+ fprintf(stderr, "%s", strerror(err));
+ fprintf(stderr, "\n");
+ exit(EXIT_FAILURE);
+
+ /* NOTREACHED */
+}
+
+/*
+ * usage --
+ * Usage message.
+ */
+int
+usage()
+{
+ (void)fprintf(stderr,
+ "usage: %s [-pv] [-h home] [-n words] [-r readers] [-w writers]\n",
+ progname);
+ return (EXIT_FAILURE);
+}
+
+/*
+ * onint --
+ * Interrupt signal handler.
+ */
+void
+onint(signo)
+ int signo;
+{
+ signo = 0; /* Quiet compiler. */
+ quit = 1;
+}
diff --git a/libdb/examples_c/ex_tpcb.c b/libdb/examples_c/ex_tpcb.c
new file mode 100644
index 0000000..6667c2c
--- /dev/null
+++ b/libdb/examples_c/ex_tpcb.c
@@ -0,0 +1,698 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db.h>
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+DB_ENV *db_init __P((const char *, const char *, int, int, u_int32_t));
+int hpopulate __P((DB *, int, int, int, int));
+int populate __P((DB *, u_int32_t, u_int32_t, int, const char *));
+u_int32_t random_id __P((FTYPE, int, int, int));
+u_int32_t random_int __P((u_int32_t, u_int32_t));
+int tp_populate __P((DB_ENV *, int, int, int, int, int));
+int tp_run __P((DB_ENV *, int, int, int, int, int));
+int tp_txn __P((DB_ENV *, DB *, DB *, DB *, DB *, int, int, int, int));
+
+int invarg __P((const char *, int, const char *));
+int main __P((int, char *[]));
+int usage __P((const char *));
+
+/*
+ * This program implements a basic TPC/B driver program. To create the
+ * TPC/B database, run with the -i (init) flag. The number of records
+ * with which to populate the account, history, branch, and teller tables
+ * is specified by the a, s, b, and t flags respectively. To run a TPC/B
+ * test, use the n flag to indicate a number of transactions to run (note
+ * that you can run many of these processes in parallel to simulate a
+ * multiuser test run).
+ */
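+
+/*
+ * For example (illustrative only), a small test might be initialized and
+ * then driven with 1000 transactions as follows:
+ *
+ *	% mkdir TESTDIR
+ *	% ex_tpcb -i -h TESTDIR
+ *	% ex_tpcb -n 1000 -h TESTDIR
+ */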
+#define TELLERS_PER_BRANCH 10
+#define ACCOUNTS_PER_TELLER 10000
+#define HISTORY_PER_BRANCH 2592000
+
+/*
+ * The default configuration that adheres to TPCB scaling rules requires
+ * nearly 3 GB of space. To avoid requiring that much space for testing,
+ * we set the parameters much lower. If you want to run a valid 10 TPS
+ * configuration, define VALID_SCALING.
+ */
+#ifdef VALID_SCALING
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 25920000
+#endif
+
+#ifdef TINY
+#define ACCOUNTS 1000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 10000
+#endif
+
+#ifdef VERY_TINY
+#define ACCOUNTS 500
+#define BRANCHES 10
+#define TELLERS 50
+#define HISTORY 5000
+#endif
+
+#if !defined(VALID_SCALING) && !defined(TINY) && !defined(VERY_TINY)
+#define ACCOUNTS 100000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 259200
+#endif
+
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
+typedef struct _defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+} defrec;
+
+typedef struct _histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+} histrec;
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ int accounts, branches, seed, tellers, history;
+ int ch, iflag, mpool, ntxns, ret, txn_no_sync, verbose;
+ const char *home, *progname;
+
+ home = "TESTDIR";
+ progname = "ex_tpcb";
+ accounts = branches = history = tellers = 0;
+ iflag = mpool = ntxns = txn_no_sync = verbose = 0;
+ seed = (int)time(NULL);
+
+ while ((ch = getopt(argc, argv, "a:b:c:fh:in:S:s:t:v")) != EOF)
+ switch (ch) {
+ case 'a': /* Number of account records */
+ if ((accounts = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 'b': /* Number of branch records */
+ if ((branches = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 'c': /* Cachesize in bytes */
+ if ((mpool = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 'f': /* Fast mode: no txn sync. */
+ txn_no_sync = 1;
+ break;
+ case 'h': /* DB home. */
+ home = optarg;
+ break;
+ case 'i': /* Initialize the test. */
+ iflag = 1;
+ break;
+ case 'n': /* Number of transactions */
+ if ((ntxns = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 'S': /* Random number seed. */
+ if ((seed = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 's': /* Number of history records */
+ if ((history = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 't': /* Number of teller records */
+ if ((tellers = atoi(optarg)) <= 0)
+ return (invarg(progname, ch, optarg));
+ break;
+ case 'v': /* Verbose option. */
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage(progname));
+ }
+ argc -= optind;
+ argv += optind;
+
+ srand((u_int)seed);
+
+ /* Initialize the database environment. */
+ if ((dbenv = db_init(home,
+ progname, mpool, iflag, txn_no_sync ? DB_TXN_NOSYNC : 0)) == NULL)
+ return (EXIT_FAILURE);
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ printf("%ld Accounts, %ld Branches, %ld Tellers, %ld History\n",
+ (long)accounts, (long)branches,
+ (long)tellers, (long)history);
+
+ if (iflag) {
+ if (ntxns != 0)
+ return (usage(progname));
+ tp_populate(dbenv,
+ accounts, branches, history, tellers, verbose);
+ } else {
+ if (ntxns == 0)
+ return (usage(progname));
+ tp_run(dbenv, ntxns, accounts, branches, tellers, verbose);
+ }
+
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: dbenv->close failed: %s\n",
+ progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+
+ return (EXIT_SUCCESS);
+}
+
+int
+invarg(progname, arg, str)
+ const char *progname;
+ int arg;
+ const char *str;
+{
+ (void)fprintf(stderr,
+ "%s: invalid argument for -%c: %s\n", progname, arg, str);
+ return (EXIT_FAILURE);
+}
+
+int
+usage(progname)
+ const char *progname;
+{
+ const char *a1, *a2;
+
+ a1 = "[-fv] [-a accounts] [-b branches]\n";
+ a2 = "\t[-c cache_size] [-h home] [-S seed] [-s history] [-t tellers]";
+ (void)fprintf(stderr, "usage: %s -i %s %s\n", progname, a1, a2);
+ (void)fprintf(stderr,
+ " %s -n transactions %s %s\n", progname, a1, a2);
+ return (EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home, prefix, cachesize, initializing, flags)
+ const char *home, *prefix;
+ int cachesize, initializing;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ u_int32_t local_flags;
+ int ret;
+
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr, "%s: db_env_create: %s\n",
+		    prefix, db_strerror(ret));
+		return (NULL);
+	}
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ (void)dbenv->set_cachesize(dbenv, 0,
+ cachesize == 0 ? 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ if (flags & (DB_TXN_NOSYNC))
+ (void)dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1);
+ flags &= ~(DB_TXN_NOSYNC);
+
+ local_flags = flags | DB_CREATE | (initializing ? DB_INIT_MPOOL :
+ DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL);
+ if ((ret = dbenv->open(dbenv, home, local_flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+ (void)dbenv->close(dbenv, 0);
+ return (NULL);
+ }
+ return (dbenv);
+}
+
+/*
+ * Initialize the database to the specified number of accounts, branches,
+ * history records, and tellers.
+ */
+int
+tp_populate(env, accounts, branches, history, tellers, verbose)
+ DB_ENV *env;
+ int accounts, branches, history, tellers, verbose;
+{
+ DB *dbp;
+ u_int32_t balance, idnum, oflags;
+ u_int32_t end_anum, end_bnum, end_tnum;
+ u_int32_t start_anum, start_bnum, start_tnum;
+ int ret;
+
+ idnum = BEGID;
+ balance = 500000;
+ oflags = DB_CREATE | DB_TRUNCATE;
+
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (1);
+ }
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)accounts);
+
+ if ((ret = dbp->open(dbp, NULL, "account", NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: account");
+ return (1);
+ }
+
+ start_anum = idnum;
+ populate(dbp, idnum, balance, accounts, "account");
+ idnum += accounts;
+ end_anum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: account");
+ return (1);
+ }
+ if (verbose)
+ printf("Populated accounts: %ld - %ld\n",
+ (long)start_anum, (long)end_anum);
+
+ /*
+ * Since the number of branches is very small, we want to use very
+ * small pages and only 1 key per page, i.e., key-locking instead
+ * of page locking.
+ */
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (1);
+ }
+ (void)dbp->set_h_ffactor(dbp, 1);
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)branches);
+ (void)dbp->set_pagesize(dbp, 512);
+ if ((ret = dbp->open(dbp, NULL, "branch", NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: branch");
+ return (1);
+ }
+ start_bnum = idnum;
+ populate(dbp, idnum, balance, branches, "branch");
+ idnum += branches;
+ end_bnum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: branch");
+ return (1);
+ }
+ if (verbose)
+ printf("Populated branches: %ld - %ld\n",
+ (long)start_bnum, (long)end_bnum);
+
+ /*
+ * In the case of tellers, we also want small pages, but we'll let
+ * the fill factor dynamically adjust itself.
+ */
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (1);
+ }
+ (void)dbp->set_h_ffactor(dbp, 0);
+ (void)dbp->set_h_nelem(dbp, (u_int32_t)tellers);
+ (void)dbp->set_pagesize(dbp, 512);
+ if ((ret = dbp->open(dbp, NULL, "teller", NULL,
+ DB_HASH, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: teller");
+ return (1);
+ }
+
+ start_tnum = idnum;
+ populate(dbp, idnum, balance, tellers, "teller");
+ idnum += tellers;
+ end_tnum = idnum - 1;
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: teller");
+ return (1);
+ }
+ if (verbose)
+ printf("Populated tellers: %ld - %ld\n",
+ (long)start_tnum, (long)end_tnum);
+
+ if ((ret = db_create(&dbp, env, 0)) != 0) {
+ env->err(env, ret, "db_create");
+ return (1);
+ }
+ (void)dbp->set_re_len(dbp, HISTORY_LEN);
+ if ((ret = dbp->open(dbp, NULL, "history", NULL,
+ DB_RECNO, oflags, 0644)) != 0) {
+ env->err(env, ret, "DB->open: history");
+ return (1);
+ }
+
+ hpopulate(dbp, history, accounts, branches, tellers);
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ env->err(env, ret, "DB->close: history");
+ return (1);
+ }
+ return (0);
+}
+
+int
+populate(dbp, start_id, balance, nrecs, msg)
+ DB *dbp;
+ u_int32_t start_id, balance;
+ int nrecs;
+ const char *msg;
+{
+ DBT kdbt, ddbt;
+ defrec drec;
+ int i, ret;
+
+ kdbt.flags = 0;
+ kdbt.data = &drec.id;
+ kdbt.size = sizeof(u_int32_t);
+ ddbt.flags = 0;
+ ddbt.data = &drec;
+ ddbt.size = sizeof(drec);
+ memset(&drec.pad[0], 1, sizeof(drec.pad));
+
+ for (i = 0; i < nrecs; i++) {
+ drec.id = start_id + (u_int32_t)i;
+ drec.balance = balance;
+ if ((ret =
+ (dbp->put)(dbp, NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
+ dbp->err(dbp,
+ ret, "Failure initializing %s file\n", msg);
+ return (1);
+ }
+ }
+ return (0);
+}
+
+int
+hpopulate(dbp, history, accounts, branches, tellers)
+ DB *dbp;
+ int history, accounts, branches, tellers;
+{
+ DBT kdbt, ddbt;
+ histrec hrec;
+ db_recno_t key;
+ int i, ret;
+
+ memset(&kdbt, 0, sizeof(kdbt));
+ memset(&ddbt, 0, sizeof(ddbt));
+ ddbt.data = &hrec;
+ ddbt.size = sizeof(hrec);
+ kdbt.data = &key;
+ kdbt.size = sizeof(key);
+ memset(&hrec.pad[0], 1, sizeof(hrec.pad));
+ hrec.amount = 10;
+
+ for (i = 1; i <= history; i++) {
+ hrec.aid = random_id(ACCOUNT, accounts, branches, tellers);
+ hrec.bid = random_id(BRANCH, accounts, branches, tellers);
+ hrec.tid = random_id(TELLER, accounts, branches, tellers);
+ if ((ret = dbp->put(dbp, NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
+ dbp->err(dbp, ret, "dbp->put");
+ return (1);
+ }
+ }
+ return (0);
+}
+
+u_int32_t
+random_int(lo, hi)
+ u_int32_t lo, hi;
+{
+ u_int32_t ret;
+ int t;
+
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ ret = (u_int32_t)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+}
+
+u_int32_t
+random_id(type, accounts, branches, tellers)
+ FTYPE type;
+ int accounts, branches, tellers;
+{
+ u_int32_t min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ /* FALLTHROUGH */
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ /* FALLTHROUGH */
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+}
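+
+/*
+ * Illustration of the resulting ID space (not in the original source):
+ * random_id(ACCOUNT, ...) draws from [BEGID, BEGID + accounts - 1],
+ * random_id(BRANCH, ...) from the next "branches" IDs, and
+ * random_id(TELLER, ...) from the "tellers" IDs after that, matching
+ * the order in which tp_populate() assigns IDs.
+ */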
+
+int
+tp_run(dbenv, n, accounts, branches, tellers, verbose)
+ DB_ENV *dbenv;
+ int n, accounts, branches, tellers, verbose;
+{
+ DB *adb, *bdb, *hdb, *tdb;
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ time_t starttime, curtime, lasttime;
+
+ adb = bdb = hdb = tdb = NULL;
+ txns = failed = 0;
+
+ /*
+ * Open the database files.
+ */
+ if ((ret = db_create(&adb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ if ((ret = adb->open(adb, NULL, "account", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: account");
+ goto err;
+ }
+ if ((ret = db_create(&bdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ if ((ret = bdb->open(bdb, NULL, "branch", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: branch");
+ goto err;
+ }
+ if ((ret = db_create(&hdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ if ((ret = hdb->open(hdb, NULL, "history", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: history");
+ goto err;
+ }
+ if ((ret = db_create(&tdb, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ if ((ret = tdb->open(tdb, NULL, "teller", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: teller");
+ goto err;
+ }
+
+ starttime = time(NULL);
+ lasttime = starttime;
+ for (ifailed = 0; n-- > 0;) {
+ txns++;
+ ret = tp_txn(dbenv, adb, bdb, tdb, hdb,
+ accounts, branches, tellers, verbose);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = time(NULL);
+ gtps = (double)(txns - failed) / (curtime - starttime);
+ itps = (double)(5000 - ifailed) / (curtime - lasttime);
+ printf("%d txns %d failed ", txns, failed);
+ printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
+ gtps, itps);
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+err: if (adb != NULL)
+ (void)adb->close(adb, 0);
+ if (bdb != NULL)
+ (void)bdb->close(bdb, 0);
+ if (tdb != NULL)
+ (void)tdb->close(tdb, 0);
+ if (hdb != NULL)
+ (void)hdb->close(hdb, 0);
+
+ printf("%ld transactions begun %ld failed\n", (long)txns, (long)failed);
+ return (ret == 0 ? 0 : 1);
+}
+
+/*
+ * XXX Figure out the appropriate way to pick out IDs.
+ */
+int
+tp_txn(dbenv, adb, bdb, tdb, hdb, accounts, branches, tellers, verbose)
+ DB_ENV *dbenv;
+ DB *adb, *bdb, *tdb, *hdb;
+ int accounts, branches, tellers, verbose;
+{
+ DBC *acurs, *bcurs, *tcurs;
+ DBT d_dbt, d_histdbt, k_dbt, k_histdbt;
+ DB_TXN *t;
+ db_recno_t key;
+ defrec rec;
+ histrec hrec;
+ int account, branch, teller, ret;
+
+ t = NULL;
+ acurs = bcurs = tcurs = NULL;
+
+ /*
+ * XXX We could move a lot of this into the driver to make this
+ * faster.
+ */
+ account = random_id(ACCOUNT, accounts, branches, tellers);
+ branch = random_id(BRANCH, accounts, branches, tellers);
+ teller = random_id(TELLER, accounts, branches, tellers);
+
+ memset(&d_histdbt, 0, sizeof(d_histdbt));
+
+ memset(&k_histdbt, 0, sizeof(k_histdbt));
+ k_histdbt.data = &key;
+ k_histdbt.size = sizeof(key);
+
+ memset(&k_dbt, 0, sizeof(k_dbt));
+ k_dbt.size = sizeof(int);
+
+ memset(&d_dbt, 0, sizeof(d_dbt));
+ d_dbt.flags = DB_DBT_USERMEM;
+ d_dbt.data = &rec;
+ d_dbt.ulen = sizeof(rec);
+
+ hrec.aid = account;
+ hrec.bid = branch;
+ hrec.tid = teller;
+ hrec.amount = 10;
+ /* Request 0 bytes since we're just positioning. */
+ d_histdbt.flags = DB_DBT_PARTIAL;
+
+ /* START TIMING */
+ if (dbenv->txn_begin(dbenv, NULL, &t, 0) != 0)
+ goto err;
+
+ if (adb->cursor(adb, t, &acurs, 0) != 0 ||
+ bdb->cursor(bdb, t, &bcurs, 0) != 0 ||
+ tdb->cursor(tdb, t, &tcurs, 0) != 0)
+ goto err;
+
+ /* Account record */
+ k_dbt.data = &account;
+ if (acurs->c_get(acurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (acurs->c_put(acurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* Branch record */
+ k_dbt.data = &branch;
+ if (bcurs->c_get(bcurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (bcurs->c_put(bcurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* Teller record */
+ k_dbt.data = &teller;
+ if (tcurs->c_get(tcurs, &k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (tcurs->c_put(tcurs, &k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ /* History record */
+ d_histdbt.flags = 0;
+ d_histdbt.data = &hrec;
+ d_histdbt.ulen = sizeof(hrec);
+ if (hdb->put(hdb, t, &k_histdbt, &d_histdbt, DB_APPEND) != 0)
+ goto err;
+
+ if (acurs->c_close(acurs) != 0 || bcurs->c_close(bcurs) != 0 ||
+ tcurs->c_close(tcurs) != 0)
+ goto err;
+
+ ret = t->commit(t, 0);
+ t = NULL;
+ if (ret != 0)
+ goto err;
+
+ /* END TIMING */
+ return (0);
+
+err: if (acurs != NULL)
+ (void)acurs->c_close(acurs);
+ if (bcurs != NULL)
+ (void)bcurs->c_close(bcurs);
+ if (tcurs != NULL)
+ (void)tcurs->c_close(tcurs);
+ if (t != NULL)
+ (void)t->abort(t);
+
+ if (verbose)
+ printf("Transaction A=%ld B=%ld T=%ld failed\n",
+ (long)account, (long)branch, (long)teller);
+ return (-1);
+}
diff --git a/libdb/examples_c/ex_tpcb.h b/libdb/examples_c/ex_tpcb.h
new file mode 100644
index 0000000..dd447e8
--- /dev/null
+++ b/libdb/examples_c/ex_tpcb.h
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _TPCB_H_
+#define _TPCB_H_
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+#define TELLERS_PER_BRANCH 100
+#define ACCOUNTS_PER_TELLER 1000
+
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 1000
+#define HISTORY 1000000
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
+typedef struct _defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+} defrec;
+
+typedef struct _histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+} histrec;
+#endif /* _TPCB_H_ */
diff --git a/libdb/examples_cxx/AccessExample.cpp b/libdb/examples_cxx/AccessExample.cpp
new file mode 100644
index 0000000..8f3449f
--- /dev/null
+++ b/libdb/examples_cxx/AccessExample.cpp
@@ -0,0 +1,136 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <iomanip>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db_cxx.h>
+
+using std::cin;
+using std::cout;
+using std::cerr;
+
+class AccessExample
+{
+public:
+ AccessExample();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ AccessExample(const AccessExample &);
+ void operator = (const AccessExample &);
+};
+
+int main()
+{
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ AccessExample app;
+ app.run();
+ return (EXIT_SUCCESS);
+ }
+ catch (DbException &dbe) {
+ cerr << "AccessExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+}
+
+const char AccessExample::FileName[] = "access.db";
+
+AccessExample::AccessExample()
+{
+}
+
+void AccessExample::run()
+{
+ // Remove the previous database.
+ (void)remove(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("AccessExample");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+
+ for (;;) {
+ cout << "input> ";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ }
+ cout << "\n";
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ Dbt key;
+ Dbt data;
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "AccessExample: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/libdb/examples_cxx/BtRecExample.cpp b/libdb/examples_cxx/BtRecExample.cpp
new file mode 100644
index 0000000..c3ee5ed
--- /dev/null
+++ b/libdb/examples_cxx/BtRecExample.cpp
@@ -0,0 +1,224 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <iostream>
+#include <iomanip>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db_cxx.h>
+
+using std::cout;
+using std::cerr;
+
+#define DATABASE "access.db"
+#define WORDLIST "../test/wordlist"
+
+const char *progname = "BtRecExample"; // Program name.
+
+class BtRecExample
+{
+public:
+ BtRecExample(FILE *fp);
+ ~BtRecExample();
+ void run();
+ void stats();
+ void show(const char *msg, Dbt *key, Dbt *data);
+
+private:
+ Db *dbp;
+ Dbc *dbcp;
+};
+
+BtRecExample::BtRecExample(FILE *fp)
+{
+	char *p, *t, buf[1024], rbuf[1024];
+	int ret;
+
+	dbcp = NULL;		/* Opened later in run(); checked in the destructor. */
+
+ // Remove the previous database.
+ (void)remove(DATABASE);
+
+ dbp = new Db(NULL, 0);
+
+ dbp->set_error_stream(&cerr);
+ dbp->set_errpfx(progname);
+ dbp->set_pagesize(1024); // 1K page sizes.
+
+ dbp->set_flags(DB_RECNUM); // Record numbers.
+ dbp->open(NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the word
+ // preceded by its record number, and the data is the same, but
+ // in reverse order.
+ //
+
+ for (int cnt = 1; cnt <= 1000; ++cnt) {
+ (void)sprintf(buf, "%04d_", cnt);
+ if (fgets(buf + 4, sizeof(buf) - 4, fp) == NULL)
+ break;
+ u_int32_t len = strlen(buf);
+ buf[len - 1] = '\0';
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ // As a convenience for printing, we include the null terminator
+ // in the stored data.
+ //
+ Dbt key(buf, len);
+ Dbt data(rbuf, len);
+
+ if ((ret = dbp->put(NULL, &key, &data, DB_NOOVERWRITE)) != 0) {
+ dbp->err(ret, "Db::put");
+ if (ret != DB_KEYEXIST)
+ throw DbException(ret);
+ }
+ }
+}
+
+BtRecExample::~BtRecExample()
+{
+ if (dbcp != 0)
+ dbcp->close();
+ dbp->close(0);
+ delete dbp;
+}
+
+//
+// Print out the number of records in the database.
+//
+void BtRecExample::stats()
+{
+ DB_BTREE_STAT *statp;
+
+ dbp->stat(&statp, 0);
+ cout << progname << ": database contains "
+ << (u_long)statp->bt_ndata << " records\n";
+
+ // Note: must use free, not delete.
+ // This struct is allocated by C.
+ //
+ free(statp);
+}
+
+void BtRecExample::run()
+{
+ db_recno_t recno;
+ int ret;
+ char buf[1024];
+
+ // Acquire a cursor for the database.
+ dbp->cursor(NULL, &dbcp, 0);
+
+ //
+ // Prompt the user for a record number, then retrieve and display
+ // that record.
+ //
+ for (;;) {
+ // Get a record number.
+ cout << "recno #> ";
+ cout.flush();
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ recno = atoi(buf);
+
+ //
+ // Start with a fresh key each time,
+ // the dbp->get() routine returns
+ // the key and data pair, not just the key!
+ //
+ Dbt key(&recno, sizeof(recno));
+ Dbt data;
+
+ if ((ret = dbcp->get(&key, &data, DB_SET_RECNO)) != 0) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+
+ // Display the key and data.
+ show("k/d\t", &key, &data);
+
+ // Move the cursor a record forward.
+ if ((ret = dbcp->get(&key, &data, DB_NEXT)) != 0) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+
+ // Display the key and data.
+ show("next\t", &key, &data);
+
+ //
+ // Retrieve the record number for the following record into
+ // local memory.
+ //
+ data.set_data(&recno);
+ data.set_size(sizeof(recno));
+ data.set_ulen(sizeof(recno));
+ data.set_flags(data.get_flags() | DB_DBT_USERMEM);
+
+ if ((ret = dbcp->get(&key, &data, DB_GET_RECNO)) != 0) {
+ if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY) {
+ dbp->err(ret, "DBcursor->get");
+ throw DbException(ret);
+ }
+ }
+ else {
+ cout << "retrieved recno: " << (u_long)recno << "\n";
+ }
+ }
+
+ dbcp->close();
+ dbcp = NULL;
+}
+
+//
+// show --
+// Display a key/data pair.
+//
+void BtRecExample::show(const char *msg, Dbt *key, Dbt *data)
+{
+ cout << msg << (char *)key->get_data()
+ << " : " << (char *)data->get_data() << "\n";
+}
+
+int
+main()
+{
+ FILE *fp;
+
+ // Open the word database.
+ if ((fp = fopen(WORDLIST, "r")) == NULL) {
+ fprintf(stderr, "%s: open %s: %s\n",
+ progname, WORDLIST, db_strerror(errno));
+ return (EXIT_FAILURE);
+ }
+
+ try {
+ BtRecExample app(fp);
+
+ // Close the word database.
+ (void)fclose(fp);
+ fp = NULL;
+
+ app.stats();
+ app.run();
+ }
+ catch (DbException &dbe) {
+ cerr << "Exception: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+
+ return (EXIT_SUCCESS);
+}
diff --git a/libdb/examples_cxx/EnvExample.cpp b/libdb/examples_cxx/EnvExample.cpp
new file mode 100644
index 0000000..43205fa
--- /dev/null
+++ b/libdb/examples_cxx/EnvExample.cpp
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <iostream>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db_cxx.h>
+
+using std::ostream;
+using std::cout;
+using std::cerr;
+
+#ifdef macintosh
+#define DATABASE_HOME ":database"
+#define CONFIG_DATA_DIR ":database"
+#else
+#ifdef DB_WIN32
+#define DATABASE_HOME "\\tmp\\database"
+#define CONFIG_DATA_DIR "\\database\\files"
+#else
+#define DATABASE_HOME "/tmp/database"
+#define CONFIG_DATA_DIR "/database/files"
+#endif
+#endif
+
+void db_setup(const char *, const char *, ostream&);
+void db_teardown(const char *, const char *, ostream&);
+
+const char *progname = "EnvExample"; /* Program name. */
+
+//
+// An example of a program creating/configuring a Berkeley DB environment.
+//
+int
+main(int, char **)
+{
+ //
+ // Note: it may be easiest to put all Berkeley DB operations in a
+ // try block, as seen here. Alternatively, you can change the
+ // ErrorModel in the DbEnv so that exceptions are never thrown
+ // and check error returns from all methods.
+ //
+ try {
+ const char *data_dir, *home;
+
+		//
+		// All of the shared region files live in the environment
+		// home directory (DATABASE_HOME), while the database data
+		// files live under CONFIG_DATA_DIR.
+		//
+ home = DATABASE_HOME;
+ data_dir = CONFIG_DATA_DIR;
+
+ cout << "Setup env\n";
+ db_setup(home, data_dir, cerr);
+
+ cout << "Teardown env\n";
+ db_teardown(home, data_dir, cerr);
+ return (EXIT_SUCCESS);
+ }
+ catch (DbException &dbe) {
+ cerr << "EnvExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+}
+
+// Note that any of the db calls can throw DbException
+void
+db_setup(const char *home, const char *data_dir, ostream& err_stream)
+{
+ //
+ // Create an environment object and initialize it for error
+ // reporting.
+ //
+ DbEnv *dbenv = new DbEnv(0);
+ dbenv->set_error_stream(&err_stream);
+ dbenv->set_errpfx(progname);
+
+ //
+ // We want to specify the shared memory buffer pool cachesize,
+ // but everything else is the default.
+ //
+ dbenv->set_cachesize(0, 64 * 1024, 0);
+
+ // Databases are in a subdirectory.
+ (void)dbenv->set_data_dir(data_dir);
+
+ // Open the environment with full transactional support.
+ dbenv->open(home,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);
+
+ // Do something interesting...
+
+ // Close the handle.
+ dbenv->close(0);
+}
+
+void
+db_teardown(const char *home, const char *data_dir, ostream& err_stream)
+{
+ // Remove the shared database regions.
+ DbEnv *dbenv = new DbEnv(0);
+
+ dbenv->set_error_stream(&err_stream);
+ dbenv->set_errpfx(progname);
+
+ (void)dbenv->set_data_dir(data_dir);
+ dbenv->remove(home, 0);
+ delete dbenv;
+}
diff --git a/libdb/examples_cxx/LockExample.cpp b/libdb/examples_cxx/LockExample.cpp
new file mode 100644
index 0000000..40530ac
--- /dev/null
+++ b/libdb/examples_cxx/LockExample.cpp
@@ -0,0 +1,244 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <iostream>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db_cxx.h>
+
+using std::cin;
+using std::cout;
+using std::cerr;
+
+const char *progname = "LockExample"; // Program name.
+
+//
+// An example of a program using DBLock and related classes.
+//
+class LockExample : public DbEnv
+{
+public:
+ void run();
+ int error_code() { return (ecode); }
+
+ LockExample(const char *home, u_int32_t maxlocks, int do_unlink);
+
+private:
+ static const char FileName[];
+ int ecode;
+
+ // no need for copy and assignment
+ LockExample(const LockExample &);
+ void operator = (const LockExample &);
+};
+
+static int usage(); // forward
+
+int
+main(int argc, char *argv[])
+{
+ const char *home;
+ int do_unlink;
+ u_int32_t maxlocks;
+ int i;
+
+ home = "TESTDIR";
+ maxlocks = 0;
+ do_unlink = 0;
+ for (int argnum = 1; argnum < argc; ++argnum) {
+ if (strcmp(argv[argnum], "-h") == 0) {
+ if (++argnum >= argc)
+ return (usage());
+ home = argv[argnum];
+ }
+ else if (strcmp(argv[argnum], "-m") == 0) {
+ if (++argnum >= argc)
+ return (usage());
+ if ((i = atoi(argv[argnum])) <= 0)
+ return (usage());
+ maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
+ }
+ else if (strcmp(argv[argnum], "-u") == 0) {
+ do_unlink = 1;
+ }
+ else {
+ return (usage());
+ }
+ }
+
+ try {
+ int ecode;
+
+ if (do_unlink) {
+ // Create an environment that immediately
+ // removes all files.
+ LockExample tmp(home, maxlocks, do_unlink);
+ if ((ecode = tmp.error_code()) != 0)
+ return (ecode);
+ }
+
+ LockExample app(home, maxlocks, do_unlink);
+ if ((ecode = app.error_code()) != 0)
+ return (ecode);
+ app.run();
+ app.close(0);
+ return (EXIT_SUCCESS);
+ }
+ catch (DbException &dbe) {
+ cerr << "LockExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+}
+
+LockExample::LockExample(const char *home, u_int32_t maxlocks, int do_unlink)
+: DbEnv(0)
+, ecode(0)
+{
+ int ret;
+
+ if (do_unlink) {
+ if ((ret = remove(home, DB_FORCE)) != 0) {
+ cerr << progname << ": DbEnv::remove: "
+ << strerror(errno) << "\n";
+ ecode = EXIT_FAILURE;
+ }
+ }
+ else {
+ set_error_stream(&cerr);
+ set_errpfx("LockExample");
+ if (maxlocks != 0)
+ set_lk_max_locks(maxlocks);
+ open(home, DB_CREATE | DB_INIT_LOCK, 0);
+ }
+}
+
+void LockExample::run()
+{
+ long held;
+ u_int32_t len, locker;
+ int did_get, ret;
+ DbLock *locks = 0;
+ int lockcount = 0;
+ char objbuf[1024];
+ int lockid = 0;
+
+ //
+ // Accept lock requests.
+ //
+ lock_id(&locker);
+ for (held = 0;;) {
+ cout << "Operation get/release [get]> ";
+ cout.flush();
+
+ char opbuf[16];
+ cin.getline(opbuf, sizeof(opbuf));
+ if (cin.eof())
+ break;
+ if ((len = strlen(opbuf)) <= 1 || strcmp(opbuf, "get") == 0) {
+ // Acquire a lock.
+ cout << "input object (text string) to lock> ";
+ cout.flush();
+ cin.getline(objbuf, sizeof(objbuf));
+ if (cin.eof())
+ break;
+ if ((len = strlen(objbuf)) <= 0)
+ continue;
+
+ char lockbuf[16];
+ do {
+ cout << "lock type read/write [read]> ";
+ cout.flush();
+ cin.getline(lockbuf, sizeof(lockbuf));
+ if (cin.eof())
+ break;
+ len = strlen(lockbuf);
+ } while (len >= 1 &&
+ strcmp(lockbuf, "read") != 0 &&
+ strcmp(lockbuf, "write") != 0);
+
+ db_lockmode_t lock_type;
+ if (len <= 1 || strcmp(lockbuf, "read") == 0)
+ lock_type = DB_LOCK_READ;
+ else
+ lock_type = DB_LOCK_WRITE;
+
+ Dbt dbt(objbuf, strlen(objbuf));
+
+ DbLock lock;
+ ret = lock_get(locker, DB_LOCK_NOWAIT, &dbt,
+ lock_type, &lock);
+ did_get = 1;
+ lockid = lockcount++;
+ if (locks == NULL) {
+ locks = new DbLock[1];
+ }
+ else {
+ DbLock *newlocks = new DbLock[lockcount];
+ for (int lockno = 0; lockno < lockid; lockno++) {
+ newlocks[lockno] = locks[lockno];
+ }
+				// Allocated with new[], so release with delete[].
+				delete [] locks;
+ locks = newlocks;
+ }
+ locks[lockid] = lock;
+ } else {
+ // Release a lock.
+ do {
+ cout << "input lock to release> ";
+ cout.flush();
+ cin.getline(objbuf, sizeof(objbuf));
+ if (cin.eof())
+ break;
+ } while ((len = strlen(objbuf)) <= 0);
+ lockid = strtol(objbuf, NULL, 16);
+ if (lockid < 0 || lockid >= lockcount) {
+ cout << "Lock #" << lockid << " out of range\n";
+ continue;
+ }
+ DbLock lock = locks[lockid];
+ ret = lock_put(&lock);
+ did_get = 0;
+ }
+
+ switch (ret) {
+ case 0:
+ cout << "Lock #" << lockid << " "
+ << (did_get ? "granted" : "released")
+ << "\n";
+ held += did_get ? 1 : -1;
+ break;
+ case DB_LOCK_NOTGRANTED:
+ cout << "Lock not granted\n";
+ break;
+ case DB_LOCK_DEADLOCK:
+ cerr << "LockExample: lock_"
+ << (did_get ? "get" : "put")
+ << ": " << "returned DEADLOCK";
+ break;
+ default:
+			cerr << "LockExample: lock_"
+			    << (did_get ? "get" : "put")
+			    << ": " << strerror(errno) << "\n";
+ }
+ }
+ cout << "\n";
+ cout << "Closing lock region " << held << " locks held\n";
+	if (locks != 0)
+		delete [] locks;
+}
+
+static int
+usage()
+{
+ cerr << "usage: LockExample [-u] [-h home] [-m maxlocks]\n";
+ return (EXIT_FAILURE);
+}
diff --git a/libdb/examples_cxx/MpoolExample.cpp b/libdb/examples_cxx/MpoolExample.cpp
new file mode 100644
index 0000000..d8fe558
--- /dev/null
+++ b/libdb/examples_cxx/MpoolExample.cpp
@@ -0,0 +1,218 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <iostream>
+#include <fstream>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <db_cxx.h>
+
+using std::cout;
+using std::cerr;
+using std::ios;
+using std::ofstream;
+
+#define MPOOL "mpool"
+
+int init(const char *, int, int);
+int run(DB_ENV *, int, int, int);
+
+static int usage();
+
+const char *progname = "MpoolExample"; // Program name.
+
+class MpoolExample : public DbEnv
+{
+public:
+ MpoolExample();
+ int initdb(const char *home, int cachesize);
+ int run(int hits, int pagesize, int npages);
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ MpoolExample(const MpoolExample &);
+ void operator = (const MpoolExample &);
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ int cachesize = 20 * 1024;
+ int hits = 1000;
+ int npages = 50;
+ int pagesize = 1024;
+
+ for (int i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "-c") == 0) {
+ if ((cachesize = atoi(argv[++i])) < 20 * 1024)
+ usage();
+ }
+ else if (strcmp(argv[i], "-h") == 0) {
+ if ((hits = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else if (strcmp(argv[i], "-n") == 0) {
+ if ((npages = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else if (strcmp(argv[i], "-p") == 0) {
+ if ((pagesize = atoi(argv[++i])) <= 0)
+ usage();
+ }
+ else {
+ usage();
+ }
+ }
+
+ // Initialize the file.
+ if ((ret = init(MPOOL, pagesize, npages)) != 0)
+ return (ret);
+
+ try {
+ MpoolExample app;
+
+ cout << progname
+ << ": cachesize: " << cachesize
+ << "; pagesize: " << pagesize
+ << "; N pages: " << npages << "\n";
+
+ if ((ret = app.initdb(NULL, cachesize)) != 0)
+ return (ret);
+ if ((ret = app.run(hits, pagesize, npages)) != 0)
+ return (ret);
+ cout << "MpoolExample: completed\n";
+ return (EXIT_SUCCESS);
+ }
+ catch (DbException &dbe) {
+ cerr << "MpoolExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+}
+
+//
+// init --
+// Create a backing file.
+//
+int
+init(const char *file, int pagesize, int npages)
+{
+ // Create a file with the right number of pages, and store a page
+ // number on each page.
+ ofstream of(file, ios::out | ios::binary);
+
+ if (of.fail()) {
+ cerr << "MpoolExample: " << file << ": open failed\n";
+ return (EXIT_FAILURE);
+ }
+ char *p = new char[pagesize];
+ memset(p, 0, pagesize);
+
+ // The pages are numbered from 0.
+ for (int cnt = 0; cnt <= npages; ++cnt) {
+ *(db_pgno_t *)p = cnt;
+ of.write(p, pagesize);
+ if (of.fail()) {
+ cerr << "MpoolExample: " << file << ": write failed\n";
+ return (EXIT_FAILURE);
+ }
+ }
+ delete [] p;
+ return (EXIT_SUCCESS);
+}
+
+static int
+usage()
+{
+ cerr << "usage: MpoolExample [-c cachesize] "
+ << "[-h hits] [-n npages] [-p pagesize]\n";
+ return (EXIT_FAILURE);
+}
+
+// Note: by using DB_CXX_NO_EXCEPTIONS, we get explicit error returns
+// from various methods rather than exceptions so we can report more
+// information with each error.
+//
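+// For illustration only (not part of the original example): with
+// DB_CXX_NO_EXCEPTIONS set, a call site checks the integer return value
+// directly instead of catching DbException; dbenv and home below are
+// placeholders:
+//
+//	int err = dbenv->open(home, DB_CREATE | DB_INIT_MPOOL, 0);
+//	if (err != 0)
+//		cerr << "open: " << db_strerror(err) << "\n";
+//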
+MpoolExample::MpoolExample()
+: DbEnv(DB_CXX_NO_EXCEPTIONS)
+{
+}
+
+int MpoolExample::initdb(const char *home, int cachesize)
+{
+ set_error_stream(&cerr);
+ set_errpfx("MpoolExample");
+ set_cachesize(0, cachesize, 0);
+
+ open(home, DB_CREATE | DB_INIT_MPOOL, 0);
+ return (EXIT_SUCCESS);
+}
+
+//
+// run --
+// Get a set of pages.
+//
+int
+MpoolExample::run(int hits, int pagesize, int npages)
+{
+ db_pgno_t pageno;
+ int cnt, ret;
+ void *p;
+
+ // Open the file in the environment.
+ DbMpoolFile *mfp;
+
+ if ((ret = memp_fcreate(&mfp, 0)) != 0) {
+ cerr << "MpoolExample: memp_fcreate failed: "
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
+ }
+ mfp->open(MPOOL, 0, 0, pagesize);
+
+ cout << "retrieve " << hits << " random pages... ";
+
+ srand((unsigned int)time(NULL));
+ for (cnt = 0; cnt < hits; ++cnt) {
+ pageno = (rand() % npages) + 1;
+ if ((ret = mfp->get(&pageno, 0, &p)) != 0) {
+ cerr << "MpoolExample: unable to retrieve page "
+ << (unsigned long)pageno << ": "
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
+ }
+ if (*(db_pgno_t *)p != pageno) {
+ cerr << "MpoolExample: wrong page retrieved ("
+ << (unsigned long)pageno << " != "
+ << *(int *)p << ")\n";
+ return (EXIT_FAILURE);
+ }
+ if ((ret = mfp->put(p, 0)) != 0) {
+ cerr << "MpoolExample: unable to return page "
+ << (unsigned long)pageno << ": "
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
+ }
+ }
+
+ cout << "successful.\n";
+
+ // Close the pool.
+ if ((ret = close(0)) != 0) {
+ cerr << "MpoolExample: " << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+}
diff --git a/libdb/examples_cxx/TpcbExample.cpp b/libdb/examples_cxx/TpcbExample.cpp
new file mode 100644
index 0000000..512c775
--- /dev/null
+++ b/libdb/examples_cxx/TpcbExample.cpp
@@ -0,0 +1,657 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <iostream>
+#include <iomanip>
+#include <db_cxx.h>
+
+using std::cout;
+using std::cerr;
+
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+
+static int invarg(int, char *);
+u_int32_t random_id(FTYPE, u_int32_t, u_int32_t, u_int32_t);
+u_int32_t random_int(u_int32_t, u_int32_t);
+static int usage(void);
+
+int verbose;
+const char *progname = "TpcbExample"; // Program name.
+
+class TpcbExample : public DbEnv
+{
+public:
+ void populate(int, int, int, int);
+ void run(int, int, int, int);
+ int txn(Db *, Db *, Db *, Db *,
+ int, int, int);
+ void populateHistory(Db *, int, u_int32_t, u_int32_t, u_int32_t);
+ void populateTable(Db *, u_int32_t, u_int32_t, int, const char *);
+
+ // Note: the constructor creates a DbEnv(), which is
+ // not fully initialized until the DbEnv::open() method
+ // is called.
+ //
+ TpcbExample(const char *home, int cachesize,
+ int initializing, int flags);
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TpcbExample(const TpcbExample &);
+ void operator = (const TpcbExample &);
+};
+
+//
+// This program implements a basic TPC/B driver program. To create the
+// TPC/B database, run with the -i (init) flag. The number of records
+// with which to populate the account, history, branch, and teller tables
+// is specified by the a, s, b, and t flags respectively. To run a TPC/B
+// test, use the n flag to indicate a number of transactions to run (note
+// that you can run many of these processes in parallel to simulate a
+// multiuser test run).
+//
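+// For illustration only (not from the original source), a typical session
+// might look like the following; the binary name and environment home are
+// assumptions:
+//
+//	TpcbExample -i -h TESTDIR	# create and populate the tables
+//	TpcbExample -n 1000 -h TESTDIR	# run 1000 transactions
+//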
+#define TELLERS_PER_BRANCH 100
+#define ACCOUNTS_PER_TELLER 1000
+#define HISTORY_PER_BRANCH 2592000
+
+/*
+ * The default configuration that adheres to TPCB scaling rules requires
+ * nearly 3 GB of space. To avoid requiring that much space for testing,
+ * we set the parameters much lower. If you want to run a valid 10 TPS
+ * configuration, define VALID_SCALING.
+ */
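+/*
+ * For illustration only (not from the original source): VALID_SCALING is a
+ * compile-time switch, so it would typically be enabled on the compiler
+ * command line, for example:
+ *
+ *	c++ -DVALID_SCALING TpcbExample.cpp -ldb_cxx
+ *
+ * The exact include paths and library name depend on how Berkeley DB was
+ * built and installed.
+ */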
+#ifdef VALID_SCALING
+#define ACCOUNTS 1000000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 25920000
+#endif
+
+#ifdef TINY
+#define ACCOUNTS 1000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 10000
+#endif
+
+#if !defined(VALID_SCALING) && !defined(TINY)
+#define ACCOUNTS 100000
+#define BRANCHES 10
+#define TELLERS 100
+#define HISTORY 259200
+#endif
+
+#define HISTORY_LEN 100
+#define RECLEN 100
+#define BEGID 1000000
+
+struct Defrec {
+ u_int32_t id;
+ u_int32_t balance;
+ u_int8_t pad[RECLEN - sizeof(u_int32_t) - sizeof(u_int32_t)];
+};
+
+struct Histrec {
+ u_int32_t aid;
+ u_int32_t bid;
+ u_int32_t tid;
+ u_int32_t amount;
+ u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+};
+
+int
+main(int argc, char *argv[])
+{
+ unsigned long seed;
+ int accounts, branches, tellers, history;
+ int iflag, mpool, ntxns, txn_no_sync;
+ const char *home;
+ char *endarg;
+
+ home = "TESTDIR";
+ accounts = branches = history = tellers = 0;
+ txn_no_sync = 0;
+ mpool = ntxns = 0;
+ verbose = 0;
+ iflag = 0;
+ seed = (unsigned long)time(NULL);
+
+ for (int i = 1; i < argc; ++i) {
+
+ if (strcmp(argv[i], "-a") == 0) {
+ // Number of account records
+ if ((accounts = atoi(argv[++i])) <= 0)
+ return (invarg('a', argv[i]));
+ }
+ else if (strcmp(argv[i], "-b") == 0) {
+ // Number of branch records
+ if ((branches = atoi(argv[++i])) <= 0)
+ return (invarg('b', argv[i]));
+ }
+ else if (strcmp(argv[i], "-c") == 0) {
+ // Cachesize in bytes
+ if ((mpool = atoi(argv[++i])) <= 0)
+ return (invarg('c', argv[i]));
+ }
+ else if (strcmp(argv[i], "-f") == 0) {
+ // Fast mode: no txn sync.
+ txn_no_sync = 1;
+ }
+ else if (strcmp(argv[i], "-h") == 0) {
+ // DB home.
+ home = argv[++i];
+ }
+ else if (strcmp(argv[i], "-i") == 0) {
+ // Initialize the test.
+ iflag = 1;
+ }
+ else if (strcmp(argv[i], "-n") == 0) {
+ // Number of transactions
+ if ((ntxns = atoi(argv[++i])) <= 0)
+ return (invarg('n', argv[i]));
+ }
+ else if (strcmp(argv[i], "-S") == 0) {
+ // Random number seed.
+ seed = strtoul(argv[++i], &endarg, 0);
+ if (*endarg != '\0')
+ return (invarg('S', argv[i]));
+ }
+ else if (strcmp(argv[i], "-s") == 0) {
+ // Number of history records
+ if ((history = atoi(argv[++i])) <= 0)
+ return (invarg('s', argv[i]));
+ }
+ else if (strcmp(argv[i], "-t") == 0) {
+ // Number of teller records
+ if ((tellers = atoi(argv[++i])) <= 0)
+ return (invarg('t', argv[i]));
+ }
+ else if (strcmp(argv[i], "-v") == 0) {
+ // Verbose option.
+ verbose = 1;
+ }
+ else {
+ return (usage());
+ }
+ }
+
+ srand((unsigned int)seed);
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ cout << (long)accounts << " Accounts, "
+ << (long)branches << " Branches, "
+ << (long)tellers << " Tellers, "
+ << (long)history << " History\n";
+
+ try {
+ // Initialize the database environment.
+		// Must be done within a try block, unless you
+ // change the error model in the environment options.
+ //
+ TpcbExample app(home, mpool, iflag,
+ txn_no_sync ? DB_TXN_NOSYNC : 0);
+
+ if (iflag) {
+ if (ntxns != 0)
+ return (usage());
+ app.populate(accounts, branches, history, tellers);
+ }
+ else {
+ if (ntxns == 0)
+ return (usage());
+ app.run(ntxns, accounts, branches, tellers);
+ }
+
+ app.close(0);
+ return (EXIT_SUCCESS);
+ }
+ catch (DbException &dbe) {
+ cerr << "TpcbExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
+ }
+}
+
+static int
+invarg(int arg, char *str)
+{
+ cerr << "TpcbExample: invalid argument for -"
+ << (char)arg << ": " << str << "\n";
+ return (EXIT_FAILURE);
+}
+
+static int
+usage()
+{
+ cerr << "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n"
+ << " [-c cachesize] [-h home] [-n transactions ]\n"
+ << " [-S seed] [-s history] [-t tellers]\n";
+ return (EXIT_FAILURE);
+}
+
+TpcbExample::TpcbExample(const char *home, int cachesize,
+ int initializing, int flags)
+: DbEnv(0)
+{
+ u_int32_t local_flags;
+
+ set_error_stream(&cerr);
+ set_errpfx("TpcbExample");
+ (void)set_cachesize(0, cachesize == 0 ?
+ 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ if (flags & (DB_TXN_NOSYNC))
+ set_flags(DB_TXN_NOSYNC, 1);
+ flags &= ~(DB_TXN_NOSYNC);
+
+ local_flags = flags | DB_CREATE | DB_INIT_MPOOL;
+ if (!initializing)
+ local_flags |= DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ open(home, local_flags, 0);
+}
+
+//
+// Initialize the database to the specified number of accounts, branches,
+// history records, and tellers.
+//
+void
+TpcbExample::populate(int accounts, int branches, int history, int tellers)
+{
+ Db *dbp;
+
+ int err;
+ u_int32_t balance, idnum;
+ u_int32_t end_anum, end_bnum, end_tnum;
+ u_int32_t start_anum, start_bnum, start_tnum;
+
+ idnum = BEGID;
+ balance = 500000;
+
+ dbp = new Db(this, 0);
+ dbp->set_h_nelem((unsigned int)accounts);
+
+ if ((err = dbp->open(NULL, "account", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Account file create failed", err);
+ throw except;
+ }
+
+ start_anum = idnum;
+ populateTable(dbp, idnum, balance, accounts, "account");
+ idnum += accounts;
+ end_anum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ DbException except("Account file close failed", err);
+ throw except;
+ }
+ delete dbp;
+ if (verbose)
+ cout << "Populated accounts: "
+ << (long)start_anum << " - " << (long)end_anum << "\n";
+
+ dbp = new Db(this, 0);
+ //
+ // Since the number of branches is very small, we want to use very
+ // small pages and only 1 key per page. This is the poor-man's way
+ // of getting key locking instead of page locking.
+ //
+ dbp->set_h_ffactor(1);
+ dbp->set_h_nelem((unsigned int)branches);
+ dbp->set_pagesize(512);
+
+ if ((err = dbp->open(NULL, "branch", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Branch file create failed", err);
+ throw except;
+ }
+ start_bnum = idnum;
+ populateTable(dbp, idnum, balance, branches, "branch");
+ idnum += branches;
+ end_bnum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ DbException except("Close of branch file failed", err);
+ throw except;
+ }
+ delete dbp;
+
+ if (verbose)
+ cout << "Populated branches: "
+ << (long)start_bnum << " - " << (long)end_bnum << "\n";
+
+ dbp = new Db(this, 0);
+ //
+ // In the case of tellers, we also want small pages, but we'll let
+ // the fill factor dynamically adjust itself.
+ //
+ dbp->set_h_ffactor(0);
+ dbp->set_h_nelem((unsigned int)tellers);
+ dbp->set_pagesize(512);
+
+ if ((err = dbp->open(NULL, "teller", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Teller file create failed", err);
+ throw except;
+ }
+
+ start_tnum = idnum;
+ populateTable(dbp, idnum, balance, tellers, "teller");
+ idnum += tellers;
+ end_tnum = idnum - 1;
+ if ((err = dbp->close(0)) != 0) {
+ DbException except("Close of teller file failed", err);
+ throw except;
+ }
+ delete dbp;
+ if (verbose)
+ cout << "Populated tellers: "
+ << (long)start_tnum << " - " << (long)end_tnum << "\n";
+
+ dbp = new Db(this, 0);
+ dbp->set_re_len(HISTORY_LEN);
+ if ((err = dbp->open(NULL, "history", NULL, DB_RECNO,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Create of history file failed", err);
+ throw except;
+ }
+
+ populateHistory(dbp, history, accounts, branches, tellers);
+ if ((err = dbp->close(0)) != 0) {
+ DbException except("Close of history file failed", err);
+ throw except;
+ }
+ delete dbp;
+}
+
+void
+TpcbExample::populateTable(Db *dbp,
+ u_int32_t start_id, u_int32_t balance,
+ int nrecs, const char *msg)
+{
+ Defrec drec;
+ memset(&drec.pad[0], 1, sizeof(drec.pad));
+
+ Dbt kdbt(&drec.id, sizeof(u_int32_t));
+ Dbt ddbt(&drec, sizeof(drec));
+
+ for (int i = 0; i < nrecs; i++) {
+ drec.id = start_id + (u_int32_t)i;
+ drec.balance = balance;
+ int err;
+ if ((err =
+ dbp->put(NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
+ cerr << "Failure initializing " << msg << " file: "
+ << strerror(err) << "\n";
+ DbException except("failure initializing file", err);
+ throw except;
+ }
+ }
+}
+
+void
+TpcbExample::populateHistory(Db *dbp, int nrecs, u_int32_t accounts,
+ u_int32_t branches, u_int32_t tellers)
+{
+ Histrec hrec;
+ memset(&hrec.pad[0], 1, sizeof(hrec.pad));
+ hrec.amount = 10;
+ db_recno_t key;
+
+ Dbt kdbt(&key, sizeof(u_int32_t));
+ Dbt ddbt(&hrec, sizeof(hrec));
+
+ for (int i = 1; i <= nrecs; i++) {
+ hrec.aid = random_id(ACCOUNT, accounts, branches, tellers);
+ hrec.bid = random_id(BRANCH, accounts, branches, tellers);
+ hrec.tid = random_id(TELLER, accounts, branches, tellers);
+
+ int err;
+ key = (db_recno_t)i;
+ if ((err = dbp->put(NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
+ DbException except("failure initializing history file",
+ err);
+ throw except;
+ }
+ }
+}
+
+u_int32_t
+random_int(u_int32_t lo, u_int32_t hi)
+{
+ u_int32_t ret;
+ int t;
+
+ t = rand();
+ ret = (u_int32_t)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+}
+
+u_int32_t
+random_id(FTYPE type, u_int32_t accounts, u_int32_t branches, u_int32_t tellers)
+{
+ u_int32_t min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ // Fallthrough
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ // Fallthrough
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+}
+
+void
+TpcbExample::run(int n, int accounts, int branches, int tellers)
+{
+ Db *adb, *bdb, *hdb, *tdb;
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ time_t starttime, curtime, lasttime;
+
+ //
+ // Open the database files.
+ //
+
+ int err;
+ adb = new Db(this, 0);
+ if ((err = adb->open(NULL, "account", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of account file failed", err);
+ throw except;
+ }
+
+ bdb = new Db(this, 0);
+ if ((err = bdb->open(NULL, "branch", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of branch file failed", err);
+ throw except;
+ }
+
+ tdb = new Db(this, 0);
+ if ((err = tdb->open(NULL, "teller", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of teller file failed", err);
+ throw except;
+ }
+
+ hdb = new Db(this, 0);
+ if ((err = hdb->open(NULL, "history", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of history file failed", err);
+ throw except;
+ }
+
+ txns = failed = ifailed = 0;
+ starttime = time(NULL);
+ lasttime = starttime;
+ while (n-- > 0) {
+ txns++;
+ ret = txn(adb, bdb, tdb, hdb, accounts, branches, tellers);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = time(NULL);
+ gtps = (double)(txns - failed) / (curtime - starttime);
+ itps = (double)(5000 - ifailed) / (curtime - lasttime);
+
+ // We use printf because it provides much simpler
+ // formatting than iostreams.
+ //
+ printf("%d txns %d failed ", txns, failed);
+ printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
+ gtps, itps);
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+ (void)adb->close(0);
+ (void)bdb->close(0);
+ (void)tdb->close(0);
+ (void)hdb->close(0);
+
+ cout << (long)txns << " transactions begun "
+ << (long)failed << " failed\n";
+}
+
+//
+// XXX Figure out the appropriate way to pick out IDs.
+//
+int
+TpcbExample::txn(Db *adb, Db *bdb, Db *tdb, Db *hdb,
+ int accounts, int branches, int tellers)
+{
+ Dbc *acurs = NULL;
+ Dbc *bcurs = NULL;
+ Dbc *tcurs = NULL;
+ DbTxn *t = NULL;
+
+ db_recno_t key;
+ Defrec rec;
+ Histrec hrec;
+ int account, branch, teller, ret;
+
+ Dbt d_dbt;
+ Dbt d_histdbt;
+ Dbt k_dbt;
+ Dbt k_histdbt(&key, sizeof(key));
+
+ //
+ // XXX We could move a lot of this into the driver to make this
+ // faster.
+ //
+ account = random_id(ACCOUNT, accounts, branches, tellers);
+ branch = random_id(BRANCH, accounts, branches, tellers);
+ teller = random_id(TELLER, accounts, branches, tellers);
+
+ k_dbt.set_size(sizeof(int));
+
+ d_dbt.set_flags(DB_DBT_USERMEM);
+ d_dbt.set_data(&rec);
+ d_dbt.set_ulen(sizeof(rec));
+
+ hrec.aid = account;
+ hrec.bid = branch;
+ hrec.tid = teller;
+ hrec.amount = 10;
+ // Request 0 bytes since we're just positioning.
+ d_histdbt.set_flags(DB_DBT_PARTIAL);
+
+ // START TIMING
+ if (txn_begin(NULL, &t, 0) != 0)
+ goto err;
+
+ if (adb->cursor(t, &acurs, 0) != 0 ||
+ bdb->cursor(t, &bcurs, 0) != 0 ||
+ tdb->cursor(t, &tcurs, 0) != 0)
+ goto err;
+
+ // Account record
+ k_dbt.set_data(&account);
+ if (acurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (acurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // Branch record
+ k_dbt.set_data(&branch);
+ if (bcurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (bcurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // Teller record
+ k_dbt.set_data(&teller);
+ if (tcurs->get(&k_dbt, &d_dbt, DB_SET) != 0)
+ goto err;
+ rec.balance += 10;
+ if (tcurs->put(&k_dbt, &d_dbt, DB_CURRENT) != 0)
+ goto err;
+
+ // History record
+ d_histdbt.set_flags(0);
+ d_histdbt.set_data(&hrec);
+ d_histdbt.set_ulen(sizeof(hrec));
+ if (hdb->put(t, &k_histdbt, &d_histdbt, DB_APPEND) != 0)
+ goto err;
+
+ if (acurs->close() != 0 || bcurs->close() != 0 || tcurs->close() != 0)
+ goto err;
+
+ ret = t->commit(0);
+ t = NULL;
+ if (ret != 0)
+ goto err;
+
+ // END TIMING
+ return (0);
+
+err:
+ if (acurs != NULL)
+ (void)acurs->close();
+ if (bcurs != NULL)
+ (void)bcurs->close();
+ if (tcurs != NULL)
+ (void)tcurs->close();
+ if (t != NULL)
+ (void)t->abort();
+
+ if (verbose)
+ cout << "Transaction A=" << (long)account
+ << " B=" << (long)branch
+ << " T=" << (long)teller << " failed\n";
+ return (-1);
+}
diff --git a/libdb/fileops/fileops.src b/libdb/fileops/fileops.src
new file mode 100644
index 0000000..8a68984
--- /dev/null
+++ b/libdb/fileops/fileops.src
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __fop
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE #include "dbinc/fop.h"
+INCLUDE
+
+/*
+ * create -- create a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ * mode: file system mode
+ */
+BEGIN create 143
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG mode u_int32_t o
+END
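+
+/*
+ * For illustration only (not part of the original source): gen_rec.awk
+ * expands each record description in this file into logging, read, and
+ * print routines. The "create" record above, for example, yields
+ * __fop_create_log(), whose generated prototype appears in fileops_auto.c:
+ *
+ *	int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ *	    u_int32_t, const DBT *, u_int32_t, u_int32_t));
+ */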
+
+/*
+ * remove -- remove a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ */
+BEGIN remove 144
+DBT name DBT s
+DBT fid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * write: log the writing of data into an object.
+ *
+ * name: file containing the page.
+ * appname: indicates if the name needs to go through __db_appname
+ * offset: offset in the file.
+ * page: the actual meta-data page.
+ * flag: non-0 indicates that this is a tempfile, so we needn't undo
+ * these modifications (we'll toss the file).
+ */
+BEGIN write 145
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG offset u_int32_t lu
+PGDBT page DBT s
+ARG flag u_int32_t lu
+END
+
+/*
+ * rename: move a file from one name to another.
+ * The appname value indicates if this is a path name that should be used
+ * directly (i.e., no interpretation) or if it is a pathname that should
+ * be interpreted via calls to __db_appname. The fileid is the 20-byte
+ * DB fileid of the file being renamed. We need to check it on recovery
+ * so that we don't inadvertently overwrite good files.
+ */
+BEGIN rename 146
+DBT oldname DBT s
+DBT newname DBT s
+DBT fileid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * File removal record. This is a DB-level log record that indicates
+ * we've just completed some form of file removal. The purpose of this
+ * log record is to logically identify the particular instance of the
+ * named file so that during recovery, in deciding if we should roll-forward
+ * a remove or a rename, we can make sure that we don't roll one forward and
+ * delete or overwrite the wrong file.
+ * real_fid: The 20-byte unique file identifier of the original file being
+ * removed.
+ * tmp_fid: The unique fid of the tmp file that is removed.
+ * name: The pre- __db_appname name of the file
+ * child: The transaction that removed or renamed the file.
+ */
+BEGIN file_remove 141
+DBT real_fid DBT s
+DBT tmp_fid DBT s
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG child u_int32_t lx
+END
diff --git a/libdb/fileops/fileops_auto.c b/libdb/fileops/fileops_auto.c
new file mode 100644
index 0000000..f38640b
--- /dev/null
+++ b/libdb/fileops/fileops_auto.c
@@ -0,0 +1,1371 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/fop.h"
+
+/*
+ * PUBLIC: int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, u_int32_t, u_int32_t));
+ */
+int
+__fop_create_log(dbenv, txnid, ret_lsnp, flags,
+ name, appname, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t mode;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_create;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_create_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_create_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_create_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_create_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_create_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_create: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\tmode: %o\n", argp->mode);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **));
+ */
+int
+__fop_create_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_create_args **argpp;
+{
+ __fop_create_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_create_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *, u_int32_t));
+ */
+int
+__fop_remove_log(dbenv, txnid, ret_lsnp, flags,
+ name, fid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ const DBT *fid;
+ u_int32_t appname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_remove;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (fid == NULL ? 0 : fid->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ if (fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &fid->size, sizeof(fid->size));
+ bp += sizeof(fid->size);
+ memcpy(bp, fid->data, fid->size);
+ bp += fid->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_remove_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_remove_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_remove_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_remove_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_remove_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_remove: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfid: ");
+ for (i = 0; i < argp->fid.size; i++) {
+ ch = ((u_int8_t *)argp->fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **));
+ */
+int
+__fop_remove_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_remove_args **argpp;
+{
+ __fop_remove_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_remove_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memset(&argp->fid, 0, sizeof(argp->fid));
+ memcpy(&argp->fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->fid.data = bp;
+ bp += argp->fid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, u_int32_t, u_int32_t, const DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__fop_write_log(dbenv, txnid, ret_lsnp, flags,
+ name, appname, offset, page, flag)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t offset;
+ const DBT *page;
+ u_int32_t flag;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_write;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)offset;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+
+ uinttmp = (u_int32_t)flag;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_write_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_write_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_write_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_write_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_write_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_write: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\toffset: %lu\n", (u_long)argp->offset);
+ (void)printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tflag: %lu\n", (u_long)argp->flag);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **));
+ */
+int
+__fop_write_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_write_args **argpp;
+{
+ __fop_write_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_write_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->offset = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->flag = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t));
+ */
+int
+__fop_rename_log(dbenv, txnid, ret_lsnp, flags,
+ oldname, newname, fileid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *oldname;
+ const DBT *newname;
+ const DBT *fileid;
+ u_int32_t appname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_rename;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (oldname == NULL ? 0 : oldname->size)
+ + sizeof(u_int32_t) + (newname == NULL ? 0 : newname->size)
+ + sizeof(u_int32_t) + (fileid == NULL ? 0 : fileid->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (oldname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &oldname->size, sizeof(oldname->size));
+ bp += sizeof(oldname->size);
+ memcpy(bp, oldname->data, oldname->size);
+ bp += oldname->size;
+ }
+
+ if (newname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newname->size, sizeof(newname->size));
+ bp += sizeof(newname->size);
+ memcpy(bp, newname->data, newname->size);
+ bp += newname->size;
+ }
+
+ if (fileid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &fileid->size, sizeof(fileid->size));
+ bp += sizeof(fileid->size);
+ memcpy(bp, fileid->data, fileid->size);
+ bp += fileid->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_rename_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_rename_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_rename_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_rename_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_rename: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\toldname: ");
+ for (i = 0; i < argp->oldname.size; i++) {
+ ch = ((u_int8_t *)argp->oldname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tnewname: ");
+ for (i = 0; i < argp->newname.size; i++) {
+ ch = ((u_int8_t *)argp->newname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfileid: ");
+ for (i = 0; i < argp->fileid.size; i++) {
+ ch = ((u_int8_t *)argp->fileid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_read __P((DB_ENV *, void *, __fop_rename_args **));
+ */
+int
+__fop_rename_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_rename_args **argpp;
+{
+ __fop_rename_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_rename_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->oldname, 0, sizeof(argp->oldname));
+ memcpy(&argp->oldname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->oldname.data = bp;
+ bp += argp->oldname.size;
+
+ memset(&argp->newname, 0, sizeof(argp->newname));
+ memcpy(&argp->newname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newname.data = bp;
+ bp += argp->newname.size;
+
+ memset(&argp->fileid, 0, sizeof(argp->fileid));
+ memcpy(&argp->fileid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->fileid.data = bp;
+ bp += argp->fileid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_log __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *,
+ * PUBLIC: u_int32_t, u_int32_t));
+ */
+int
+__fop_file_remove_log(dbenv, txnid, ret_lsnp, flags,
+ real_fid, tmp_fid, name, appname, child)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *real_fid;
+ const DBT *tmp_fid;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t child;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_file_remove;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (real_fid == NULL ? 0 : real_fid->size)
+ + sizeof(u_int32_t) + (tmp_fid == NULL ? 0 : tmp_fid->size)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (real_fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &real_fid->size, sizeof(real_fid->size));
+ bp += sizeof(real_fid->size);
+ memcpy(bp, real_fid->data, real_fid->size);
+ bp += real_fid->size;
+ }
+
+ if (tmp_fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &tmp_fid->size, sizeof(tmp_fid->size));
+ bp += sizeof(tmp_fid->size);
+ memcpy(bp, tmp_fid->data, tmp_fid->size);
+ bp += tmp_fid->size;
+ }
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)child;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_file_remove_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_print __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_file_remove_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_file_remove_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_file_remove: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\treal_fid: ");
+ for (i = 0; i < argp->real_fid.size; i++) {
+ ch = ((u_int8_t *)argp->real_fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\ttmp_fid: ");
+ for (i = 0; i < argp->tmp_fid.size; i++) {
+ ch = ((u_int8_t *)argp->tmp_fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\tchild: 0x%lx\n", (u_long)argp->child);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_read __P((DB_ENV *, void *,
+ * PUBLIC: __fop_file_remove_args **));
+ */
+int
+__fop_file_remove_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_file_remove_args **argpp;
+{
+ __fop_file_remove_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_file_remove_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->real_fid, 0, sizeof(argp->real_fid));
+ memcpy(&argp->real_fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->real_fid.data = bp;
+ bp += argp->real_fid.size;
+
+ memset(&argp->tmp_fid, 0, sizeof(argp->tmp_fid));
+ memcpy(&argp->tmp_fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->tmp_fid.data = bp;
+ bp += argp->tmp_fid.size;
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->child = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_print, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_print, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_print, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_print, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_print, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_getpgnos, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_getpgnos, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_getpgnos, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_getpgnos, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_getpgnos, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_recover, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_recover, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_recover, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_recover, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_recover, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/fileops/fop_basic.c b/libdb/fileops/fop_basic.c
new file mode 100644
index 0000000..c039518
--- /dev/null
+++ b/libdb/fileops/fop_basic.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+/*
+ * This file implements the basic file-level operations. This code
+ * ought to be fairly independent of DB, other than through its
+ * error-reporting mechanism.
+ */
+
+/*
+ * __fop_create --
+ * Create a (transactionally protected) file system object. This is used
+ * to create DB files now, potentially blobs, queue extents and anything
+ * else you wish to store in a file system object.
+ *
+ * PUBLIC: int __fop_create __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_FH *, const char *, APPNAME, int));
+ */
+int
+__fop_create(dbenv, txn, fhp, name, appname, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+ APPNAME appname;
+ int mode;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data;
+ char *real_name;
+ int do_close, ret;
+
+ ret = 0;
+ real_name = NULL;
+
+ if (fhp != NULL)
+ do_close = 0;
+ else {
+ fhp = &fh;
+ memset(fhp, 0, sizeof(fh));
+ do_close = 1;
+ }
+
+ if (mode == 0)
+ mode = __db_omode("rw----");
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)name;
+ data.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_create_log(dbenv,
+ txn, &lsn, DB_FLUSH, &data, (u_int32_t)appname, mode)) != 0)
+ goto err;
+ }
+
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_POSTLOG, ret, name);
+
+ ret =
+ __os_open(dbenv, real_name, DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (do_close && F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_remove --
+ * Remove a file system object.
+ *
+ * PUBLIC: int __fop_remove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, u_int8_t *, const char *, APPNAME));
+ */
+int
+__fop_remove(dbenv, txn, fileid, name, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ u_int8_t *fileid;
+ const char *name;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fdbt, ndbt;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (txn == NULL) {
+ if (fileid != NULL && (ret = dbenv->memp_nameop(
+ dbenv, fileid, NULL, real_name, NULL)) != 0)
+ goto err;
+ } else {
+ if (DBENV_LOGGING(dbenv)) {
+			memset(&fdbt, 0, sizeof(fdbt));
+ fdbt.data = fileid;
+ fdbt.size = fileid == NULL ? 0 : DB_FILE_ID_LEN;
+ memset(&ndbt, 0, sizeof(ndbt));
+ ndbt.data = (void *)name;
+ ndbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_remove_log(dbenv,
+ txn, &lsn, 0, &ndbt, &fdbt, appname)) != 0)
+ goto err;
+ }
+ ret = __txn_remevent(dbenv, txn, real_name, fileid);
+ }
+
+err: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_write
+ *
+ * Write "size" bytes from "buf" to file "name" beginning at offset "off."
+ * If the file is open, supply a handle in fhp.  Istmp indicates whether
+ * this is a write to a temporary file that need not be undone in the face
+ * of failure (on abort we simply remove the whole file, so there is no
+ * need to undo the individual write).
+ *
+ * Currently, we *only* use this with istmp true. If we need more general
+ * handling, then we'll have to zero out regions on abort (and possibly
+ * log the before image of the data in the log record).
+ *
+ * PUBLIC: int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME,
+ * PUBLIC: DB_FH *, u_int32_t, u_int8_t *, u_int32_t, u_int32_t));
+ */
+int
+__fop_write(dbenv, txn, name, appname, fhp, off, buf, size, istmp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ APPNAME appname;
+ DB_FH *fhp;
+ u_int32_t off;
+ u_int8_t *buf;
+ u_int32_t size, istmp;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data, namedbt;
+ char *real_name;
+ int ret, t_ret, we_opened;
+ size_t nbytes;
+
+ ret = 0;
+ we_opened = 0;
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = buf;
+ data.size = size;
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (void *)name;
+ namedbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_write_log(dbenv,
+ txn, &lsn, 0, &namedbt, appname, off, &data, istmp)) != 0)
+ goto err;
+ }
+
+ if (fhp == NULL) {
+ /* File isn't open; we need to reopen it. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto err;
+ fhp = &fh;
+ we_opened = 1;
+ } else
+ we_opened = 0;
+
+ /* Seek to offset. */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, off, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ /* Now do the write. */
+ if ((ret = __os_write(dbenv, fhp, buf, size, &nbytes)) != 0)
+ goto err;
+
+err: if (we_opened)
+ if ((t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
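+
+/*
+ * Illustrative sketch (not part of the library): a transactional caller
+ * typically pairs __fop_create with __fop_write, e.g. to lay a buffer
+ * down in a freshly created file ("tmpname" and "buf" are hypothetical
+ * names here):
+ *
+ *	if ((ret = __fop_create(dbenv,
+ *	    txn, NULL, tmpname, DB_APP_DATA, 0)) == 0)
+ *		ret = __fop_write(dbenv, txn, tmpname, DB_APP_DATA,
+ *		    NULL, 0, buf, sizeof(buf), 1);
+ *
+ * This is the same pattern __fop_dummy (fop_util.c) uses to build the
+ * place-holder file for rename/remove.
+ */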
+
+/*
+ * __fop_rename --
+ * Change a file's name.
+ *
+ * PUBLIC: int __fop_rename __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int8_t *, APPNAME));
+ */
+int
+__fop_rename(dbenv, txn, oldname, newname, fid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *oldname;
+ const char *newname;
+ u_int8_t *fid;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fiddbt, new, old;
+ int ret;
+	char *n, *o;
+
+	/* Initialize so the cleanup path below is safe if __db_appname fails. */
+	o = (char *)oldname;
+	n = (char *)newname;
+
+ if ((ret = __db_appname(dbenv, appname, oldname, 0, NULL, &o)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv, appname, newname, 0, NULL, &n)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&old, 0, sizeof(old));
+ memset(&new, 0, sizeof(new));
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ old.data = (void *)oldname;
+ old.size = (u_int32_t)strlen(oldname) + 1;
+ new.data = (void *)newname;
+ new.size = (u_int32_t)strlen(newname) + 1;
+ fiddbt.data = fid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ if ((ret = __fop_rename_log(dbenv, txn, &lsn,
+ DB_FLUSH, &old, &new, &fiddbt, (u_int32_t)appname)) != 0)
+ goto err;
+ }
+
+ ret = dbenv->memp_nameop(dbenv, fid, newname, o, n);
+
+err: if (o != oldname)
+ __os_free(dbenv, o);
+ if (n != newname)
+ __os_free(dbenv, n);
+ return (ret);
+}
diff --git a/libdb/fileops/fop_rec.c b/libdb/fileops/fop_rec.c
new file mode 100644
index 0000000..1b08fb6
--- /dev/null
+++ b/libdb/fileops/fop_rec.c
@@ -0,0 +1,317 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+/*
+ * __fop_create_recover --
+ * Recovery function for create.
+ *
+ * PUBLIC: int __fop_create_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_create_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_FH fh;
+ __fop_create_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_create_print);
+ REC_NOOP_INTRO(__fop_create_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
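+	/*
+	 * Undo the create by unlinking the file; redo it by recreating the
+	 * file and immediately closing the handle -- recovery only needs
+	 * the file to exist again.
+	 */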
+ if (DB_UNDO(op))
+ (void)__os_unlink(dbenv, real_name);
+ else if (DB_REDO(op))
+ if ((ret = __os_open(dbenv, real_name,
+ DB_OSO_CREATE | DB_OSO_EXCL, argp->mode, &fh)) == 0)
+ __os_closehandle(dbenv, &fh);
+
+ *lsnp = argp->prev_lsn;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_remove_recover --
+ * Recovery function for remove.
+ *
+ * PUBLIC: int __fop_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_remove_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_remove_print);
+ REC_NOOP_INTRO(__fop_remove_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ if (DB_REDO(op) && (ret = dbenv->memp_nameop(dbenv,
+ (u_int8_t *)argp->fid.data, NULL, real_name, NULL)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_write_recover --
+ * Recovery function for writechunk.
+ *
+ * PUBLIC: int __fop_write_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_write_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_write_args *argp;
+ int ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_write_print);
+ REC_NOOP_INTRO(__fop_write_read);
+
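+	/*
+	 * These records are only written with the "istmp" flag set (see
+	 * __fop_write in fop_basic.c), so an undo never has anything to do:
+	 * the temporary file is removed wholesale on abort.  On redo we
+	 * simply replay the write.
+	 */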
+ if (DB_UNDO(op))
+ DB_ASSERT(argp->flag != 0);
+ else if (DB_REDO(op))
+ ret = __fop_write(dbenv,
+ argp->txnid, argp->name.data, argp->appname, NULL,
+ argp->offset, argp->page.data, argp->page.size, argp->flag);
+
+ *lsnp = argp->prev_lsn;
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __fop_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_rename_args *argp;
+ DBMETA *meta;
+ char *real_new, *real_old, *src;
+ int ret;
+ u_int8_t *fileid, mbuf[DBMETASIZE];
+
+ real_new = NULL;
+ real_old = NULL;
+ ret = 0;
+ meta = (DBMETA *)&mbuf[0];
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_rename_print);
+ REC_NOOP_INTRO(__fop_rename_read);
+ fileid = argp->fileid.data;
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->newname.data, 0, NULL, &real_new)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->oldname.data, 0, NULL, &real_old)) != 0)
+ goto out;
+
+ /*
+ * Verify that we are manipulating the correct file. We should always
+ * be OK on an ABORT or an APPLY, but during recovery, we have to
+ * check.
+ */
+ if (op != DB_TXN_ABORT && op != DB_TXN_APPLY) {
+ src = DB_UNDO(op) ? real_new : real_old;
+ /*
+ * Interpret any error as meaning that the file either doesn't
+ * exist, doesn't have a meta-data page, or is in some other
+ * way, shape or form, incorrect, so that we should not restore
+ * it.
+ */
+ if (__fop_read_meta(
+ dbenv, src, mbuf, DBMETASIZE, NULL, 1, NULL, 0) != 0)
+ goto done;
+ if (__db_chk_meta(dbenv, NULL, meta, 1) != 0)
+ goto done;
+ if (memcmp(argp->fileid.data, meta->uid, DB_FILE_ID_LEN) != 0)
+ goto done;
+ }
+
+ if (DB_UNDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->oldname.data, real_new, real_old);
+ if (DB_REDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->newname.data, real_old, real_new);
+
+done: *lsnp = argp->prev_lsn;
+out: if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_file_remove_recover --
+ * Recovery function for file_remove. On the REDO pass, we need to
+ * make sure no one recreated the file while we weren't looking.  On the
+ * undo pass, we must check whether the file we are interested in is the
+ * one that exists, and then set the status of the child transaction
+ * depending on what we find out.
+ *
+ * PUBLIC: int __fop_file_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_file_remove_args *argp;
+ DBMETA *meta;
+ char *real_name;
+ int is_real, is_tmp, ret;
+ size_t len;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t cstat;
+
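+	/*
+	 * cstat records what we learn about the child transaction that did
+	 * the remove: TXN_EXPECTED if the file is already gone, TXN_COMMIT
+	 * if the file on disk is the one being removed, and TXN_IGNORE if
+	 * some other file now lives under that name.
+	 */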
+ real_name = NULL;
+ is_real = is_tmp = 0;
+ meta = (DBMETA *)&mbuf[0];
+ REC_PRINT(__fop_file_remove_print);
+ REC_NOOP_INTRO(__fop_file_remove_read);
+
+ /*
+ * This record is only interesting on the backward, forward, and
+ * apply phases.
+ */
+ if (op != DB_TXN_BACKWARD_ROLL &&
+ op != DB_TXN_FORWARD_ROLL && op != DB_TXN_APPLY)
+ goto done;
+
+ if ((ret = __db_appname(dbenv,
+ (APPNAME)argp->appname, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /* Verify that we are manipulating the correct file. */
+ if ((ret = __fop_read_meta(dbenv,
+ real_name, mbuf, DBMETASIZE, NULL, 1, &len, 0)) != 0) {
+ /*
+ * If len is non-zero, then the file exists and has something
+ * in it, but that something isn't a full meta-data page, so
+ * this is very bad. Bail out!
+ */
+ if (len != 0)
+ goto out;
+
+ /* File does not exist. */
+ cstat = TXN_EXPECTED;
+ } else {
+ /*
+ * We can ignore errors here since we'll simply fail the
+ * checks below and assume this is the wrong file.
+ */
+ (void)__db_chk_meta(dbenv, NULL, meta, 1);
+ is_real =
+ memcmp(argp->real_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+ is_tmp =
+ memcmp(argp->tmp_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+
+ if (!is_real && !is_tmp)
+ /* File exists, but isn't what we were removing. */
+ cstat = TXN_IGNORE;
+ else
+ /* File exists and is the one that we were removing. */
+ cstat = TXN_COMMIT;
+ }
+
+ if (DB_UNDO(op)) {
+ /* On the backward pass, we leave a note for the child txn. */
+ if ((ret = __db_txnlist_update(dbenv,
+ info, argp->child, cstat, NULL)) == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->child, cstat, NULL);
+ } else if (DB_REDO(op)) {
+ /*
+ * On the forward pass, check if someone recreated the
+ * file while we weren't looking.
+ */
+ if (cstat == TXN_COMMIT)
+ (void)dbenv->memp_nameop(dbenv,
+ is_real ? argp->real_fid.data : argp->tmp_fid.data,
+ NULL, real_name, NULL);
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
diff --git a/libdb/fileops/fop_util.c b/libdb/fileops/fop_util.c
new file mode 100644
index 0000000..4c651bd
--- /dev/null
+++ b/libdb/fileops/fop_util.c
@@ -0,0 +1,997 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __fop_set_pgsize __P((DB *, DB_FH *, const char *));
+
+/*
+ * Acquire the environment meta-data lock. The parameters are the
+ * environment (ENV), the locker id to use in acquiring the lock (ID)
+ * and a pointer to a DB_LOCK.
+ */
+#define GET_ENVLOCK(ENV, ID, L) do { \
+ DBT __dbt; \
+ u_int32_t __lockval; \
+ \
+ if (LOCKING_ON((ENV))) { \
+ __lockval = 0; \
+ __dbt.data = &__lockval; \
+ __dbt.size = sizeof(__lockval); \
+ if ((ret = (ENV)->lock_get((ENV), (ID), \
+ 0, &__dbt, DB_LOCK_WRITE, (L))) != 0) \
+ goto err; \
+ } \
+} while (0)
+
+#define REL_ENVLOCK(ENV, L) \
+ (!LOCK_ISSET(*(L)) ? 0 : (ENV)->lock_put((ENV), (L)))
+
+/*
+ * If our caller is doing fcntl(2) locking, then we can't close it
+ * because that would discard the caller's lock. Otherwise, close
+ * the handle.
+ */
+#define CLOSE_HANDLE(D, F) { \
+ if (F_ISSET((F), DB_FH_VALID)) { \
+ if (LF_ISSET(DB_FCNTL_LOCKING)) \
+ (D)->saved_open_fhp = (F); \
+ else if ((t_ret = __os_closehandle((D)->dbenv,(F))) != 0) { \
+ if (ret == 0) \
+ ret = t_ret; \
+ goto err; \
+ } \
+ } \
+}
+
+/*
+ * __fop_lock_handle --
+ *
+ * Get the handle lock for a database. If the envlock is specified,
+ * do this as a lock_vec call that releases the environment lock before
+ * acquiring the handle lock.
+ *
+ * PUBLIC: int __fop_lock_handle __P((DB_ENV *,
+ * PUBLIC: DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t));
+ *
+ */
+int
+__fop_lock_handle(dbenv, dbp, locker, mode, elock, flags)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t locker;
+ db_lockmode_t mode;
+ DB_LOCK *elock;
+ u_int32_t flags;
+{
+ DBT fileobj;
+ DB_LOCKREQ reqs[2], *ereq;
+ DB_LOCK_ILOCK lock_desc;
+ int ret;
+
+ if (!LOCKING_ON(dbenv) || F_ISSET(dbp, DB_AM_COMPENSATE))
+ return (0);
+
+ /*
+ * If we are in recovery, the only locking we should be
+ * doing is on the global environment.
+ */
+ if (IS_RECOVERING(dbenv)) {
+ if (elock != NULL)
+ REL_ENVLOCK(dbenv, elock);
+ return (0);
+ }
+
+ memcpy(&lock_desc.fileid, &dbp->fileid, DB_FILE_ID_LEN);
+ lock_desc.pgno = dbp->meta_pgno;
+ lock_desc.type = DB_HANDLE_LOCK;
+
+ memset(&fileobj, 0, sizeof(fileobj));
+ fileobj.data = &lock_desc;
+ fileobj.size = sizeof(lock_desc);
+ DB_TEST_SUBLOCKS(dbenv, flags);
+ if (elock == NULL)
+ ret = dbenv->lock_get(dbenv, locker,
+ flags, &fileobj, mode, &dbp->handle_lock);
+ else {
+ reqs[0].op = DB_LOCK_PUT;
+ reqs[0].lock = *elock;
+ reqs[1].op = DB_LOCK_GET;
+ reqs[1].mode = mode;
+ reqs[1].obj = &fileobj;
+ reqs[1].timeout = 0;
+ if ((ret = __lock_vec(dbenv,
+ locker, flags, reqs, 2, &ereq)) == 0) {
+ dbp->handle_lock = reqs[1].lock;
+ LOCK_INIT(*elock);
+ } else if (ereq != reqs)
+ LOCK_INIT(*elock);
+ }
+
+ dbp->cur_lid = locker;
+ return (ret);
+}
+
+/*
+ * __fop_file_setup --
+ *
+ * Perform all the needed checking and locking to open up or create a
+ * file.
+ *
+ * There's a reason we don't push this code down into the buffer cache.
+ * The problem is that there's no information external to the file that
+ * we can use as a unique ID. UNIX has dev/inode pairs, but they are
+ * not necessarily unique after reboot, if the file was mounted via NFS.
+ * Windows has similar problems, as the FAT filesystem doesn't maintain
+ * dev/inode numbers across reboot. So, we must get something from the
+ * file we can use to ensure that, even after a reboot, the file we're
+ * joining in the cache is the right file for us to join. The solution
+ * we use is to maintain a file ID that's stored in the database, and
+ * that's why we have to open and read the file before calling into the
+ * buffer cache or obtaining a lock (we use this unique fileid to lock
+ * as well as to identify like files in the cache).
+ *
+ * PUBLIC: int __fop_file_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, int, u_int32_t, u_int32_t *));
+ */
+int
+__fop_file_setup(dbp, txn, name, mode, flags, retidp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ int mode;
+ u_int32_t flags, *retidp;
+{
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ DB_LOCK elock, tmp_lock;
+ DB_TXN *stxn;
+ char *real_name, *real_tmpname, *tmpname;
+ db_lockmode_t lmode;
+ int created_fhp, created_locker, ret, tmp_created, t_ret, truncating;
+ size_t len;
+ u_int32_t locker, oflags;
+ u_int8_t mbuf[DBMETASIZE];
+
+ DB_ASSERT(name != NULL);
+
+ *retidp = TXN_INVALID;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ LOCK_INIT(tmp_lock);
+ stxn = NULL;
+ created_fhp = created_locker = 0;
+ real_name = real_tmpname = tmpname = NULL;
+ tmp_created = truncating = 0;
+
+ /*
+ * If we open a file handle and our caller is doing fcntl(2) locking,
+ * we can't close it because that would discard the caller's lock.
+ * Save it until we close or refresh the DB handle.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ if ((ret = __os_malloc(dbenv, sizeof(*fhp), &fhp)) != 0)
+ return (ret);
+ created_fhp = 1;
+ } else
+ fhp = &fh;
+ memset(fhp, 0, sizeof(*fhp));
+
+ /*
+ * Get a lockerid for this handle. There are paths through queue
+ * rename and remove where this dbp already has a locker, so make
+ * sure we don't clobber it and conflict.
+ */
+ if (LOCKING_ON(dbenv) &&
+ !F_ISSET(dbp, DB_AM_COMPENSATE) && dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ created_locker = 1;
+ }
+
+ locker = txn == NULL ? dbp->lid : txn->txnid;
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Fill in the default file mode. */
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+
+ oflags = 0;
+ if (LF_ISSET(DB_RDONLY))
+ oflags |= DB_OSO_RDONLY;
+
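+	/*
+	 * Open protocol: take the environment lock, read the meta-data page
+	 * to learn the file id, then trade the environment lock for the
+	 * handle lock.  If the handle lock cannot be acquired without
+	 * waiting, the file is closed while we wait; if the file went away
+	 * in the meantime, we retry from here.
+	 */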
+retry: if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if ((ret = __os_exists(real_name, NULL)) == 0) {
+ if (LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+
+ /*
+ * This is special handling for applications that
+ * are locking outside of Berkeley DB (e.g., Sendmail,
+ * Postfix). If we are relying on external FCNTL
+ * locking and we're going to truncate the file, we
+ * cannot first open the file to verify that it is
+ * a DB file and then close/reopen to do the truncate
+ * since that will lose the external FCNTL lock.
+ * So, we special case it and leap right into the
+ * truncate code.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING) && LF_ISSET(DB_TRUNCATE))
+ goto do_trunc;
+
+reopen: ret = __fop_read_meta(dbenv, real_name,
+ mbuf, sizeof(mbuf), fhp,
+ LF_ISSET(DB_FCNTL_LOCKING) && txn == NULL ? 1 : 0,
+ &len, oflags);
+ /*
+ * This is special handling for applications that are doing
+ * file locking outside of Berkeley DB (e.g., Sendmail,
+	 * Postfix).  So, if you're doing FCNTL_LOCKING and are non-
+ * transactional, we're going to treat 0-length files as a
+ * special case and let you proceed.
+ */
+ if (ret != 0 &&
+ LF_ISSET(DB_FCNTL_LOCKING) && txn == NULL && len == 0) {
+ tmpname = (char *)real_name;
+ real_name = NULL;
+ goto creat2;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ if ((ret = __db_meta_setup(dbenv,
+ dbp, real_name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, get our handle lock. */
+ lmode = LF_ISSET(DB_TRUNCATE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, NULL, DB_LOCK_NOWAIT)) == 0) {
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+ } else {
+ /*
+ * If someone is doing FCNTL locking outside of us,
+ * then we should never have a lock conflict and
+ * should never get to here. We need to assert that
+ * because we are about to close the fd which will
+ * release the FCNTL locks.
+ */
+ DB_ASSERT(!LF_ISSET(DB_FCNTL_LOCKING));
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+ ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, &elock, 0);
+ if (ret == DB_LOCK_NOTEXIST)
+ goto retry;
+ if (ret != 0)
+ goto err;
+ /*
+ * XXX I need to convince myself that I don't need
+ * to re-read the metadata page here.
+ * XXX If you do need to re-read it you'd better
+ * decrypt it too...
+ */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, fhp)) != 0)
+ goto err;
+ }
+
+ /*
+ * Check for a truncate which needs to leap over to the
+ * create case.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ /*
+ * Sadly, we need to close and reopen the handle
+ * in order to do the actual truncate. We couldn't
+ * do the truncate on the initial open because we
+ * needed to read the old file-id in order to lock.
+ */
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+do_trunc: if ((ret = __os_open(dbenv,
+ real_name, DB_OSO_TRUNC, 0, fhp)) != 0)
+ goto err;
+ /*
+			 * This is non-transactional, so we'll do the
+ * open/create in-place.
+ */
+ tmp_lock = dbp->handle_lock;
+ truncating = 1;
+ tmpname = (char *)name;
+ goto creat2;
+ }
+
+ /*
+ * Check for a file in the midst of a rename
+ */
+ if (F_ISSET(dbp, DB_AM_IN_RENAME)) {
+ if (LF_ISSET(DB_CREATE)) {
+ F_CLR(dbp, DB_AM_IN_RENAME);
+ goto create;
+ } else {
+ ret = ENOENT;
+ goto err;
+ }
+ }
+
+ CLOSE_HANDLE(dbp, fhp);
+ goto done;
+ }
+
+ /* File does not exist. */
+ if (!LF_ISSET(DB_CREATE))
+ goto err;
+ ret = 0;
+
+ /*
+ * Need to create file; we need to set up the file,
+ * the fileid and the locks. Then we need to call
+ * the appropriate routines to create meta-data pages.
+ */
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+
+create: if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ goto err;
+ if (TXN_ON(dbenv) && txn != NULL &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv,
+ stxn, fhp, tmpname, DB_APP_DATA, mode)) != 0)
+ goto err;
+ tmp_created = 1;
+creat2: if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, tmpname, 0, NULL, &real_tmpname)) != 0)
+ goto err;
+
+ /* Set the pagesize if it isn't yet set. */
+ if (dbp->pgsize == 0 &&
+ (ret = __fop_set_pgsize(dbp, fhp, real_tmpname)) != 0)
+ goto errmsg;
+
+ /* Construct a file_id. */
+ if ((ret = __os_fileid(dbenv, real_tmpname, 1, dbp->fileid)) != 0)
+ goto errmsg;
+
+ if ((ret = __db_new_file(dbp, stxn, fhp, tmpname)) != 0)
+ goto err;
+ CLOSE_HANDLE(dbp, fhp);
+
+ /* Now move the file into place. */
+ if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (!truncating && __os_exists(real_name, NULL) == 0) {
+ /*
+ * Someone managed to create the file; remove our temp
+ * and try to open the file that now exists.
+ */
+ (void)__fop_remove(dbenv,
+ NULL, dbp->fileid, tmpname, DB_APP_DATA);
+ if (LOCKING_ON(dbenv))
+ dbenv->lock_put(dbenv, &dbp->handle_lock);
+ LOCK_INIT(dbp->handle_lock);
+
+		/* If we have a saved handle, close it. */
+ if (LF_ISSET(DB_FCNTL_LOCKING))
+ (void)__os_closehandle(dbenv, fhp);
+ if (stxn != NULL) {
+ ret = stxn->abort(stxn);
+ stxn = NULL;
+ }
+ if (ret != 0)
+ goto err;
+ goto reopen;
+ }
+
+ /* We've successfully created, move the file into place. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+ if (!truncating && (ret = __fop_rename(dbenv,
+ stxn, tmpname, name, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+
+	/* If this was a truncate, release the lock on the old file. */
+ if (LOCK_ISSET(tmp_lock) && (ret = __lock_put(dbenv, &tmp_lock)) != 0)
+ goto err;
+
+ if (stxn != NULL) {
+ *retidp = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+ } else
+ *retidp = TXN_INVALID;
+
+ if (ret != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_CREATED);
+
+ if (0) {
+errmsg: __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+
+err: if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmp_created && txn == NULL)
+ (void)__fop_remove(dbenv,
+ NULL, NULL, tmpname, DB_APP_DATA);
+ if (F_ISSET(fhp, DB_FH_VALID))
+ CLOSE_HANDLE(dbp, fhp);
+ if (LOCK_ISSET(tmp_lock))
+ __lock_put(dbenv, &tmp_lock);
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+ if (LOCK_ISSET(elock))
+ (void)REL_ENVLOCK(dbenv, &elock);
+ if (created_locker) {
+ (void)__lock_id_free(dbenv, dbp->lid);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+ if (created_fhp && !F_ISSET(fhp, DB_FH_VALID))
+ __os_free(dbenv, fhp);
+ }
+
+done: /*
+ * There are cases where real_name and tmpname take on the
+ * exact same string, so we need to make sure that we do not
+ * free twice.
+ */
+ if (!truncating && tmpname != NULL && tmpname != real_name)
+ __os_free(dbenv, tmpname);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ if (real_tmpname != NULL)
+ __os_free(dbenv, real_tmpname);
+
+ return (ret);
+}
+
+/*
+ * __fop_set_pgsize --
+ * Set the page size based on file information.
+ */
+static int
+__fop_set_pgsize(dbp, fhp, name)
+ DB *dbp;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ u_int32_t iopsize;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Use the filesystem's optimum I/O size as the pagesize if a pagesize
+	 * was not specified.  Some filesystems have 64K as their optimum I/O size,
+ * but as that results in fairly large default caches, we limit the
+ * default pagesize to 16K.
+ */
+ if ((ret = __os_ioinfo(dbenv, name, fhp, NULL, NULL, &iopsize)) != 0) {
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ return (ret);
+ }
+ if (iopsize < 512)
+ iopsize = 512;
+ if (iopsize > 16 * 1024)
+ iopsize = 16 * 1024;
+
+ /*
+ * Sheer paranoia, but we don't want anything that's not a power-of-2
+ * (we rely on that for alignment of various types on the pages), and
+ * we want a multiple of the sector size as well. If the value
+ * we got out of __os_ioinfo looks bad, use a default instead.
+ */
+ if (!IS_VALID_PAGESIZE(iopsize))
+ iopsize = DB_DEF_IOSIZE;
+
+ dbp->pgsize = iopsize;
+ F_SET(dbp, DB_AM_PGDEF);
+
+ return (0);
+}
+
+/*
+ * __fop_subdb_setup --
+ *
+ * Subdb setup is significantly simpler than file setup. In terms of
+ * locking, for the duration of the operation/transaction, the locks on
+ * the meta-data page will suffice to protect us from simultaneous operations
+ * on the sub-database. Before we complete the operation though, we'll get a
+ * handle lock on the subdatabase so that no one else can try to remove it
+ * while we've got it open. We use an object that looks like the meta-data
+ * page lock, but with a different type (DB_HANDLE_LOCK), for the
+ * long-term handle lock.
+ *
+ * PUBLIC: int __fop_subdb_setup __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, int, u_int32_t));
+ */
+int
+__fop_subdb_setup(dbp, txn, mname, name, mode, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *mname, *name;
+ int mode;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ int do_remove, ret;
+
+ mdbp = NULL;
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_master_open(dbp, txn, mname, flags, mode, &mdbp)) != 0)
+ return (ret);
+
+ /*
+ * We are going to close this instance of the master, so we can
+ * steal its handle instead of reopening a handle on the database.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ dbp->saved_open_fhp = mdbp->saved_open_fhp;
+ mdbp->saved_open_fhp = NULL;
+ }
+
+ /* Now copy the pagesize. */
+ dbp->pgsize = mdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+
+ if (name != NULL && (ret = __db_master_update(mdbp, dbp, txn,
+ name, dbp->type, MU_OPEN, NULL, flags)) != 0)
+ goto err;
+
+ /*
+ * Hijack the master's locker ID as well, so that our locks don't
+ * conflict with the master's. Since we're closing the master,
+ * that lid would just have been freed anyway. Once we've gotten
+ * the locker id, we need to acquire the handle lock for this
+ * subdatabase.
+ */
+ dbp->lid = mdbp->lid;
+ mdbp->lid = DB_LOCK_INVALIDID;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, mname);
+
+ /*
+ * We copy our fileid from our master so that we all open
+ * the same file in mpool. We'll use the meta-pgno to lock
+ * so that we end up with different handle locks.
+ */
+
+ memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv, dbp,
+ txn == NULL ? dbp->lid : txn->txnid,
+ F_ISSET(dbp, DB_AM_CREATED) || LF_ISSET(DB_WRITEOPEN) ?
+ DB_LOCK_WRITE : DB_LOCK_READ, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __db_init_subdb(mdbp, dbp, name, txn)) != 0)
+ goto err;
+
+ /*
+ * In the file create case, these happen in separate places so we have
+ * two different tests. They end up in the same place for subdbs, but
+ * for compatibility with file testing, we put them both here anyway.
+ */
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, mname);
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, mname);
+
+ /*
+ * File exists and we have the appropriate locks; we should now
+ * process a normal open.
+ */
+ if (F_ISSET(mdbp, DB_AM_CREATED)) {
+ F_SET(dbp, DB_AM_CREATED_MSTR);
+ F_CLR(mdbp, DB_AM_DISCARD);
+ }
+
+ /*
+ * The master's handle lock is under the control of the
+ * subdb (it acquired the master's locker). We want to
+ * keep the master's handle lock so that no one can remove
+ * the file while the subdb is open. If we register the
+ * trade event and then invalidate the copy of the lock
+ * in the master's handle, that will accomplish this. However,
+ * before we register this event, we'd better remove any
+ * events that we've already registered for the master.
+ */
+
+ if (!F_ISSET(dbp, DB_AM_RECOVER) && txn != NULL) {
+ /* Unregister old master events. */
+ __txn_remlock(dbenv,
+ txn, &mdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ /* Now register the new event. */
+ if ((ret = __txn_lockevent(dbenv,
+ txn, dbp, &mdbp->handle_lock, dbp->lid)) != 0)
+ goto err;
+ }
+ LOCK_INIT(mdbp->handle_lock);
+ return (__db_close_i(mdbp, txn, 0));
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+
+ /* If we created the master file then we need to remove it. */
+ if (mdbp != NULL) {
+ do_remove = F_ISSET(mdbp, DB_AM_CREATED) ? 1 : 0;
+ if (do_remove)
+ F_SET(mdbp, DB_AM_DISCARD);
+ (void)__db_close_i(mdbp, txn, 0);
+ if (do_remove) {
+ (void)db_create(&mdbp, dbp->dbenv, 0);
+ (void)__db_remove_i(mdbp, txn, mname, NULL);
+ }
+ }
+ return (ret);
+}
+
+/*
+ * __fop_remove_setup --
+ * Open handle appropriately and lock for removal of a database file.
+ *
+ * PUBLIC: int __fop_remove_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t));
+ */
+int
+__fop_remove_setup(dbp, txn, name, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ DB_LOCK elock;
+ u_int8_t mbuf[DBMETASIZE];
+ int ret;
+
+ COMPQUIET(flags, 0);
+ dbenv = dbp->dbenv;
+ PANIC_CHECK(dbenv);
+ LOCK_INIT(elock);
+
+ /* Create locker if necessary. */
+ if (LOCKING_ON(dbenv)) {
+ if (txn != NULL)
+ dbp->lid = txn->txnid;
+ else if (dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ }
+ }
+
+ /*
+ * We are about to open a file handle and then possibly close it.
+ * We cannot close handles if we are doing FCNTL locking. However,
+ * there is no way to pass the FCNTL flag into this routine via the
+ * user API. The only way we can get in here and be doing FCNTL
+ * locking is if we are trying to clean up an open that was called
+	 * with FCNTL locking.  In that case, saved_open_fhp should already
+	 * be set, so we use that field to tell us whether we must avoid
+	 * closing the handle.
+ */
+ fhp = dbp->saved_open_fhp;
+ DB_ASSERT(LF_ISSET(DB_FCNTL_LOCKING) ||
+ fhp == NULL || !F_ISSET(fhp, DB_FH_VALID));
+
+ /*
+ * Lock environment to protect file open. That will enable us to
+ * read the meta-data page and get the fileid so that we can lock
+ * the handle.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if ((ret = __fop_read_meta(dbenv,
+ name, mbuf, sizeof(mbuf), fhp, 0, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret =
+ __db_meta_setup(dbenv, dbp, name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, release the environment and get the handle lock. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ return (0);
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ return (ret);
+}
+
+/*
+ * __fop_read_meta --
+ * Read the meta-data page from a file and return it in buf. The
+ * open file handle is returned in fhp.
+ *
+ * PUBLIC: int __fop_read_meta __P((DB_ENV *, const char *,
+ * PUBLIC: u_int8_t *, size_t, DB_FH *, int, size_t *, u_int32_t));
+ */
+int
+__fop_read_meta(dbenv, name, buf, size, fhp, errok, nbytesp, flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int8_t *buf;
+ size_t size;
+ DB_FH *fhp;
+ int errok;
+ size_t *nbytesp;
+ u_int32_t flags;
+{
+ DB_FH fh, *lfhp;
+ size_t nr;
+ int myfhp, ret;
+
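+	/*
+	 * Callers that pass a non-zero "errok" are merely probing for a
+	 * valid meta-data page, so read failures and short reads are
+	 * returned without complaining through __db_err.
+	 */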
+ nr = 0;
+ myfhp = 0;
+ memset(&fh, 0, sizeof(fh));
+ lfhp = fhp == NULL ? &fh : fhp;
+ myfhp = F_ISSET(lfhp, DB_FH_VALID);
+ if (!myfhp && (ret = __os_open(dbenv, name, flags, 0, lfhp)) != 0)
+ goto err;
+ if ((ret = __os_read(dbenv, lfhp, buf, size, &nr)) != 0) {
+ if (!errok)
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ goto err;
+ }
+
+ if (nr != size) {
+ if (!errok)
+ __db_err(dbenv,
+ "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /*
+ * On error, we would like to close the handle. However, if the
+ * handle was opened in the caller, we cannot. If there is no error,
+ * then we only close the handle if we opened it here.
+ */
+ if (!myfhp && F_ISSET((lfhp), DB_FH_VALID) && (ret != 0 || fhp == NULL))
+ __os_closehandle(dbenv, lfhp);
+
+ if (nbytesp != NULL)
+ *nbytesp = nr;
+ return (ret);
+}
+
+/*
+ * __fop_dummy --
+ * This implements the creation and name swapping of dummy files that
+ * we use for remove and rename (remove is simply a rename with a delayed
+ * remove).
+ *
+ * PUBLIC: int __fop_dummy __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__fop_dummy(dbp, txn, old, new, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *old, *new;
+ u_int32_t flags;
+{
+ DB *tmpdbp;
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ DB_LSN lsn;
+ DBT fiddbt, namedbt, tmpdbt;
+ DB_TXN *stxn;
+ char *back;
+ char *realback, *realnew, *realold;
+ int ret, t_ret;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t locker, stxnid;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ realback = NULL;
+ realnew = NULL;
+ realold = NULL;
+ back = NULL;
+ stxn = NULL;
+ tmpdbp = NULL;
+
+ DB_ASSERT(txn != NULL);
+ locker = txn->txnid;
+
+ /* Begin sub transaction to encapsulate the rename. */
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+
+ /* We need to create a dummy file as a place holder. */
+ if ((ret = __db_backup_name(dbenv, new, stxn, &back)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, back, flags, NULL, &realback)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv, stxn, NULL, back, DB_APP_DATA, 0)) != 0)
+ goto err;
+
+ memset(mbuf, 0, sizeof(mbuf));
+ if ((ret =
+ __os_fileid(dbenv, realback, 1, ((DBMETA *)mbuf)->uid)) != 0)
+ goto err;
+ ((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
+ if ((ret = __fop_write(dbenv,
+ stxn, back, DB_APP_DATA, NULL, 0, mbuf, DBMETASIZE, 1)) != 0)
+ goto err;
+
+ /* Create a dummy dbp handle. */
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ goto err;
+ memcpy(&tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN);
+
+ /* Now, lock the name space while we initialize this file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &realnew)) != 0)
+ goto err;
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (__os_exists(realnew, NULL) == 0) {
+ ret = EEXIST;
+ goto err;
+ }
+
+ /*
+ * While we have the namespace locked, do the renames and then
+ * swap for the handle lock.
+ */
+ if ((ret = __fop_rename(dbenv,
+ stxn, old, new, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_rename(dbenv,
+ stxn, back, old, tmpdbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_lock_handle(dbenv,
+ tmpdbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ /*
+ * We just acquired a transactional lock on the tmp handle.
+ * We need to null out the tmp handle's lock so that it
+ * doesn't create problems for us in the close path.
+ */
+ LOCK_INIT(tmpdbp->handle_lock);
+
+ if (stxn != NULL) {
+ /* Commit the child. */
+ stxnid = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+
+ /* Now log the child information in the parent. */
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ memset(&tmpdbt, 0, sizeof(tmpdbt));
+ memset(&namedbt, 0, sizeof(namedbt));
+ fiddbt.data = dbp->fileid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ tmpdbt.data = tmpdbp->fileid;
+ tmpdbt.size = DB_FILE_ID_LEN;
+ namedbt.data = (void *)old;
+ namedbt.size = (u_int32_t)strlen(old) + 1;
+ if ((t_ret =
+ __fop_file_remove_log(dbenv, txn, &lsn, 0, &fiddbt,
+ &tmpdbt, &namedbt, DB_APP_DATA, stxnid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* This is a delayed delete of the dummy file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, flags, NULL, &realold)) != 0)
+ goto err;
+ if ((ret = __txn_remevent(dbenv, txn, realold, NULL)) != 0)
+ goto err;
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmpdbp != NULL &&
+ (t_ret = __db_close_i(tmpdbp, NULL, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (realold != NULL)
+ __os_free(dbenv, realold);
+ if (realnew != NULL)
+ __os_free(dbenv, realnew);
+ if (realback != NULL)
+ __os_free(dbenv, realback);
+ if (back != NULL)
+ __os_free(dbenv, back);
+ return (ret);
+}
+
+/*
+ * __fop_dbrename --
+ * Do the appropriate file locking and file system operations
+ * to effect a dbrename in the absence of transactions (__fop_dummy
+ * and the subsequent calls in __db_rename do the work for the
+ * transactional case).
+ *
+ * PUBLIC: int __fop_dbrename __P((DB *, const char *, const char *));
+ */
+int
+__fop_dbrename(dbp, old, new)
+ DB *dbp;
+ const char *old, *new;
+{
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ char *real_new, *real_old;
+ int ret, tret;
+
+ dbenv = dbp->dbenv;
+ real_new = NULL;
+ real_old = NULL;
+ LOCK_INIT(elock);
+
+ /* Find the real new name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &real_new)) != 0)
+ goto err;
+
+ /*
+ * It is an error to rename a file over one that already exists,
+ * as that wouldn't be transaction-safe.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if (__os_exists(real_new, NULL) == 0) {
+ ret = EEXIST;
+ __db_err(dbenv, "rename: file %s exists", real_new);
+ goto err;
+ }
+
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, 0, NULL, &real_old)) != 0)
+ goto err;
+
+ ret = dbenv->memp_nameop(dbenv, dbp->fileid, new, real_old, real_new);
+
+err: if ((tret = REL_ENVLOCK(dbenv, &elock)) != 0 && ret == 0)
+ ret = tret;
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+ if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ return (ret);
+}
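+
+/*
+ * A hedged sketch of the application-level rename that reaches
+ * __fop_dbrename when the environment is not transactional.  The handle
+ * and file names below are illustrative assumptions, not taken from any
+ * existing caller:
+ *
+ *	DB *dbp;
+ *	int ret;
+ *
+ *	if ((ret = db_create(&dbp, dbenv, 0)) == 0)
+ *		ret = dbp->rename(dbp, "addresses.db", NULL, "contacts.db", 0);
+ */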
diff --git a/libdb/hash/hash.c b/libdb/hash/hash.c
new file mode 100644
index 0000000..8f22b58
--- /dev/null
+++ b/libdb/hash/hash.c
@@ -0,0 +1,2062 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+static int __ham_bulk __P((DBC *, DBT *, u_int32_t));
+static int __ham_c_close __P((DBC *, db_pgno_t, int *));
+static int __ham_c_del __P((DBC *));
+static int __ham_c_destroy __P((DBC *));
+static int __ham_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_writelock __P((DBC *));
+static int __ham_dup_return __P((DBC *, DBT *, u_int32_t));
+static int __ham_expand_table __P((DBC *));
+static int __ham_lookup __P((DBC *,
+ const DBT *, u_int32_t, db_lockmode_t, db_pgno_t *));
+static int __ham_overwrite __P((DBC *, DBT *, u_int32_t));
+
+/*
+ * __ham_quick_delete --
+ * When performing a DB->del operation that does not involve secondary
+ * indices and is not removing an off-page duplicate tree, we can
+ * speed things up substantially by removing the entire duplicate
+ * set, if any is present, in one operation, rather than by conjuring
+ * up and deleting each of the items individually. (All are stored
+ * in one big HKEYDATA structure.) We don't bother to distinguish
+ * on-page duplicate sets from single, non-dup items; they're deleted
+ * in exactly the same way.
+ *
+ * This function is called by __db_delete when the appropriate
+ * conditions are met, and it performs the delete in the optimized way.
+ *
+ * The cursor should be set to the first item in the duplicate
+ * set, or to the sole key/data pair when the key does not have a
+ * duplicate set, before the function is called.
+ *
+ * PUBLIC: int __ham_quick_delete __P((DBC *));
+ */
+int
+__ham_quick_delete(dbc)
+ DBC *dbc;
+{
+ int ret, t_ret;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ /* Assert that we're not using secondary indices. */
+ DB_ASSERT(!F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ /*
+ * We should assert that we're not a primary either, but that
+ * would require grabbing the dbp's mutex, so we don't bother.
+ */
+
+ /* Assert that we're set, but not to an off-page duplicate. */
+ DB_ASSERT(IS_INITIALIZED(dbc));
+ DB_ASSERT(((HASH_CURSOR *)dbc->internal)->opd == NULL);
+
+ ret = __ham_del_pair(dbc, 1);
+
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
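+
+/*
+ * A minimal sketch of the application-level call that can take this fast
+ * path: a DB->del on a hash database with no secondary indices and no
+ * off-page duplicate tree.  The handle name, key value and error handling
+ * are illustrative assumptions:
+ *
+ *	DBT key;
+ *	int ret;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	key.data = "somekey";
+ *	key.size = sizeof("somekey") - 1;
+ *	if ((ret = dbp->del(dbp, NULL, &key, 0)) != 0)
+ *		dbp->err(dbp, ret, "DB->del");
+ */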
+
+/* ****************** CURSORS ********************************** */
+/*
+ * __ham_c_init --
+ * Initialize the hash-specific portion of a cursor.
+ *
+ * PUBLIC: int __ham_c_init __P((DBC *));
+ */
+int
+__ham_c_init(dbc)
+ DBC *dbc;
+{
+ DB_ENV *dbenv;
+ HASH_CURSOR *new_curs;
+ int ret;
+
+ dbenv = dbc->dbp->dbenv;
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(struct cursor_t), &new_curs)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(dbenv,
+ dbc->dbp->pgsize, &new_curs->split_buf)) != 0) {
+ __os_free(dbenv, new_curs);
+ return (ret);
+ }
+
+ dbc->internal = (DBC_INTERNAL *) new_curs;
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
+ dbc->c_put = __db_c_put;
+ dbc->c_am_bulk = __ham_bulk;
+ dbc->c_am_close = __ham_c_close;
+ dbc->c_am_del = __ham_c_del;
+ dbc->c_am_destroy = __ham_c_destroy;
+ dbc->c_am_get = __ham_c_get;
+ dbc->c_am_put = __ham_c_put;
+ dbc->c_am_writelock = __ham_c_writelock;
+
+ __ham_item_init(dbc);
+
+ return (0);
+}
+
+/*
+ * __ham_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__ham_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HKEYDATA *dp;
+ int doroot, gotmeta, ret, t_ret;
+ u_int32_t dirty;
+
+ COMPQUIET(rmroot, 0);
+ mpf = dbc->dbp->mpf;
+ dirty = 0;
+ doroot = gotmeta = ret = 0;
+ hcp = (HASH_CURSOR *) dbc->internal;
+
+ /* Check for off page dups. */
+ if (dbc->internal->opd != NULL) {
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto done;
+ gotmeta = 1;
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ goto out;
+ dp = (HKEYDATA *)H_PAIRDATA(dbc->dbp, hcp->page, hcp->indx);
+
+ /* If it's not a dup, we aborted before we changed it. */
+ if (HPAGE_PTYPE(dp) == H_OFFDUP)
+ memcpy(&root_pgno,
+ HOFFPAGE_PGNO(dp), sizeof(db_pgno_t));
+ else
+ root_pgno = PGNO_INVALID;
+
+ if ((ret =
+ hcp->opd->c_am_close(hcp->opd, root_pgno, &doroot)) != 0)
+ goto out;
+ if (doroot != 0) {
+ if ((ret = __ham_del_pair(dbc, 1)) != 0)
+ goto out;
+ dirty = DB_MPOOL_DIRTY;
+ }
+ }
+
+out: if (hcp->page != NULL && (t_ret =
+ mpf->put(mpf, hcp->page, dirty)) != 0 && ret == 0)
+ ret = t_ret;
+ if (gotmeta != 0 && (t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+done:
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_c_destroy --
+ * Cleanup the access method private part of a cursor.
+ */
+static int
+__ham_c_destroy(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (hcp->split_buf != NULL)
+ __os_free(dbc->dbp->dbenv, hcp->split_buf);
+ __os_free(dbc->dbp->dbenv, hcp);
+
+ return (0);
+}
+
+/*
+ * __ham_c_count --
+ * Return a count of on-page duplicates.
+ *
+ * PUBLIC: int __ham_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__ham_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ db_indx_t len;
+ db_recno_t recno;
+ int ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ recno = 0;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ return (ret);
+
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ case H_OFFPAGE:
+ recno = 1;
+ break;
+ case H_DUPLICATE:
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ for (; p < pend; recno++) {
+ /* p may be unaligned, so copy rather than dereferencing it directly. */
+ memcpy(&len, p, sizeof(db_indx_t));
+ p += 2 * sizeof(db_indx_t) + len;
+ }
+
+ break;
+ default:
+ ret = __db_pgfmt(dbp->dbenv, hcp->pgno);
+ goto err;
+ }
+
+ *recnop = recno;
+
+err: if ((t_ret = mpf->put(mpf, hcp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ return (ret);
+}
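+
+/*
+ * For reference, the H_DUPLICATE walk above relies on the on-page
+ * duplicate encoding, in which each element is stored as
+ *
+ *	[len (db_indx_t)] [data bytes] [len (db_indx_t)]
+ *
+ * so stepping forward by 2 * sizeof(db_indx_t) + len (that is,
+ * DUP_SIZE(len)) skips exactly one element, and the trailing copy of len
+ * is what makes backward traversal possible as well.
+ */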
+
+static int
+__ham_c_del(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBT repldbt;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED))
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_WRITE)) != 0)
+ goto out;
+
+ /* Off-page duplicates. */
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP)
+ goto out;
+
+ if (F_ISSET(hcp, H_ISDUP)) { /* On-page duplicate. */
+ if (hcp->dup_off == 0 &&
+ DUP_SIZE(hcp->dup_len) == LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx))
+ ret = __ham_del_pair(dbc, 1);
+ else {
+ repldbt.flags = 0;
+ F_SET(&repldbt, DB_DBT_PARTIAL);
+ repldbt.doff = hcp->dup_off;
+ repldbt.dlen = DUP_SIZE(hcp->dup_len);
+ repldbt.size = 0;
+ repldbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
+ hcp->indx));
+ if ((ret = __ham_replpair(dbc, &repldbt, 0)) == 0) {
+ hcp->dup_tlen -= DUP_SIZE(hcp->dup_len);
+ F_SET(hcp, H_DELETED);
+ ret = __ham_c_update(dbc,
+ DUP_SIZE(hcp->dup_len), 0, 1);
+ }
+ }
+
+ } else /* Not a duplicate */
+ ret = __ham_del_pair(dbc, 1);
+
+out: if (hcp->page != NULL) {
+ if ((t_ret = mpf->put(mpf,
+ hcp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ }
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_c_dup --
+ * Duplicate a hash cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __ham_c_dup __P((DBC *, DBC *));
+ */
+int
+__ham_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ HASH_CURSOR *orig, *new;
+
+ orig = (HASH_CURSOR *)orig_dbc->internal;
+ new = (HASH_CURSOR *)new_dbc->internal;
+
+ new->bucket = orig->bucket;
+ new->lbucket = orig->lbucket;
+ new->dup_off = orig->dup_off;
+ new->dup_len = orig->dup_len;
+ new->dup_tlen = orig->dup_tlen;
+
+ if (F_ISSET(orig, H_DELETED))
+ F_SET(new, H_DELETED);
+ if (F_ISSET(orig, H_ISDUP))
+ F_SET(new, H_ISDUP);
+
+ /*
+ * If the old cursor held a lock and we're not in transactions, get one
+ * for the new one. The reason that we don't need a new lock if we're
+ * in a transaction is because we already hold a lock and will continue
+ * to do so until commit, so there is no point in reacquiring it. We
+ * don't know if the old lock was a read or write lock, but it doesn't
+ * matter. We'll get a read lock. We know that this locker already
+ * holds a lock of the correct type, so if we need a write lock and
+ * request it, we know that we'll get it.
+ */
+ if (!LOCK_ISSET(orig->lock) || orig_dbc->txn != NULL)
+ return (0);
+
+ return (__ham_lock_bucket(new_dbc, DB_LOCK_READ));
+}
+
+static int
+__ham_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ db_lockmode_t lock_type;
+ int get_key, ret, t_ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ if (F_ISSET(dbc, DBC_RMW))
+ lock_type = DB_LOCK_WRITE;
+ else
+ lock_type = DB_LOCK_READ;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = 0;
+
+ ret = 0;
+ get_key = 1;
+ switch (flags) {
+ case DB_PREV_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_PREV:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_prev(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ ret = __ham_item_last(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ ret = __ham_item_first(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_DUP:
+ /* cgetchk has already determined that the cursor is set. */
+ F_SET(hcp, H_DUPONLY);
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ ret = __ham_lookup(dbc, key, 0, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_GET_BOTHC:
+ F_SET(hcp, H_DUPONLY);
+
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_CURRENT:
+ /* cgetchk has already determined that the cursor is set. */
+ if (F_ISSET(hcp, H_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ ret = __ham_item(dbc, lock_type, pgnop);
+ break;
+ }
+
+ /*
+ * Must always enter this loop to do error handling and
+ * check for big key/data pair.
+ */
+ for (;;) {
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+ else if (F_ISSET(hcp, H_OK)) {
+ if (*pgnop == PGNO_INVALID)
+ ret = __ham_dup_return(dbc, data, flags);
+ break;
+ } else if (!F_ISSET(hcp, H_NOMORE)) {
+ __db_err(dbp->dbenv,
+ "H_NOMORE returned to __ham_c_get");
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Ran out of entries in a bucket; change buckets.
+ */
+ switch (flags) {
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ ret = mpf->put(mpf, hcp->page, 0);
+ hcp->page = NULL;
+ if (hcp->bucket == 0) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ F_CLR(hcp, H_ISDUP);
+ hcp->bucket--;
+ hcp->indx = NDX_INVALID;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (ret == 0)
+ ret = __ham_item_prev(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ ret = mpf->put(mpf, hcp->page, 0);
+ hcp->page = NULL;
+ hcp->indx = NDX_INVALID;
+ hcp->bucket++;
+ F_CLR(hcp, H_ISDUP);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (hcp->bucket > hcp->hdr->max_bucket) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ if (ret == 0)
+ ret = __ham_item_next(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTHC:
+ case DB_GET_BOTH_RANGE:
+ case DB_NEXT_DUP:
+ case DB_SET:
+ case DB_SET_RANGE:
+ /* Key not found. */
+ ret = DB_NOTFOUND;
+ goto err;
+ case DB_CURRENT:
+ /*
+ * This should only happen if you are doing
+ * deletes and reading with concurrent threads
+ * and not doing proper locking. We return
+ * the same error code as we would if the
+ * cursor were deleted.
+ */
+ ret = DB_KEYEMPTY;
+ goto err;
+ default:
+ DB_ASSERT(0);
+ }
+ }
+
+ if (get_key == 0)
+ F_SET(key, DB_DBT_ISSET);
+
+err: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ F_CLR(hcp, H_DUPONLY);
+ F_CLR(hcp, H_NEXT_NODUP);
+
+ return (ret);
+}
+
+/*
+ * __ham_bulk -- Return bulk data from a hash table.
+ */
+static int
+__ham_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t dup_len, dup_off, dup_tlen, indx, *inp;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int32_t *endp, key_off, *offp, *saveoff;
+ u_int32_t key_size, size, space;
+ u_int8_t *dbuf, *dp, *hk, *np, *tmp;
+ int is_dup, is_key;
+ int need_pg, next_key, no_dup, pagesize, ret, t_ret;
+
+ ret = 0;
+ key_off = 0;
+ dup_len = dup_off = dup_tlen = 0;
+ size = 0;
+ dbp = dbc->dbp;
+ pagesize = dbp->pgsize;
+ mpf = dbp->mpf;
+ cp = (HASH_CURSOR *)dbc->internal;
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of the space that is left. There is a termination entry. */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+ lock_mode = F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE: DB_LOCK_READ;
+
+next_pg:
+ need_pg = 1;
+ indx = cp->indx;
+ pg = cp->page;
+ inp = P_INP(dbp, pg);
+
+ do {
+ if (is_key) {
+ hk = H_PAIRKEY(dbp, pg, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ memcpy(&key_size,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ size = key_size;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(
+ dbc, key_size, pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ goto back_up;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = LEN_HKEY(dbp, pg, pagesize, indx);
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(HKEYDATA, data));
+ }
+ }
+
+ hk = H_PAIRDATA(dbp, pg, indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_DUPLICATE:
+ case H_KEYDATA:
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ if (indx != 0) {
+ indx -= 2;
+ /* XXX
+ * It's not clear that this is
+ * the right way to fix this,
+ * but here goes.
+ * If we are backing up onto a
+ * duplicate, then we need to
+ * position ourselves at the
+ * end of the duplicate set.
+ * We probably need to make
+ * this work for H_OFFDUP too.
+ * It might be worth making a
+ * dummy cursor and calling
+ * __ham_item_prev.
+ */
+ tmp = H_PAIRDATA(dbp, pg, indx);
+ if (HPAGE_PTYPE(tmp) ==
+ H_DUPLICATE) {
+ dup_off = dup_tlen =
+ LEN_HDATA(dbp, pg,
+ pagesize, indx + 1);
+ memcpy(&dup_len,
+ HKEYDATA_DATA(tmp),
+ sizeof(db_indx_t));
+ }
+ goto get_space;
+ }
+ /* indx == 0 */
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0) {
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(mpf,
+ cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if (cp->bucket == 0) {
+ cp->indx = indx =
+ NDX_INVALID;
+ goto get_space;
+ }
+ if ((ret =
+ __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket--;
+ cp->pgno = BUCKET_TO_PAGE(cp,
+ cp->bucket);
+ cp->indx = NDX_INVALID;
+ if ((ret = __ham_release_meta(
+ dbc)) != 0)
+ return (ret);
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0)
+ return (ret);
+ }
+ indx = cp->indx;
+get_space:
+ /*
+ * See if we put any data in the buffer.
+ */
+ if (offp >= endp ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * Don't continue; we're all out
+ * of space, even though we're
+ * returning success.
+ */
+ next_key = 0;
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+
+ /*
+ * We're about to crack the offset(s) and length(s)
+ * out of an H_KEYDATA or H_DUPLICATE item.
+ * There are three cases:
+ * 1. We were moved into a duplicate set by
+ * the standard hash cursor code. Respect
+ * the dup_off and dup_tlen we were given.
+ * 2. We stumbled upon a duplicate set while
+ * walking the page on our own. We need to
+ * recognize it as a dup and set dup_off and
+ * dup_tlen.
+ * 3. The current item is not a dup.
+ */
+ if (F_ISSET(cp, H_ISDUP)) {
+ /* Case 1 */
+ is_dup = 1;
+ dup_len = cp->dup_len;
+ dup_off = cp->dup_off;
+ dup_tlen = cp->dup_tlen;
+ } else if (HPAGE_PTYPE(hk) == H_DUPLICATE) {
+ /* Case 2 */
+ is_dup = 1;
+ /*
+ * If we run out of memory and bail,
+ * make sure the fact we're in a dup set
+ * isn't ignored later.
+ */
+ F_SET(cp, H_ISDUP);
+ dup_off = 0;
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ dup_tlen = LEN_HDATA(dbp, pg, pagesize, indx);
+ } else
+ /* Case 3 */
+ is_dup = dup_len = dup_off = dup_tlen = 0;
+
+ do {
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ size += (is_key ? 4 : 2) * sizeof(*offp);
+ /*
+ * Since space is an unsigned, if we happen
+ * to wrap, then this comparison will turn out
+ * to be true. XXX Wouldn't it be better to
+ * simply check above that space is greater than
+ * the value we're about to subtract???
+ */
+ if (space > data->ulen) {
+ if (!is_dup || dup_off == 0)
+ goto back_up;
+ dup_off -= (db_indx_t)DUP_SIZE(offp[1]);
+ goto get_space;
+ }
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ if (is_dup) {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data) +
+ dup_off + sizeof(db_indx_t));
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk) + dup_off,
+ sizeof(db_indx_t));
+ dup_off += DUP_SIZE(dup_len);
+ *offp-- = dup_len;
+ } else {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data));
+ *offp-- = LEN_HDATA(dbp, pg,
+ pagesize, indx);
+ }
+ } while (is_dup && dup_off < dup_tlen && no_dup == 0);
+ F_CLR(cp, H_ISDUP);
+ break;
+ case H_OFFDUP:
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ if (is_key) {
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ saveoff = offp;
+ if ((ret = __bam_bulk_duplicates(dbc,
+ pgno, dbuf, is_key ? offp + 2 : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ if (is_key && saveoff == offp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ break;
+ case H_OFFPAGE:
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ memcpy(&size, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if (size > space)
+ goto back_up;
+
+ if ((ret =
+ __bam_bulk_overflow(dbc, size, pgno, np)) != 0)
+ return (ret);
+
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+
+ *offp-- = (int32_t)(np - dbuf);
+ *offp-- = size;
+
+ np += size;
+ space -= size;
+ break;
+ }
+ } while (next_key && (indx += 2) < NUM_ENT(pg));
+
+ cp->indx = indx;
+ cp->dup_len = dup_len;
+ cp->dup_off = dup_off;
+ cp->dup_tlen = dup_tlen;
+
+ /* If we are off the page, then try the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ if ((ret = __ham_item_next(dbc, lock_mode, &pgno)) == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(dbc->dbp->mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket++;
+ if (cp->bucket > cp->hdr->max_bucket) {
+ /*
+ * Restore cursor to its previous state. We're past
+ * the last item in the last bucket, so the next
+ * DBC->c_get(DB_NEXT) will return DB_NOTFOUND.
+ */
+ cp->bucket--;
+ ret = DB_NOTFOUND;
+ } else {
+ /*
+ * Start on the next bucket.
+ *
+ * Note that if this new bucket happens to be empty,
+ * but there's another non-empty bucket after it,
+ * we'll return early. This is a rare case, and we
+ * don't guarantee any particular number of keys
+ * returned on each call, so just let the next call
+ * to bulk get move forward by yet another bucket.
+ */
+ cp->pgno = BUCKET_TO_PAGE(cp, cp->bucket);
+ cp->indx = NDX_INVALID;
+ F_CLR(cp, H_ISDUP);
+ ret = __ham_item_next(dbc, lock_mode, &pgno);
+ }
+
+ if ((t_ret = __ham_release_meta(dbc)) != 0)
+ return (t_ret);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+ *offp = (u_int32_t) -1;
+ return (0);
+}
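+
+/*
+ * A hedged sketch of the caller's side of the bulk interface that
+ * __ham_bulk fills in: the application hands a large DB_DBT_USERMEM
+ * buffer to DBC->c_get and walks the offset/size table with the
+ * DB_MULTIPLE macros.  The buffer size, handle names and error handling
+ * here are illustrative assumptions:
+ *
+ *	DBT key, data;
+ *	void *p, *retkey, *retdata;
+ *	size_t retklen, retdlen;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	data.ulen = 1024 * 1024;
+ *	data.data = malloc(data.ulen);
+ *	data.flags = DB_DBT_USERMEM;
+ *	if (dbcp->c_get(dbcp, &key, &data, DB_MULTIPLE_KEY | DB_NEXT) == 0)
+ *		for (DB_MULTIPLE_INIT(p, &data);;) {
+ *			DB_MULTIPLE_KEY_NEXT(p, &data,
+ *			    retkey, retklen, retdata, retdlen);
+ *			if (p == NULL)
+ *				break;
+ *		}
+ */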
+
+static int
+__ham_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ DBT tmp_val, *myval;
+ HASH_CURSOR *hcp;
+ u_int32_t nbytes;
+ int ret, t_ret;
+
+ /*
+ * The compiler doesn't realize that we only use myval when ret is
+ * equal to 0 and that, if ret is equal to 0, we must have set
+ * myval. So, we initialize it here to shut the compiler up.
+ */
+ COMPQUIET(myval, NULL);
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED) &&
+ flags != DB_KEYFIRST && flags != DB_KEYLAST)
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ switch (flags) {
+ case DB_KEYLAST:
+ case DB_KEYFIRST:
+ case DB_NODUPDATA:
+ nbytes = (ISBIG(hcp, key->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(key->size)) +
+ (ISBIG(hcp, data->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(data->size));
+ if ((ret = __ham_lookup(dbc,
+ key, nbytes, DB_LOCK_WRITE, pgnop)) == DB_NOTFOUND) {
+ ret = 0;
+ if (hcp->seek_found_page != PGNO_INVALID &&
+ hcp->seek_found_page != hcp->pgno) {
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
+ goto err2;
+ hcp->page = NULL;
+ hcp->pgno = hcp->seek_found_page;
+ hcp->indx = NDX_INVALID;
+ }
+
+ if (F_ISSET(data, DB_DBT_PARTIAL) && data->doff != 0) {
+ /*
+ * A partial put, but the key does not exist
+ * and we are not beginning the write at 0.
+ * We must create a data item padded up to doff
+ * and then write the new bytes represented by
+ * val.
+ */
+ if ((ret = __ham_init_dbt(dbp->dbenv, &tmp_val,
+ data->size + data->doff,
+ &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) == 0) {
+ memset(tmp_val.data, 0, data->doff);
+ memcpy((u_int8_t *)tmp_val.data +
+ data->doff, data->data, data->size);
+ myval = &tmp_val;
+ }
+ } else
+ myval = (DBT *)data;
+
+ if (ret == 0)
+ ret = __ham_add_el(dbc, key, myval, H_KEYDATA);
+ goto done;
+ }
+ break;
+ case DB_BEFORE:
+ case DB_AFTER:
+ case DB_CURRENT:
+ ret = __ham_item(dbc, DB_LOCK_WRITE, pgnop);
+ break;
+ }
+
+ if (*pgnop == PGNO_INVALID && ret == 0) {
+ if (flags == DB_CURRENT ||
+ ((flags == DB_KEYFIRST ||
+ flags == DB_KEYLAST || flags == DB_NODUPDATA) &&
+ !(F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))))
+ ret = __ham_overwrite(dbc, data, flags);
+ else
+ ret = __ham_add_dup(dbc, data, flags, pgnop);
+ }
+
+done: if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
+ ret = __ham_expand_table(dbc);
+ F_CLR(hcp, H_EXPAND);
+ }
+
+ if (hcp->page != NULL &&
+ (t_ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+err2: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+err1: return (ret);
+}
+
+/********************************* UTILITIES ************************/
+
+/*
+ * __ham_expand_table --
+ */
+static int
+__ham_expand_table(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
+ HASH_CURSOR *hcp;
+ PAGE *h;
+ db_pgno_t pgno, mpgno;
+ u_int32_t newalloc, new_bucket, old_bucket;
+ int dirty_meta, got_meta, logn, new_double, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+
+ LOCK_INIT(metalock);
+ mmeta = (DBMETA *) hcp->hdr;
+ mpgno = mmeta->pgno;
+ h = NULL;
+ dirty_meta = 0;
+ got_meta = 0;
+ newalloc = 0;
+
+ /*
+ * If the split point is about to increase, make sure that we
+ * have enough extra pages. The calculation here is weird.
+ * We'd like to do this after we've upped max_bucket, but it's
+ * too late then because we've logged the meta-data split. What
+ * we'll do between then and now is increment max bucket and then
+ * see what the log of one greater than that is; here we have to
+ * look at the log of max + 2. VERY NASTY STUFF.
+ *
+ * We figure out what we need to do, then we log it, then request
+ * the pages from mpool. We don't want to fail after extending
+ * the file.
+ *
+ * If the page we are about to split into has already been allocated,
+ * then we simply need to get it to get its LSN. If it hasn't yet
+ * been allocated, then we know its LSN is (0,0).
+ */
+
+ new_bucket = hcp->hdr->max_bucket + 1;
+ old_bucket = new_bucket & hcp->hdr->low_mask;
+
+ new_double = hcp->hdr->max_bucket == hcp->hdr->high_mask;
+ logn = __db_log2(new_bucket);
+
+ if (!new_double || hcp->hdr->spares[logn + 1] != PGNO_INVALID) {
+ /* Page exists; get it so we can get its LSN */
+ pgno = BUCKET_TO_PAGE(hcp, new_bucket);
+ if ((ret =
+ mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ lsn = h->lsn;
+ } else {
+ /* Get the master meta-data page to do allocation. */
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ 0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &mpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto err;
+ got_meta = 1;
+ }
+ pgno = mmeta->last_pgno + 1;
+ ZERO_LSN(lsn);
+ newalloc = 1;
+ }
+
+ /* Log the meta-data split first. */
+ if (DBC_LOGGING(dbc)) {
+ /*
+ * We always log the page number of the first page of
+ * the allocation group. However, the LSN that we log
+ * is either the LSN on the first page (if we did not
+ * do the actual allocation here) or the LSN on the last
+ * page of the unit (if we did do the allocation here).
+ */
+ if ((ret = __ham_metagroup_log(dbp, dbc->txn,
+ &lsn, 0, hcp->hdr->max_bucket, mpgno, &mmeta->lsn,
+ hcp->hdr->dbmeta.pgno, &hcp->hdr->dbmeta.lsn,
+ pgno, &lsn, newalloc)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(lsn);
+
+ hcp->hdr->dbmeta.lsn = lsn;
+
+ if (new_double && hcp->hdr->spares[logn + 1] == PGNO_INVALID) {
+ /*
+ * We need to begin a new doubling and we have not allocated
+ * any pages yet. Read the last page in and initialize it to
+ * make the allocation contiguous. The pgno we calculated
+ * above is the first page allocated. The entry in spares is
+ * that page number minus any buckets already allocated (it
+ * simplifies bucket-to-page translation). After we've set
+ * that, we calculate the last pgno.
+ */
+
+ hcp->hdr->spares[logn + 1] = pgno - new_bucket;
+ pgno += hcp->hdr->max_bucket;
+ mmeta->last_pgno = pgno;
+ mmeta->lsn = lsn;
+ dirty_meta = DB_MPOOL_DIRTY;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+
+ P_INIT(h, dbp->pgsize,
+ pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ }
+
+ /* Write out whatever page we ended up modifying. */
+ h->lsn = lsn;
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ h = NULL;
+
+ /*
+ * Update the meta-data page of this hash database.
+ */
+ hcp->hdr->max_bucket = new_bucket;
+ if (new_double) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask = new_bucket | hcp->hdr->low_mask;
+ }
+
+ /* Relocate records to the new bucket */
+ ret = __ham_split_page(dbc, old_bucket, new_bucket);
+
+err: if (got_meta)
+ (void)mpf->put(mpf, mmeta, dirty_meta);
+
+ if (LOCK_ISSET(metalock))
+ (void)__TLPUT(dbc, metalock);
+
+ if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+ */
+u_int32_t
+__ham_call_hash(dbc, k, len)
+ DBC *dbc;
+ u_int8_t *k;
+ int32_t len;
+{
+ DB *dbp;
+ u_int32_t n, bucket;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+
+ n = (u_int32_t)(hashp->h_hash(dbp, k, len));
+
+ bucket = n & hcp->hdr->high_mask;
+ if (bucket > hcp->hdr->max_bucket)
+ bucket = bucket & hcp->hdr->low_mask;
+ return (bucket);
+}
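+
+/*
+ * A worked illustration of the masking above, with assumed values: if
+ * max_bucket == 5, high_mask == 0x7 and low_mask == 0x3, then a hash
+ * value of 0x1e masks to bucket 6 under high_mask.  Since 6 > max_bucket,
+ * that bucket has not been split off yet, so we fall back to low_mask and
+ * get bucket 2, which still holds the items that will move to bucket 6
+ * when the table next expands.
+ */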
+
+/*
+ * Check for duplicates, and call __db_ret appropriately. Release
+ * everything held by the cursor.
+ */
+static int
+__ham_dup_return(dbc, val, flags)
+ DBC *dbc;
+ DBT *val;
+ u_int32_t flags;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ PAGE *pp;
+ DBT *myval, tmp_val;
+ db_indx_t ndx;
+ db_pgno_t pgno;
+ u_int32_t off, tlen;
+ u_int8_t *hk, type;
+ int cmp, ret;
+ db_indx_t len;
+
+ /* Check for duplicate and return the first one. */
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ndx = H_DATAINDEX(hcp->indx);
+ type = HPAGE_TYPE(dbp, hcp->page, ndx);
+ pp = hcp->page;
+ myval = val;
+
+ /*
+ * There are 4 cases:
+ * 1. We are not in a duplicate set: simply return; the upper layer
+ * will do the right thing.
+ * 2. We are looking at keys and stumbled onto a duplicate.
+ * 3. We are in the middle of a duplicate set. (ISDUP set)
+ * 4. We need to check for particular data match.
+ */
+
+ /* We should never get here with off-page dups. */
+ DB_ASSERT(type != H_OFFDUP);
+
+ /* Case 1 */
+ if (type != H_DUPLICATE && flags != DB_GET_BOTH &&
+ flags != DB_GET_BOTHC && flags != DB_GET_BOTH_RANGE)
+ return (0);
+
+ /*
+ * Here we check for the case where we just stumbled onto a
+ * duplicate. In this case, we do initialization and then
+ * let the normal duplicate code handle it. (Case 2)
+ */
+ if (!F_ISSET(hcp, H_ISDUP) && type == H_DUPLICATE) {
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (flags == DB_LAST ||
+ flags == DB_PREV || flags == DB_PREV_NODUP) {
+ hcp->dup_off = 0;
+ do {
+ memcpy(&len,
+ HKEYDATA_DATA(hk) + hcp->dup_off,
+ sizeof(db_indx_t));
+ hcp->dup_off += DUP_SIZE(len);
+ } while (hcp->dup_off < hcp->dup_tlen);
+ hcp->dup_off -= DUP_SIZE(len);
+ } else {
+ memcpy(&len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ hcp->dup_off = 0;
+ }
+ hcp->dup_len = len;
+ }
+
+ /*
+ * If we are retrieving a specific key/data pair, then we
+ * may need to adjust the cursor before returning data.
+ * Case 4
+ */
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * If we're doing a join, search forward from the
+ * current position, not the beginning of the dup set.
+ */
+ if (flags == DB_GET_BOTHC)
+ F_SET(hcp, H_CONTINUE);
+
+ __ham_dsearch(dbc, val, &off, &cmp, flags);
+
+ /*
+ * This flag is set nowhere else and is safe to
+ * clear unconditionally.
+ */
+ F_CLR(hcp, H_CONTINUE);
+ hcp->dup_off = off;
+ } else {
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (((HKEYDATA *)hk)->type == H_OFFPAGE) {
+ memcpy(&tlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp, val,
+ pgno, tlen, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ } else {
+ /*
+ * We do not zero tmp_val since the comparison
+ * routines may only look at data and size.
+ */
+ tmp_val.data = HKEYDATA_DATA(hk);
+ tmp_val.size = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ cmp = dbp->dup_compare == NULL ?
+ __bam_defcmp(dbp, &tmp_val, val) :
+ dbp->dup_compare(dbp, &tmp_val, val);
+ }
+ }
+
+ if (cmp != 0)
+ return (DB_NOTFOUND);
+ }
+
+ /*
+ * If we're doing a bulk get, we don't want to actually return
+ * the data: __ham_bulk will take care of cracking out the
+ * duplicates appropriately.
+ *
+ * The rest of this function calculates partial offsets and
+ * handles the actual __db_ret, so just return if
+ * DB_MULTIPLE(_KEY) is set.
+ */
+ if (F_ISSET(dbc, DBC_MULTIPLE | DBC_MULTIPLE_KEY))
+ return (0);
+
+ /*
+ * Now, everything is initialized, grab a duplicate if
+ * necessary.
+ */
+ if (F_ISSET(hcp, H_ISDUP)) { /* Case 3 */
+ /*
+ * Copy the DBT in case we are retrieving into user
+ * memory and we need the parameters for it. If the
+ * user requested a partial, then we need to adjust
+ * the user's parameters so we fetch the requested partial range
+ * from within the duplicate, which is itself extracted as a partial.
+ */
+ memcpy(&tmp_val, val, sizeof(*val));
+ if (F_ISSET(&tmp_val, DB_DBT_PARTIAL)) {
+ /*
+ * Take the user's length unless it would go
+ * beyond the end of the duplicate.
+ */
+ if (tmp_val.doff + hcp->dup_off > hcp->dup_len)
+ tmp_val.dlen = 0;
+ else if (tmp_val.dlen + tmp_val.doff >
+ hcp->dup_len)
+ tmp_val.dlen =
+ hcp->dup_len - tmp_val.doff;
+
+ /*
+ * Calculate the new offset.
+ */
+ tmp_val.doff += hcp->dup_off;
+ } else {
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.dlen = hcp->dup_len;
+ tmp_val.doff = hcp->dup_off + sizeof(db_indx_t);
+ }
+ myval = &tmp_val;
+ }
+
+ /*
+ * Finally, if we had a duplicate, pp, ndx, and myval should be
+ * set appropriately.
+ */
+ if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata->data,
+ &dbc->rdata->ulen)) != 0)
+ return (ret);
+
+ /*
+ * In case we sent a temporary DBT off to __db_ret, set the real
+ * return values.
+ */
+ val->data = myval->data;
+ val->size = myval->size;
+
+ F_SET(val, DB_DBT_ISSET);
+
+ return (0);
+}
+
+static int
+__ham_overwrite(dbc, nval, flags)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ HASH_CURSOR *hcp;
+ DBT *myval, tmp_val, tmp_val2;
+ void *newrec;
+ u_int8_t *hk, *p;
+ u_int32_t len, nondup_size;
+ db_indx_t newsize;
+ int ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * This is an overwrite of a duplicate. We should never
+ * be off-page at this point.
+ */
+ DB_ASSERT(hcp->opd == NULL);
+ /* On page dups */
+ if (F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /*
+ * We're going to have to get the current item, then
+ * construct the record, do any padding and do a
+ * replace.
+ */
+ memset(&tmp_val, 0, sizeof(tmp_val));
+ if ((ret =
+ __ham_dup_return(dbc, &tmp_val, DB_CURRENT)) != 0)
+ return (ret);
+
+ /* Figure out new size. */
+ nondup_size = tmp_val.size;
+ newsize = nondup_size;
+
+ /*
+ * Three cases:
+ * 1. strictly append (may need to allocate space
+ * for pad bytes; really gross).
+ * 2. overwrite some and append.
+ * 3. strictly overwrite.
+ */
+ if (nval->doff > nondup_size)
+ newsize +=
+ (nval->doff - nondup_size + nval->size);
+ else if (nval->doff + nval->dlen > nondup_size)
+ newsize += nval->size -
+ (nondup_size - nval->doff);
+ else
+ newsize += nval->size - nval->dlen;
+
+ /*
+ * Make sure that the new size doesn't put us over
+ * the on-page duplicate size, in which case we need
+ * to convert to off-page duplicates.
+ */
+ if (ISBIG(hcp, hcp->dup_tlen - nondup_size + newsize)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ DUP_SIZE(newsize), &newrec)) != 0)
+ return (ret);
+ memset(&tmp_val2, 0, sizeof(tmp_val2));
+ F_SET(&tmp_val2, DB_DBT_PARTIAL);
+
+ /* Construct the record. */
+ p = newrec;
+ /* Initial size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+
+ /* First part of original record. */
+ len = nval->doff > tmp_val.size
+ ? tmp_val.size : nval->doff;
+ memcpy(p, tmp_val.data, len);
+ p += len;
+
+ if (nval->doff > tmp_val.size) {
+ /* Padding */
+ memset(p, 0, nval->doff - tmp_val.size);
+ p += nval->doff - tmp_val.size;
+ }
+
+ /* New bytes */
+ memcpy(p, nval->data, nval->size);
+ p += nval->size;
+
+ /* End of original record (if there is any) */
+ if (nval->doff + nval->dlen < tmp_val.size) {
+ len = tmp_val.size - nval->doff - nval->dlen;
+ memcpy(p, (u_int8_t *)tmp_val.data +
+ nval->doff + nval->dlen, len);
+ p += len;
+ }
+
+ /* Final size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+
+ /*
+ * Make sure that the caller isn't corrupting
+ * the sort order.
+ */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ (u_int8_t *)newrec + sizeof(db_indx_t);
+ tmp_val2.size = newsize;
+ if (dbp->dup_compare(
+ dbp, &tmp_val, &tmp_val2) != 0) {
+ (void)__os_free(dbenv, newrec);
+ return (__db_duperr(dbp, flags));
+ }
+ }
+
+ tmp_val2.data = newrec;
+ tmp_val2.size = DUP_SIZE(newsize);
+ tmp_val2.doff = hcp->dup_off;
+ tmp_val2.dlen = DUP_SIZE(hcp->dup_len);
+
+ ret = __ham_replpair(dbc, &tmp_val2, 0);
+ (void)__os_free(dbenv, newrec);
+
+ /* Update cursor */
+ if (ret != 0)
+ return (ret);
+
+ if (newsize > nondup_size)
+ hcp->dup_tlen += (newsize - nondup_size);
+ else
+ hcp->dup_tlen -= (nondup_size - newsize);
+ hcp->dup_len = DUP_SIZE(newsize);
+ return (0);
+ } else {
+ /* Check whether we need to convert to off page. */
+ if (ISBIG(hcp,
+ hcp->dup_tlen - hcp->dup_len + nval->size)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* Make sure we maintain sort order. */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
+ hcp->indx)) + hcp->dup_off +
+ sizeof(db_indx_t);
+ tmp_val2.size = hcp->dup_len;
+ if (dbp->dup_compare(dbp, nval, &tmp_val2) != 0)
+ return (EINVAL);
+ }
+ /* Overwriting a complete duplicate. */
+ if ((ret =
+ __ham_make_dup(dbp->dbenv, nval, &tmp_val,
+ &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
+ return (ret);
+ /* Now fix what we are replacing. */
+ tmp_val.doff = hcp->dup_off;
+ tmp_val.dlen = DUP_SIZE(hcp->dup_len);
+
+ /* Update cursor */
+ if (nval->size > hcp->dup_len)
+ hcp->dup_tlen += (nval->size - hcp->dup_len);
+ else
+ hcp->dup_tlen -= (hcp->dup_len - nval->size);
+ hcp->dup_len = (db_indx_t)DUP_SIZE(nval->size);
+ }
+ myval = &tmp_val;
+ } else if (!F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /* Put/overwrite */
+ memcpy(&tmp_val, nval, sizeof(*nval));
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.doff = 0;
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE)
+ memcpy(&tmp_val.dlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ tmp_val.dlen = LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ myval = &tmp_val;
+ } else
+ /* Regular partial put */
+ myval = nval;
+
+ return (__ham_replpair(dbc, myval, 0));
+}
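+
+/*
+ * A hedged sketch of the kind of partial put that ends up in
+ * __ham_overwrite.  The key setup, offsets and handle name are
+ * illustrative assumptions: this replaces five bytes starting at offset
+ * ten of the stored data item with the three bytes "NEW".
+ *
+ *	DBT key, data;
+ *	int ret;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	key.data = "somekey";
+ *	key.size = sizeof("somekey") - 1;
+ *	data.data = "NEW";
+ *	data.size = 3;
+ *	data.doff = 10;
+ *	data.dlen = 5;
+ *	data.flags = DB_DBT_PARTIAL;
+ *	ret = dbp->put(dbp, NULL, &key, &data, 0);
+ */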
+
+/*
+ * Given a key and a cursor, sets the cursor to the page/ndx on which
+ * the key resides. If the key is found, the cursor H_OK flag is set
+ * and the pagep, bndx, pgno (dpagep, dndx, dpgno) fields are set.
+ * If the key is not found, the H_OK flag is not set. If the sought
+ * field is non-0, the pagep, bndx, pgno (dpagep, dndx, dpgno) fields
+ * are set indicating where an add might take place. If it is 0,
+ * none of the cursor pointer fields are valid.
+ */
+static int
+__ham_lookup(dbc, key, sought, mode, pgnop)
+ DBC *dbc;
+ const DBT *key;
+ u_int32_t sought;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int match, ret;
+ u_int8_t *hk, *dk;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * Set up cursor so that we're looking for space to add an item
+ * as we cycle through the pages looking for the key.
+ */
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = sought;
+
+ hcp->bucket = __ham_call_hash(dbc, (u_int8_t *)key->data, key->size);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+
+ for (;;) {
+ *pgnop = PGNO_INVALID;
+ if ((ret = __ham_item_next(dbc, mode, pgnop)) != 0)
+ return (ret);
+
+ if (F_ISSET(hcp, H_NOMORE))
+ break;
+
+ hk = H_PAIRKEY(dbp, hcp->page, hcp->indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFPAGE:
+ memcpy(&tlen, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ if (tlen == key->size) {
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp,
+ key, pgno, tlen, NULL, &match)) != 0)
+ return (ret);
+ if (match == 0)
+ goto found_key;
+ }
+ break;
+ case H_KEYDATA:
+ if (key->size ==
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx) &&
+ memcmp(key->data,
+ HKEYDATA_DATA(hk), key->size) == 0) {
+ /* Found the key, check for data type. */
+found_key: F_SET(hcp, H_OK);
+ dk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(dk) == H_OFFDUP)
+ memcpy(pgnop, HOFFDUP_PGNO(dk),
+ sizeof(db_pgno_t));
+ return (0);
+ }
+ break;
+ case H_DUPLICATE:
+ case H_OFFDUP:
+ /*
+ * These are errors because keys are never
+ * duplicated, only data items are.
+ */
+ return (__db_pgfmt(dbp->dbenv, PGNO(hcp->page)));
+ }
+ }
+
+ /*
+ * Item was not found.
+ */
+
+ if (sought != 0)
+ return (ret);
+
+ return (ret);
+}
+
+/*
+ * __ham_init_dbt --
+ * Initialize a dbt using some possibly already allocated storage
+ * for items.
+ *
+ * PUBLIC: int __ham_init_dbt __P((DB_ENV *,
+ * PUBLIC: DBT *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__ham_init_dbt(dbenv, dbt, size, bufp, sizep)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t size;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ int ret;
+
+ memset(dbt, 0, sizeof(*dbt));
+ if (*sizep < size) {
+ if ((ret = __os_realloc(dbenv, size, bufp)) != 0) {
+ *sizep = 0;
+ return (ret);
+ }
+ *sizep = size;
+ }
+ dbt->data = *bufp;
+ dbt->size = size;
+ return (0);
+}
+
+/*
+ * Adjust the cursor after an insert or delete. The cursor passed is
+ * the one that was operated upon; we just need to check any of the
+ * others.
+ *
+ * len indicates the length of the item added/deleted
+ * add indicates if the item indicated by the cursor has just been
+ * added (add == 1) or deleted (add == 0).
+ * is_dup indicates if the addition occurred into a duplicate set.
+ *
+ * PUBLIC: int __ham_c_update
+ * PUBLIC: __P((DBC *, u_int32_t, int, int));
+ */
+int
+__ham_c_update(dbc, len, add, is_dup)
+ DBC *dbc;
+ u_int32_t len;
+ int add, is_dup;
+{
+ DB *dbp, *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ HASH_CURSOR *hcp, *lcp;
+ int found, ret;
+ u_int32_t order;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Adjustment will only be logged if this is a subtransaction.
+ * Only subtransactions can abort and affect their parent
+ * transaction's cursors.
+ */
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+
+ /*
+ * Calculate the order of this deleted record.
+ * This will be one greater than any cursor that is pointing
+ * at this record and already marked as deleted.
+ */
+ order = 0;
+ if (!add) {
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ lcp = (HASH_CURSOR *)cp->internal;
+ if (F_ISSET(lcp, H_DELETED) &&
+ hcp->pgno == lcp->pgno &&
+ hcp->indx == lcp->indx &&
+ order <= lcp->order &&
+ (!is_dup || hcp->dup_off == lcp->dup_off))
+ order = lcp->order + 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ hcp->order = order;
+ }
+
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ if (lcp->pgno != hcp->pgno || lcp->indx == NDX_INVALID)
+ continue;
+
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+
+ if (!is_dup) {
+ if (add) {
+ /*
+ * This routine is not called to add
+ * non-dup records which are always put
+ * at the end. It is only called from
+ * recovery in this case and the
+ * cursor will be marked deleted.
+ * We are "undeleting" so unmark all
+ * cursors with the same order.
+ */
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED)) {
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+
+ /*
+ * If we've moved this cursor's
+ * index, split its order
+ * number--i.e., decrement it by
+ * enough so that the lowest
+ * cursor moved has order 1.
+ * cp_arg->order is the split
+ * point, so decrement by one
+ * less than that.
+ */
+ lcp->order -=
+ (hcp->order - 1);
+ lcp->indx += 2;
+ }
+ } else if (lcp->indx >= hcp->indx)
+ lcp->indx += 2;
+
+ } else {
+ if (lcp->indx > hcp->indx) {
+ lcp->indx -= 2;
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->indx == hcp->indx &&
+ !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ F_CLR(lcp, H_ISDUP);
+ lcp->order = order;
+ }
+ }
+ } else if (lcp->indx == hcp->indx) {
+ /*
+ * Handle duplicates. This routine is
+ * only called for on page dups.
+ * Off page dups are handled by btree/rtree
+ * code.
+ */
+ if (add) {
+ lcp->dup_tlen += len;
+ if (lcp->dup_off == hcp->dup_off &&
+ F_ISSET(hcp, H_DELETED) &&
+ F_ISSET(lcp, H_DELETED)) {
+ /* Abort of a delete. */
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+ lcp->order -=
+ (hcp->order -1);
+ lcp->dup_off += len;
+ }
+ } else if (lcp->dup_off >= hcp->dup_off)
+ lcp->dup_off += len;
+ } else {
+ lcp->dup_tlen -= len;
+ if (lcp->dup_off > hcp->dup_off) {
+ lcp->dup_off -= len;
+ if (lcp->dup_off ==
+ hcp->dup_off &&
+ F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->dup_off ==
+ hcp->dup_off &&
+ !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ lcp->order = order;
+ }
+ }
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_curadj_log(dbp, my_txn, &lsn, 0, hcp->pgno,
+ hcp->indx, len, hcp->dup_off, add, is_dup, order)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_get_clist --
+ *
+ * Get a list of cursors either on a particular bucket or on a particular
+ * page and index combination. The former is so that we can update
+ * cursors on a split. The latter is so we can update cursors when we
+ * move items off page.
+ *
+ * PUBLIC: int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***));
+ */
+int
+__ham_get_clist(dbp, pgno, indx, listp)
+ DB *dbp;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ DBC ***listp;
+{
+ DB *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ int nalloc, nused, ret;
+
+ /*
+ * Assume that finding anything is the exception, so optimize for
+ * the case where there aren't any.
+ */
+ nalloc = nused = 0;
+ *listp = NULL;
+ dbenv = dbp->dbenv;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links))
+ /*
+ * We match if cp->pgno matches the specified
+ * pgno, and if either the cp->indx matches
+ * or we weren't given an index.
+ */
+ if (cp->internal->pgno == pgno &&
+ (indx == NDX_INVALID ||
+ cp->internal->indx == indx)) {
+ if (nused >= nalloc) {
+ nalloc += 10;
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *),
+ listp)) != 0)
+ goto err;
+ }
+ (*listp)[nused++] = cp;
+ }
+
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (listp != NULL) {
+ if (nused >= nalloc) {
+ nalloc++;
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *), listp)) != 0)
+ return (ret);
+ }
+ (*listp)[nused] = NULL;
+ }
+ return (0);
+err:
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ return (ret);
+}
+
+static int
+__ham_c_writelock(dbc)
+ DBC *dbc;
+{
+ DB_ENV *dbenv;
+ DB_LOCK tmp_lock;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ /*
+ * All we need do is acquire the lock and let the off-page
+ * dup tree do its thing.
+ */
+ if (!STD_LOCKING(dbc))
+ return (0);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((!LOCK_ISSET(hcp->lock) || hcp->lock_mode == DB_LOCK_READ)) {
+ tmp_lock = hcp->lock;
+ if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) != 0)
+ return (ret);
+ dbenv = dbc->dbp->dbenv;
+ if (LOCK_ISSET(tmp_lock) &&
+ (ret = dbenv->lock_put(dbenv, &tmp_lock)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/libdb/hash/hash.src b/libdb/hash/hash.src
new file mode 100644
index 0000000..b683235
--- /dev/null
+++ b/libdb/hash/hash.src
@@ -0,0 +1,266 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+PREFIX __ham
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/hash.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * HASH-insdel: used for hash to insert/delete a pair of entries onto a master
+ * page. The pair might be a regular key/data pair or it might be the
+ * structures that refer to off-page items, duplicates or off-page duplicates.
+ * opcode - PUTPAIR/DELPAIR + big masks
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being added (item index)
+ * pagelsn - lsn on the page before the update
+ * key - the key being inserted
+ * data - the data being inserted
+ */
+BEGIN insdel 21
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+DBT key DBT s
+DBT data DBT s
+END
+
+/*
+ * Used to add and remove overflow pages.
+ * prev_pgno is the previous page that is going to get modified to
+ * point to this one. If this is the first page in a chain
+ * then prev_pgno should be PGNO_INVALID.
+ * new_pgno is the page being allocated.
+ * next_pgno is the page that follows this one. On allocation,
+ * this should be PGNO_INVALID. For deletes, it may exist.
+ * pagelsn is the old lsn on the page.
+ */
+BEGIN newpage 22
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCKNZ prev_pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+WRLOCKNZ new_pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+WRLOCKNZ next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * Splitting requires two types of log messages. The second logs the
+ * data on the original page. To redo the split, we have to visit the
+ * new page (pages) and add the items back on the page if they are not
+ * yet there.
+ */
+BEGIN splitdata 24
+DB fileid int32_t ld
+ARG opcode u_int32_t lu
+WRLOCK pgno db_pgno_t lu
+PGDBT pageimage DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * HASH-replace: is used for hash to handle partial puts that only
+ * affect a single master page.
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being modified (item index)
+ * pagelsn - lsn on the page before the update
+ * off - offset in the old item where the new item is going.
+ * olditem - DBT that describes the part of the item being replaced.
+ * newitem - DBT of the new item.
+ * makedup - this was a replacement that made an item a duplicate.
+ */
+BEGIN replace 25
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+ARG off int32_t ld
+DBT olditem DBT s
+DBT newitem DBT s
+ARG makedup u_int32_t lu
+END
+
+/*
+ * Used when we empty the first page in a bucket and there are pages after
+ * it. The page after it gets copied into the bucket page (since bucket
+ * pages have to be in fixed locations).
+ * pgno: the bucket page
+ * pagelsn: the old LSN on the bucket page
+ * next_pgno: the page number of the next page
+ * nnext_pgno: page after next_pgno (may need to change its prev)
+ * nnextlsn: the LSN of nnext_pgno.
+ */
+BEGIN copypage 28
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+WRLOCK next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+WRLOCKNZ nnext_pgno db_pgno_t lu
+POINTER nnextlsn DB_LSN * lu
+PGDBT page DBT s
+END
+
+/*
+ * This record logs the meta-data aspects of a split operation. It has enough
+ * information so that we can record both an individual page allocation as well
+ * as a group allocation, which we do because in sub databases the pages in
+ * a hash doubling must be contiguous. If we do a group allocation, the
+ * number of pages allocated is bucket + 1, and pgno is the page number of the
+ * first newly allocated bucket.
+ *
+ * bucket: Old maximum bucket number.
+ * mmpgno: Master meta-data page number (0 if same as mpgno).
+ * mmetalsn: Lsn of the master meta-data page.
+ * mpgno: Meta-data page number.
+ * metalsn: Lsn of the meta-data page.
+ * pgno: Page allocated to bucket + 1 (first newly allocated page)
+ * pagelsn: Lsn of either the first page allocated (if newalloc == 0) or
+ * the last page allocated (if newalloc == 1).
+ * newalloc: 1 indicates that this record did the actual allocation;
+ * 0 indicates that the pages were already allocated from a
+ * previous (failed) allocation.
+ */
+BEGIN metagroup 29
+DB fileid int32_t ld
+ARG bucket u_int32_t lu
+WRLOCK mmpgno db_pgno_t lu
+POINTER mmetalsn DB_LSN * lu
+WRLOCKNZ mpgno db_pgno_t lu
+POINTER metalsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+ARG newalloc u_int32_t lu
+END
+
+/*
+ * groupalloc
+ *
+ * This is used in conjunction with MPOOL_NEW_GROUP when we are creating
+ * a new database to make sure that we recreate or reclaim free pages
+ * when we allocate a chunk of contiguous ones during database creation.
+ *
+ * pgno: meta-data page number
+ * metalsn: meta-data lsn
+ * start_pgno: starting page number
+ * num: number of allocated pages
+ */
+BEGIN groupalloc 32
+DB fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+WRLOCK start_pgno db_pgno_t lu
+ARG num u_int32_t lu
+ARG free db_pgno_t lu
+END
+
+/*
+ * Records for backing out cursor adjustment.
+ * curadj - added or deleted a record or a dup
+ * within a record.
+ * pgno - page that was affected
+ * indx - index of the record affected.
+ * len - if a dup its length.
+ * dup_off - if a dup its offset
+ * add - 1 if add 0 if delete
+ * is_dup - 1 if dup 0 otherwise.
+ * order - order assigned to this deleted record or dup.
+ *
+ * chgpg - removed a page; move the records to a new page
+ * mode - CHGPG page was deleted or records moved to a new page.
+ * - SPLIT we split a bucket
+ * - DUP we converted to off-page duplicates.
+ * old_pgno, new_pgno - old and new page numbers.
+ * old_indx, new_indx - old and new index numbers, NDX_INVALID if
+ * it affects all records on the page.
+ * For three opcodes new in 3.3 (DB_HAM_DELFIRSTPG, DELMIDPG,
+ * and DELLASTPG), we overload old_indx and new_indx to avoid
+ * needing a new log record type: old_indx stores the only
+ * indx of interest to these records, and new_indx stores the
+ * order that's assigned to the lowest deleted record we're
+ * moving.
+ */
+BEGIN curadj 33
+DB fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG len u_int32_t lu
+ARG dup_off u_int32_t lu
+ARG add int ld
+ARG is_dup int ld
+ARG order u_int32_t lu
+END
+
+BEGIN chgpg 34
+DB fileid int32_t ld
+ARG mode db_ham_mode ld
+ARG old_pgno db_pgno_t lu
+ARG new_pgno db_pgno_t lu
+ARG old_indx u_int32_t lu
+ARG new_indx u_int32_t lu
+END
+
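
Each BEGIN/END descriptor above is turned by Berkeley DB's record generator into a record-type constant, an argument structure, and the __ham_*_log/_read/_print/_getpgnos routines that follow in hash_auto.c. As a reading aid (editorial sketch, not part of the patch; the real definition lives in the generated header, which is not shown in this excerpt), the insdel descriptor corresponds roughly to the structure below, whose fields are exactly the ones the generated __ham_insdel_read() fills in.

/*
 * Rough shape of the argument structure generated for "BEGIN insdel 21";
 * field names and types follow the generated reader in hash_auto.c.
 */
typedef struct ___ham_insdel_args_sketch {
	u_int32_t type;		/* record type (DB___ham_insdel) */
	DB_TXN *txnid;		/* transaction that wrote the record */
	DB_LSN prev_lsn;	/* previous LSN written by this txn */
	u_int32_t opcode;	/* ARG: PUTPAIR/DELPAIR plus big masks */
	int32_t fileid;		/* DB: dbreg id of the file */
	db_pgno_t pgno;		/* WRLOCK: page being modified */
	u_int32_t ndx;		/* ARG: item index on the page */
	DB_LSN pagelsn;		/* POINTER: page LSN before the update */
	DBT key;		/* DBT: key being inserted or deleted */
	DBT data;		/* DBT: data being inserted or deleted */
} __ham_insdel_args_sketch;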
diff --git a/libdb/hash/hash_auto.c b/libdb/hash/hash_auto.c
new file mode 100644
index 0000000..918797b
--- /dev/null
+++ b/libdb/hash/hash_auto.c
@@ -0,0 +1,2752 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/hash.h"
+#include "dbinc/rep.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __ham_insdel_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *,
+ * PUBLIC: const DBT *, const DBT *));
+ */
+int
+__ham_insdel_log(dbp, txnid, ret_lsnp, flags,
+ opcode, pgno, ndx, pagelsn, key,
+ data)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN * pagelsn;
+ const DBT *key;
+ const DBT *data;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_insdel;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t) + (key == NULL ? 0 : key->size)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)ndx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ if (key == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &key->size, sizeof(key->size));
+ bp += sizeof(key->size);
+ memcpy(bp, key->data, key->size);
+ bp += key->size;
+ }
+
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_insdel_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_insdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_insdel_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_insdel_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_insdel_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_insdel_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_insdel_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_insdel_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_insdel: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tndx: %lu\n", (u_long)argp->ndx);
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\tkey: ");
+ for (i = 0; i < argp->key.size; i++) {
+ ch = ((u_int8_t *)argp->key.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **));
+ */
+int
+__ham_insdel_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_insdel_args **argpp;
+{
+ __ham_insdel_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_insdel_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->ndx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memset(&argp->key, 0, sizeof(argp->key));
+ memcpy(&argp->key.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->key.data = bp;
+ bp += argp->key.size;
+
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+
+ *argpp = argp;
+ return (0);
+}
+
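
All of the generated __ham_*_log()/__ham_*_read() pairs in this file share the same on-disk layout: a u_int32_t record type, a u_int32_t transaction id, and the transaction's previous DB_LSN, followed by the per-record fields in descriptor order (fixed-size values widened to u_int32_t, LSNs copied verbatim, DBTs as a u_int32_t length plus raw bytes). The following header decoder is only an editorial sketch written under that assumption, using the db_int.h types already included above.

/* Sketch: peel the common header off a __ham_* log record buffer. */
struct ham_log_hdr_sketch {
	u_int32_t rectype;	/* DB___ham_* record type */
	u_int32_t txnid;	/* transaction id, 0 if non-transactional */
	DB_LSN prev_lsn;	/* previous record written by this txn */
};

static const u_int8_t *
__ham_hdr_read_sketch(recbuf, hdr)
	const u_int8_t *recbuf;
	struct ham_log_hdr_sketch *hdr;
{
	memcpy(&hdr->rectype, recbuf, sizeof(hdr->rectype));
	recbuf += sizeof(hdr->rectype);
	memcpy(&hdr->txnid, recbuf, sizeof(hdr->txnid));
	recbuf += sizeof(hdr->txnid);
	memcpy(&hdr->prev_lsn, recbuf, sizeof(DB_LSN));
	recbuf += sizeof(DB_LSN);
	return (recbuf);	/* now points at the first per-record field */
}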
+/*
+ * PUBLIC: int __ham_newpage_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *,
+ * PUBLIC: db_pgno_t, DB_LSN *));
+ */
+int
+__ham_newpage_log(dbp, txnid, ret_lsnp, flags,
+ opcode, prev_pgno, prevlsn, new_pgno, pagelsn,
+ next_pgno, nextlsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t prev_pgno;
+ DB_LSN * prevlsn;
+ db_pgno_t new_pgno;
+ DB_LSN * pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN * nextlsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_newpage;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*prevlsn)
+ + sizeof(u_int32_t)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t)
+ + sizeof(*nextlsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)prev_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (prevlsn != NULL)
+ memcpy(bp, prevlsn, sizeof(*prevlsn));
+ else
+ memset(bp, 0, sizeof(*prevlsn));
+ bp += sizeof(*prevlsn);
+
+ uinttmp = (u_int32_t)new_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ uinttmp = (u_int32_t)next_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_newpage_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_newpage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_newpage_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_newpage_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_newpage_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ if (argp->prev_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->prev_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+ if (argp->new_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->new_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+ if (argp->next_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->next_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_newpage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_newpage_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_newpage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_newpage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tprev_pgno: %lu\n", (u_long)argp->prev_pgno);
+ (void)printf("\tprevlsn: [%lu][%lu]\n",
+ (u_long)argp->prevlsn.file, (u_long)argp->prevlsn.offset);
+ (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno);
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ (void)printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_newpage_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_newpage_args **));
+ */
+int
+__ham_newpage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_newpage_args **argpp;
+{
+ __ham_newpage_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_newpage_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->prev_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->prevlsn, bp, sizeof(argp->prevlsn));
+ bp += sizeof(argp->prevlsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->new_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_splitdata_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+ */
+int
+__ham_splitdata_log(dbp, txnid, ret_lsnp, flags, opcode, pgno, pageimage, pagelsn)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ const DBT *pageimage;
+ DB_LSN * pagelsn;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_splitdata;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (pageimage == NULL ? 0 : pageimage->size)
+ + sizeof(*pagelsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pageimage == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &pageimage->size, sizeof(pageimage->size));
+ bp += sizeof(pageimage->size);
+ memcpy(bp, pageimage->data, pageimage->size);
+ bp += pageimage->size;
+ }
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_splitdata_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_splitdata_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_splitdata_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_splitdata_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_splitdata_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_splitdata_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_splitdata_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_splitdata: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tpageimage: ");
+ for (i = 0; i < argp->pageimage.size; i++) {
+ ch = ((u_int8_t *)argp->pageimage.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_splitdata_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_splitdata_args **));
+ */
+int
+__ham_splitdata_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_splitdata_args **argpp;
+{
+ __ham_splitdata_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_splitdata_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->pageimage, 0, sizeof(argp->pageimage));
+ memcpy(&argp->pageimage.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->pageimage.data = bp;
+ bp += argp->pageimage.size;
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_replace_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, int32_t, const DBT *,
+ * PUBLIC: const DBT *, u_int32_t));
+ */
+int
+__ham_replace_log(dbp, txnid, ret_lsnp, flags, pgno, ndx, pagelsn, off, olditem,
+ newitem, makedup)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN * pagelsn;
+ int32_t off;
+ const DBT *olditem;
+ const DBT *newitem;
+ u_int32_t makedup;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_replace;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (olditem == NULL ? 0 : olditem->size)
+ + sizeof(u_int32_t) + (newitem == NULL ? 0 : newitem->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)ndx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ uinttmp = (u_int32_t)off;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (olditem == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &olditem->size, sizeof(olditem->size));
+ bp += sizeof(olditem->size);
+ memcpy(bp, olditem->data, olditem->size);
+ bp += olditem->size;
+ }
+
+ if (newitem == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newitem->size, sizeof(newitem->size));
+ bp += sizeof(newitem->size);
+ memcpy(bp, newitem->data, newitem->size);
+ bp += newitem->size;
+ }
+
+ uinttmp = (u_int32_t)makedup;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_replace_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_replace_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_replace_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_replace_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_replace_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_replace_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_replace_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_replace_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_replace: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tndx: %lu\n", (u_long)argp->ndx);
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\toff: %ld\n", (long)argp->off);
+ (void)printf("\tolditem: ");
+ for (i = 0; i < argp->olditem.size; i++) {
+ ch = ((u_int8_t *)argp->olditem.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tnewitem: ");
+ for (i = 0; i < argp->newitem.size; i++) {
+ ch = ((u_int8_t *)argp->newitem.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tmakedup: %lu\n", (u_long)argp->makedup);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_replace_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_replace_args **));
+ */
+int
+__ham_replace_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_replace_args **argpp;
+{
+ __ham_replace_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_replace_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->ndx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->off = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->olditem, 0, sizeof(argp->olditem));
+ memcpy(&argp->olditem.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->olditem.data = bp;
+ bp += argp->olditem.size;
+
+ memset(&argp->newitem, 0, sizeof(argp->newitem));
+ memcpy(&argp->newitem.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newitem.data = bp;
+ bp += argp->newitem.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->makedup = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_copypage_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t,
+ * PUBLIC: DB_LSN *, const DBT *));
+ */
+int
+__ham_copypage_log(dbp, txnid, ret_lsnp, flags, pgno, pagelsn, next_pgno, nextlsn, nnext_pgno,
+ nnextlsn, page)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ DB_LSN * pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN * nextlsn;
+ db_pgno_t nnext_pgno;
+ DB_LSN * nnextlsn;
+ const DBT *page;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_copypage;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t)
+ + sizeof(*nextlsn)
+ + sizeof(u_int32_t)
+ + sizeof(*nnextlsn)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ uinttmp = (u_int32_t)next_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (nextlsn != NULL)
+ memcpy(bp, nextlsn, sizeof(*nextlsn));
+ else
+ memset(bp, 0, sizeof(*nextlsn));
+ bp += sizeof(*nextlsn);
+
+ uinttmp = (u_int32_t)nnext_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (nnextlsn != NULL)
+ memcpy(bp, nnextlsn, sizeof(*nnextlsn));
+ else
+ memset(bp, 0, sizeof(*nnextlsn));
+ bp += sizeof(*nnextlsn);
+
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_copypage_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_copypage_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_copypage_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_copypage_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->next_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ if (argp->nnext_pgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->nnext_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_copypage_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_copypage_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_copypage_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_copypage: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\tnext_pgno: %lu\n", (u_long)argp->next_pgno);
+ (void)printf("\tnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nextlsn.file, (u_long)argp->nextlsn.offset);
+ (void)printf("\tnnext_pgno: %lu\n", (u_long)argp->nnext_pgno);
+ (void)printf("\tnnextlsn: [%lu][%lu]\n",
+ (u_long)argp->nnextlsn.file, (u_long)argp->nnextlsn.offset);
+ (void)printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_copypage_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_copypage_args **));
+ */
+int
+__ham_copypage_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_copypage_args **argpp;
+{
+ __ham_copypage_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_copypage_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->next_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->nextlsn, bp, sizeof(argp->nextlsn));
+ bp += sizeof(argp->nextlsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->nnext_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->nnextlsn, bp, sizeof(argp->nnextlsn));
+ bp += sizeof(argp->nnextlsn);
+
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *,
+ * PUBLIC: db_pgno_t, DB_LSN *, u_int32_t));
+ */
+int
+__ham_metagroup_log(dbp, txnid, ret_lsnp, flags, bucket, mmpgno, mmetalsn, mpgno, metalsn,
+ pgno, pagelsn, newalloc)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t bucket;
+ db_pgno_t mmpgno;
+ DB_LSN * mmetalsn;
+ db_pgno_t mpgno;
+ DB_LSN * metalsn;
+ db_pgno_t pgno;
+ DB_LSN * pagelsn;
+ u_int32_t newalloc;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_metagroup;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*mmetalsn)
+ + sizeof(u_int32_t)
+ + sizeof(*metalsn)
+ + sizeof(u_int32_t)
+ + sizeof(*pagelsn)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)bucket;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mmpgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (mmetalsn != NULL)
+ memcpy(bp, mmetalsn, sizeof(*mmetalsn));
+ else
+ memset(bp, 0, sizeof(*mmetalsn));
+ bp += sizeof(*mmetalsn);
+
+ uinttmp = (u_int32_t)mpgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (metalsn != NULL)
+ memcpy(bp, metalsn, sizeof(*metalsn));
+ else
+ memset(bp, 0, sizeof(*metalsn));
+ bp += sizeof(*metalsn);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (pagelsn != NULL)
+ memcpy(bp, pagelsn, sizeof(*pagelsn));
+ else
+ memset(bp, 0, sizeof(*pagelsn));
+ bp += sizeof(*pagelsn);
+
+ uinttmp = (u_int32_t)newalloc;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_metagroup_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_metagroup_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_metagroup_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_metagroup_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 3)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->mmpgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ if (argp->mpgno != PGNO_INVALID) {
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->mpgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+ }
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_metagroup_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_metagroup_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_metagroup_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_metagroup: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tbucket: %lu\n", (u_long)argp->bucket);
+ (void)printf("\tmmpgno: %lu\n", (u_long)argp->mmpgno);
+ (void)printf("\tmmetalsn: [%lu][%lu]\n",
+ (u_long)argp->mmetalsn.file, (u_long)argp->mmetalsn.offset);
+ (void)printf("\tmpgno: %lu\n", (u_long)argp->mpgno);
+ (void)printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tpagelsn: [%lu][%lu]\n",
+ (u_long)argp->pagelsn.file, (u_long)argp->pagelsn.offset);
+ (void)printf("\tnewalloc: %lu\n", (u_long)argp->newalloc);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_metagroup_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_metagroup_args **));
+ */
+int
+__ham_metagroup_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_metagroup_args **argpp;
+{
+ __ham_metagroup_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_metagroup_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->bucket = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mmpgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->mmetalsn, bp, sizeof(argp->mmetalsn));
+ bp += sizeof(argp->mmetalsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mpgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->pagelsn, bp, sizeof(argp->pagelsn));
+ bp += sizeof(argp->pagelsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->newalloc = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+ */
+int
+__ham_groupalloc_log(dbp, txnid, ret_lsnp, flags, meta_lsn, start_pgno, num, free)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * meta_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_groupalloc;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*meta_lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (meta_lsn != NULL)
+ memcpy(bp, meta_lsn, sizeof(*meta_lsn));
+ else
+ memset(bp, 0, sizeof(*meta_lsn));
+ bp += sizeof(*meta_lsn);
+
+ uinttmp = (u_int32_t)start_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)num;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)free;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_groupalloc_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_groupalloc_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __ham_groupalloc_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __ham_groupalloc_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->start_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_groupalloc_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_groupalloc_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_groupalloc_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_groupalloc: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmeta_lsn: [%lu][%lu]\n",
+ (u_long)argp->meta_lsn.file, (u_long)argp->meta_lsn.offset);
+ (void)printf("\tstart_pgno: %lu\n", (u_long)argp->start_pgno);
+ (void)printf("\tnum: %lu\n", (u_long)argp->num);
+ (void)printf("\tfree: %lu\n", (u_long)argp->free);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_groupalloc_read __P((DB_ENV *, void *,
+ * PUBLIC: __ham_groupalloc_args **));
+ */
+int
+__ham_groupalloc_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_groupalloc_args **argpp;
+{
+ __ham_groupalloc_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_groupalloc_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->meta_lsn, bp, sizeof(argp->meta_lsn));
+ bp += sizeof(argp->meta_lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->start_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->num = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->free = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
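+/*
+ * Illustrative layout sketch (not part of the generated Sleepycat source):
+ * the record that __ham_groupalloc_log writes and __ham_groupalloc_read
+ * parses back, assuming a DB_LSN is two packed u_int32_t fields and no
+ * crypto padding is added:
+ *
+ *	offset	size	field
+ *	0	4	rectype (DB___ham_groupalloc)
+ *	4	4	txn_num (0 for non-transactional records)
+ *	8	8	prev_lsn
+ *	16	4	fileid
+ *	20	8	meta_lsn
+ *	28	4	start_pgno
+ *	32	4	num
+ *	36	4	free
+ *
+ * Each scalar is passed through a u_int32_t temporary and copied with
+ * memcpy, so the record is written in host byte order and imposes no
+ * alignment requirements on the log buffer.
+ */
+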
+/*
+ * PUBLIC: int __ham_curadj_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t, int, int,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_curadj_log(dbp, txnid, ret_lsnp, flags, pgno, indx, len, dup_off, add,
+ is_dup, order)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t len;
+ u_int32_t dup_off;
+ int add;
+ int is_dup;
+ u_int32_t order;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_curadj;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)len;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)dup_off;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)add;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)is_dup;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)order;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_curadj_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_curadj_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_curadj_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_curadj_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_curadj_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_curadj: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\tlen: %lu\n", (u_long)argp->len);
+ (void)printf("\tdup_off: %lu\n", (u_long)argp->dup_off);
+ (void)printf("\tadd: %ld\n", (long)argp->add);
+ (void)printf("\tis_dup: %ld\n", (long)argp->is_dup);
+ (void)printf("\torder: %lu\n", (u_long)argp->order);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **));
+ */
+int
+__ham_curadj_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_curadj_args **argpp;
+{
+ __ham_curadj_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_curadj_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->len = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->dup_off = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->add = (int)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->is_dup = (int)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->order = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_chgpg_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_ham_mode, db_pgno_t, db_pgno_t, u_int32_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_chgpg_log(dbp, txnid, ret_lsnp, flags, mode, old_pgno, new_pgno, old_indx, new_indx)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_ham_mode mode;
+ db_pgno_t old_pgno;
+ db_pgno_t new_pgno;
+ u_int32_t old_indx;
+ u_int32_t new_indx;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___ham_chgpg;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)old_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)new_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)old_indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)new_indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__ham_chgpg_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_chgpg_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_chgpg_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__ham_chgpg_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __ham_chgpg_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __ham_chgpg_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__ham_chgpg: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tmode: %ld\n", (long)argp->mode);
+ (void)printf("\told_pgno: %lu\n", (u_long)argp->old_pgno);
+ (void)printf("\tnew_pgno: %lu\n", (u_long)argp->new_pgno);
+ (void)printf("\told_indx: %lu\n", (u_long)argp->old_indx);
+ (void)printf("\tnew_indx: %lu\n", (u_long)argp->new_indx);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **));
+ */
+int
+__ham_chgpg_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __ham_chgpg_args **argpp;
+{
+ __ham_chgpg_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__ham_chgpg_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mode = (db_ham_mode)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->old_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->new_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->old_indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->new_indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__ham_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_insdel_print, DB___ham_insdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_newpage_print, DB___ham_newpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_splitdata_print, DB___ham_splitdata)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_replace_print, DB___ham_replace)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_copypage_print, DB___ham_copypage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_metagroup_print, DB___ham_metagroup)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_groupalloc_print, DB___ham_groupalloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_curadj_print, DB___ham_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_chgpg_print, DB___ham_chgpg)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__ham_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_insdel_getpgnos, DB___ham_insdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_newpage_getpgnos, DB___ham_newpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_splitdata_getpgnos, DB___ham_splitdata)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_replace_getpgnos, DB___ham_replace)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_copypage_getpgnos, DB___ham_copypage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_metagroup_getpgnos, DB___ham_metagroup)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_groupalloc_getpgnos, DB___ham_groupalloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_curadj_getpgnos, DB___ham_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_chgpg_getpgnos, DB___ham_chgpg)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__ham_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_insdel_recover, DB___ham_insdel)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_newpage_recover, DB___ham_newpage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_splitdata_recover, DB___ham_splitdata)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_replace_recover, DB___ham_replace)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_copypage_recover, DB___ham_copypage)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_metagroup_recover, DB___ham_metagroup)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_groupalloc_recover, DB___ham_groupalloc)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_curadj_recover, DB___ham_curadj)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __ham_chgpg_recover, DB___ham_chgpg)) != 0)
+ return (ret);
+ return (0);
+}
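+
+/*
+ * Illustrative sketch (an assumption about db_dispatch.c, which is not part
+ * of this file): each __ham_init_* function above registers one handler per
+ * record type, and __db_add_recovery is expected to do little more than grow
+ * the dispatch table and file the function under its record type, roughly:
+ *
+ *	if (ndx >= *dtabsizep) {
+ *		newsize = ndx + 40;
+ *		if ((ret = __os_realloc(dbenv,
+ *		    newsize * sizeof((*dtabp)[0]), dtabp)) != 0)
+ *			return (ret);
+ *		memset(*dtabp + *dtabsizep, 0,
+ *		    (newsize - *dtabsizep) * sizeof((*dtabp)[0]));
+ *		*dtabsizep = newsize;
+ *	}
+ *	(*dtabp)[ndx] = func;
+ *
+ * Recovery, diagnostic printing, and page-number extraction then dispatch on
+ * the rectype stored at the front of every log record to select a handler.
+ */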
diff --git a/libdb/hash/hash_conv.c b/libdb/hash/hash_conv.c
new file mode 100644
index 0000000..073aecc
--- /dev/null
+++ b/libdb/hash/hash_conv.c
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/hash.h"
+
+/*
+ * __ham_pgin --
+ * Convert host-specific page layout from the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgin(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ h = pp;
+ pginfo = (DB_PGINFO *)cookie->data;
+
+ /*
+	 * The hash access method does blind reads of pages, which causes
+	 * them to be created. If the type field isn't set, this is one of
+	 * those pages: initialize the rest of the page and return.
+ */
+ if (h->type != P_HASHMETA && h->pgno == PGNO_INVALID) {
+ P_INIT(pp, (db_indx_t)pginfo->db_pagesize,
+ pg, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ return (0);
+ }
+
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __ham_pgout --
+ * Convert host-specific page layout to the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgout(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __ham_mswap --
+ * Swap the bytes on the hash metadata page.
+ *
+ * PUBLIC: int __ham_mswap __P((void *));
+ */
+int
+__ham_mswap(pg)
+ void *pg;
+{
+ u_int8_t *p;
+ int i;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* max_bucket */
+ SWAP32(p); /* high_mask */
+ SWAP32(p); /* low_mask */
+ SWAP32(p); /* ffactor */
+ SWAP32(p); /* nelem */
+ SWAP32(p); /* h_charkey */
+ for (i = 0; i < NCACHED; ++i)
+ SWAP32(p); /* spares */
+	p += 59 * sizeof(u_int32_t);	/* unused */
+ SWAP32(p); /* crypto_magic */
+ return (0);
+}
diff --git a/libdb/hash/hash_dup.c b/libdb/hash/hash_dup.c
new file mode 100644
index 0000000..102fab5
--- /dev/null
+++ b/libdb/hash/hash_dup.c
@@ -0,0 +1,891 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Manipulation of duplicates for the hash package.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/btree.h"
+
+static int __ham_c_chgpg __P((DBC *,
+ db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+static int __ham_check_move __P((DBC *, u_int32_t));
+static int __ham_dcursor __P((DBC *, db_pgno_t, u_int32_t));
+static int __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
+
+/*
+ * Called from hash_access to add a duplicate key. nval is the new
+ * value that we want to add. The flags correspond to the flag values
+ * to cursor_put indicating where to add the new element.
+ * There are 4 cases.
+ * Case 1: The existing duplicate set already resides on a separate page.
+ * We return and let the common code handle this.
+ * Case 2: The element is small enough to just be added to the existing set.
+ * Case 3: The element is large enough to be a big item, so we're going to
+ * have to push the set onto a new page.
+ * Case 4: The element is large enough to push the duplicate set onto a
+ * separate page.
+ *
+ * PUBLIC: int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ham_add_dup(dbc, nval, flags, pgnop)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBT pval, tmp_val;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ u_int32_t add_bytes, new_size;
+ int cmp, ret;
+ u_int8_t *hk;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ DB_ASSERT(flags != DB_CURRENT);
+
+ add_bytes = nval->size +
+ (F_ISSET(nval, DB_DBT_PARTIAL) ? nval->doff : 0);
+ add_bytes = DUP_SIZE(add_bytes);
+
+ if ((ret = __ham_check_move(dbc, add_bytes)) != 0)
+ return (ret);
+
+ /*
+ * Check if resulting duplicate set is going to need to go
+ * onto a separate duplicate page. If so, convert the
+ * duplicate set and add the new one. After conversion,
+ * hcp->dndx is the first free ndx or the index of the
+ * current pointer into the duplicate set.
+ */
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ /* Add the len bytes to the current singleton. */
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ add_bytes += DUP_SIZE(0);
+ new_size =
+ LEN_HKEYDATA(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)) +
+ add_bytes;
+
+ /*
+ * We convert to off-page duplicates if the item is a big item,
+ * the addition of the new item will make the set large, or
+ * if there isn't enough room on this page to add the next item.
+ */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP &&
+ (HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hcp, new_size) ||
+ add_bytes > P_FREESPACE(dbp, hcp->page))) {
+
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* There are two separate cases here: on page and off page. */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP) {
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE) {
+ pval.flags = 0;
+ pval.data = HKEYDATA_DATA(hk);
+ pval.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize,
+ hcp->indx);
+ if ((ret = __ham_make_dup(dbp->dbenv,
+ &pval, &tmp_val, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) != 0 || (ret =
+ __ham_replpair(dbc, &tmp_val, 1)) != 0)
+ return (ret);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+
+ /*
+ * Update the cursor position since we now are in
+ * duplicates.
+ */
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_off = 0;
+ hcp->dup_len = pval.size;
+ hcp->dup_tlen = DUP_SIZE(hcp->dup_len);
+ }
+
+ /* Now make the new entry a duplicate. */
+ if ((ret = __ham_make_dup(dbp->dbenv, nval,
+ &tmp_val, &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
+ return (ret);
+
+ tmp_val.dlen = 0;
+ switch (flags) { /* On page. */
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ if (dbp->dup_compare != NULL) {
+ __ham_dsearch(dbc,
+ nval, &tmp_val.doff, &cmp, flags);
+
+ /* dup dups are not supported w/ sorted dups */
+ if (cmp == 0)
+ return (__db_duperr(dbp, flags));
+ } else {
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ hcp->dup_len = nval->size;
+ F_SET(hcp, H_ISDUP);
+ if (flags == DB_KEYFIRST)
+ hcp->dup_off = tmp_val.doff = 0;
+ else
+ hcp->dup_off =
+ tmp_val.doff = hcp->dup_tlen;
+ }
+ break;
+ case DB_BEFORE:
+ tmp_val.doff = hcp->dup_off;
+ break;
+ case DB_AFTER:
+ tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len);
+ break;
+ }
+ /* Add the duplicate. */
+ ret = __ham_replpair(dbc, &tmp_val, 0);
+ if (ret == 0)
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
+ if (ret != 0)
+ return (ret);
+
+ /* Now, update the cursor if necessary. */
+ switch (flags) {
+ case DB_AFTER:
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ hcp->dup_len = nval->size;
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
+ break;
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_BEFORE:
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
+ hcp->dup_len = nval->size;
+ break;
+ }
+ ret = __ham_c_update(dbc, tmp_val.size, 1, 1);
+ return (ret);
+ }
+
+ /*
+ * If we get here, then we're on duplicate pages; set pgnop and
+ * return so the common code can handle it.
+ */
+ memcpy(pgnop, HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+
+ return (ret);
+}
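+
+/*
+ * Decision flow of the routine above, summarized for reference (derived from
+ * the code rather than part of the original commentary):
+ *
+ *	if (HPAGE_PTYPE(hk) == H_OFFDUP)
+ *		case 1: already off-page; hand pgnop back to the common code
+ *	else if (HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hcp, new_size) ||
+ *	    add_bytes > P_FREESPACE(dbp, hcp->page))
+ *		cases 3/4: __ham_dup_convert(), then insert through hcp->opd
+ *	else
+ *		case 2: grow the on-page H_DUPLICATE set via __ham_replpair
+ */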
+
+/*
+ * Convert an on-page set of duplicates to an offpage set of duplicates.
+ *
+ * PUBLIC: int __ham_dup_convert __P((DBC *));
+ */
+int
+__ham_dup_convert(dbc)
+ DBC *dbc;
+{
+ BOVERFLOW bo;
+ DB *dbp;
+ DBC **hcs;
+ DBT dbt;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HOFFPAGE ho;
+ PAGE *dp;
+ db_indx_t i, len, off;
+ int c, ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Create a new page for the duplicates.
+ */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize,
+ dp->pgno, PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Get the list of cursors that may need to be updated.
+ */
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(hcp->page), (u_int32_t)hcp->indx, &hcs)) != 0)
+ goto err;
+
+ /*
+ * Now put the duplicates onto the new page.
+ */
+ dbt.flags = 0;
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ /* Simple case, one key on page; move it to dup page. */
+ dbt.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ dbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ ret = __db_pitem(dbc,
+ dp, 0, BKEYDATA_SIZE(dbt.size), NULL, &dbt);
+ goto finish;
+ case H_OFFPAGE:
+ /* Simple case, one key on page; move it to dup page. */
+ memcpy(&ho, P_ENTRY(dbp, hcp->page, H_DATAINDEX(hcp->indx)),
+ HOFFPAGE_SIZE);
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, ho.type, 0);
+ UMRW_SET(bo.unused2);
+ bo.pgno = ho.pgno;
+ bo.tlen = ho.tlen;
+ dbt.size = BOVERFLOW_SIZE;
+ dbt.data = &bo;
+
+ ret = __db_pitem(dbc, dp, 0, dbt.size, &dbt, NULL);
+finish: if (ret == 0) {
+ if ((ret = mpf->set(mpf, dp, DB_MPOOL_DIRTY)) != 0)
+ break;
+
+ /* Update any other cursors. */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, 0)) != 0)
+ break;
+ }
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if ((ret = __ham_dcursor(hcs[c],
+ PGNO(dp), 0)) != 0)
+ break;
+ }
+ break;
+ case H_DUPLICATE:
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+
+ /*
+ * We need to maintain the duplicate cursor position.
+ * Keep track of where we are in the duplicate set via
+ * the offset, and when it matches the one in the cursor,
+ * set the off-page duplicate cursor index to the current
+ * index.
+ */
+ for (off = 0, i = 0; p < pend; i++) {
+ memcpy(&len, p, sizeof(db_indx_t));
+ dbt.size = len;
+ p += sizeof(db_indx_t);
+ dbt.data = p;
+ p += len + sizeof(db_indx_t);
+ if ((ret = __db_pitem(dbc, dp,
+ i, BKEYDATA_SIZE(dbt.size), NULL, &dbt)) != 0)
+ break;
+
+ /* Update any other cursors */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, i)) != 0)
+ break;
+ }
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if (((HASH_CURSOR *)(hcs[c]->internal))->dup_off
+ == off && (ret = __ham_dcursor(hcs[c],
+ PGNO(dp), i)) != 0)
+ goto err;
+ off += len + 2 * sizeof(db_indx_t);
+ }
+ break;
+ default:
+ ret = __db_pgfmt(dbp->dbenv, (u_long)hcp->pgno);
+ break;
+ }
+
+ /*
+ * Now attach this to the source page in place of the old duplicate
+ * item.
+ */
+ if (ret == 0)
+ ret = __ham_move_offpage(dbc, hcp->page,
+ (u_int32_t)H_DATAINDEX(hcp->indx), PGNO(dp));
+
+err: if (ret == 0)
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
+
+ if ((t_ret =
+ mpf->put(mpf, dp, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret == 0)
+ hcp->dup_tlen = hcp->dup_off = hcp->dup_len = 0;
+
+ if (hcs != NULL)
+ __os_free(dbp->dbenv, hcs);
+
+ return (ret);
+}
+
+/*
+ * __ham_make_dup
+ *
+ * Take a regular dbt and make it into a duplicate item with all the partial
+ * information set appropriately. If the incoming dbt is a partial, assume
+ * we are creating a new entry and make sure that we do any initial padding.
+ *
+ * PUBLIC: int __ham_make_dup __P((DB_ENV *,
+ * PUBLIC: const DBT *, DBT *d, void **, u_int32_t *));
+ */
+int
+__ham_make_dup(dbenv, notdup, duplicate, bufp, sizep)
+ DB_ENV *dbenv;
+ const DBT *notdup;
+ DBT *duplicate;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ db_indx_t tsize, item_size;
+ int ret;
+ u_int8_t *p;
+
+ item_size = (db_indx_t)notdup->size;
+ if (F_ISSET(notdup, DB_DBT_PARTIAL))
+ item_size += notdup->doff;
+
+ tsize = DUP_SIZE(item_size);
+ if ((ret = __ham_init_dbt(dbenv, duplicate, tsize, bufp, sizep)) != 0)
+ return (ret);
+
+ duplicate->dlen = 0;
+ duplicate->flags = notdup->flags;
+ F_SET(duplicate, DB_DBT_PARTIAL);
+
+ p = duplicate->data;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ if (F_ISSET(notdup, DB_DBT_PARTIAL)) {
+ memset(p, 0, notdup->doff);
+ p += notdup->doff;
+ }
+ memcpy(p, notdup->data, notdup->size);
+ p += notdup->size;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+
+ duplicate->doff = 0;
+ duplicate->dlen = notdup->size;
+
+ return (0);
+}
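+
+/*
+ * Illustrative note (not part of the original commentary): __ham_make_dup
+ * builds the on-page H_DUPLICATE element format, in which each item is both
+ * length-prefixed and length-suffixed so the set can be walked in either
+ * direction:
+ *
+ *	[db_indx_t len][len bytes of data][db_indx_t len]
+ *
+ * which is why DUP_SIZE(len) is len plus two db_indx_t's, and why a partial
+ * put is front-padded with notdup->doff zero bytes before the data proper.
+ */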
+
+/*
+ * __ham_check_move --
+ *
+ * Check if we can do whatever we need to on this page. If not,
+ * then we'll have to move the current element to a new page.
+ */
+static int
+__ham_check_move(dbc, add_len)
+ DBC *dbc;
+ u_int32_t add_len;
+{
+ DB *dbp;
+ DBT k, d;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *next_pagep;
+ db_pgno_t next_pgno;
+ u_int32_t new_datalen, old_len, rectype;
+ u_int8_t *hk;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+
+ /*
+ * If the item is already off page duplicates or an offpage item,
+ * then we know we can do whatever we need to do in-place
+ */
+ if (HPAGE_PTYPE(hk) == H_OFFDUP || HPAGE_PTYPE(hk) == H_OFFPAGE)
+ return (0);
+
+ old_len = LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx));
+ new_datalen = old_len - HKEYDATA_SIZE(0) + add_len;
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ new_datalen += DUP_SIZE(0);
+
+ /*
+ * We need to add a new page under two conditions:
+ * 1. The addition makes the total data length cross the BIG
+ * threshold and the OFFDUP structure won't fit on this page.
+ * 2. The addition does not make the total data cross the
+ * threshold, but the new data won't fit on the page.
+ * If neither of these is true, then we can return.
+ */
+ if (ISBIG(hcp, new_datalen) && (old_len > HOFFDUP_SIZE ||
+ HOFFDUP_SIZE - old_len <= P_FREESPACE(dbp, hcp->page)))
+ return (0);
+
+ if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(dbp, hcp->page))
+ return (0);
+
+ /*
+ * If we get here, then we need to move the item to a new page.
+ * Check if there are more pages in the chain. We now need to
+ * update new_datalen to include the size of both the key and
+ * the data that we need to move.
+ */
+
+ new_datalen = ISBIG(hcp, new_datalen) ?
+ HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen);
+ new_datalen += LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_KEYINDEX(hcp->indx));
+
+ next_pagep = NULL;
+ for (next_pgno = NEXT_PGNO(hcp->page); next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(next_pagep)) {
+ if (next_pagep != NULL &&
+ (ret = mpf->put(mpf, next_pagep, 0)) != 0)
+ return (ret);
+
+ if ((ret = mpf->get(mpf,
+ &next_pgno, DB_MPOOL_CREATE, &next_pagep)) != 0)
+ return (ret);
+
+ if (P_FREESPACE(dbp, next_pagep) >= new_datalen)
+ break;
+ }
+
+ /* No more pages, add one. */
+ if (next_pagep == NULL && (ret = __ham_add_ovflpage(dbc,
+ hcp->page, 0, &next_pagep)) != 0)
+ return (ret);
+
+ /* Add new page at the end of the chain. */
+ if (P_FREESPACE(dbp, next_pagep) < new_datalen && (ret =
+ __ham_add_ovflpage(dbc, next_pagep, 1, &next_pagep)) != 0) {
+ (void)mpf->put(mpf, next_pagep, 0);
+ return (ret);
+ }
+
+ /* Copy the item to the new page. */
+ if (DBC_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ k.flags = 0;
+ d.flags = 0;
+ if (HPAGE_PTYPE(
+ H_PAIRKEY(dbp, hcp->page, hcp->indx)) == H_OFFPAGE) {
+ rectype |= PAIR_KEYMASK;
+ k.data = H_PAIRKEY(dbp, hcp->page, hcp->indx);
+ k.size = HOFFPAGE_SIZE;
+ } else {
+ k.data =
+ HKEYDATA_DATA(H_PAIRKEY(dbp, hcp->page, hcp->indx));
+ k.size =
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ }
+
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ rectype |= PAIR_DATAMASK;
+ d.data = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ d.size = HOFFPAGE_SIZE;
+ } else {
+ if (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))
+ == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+ d.data =
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ d.size = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ }
+
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, rectype, PGNO(next_pagep),
+ (u_int32_t)NUM_ENT(next_pagep), &LSN(next_pagep),
+ &k, &d)) != 0) {
+ (void)mpf->put(mpf, next_pagep, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(next_pagep) = new_lsn; /* Structure assignment. */
+
+ __ham_copy_item(dbp, hcp->page, H_KEYINDEX(hcp->indx), next_pagep);
+ __ham_copy_item(dbp, hcp->page, H_DATAINDEX(hcp->indx), next_pagep);
+
+ /*
+ * We've just manually inserted a key and set of data onto
+ * next_pagep; however, it's possible that our caller will
+ * return without further modifying the new page, for instance
+ * if DB_NODUPDATA is set and our new item is a duplicate duplicate.
+ * Thus, to be on the safe side, we need to mark the page dirty
+ * here. [#2996]
+ *
+ * Note that __ham_del_pair should dirty the page we're moving
+ * the items from, so we need only dirty the new page ourselves.
+ */
+ if ((ret = mpf->set(mpf, next_pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+
+ /* Update all cursors that used to point to this item. */
+ if ((ret = __ham_c_chgpg(dbc, PGNO(hcp->page), H_KEYINDEX(hcp->indx),
+ PGNO(next_pagep), NUM_ENT(next_pagep) - 2)) != 0)
+ goto out;
+
+ /* Now delete the pair from the current page. */
+ ret = __ham_del_pair(dbc, 0);
+
+ /*
+ * __ham_del_pair decremented nelem. This is incorrect; we
+ * manually copied the element elsewhere, so the total number
+ * of elements hasn't changed. Increment it again.
+ *
+ * !!!
+ * Note that we still have the metadata page pinned, and
+ * __ham_del_pair dirtied it, so we don't need to set the dirty
+ * flag again.
+ */
+ if (!STD_LOCKING(dbc))
+ hcp->hdr->nelem++;
+
+out:
+ (void)mpf->put(mpf, hcp->page, DB_MPOOL_DIRTY);
+ hcp->page = next_pagep;
+ hcp->pgno = PGNO(hcp->page);
+ hcp->indx = NUM_ENT(hcp->page) - 2;
+ F_SET(hcp, H_EXPAND);
+ F_CLR(hcp, H_DELETED);
+
+ return (ret);
+}
+
+/*
+ * __ham_move_offpage --
+ * Replace an onpage set of duplicates with the OFFDUP structure
+ * that references the duplicate page.
+ *
+ * XXX
+ * This is really just a special case of __onpage_replace; we should
+ * probably combine them.
+ *
+ */
+static int
+__ham_move_offpage(dbc, pagep, ndx, pgno)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t ndx;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ DBT new_dbt;
+ DBT old_dbt;
+ HOFFDUP od;
+ db_indx_t i, *inp;
+ int32_t shrink;
+ u_int8_t *src;
+ int ret;
+
+ dbp = dbc->dbp;
+ od.type = H_OFFDUP;
+ UMRW_SET(od.unused[0]);
+ UMRW_SET(od.unused[1]);
+ UMRW_SET(od.unused[2]);
+ od.pgno = pgno;
+ ret = 0;
+
+ if (DBC_LOGGING(dbc)) {
+ new_dbt.data = &od;
+ new_dbt.size = HOFFDUP_SIZE;
+ old_dbt.data = P_ENTRY(dbp, pagep, ndx);
+ old_dbt.size = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx);
+ if ((ret = __ham_replace_log(dbp, dbc->txn, &LSN(pagep), 0,
+ PGNO(pagep), (u_int32_t)ndx, &LSN(pagep), -1,
+ &old_dbt, &new_dbt, 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+
+ shrink = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE;
+ inp = P_INP(dbp, pagep);
+
+ if (shrink != 0) {
+ /* Copy data. */
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ memmove(src + shrink, src, inp[ndx] - HOFFSET(pagep));
+ HOFFSET(pagep) += shrink;
+
+ /* Update index table. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ inp[i] += shrink;
+ }
+
+ /* Now copy the offdup entry onto the page. */
+ memcpy(P_ENTRY(dbp, pagep, ndx), &od, HOFFDUP_SIZE);
+ return (ret);
+}
+
+/*
+ * __ham_dsearch:
+ * Locate a particular duplicate in a duplicate set. Make sure that
+ * we exit with the cursor set appropriately.
+ *
+ * PUBLIC: void __ham_dsearch
+ * PUBLIC: __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t));
+ */
+void
+__ham_dsearch(dbc, dbt, offp, cmpp, flags)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t *offp, flags;
+ int *cmpp;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT cur;
+ db_indx_t i, len;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int8_t *data;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ func = dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare;
+
+ i = F_ISSET(hcp, H_CONTINUE) ? hcp->dup_off: 0;
+ data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) + i;
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ while (i < hcp->dup_tlen) {
+ memcpy(&len, data, sizeof(db_indx_t));
+ data += sizeof(db_indx_t);
+ cur.data = data;
+ cur.size = (u_int32_t)len;
+
+ /*
+		 * If we find an exact match, we're done. If this is a sorted
+		 * duplicate set and the on-page item is larger than our test
+		 * item, we're also done; in that case, if partial matches are
+		 * permitted (DB_GET_BOTH_RANGE), the miss is not a failure.
+ */
+ *cmpp = func(dbp, dbt, &cur);
+ if (*cmpp == 0)
+ break;
+ if (*cmpp < 0 && dbp->dup_compare != NULL) {
+ if (flags == DB_GET_BOTH_RANGE)
+ *cmpp = 0;
+ break;
+ }
+
+ i += len + 2 * sizeof(db_indx_t);
+ data += len + sizeof(db_indx_t);
+ }
+
+ *offp = i;
+ hcp->dup_off = i;
+ hcp->dup_len = len;
+ F_SET(hcp, H_ISDUP);
+}
+
+#ifdef DEBUG
+/*
+ * __ham_cprint --
+ * Display the current cursor list.
+ *
+ * PUBLIC: void __ham_cprint __P((DBC *));
+ */
+void
+__ham_cprint(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *cp;
+
+ cp = (HASH_CURSOR *)dbc->internal;
+
+ fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu",
+ P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno,
+ (u_long)cp->indx);
+ if (F_ISSET(cp, H_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
+}
+#endif /* DEBUG */
+
+/*
+ * __ham_dcursor --
+ *
+ * Create an off page duplicate cursor for this cursor.
+ */
+static int
+__ham_dcursor(dbc, pgno, indx)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int32_t indx;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ BTREE_CURSOR *dcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __db_c_newopd(dbc, pgno, hcp->opd, &hcp->opd)) != 0)
+ return (ret);
+
+ dcp = (BTREE_CURSOR *)hcp->opd->internal;
+ dcp->pgno = pgno;
+ dcp->indx = indx;
+
+ if (dbp->dup_compare == NULL) {
+ /*
+ * Converting to off-page Recno trees is tricky. The
+ * record number for the cursor is the index + 1 (to
+ * convert to 1-based record numbers).
+ */
+ dcp->recno = indx + 1;
+ }
+
+ /*
+ * Transfer the deleted flag from the top-level cursor to the
+ * created one.
+ */
+ if (F_ISSET(hcp, H_DELETED)) {
+ F_SET(dcp, C_DELETED);
+ F_CLR(hcp, H_DELETED);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_c_chgpg --
+ * Adjust the cursors after moving an item to a new page. We only
+ * move cursors that are pointing at this one item and are not
+ * deleted; since we only touch non-deleted cursors, and since
+ * (by definition) no item existed at the pgno/indx we're moving the
+ * item to, we're guaranteed that all the cursors we affect here or
+ * on abort really do refer to this one item.
+ */
+static int
+__ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t old_index, new_index;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+
+ /*
+ * If a cursor is deleted, it doesn't refer to this
+ * item--it just happens to have the same indx, but
+ * it points to a former neighbor. Don't move it.
+ */
+ if (F_ISSET(hcp, H_DELETED))
+ continue;
+
+ if (hcp->pgno == old_pgno) {
+ if (hcp->indx == old_index) {
+ hcp->pgno = new_pgno;
+ hcp->indx = new_index;
+ } else
+ continue;
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, DB_HAM_CHGPG,
+ old_pgno, new_pgno, old_index, new_index)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/libdb/hash/hash_func.c b/libdb/hash/hash_func.c
new file mode 100644
index 0000000..3d0509a
--- /dev/null
+++ b/libdb/hash/hash_func.c
@@ -0,0 +1,245 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __ham_func2 --
+ * Phong Vo's linear congruential hash.
+ *
+ * PUBLIC: u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+ */
+#define DCHARHASH(h, c) ((h) = 0x63c63cd9*(h) + 0x9c39c33d + (c))
+
+u_int32_t
+__ham_func2(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *e, *k;
+ u_int32_t h;
+ u_int8_t c;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ k = key;
+ e = k + len;
+ for (h = 0; k != e;) {
+ c = *k++;
+ if (!c && k > e)
+ break;
+ DCHARHASH(h, c);
+ }
+ return (h);
+}
+
+/*
+ * __ham_func3 --
+ * Ozan Yigit's original sdbm hash.
+ *
+ * Ugly, but fast. Break the string up into 8 byte units. On the first time
+ * through the loop get the "leftover bytes" (strlen % 8). On each subsequent
+ * iteration, perform 8 HASHC's so we handle all 8 bytes. Essentially, this
+ * saves us 7 cmp & branch instructions.
+ *
+ * PUBLIC: u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func3(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t n, loop;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASHC n = *k++ + 65599 * n
+ n = 0;
+ k = key;
+
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASHC;
+ case 7:
+ HASHC;
+ case 6:
+ HASHC;
+ case 5:
+ HASHC;
+ case 4:
+ HASHC;
+ case 3:
+ HASHC;
+ case 2:
+ HASHC;
+ case 1:
+ HASHC;
+ } while (--loop);
+ }
+ return (n);
+}
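+
+/*
+ * Worked example (illustrative, not part of the original commentary): for an
+ * 11-byte key, loop = (11 + 7) >> 3 = 2 and len & 7 = 3, so the switch jumps
+ * to "case 3" and performs 3 HASHC steps for the leftover bytes; --loop then
+ * leaves 1, and one more full pass from "case 0" performs the remaining 8
+ * steps, covering all 11 bytes with a single loop test per 8-byte block.
+ */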
+
+/*
+ * __ham_func4 --
+ * Chris Torek's hash function. Although this function performs only
+ * slightly worse than __ham_func5 on strings, it performs horribly on
+ * numbers.
+ *
+ * PUBLIC: u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func4(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t h, loop;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASH4a h = (h << 5) - h + *k++;
+#define HASH4b h = (h << 5) + h + *k++;
+#define HASH4 HASH4b
+ h = 0;
+ k = key;
+
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASH4;
+ case 7:
+ HASH4;
+ case 6:
+ HASH4;
+ case 5:
+ HASH4;
+ case 4:
+ HASH4;
+ case 3:
+ HASH4;
+ case 2:
+ HASH4;
+ case 1:
+ HASH4;
+ } while (--loop);
+ }
+ return (h);
+}
+
+/*
+ * Fowler/Noll/Vo hash
+ *
+ * The basis of the hash algorithm was taken from an idea sent by email to the
+ * IEEE Posix P1003.2 mailing list from Phong Vo (kpv@research.att.com) and
+ * Glenn Fowler (gsf@research.att.com). Landon Curt Noll (chongo@toad.com)
+ * later improved on their algorithm.
+ *
+ * The magic is in the interesting relationship between the special prime
+ * 16777619 (2^24 + 403) and 2^32 and 2^8.
+ *
+ * This hash produces the fewest collisions of any function that we've seen so
+ * far, and works well on both numbers and strings.
+ *
+ * PUBLIC: u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func5(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k, *e;
+ u_int32_t h;
+
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ k = key;
+ e = k + len;
+ for (h = 0; k < e; ++k) {
+ h *= 16777619;
+ h ^= *k;
+ }
+ return (h);
+}
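+
+/*
+ * Illustrative expansion (not part of the original commentary): for a
+ * two-byte key k[0], k[1] the loop above computes, starting from h = 0,
+ *
+ *	h = ((0 * 16777619) ^ k[0]) * 16777619 ^ k[1]
+ *	  = (k[0] * 16777619) ^ k[1]
+ *
+ * i.e. the multiply-then-XOR ordering of FNV-1 (this variant seeds h with 0
+ * rather than the usual FNV offset basis), with all arithmetic modulo 2^32.
+ */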
+
+/*
+ * __ham_test --
+ *
+ * PUBLIC: u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_test(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(len, 0);
+ return ((u_int32_t)*(char *)key);
+}
diff --git a/libdb/hash/hash_meta.c b/libdb/hash/hash_meta.c
new file mode 100644
index 0000000..cdf1087
--- /dev/null
+++ b/libdb/hash/hash_meta.c
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+/*
+ * Acquire the meta-data page.
+ *
+ * PUBLIC: int __ham_get_meta __P((DBC *));
+ */
+int
+__ham_get_meta(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (dbenv != NULL &&
+ STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_READ, &hcp->hlock)) != 0)
+ return (ret);
+ }
+
+ if ((ret = mpf->get(mpf,
+ &hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0 &&
+ LOCK_ISSET(hcp->hlock))
+ (void)dbenv->lock_put(dbenv, &hcp->hlock);
+
+ return (ret);
+}
+
+/*
+ * Release the meta-data page.
+ *
+ * PUBLIC: int __ham_release_meta __P((DBC *));
+ */
+int
+__ham_release_meta(dbc)
+ DBC *dbc;
+{
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+
+ mpf = dbc->dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (hcp->hdr)
+ (void)mpf->put(mpf, hcp->hdr,
+ F_ISSET(hcp, H_DIRTY) ? DB_MPOOL_DIRTY : 0);
+ hcp->hdr = NULL;
+ if (!F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE) &&
+ dbc->txn == NULL && LOCK_ISSET(hcp->hlock))
+ (void)dbc->dbp->dbenv->lock_put(dbc->dbp->dbenv, &hcp->hlock);
+ F_CLR(hcp, H_DIRTY);
+
+ return (0);
+}
+
+/*
+ * Mark the meta-data page dirty.
+ *
+ * PUBLIC: int __ham_dirty_meta __P((DBC *));
+ */
+int
+__ham_dirty_meta(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_LOCK _tmp;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ ret = 0;
+ if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
+ dbenv = dbp->dbenv;
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &_tmp)) == 0) {
+ ret = dbenv->lock_put(dbenv, &hcp->hlock);
+ hcp->hlock = _tmp;
+ }
+ }
+
+ if (ret == 0)
+ F_SET(hcp, H_DIRTY);
+ return (ret);
+}
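+
+/*
+ * Upgrade ordering used above, summarized for reference (derived from the
+ * code, not part of the original commentary):
+ *
+ *	1. lock_get(..., DB_LOCK_WRITE, &_tmp)	take the write lock first
+ *	2. lock_put(&hcp->hlock)		then drop the old read lock
+ *	3. hcp->hlock = _tmp			remember the write lock
+ *
+ * so the meta page is never left unlocked mid-upgrade; presumably this
+ * relies on a locker's new request not conflicting with locks that same
+ * locker already holds.
+ */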
diff --git a/libdb/hash/hash_method.c b/libdb/hash/hash_method.c
new file mode 100644
index 0000000..7fd7618
--- /dev/null
+++ b/libdb/hash/hash_method.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+
+static int __ham_set_h_ffactor __P((DB *, u_int32_t));
+static int __ham_set_h_hash
+ __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+static int __ham_set_h_nelem __P((DB *, u_int32_t));
+
+/*
+ * __ham_db_create --
+ * Hash specific initialization of the DB structure.
+ *
+ * PUBLIC: int __ham_db_create __P((DB *));
+ */
+int
+__ham_db_create(dbp)
+ DB *dbp;
+{
+ HASH *hashp;
+ int ret;
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ sizeof(HASH), &dbp->h_internal)) != 0)
+ return (ret);
+
+ hashp = dbp->h_internal;
+
+ hashp->h_nelem = 0; /* Defaults. */
+ hashp->h_ffactor = 0;
+ hashp->h_hash = NULL;
+
+ dbp->set_h_ffactor = __ham_set_h_ffactor;
+ dbp->set_h_hash = __ham_set_h_hash;
+ dbp->set_h_nelem = __ham_set_h_nelem;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_db_close __P((DB *));
+ */
+int
+__ham_db_close(dbp)
+ DB *dbp;
+{
+ if (dbp->h_internal == NULL)
+ return (0);
+ __os_free(dbp->dbenv, dbp->h_internal);
+ dbp->h_internal = NULL;
+ return (0);
+}
+
+/*
+ * __ham_set_h_ffactor --
+ * Set the fill factor.
+ */
+static int
+__ham_set_h_ffactor(dbp, h_ffactor)
+ DB *dbp;
+ u_int32_t h_ffactor;
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_ffactor");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_ffactor = h_ffactor;
+ return (0);
+}
+
+/*
+ * __ham_set_h_hash --
+ * Set the hash function.
+ */
+static int
+__ham_set_h_hash(dbp, func)
+ DB *dbp;
+ u_int32_t (*func) __P((DB *, const void *, u_int32_t));
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_hash");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_hash = func;
+ return (0);
+}
+
+/*
+ * __ham_set_h_nelem --
+ * Set the table size.
+ */
+static int
+__ham_set_h_nelem(dbp, h_nelem)
+ DB *dbp;
+ u_int32_t h_nelem;
+{
+ HASH *hashp;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_nelem");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_nelem = h_nelem;
+ return (0);
+}
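+
+/*
+ * The three setters above are reached through the public DB handle.  A hedged
+ * usage sketch follows; the file name and tuning values are placeholders and
+ * error checking is omitted.  The setters must be called before DB->open,
+ * which is what DB_ILLEGAL_AFTER_OPEN enforces:
+ *
+ *	DB *dbp;
+ *
+ *	(void)db_create(&dbp, NULL, 0);
+ *	(void)dbp->set_h_ffactor(dbp, 32);	(desired pairs per bucket)
+ *	(void)dbp->set_h_nelem(dbp, 10000);	(estimated number of elements)
+ *	(void)dbp->open(dbp, NULL, "foo.db", NULL, DB_HASH, DB_CREATE, 0644);
+ */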
diff --git a/libdb/hash/hash_open.c b/libdb/hash/hash_open.c
new file mode 100644
index 0000000..0159ab1
--- /dev/null
+++ b/libdb/hash/hash_open.c
@@ -0,0 +1,558 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+
+static db_pgno_t __ham_init_meta __P((DB *, HMETA *, db_pgno_t, DB_LSN *));
+
+/*
+ * __ham_open --
+ *
+ * PUBLIC: int __ham_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char * name, db_pgno_t, u_int32_t));
+ */
+int
+__ham_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ int ret, t_ret;
+
+ COMPQUIET(name, NULL);
+ dbenv = dbp->dbenv;
+ dbc = NULL;
+ mpf = dbp->mpf;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __ham_stat;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp,
+ txn, &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+ hashp->meta_pgno = base_pgno;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ /* Initialize the hdr structure. */
+ if (hcp->hdr->dbmeta.magic == DB_HASHMAGIC) {
+ /* File exists, verify the data in the header. */
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = hcp->hdr->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ if (!F_ISSET(dbp, DB_AM_RDONLY) && !IS_RECOVERING(dbenv) &&
+ hashp->h_hash(dbp,
+ CHARKEY, sizeof(CHARKEY)) != hcp->hdr->h_charkey) {
+ __db_err(dbp->dbenv,
+ "hash: incompatible hash function");
+ ret = EINVAL;
+ goto err2;
+ }
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
+ F_SET(dbp, DB_AM_DUPSORT);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+
+		/* We must initialize last_pgno; it could be stale. */
+ if (!F_ISSET(dbp, DB_AM_RDONLY) &&
+ dbp->meta_pgno == PGNO_BASE_MD) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err2;
+ mpf->last_pgno(mpf, &hcp->hdr->dbmeta.last_pgno);
+ }
+ } else if (!IS_RECOVERING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER))
+ DB_ASSERT(0);
+
+err2: /* Release the meta data page */
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+err1: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __ham_metachk --
+ *
+ * PUBLIC: int __ham_metachk __P((DB *, const char *, HMETA *));
+ */
+int
+__ham_metachk(dbp, name, hashm)
+ DB *dbp;
+ const char *name;
+ HMETA *hashm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Hash.
+ * Check the version, the database may be out of date.
+ */
+ vers = hashm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 4:
+ case 5:
+ case 6:
+ __db_err(dbenv,
+ "%s: hash version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 7:
+ case 8:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported hash version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __ham_mswap((PAGE *)hashm)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_HASH && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_HASH;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret = __db_fchk(dbenv,
+ "DB->open", hashm->dbmeta.flags,
+ DB_HASH_DUP | DB_HASH_SUBDB | DB_HASH_DUPSORT)) != 0)
+ return (ret);
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported in file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort function specified but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = hashm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, hashm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+}
+
+/*
+ * __ham_init_meta --
+ *
+ * Initialize a hash meta-data page. We assume that the meta-data page is
+ * contiguous with the initial buckets that we create. If that turns out
+ * to be false, we'll fix it up later. Return the initial number of buckets
+ * allocated.
+ */
+static db_pgno_t
+__ham_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ HMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ HASH *hashp;
+ db_pgno_t nbuckets;
+ int i;
+ int32_t l2;
+
+ hashp = dbp->h_internal;
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = DB_HASHVERSION < 5 ? __ham_func4 : __ham_func5;
+
+ if (hashp->h_nelem != 0 && hashp->h_ffactor != 0) {
+ hashp->h_nelem = (hashp->h_nelem - 1) / hashp->h_ffactor + 1;
+ l2 = __db_log2(hashp->h_nelem > 2 ? hashp->h_nelem : 2);
+ } else
+ l2 = 1;
+ nbuckets = (db_pgno_t)(1 << l2);
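+
+	/*
+	 * A worked example of the sizing above (values are illustrative):
+	 * with h_nelem = 1000 and h_ffactor = 16, h_nelem becomes
+	 * (1000 - 1) / 16 + 1 = 63, l2 = __db_log2(63) = 6, so nbuckets = 64
+	 * and, below, max_bucket = high_mask = 63 and low_mask = 31.
+	 */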
+
+ memset(meta, 0, sizeof(HMETA));
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
+ meta->dbmeta.magic = DB_HASHMAGIC;
+ meta->dbmeta.version = DB_HASHVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_HASHMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
+ meta->max_bucket = nbuckets - 1;
+ meta->high_mask = nbuckets - 1;
+ meta->low_mask = (nbuckets >> 1) - 1;
+ meta->ffactor = hashp->h_ffactor;
+ meta->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, DB_HASH_DUP);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, DB_HASH_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, DB_HASH_DUPSORT);
+
+ /*
+	 * Create the first and second bucket pages so that we know their
+	 * page numbers and can store the first one in the meta-data
+	 * header (spares[0]).
+ */
+ meta->spares[0] = pgno + 1;
+
+ /* Fill in the last fields of the meta data page. */
+ for (i = 1; i <= l2; i++)
+ meta->spares[i] = meta->spares[0];
+ for (; i < NCACHED; i++)
+ meta->spares[i] = PGNO_INVALID;
+
+ return (nbuckets);
+}
+
+/*
+ * __ham_new_file --
+ * Create the necessary pages to begin a new database file. If name
+ * is NULL, then this is an unnamed file, the mpf has been set in the dbp
+ * and we simply create the pages using mpool. In this case, we don't log
+ * because we never have to redo an unnamed create and the undo simply
+ * frees resources.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__ham_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ HMETA *meta;
+ PAGE *page;
+ int ret;
+ db_pgno_t lpgno;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ meta = NULL;
+ page = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+ if (name == NULL) {
+ lpgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.type = dbp->type;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (HMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
+ meta->dbmeta.last_pgno = lpgno;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now allocate the final hash bucket. */
+ if (name == NULL) {
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &page)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+ memset(buf, 0, dbp->pgsize);
+#endif
+ page = (PAGE *)buf;
+ }
+
+ P_INIT(page, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN_NOT_LOGGED(page->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, lpgno, buf, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, lpgno * dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ page = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (page != NULL)
+ (void)mpf->put(mpf, page, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __ham_new_subdb --
+ * Create the necessary pages to begin a new subdatabase.
+ *
+ * PUBLIC: int __ham_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__ham_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock, mmlock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
+ HMETA *meta;
+ PAGE *h;
+ int i, ret, t_ret;
+ db_pgno_t lpgno, mpgno;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ mmeta = NULL;
+ LOCK_INIT(metalock);
+ LOCK_INIT(mmlock);
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get and lock the new meta data page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Initialize the new meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ lpgno = __ham_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+
+ /*
+ * We are about to allocate a set of contiguous buckets (lpgno
+ * worth). We need to get the master meta-data page to figure
+ * out where these pages are and to allocate them. So, lock and
+ * get the master meta data page.
+ */
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, mpgno, DB_LOCK_WRITE, 0, &mmlock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &mpgno, 0, &mmeta)) != 0)
+ goto err;
+
+ /*
+ * Now update the hash meta-data page to reflect where the first
+ * set of buckets are actually located.
+ */
+ meta->spares[0] = mmeta->last_pgno + 1;
+ for (i = 0; i < NCACHED && meta->spares[i] != PGNO_INVALID; i++)
+ meta->spares[i] = meta->spares[0];
+
+ /* The new meta data page is now complete; log it. */
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Reflect the group allocation. */
+ if (DBENV_LOGGING(dbenv))
+ if ((ret = __ham_groupalloc_log(mdbp, txn,
+ &LSN(mmeta), 0, &LSN(mmeta),
+ meta->spares[0], meta->max_bucket + 1, mmeta->free)) != 0)
+ goto err;
+
+ /* Release the new meta-data page. */
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+
+	mmeta->last_pgno += lpgno;
+ lpgno = mmeta->last_pgno;
+
+ /* Now allocate the final hash bucket. */
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ P_INIT(h, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(h) = LSN(mmeta);
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+
+ /* Now put the master-metadata page back. */
+ if ((ret = mpf->put(mpf, mmeta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ mmeta = NULL;
+
+err:
+ if (mmeta != NULL)
+ if ((t_ret = mpf->put(mpf, mmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(mmlock))
+ if ((t_ret = __LPUT(dbc, mmlock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (meta != NULL)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/libdb/hash/hash_page.c b/libdb/hash/hash_page.c
new file mode 100644
index 0000000..790323f
--- /dev/null
+++ b/libdb/hash/hash_page.c
@@ -0,0 +1,1862 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Page manipulation for hashing package.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+static int __ham_c_delpg
+ __P((DBC *, db_pgno_t, db_pgno_t, u_int32_t, db_ham_mode, u_int32_t *));
+
+/*
+ * PUBLIC: int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED)) {
+ __db_err(dbp->dbenv, "Attempt to return a deleted item");
+ return (EINVAL);
+ }
+ F_CLR(hcp, H_OK | H_NOMORE);
+
+ /* Check if we need to get a page for this cursor. */
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+recheck:
+ /* Check if we are looking for space in which to insert an item. */
+ if (hcp->seek_size && hcp->seek_found_page == PGNO_INVALID &&
+ hcp->seek_size < P_FREESPACE(dbp, hcp->page))
+ hcp->seek_found_page = hcp->pgno;
+
+ /* Check for off-page duplicates. */
+ if (hcp->indx < NUM_ENT(hcp->page) &&
+ HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Check if we need to go on to the next page. */
+ if (F_ISSET(hcp, H_ISDUP))
+ /*
+ * ISDUP is set, and offset is at the beginning of the datum.
+ * We need to grab the length of the datum, then set the datum
+ * pointer to be the beginning of the datum.
+ */
+ memcpy(&hcp->dup_len,
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) +
+ hcp->dup_off, sizeof(db_indx_t));
+
+ if (hcp->indx >= (db_indx_t)NUM_ENT(hcp->page)) {
+ /* Fetch next page. */
+ if (NEXT_PGNO(hcp->page) == PGNO_INVALID) {
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ next_pgno = NEXT_PGNO(hcp->page);
+ hcp->indx = 0;
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ goto recheck;
+ }
+
+ F_SET(hcp, H_OK);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_item_reset __P((DBC *));
+ */
+int
+__ham_item_reset(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ ret = 0;
+ if (hcp->page != NULL)
+ ret = mpf->put(mpf, hcp->page, 0);
+
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * PUBLIC: void __ham_item_init __P((DBC *));
+ */
+void
+__ham_item_init(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * If this cursor still holds any locks, we must
+ * release them if we are not running with transactions.
+ */
+ (void)__TLPUT(dbc, hcp->lock);
+
+ /*
+ * The following fields must *not* be initialized here
+ * because they may have meaning across inits.
+ * hlock, hdr, split_buf, stats
+ */
+ hcp->bucket = BUCKET_INVALID;
+ hcp->lbucket = BUCKET_INVALID;
+ LOCK_INIT(hcp->lock);
+ hcp->lock_mode = DB_LOCK_NG;
+ hcp->dup_off = 0;
+ hcp->dup_len = 0;
+ hcp->dup_tlen = 0;
+ hcp->seek_size = 0;
+ hcp->seek_found_page = PGNO_INVALID;
+ hcp->flags = 0;
+
+ hcp->pgno = PGNO_INVALID;
+ hcp->indx = NDX_INVALID;
+ hcp->page = NULL;
+}
+
+/*
+ * Returns the last item in a bucket.
+ *
+ * PUBLIC: int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_last(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+
+ hcp->bucket = hcp->hdr->max_bucket;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ F_SET(hcp, H_OK);
+ return (__ham_item_prev(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_first(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ F_SET(hcp, H_OK);
+ hcp->bucket = 0;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ return (__ham_item_next(dbc, mode, pgnop));
+}
+
+/*
+ * __ham_item_prev --
+ * Returns a pointer to key/data pair on a page. In the case of
+ * bigkeys, just returns the page number and index of the bigkey
+ * pointer pair.
+ *
+ * PUBLIC: int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_prev(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+
+ /*
+ * There are 5 cases for backing up in a hash file.
+ * Case 1: In the middle of a page, no duplicates, just dec the index.
+ * Case 2: In the middle of a duplicate set, back up one.
+ * Case 3: At the beginning of a duplicate set, get out of set and
+ * back up to next key.
+ * Case 4: At the beginning of a page; go to previous page.
+ * Case 5: At the beginning of a bucket; go to prev bucket.
+ */
+ F_CLR(hcp, H_OK | H_NOMORE | H_DELETED);
+
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * First handle the duplicates. Either you'll get the key here
+ * or you'll exit the duplicate set and drop into the code below
+ * to handle backing up through keys.
+ */
+ if (!F_ISSET(hcp, H_NEXT_NODUP) && F_ISSET(hcp, H_ISDUP)) {
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) ==
+ H_OFFDUP) {
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Duplicates are on-page. */
+ if (hcp->dup_off != 0) {
+ memcpy(&hcp->dup_len, HKEYDATA_DATA(
+ H_PAIRDATA(dbp, hcp->page, hcp->indx))
+ + hcp->dup_off - sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ hcp->dup_off -=
+ DUP_SIZE(hcp->dup_len);
+ return (__ham_item(dbc, mode, pgnop));
+ }
+ }
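+
+	/*
+	 * (Layout note, inferred from the reads above: each on-page duplicate
+	 * element carries its length both before and after the data, so the
+	 * element at dup_off begins with a db_indx_t length and the previous
+	 * element's length sits at dup_off - sizeof(db_indx_t); DUP_SIZE()
+	 * accounts for the data plus both length fields.)
+	 */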
+
+ /*
+ * If we get here, we are not in a duplicate set, and just need
+ * to back up the cursor. There are still three cases:
+ * midpage, beginning of page, beginning of bucket.
+ */
+
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else
+ /*
+ * We are no longer in a dup set; flag this so the dup code
+ * will reinitialize should we stumble upon another one.
+ */
+ F_CLR(hcp, H_ISDUP);
+
+ if (hcp->indx == 0) { /* Beginning of page. */
+ hcp->pgno = PREV_PGNO(hcp->page);
+ if (hcp->pgno == PGNO_INVALID) {
+ /* Beginning of bucket. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ } else if ((ret =
+ __ham_next_cpage(dbc, hcp->pgno, 0)) != 0)
+ return (ret);
+ else
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ /*
+ * Either we've got the cursor set up to be decremented, or we
+ * have to find the end of a bucket.
+ */
+ if (hcp->indx == NDX_INVALID) {
+ DB_ASSERT(hcp->page != NULL);
+
+ hcp->indx = NUM_ENT(hcp->page);
+ for (next_pgno = NEXT_PGNO(hcp->page);
+ next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(hcp->page)) {
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ if (hcp->indx == 0) {
+ /* Bucket was empty. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ }
+
+ hcp->indx -= 2;
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * Sets the cursor to the next key/data pair on a page.
+ *
+ * PUBLIC: int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_next(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * Deleted on-page duplicates are a weird case. If we delete the last
+ * one, then our cursor is at the very end of a duplicate set and
+ * we actually need to go on to the next key.
+ */
+ if (F_ISSET(hcp, H_DELETED)) {
+ if (hcp->indx != NDX_INVALID &&
+ F_ISSET(hcp, H_ISDUP) &&
+ HPAGE_TYPE(dbc->dbp, hcp->page, H_DATAINDEX(hcp->indx))
+ == H_DUPLICATE && hcp->dup_tlen == hcp->dup_off) {
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (!F_ISSET(hcp, H_ISDUP) && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else if (F_ISSET(hcp, H_ISDUP) &&
+ F_ISSET(hcp, H_NEXT_NODUP)) {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ F_CLR(hcp, H_DELETED);
+ } else if (hcp->indx == NDX_INVALID) {
+ hcp->indx = 0;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_NEXT_NODUP)) {
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_ISDUP) && hcp->dup_tlen != 0) {
+ if (hcp->dup_off + DUP_SIZE(hcp->dup_len) >=
+ hcp->dup_tlen && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ }
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ if (hcp->dup_off >= hcp->dup_tlen) {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ }
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: void __ham_putitem __P((DB *, PAGE *p, const DBT *, int));
+ *
+ * This is a little bit sleazy in that we're overloading the meaning
+ * of the H_OFFPAGE type here. When we recover deletes, we have the
+ * entire entry instead of having only the DBT, so we'll pass type
+ * H_OFFPAGE to mean, "copy the whole entry" as opposed to constructing
+ * an H_KEYDATA around it.
+ */
+void
+__ham_putitem(dbp, p, dbt, type)
+ DB *dbp;
+ PAGE *p;
+ const DBT *dbt;
+ int type;
+{
+ u_int16_t n, off;
+ db_indx_t *inp;
+
+ n = NUM_ENT(p);
+ inp = P_INP(dbp, p);
+
+ /* Put the item element on the page. */
+ if (type == H_OFFPAGE) {
+ off = HOFFSET(p) - dbt->size;
+ HOFFSET(p) = inp[n] = off;
+ memcpy(P_ENTRY(dbp, p, n), dbt->data, dbt->size);
+ } else {
+ off = HOFFSET(p) - HKEYDATA_SIZE(dbt->size);
+ HOFFSET(p) = inp[n] = off;
+ PUT_HKEYDATA(P_ENTRY(dbp, p, n), dbt->data, dbt->size, type);
+ }
+
+ /* Adjust page info. */
+ NUM_ENT(p) += 1;
+}
+
+/*
+ * PUBLIC: void __ham_reputpair __P((DB *, PAGE *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *));
+ *
+ * This is a special case to restore a key/data pair to its original
+ * location during recovery. We are guaranteed that the pair fits
+ * on the page and is not the last pair on the page (because if it's
+ * the last pair, the normal insert works).
+ */
+void
+__ham_reputpair(dbp, p, ndx, key, data)
+ DB *dbp;
+ PAGE *p;
+ u_int32_t ndx;
+ const DBT *key, *data;
+{
+ db_indx_t i, *inp, movebytes, newbytes;
+ size_t psize;
+ u_int8_t *from;
+
+ psize = dbp->pgsize;
+ inp = P_INP(dbp, p);
+ /* First shuffle the existing items up on the page. */
+ movebytes = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - HOFFSET(p));
+ newbytes = key->size + data->size;
+ from = (u_int8_t *)p + HOFFSET(p);
+ memmove(from - newbytes, from, movebytes);
+
+ /*
+ * Adjust the indices and move them up 2 spaces. Note that we
+ * have to check the exit condition inside the loop just in case
+ * we are dealing with index 0 (db_indx_t's are unsigned).
+ */
+	for (i = NUM_ENT(p) - 1; ; i--) {
+ inp[i + 2] = inp[i] - newbytes;
+ if (i == H_KEYINDEX(ndx))
+ break;
+ }
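+
+	/*
+	 * For example, with ndx == 0 the loop must stop when i reaches
+	 * H_KEYINDEX(0) == 0; had the test been an "i >= 0" loop condition,
+	 * it would never fail for an unsigned i, and the final i-- would
+	 * wrap around to a huge index.
+	 */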
+
+ /* Put the key and data on the page. */
+ inp[H_KEYINDEX(ndx)] = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - key->size);
+ inp[H_DATAINDEX(ndx)] = inp[H_KEYINDEX(ndx)] - data->size;
+ memcpy(P_ENTRY(dbp, p, H_KEYINDEX(ndx)), key->data, key->size);
+ memcpy(P_ENTRY(dbp, p, H_DATAINDEX(ndx)), data->data, data->size);
+
+ /* Adjust page info. */
+ HOFFSET(p) -= newbytes;
+ NUM_ENT(p) += 2;
+}
+
+/*
+ * PUBLIC: int __ham_del_pair __P((DBC *, int));
+ */
+int
+__ham_del_pair(dbc, reclaim_page)
+ DBC *dbc;
+ int reclaim_page;
+{
+ DB *dbp;
+ DBT data_dbt, key_dbt;
+ DB_LSN new_lsn, *n_lsn, tmp_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *n_pagep, *nn_pagep, *p, *p_pagep;
+ db_ham_mode op;
+ db_indx_t ndx;
+ db_pgno_t chg_pgno, pgno, tmp_pgno;
+ int ret, t_ret;
+ u_int32_t order;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ n_pagep = p_pagep = nn_pagep = NULL;
+ ndx = hcp->indx;
+
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ p = hcp->page;
+
+ /*
+ * We optimize for the normal case which is when neither the key nor
+ * the data are large. In this case, we write a single log record
+ * and do the delete. If either is large, we'll call __big_delete
+ * to remove the big item and then update the page to remove the
+ * entry referring to the big item.
+ */
+ ret = 0;
+ if (HPAGE_PTYPE(H_PAIRKEY(dbp, p, ndx)) == H_OFFPAGE) {
+ memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_KEYINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ }
+
+ if (ret == 0)
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, p, ndx))) {
+ case H_OFFPAGE:
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_DATAINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ break;
+ case H_OFFDUP:
+ case H_DUPLICATE:
+ /*
+ * If we delete a pair that is/was a duplicate, then
+ * we had better clear the flag so that we update the
+ * cursor appropriately.
+ */
+ F_CLR(hcp, H_ISDUP);
+ break;
+ }
+
+ if (ret)
+ return (ret);
+
+ /* Now log the delete off this page. */
+ if (DBC_LOGGING(dbc)) {
+ key_dbt.data = P_ENTRY(dbp, p, H_KEYINDEX(ndx));
+ key_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_KEYINDEX(ndx));
+ data_dbt.data = P_ENTRY(dbp, p, H_DATAINDEX(ndx));
+ data_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_DATAINDEX(ndx));
+
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, DELPAIR, PGNO(p), (u_int32_t)ndx,
+ &LSN(p), &key_dbt, &data_dbt)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn;
+
+ /* Do the delete. */
+ __ham_dpair(dbp, p, ndx);
+
+ /*
+ * Mark item deleted so that we don't try to return it, and
+ * so that we update the cursor correctly on the next call
+ * to next.
+ */
+ F_SET(hcp, H_DELETED);
+ F_CLR(hcp, H_OK);
+
+ /*
+	 * Update cursors that are on the page where the delete happened.
+ */
+ if ((ret = __ham_c_update(dbc, 0, 0, 0)) != 0)
+ return (ret);
+
+ /*
+ * If we are locking, we will not maintain this, because it is
+ * a hot spot.
+ *
+ * XXX
+ * Perhaps we can retain incremental numbers and apply them later.
+ */
+ if (!STD_LOCKING(dbc)) {
+ --hcp->hdr->nelem;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
+
+ /*
+ * If we need to reclaim the page, then check if the page is empty.
+ * There are two cases. If it's empty and it's not the first page
+ * in the bucket (i.e., the bucket page) then we can simply remove
+	 * it.  If it is the first page in the bucket chain, then we need to
+	 * copy the second page into it and remove the second page.
+	 * If it's the only page in the bucket, we leave it alone.
+ */
+ if (!reclaim_page ||
+ NUM_ENT(p) != 0 ||
+ (PREV_PGNO(p) == PGNO_INVALID && NEXT_PGNO(p) == PGNO_INVALID))
+ return (mpf->set(mpf, p, DB_MPOOL_DIRTY));
+
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ /*
+ * First page in chain is empty and we know that there
+ * are more pages in the chain.
+ */
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ return (ret);
+
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID && (ret =
+ mpf->get(mpf, &NEXT_PGNO(n_pagep), 0, &nn_pagep)) != 0)
+ goto err;
+
+ if (DBC_LOGGING(dbc)) {
+ key_dbt.data = n_pagep;
+ key_dbt.size = dbp->pgsize;
+ if ((ret = __ham_copypage_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(p),
+ &LSN(p), PGNO(n_pagep), &LSN(n_pagep),
+ NEXT_PGNO(n_pagep),
+ nn_pagep == NULL ? NULL : &LSN(nn_pagep),
+ &key_dbt)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn; /* Structure assignment. */
+ LSN(n_pagep) = new_lsn;
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID)
+ LSN(nn_pagep) = new_lsn;
+
+ if (nn_pagep != NULL) {
+ PREV_PGNO(nn_pagep) = PGNO(p);
+ if ((ret =
+ mpf->put(mpf, nn_pagep, DB_MPOOL_DIRTY)) != 0) {
+ nn_pagep = NULL;
+ goto err;
+ }
+ }
+
+ tmp_pgno = PGNO(p);
+ tmp_lsn = LSN(p);
+ memcpy(p, n_pagep, dbp->pgsize);
+ PGNO(p) = tmp_pgno;
+ LSN(p) = tmp_lsn;
+ PREV_PGNO(p) = PGNO_INVALID;
+
+ /*
+ * Update cursors to reflect the fact that records
+ * on the second page have moved to the first page.
+ */
+ if ((ret = __ham_c_delpg(dbc, PGNO(n_pagep),
+ PGNO(p), 0, DB_HAM_DELFIRSTPG, &order)) != 0)
+ goto err;
+
+ /*
+ * Update the cursor to reflect its new position.
+ */
+ hcp->indx = 0;
+ hcp->pgno = PGNO(p);
+ hcp->order += order;
+
+ if ((ret = mpf->set(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ if ((ret = __db_free(dbc, n_pagep)) != 0) {
+ n_pagep = NULL;
+ goto err;
+ }
+ } else {
+ if ((ret = mpf->get(mpf, &PREV_PGNO(p), 0, &p_pagep)) != 0)
+ goto err;
+
+ if (NEXT_PGNO(p) != PGNO_INVALID) {
+ if ((ret =
+ mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ goto err;
+ n_lsn = &LSN(n_pagep);
+ } else {
+ n_pagep = NULL;
+ n_lsn = NULL;
+ }
+
+ NEXT_PGNO(p_pagep) = NEXT_PGNO(p);
+ if (n_pagep != NULL)
+ PREV_PGNO(n_pagep) = PGNO(p_pagep);
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn,
+ &new_lsn, 0, DELOVFL, PREV_PGNO(p), &LSN(p_pagep),
+ PGNO(p), &LSN(p), NEXT_PGNO(p), n_lsn)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p_pagep) = new_lsn; /* Structure assignment. */
+ if (n_pagep)
+ LSN(n_pagep) = new_lsn;
+ LSN(p) = new_lsn;
+
+ if (NEXT_PGNO(p) == PGNO_INVALID) {
+ /*
+ * There is no next page; put the cursor on the
+ * previous page as if we'd deleted the last item
+ * on that page, with index after the last valid
+ * entry.
+ *
+ * The deleted flag was set up above.
+ */
+ hcp->pgno = PGNO(p_pagep);
+ hcp->indx = NUM_ENT(p_pagep);
+ op = DB_HAM_DELLASTPG;
+ } else {
+ /*
+ * There is a next page, so put the cursor at
+ * the beginning of it.
+ */
+ hcp->pgno = NEXT_PGNO(p);
+ hcp->indx = 0;
+ op = DB_HAM_DELMIDPG;
+ }
+
+ /*
+ * Since we are about to delete the cursor page and we have
+ * just moved the cursor, we need to make sure that the
+ * old page pointer isn't left hanging around in the cursor.
+ */
+ hcp->page = NULL;
+ chg_pgno = PGNO(p);
+ ret = __db_free(dbc, p);
+ if ((t_ret =
+ mpf->put(mpf, p_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (n_pagep != NULL && (t_ret =
+ mpf->put(mpf, n_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ if ((ret = __ham_c_delpg(dbc,
+ chg_pgno, hcp->pgno, hcp->indx, op, &order)) != 0)
+ return (ret);
+ hcp->order += order;
+ }
+ return (ret);
+
+err: /* Clean up any pages. */
+ if (n_pagep != NULL)
+ (void)mpf->put(mpf, n_pagep, 0);
+ if (nn_pagep != NULL)
+ (void)mpf->put(mpf, nn_pagep, 0);
+ if (p_pagep != NULL)
+ (void)mpf->put(mpf, p_pagep, 0);
+ return (ret);
+}
+
+/*
+ * __ham_replpair --
+ * Given the key data indicated by the cursor, replace part/all of it
+ * according to the fields in the dbt.
+ *
+ * PUBLIC: int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+ */
+int
+__ham_replpair(dbc, dbt, make_dup)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t make_dup;
+{
+ DB *dbp;
+ DBT old_dbt, tdata, tmp;
+ DB_ENV *dbenv;
+ DB_LSN new_lsn;
+ HASH_CURSOR *hcp;
+ int32_t change; /* XXX: Possible overflow. */
+ u_int32_t dup_flag, len, memsize;
+ int beyond_eor, is_big, ret, type;
+ u_int8_t *beg, *dest, *end, *hk, *src;
+ void *memp;
+
+ /*
+ * Big item replacements are handled in generic code.
+ * Items that fit on the current page fall into 4 classes.
+ * 1. On-page element, same size
+ * 2. On-page element, new is bigger (fits)
+ * 3. On-page element, new is bigger (does not fit)
+ * 4. On-page element, old is bigger
+ * Numbers 1, 2, and 4 are essentially the same (and should
+ * be the common case). We handle case 3 as a delete and
+ * add.
+ */
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * We need to compute the number of bytes that we are adding or
+	 * removing from the entry.  Normally, we can simply subtract
+ * the number of bytes we are replacing (dbt->dlen) from the
+ * number of bytes we are inserting (dbt->size). However, if
+ * we are doing a partial put off the end of a record, then this
+ * formula doesn't work, because we are essentially adding
+ * new bytes.
+ */
+ change = dbt->size - dbt->dlen;
+
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ is_big = HPAGE_PTYPE(hk) == H_OFFPAGE;
+
+ if (is_big)
+ memcpy(&len, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ len = LEN_HKEYDATA(dbp, hcp->page,
+ dbp->pgsize, H_DATAINDEX(hcp->indx));
+
+ beyond_eor = dbt->doff + dbt->dlen > len;
+ if (beyond_eor)
+ change += dbt->doff + dbt->dlen - len;
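+
+	/*
+	 * Worked example (illustrative numbers): for an existing 10-byte
+	 * datum (len == 10) and a partial put with doff == 8, dlen == 5 and
+	 * size == 3, change starts at 3 - 5 = -2; the put runs 3 bytes past
+	 * the end of the record, so change becomes -2 + (8 + 5 - 10) = 1 and
+	 * the datum grows from 10 to 11 bytes (doff + size).
+	 */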
+
+ if (change > (int32_t)P_FREESPACE(dbp, hcp->page) ||
+ beyond_eor || is_big) {
+ /*
+ * Case 3 -- two subcases.
+ * A. This is not really a partial operation, but an overwrite.
+ * Simple del and add works.
+ * B. This is a partial and we need to construct the data that
+ * we are really inserting (yuck).
+ * In both cases, we need to grab the key off the page (in
+ * some cases we could do this outside of this routine; for
+ * cleanliness we do it here. If you happen to be on a big
+ * key, this could be a performance hit).
+ */
+ memset(&tmp, 0, sizeof(tmp));
+ if ((ret =
+ __db_ret(dbp, hcp->page, H_KEYINDEX(hcp->indx),
+ &tmp, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ return (ret);
+
+ /* Preserve duplicate info. */
+ dup_flag = F_ISSET(hcp, H_ISDUP);
+ if (dbt->doff == 0 && dbt->dlen == len) {
+ ret = __ham_del_pair(dbc, 0);
+ if (ret == 0)
+ ret = __ham_add_el(dbc,
+ &tmp, dbt, dup_flag ? H_DUPLICATE : H_KEYDATA);
+ } else { /* Case B */
+ type = HPAGE_PTYPE(hk) != H_OFFPAGE ?
+ HPAGE_PTYPE(hk) : H_KEYDATA;
+ memset(&tdata, 0, sizeof(tdata));
+ memp = NULL;
+ memsize = 0;
+ if ((ret = __db_ret(dbp, hcp->page,
+ H_DATAINDEX(hcp->indx), &tdata, &memp, &memsize))
+ != 0)
+ goto err;
+
+ /* Now we can delete the item. */
+ if ((ret = __ham_del_pair(dbc, 0)) != 0) {
+ __os_free(dbenv, memp);
+ goto err;
+ }
+
+ /* Now shift old data around to make room for new. */
+ if (change > 0) {
+ if ((ret = __os_realloc(dbenv,
+ tdata.size + change, &tdata.data)) != 0)
+ return (ret);
+ memp = tdata.data;
+ memsize = tdata.size + change;
+ memset((u_int8_t *)tdata.data + tdata.size,
+ 0, change);
+ }
+ end = (u_int8_t *)tdata.data + tdata.size;
+
+ src = (u_int8_t *)tdata.data + dbt->doff + dbt->dlen;
+ if (src < end && tdata.size > dbt->doff + dbt->dlen) {
+ len = tdata.size - dbt->doff - dbt->dlen;
+ dest = src + change;
+ memmove(dest, src, len);
+ }
+ memcpy((u_int8_t *)tdata.data + dbt->doff,
+ dbt->data, dbt->size);
+ tdata.size += change;
+
+ /* Now add the pair. */
+ ret = __ham_add_el(dbc, &tmp, &tdata, type);
+ __os_free(dbenv, memp);
+ }
+ F_SET(hcp, dup_flag);
+err: return (ret);
+ }
+
+ /*
+ * Set up pointer into existing data. Do it before the log
+ * message so we can use it inside of the log setup.
+ */
+ beg = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ beg += dbt->doff;
+
+ /*
+ * If we are going to have to move bytes at all, figure out
+ * all the parameters here. Then log the call before moving
+ * anything around.
+ */
+ if (DBC_LOGGING(dbc)) {
+ old_dbt.data = beg;
+ old_dbt.size = dbt->dlen;
+ if ((ret = __ham_replace_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(hcp->page),
+ (u_int32_t)H_DATAINDEX(hcp->indx), &LSN(hcp->page),
+ (u_int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0)
+ return (ret);
+
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+
+ __ham_onpage_replace(dbp, hcp->page, (u_int32_t)H_DATAINDEX(hcp->indx),
+ (int32_t)dbt->doff, change, dbt);
+
+ return (0);
+}
+
+/*
+ * Replace data on a page with new data, possibly growing or shrinking what's
+ * there. This is called on two different occasions. On one (from replpair)
+ * we are interested in changing only the data. On the other (from recovery)
+ * we are replacing the entire data (header and all) with a new element. In
+ * the latter case, the off argument is negative.
+ * pagep: the page that we're changing
+ * ndx: page index of the element that is growing/shrinking.
+ * off: Offset at which we are beginning the replacement.
+ * change: the number of bytes (+ or -) that the element is growing/shrinking.
+ * dbt: the new data that gets written at beg.
+ *
+ * PUBLIC: void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t,
+ * PUBLIC: int32_t, int32_t, DBT *));
+ */
+void
+__ham_onpage_replace(dbp, pagep, ndx, off, change, dbt)
+ DB *dbp;
+ PAGE *pagep;
+ u_int32_t ndx;
+ int32_t off;
+ int32_t change;
+ DBT *dbt;
+{
+ db_indx_t i, *inp;
+ int32_t len;
+ size_t pgsize;
+ u_int8_t *src, *dest;
+ int zero_me;
+
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, pagep);
+ if (change != 0) {
+ zero_me = 0;
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ if (off < 0)
+ len = inp[ndx] - HOFFSET(pagep);
+ else if ((u_int32_t)off >=
+ LEN_HKEYDATA(dbp, pagep, pgsize, ndx)) {
+ len = (int32_t)(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx))
+ + LEN_HKEYDATA(dbp, pagep, pgsize, ndx) - src);
+ zero_me = 1;
+ } else
+ len = (int32_t)(
+ (HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off) -
+ src);
+ dest = src - change;
+ memmove(dest, src, len);
+ if (zero_me)
+ memset(dest + len, 0, change);
+
+ /* Now update the indices. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ inp[i] -= change;
+ HOFFSET(pagep) -= change;
+ }
+ if (off >= 0)
+ memcpy(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off,
+ dbt->data, dbt->size);
+ else
+ memcpy(P_ENTRY(dbp, pagep, ndx), dbt->data, dbt->size);
+}
+
+/*
+ * PUBLIC: int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+ */
+int
+__ham_split_page(dbc, obucket, nbucket)
+ DBC *dbc;
+ u_int32_t obucket, nbucket;
+{
+ DB *dbp;
+ DBC **carray;
+ DBT key, page_dbt;
+ DB_ENV *dbenv;
+ DB_LOCK block;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp, *cp;
+ PAGE **pp, *old_pagep, *temp_pagep, *new_pagep;
+ db_indx_t n;
+ db_pgno_t bucket_pgno, npgno, next_pgno;
+ u_int32_t big_len, len;
+ int found, i, ret, t_ret;
+ void *big_buf;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ temp_pagep = old_pagep = new_pagep = NULL;
+ carray = NULL;
+ LOCK_INIT(block);
+
+ bucket_pgno = BUCKET_TO_PAGE(hcp, obucket);
+ if ((ret = __db_lget(dbc,
+ 0, bucket_pgno, DB_LOCK_WRITE, 0, &block)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf,
+ &bucket_pgno, DB_MPOOL_CREATE, &old_pagep)) != 0)
+ goto err;
+
+ /* Properly initialize the new bucket page. */
+ npgno = BUCKET_TO_PAGE(hcp, nbucket);
+ if ((ret = mpf->get(mpf, &npgno, DB_MPOOL_CREATE, &new_pagep)) != 0)
+ goto err;
+ P_INIT(new_pagep,
+ dbp->pgsize, npgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+
+ temp_pagep = hcp->split_buf;
+ memcpy(temp_pagep, old_pagep, dbp->pgsize);
+
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0, SPLITOLD,
+ PGNO(old_pagep), &page_dbt, &LSN(old_pagep))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(old_pagep) = new_lsn; /* Structure assignment. */
+
+ P_INIT(old_pagep, dbp->pgsize, PGNO(old_pagep), PGNO_INVALID,
+ PGNO_INVALID, 0, P_HASH);
+
+ big_len = 0;
+ big_buf = NULL;
+ key.flags = 0;
+ while (temp_pagep != NULL) {
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(temp_pagep), NDX_INVALID, &carray)) != 0)
+ goto err;
+
+ for (n = 0; n < (db_indx_t)NUM_ENT(temp_pagep); n += 2) {
+ if ((ret = __db_ret(dbp, temp_pagep,
+ H_KEYINDEX(n), &key, &big_buf, &big_len)) != 0)
+ goto err;
+
+ if (__ham_call_hash(dbc, key.data, key.size) == obucket)
+ pp = &old_pagep;
+ else
+ pp = &new_pagep;
+
+ /*
+ * Figure out how many bytes we need on the new
+ * page to store the key/data pair.
+ */
+ len = LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
+ H_DATAINDEX(n)) +
+ LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
+ H_KEYINDEX(n)) +
+ 2 * sizeof(db_indx_t);
+
+ if (P_FREESPACE(dbp, *pp) < len) {
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = *pp;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ SPLITNEW, PGNO(*pp), &page_dbt,
+ &LSN(*pp))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+ LSN(*pp) = new_lsn;
+ if ((ret =
+ __ham_add_ovflpage(dbc, *pp, 1, pp)) != 0)
+ goto err;
+ }
+
+ /* Check if we need to update a cursor. */
+ if (carray != NULL) {
+ found = 0;
+ for (i = 0; carray[i] != NULL; i++) {
+ cp =
+ (HASH_CURSOR *)carray[i]->internal;
+ if (cp->pgno == PGNO(temp_pagep) &&
+ cp->indx == n) {
+ cp->pgno = PGNO(*pp);
+ cp->indx = NUM_ENT(*pp);
+ found = 1;
+ }
+ }
+ if (found && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret =
+ __ham_chgpg_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ DB_HAM_SPLIT, PGNO(temp_pagep),
+ PGNO(*pp), n, NUM_ENT(*pp))) != 0)
+ goto err;
+ }
+ }
+ __ham_copy_item(dbp, temp_pagep, H_KEYINDEX(n), *pp);
+ __ham_copy_item(dbp, temp_pagep, H_DATAINDEX(n), *pp);
+ }
+ next_pgno = NEXT_PGNO(temp_pagep);
+
+ /* Clear temp_page; if it's a link overflow page, free it. */
+ if (PGNO(temp_pagep) != bucket_pgno && (ret =
+ __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ if (next_pgno == PGNO_INVALID)
+ temp_pagep = NULL;
+ else if ((ret = mpf->get(
+ mpf, &next_pgno, DB_MPOOL_CREATE, &temp_pagep)) != 0)
+ goto err;
+
+ if (temp_pagep != NULL) {
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = temp_pagep;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ SPLITOLD, PGNO(temp_pagep),
+ &page_dbt, &LSN(temp_pagep))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+ LSN(temp_pagep) = new_lsn;
+ }
+
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(dbenv, carray);
+ carray = NULL;
+ }
+ if (big_buf != NULL)
+ __os_free(dbenv, big_buf);
+
+ /*
+ * If the original bucket spanned multiple pages, then we've got
+ * a pointer to a page that used to be on the bucket chain. It
+ * should be deleted.
+ */
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno &&
+ (ret = __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ /*
+ * Write new buckets out.
+ */
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn,
+ &new_lsn, 0, SPLITNEW, PGNO(old_pagep), &page_dbt,
+ &LSN(old_pagep))) != 0)
+ goto err;
+ LSN(old_pagep) = new_lsn;
+
+ page_dbt.data = new_pagep;
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn, &new_lsn, 0,
+ SPLITNEW, PGNO(new_pagep), &page_dbt,
+ &LSN(new_pagep))) != 0)
+ goto err;
+ LSN(new_pagep) = new_lsn;
+ } else {
+ LSN_NOT_LOGGED(LSN(old_pagep));
+ LSN_NOT_LOGGED(LSN(new_pagep));
+ }
+
+ ret = mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
+ if ((t_ret =
+ mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (0) {
+err: if (old_pagep != NULL)
+ (void)mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
+ if (new_pagep != NULL)
+ (void)mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY);
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno)
+ (void)mpf->put(mpf, temp_pagep, DB_MPOOL_DIRTY);
+ }
+ if (LOCK_ISSET(block))
+ __TLPUT(dbc, block);
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(dbenv, carray);
+ return (ret);
+}
+
+/*
+ * Add the given pair to the page. The page in question may already be
+ * held (i.e. it was already gotten). If it is, then the page is passed
+ * in via the pagep parameter. On return, pagep will contain the page
+ * to which we just added something. This allows us to link overflow
+ * pages and return the new page having correctly put the last page.
+ *
+ * PUBLIC: int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+ */
+int
+__ham_add_el(dbc, key, val, type)
+ DBC *dbc;
+ const DBT *key, *val;
+ int type;
+{
+ const DBT *pkey, *pdata;
+ DB *dbp;
+ DBT key_dbt, data_dbt;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HOFFPAGE doff, koff;
+ db_pgno_t next_pgno, pgno;
+ u_int32_t data_size, key_size, pairsize, rectype;
+ int do_expand, is_keybig, is_databig, ret;
+ int key_type, data_type;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ do_expand = 0;
+
+ pgno = hcp->seek_found_page != PGNO_INVALID ?
+ hcp->seek_found_page : hcp->pgno;
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+
+ key_size = HKEYDATA_PSIZE(key->size);
+ data_size = HKEYDATA_PSIZE(val->size);
+ is_keybig = ISBIG(hcp, key->size);
+ is_databig = ISBIG(hcp, val->size);
+ if (is_keybig)
+ key_size = HOFFPAGE_PSIZE;
+ if (is_databig)
+ data_size = HOFFPAGE_PSIZE;
+
+ pairsize = key_size + data_size;
+
+ /* Advance to first page in chain with room for item. */
+ while (H_NUMPAIRS(hcp->page) && NEXT_PGNO(hcp->page) != PGNO_INVALID) {
+ /*
+ * This may not be the end of the chain, but the pair may fit
+ * anyway. Check if it's a bigpair that fits or a regular
+ * pair that fits.
+ */
+ if (P_FREESPACE(dbp, hcp->page) >= pairsize)
+ break;
+ next_pgno = NEXT_PGNO(hcp->page);
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Check if we need to allocate a new page.
+ */
+ if (P_FREESPACE(dbp, hcp->page) < pairsize) {
+ do_expand = 1;
+ if ((ret = __ham_add_ovflpage(dbc,
+ (PAGE *)hcp->page, 1, (PAGE **)&hcp->page)) != 0)
+ return (ret);
+ hcp->pgno = PGNO(hcp->page);
+ }
+
+ /*
+ * Update cursor.
+ */
+ hcp->indx = NUM_ENT(hcp->page);
+ F_CLR(hcp, H_DELETED);
+ if (is_keybig) {
+ koff.type = H_OFFPAGE;
+ UMRW_SET(koff.unused[0]);
+ UMRW_SET(koff.unused[1]);
+ UMRW_SET(koff.unused[2]);
+ if ((ret = __db_poff(dbc, key, &koff.pgno)) != 0)
+ return (ret);
+ koff.tlen = key->size;
+ key_dbt.data = &koff;
+ key_dbt.size = sizeof(koff);
+ pkey = &key_dbt;
+ key_type = H_OFFPAGE;
+ } else {
+ pkey = key;
+ key_type = H_KEYDATA;
+ }
+
+ if (is_databig) {
+ doff.type = H_OFFPAGE;
+ UMRW_SET(doff.unused[0]);
+ UMRW_SET(doff.unused[1]);
+ UMRW_SET(doff.unused[2]);
+ if ((ret = __db_poff(dbc, val, &doff.pgno)) != 0)
+ return (ret);
+ doff.tlen = val->size;
+ data_dbt.data = &doff;
+ data_dbt.size = sizeof(doff);
+ pdata = &data_dbt;
+ data_type = H_OFFPAGE;
+ } else {
+ pdata = val;
+ data_type = type;
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ if (is_databig)
+ rectype |= PAIR_DATAMASK;
+ if (is_keybig)
+ rectype |= PAIR_KEYMASK;
+ if (type == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+
+ if ((ret = __ham_insdel_log(dbp, dbc->txn, &new_lsn, 0,
+ rectype, PGNO(hcp->page), (u_int32_t)NUM_ENT(hcp->page),
+ &LSN(hcp->page), pkey, pdata)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+
+ __ham_putitem(dbp, hcp->page, pkey, key_type);
+ __ham_putitem(dbp, hcp->page, pdata, data_type);
+
+ /*
+ * For splits, we are going to update item_info's page number
+ * field, so that we can easily return to the same page the
+ * next time we come in here. For other operations, this shouldn't
+ * matter, since odds are this is the last thing that happens before
+ * we return to the user program.
+ */
+ hcp->pgno = PGNO(hcp->page);
+
+ /*
+ * XXX
+ * Maybe keep incremental numbers here.
+ */
+ if (!STD_LOCKING(dbc)) {
+ hcp->hdr->nelem++;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
+
+ if (do_expand || (hcp->hdr->ffactor != 0 &&
+ (u_int32_t)H_NUMPAIRS(hcp->page) > hcp->hdr->ffactor))
+ F_SET(hcp, H_EXPAND);
+ return (0);
+}
+
+/*
+ * Special __putitem call used in splitting -- copies one entry to
+ * another. Works for all types of hash entries (H_OFFPAGE, H_KEYDATA,
+ * H_DUPLICATE, H_OFFDUP). Since we log splits at a high level, we
+ * do not need to do any logging here.
+ *
+ * PUBLIC: void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *));
+ */
+void
+__ham_copy_item(dbp, src_page, src_ndx, dest_page)
+ DB *dbp;
+ PAGE *src_page;
+ u_int32_t src_ndx;
+ PAGE *dest_page;
+{
+ u_int32_t len;
+ size_t pgsize;
+ void *src, *dest;
+ db_indx_t *inp;
+
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, dest_page);
+ /*
+ * Copy the key and data entries onto this new page.
+ */
+ src = P_ENTRY(dbp, src_page, src_ndx);
+
+ /* Set up space on dest. */
+ len = (u_int32_t)LEN_HITEM(dbp, src_page, pgsize, src_ndx);
+ HOFFSET(dest_page) -= len;
+ inp[NUM_ENT(dest_page)] = HOFFSET(dest_page);
+ dest = P_ENTRY(dbp, dest_page, NUM_ENT(dest_page));
+ NUM_ENT(dest_page)++;
+
+ memcpy(dest, src, len);
+}
+
+/*
+ *
+ * __ham_add_ovflpage --
+ *	Allocate a new overflow page, link it after pagep and return it
+ *	through pp.
+ *
+ * Returns:
+ *	0 on success
+ *	a non-zero error value on failure
+ * PUBLIC: int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+ */
+int
+__ham_add_ovflpage(dbc, pagep, release, pp)
+ DBC *dbc;
+ PAGE *pagep;
+ int release;
+ PAGE **pp;
+{
+ DB *dbp;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *new_pagep;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ if ((ret = __db_new(dbc, P_HASH, &new_pagep)) != 0)
+ return (ret);
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn, &new_lsn, 0,
+ PUTOVFL, PGNO(pagep), &LSN(pagep),
+ PGNO(new_pagep), &LSN(new_pagep), PGNO_INVALID, NULL)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(pagep) = LSN(new_pagep) = new_lsn;
+ NEXT_PGNO(pagep) = PGNO(new_pagep);
+
+ PREV_PGNO(new_pagep) = PGNO(pagep);
+
+ if (release)
+ ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
+
+ *pp = new_pagep;
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_get_cpage __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_get_cpage(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ DB *dbp;
+ DB_LOCK tmp_lock;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * There are four cases with respect to buckets and locks.
+ * 1. If there is no lock held, then if we are locking, we should
+ * get the lock.
+ * 2. If there is a lock held, it's for the current bucket, and it's
+ * for the right mode, we don't need to do anything.
+ * 3. If there is a lock held for the current bucket but it's not
+ * strong enough, we need to upgrade.
+ * 4. If there is a lock, but it's for a different bucket, then we need
+ * to release the existing lock and get a new lock.
+ */
+ LOCK_INIT(tmp_lock);
+ if (STD_LOCKING(dbc)) {
+ if (hcp->lbucket != hcp->bucket && /* Case 4 */
+ (ret = __TLPUT(dbc, hcp->lock)) != 0)
+ return (ret);
+
+ if ((LOCK_ISSET(hcp->lock) &&
+ (hcp->lock_mode == DB_LOCK_READ &&
+ mode == DB_LOCK_WRITE))) {
+ /* Case 3. */
+ tmp_lock = hcp->lock;
+ LOCK_INIT(hcp->lock);
+ }
+
+ /* Acquire the lock. */
+ if (!LOCK_ISSET(hcp->lock))
+ /* Cases 1, 3, and 4. */
+ if ((ret = __ham_lock_bucket(dbc, mode)) != 0)
+ return (ret);
+
+ if (ret == 0) {
+ hcp->lock_mode = mode;
+ hcp->lbucket = hcp->bucket;
+ if (LOCK_ISSET(tmp_lock))
+ /* Case 3: release the original lock. */
+ ret =
+ dbp->dbenv->lock_put(dbp->dbenv, &tmp_lock);
+ } else if (LOCK_ISSET(tmp_lock))
+ hcp->lock = tmp_lock;
+ }
+
+ if (ret == 0 && hcp->page == NULL) {
+ if (hcp->pgno == PGNO_INVALID)
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if ((ret = mpf->get(mpf,
+ &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Get a new page at the cursor, putting the current page first if
+ * necessary; the dirty argument controls whether that page is written
+ * back as dirty.
+ *
+ * PUBLIC: int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+ */
+int
+__ham_next_cpage(dbc, pgno, dirty)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int dirty;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *p;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (hcp->page != NULL &&
+ (ret = mpf->put(mpf, hcp->page, dirty ? DB_MPOOL_DIRTY : 0)) != 0)
+ return (ret);
+ hcp->page = NULL;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &p)) != 0)
+ return (ret);
+
+ hcp->page = p;
+ hcp->pgno = pgno;
+ hcp->indx = 0;
+
+ return (0);
+}
+
+/*
+ * __ham_lock_bucket --
+ * Get the lock on a particular bucket.
+ *
+ * PUBLIC: int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_lock_bucket(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ int gotmeta, ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ gotmeta = hcp->hdr == NULL ? 1 : 0;
+ if (gotmeta)
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (gotmeta)
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ return (ret);
+
+ ret = __db_lget(dbc, 0, pgno, mode, 0, &hcp->lock);
+
+ hcp->lock_mode = mode;
+ return (ret);
+}
+
+/*
+ * __ham_dpair --
+ * Delete a pair on a page, paying no attention to what the pair
+ * represents. The caller is responsible for freeing up duplicates
+ * or offpage entries that might be referenced by this pair.
+ *
+ * Recovery assumes that this may be called without the metadata
+ * page pinned.
+ *
+ * PUBLIC: void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+ */
+void
+__ham_dpair(dbp, p, indx)
+ DB *dbp;
+ PAGE *p;
+ u_int32_t indx;
+{
+ db_indx_t delta, n, *inp;
+ u_int8_t *dest, *src;
+
+ inp = P_INP(dbp, p);
+ /*
+ * Compute "delta", the amount we have to shift all of the
+ * offsets. To find the delta, we just need to calculate
+ * the size of the pair of elements we are removing.
+ */
+ delta = H_PAIRSIZE(dbp, p, dbp->pgsize, indx);
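+	/*
+	 * Illustration (sizes assumed): if the key and data entries of the
+	 * pair being removed occupy 20 bytes together, delta == 20; below,
+	 * HOFFSET grows by 20, the entries packed after this pair slide 20
+	 * bytes toward the end of the page, and their inp[] offsets are
+	 * bumped by the same amount.
+	 */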
+
+ /*
+ * The hard case: we want to remove something other than
+ * the last item on the page. We need to shift data and
+ * offsets down.
+ */
+ if ((db_indx_t)indx != NUM_ENT(p) - 2) {
+ /*
+ * Move the data: src is the first occupied byte on
+ * the page. (Length is delta.)
+ */
+ src = (u_int8_t *)p + HOFFSET(p);
+
+ /*
+ * Destination is delta bytes beyond src. This might
+ * be an overlapping copy, so we have to use memmove.
+ */
+ dest = src + delta;
+ memmove(dest, src, inp[H_DATAINDEX(indx)] - HOFFSET(p));
+ }
+
+ /* Adjust page metadata. */
+ HOFFSET(p) = HOFFSET(p) + delta;
+ NUM_ENT(p) = NUM_ENT(p) - 2;
+
+ /* Adjust the offsets. */
+ for (n = (db_indx_t)indx; n < (db_indx_t)(NUM_ENT(p)); n++)
+ inp[n] = inp[n + 2] + delta;
+
+}
+
+/*
+ * __ham_c_delpg --
+ *
+ * Adjust the cursors after we've emptied a page in a bucket, taking
+ * care that when we move cursors pointing to deleted items, their
+ * orders don't collide with the orders of cursors on the page we move
+ * them to (since after this function is called, cursors with the same
+ * index on the two pages will be otherwise indistinguishable--they'll
+ * all have pgno new_pgno). There are three cases:
+ *
+ * 1) The emptied page is the first page in the bucket. In this
+ * case, we've copied all the items from the second page into the
+ * first page, so the first page is new_pgno and the second page is
+ * old_pgno. new_pgno is empty, but can have deleted cursors
+ * pointing at indx 0, so we need to be careful of the orders
+ * there. This is DB_HAM_DELFIRSTPG.
+ *
+ * 2) The page is somewhere in the middle of a bucket. Our caller
+ * can just delete such a page, so it's old_pgno. old_pgno is
+ * empty, but may have deleted cursors pointing at indx 0, so we
+ * need to be careful of indx 0 when we move those cursors to
+ * new_pgno. This is DB_HAM_DELMIDPG.
+ *
+ * 3) The page is the last in a bucket. Again the empty page is
+ * old_pgno, and again it should only have cursors that are deleted
+ * and at indx == 0. This time, though, there's no next page to
+ * move them to, so we set them to indx == num_ent on the previous
+ * page--and indx == num_ent is the index whose cursors we need to
+ * be careful of. This is DB_HAM_DELLASTPG.
+ */
+static int
+__ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t num_ent;
+ db_ham_mode op;
+ u_int32_t *orderp;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+ db_indx_t indx;
+ u_int32_t order;
+
+ /* Which is the worrisome index? */
+ indx = (op == DB_HAM_DELLASTPG) ? num_ent : 0;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Find the highest order of any cursor our movement
+ * may collide with.
+ */
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ hcp = (HASH_CURSOR *)cp->internal;
+ if (hcp->pgno == new_pgno) {
+ if (hcp->indx == indx &&
+ F_ISSET(hcp, H_DELETED) &&
+ hcp->order >= order)
+ order = hcp->order + 1;
+ DB_ASSERT(op != DB_HAM_DELFIRSTPG ||
+ hcp->indx == NDX_INVALID ||
+ (hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED)));
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+
+ if (hcp->pgno == old_pgno) {
+ switch (op) {
+ case DB_HAM_DELFIRSTPG:
+ /*
+ * We're moving all items,
+ * regardless of index.
+ */
+ hcp->pgno = new_pgno;
+
+ /*
+ * But we have to be careful of
+ * the order values.
+ */
+ if (hcp->indx == indx)
+ hcp->order += order;
+ break;
+ case DB_HAM_DELMIDPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->order += order;
+ break;
+ case DB_HAM_DELLASTPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->indx = indx;
+ hcp->order += order;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (__db_panic(dbenv, EINVAL));
+ }
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, op,
+ old_pgno, new_pgno, indx, order)) != 0)
+ return (ret);
+ }
+ *orderp = order;
+ return (0);
+}
diff --git a/libdb/hash/hash_rec.c b/libdb/hash/hash_rec.c
new file mode 100644
index 0000000..5cddc95
--- /dev/null
+++ b/libdb/hash/hash_rec.c
@@ -0,0 +1,1156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+
+static int __ham_alloc_pages __P((DB *, __ham_groupalloc_args *, DB_LSN *));
+
+/*
+ * __ham_insdel_recover --
+ *
+ * PUBLIC: int __ham_insdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_insdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags, opcode;
+ int cmp_n, cmp_p, ret, type;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_insdel_print);
+ REC_INTRO(__ham_insdel_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
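+	/*
+	 * Reading the comparisons (a convention assumed throughout these
+	 * recovery functions rather than spelled out here): cmp_p == 0
+	 * means the page still carries the LSN it had before this record,
+	 * so the change may need to be redone; cmp_n == 0 means the page
+	 * already carries this record's LSN, so the change may need to be
+	 * undone.
+	 */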
+ /*
+ * Two possible things going on:
+ * redo a delete/undo a put: delete the item from the page.
+ * redo a put/undo a delete: add the item to the page.
+ * If we are undoing a delete, then the information logged is the
+ * entire entry off the page, not just the data of a dbt. In
+ * this case, we want to copy it back onto the page verbatim.
+ * We do this by calling __putitem with the type H_OFFPAGE instead
+ * of H_KEYDATA.
+ */
+ opcode = OPCODE_OF(argp->opcode);
+
+ flags = 0;
+ if ((opcode == DELPAIR && cmp_n == 0 && DB_UNDO(op)) ||
+ (opcode == PUTPAIR && cmp_p == 0 && DB_REDO(op))) {
+ /*
+ * Need to redo a PUT or undo a delete. If we are undoing a
+ * delete, we've got to restore the item back to its original
+ * position. That's a royal pain in the butt (because we do
+ * not store item lengths on the page), but there's no choice.
+ */
+ if (opcode != DELPAIR ||
+ argp->ndx == (u_int32_t)NUM_ENT(pagep)) {
+ __ham_putitem(file_dbp, pagep, &argp->key,
+ DB_UNDO(op) || PAIR_ISKEYBIG(argp->opcode) ?
+ H_OFFPAGE : H_KEYDATA);
+
+ if (PAIR_ISDATADUP(argp->opcode))
+ type = H_DUPLICATE;
+ else if (DB_UNDO(op) || PAIR_ISDATABIG(argp->opcode))
+ type = H_OFFPAGE;
+ else
+ type = H_KEYDATA;
+ __ham_putitem(file_dbp, pagep, &argp->data, type);
+ } else
+ (void)__ham_reputpair(file_dbp, pagep,
+ argp->ndx, &argp->key, &argp->data);
+
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+
+ } else if ((opcode == DELPAIR && cmp_p == 0 && DB_REDO(op)) ||
+ (opcode == PUTPAIR && cmp_n == 0 && DB_UNDO(op))) {
+ /* Need to undo a put or redo a delete. */
+ __ham_dpair(file_dbp, pagep, argp->ndx);
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Return the previous LSN. */
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_newpage_recover --
+ * This log message is used when we add/remove overflow pages. This
+ * message takes care of the pointer chains, not the data on the pages.
+ *
+ * PUBLIC: int __ham_newpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_newpage_print);
+ REC_INTRO(__ham_newpage_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->new_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else if ((ret = mpf->get(mpf,
+ &argp->new_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * There are potentially three pages we need to check: the one
+ * that we created/deleted, the one before it and the one after
+ * it.
+ */
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ P_INIT(pagep, file_dbp->pgsize, argp->new_pgno,
+ argp->prev_pgno, argp->next_pgno, 0, P_HASH);
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /*
+ * Redo a delete or undo a create new page. All we
+ * really need to do is change the LSN.
+ */
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Now do the prev page. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ ret = 0;
+ goto npage;
+ } else if ((ret = mpf->get(mpf,
+ &argp->prev_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->next_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->next_pgno = argp->next_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+ }
+
+ /* Now time to do the next page */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->prev_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->prev_pgno = argp->prev_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+ }
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_replace_recover --
+ * This log message refers to partial puts that are local to a single
+ * page. You can think of them as special cases of the more general
+ * insdel log message.
+ *
+ * PUBLIC: int __ham_replace_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_replace_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_replace_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ DBT dbt;
+ PAGE *pagep;
+ u_int32_t flags;
+ int32_t grow;
+ int cmp_n, cmp_p, ret;
+ u_int8_t *hk;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_replace_print);
+ REC_INTRO(__ham_replace_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ memset(&dbt, 0, sizeof(dbt));
+ flags = 0;
+ grow = 1;
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Reapply the change as specified. */
+ dbt.data = argp->newitem.data;
+ dbt.size = argp->newitem.size;
+ grow = argp->newitem.size - argp->olditem.size;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the already applied change. */
+ dbt.data = argp->olditem.data;
+ dbt.size = argp->olditem.size;
+ grow = argp->olditem.size - argp->newitem.size;
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags) {
+ __ham_onpage_replace(file_dbp, pagep,
+ argp->ndx, argp->off, grow, &dbt);
+ if (argp->makedup) {
+ hk = P_ENTRY(file_dbp, pagep, argp->ndx);
+ if (DB_REDO(op))
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+ else
+ HPAGE_PTYPE(hk) = H_KEYDATA;
+ }
+ }
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_splitdata_recover --
+ *
+ * PUBLIC: int __ham_splitdata_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitdata_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_splitdata_print);
+ REC_INTRO(__ham_splitdata_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ /*
+ * There are two types of log messages here, one for the old page
+ * and one for the new pages created. The original image in the
+ * SPLITOLD record is used for undo. The image in the SPLITNEW
+ * is used for redo. We should never have a case where there is
+ * a redo operation and the SPLITOLD record is on disk, but not
+	 * the SPLITNEW record. Therefore, we only have work to do when we
+	 * redo NEW messages and undo OLD messages, but we have to update
+ * LSNs in both cases.
+ */
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ if (argp->opcode == SPLITNEW)
+ /* Need to redo the split described. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ if (argp->opcode == SPLITOLD) {
+ /* Put back the old image. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ } else
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno,
+ PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_copypage_recover --
+ * Recovery function for copypage.
+ *
+ * PUBLIC: int __ham_copypage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_copypage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_copypage_print);
+ REC_INTRO(__ham_copypage_read, 1);
+
+ flags = 0;
+
+ /* This is the bucket page. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto donext;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ PGNO(pagep) = argp->pgno;
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, PGNO_INVALID,
+ argp->next_pgno, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+donext: /* Now fix up the "next" page. */
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto do_nn;
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /* For REDO just update the LSN. For UNDO copy page back. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Now fix up the next's next page. */
+do_nn: if (argp->nnext_pgno == PGNO_INVALID)
+ goto done;
+
+ if ((ret = mpf->get(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->nnext_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nnextlsn);
+
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ PREV_PGNO(pagep) = argp->pgno;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ PREV_PGNO(pagep) = argp->next_pgno;
+ LSN(pagep) = argp->nnextlsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_metagroup_recover --
+ * Recovery function for metagroup.
+ *
+ * PUBLIC: int __ham_metagroup_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_metagroup_args *argp;
+ HASH_CURSOR *hcp;
+ DB *file_dbp;
+ DBMETA *mmeta;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ u_int32_t flags, mmeta_flags;
+ int cmp_n, cmp_p, did_recover, groupgrow, ret;
+
+ COMPQUIET(info, NULL);
+ mmeta_flags = 0;
+ mmeta = NULL;
+ REC_PRINT(__ham_metagroup_print);
+ REC_INTRO(__ham_metagroup_read, 1);
+
+ /*
+	 * This logs the virtual create of pages pgno to pgno + bucket.
+ * Since the mpool page-allocation is not really able to be
+ * transaction protected, we can never undo it. Even in an abort,
+ * we have to allocate these pages to the hash table if they
+ * were actually created. In particular, during disaster
+ * recovery the metapage may be before this point if we
+ * are rolling backward. If the file has not been extended
+ * then the metapage could not have been updated.
+ * The log record contains:
+ * bucket: new bucket being allocated.
+ * pgno: page number of the new bucket.
+ * if bucket is a power of 2, then we allocated a whole batch of
+ * pages; if it's not, then we simply allocated one new page.
+ */
+ groupgrow = (u_int32_t)(1 << __db_log2(argp->bucket + 1)) ==
+ argp->bucket + 1;
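+	/*
+	 * Illustration (bucket values assumed, and assuming __db_log2()
+	 * rounds up to the next power of two): bucket == 3 gives
+	 * 1 << __db_log2(4) == 4 == bucket + 1, so this record marks the
+	 * start of a new doubling; bucket == 4 gives 1 << __db_log2(5) ==
+	 * 8 != 5, so it does not.
+	 */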
+ pgno = argp->pgno;
+ if (argp->newalloc)
+ pgno += argp->bucket;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) {
+ /*
+ * We need to make sure that we redo the allocation of the
+ * pages.
+ */
+ if (DB_REDO(op))
+ pagep->lsn = *lsnp;
+ else
+ pagep->lsn = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Now we have to update the meta-data page. */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
+ cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ did_recover = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the actual updating of bucket counts. */
+ ++hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask =
+ (argp->bucket + 1) | hcp->hdr->low_mask;
+ }
+ hcp->hdr->dbmeta.lsn = *lsnp;
+ did_recover = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the actual updating of bucket counts. */
+ --hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->high_mask = hcp->hdr->low_mask;
+ hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
+ }
+ hcp->hdr->dbmeta.lsn = argp->metalsn;
+ did_recover = 1;
+ }
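+	/*
+	 * Rough worked example (mask values assumed): if argp->bucket == 3
+	 * and the old masks were high_mask == 3, low_mask == 1, the redo
+	 * path above sets low_mask = 3 and high_mask = (3 + 1) | 3 == 7;
+	 * the undo path reverses this, restoring high_mask = 3 and
+	 * low_mask = 3 >> 1 == 1.
+	 */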
+
+ /*
+ * Now we need to fix up the spares array. Each entry in the
+ * spares array indicates the beginning page number for the
+ * indicated doubling. We need to fill this in whenever the
+ * spares array is invalid, since we never reclaim pages from
+ * the spares array and we have to allocate the pages to the
+ * spares array in both the redo and undo cases.
+ */
+ if (argp->newalloc &&
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] == PGNO_INVALID) {
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
+ argp->pgno - argp->bucket - 1;
+ did_recover = 1;
+ }
+
+ /*
+ * Finally, we need to potentially fix up the last_pgno field
+ * in the master meta-data page (which may or may not be the
+ * same as the hash header page).
+ */
+ if (argp->mmpgno != argp->mpgno) {
+ if ((ret =
+ mpf->get(mpf, &argp->mmpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto out;
+ mmeta_flags = 0;
+ cmp_n = log_compare(lsnp, &mmeta->lsn);
+ cmp_p = log_compare(&mmeta->lsn, &argp->mmetalsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ mmeta->lsn = *lsnp;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ mmeta->lsn = argp->mmetalsn;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ }
+ } else
+ mmeta = (DBMETA *)hcp->hdr;
+
+ if (argp->newalloc) {
+ if (mmeta->last_pgno < pgno)
+ mmeta->last_pgno = pgno;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ }
+
+ if (argp->mmpgno != argp->mpgno &&
+ (ret = mpf->put(mpf, mmeta, mmeta_flags)) != 0)
+ goto out;
+ mmeta = NULL;
+
+ if (did_recover)
+ F_SET(hcp, H_DIRTY);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, 0);
+ if (dbc != NULL)
+ (void)__ham_release_meta(dbc);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+
+ REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc_recover --
+ * Recover the batch creation of a set of pages for a new database.
+ *
+ * PUBLIC: int __ham_groupalloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc_args *argp;
+ DBMETA *mmeta;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ mmeta = NULL;
+ modified = 0;
+ REC_PRINT(__ham_groupalloc_print);
+ REC_INTRO(__ham_groupalloc_read, 0);
+
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &mmeta)) != 0) {
+ if (DB_REDO(op)) {
+ /* Page should have existed. */
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ } else {
+ ret = 0;
+ goto done;
+ }
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(mmeta));
+ cmp_p = log_compare(&LSN(mmeta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(mmeta), &argp->meta_lsn);
+
+ /*
+ * Basically, we used mpool to allocate a chunk of pages.
+ * We need to either add those to a free list (in the undo
+ * case) or initialize them (in the redo case).
+ *
+ * If we are redoing and this is a hash subdatabase, it's possible
+ * that the pages were never allocated, so we'd better check for
+ * that and handle it here.
+ */
+ if (DB_REDO(op)) {
+ if ((ret = __ham_alloc_pages(file_dbp, argp, lsnp)) != 0)
+ goto out;
+ if (cmp_p == 0) {
+ LSN(mmeta) = *lsnp;
+ modified = 1;
+ }
+ } else if (DB_UNDO(op)) {
+ /*
+ * Reset the last page back to its preallocation state.
+ */
+ pgno = argp->start_pgno + argp->num - 1;
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0) {
+
+ if (log_compare(&pagep->lsn, lsnp) == 0)
+ ZERO_LSN(pagep->lsn);
+
+ if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ } else if (ret != DB_PAGE_NOTFOUND)
+ goto out;
+ /*
+ * Always put the pages into the limbo list and free them later.
+ */
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->start_pgno, argp->num)) != 0)
+ goto out;
+ if (cmp_n == 0) {
+ LSN(mmeta) = argp->meta_lsn;
+ modified = 1;
+ }
+ }
+
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, modified ? DB_MPOOL_DIRTY : 0);
+
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
+}
+
+/*
+ * __ham_alloc_pages --
+ *
+ * Called during redo of a file create. We create new pages in the file
+ * using the MPOOL_NEW_GROUP flag. We then log the meta-data page with a
+ * __crdel_metasub message. If we manage to crash without the newly written
+ * pages getting to disk (I'm not sure this can happen anywhere except our
+ * test suite?!), then we need to go through and recreate the final pages.
+ * Hash normally has holes in its files and handles them appropriately.
+ */
+static int
+__ham_alloc_pages(dbp, argp, lsnp)
+ DB *dbp;
+ __ham_groupalloc_args *argp;
+ DB_LSN *lsnp;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ /* Read the last page of the allocation. */
+ pgno = argp->start_pgno + argp->num - 1;
+
+ /* If the page exists, and it has been initialized, then we're done. */
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0) {
+ if (NUM_ENT(pagep) == 0 && IS_ZERO_LSN(pagep->lsn))
+ goto reinit_page;
+ if ((ret = mpf->put(mpf, pagep, 0)) != 0)
+ return (ret);
+ return (0);
+ }
+
+ /* Had to create the page. */
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
+ return (ret);
+ }
+
+reinit_page:
+ /* Initialize the newly allocated page. */
+ P_INIT(pagep, dbp->pgsize, pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ pagep->lsn = *lsnp;
+
+ if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __ham_curadj_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_curadj_args *argp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ int ret;
+ HASH_CURSOR *hcp;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_curadj_print);
+ REC_INTRO(__ham_curadj_read, 0);
+
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /*
+	 * Undo the adjustment by reinitializing the cursor to look
+	 * like the one that was used to do the adjustment, then
+	 * invert the add so that we undo the adjustment.
+ */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hcp->pgno = argp->pgno;
+ hcp->indx = argp->indx;
+ hcp->dup_off = argp->dup_off;
+ hcp->order = argp->order;
+ if (!argp->add)
+ F_SET(hcp, H_DELETED);
+ (void)__ham_c_update(dbc, argp->len, !argp->add, argp->is_dup);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_chgpg_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_chgpg_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_chgpg_args *argp;
+ BTREE_CURSOR *opdcp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp, *ldbp;
+ DBC *dbc;
+ int ret;
+ DBC *cp;
+ HASH_CURSOR *lcp;
+ u_int32_t order, indx;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_chgpg_print);
+ REC_INTRO(__ham_chgpg_read, 0);
+
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /* Overloaded fields for DB_HAM_DEL*PG */
+ indx = argp->old_indx;
+ order = argp->new_indx;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, file_dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == file_dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ switch (argp->mode) {
+ case DB_HAM_DELFIRSTPG:
+ if (lcp->pgno != argp->new_pgno)
+ break;
+ if (lcp->indx != indx ||
+ !F_ISSET(lcp, H_DELETED) ||
+ lcp->order >= order) {
+ lcp->pgno = argp->old_pgno;
+ if (lcp->indx == indx)
+ lcp->order -= order;
+ }
+ break;
+ case DB_HAM_DELMIDPG:
+ case DB_HAM_DELLASTPG:
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == indx &&
+ F_ISSET(lcp, H_DELETED) &&
+ lcp->order >= order) {
+ lcp->pgno = argp->old_pgno;
+ lcp->order -= order;
+ lcp->indx = 0;
+ }
+ break;
+ case DB_HAM_CHGPG:
+ /*
+ * If we're doing a CHGPG, we're undoing
+ * the move of a non-deleted item to a
+ * new page. Any cursors with the deleted
+ * flag set do not belong to this item;
+ * don't touch them.
+ */
+ if (F_ISSET(lcp, H_DELETED))
+ break;
+ /* FALLTHROUGH */
+ case DB_HAM_SPLIT:
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == argp->new_indx) {
+ lcp->indx = argp->old_indx;
+ lcp->pgno = argp->old_pgno;
+ }
+ break;
+ case DB_HAM_DUP:
+ if (lcp->opd == NULL)
+ break;
+ opdcp = (BTREE_CURSOR *)lcp->opd->internal;
+ if (opdcp->pgno != argp->new_pgno ||
+ opdcp->indx != argp->new_indx)
+ break;
+
+ if (F_ISSET(opdcp, C_DELETED))
+ F_SET(lcp, H_DELETED);
+ /*
+ * We can't close a cursor while we have the
+ * dbp mutex locked, since c_close reacquires
+ * it. It should be safe to drop the mutex
+ * here, though, since newly opened cursors
+ * are put only at the end of the tailq and
+ * the cursor we're adjusting can't be closed
+ * under us.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ if ((ret = lcp->opd->c_close(lcp->opd)) != 0)
+ goto out;
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+ lcp->opd = NULL;
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
diff --git a/libdb/hash/hash_reclaim.c b/libdb/hash/hash_reclaim.c
new file mode 100644
index 0000000..444d32e
--- /dev/null
+++ b/libdb/hash/hash_reclaim.c
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+
+/*
+ * __ham_reclaim --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list. For now, we link each freed page on the list
+ * separately. If people really store hash databases in subdatabases
+ * and do a lot of creates and deletes, this is going to be a problem,
+ * because hash needs chunks of contiguous storage. We may eventually
+ * need to go to a model where we maintain the free list with chunks of
+ * contiguous pages as well.
+ *
+ * PUBLIC: int __ham_reclaim __P((DB *, DB_TXN *txn));
+ */
+int
+__ham_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_reclaim_callback, dbc, 1)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_truncate --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list.
+ *
+ * PUBLIC: int __ham_truncate __P((DB *, DB_TXN *txn, u_int32_t *));
+ */
+int
+__ham_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ db_trunc_param trunc;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ trunc.count = 0;
+ trunc.dbc = dbc;
+
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_truncate_callback, &trunc, 1)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ *countp = trunc.count;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
diff --git a/libdb/hash/hash_stat.c b/libdb/hash/hash_stat.c
new file mode 100644
index 0000000..9858595
--- /dev/null
+++ b/libdb/hash/hash_stat.c
@@ -0,0 +1,372 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+
+static int __ham_stat_callback __P((DB *, PAGE *, void *, int *));
+
+/*
+ * __ham_stat --
+ * Gather/print the hash statistics
+ *
+ * PUBLIC: int __ham_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__ham_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_HASH_STAT *sp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ mpf = dbp->mpf;
+ sp = NULL;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+ /* Copy the fields that we have. */
+ sp->hash_nkeys = hcp->hdr->dbmeta.key_count;
+ sp->hash_ndata = hcp->hdr->dbmeta.record_count;
+ sp->hash_pagesize = dbp->pgsize;
+ sp->hash_buckets = hcp->hdr->max_bucket + 1;
+ sp->hash_magic = hcp->hdr->dbmeta.magic;
+ sp->hash_version = hcp->hdr->dbmeta.version;
+ sp->hash_metaflags = hcp->hdr->dbmeta.flags;
+ sp->hash_ffactor = hcp->hdr->ffactor;
+
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS)
+ goto done;
+
+ /* Walk the free list, counting pages. */
+ for (sp->hash_free = 0, pgno = hcp->hdr->dbmeta.free;
+ pgno != PGNO_INVALID;) {
+ ++sp->hash_free;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ }
+
+ /* Now traverse the rest of the table. */
+ sp->hash_nkeys = 0;
+ sp->hash_ndata = 0;
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_READ, __ham_stat_callback, sp, 0)) != 0)
+ goto err;
+
+ if (!F_ISSET(dbp, DB_AM_RDONLY)) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err;
+ hcp->hdr->dbmeta.key_count = sp->hash_nkeys;
+ hcp->hdr->dbmeta.record_count = sp->hash_ndata;
+ }
+
+done:
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+
+ *(DB_HASH_STAT **)spp = sp;
+ return (0);
+
+err: if (sp != NULL)
+ __os_ufree(dbenv, sp);
+ if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+
+}
+
+/*
+ * __ham_traverse
+ * Traverse an entire hash table. We use the callback so that we
+ * can use this both for stat collection and for deallocation.
+ *
+ * PUBLIC: int __ham_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: int (*)(DB *, PAGE *, void *, int *), void *, int));
+ */
+int
+__ham_traverse(dbc, mode, callback, cookie, look_past_max)
+ DBC *dbc;
+ db_lockmode_t mode;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+ int look_past_max;
+{
+ DB *dbp;
+ DBC *opd;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HKEYDATA *hk;
+ db_pgno_t pgno, opgno;
+ int did_put, i, ret, t_ret;
+ u_int32_t bucket, spares_entry;
+
+ dbp = dbc->dbp;
+ opd = NULL;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * In a perfect world, we could simply read each page in the file
+ * and look at its page type to tally the information necessary.
+ * Unfortunately, the bucket locking that hash tables do to make
+	 * locking easy makes this a pain in the butt. We have to traverse
+ * duplicate, overflow and big pages from the bucket so that we
+ * don't access anything that isn't properly locked.
+ *
+ */
+ for (bucket = 0;; bucket++) {
+ /*
+ * We put the loop exit condition check here, because
+ * it made for a really vile extended ?: that made SCO's
+ * compiler drop core.
+ *
+ * If look_past_max is not set, we can stop at max_bucket;
+ * if it is set, we need to include pages that are part of
+ * the current doubling but beyond the highest bucket we've
+ * split into, as well as pages from a "future" doubling
+ * that may have been created within an aborted
+ * transaction. To do this, keep looping (and incrementing
+ * bucket) until the corresponding spares array entries
+ * cease to be defined.
+ */
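+		/*
+		 * For instance (values assumed): with max_bucket == 5,
+		 * buckets 6 and 7 belong to the same doubling and are still
+		 * visited; the walk stops at bucket 8 once
+		 * spares[__db_log2(9)], the next doubling's entry, turns
+		 * out to be 0.
+		 */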
+ if (look_past_max) {
+ spares_entry = __db_log2(bucket + 1);
+ if (spares_entry >= NCACHED ||
+ hcp->hdr->spares[spares_entry] == 0)
+ break;
+ } else {
+ if (bucket > hcp->hdr->max_bucket)
+ break;
+ }
+
+ hcp->bucket = bucket;
+ hcp->pgno = pgno = BUCKET_TO_PAGE(hcp, bucket);
+ for (ret = __ham_get_cpage(dbc, mode); ret == 0;
+ ret = __ham_next_cpage(dbc, pgno, 0)) {
+
+ /*
+ * If we are cleaning up pages past the max_bucket,
+ * then they may be on the free list and have their
+			 * next pointers set, but they should be ignored. In
+ * fact, we really ought to just skip anybody who is
+ * not a valid page.
+ */
+ if (TYPE(hcp->page) == P_INVALID)
+ break;
+ pgno = NEXT_PGNO(hcp->page);
+
+ /*
+ * Go through each item on the page checking for
+ * duplicates (in which case we have to count the
+ * duplicate pages) or big key/data items (in which
+ * case we have to count those pages).
+ */
+ for (i = 0; i < NUM_ENT(hcp->page); i++) {
+ hk = (HKEYDATA *)P_ENTRY(dbp, hcp->page, i);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&opgno, HOFFDUP_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_c_newopd(dbc,
+ opgno, NULL, &opd)) != 0)
+ return (ret);
+ if ((ret = __bam_traverse(opd,
+ DB_LOCK_READ, opgno,
+ callback, cookie))
+ != 0)
+ goto err;
+ if ((ret = opd->c_close(opd)) != 0)
+ return (ret);
+ opd = NULL;
+ break;
+ case H_OFFPAGE:
+ /*
+ * We are about to get a big page
+ * which will use the same spot that
+ * the current page uses, so we need
+ * to restore the current page before
+ * looking at it again.
+ */
+ memcpy(&opgno, HOFFPAGE_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_traverse_big(dbp,
+ opgno, callback, cookie)) != 0)
+ goto err;
+ break;
+ case H_KEYDATA:
+ break;
+ }
+ }
+
+ /* Call the callback on main pages. */
+ if ((ret = callback(dbp,
+ hcp->page, cookie, &did_put)) != 0)
+ goto err;
+
+ if (did_put)
+ hcp->page = NULL;
+ if (pgno == PGNO_INVALID)
+ break;
+ }
+ if (ret != 0)
+ goto err;
+
+ if (STD_LOCKING(dbc))
+ (void)dbp->dbenv->lock_put(dbp->dbenv, &hcp->lock);
+
+ if (hcp->page != NULL) {
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
+ return (ret);
+ hcp->page = NULL;
+ }
+
+ }
+err: if (opd != NULL &&
+ (t_ret = opd->c_close(opd)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+static int
+__ham_stat_callback(dbp, pagep, cookie, putp)
+ DB *dbp;
+ PAGE *pagep;
+ void *cookie;
+ int *putp;
+{
+ DB_HASH_STAT *sp;
+ DB_BTREE_STAT bstat;
+ db_indx_t indx, len, off, tlen, top;
+ u_int8_t *hk;
+ int ret;
+
+ *putp = 0;
+ sp = cookie;
+
+ switch (pagep->type) {
+ case P_INVALID:
+ /*
+ * Hash pages may be wholly zeroed; this is not a bug.
+ * Obviously such pages have no data, so we can just proceed.
+ */
+ break;
+ case P_HASH:
+ /*
+ * We count the buckets and the overflow pages
+ * separately and tally their bytes separately
+ * as well. We need to figure out if this page
+ * is a bucket.
+ */
+ if (PREV_PGNO(pagep) == PGNO_INVALID)
+ sp->hash_bfree += P_FREESPACE(dbp, pagep);
+ else {
+ sp->hash_overflows++;
+ sp->hash_ovfl_free += P_FREESPACE(dbp, pagep);
+ }
+ top = NUM_ENT(pagep);
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(dbp, pagep, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ sp->hash_ndata++;
+ break;
+ case H_DUPLICATE:
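+				/*
+				 * The loop below relies on each on-page
+				 * duplicate element being laid out as a
+				 * db_indx_t length, the data bytes, and the
+				 * length repeated, hence the step of
+				 * len + 2 * sizeof(db_indx_t) per element.
+				 */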
+ tlen = LEN_HDATA(dbp, pagep, 0, indx);
+ hk = H_PAIRDATA(dbp, pagep, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ sp->hash_ndata++;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ sp->hash_nkeys += H_NUMPAIRS(pagep);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ /*
+ * These are all btree pages; get a correct
+ * cookie and call them. Then add appropriate
+ * fields into our stat structure.
+ */
+ memset(&bstat, 0, sizeof(bstat));
+ bstat.bt_dup_pgfree = 0;
+ bstat.bt_int_pgfree = 0;
+ bstat.bt_leaf_pgfree = 0;
+ bstat.bt_ndata = 0;
+ if ((ret = __bam_stat_callback(dbp, pagep, &bstat, putp)) != 0)
+ return (ret);
+ sp->hash_dup++;
+ sp->hash_dup_free += bstat.bt_leaf_pgfree +
+ bstat.bt_dup_pgfree + bstat.bt_int_pgfree;
+ sp->hash_ndata += bstat.bt_ndata;
+ break;
+ case P_OVERFLOW:
+ sp->hash_bigpages++;
+ sp->hash_big_bfree += P_OVFLSPACE(dbp, dbp->pgsize, pagep);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pagep->pgno));
+ }
+
+ return (0);
+}
diff --git a/libdb/hash/hash_upgrade.c b/libdb/hash/hash_upgrade.c
new file mode 100644
index 0000000..d08b7ef
--- /dev/null
+++ b/libdb/hash/hash_upgrade.c
@@ -0,0 +1,266 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __ham_30_hashmeta --
+ * Upgrade the database from version 4/5 to version 6.
+ *
+ * PUBLIC: int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__ham_30_hashmeta(dbp, real_name, obuf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *obuf;
+{
+ DB_ENV *dbenv;
+ HASHHDR *oldmeta;
+ HMETA30 newmeta;
+ u_int32_t *o_spares, *n_spares;
+ u_int32_t fillf, maxb, nelem;
+ int i, max_entry, ret;
+
+ dbenv = dbp->dbenv;
+ memset(&newmeta, 0, sizeof(newmeta));
+
+ oldmeta = (HASHHDR *)obuf;
+
+ /*
+ * The first 32 bytes are similar. The only change is the version
+ * and that we removed the ovfl_point and have the page type now.
+ */
+
+ newmeta.dbmeta.lsn = oldmeta->lsn;
+ newmeta.dbmeta.pgno = oldmeta->pgno;
+ newmeta.dbmeta.magic = oldmeta->magic;
+ newmeta.dbmeta.version = 6;
+ newmeta.dbmeta.pagesize = oldmeta->pagesize;
+ newmeta.dbmeta.type = P_HASHMETA;
+
+ /* Move flags */
+ newmeta.dbmeta.flags = oldmeta->flags;
+
+ /* Copy the free list, which has changed its name but works the same. */
+ newmeta.dbmeta.free = oldmeta->last_freed;
+
+ /* Copy: max_bucket, high_mask, low-mask, ffactor, nelem, h_charkey */
+ newmeta.max_bucket = oldmeta->max_bucket;
+ newmeta.high_mask = oldmeta->high_mask;
+ newmeta.low_mask = oldmeta->low_mask;
+ newmeta.ffactor = oldmeta->ffactor;
+ newmeta.nelem = oldmeta->nelem;
+ newmeta.h_charkey = oldmeta->h_charkey;
+
+ /*
+ * There was a bug in 2.X versions where the nelem could go negative.
+ * In general, this is considered "bad." If it does go negative
+ * (that is, very large and positive), we'll die trying to dump and
+ * load this database. So, let's see if we can fix it here.
+ */
+ nelem = newmeta.nelem;
+ fillf = newmeta.ffactor;
+ maxb = newmeta.max_bucket;
+
+ if ((fillf != 0 && fillf * maxb < 2 * nelem) ||
+ (fillf == 0 && nelem > 0x8000000))
+ newmeta.nelem = 0;
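+	/*
+	 * Worked example (numbers assumed): with ffactor == 10 and
+	 * max_bucket == 100 the table plausibly holds on the order of a
+	 * thousand elements, so a wrapped nelem such as 0xfffffff0 fails
+	 * the fillf * maxb < 2 * nelem test above and is reset to zero.
+	 */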
+
+ /*
+ * We now have to convert the spares array. The old spares array
+ * contained the total number of extra pages allocated prior to
+ * the bucket that begins the next doubling. The new spares array
+ * contains the page number of the first bucket in the next doubling
+ * MINUS the bucket number of that bucket.
+ */
+ o_spares = oldmeta->spares;
+ n_spares = newmeta.spares;
+ max_entry = __db_log2(maxb + 1); /* highest spares entry in use */
+ n_spares[0] = 1;
+ for (i = 1; i < NCACHED && i <= max_entry; i++)
+ n_spares[i] = 1 + o_spares[i - 1];
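+	/*
+	 * Illustration (old values assumed): if the old array began
+	 * {0, 2, 5, ...} and max_entry >= 3, the new array becomes
+	 * {1, 1, 3, 6, ...} -- each new entry is one more than the old
+	 * cumulative count of extra pages before that doubling.
+	 */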
+
+ /* Replace the unique ID. */
+ if ((ret = __os_fileid(dbenv, real_name, 1, newmeta.dbmeta.uid)) != 0)
+ return (ret);
+
+ /* Overwrite the original. */
+ memcpy(oldmeta, &newmeta, sizeof(newmeta));
+
+ return (0);
+}
+
+/*
+ * __ham_30_sizefix --
+ * Make sure that all hash pages belonging to the current
+ * hash doubling are within the bounds of the file.
+ *
+ * PUBLIC: int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+ */
+int
+__ham_30_sizefix(dbp, fhp, realname, metabuf)
+ DB *dbp;
+ DB_FH *fhp;
+ char *realname;
+ u_int8_t *metabuf;
+{
+ u_int8_t buf[DB_MAX_PGSIZE];
+ DB_ENV *dbenv;
+ HMETA30 *meta;
+ db_pgno_t last_actual, last_desired;
+ int ret;
+ size_t nw;
+ u_int32_t pagesize;
+
+ dbenv = dbp->dbenv;
+ memset(buf, 0, DB_MAX_PGSIZE);
+
+ meta = (HMETA30 *)metabuf;
+ pagesize = meta->dbmeta.pagesize;
+
+ /*
+ * Get the last page number. To do this, we'll need dbp->pgsize
+ * to be set right, so slam it into place.
+ */
+ dbp->pgsize = pagesize;
+ if ((ret = __db_lastpgno(dbp, realname, fhp, &last_actual)) != 0)
+ return (ret);
+
+ /*
+ * The last bucket in the doubling is equal to high_mask; calculate
+ * the page number that implies.
+ */
+ last_desired = BS_TO_PAGE(meta->high_mask, meta->spares);
+
+ /*
+ * If last_desired > last_actual, we need to grow the file. Write
+ * a zeroed page where last_desired would go.
+ */
+ if (last_desired > last_actual) {
+ if ((ret = __os_seek(dbenv,
+ fhp, pagesize, last_desired, 0, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, pagesize, &nw)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_31_hashmeta --
+ * Upgrade the database from version 6 to version 7.
+ *
+ * PUBLIC: int __ham_31_hashmeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hashmeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HMETA31 *newmeta;
+ HMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ newmeta = (HMETA31 *)h;
+ oldmeta = (HMETA30 *)h;
+
+ /*
+ * Copy the fields down the page.
+ * The fields may overlap so start at the bottom and use memmove().
+ */
+ memmove(newmeta->spares, oldmeta->spares, sizeof(oldmeta->spares));
+ newmeta->h_charkey = oldmeta->h_charkey;
+ newmeta->nelem = oldmeta->nelem;
+ newmeta->ffactor = oldmeta->ffactor;
+ newmeta->low_mask = oldmeta->low_mask;
+ newmeta->high_mask = oldmeta->high_mask;
+ newmeta->max_bucket = oldmeta->max_bucket;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 7;
+
+ /* Upgrade the flags. */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, DB_HASH_DUPSORT);
+
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __ham_31_hash --
+ * Upgrade the database hash leaf pages.
+ *
+ * PUBLIC: int __ham_31_hash
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hash(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HKEYDATA *hk;
+ db_pgno_t pgno, tpgno;
+ db_indx_t indx;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ for (indx = 0; indx < NUM_ENT(h); indx += 2) {
+ hk = (HKEYDATA *)H_PAIRDATA(dbp, h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFDUP) {
+ memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ tpgno = pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &tpgno)) != 0)
+ break;
+ if (pgno != tpgno) {
+ *dirtyp = 1;
+ memcpy(HOFFDUP_PGNO(hk),
+ &tpgno, sizeof(db_pgno_t));
+ }
+ }
+ }
+
+ return (ret);
+}
diff --git a/libdb/hash/hash_verify.c b/libdb/hash/hash_verify.c
new file mode 100644
index 0000000..ebe3fea
--- /dev/null
+++ b/libdb/hash/hash_verify.c
@@ -0,0 +1,1079 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+
+static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
+static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ u_int32_t));
+static int __ham_vrfy_item __P((DB *,
+ VRFY_DBINFO *, db_pgno_t, PAGE *, u_int32_t, u_int32_t));
+
+/*
+ * __ham_vrfy_meta --
+ * Verify the hash-specific part of a metadata page.
+ *
+ * Note that unlike btree, we don't save things off, because we
+ * will need most everything again to verify each page and the
+ * amount of state here is significant.
+ *
+ * PUBLIC: int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__ham_vrfy_meta(dbp, vdp, m, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ HASH *hashp;
+ VRFY_PAGEINFO *pip;
+ int i, ret, t_ret, isbad;
+ u_int32_t pwr, mbucket;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ hashp = dbp->h_internal;
+
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ /*
+ * If we haven't already checked the common fields in pagezero,
+ * check them.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &m->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* h_charkey */
+ if (!LF_ISSET(DB_NOORDERCHK))
+ if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbp->dbenv,
+"Page %lu: database has different custom hash function; reverify with DB_NOORDERCHK set",
+ (u_long)pgno));
+ /*
+ * Return immediately; this is probably a sign
+ * of user error rather than database corruption, so
+ * we want to avoid extraneous errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /* max_bucket must be less than the last pgno. */
+ if (m->max_bucket > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Impossible max_bucket %lu on meta page",
+ (u_long)pgno, (u_long)m->max_bucket));
+ /*
+ * Most other fields depend somehow on max_bucket, so
+ * we just return--there will be lots of extraneous
+ * errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /*
+ * max_bucket, high_mask and low_mask: high_mask must be one
+ * less than the next power of two above max_bucket, and
+ * low_mask must be one less than the power of two below it.
+ */
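+ /* pwr is the smallest power of two strictly greater than max_bucket. */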
+ pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
+ if (m->high_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect high_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->high_mask, (u_long)pwr - 1));
+ isbad = 1;
+ }
+ pwr >>= 1;
+ if (m->low_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect low_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->low_mask, (u_long)pwr - 1));
+ isbad = 1;
+ }
+
+ /* ffactor: no check possible. */
+ pip->h_ffactor = m->ffactor;
+
+ /*
+ * nelem: just make sure it's not astronomical for now. This is the
+ * same check that hash_upgrade does, since there was a bug in 2.X
+ * which could make nelem go "negative".
+ */
+ if (m->nelem > 0x80000000) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: suspiciously high nelem of %lu",
+ (u_long)pgno, (u_long)m->nelem));
+ isbad = 1;
+ pip->h_nelem = 0;
+ } else
+ pip->h_nelem = m->nelem;
+
+ /* flags */
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ /* XXX: Why is the DB_HASH_SUBDB flag necessary? */
+
+ /* spares array */
+ for (i = 0; i < NCACHED && m->spares[i] != 0; i++) {
+ /*
+ * We set mbucket to the maximum bucket that would use a given
+ * spares entry; we want to ensure that it's always less
+ * than last_pgno.
+ */
+ mbucket = (1 << i) - 1;
+ if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: spares array entry %d is invalid",
+ (u_long)pgno, i));
+ isbad = 1;
+ }
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy --
+ * Verify hash page.
+ *
+ * PUBLIC: int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ u_int32_t ent, himark, inpend;
+ db_indx_t *inp;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* Sanity check our flags and page type. */
+ if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy",
+ flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_HASH) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Verify and save off fields common to all PAGEs. */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Each offset from 0 to NUM_ENT(h) must be lower
+ * than the previous one, higher than the current end of the inp array,
+ * and lower than the page size.
+ *
+ * In any case, we return immediately if things are bad, as it would
+ * be unsafe to proceed.
+ */
+ inp = P_INP(dbp, h);
+ for (ent = 0, himark = dbp->pgsize,
+ inpend = (u_int32_t)((u_int8_t *)inp - (u_int8_t *)h);
+ ent < NUM_ENT(h); ent++)
+ if (inp[ent] >= himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu is out of order or nonsensical",
+ (u_long)pgno, (u_long)ent));
+ isbad = 1;
+ goto err;
+ } else if (inpend >= himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: entries array collided with data",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+
+ } else {
+ himark = inp[ent];
+ inpend += sizeof(db_indx_t);
+ if ((ret = __ham_vrfy_item(
+ dbp, vdp, pgno, h, ent, flags)) != 0)
+ goto err;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_item --
+ * Given a hash page and an offset, sanity-check the item itself,
+ * and save off any overflow items or off-page dup children as necessary.
+ */
+static int
+__ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ u_int32_t i, flags;
+{
+ HOFFPAGE hop;
+ HOFFDUP hod;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ db_indx_t offset, len, dlen, elen;
+ int ret, t_ret;
+ u_int8_t *databuf;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (HPAGE_TYPE(dbp, h, i)) {
+ case H_KEYDATA:
+ /* Nothing to do here--everything but the type field is data */
+ break;
+ case H_DUPLICATE:
+ /* Are we a datum or a key? Better be the former. */
+ if (i % 2 == 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash key stored as duplicate item %lu",
+ (u_long)pip->pgno, (u_long)i));
+ }
+ /*
+ * Dups are encoded as a series within a single HKEYDATA,
+ * in which each dup is surrounded by a copy of its length
+ * on either side (so that the series can be walked in either
+ * direction).  We loop through this series and make sure
+ * each dup is reasonable.
+ *
+ * Note that at this point, we've verified item i-1, so
+ * it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
+ */
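+ /* On-page layout of each dup entry: [dlen][data bytes][dlen], each length a db_indx_t. */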
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
+ databuf = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, databuf + offset, sizeof(db_indx_t));
+
+ /* Make sure the length is plausible. */
+ if (offset + DUP_SIZE(dlen) > len) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate item %lu has bad length",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Make sure the second copy of the length is the
+ * same as the first.
+ */
+ memcpy(&elen,
+ databuf + offset + dlen + sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ if (elen != dlen) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate item %lu has two different lengths",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ }
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (!LF_ISSET(DB_NOORDERCHK) &&
+ __ham_dups_unsorted(dbp, databuf, len))
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+ break;
+ case H_OFFPAGE:
+ /* Offpage item. Make sure pgno is sane, save off. */
+ memcpy(&hop, P_ENTRY(dbp, h, i), HOFFPAGE_SIZE);
+ if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
+ hop.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pip->pgno, (u_long)i, (u_long)hop.pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hop.pgno;
+ child.type = V_OVERFLOW;
+ child.tlen = hop.tlen; /* This will get checked later. */
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ break;
+ case H_OFFDUP:
+ /* Offpage duplicate item. Same drill. */
+ memcpy(&hod, P_ENTRY(dbp, h, i), HOFFDUP_SIZE);
+ if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
+ hod.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad page number",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hod.pgno;
+ child.type = V_DUPLICATE;
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ F_SET(pip, VRFY_HAS_DUPS);
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu has bad type",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_vrfy_structure --
+ * Verify the structure of a hash database.
+ *
+ * PUBLIC: int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_MPOOLFILE *mpf;
+ HMETA *m;
+ PAGE *h;
+ VRFY_PAGEINFO *pip;
+ int isbad, p, ret, t_ret;
+ db_pgno_t pgno;
+ u_int32_t bucket, spares_entry;
+
+ mpf = dbp->mpf;
+ pgset = vdp->pgset;
+ h = NULL;
+ ret = isbad = 0;
+
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Hash meta page referenced twice",
+ (u_long)meta_pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ return (ret);
+
+ /* Get the meta page; we'll need it frequently. */
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &m)) != 0)
+ return (ret);
+
+ /* Loop through bucket by bucket. */
+ for (bucket = 0; bucket <= m->max_bucket; bucket++)
+ if ((ret =
+ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * There may be unused hash pages corresponding to buckets
+ * that have been allocated but not yet used. These may be
+ * part of the current doubling above max_bucket, or they may
+ * correspond to buckets that were used in a transaction
+ * that then aborted.
+ *
+ * Loop through them, as far as the spares array defines them,
+ * and make sure they're all empty.
+ *
+ * Note that this should be safe, since we've already verified
+ * that the spares array is sane.
+ */
+ for (bucket = m->max_bucket + 1; spares_entry = __db_log2(bucket + 1),
+ spares_entry < NCACHED && m->spares[spares_entry] != 0; bucket++) {
+ pgno = BS_TO_PAGE(bucket, m->spares);
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* It's okay if this page is totally zeroed; unmark it. */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /* It's also OK if this page is simply invalid. */
+ if (pip->type == P_INVALID) {
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+
+ if (pip->type != P_HASH) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash bucket %lu maps to non-hash page",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ } else if (pip->entries != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: non-empty page in unused hash bucket %lu",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ } else {
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: above max_bucket referenced",
+ (u_long)pgno));
+ isbad = 1;
+ } else {
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+ }
+
+ /* If we got here, it's an error. */
+ (void)__db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
+ goto err;
+ }
+
+err: if ((t_ret = mpf->put(mpf, m, 0)) != 0)
+ return (t_ret);
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD: ret);
+}
+
+/*
+ * __ham_vrfy_bucket --
+ * Verify a given bucket.
+ */
+static int
+__ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ u_int32_t bucket, flags;
+{
+ HASH *hashp;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *mip, *pip;
+ int ret, t_ret, isbad, p;
+ db_pgno_t pgno, next_pgno;
+ DBC *cc;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ isbad = 0;
+ pip = NULL;
+ cc = NULL;
+
+ hashp = dbp->h_internal;
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
+ return (ret);
+
+ /* Calculate the first pgno for this bucket. */
+ pgno = BS_TO_PAGE(bucket, m->spares);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* Make sure we got a plausible page number. */
+ if (pgno > vdp->last_pgno || pip->type != P_HASH) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: impossible first page in bucket %lu",
+ (u_long)pgno, (u_long)bucket));
+ /* Unsafe to continue. */
+ isbad = 1;
+ goto err;
+ }
+
+ if (pip->prev_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first page in hash bucket %lu has a prev_pgno",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ }
+
+ /*
+ * Set flags for dups and sorted dups.
+ */
+ flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
+ flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;
+
+ /* Loop until we find a fatal bug, or until we run out of pages. */
+ for (;;) {
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page referenced twice",
+ (u_long)pgno));
+ isbad = 1;
+ /* Unsafe to continue. */
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
+ goto err;
+
+ /*
+ * Hash pages that nothing has ever hashed to may never
+ * have actually come into existence, and may appear to be
+ * entirely zeroed. This is acceptable, and since there's
+ * no real way for us to know whether this has actually
+ * occurred, we clear the "wholly zeroed" flag on every
+ * hash page. A wholly zeroed page, by nature, will appear
+ * to have no flags set and zero entries, so should
+ * otherwise verify correctly.
+ */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /* If we have dups, our meta page had better know about it. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS) &&
+ !F_ISSET(mip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicates present in non-duplicate database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /*
+ * If the database has sorted dups, this page had better
+ * not have unsorted ones.
+ */
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
+ F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unsorted dups in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /* Walk overflow chains and offpage dup trees. */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW) {
+ if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ } else if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(dbp,
+ vdp, child->pgno, flags)) != 0) {
+ isbad = 1;
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ child->pgno, NULL, NULL,
+ flags | ST_RECNUM | ST_DUPSET | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* If it's safe to check that things hash properly, do so. */
+ if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
+ (ret = __ham_vrfy_hashing(dbp, pip->entries,
+ m, bucket, pgno, flags, hfunc)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ next_pgno = pip->next_pgno;
+ ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
+
+ pip = NULL;
+ if (ret != 0)
+ goto err;
+
+ if (next_pgno == PGNO_INVALID)
+ break; /* End of the bucket. */
+
+ /* We already checked this, but just in case... */
+ if (!IS_VALID_PGNO(next_pgno)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page has bad next_pgno",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
+ goto err;
+
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page has bad prev_pgno",
+ (u_long)next_pgno));
+ isbad = 1;
+ }
+ pgno = next_pgno;
+ }
+
+err: if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
+ ret = t_ret;
+ if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_hashing --
+ * Verify that all items on a given hash page hash correctly.
+ *
+ * PUBLIC: int __ham_vrfy_hashing __P((DB *,
+ * PUBLIC: u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
+ * PUBLIC: u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+ */
+int
+__ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
+ DB *dbp;
+ u_int32_t nentries;
+ HMETA *m;
+ u_int32_t thisbucket;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+{
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t hval, bucket;
+
+ mpf = dbp->mpf;
+ ret = isbad = 0;
+
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_REALLOC);
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ for (i = 0; i < nentries; i += 2) {
+ /*
+ * We've already verified the page integrity and that of any
+ * overflow chains linked off it; it is therefore safe to use
+ * __db_ret. It's also not all that much slower, since we have
+ * to copy every hash item to deal with alignment anyway; we
+ * can tweak this a bit if this proves to be a bottleneck,
+ * but for now, take the easy route.
+ */
+ if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
+ goto err;
+ hval = hfunc(dbp, dbt.data, dbt.size);
+
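+ /*
+ * Standard linear-hashing bucket selection: mask the hash value with
+ * high_mask; if that bucket has not been split into existence yet
+ * (it is above max_bucket), fall back to low_mask.
+ */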
+ bucket = hval & m->high_mask;
+ if (bucket > m->max_bucket)
+ bucket = bucket & m->low_mask;
+
+ if (bucket != thisbucket) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu hashes incorrectly",
+ (u_long)pgno, (u_long)i));
+ isbad = 1;
+ }
+ }
+
+err: if (dbt.data != NULL)
+ __os_ufree(dbp->dbenv, dbt.data);
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * hash page.
+ *
+ * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ db_pgno_t dpgno;
+ int ret, err_ret, t_ret;
+ u_int32_t himark, tlen;
+ u_int8_t *hk;
+ void *buf;
+ u_int32_t dlen, len, i;
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ err_ret = 0;
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &buf)) != 0)
+ return (ret);
+
+ himark = dbp->pgsize;
+ for (i = 0;; i++) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 0, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL)
+ break;
+
+ if (ret == 0) {
+ hk = P_ENTRY(dbp, h, i);
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
+ if ((u_int32_t)(hk + len - (u_int8_t *)h) >
+ dbp->pgsize) {
+ /*
+ * Item is unsafely large; either continue
+ * or set it to the whole page, depending on
+ * aggressiveness.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ continue;
+ len = dbp->pgsize -
+ (u_int32_t)(hk - (u_int8_t *)h);
+ err_ret = DB_VERIFY_BAD;
+ }
+ switch (HPAGE_PTYPE(hk)) {
+ default:
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ break;
+ err_ret = DB_VERIFY_BAD;
+ /* FALLTHROUGH */
+ case H_KEYDATA:
+keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
+ dbt.size = len;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFPAGE:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ if ((ret = __db_safe_goff(dbp, vdp,
+ dpgno, &dbt, &buf, flags)) != 0) {
+ err_ret = ret;
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFDUP:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ /* UNKNOWN iff pgno is bad or we're a key. */
+ if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ } else if ((ret = __db_salvage_duptree(dbp,
+ vdp, dpgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+ break;
+ case H_DUPLICATE:
+ /*
+ * We're a key; printing dups will seriously
+ * foul the output. If we're being aggressive,
+ * pretend this is a key and let the app.
+ * programmer sort out the mess.
+ */
+ if (i % 2 == 0) {
+ err_ret = ret;
+ if (LF_ISSET(DB_AGGRESSIVE))
+ goto keydata;
+ break;
+ }
+
+ /* Too small to have any data. */
+ if (len <
+ HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Loop until we hit the total length. */
+ for (tlen = 0; tlen + sizeof(db_indx_t) < len;
+ tlen += dlen) {
+ tlen += sizeof(db_indx_t);
+ memcpy(&dlen, hk, sizeof(db_indx_t));
+ /*
+ * If dlen is too long, print all the
+ * rest of the dup set in a chunk.
+ */
+ if (dlen + tlen > len)
+ dlen = len - tlen;
+ memcpy(buf, hk + tlen, dlen);
+ dbt.size = dlen;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ tlen += sizeof(db_indx_t);
+ }
+ break;
+ }
+ }
+ }
+
+ __os_free(dbp->dbenv, buf);
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __ham_meta2pgset --
+ * Return the set of hash pages corresponding to the given
+ * known-good meta page.
+ *
+ * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ * PUBLIC: DB *));
+ */
+int
+__ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *hmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t bucket, totpgs;
+ int ret, val;
+
+ /*
+ * We don't really need flags, but leave them for consistency with
+ * __bam_meta2pgset.
+ */
+ COMPQUIET(flags, 0);
+
+ DB_ASSERT(pgset != NULL);
+
+ mpf = dbp->mpf;
+ totpgs = 0;
+
+ /*
+ * Loop through all the buckets, pushing onto pgset the corresponding
+ * page(s) for each one.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+
+ /*
+ * We know the initial pgno is safe because the spares array has
+ * been verified.
+ *
+ * Safely walk the list of pages in this bucket.
+ */
+ for (;;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+ if (TYPE(h) == P_HASH) {
+
+ /*
+ * Make sure we don't go past the end of
+ * pgset.
+ */
+ if (++totpgs > vdp->last_pgno) {
+ (void)mpf->put(mpf, h, 0);
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0) {
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+
+ pgno = NEXT_PGNO(h);
+ } else
+ pgno = PGNO_INVALID;
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+
+ /* If the new pgno is wonky, go on to the next bucket. */
+ if (!IS_VALID_PGNO(pgno) ||
+ pgno == PGNO_INVALID)
+ break;
+
+ /*
+ * If we've touched this page before, we have a cycle;
+ * go on to the next bucket.
+ */
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
+ return (ret);
+ if (val != 0)
+ break;
+ }
+ }
+ return (0);
+}
+
+/*
+ * __ham_dups_unsorted --
+ * Takes a known-safe hash duplicate set and its total length.
+ * Returns 1 if there are out-of-order duplicates in this set,
+ * 0 if there are not.
+ */
+static int
+__ham_dups_unsorted(dbp, buf, len)
+ DB *dbp;
+ u_int8_t *buf;
+ u_int32_t len;
+{
+ DBT a, b;
+ db_indx_t offset, dlen;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ memset(&a, 0, sizeof(DBT));
+ memset(&b, 0, sizeof(DBT));
+
+ func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+ /*
+ * Loop through the dup set until we hit the end or we find
+ * a pair of dups that's out of order. b is always the current
+ * dup, a the one before it.
+ */
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, buf + offset, sizeof(db_indx_t));
+ b.data = buf + offset + sizeof(db_indx_t);
+ b.size = dlen;
+
+ if (a.data != NULL && func(dbp, &a, &b) > 0)
+ return (1);
+
+ a.data = b.data;
+ a.size = b.size;
+ }
+
+ return (0);
+}
diff --git a/libdb/hmac/hmac.c b/libdb/hmac/hmac.c
new file mode 100644
index 0000000..029b764
--- /dev/null
+++ b/libdb/hmac/hmac.c
@@ -0,0 +1,207 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Some parts of this code originally written by Adam Stubblefield,
+ * astubble@rice.edu.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h" /* for hash.h only */
+#include "dbinc/hash.h"
+#include "dbinc/hmac.h"
+
+#define HMAC_OUTPUT_SIZE 20
+#define HMAC_BLOCK_SIZE 64
+
+static void __db_hmac __P((u_int8_t *, u_int8_t *, size_t, u_int8_t *));
+
+/*
+ * !!!
+ * All of these functions use a ctx structure on the stack. The __db_SHA1Init
+ * call does not initialize the 64-byte buffer portion of it. The
+ * underlying SHA1 functions will properly pad the buffer if the data length
+ * is less than 64 bytes, so there isn't a chance of reading uninitialized
+ * memory.  Although it would be cleaner to do a memset(ctx.buffer, 0, 64),
+ * we do not want to incur that performance penalty when we don't have to.
+ */
+
+/*
+ * __db_hmac --
+ * Do a hashed MAC.
+ */
+static void
+__db_hmac(k, data, data_len, mac)
+ u_int8_t *k, *data, *mac;
+ size_t data_len;
+{
+ SHA1_CTX ctx;
+ u_int8_t key[HMAC_BLOCK_SIZE];
+ u_int8_t ipad[HMAC_BLOCK_SIZE];
+ u_int8_t opad[HMAC_BLOCK_SIZE];
+ u_int8_t tmp[HMAC_OUTPUT_SIZE];
+ int i;
+
+ memset(key, 0x00, HMAC_BLOCK_SIZE);
+ memset(ipad, 0x36, HMAC_BLOCK_SIZE);
+ memset(opad, 0x5C, HMAC_BLOCK_SIZE);
+
+ memcpy(key, k, HMAC_OUTPUT_SIZE);
+
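+ /*
+ * Standard HMAC construction (RFC 2104):
+ * mac = SHA1((key ^ opad) || SHA1((key ^ ipad) || data)).
+ */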
+ for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
+ ipad[i] ^= key[i];
+ opad[i] ^= key[i];
+ }
+
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, ipad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, data, data_len);
+ __db_SHA1Final(tmp, &ctx);
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, opad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, tmp, HMAC_OUTPUT_SIZE);
+ __db_SHA1Final(mac, &ctx);
+ return;
+}
+
+/*
+ * __db_chksum --
+ * Create a MAC/SHA1 checksum.
+ *
+ * PUBLIC: void __db_chksum __P((u_int8_t *, size_t, u_int8_t *, u_int8_t *));
+ */
+void
+__db_chksum(data, data_len, mac_key, store)
+ u_int8_t *data;
+ size_t data_len;
+ u_int8_t *mac_key;
+ u_int8_t *store;
+{
+ int sumlen;
+ u_int32_t hash4;
+ u_int8_t tmp[DB_MAC_KEY];
+
+ /*
+ * Since the checksum may live within the page of data being
+ * checksummed, and we will be overwriting that field after the
+ * checksum is computed, we zero it out first so that a known value
+ * is in place when we later verify the checksum.
+ */
+ if (mac_key == NULL)
+ sumlen = sizeof(u_int32_t);
+ else
+ sumlen = DB_MAC_KEY;
+ memset(store, 0, sumlen);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ memcpy(store, &hash4, sumlen);
+ } else {
+ memset(tmp, 0, DB_MAC_KEY);
+ __db_hmac(mac_key, data, data_len, tmp);
+ memcpy(store, tmp, sumlen);
+ }
+ return;
+}
+
+/*
+ * __db_derive_mac --
+ * Create a MAC/SHA1 key.
+ *
+ * PUBLIC: void __db_derive_mac __P((u_int8_t *, size_t, u_int8_t *));
+ */
+void
+__db_derive_mac(passwd, plen, mac_key)
+ u_int8_t *passwd;
+ size_t plen;
+ u_int8_t *mac_key;
+{
+ SHA1_CTX ctx;
+
+ /* Compute the MAC key. mac_key must be 20 bytes. */
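+ /* The derived key is SHA1(passwd || DB_MAC_MAGIC || passwd). */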
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Update(&ctx, (u_int8_t *)DB_MAC_MAGIC, strlen(DB_MAC_MAGIC));
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Final(mac_key, &ctx);
+
+ return;
+}
+
+/*
+ * __db_check_chksum --
+ * Verify a checksum.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_check_chksum __P((DB_ENV *,
+ * PUBLIC: DB_CIPHER *, u_int8_t *, void *, size_t, int));
+ */
+int
+__db_check_chksum(dbenv, db_cipher, chksum, data, data_len, is_hmac)
+ DB_ENV *dbenv;
+ DB_CIPHER *db_cipher;
+ u_int8_t *chksum;
+ void *data;
+ size_t data_len;
+ int is_hmac;
+{
+ int ret;
+ size_t sum_len;
+ u_int32_t hash4;
+ u_int8_t *mac_key, old[DB_MAC_KEY], new[DB_MAC_KEY];
+
+ /*
+ * If we are just doing checksumming and not encryption, then checksum
+ * is 4 bytes. Otherwise, it is DB_MAC_KEY size. Check for illegal
+ * combinations of crypto/non-crypto checksums.
+ */
+ if (is_hmac == 0) {
+ if (db_cipher != NULL) {
+ __db_err(dbenv,
+ "Unencrypted checksum with a supplied encryption key");
+ return (EINVAL);
+ }
+ sum_len = sizeof(u_int32_t);
+ mac_key = NULL;
+ } else {
+ if (db_cipher == NULL) {
+ __db_err(dbenv,
+ "Encrypted checksum: no encryption key specified");
+ return (EINVAL);
+ }
+ sum_len = DB_MAC_KEY;
+ mac_key = db_cipher->mac_key;
+ }
+
+ /*
+ * !!!
+ * Since the checksum might be on the page, we need to have known data
+ * there so that we can generate the same original checksum. We zero
+ * it out, just like we do in __db_chksum above.
+ */
+ memcpy(old, chksum, sum_len);
+ memset(chksum, 0, sum_len);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ ret = memcmp((u_int32_t *)old, &hash4, sum_len) ? -1 : 0;
+ } else {
+ __db_hmac(mac_key, data, data_len, new);
+ ret = memcmp(old, new, sum_len) ? -1 : 0;
+ }
+
+ return (ret);
+}
diff --git a/libdb/hmac/sha1.c b/libdb/hmac/sha1.c
new file mode 100644
index 0000000..a17a613
--- /dev/null
+++ b/libdb/hmac/sha1.c
@@ -0,0 +1,294 @@
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+/*
+SHA-1 in C
+By Steve Reid <sreid@sea-to-sky.net>
+100% Public Domain
+
+-----------------
+Modified 7/98
+By James H. Brown <jbrown@burgoyne.com>
+Still 100% Public Domain
+
+Corrected a problem which generated improper hash values on 16 bit machines
+Routine SHA1Update changed from
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned int len)
+to
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned long len)
+
+The 'len' parameter was declared an int which works fine on 32 bit machines.
+However, on 16 bit machines an int is too small for the shifts being done
+against it.  This caused the hash function to generate incorrect values if len was
+greater than 8191 (8K - 1) due to the 'len << 3' on line 3 of SHA1Update().
+
+Since the file IO in main() reads 16K at a time, any file 8K or larger would
+be guaranteed to generate the wrong hash (e.g. Test Vector #3, a million
+"a"s).
+
+I also changed the declaration of variables i & j in SHA1Update to
+unsigned long from unsigned int for the same reason.
+
+These changes should make no difference to any 32 bit implementations since
+an int and a long are the same size in those environments.
+
+--
+I also corrected a few compiler warnings generated by Borland C.
+1. Added #include <process.h> for exit() prototype
+2. Removed unused variable 'j' in SHA1Final
+3. Changed exit(0) to return(0) at end of main.
+
+ALL changes I made can be located by searching for comments containing 'JHB'
+-----------------
+Modified 8/98
+By Steve Reid <sreid@sea-to-sky.net>
+Still 100% public domain
+
+1- Removed #include <process.h> and used return() instead of exit()
+2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall)
+3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net
+
+-----------------
+Modified 4/01
+By Saul Kravitz <Saul.Kravitz@celera.com>
+Still 100% PD
+Modified to run on Compaq Alpha hardware.
+
+
+*/
+
+/*
+Test Vectors (from FIPS PUB 180-1)
+"abc"
+ A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+A million repetitions of "a"
+ 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
+*/
+
+#define SHA1HANDSOFF
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/hmac.h"
+
+/* #include <process.h> */ /* prototype for exit() - JHB */
+/* Using return() instead of exit() - SWR */
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
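+/*
+ * blk0() byte-swaps each input word on little-endian hosts, since SHA-1 is
+ * defined over big-endian 32-bit words; blk() is the message schedule
+ * recurrence w[i] = rol(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1).
+ */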
+#define blk0(i) is_bigendian ? block->l[i] : \
+ (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
+ |(rol(block->l[i],8)&0x00FF00FF))
+#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
+ ^block->l[(i+2)&15]^block->l[i&15],1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
+#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
+
+#ifdef VERBOSE /* SAK */
+static void __db_SHAPrintContext __P((SHA1_CTX *, char *));
+static void
+__db_SHAPrintContext(context, msg)
+ SHA1_CTX *context;
+ char *msg;
+{
+ printf("%s (%d,%d) %x %x %x %x %x\n",
+ msg,
+ context->count[0], context->count[1],
+ context->state[0],
+ context->state[1],
+ context->state[2],
+ context->state[3],
+ context->state[4]);
+}
+#endif
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+
+/*
+ * __db_SHA1Transform --
+ *
+ * PUBLIC: void __db_SHA1Transform __P((u_int32_t *, unsigned char *));
+ */
+void
+__db_SHA1Transform(state, buffer)
+ u_int32_t *state;
+ unsigned char *buffer;
+{
+u_int32_t a, b, c, d, e;
+typedef union {
+ unsigned char c[64];
+ u_int32_t l[16];
+} CHAR64LONG16;
+CHAR64LONG16* block;
+static int is_bigendian = -1;
+#ifdef SHA1HANDSOFF
+ unsigned char workspace[64];
+
+ block = (CHAR64LONG16*)workspace;
+ memcpy(block, buffer, 64);
+#else
+ block = (CHAR64LONG16*)buffer;
+#endif
+ if (is_bigendian == -1)
+ is_bigendian = __db_isbigendian();
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+ R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+ R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+ R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+ R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+ R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+ R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+ R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+ R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+ R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+ R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+ R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+ R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+ R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+ R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+ R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+}
+
+
+/* SHA1Init - Initialize new context */
+
+/*
+ * __db_SHA1Init --
+ * Initialize new context
+ *
+ * PUBLIC: void __db_SHA1Init __P((SHA1_CTX *));
+ */
+void
+__db_SHA1Init(context)
+ SHA1_CTX *context;
+{
+ /* SHA1 initialization constants */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+ context->count[0] = context->count[1] = 0;
+}
+
+
+/* Run your data through this. */
+
+/*
+ * __db_SHA1Update --
+ * Run your data through this.
+ *
+ * PUBLIC: void __db_SHA1Update __P((SHA1_CTX *, unsigned char *,
+ * PUBLIC: size_t));
+ */
+void
+__db_SHA1Update(context, data, len)
+ SHA1_CTX *context;
+ unsigned char *data;
+ size_t len;
+{
+u_int32_t i, j; /* JHB */
+
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "before");
+#endif
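+ /* count[] holds the message length in bits; j is the number of bytes already buffered. */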
+ j = (context->count[0] >> 3) & 63;
+ if ((context->count[0] += (u_int32_t)len << 3) < (len << 3)) context->count[1]++;
+ context->count[1] += (u_int32_t)(len >> 29);
+ if ((j + len) > 63) {
+ memcpy(&context->buffer[j], data, (i = 64-j));
+ __db_SHA1Transform(context->state, context->buffer);
+ for ( ; i + 63 < len; i += 64) {
+ __db_SHA1Transform(context->state, &data[i]);
+ }
+ j = 0;
+ }
+ else i = 0;
+ memcpy(&context->buffer[j], &data[i], len - i);
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "after ");
+#endif
+}
+
+
+/* Add padding and return the message digest. */
+
+/*
+ * __db_SHA1Final --
+ * Add padding and return the message digest.
+ *
+ * PUBLIC: void __db_SHA1Final __P((unsigned char *, SHA1_CTX *));
+ */
+void
+__db_SHA1Final(digest, context)
+ unsigned char *digest;
+ SHA1_CTX *context;
+{
+u_int32_t i; /* JHB */
+unsigned char finalcount[8];
+
+ for (i = 0; i < 8; i++) {
+ finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
+ >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
+ }
+ __db_SHA1Update(context, (unsigned char *)"\200", 1);
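+ /* Pad with zero bytes until the buffered length reaches 56 bytes (448 bits mod 512). */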
+ while ((context->count[0] & 504) != 448) {
+ __db_SHA1Update(context, (unsigned char *)"\0", 1);
+ }
+ __db_SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
+ for (i = 0; i < 20; i++) {
+ digest[i] = (unsigned char)
+ ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+ }
+ /* Wipe variables */
+ i = 0; /* JHB */
+ memset(context->buffer, 0, 64);
+ memset(context->state, 0, 20);
+ memset(context->count, 0, 8);
+ memset(finalcount, 0, 8); /* SWR */
+#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
+ __db_SHA1Transform(context->state, context->buffer);
+#endif
+}
+
+/*************************************************************/
+
diff --git a/libdb/hsearch/hsearch.c b/libdb/hsearch/hsearch.c
new file mode 100644
index 0000000..5b6b42d
--- /dev/null
+++ b/libdb/hsearch/hsearch.c
@@ -0,0 +1,160 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+static DB *dbp;
+static ENTRY retval;
+
+/*
+ * Translate HSEARCH calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_hcreate __P((size_t));
+ * EXTERN: ENTRY *__db_hsearch __P((ENTRY, ACTION));
+ * EXTERN: void __db_hdestroy __P((void));
+ *
+ * EXTERN: #endif
+ */
+int
+__db_hcreate(nel)
+ size_t nel;
+{
+ int ret;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (1);
+ }
+
+ if ((ret = dbp->set_pagesize(dbp, 512)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 16)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, (u_int32_t)nel)) != 0 ||
+ (ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
+ __os_set_errno(ret);
+
+ /*
+ * !!!
+ * Hsearch returns 0 on error, not 1.
+ */
+ return (ret == 0 ? 1 : 0);
+}
+
+ENTRY *
+__db_hsearch(item, action)
+ ENTRY item;
+ ACTION action;
+{
+ DBT key, val;
+ int ret;
+
+ if (dbp == NULL) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ key.data = item.key;
+ key.size = (u_int32_t)strlen(item.key) + 1;
+
+ switch (action) {
+ case ENTER:
+ val.data = item.data;
+ val.size = (u_int32_t)strlen(item.data) + 1;
+
+ /*
+ * Try and add the key to the database. If we fail because
+ * the key already exists, return the existing key.
+ */
+ if ((ret =
+ dbp->put(dbp, NULL, &key, &val, DB_NOOVERWRITE)) == 0)
+ break;
+ if (ret == DB_KEYEXIST &&
+ (ret = dbp->get(dbp, NULL, &key, &val, 0)) == 0)
+ break;
+ /*
+ * The only possible DB error is DB_NOTFOUND, and it can't
+ * happen. Check for a DB error, and lie if we find one.
+ */
+ __os_set_errno(ret > 0 ? ret : EINVAL);
+ return (NULL);
+ case FIND:
+ if ((ret = dbp->get(dbp, NULL, &key, &val, 0)) != 0) {
+ if (ret != DB_NOTFOUND)
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ item.data = (char *)val.data;
+ break;
+ default:
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ retval.key = item.key;
+ retval.data = item.data;
+ return (&retval);
+}
+
+void
+__db_hdestroy()
+{
+ if (dbp != NULL) {
+ (void)dbp->close(dbp, 0);
+ dbp = NULL;
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/db/Db.java b/libdb/java/src/com/sleepycat/db/Db.java
new file mode 100644
index 0000000..26461ec
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/Db.java
@@ -0,0 +1,761 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+import java.io.OutputStream;
+import java.io.FileNotFoundException;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Db
+{
+ // BEGIN-JAVA-SPECIAL-CONSTANTS
+ /* DO NOT EDIT: automatically built by dist/s_java. */
+ public static final int DB_BTREE = 1;
+ public static final int DB_DONOTINDEX = -30999;
+ public static final int DB_HASH = 2;
+ public static final int DB_KEYEMPTY = -30998;
+ public static final int DB_KEYEXIST = -30997;
+ public static final int DB_LOCK_DEADLOCK = -30996;
+ public static final int DB_LOCK_NOTGRANTED = -30995;
+ public static final int DB_NOSERVER = -30994;
+ public static final int DB_NOSERVER_HOME = -30993;
+ public static final int DB_NOSERVER_ID = -30992;
+ public static final int DB_NOTFOUND = -30991;
+ public static final int DB_OLD_VERSION = -30990;
+ public static final int DB_PAGE_NOTFOUND = -30989;
+ public static final int DB_QUEUE = 4;
+ public static final int DB_RECNO = 3;
+ public static final int DB_REP_DUPMASTER = -30988;
+ public static final int DB_REP_HOLDELECTION = -30987;
+ public static final int DB_REP_NEWMASTER = -30986;
+ public static final int DB_REP_NEWSITE = -30985;
+ public static final int DB_REP_OUTDATED = -30984;
+ public static final int DB_RUNRECOVERY = -30982;
+ public static final int DB_SECONDARY_BAD = -30981;
+ public static final int DB_TXN_ABORT = 0;
+ public static final int DB_TXN_APPLY = 1;
+ public static final int DB_TXN_BACKWARD_ROLL = 3;
+ public static final int DB_TXN_FORWARD_ROLL = 4;
+ public static final int DB_TXN_PRINT = 8;
+ public static final int DB_UNKNOWN = 5;
+ public static final int DB_VERIFY_BAD = -30980;
+ public static final int DB_AFTER;
+ public static final int DB_AGGRESSIVE;
+ public static final int DB_APPEND;
+ public static final int DB_ARCH_ABS;
+ public static final int DB_ARCH_DATA;
+ public static final int DB_ARCH_LOG;
+ public static final int DB_AUTO_COMMIT;
+ public static final int DB_BEFORE;
+ public static final int DB_CACHED_COUNTS;
+ public static final int DB_CDB_ALLDB;
+ public static final int DB_CHKSUM_SHA1;
+ public static final int DB_CLIENT;
+ public static final int DB_CONSUME;
+ public static final int DB_CONSUME_WAIT;
+ public static final int DB_CREATE;
+ public static final int DB_CURRENT;
+ public static final int DB_CXX_NO_EXCEPTIONS;
+ public static final int DB_DBT_MALLOC;
+ public static final int DB_DBT_PARTIAL;
+ public static final int DB_DBT_REALLOC;
+ public static final int DB_DBT_USERMEM;
+ public static final int DB_DIRECT;
+ public static final int DB_DIRECT_DB;
+ public static final int DB_DIRECT_LOG;
+ public static final int DB_DIRTY_READ;
+ public static final int DB_DUP;
+ public static final int DB_DUPSORT;
+ public static final int DB_EID_BROADCAST;
+ public static final int DB_EID_INVALID;
+ public static final int DB_ENCRYPT;
+ public static final int DB_ENCRYPT_AES;
+ public static final int DB_EXCL;
+ public static final int DB_FAST_STAT;
+ public static final int DB_FIRST;
+ public static final int DB_FLUSH;
+ public static final int DB_FORCE;
+ public static final int DB_GET_BOTH;
+ public static final int DB_GET_BOTH_RANGE;
+ public static final int DB_GET_RECNO;
+ public static final int DB_INIT_CDB;
+ public static final int DB_INIT_LOCK;
+ public static final int DB_INIT_LOG;
+ public static final int DB_INIT_MPOOL;
+ public static final int DB_INIT_TXN;
+ public static final int DB_JOINENV;
+ public static final int DB_JOIN_ITEM;
+ public static final int DB_JOIN_NOSORT;
+ public static final int DB_KEYFIRST;
+ public static final int DB_KEYLAST;
+ public static final int DB_LAST;
+ public static final int DB_LOCKDOWN;
+ public static final int DB_LOCK_DEFAULT;
+ public static final int DB_LOCK_EXPIRE;
+ public static final int DB_LOCK_GET;
+ public static final int DB_LOCK_GET_TIMEOUT;
+ public static final int DB_LOCK_IREAD;
+ public static final int DB_LOCK_IWR;
+ public static final int DB_LOCK_IWRITE;
+ public static final int DB_LOCK_MAXLOCKS;
+ public static final int DB_LOCK_MINLOCKS;
+ public static final int DB_LOCK_MINWRITE;
+ public static final int DB_LOCK_NOWAIT;
+ public static final int DB_LOCK_OLDEST;
+ public static final int DB_LOCK_PUT;
+ public static final int DB_LOCK_PUT_ALL;
+ public static final int DB_LOCK_PUT_OBJ;
+ public static final int DB_LOCK_RANDOM;
+ public static final int DB_LOCK_READ;
+ public static final int DB_LOCK_TIMEOUT;
+ public static final int DB_LOCK_WRITE;
+ public static final int DB_LOCK_YOUNGEST;
+ public static final int DB_MULTIPLE;
+ public static final int DB_MULTIPLE_KEY;
+ public static final int DB_NEXT;
+ public static final int DB_NEXT_DUP;
+ public static final int DB_NEXT_NODUP;
+ public static final int DB_NODUPDATA;
+ public static final int DB_NOLOCKING;
+ public static final int DB_NOMMAP;
+ public static final int DB_NOORDERCHK;
+ public static final int DB_NOOVERWRITE;
+ public static final int DB_NOPANIC;
+ public static final int DB_NOSYNC;
+ public static final int DB_ODDFILESIZE;
+ public static final int DB_ORDERCHKONLY;
+ public static final int DB_OVERWRITE;
+ public static final int DB_PANIC_ENVIRONMENT;
+ public static final int DB_POSITION;
+ public static final int DB_PREV;
+ public static final int DB_PREV_NODUP;
+ public static final int DB_PRINTABLE;
+ public static final int DB_PRIORITY_DEFAULT;
+ public static final int DB_PRIORITY_HIGH;
+ public static final int DB_PRIORITY_LOW;
+ public static final int DB_PRIORITY_VERY_HIGH;
+ public static final int DB_PRIORITY_VERY_LOW;
+ public static final int DB_PRIVATE;
+ public static final int DB_RDONLY;
+ public static final int DB_RECNUM;
+ public static final int DB_RECORDCOUNT;
+ public static final int DB_RECOVER;
+ public static final int DB_RECOVER_FATAL;
+ public static final int DB_REGION_INIT;
+ public static final int DB_RENUMBER;
+ public static final int DB_REP_CLIENT;
+ public static final int DB_REP_LOGSONLY;
+ public static final int DB_REP_MASTER;
+ public static final int DB_REP_PERMANENT;
+ public static final int DB_REP_UNAVAIL;
+ public static final int DB_REVSPLITOFF;
+ public static final int DB_RMW;
+ public static final int DB_SALVAGE;
+ public static final int DB_SET;
+ public static final int DB_SET_LOCK_TIMEOUT;
+ public static final int DB_SET_RANGE;
+ public static final int DB_SET_RECNO;
+ public static final int DB_SET_TXN_TIMEOUT;
+ public static final int DB_SNAPSHOT;
+ public static final int DB_STAT_CLEAR;
+ public static final int DB_SYSTEM_MEM;
+ public static final int DB_THREAD;
+ public static final int DB_TRUNCATE;
+ public static final int DB_TXN_NOSYNC;
+ public static final int DB_TXN_NOWAIT;
+ public static final int DB_TXN_SYNC;
+ public static final int DB_TXN_WRITE_NOSYNC;
+ public static final int DB_UPGRADE;
+ public static final int DB_USE_ENVIRON;
+ public static final int DB_USE_ENVIRON_ROOT;
+ public static final int DB_VERB_CHKPOINT;
+ public static final int DB_VERB_DEADLOCK;
+ public static final int DB_VERB_RECOVERY;
+ public static final int DB_VERB_REPLICATION;
+ public static final int DB_VERB_WAITSFOR;
+ public static final int DB_VERIFY;
+ public static final int DB_VERSION_MAJOR;
+ public static final int DB_VERSION_MINOR;
+ public static final int DB_VERSION_PATCH;
+ public static final int DB_WRITECURSOR;
+ public static final int DB_XA_CREATE;
+ public static final int DB_XIDDATASIZE;
+ public static final int DB_YIELDCPU;
+ // END-JAVA-SPECIAL-CONSTANTS
+
+ // Note: the env can be null
+ //
+ public Db(DbEnv env, int flags)
+ throws DbException
+ {
+ constructor_env_ = env;
+ _init(env, flags);
+ if (env == null) {
+ dbenv_ = new DbEnv(this);
+ }
+ else {
+ dbenv_ = env;
+ }
+ dbenv_._add_db(this);
+ }
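+
+ // Illustrative usage sketch (not part of the original source): a
+ // standalone database with no environment.  Db.DB_BTREE and the
+ // Dbt(byte[]) constructor are assumed from other parts of this binding.
+ //
+ // Db db = new Db(null, 0);
+ // db.open(null, "example.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ // Dbt key = new Dbt("fruit".getBytes());
+ // Dbt data = new Dbt("apple".getBytes());
+ // db.put(null, key, data, 0);
+ // db.close(0);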
+
+ //
+ // Our parent DbEnv is notifying us that the environment is closing.
+ //
+ /*package*/ void _notify_dbenv_close()
+ {
+ dbenv_ = null;
+ _notify_internal();
+ }
+
+ private native void _init(DbEnv env, int flags)
+ throws DbException;
+
+ private native void _notify_internal();
+
+ // methods
+ //
+
+ public synchronized void associate(DbTxn txn, Db secondary,
+ DbSecondaryKeyCreate key_creator,
+ int flags)
+ throws DbException
+ {
+ secondary.secondary_key_create_ = key_creator;
+ _associate(txn, secondary, key_creator, flags);
+ }
+
+ public native void _associate(DbTxn txn, Db secondary,
+ DbSecondaryKeyCreate key_creator, int flags)
+ throws DbException;
+
+ public synchronized int close(int flags)
+ throws DbException
+ {
+ try {
+ dbenv_._remove_db(this);
+ return _close(flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
+ }
+ }
+
+ public native int _close(int flags)
+ throws DbException;
+
+ public native Dbc cursor(DbTxn txnid, int flags)
+ throws DbException;
+
+ public native int del(DbTxn txnid, Dbt key, int flags)
+ throws DbException;
+
+ public native void err(int errcode, String message);
+
+ public native void errx(String message);
+
+ public native int fd()
+ throws DbException;
+
+ // overrides Object.finalize
+ protected void finalize()
+ throws Throwable
+ {
+ if (dbenv_ == null)
+ _finalize(null, null);
+ else
+ _finalize(dbenv_.errcall_, dbenv_.errpfx_);
+ }
+
+ protected native void _finalize(DbErrcall errcall, String errpfx)
+ throws Throwable;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ public native boolean get_byteswapped();
+
+ public native /*DBTYPE*/ int get_type();
+
+ public native Dbc join(Dbc curslist[], int flags)
+ throws DbException;
+
+ public native void key_range(DbTxn txnid, Dbt key,
+ DbKeyRange range, int flags)
+ throws DbException;
+
+ public synchronized void open(DbTxn txnid, String file,
+ String database, /*DBTYPE*/ int type,
+ int flags, int mode)
+ throws DbException, FileNotFoundException
+ {
+ _open(txnid, file, database, type, flags, mode);
+ }
+
+ // (Internal)
+ public native void _open(DbTxn txnid, String file,
+ String database, /*DBTYPE*/ int type,
+ int flags, int mode)
+ throws DbException, FileNotFoundException;
+
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int pget(DbTxn txnid, Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
+
+ // returns: 0, DB_KEYEXIST, or throws error
+ public native int put(DbTxn txnid, Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ public synchronized void rename(String file, String database,
+ String newname, int flags)
+ throws DbException, FileNotFoundException
+ {
+ try {
+ _rename(file, database, newname, flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
+ }
+ }
+
+ public native void _rename(String file, String database,
+ String newname, int flags)
+ throws DbException, FileNotFoundException;
+
+
+ public synchronized void remove(String file,
+ String database, int flags)
+ throws DbException, FileNotFoundException
+ {
+ try {
+ _remove(file, database, flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
+ }
+ }
+
+ public native void _remove(String file, String database,
+ int flags)
+ throws DbException, FileNotFoundException;
+
+ // Append record number callback.
+ public void set_append_recno(DbAppendRecno append_recno)
+ throws DbException
+ {
+ append_recno_ = append_recno;
+ append_recno_changed(append_recno);
+ }
+
+ // (Internal)
+ private native void append_recno_changed(DbAppendRecno append_recno)
+ throws DbException;
+
+ // Comparison function.
+ public void set_bt_compare(DbBtreeCompare bt_compare)
+ throws DbException
+ {
+ bt_compare_ = bt_compare;
+ bt_compare_changed(bt_compare);
+ }
+
+ // (Internal)
+ private native void bt_compare_changed(DbBtreeCompare bt_compare)
+ throws DbException;
+
+ // Maximum keys per page.
+ public native void set_bt_maxkey(int maxkey)
+ throws DbException;
+
+ // Minimum keys per page.
+ public native void set_bt_minkey(int minkey)
+ throws DbException;
+
+ // Prefix function.
+ public void set_bt_prefix(DbBtreePrefix bt_prefix)
+ throws DbException
+ {
+ bt_prefix_ = bt_prefix;
+ bt_prefix_changed(bt_prefix);
+ }
+
+ // (Internal)
+ private native void bt_prefix_changed(DbBtreePrefix bt_prefix)
+ throws DbException;
+
+ // Set cache size
+ public native void set_cachesize(int gbytes, int bytes, int ncaches)
+ throws DbException;
+
+ // Set cache priority
+ public native void set_cache_priority(/* DB_CACHE_PRIORITY */ int priority)
+ throws DbException;
+
+ // Duplicate comparison function.
+ public void set_dup_compare(DbDupCompare dup_compare)
+ throws DbException
+ {
+ dup_compare_ = dup_compare;
+ dup_compare_changed(dup_compare);
+ }
+
+ // (Internal)
+ private native void dup_compare_changed(DbDupCompare dup_compare)
+ throws DbException;
+
+ // Encryption
+ public native void set_encrypt(String passwd, /*u_int32_t*/ int flags)
+ throws DbException;
+
+ // Error message callback.
+ public void set_errcall(DbErrcall errcall)
+ {
+ if (dbenv_ != null)
+ dbenv_.set_errcall(errcall);
+ }
+
+ // Error stream.
+ public void set_error_stream(OutputStream s)
+ {
+ DbOutputStreamErrcall errcall = new DbOutputStreamErrcall(s);
+ set_errcall(errcall);
+ }
+
+ // Error message prefix.
+ public void set_errpfx(String errpfx)
+ {
+ if (dbenv_ != null)
+ dbenv_.set_errpfx(errpfx);
+ }
+
+
+ // Feedback
+ public void set_feedback(DbFeedback feedback)
+ throws DbException
+ {
+ feedback_ = feedback;
+ feedback_changed(feedback);
+ }
+
+ // (Internal)
+ private native void feedback_changed(DbFeedback feedback)
+ throws DbException;
+
+ // Flags.
+ public native void set_flags(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ // Internal - only intended for testing purposes in the Java RPC server
+ public native int get_flags_raw()
+ throws DbException;
+
+ // Fill factor.
+ public native void set_h_ffactor(/*unsigned*/ int h_ffactor)
+ throws DbException;
+
+ // Hash function.
+ public void set_h_hash(DbHash h_hash)
+ throws DbException
+ {
+ h_hash_ = h_hash;
+ hash_changed(h_hash);
+ }
+
+ // (Internal)
+ private native void hash_changed(DbHash hash)
+ throws DbException;
+
+ // Number of elements.
+ public native void set_h_nelem(/*unsigned*/ int h_nelem)
+ throws DbException;
+
+ // Byte order.
+ public native void set_lorder(int lorder)
+ throws DbException;
+
+ // Underlying page size.
+ public native void set_pagesize(/*size_t*/ long pagesize)
+ throws DbException;
+
+ // Variable-length delimiting byte.
+ public native void set_re_delim(int re_delim)
+ throws DbException;
+
+ // Length for fixed-length records.
+ public native void set_re_len(/*u_int32_t*/ int re_len)
+ throws DbException;
+
+ // Fixed-length padding byte.
+ public native void set_re_pad(int re_pad)
+ throws DbException;
+
+ // Source file name.
+ public native void set_re_source(String re_source)
+ throws DbException;
+
+ // Extent size for Queue databases.
+ public native void set_q_extentsize(/*u_int32_t*/ int extent_size)
+ throws DbException;
+
+ // returns a DbBtreeStat or DbHashStat
+ public native Object stat(int flags)
+ throws DbException;
+
+ public native void sync(int flags)
+ throws DbException;
+
+ public native int truncate(DbTxn txnid, int flags)
+ throws DbException;
+
+ public native void upgrade(String name, int flags)
+ throws DbException;
+
+ public native void verify(String name, String subdb,
+ OutputStream outstr, int flags)
+ throws DbException;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private long private_info_ = 0;
+ private DbEnv dbenv_ = null;
+ private DbEnv constructor_env_ = null;
+ private DbFeedback feedback_ = null;
+ private DbAppendRecno append_recno_ = null;
+ private DbBtreeCompare bt_compare_ = null;
+ private DbBtreePrefix bt_prefix_ = null;
+ private DbDupCompare dup_compare_ = null;
+ private DbHash h_hash_ = null;
+ private DbSecondaryKeyCreate secondary_key_create_ = null;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // static methods and data that implement
+ // loading the native library and doing any
+ // extra sanity checks on startup.
+ //
+ private static boolean already_loaded_ = false;
+
+ public static void load_db()
+ {
+ if (already_loaded_)
+ return;
+
+ // An alternate library name can be specified via a property.
+ //
+ String override;
+
+ if ((override = System.getProperty("sleepycat.db.libfile")) != null) {
+ System.load(override);
+ }
+ else if ((override = System.getProperty("sleepycat.db.libname")) != null) {
+ System.loadLibrary(override);
+ }
+ else {
+ String os = System.getProperty("os.name");
+ if (os != null && os.startsWith("Windows")) {
+ // library name is "libdb_java30.dll" (for example) on Win/*
+ System.loadLibrary("libdb_java" +
+ DbConstants.DB_VERSION_MAJOR +
+ DbConstants.DB_VERSION_MINOR);
+ }
+ else {
+ // library name is "libdb_java-3.0.so" (for example) on UNIX
+ // Note: "db_java" isn't good enough;
+ // some Unixes require us to use the explicit SONAME.
+ System.loadLibrary("db_java-" +
+ DbConstants.DB_VERSION_MAJOR + "." +
+ DbConstants.DB_VERSION_MINOR);
+ }
+ }
+
+ already_loaded_ = true;
+ }
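+
+ // For example (illustrative only; the paths and names below are not
+ // part of the original source), the defaults above can be bypassed at
+ // startup with either JVM option, assuming the named file or library
+ // actually exists on the target machine:
+ //
+ // java -Dsleepycat.db.libfile=/usr/local/lib/libdb_java-4.1.so App
+ // java -Dsleepycat.db.libname=db_java-4.1 App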
+
+ static private native void one_time_init();
+
+ static private void check_constant(int c1, int c2)
+ {
+ if (c1 != c2) {
+ System.err.println("Db: constant mismatch");
+ Thread.dumpStack();
+ System.exit(1);
+ }
+ }
+
+ static {
+ Db.load_db();
+
+ // BEGIN-JAVA-CONSTANT-INITIALIZATION
+ /* DO NOT EDIT: automatically built by dist/s_java. */
+ DB_AFTER = DbConstants.DB_AFTER;
+ DB_AGGRESSIVE = DbConstants.DB_AGGRESSIVE;
+ DB_APPEND = DbConstants.DB_APPEND;
+ DB_ARCH_ABS = DbConstants.DB_ARCH_ABS;
+ DB_ARCH_DATA = DbConstants.DB_ARCH_DATA;
+ DB_ARCH_LOG = DbConstants.DB_ARCH_LOG;
+ DB_AUTO_COMMIT = DbConstants.DB_AUTO_COMMIT;
+ DB_BEFORE = DbConstants.DB_BEFORE;
+ DB_CACHED_COUNTS = DbConstants.DB_CACHED_COUNTS;
+ DB_CDB_ALLDB = DbConstants.DB_CDB_ALLDB;
+ DB_CHKSUM_SHA1 = DbConstants.DB_CHKSUM_SHA1;
+ DB_CLIENT = DbConstants.DB_CLIENT;
+ DB_CONSUME = DbConstants.DB_CONSUME;
+ DB_CONSUME_WAIT = DbConstants.DB_CONSUME_WAIT;
+ DB_CREATE = DbConstants.DB_CREATE;
+ DB_CURRENT = DbConstants.DB_CURRENT;
+ DB_CXX_NO_EXCEPTIONS = DbConstants.DB_CXX_NO_EXCEPTIONS;
+ DB_DBT_MALLOC = DbConstants.DB_DBT_MALLOC;
+ DB_DBT_PARTIAL = DbConstants.DB_DBT_PARTIAL;
+ DB_DBT_REALLOC = DbConstants.DB_DBT_REALLOC;
+ DB_DBT_USERMEM = DbConstants.DB_DBT_USERMEM;
+ DB_DIRECT = DbConstants.DB_DIRECT;
+ DB_DIRECT_DB = DbConstants.DB_DIRECT_DB;
+ DB_DIRECT_LOG = DbConstants.DB_DIRECT_LOG;
+ DB_DIRTY_READ = DbConstants.DB_DIRTY_READ;
+ DB_DUP = DbConstants.DB_DUP;
+ DB_DUPSORT = DbConstants.DB_DUPSORT;
+ DB_EID_BROADCAST = DbConstants.DB_EID_BROADCAST;
+ DB_EID_INVALID = DbConstants.DB_EID_INVALID;
+ DB_ENCRYPT = DbConstants.DB_ENCRYPT;
+ DB_ENCRYPT_AES = DbConstants.DB_ENCRYPT_AES;
+ DB_EXCL = DbConstants.DB_EXCL;
+ DB_FAST_STAT = DbConstants.DB_FAST_STAT;
+ DB_FIRST = DbConstants.DB_FIRST;
+ DB_FLUSH = DbConstants.DB_FLUSH;
+ DB_FORCE = DbConstants.DB_FORCE;
+ DB_GET_BOTH = DbConstants.DB_GET_BOTH;
+ DB_GET_BOTH_RANGE = DbConstants.DB_GET_BOTH_RANGE;
+ DB_GET_RECNO = DbConstants.DB_GET_RECNO;
+ DB_INIT_CDB = DbConstants.DB_INIT_CDB;
+ DB_INIT_LOCK = DbConstants.DB_INIT_LOCK;
+ DB_INIT_LOG = DbConstants.DB_INIT_LOG;
+ DB_INIT_MPOOL = DbConstants.DB_INIT_MPOOL;
+ DB_INIT_TXN = DbConstants.DB_INIT_TXN;
+ DB_JOINENV = DbConstants.DB_JOINENV;
+ DB_JOIN_ITEM = DbConstants.DB_JOIN_ITEM;
+ DB_JOIN_NOSORT = DbConstants.DB_JOIN_NOSORT;
+ DB_KEYFIRST = DbConstants.DB_KEYFIRST;
+ DB_KEYLAST = DbConstants.DB_KEYLAST;
+ DB_LAST = DbConstants.DB_LAST;
+ DB_LOCKDOWN = DbConstants.DB_LOCKDOWN;
+ DB_LOCK_DEFAULT = DbConstants.DB_LOCK_DEFAULT;
+ DB_LOCK_EXPIRE = DbConstants.DB_LOCK_EXPIRE;
+ DB_LOCK_GET = DbConstants.DB_LOCK_GET;
+ DB_LOCK_GET_TIMEOUT = DbConstants.DB_LOCK_GET_TIMEOUT;
+ DB_LOCK_IREAD = DbConstants.DB_LOCK_IREAD;
+ DB_LOCK_IWR = DbConstants.DB_LOCK_IWR;
+ DB_LOCK_IWRITE = DbConstants.DB_LOCK_IWRITE;
+ DB_LOCK_MAXLOCKS = DbConstants.DB_LOCK_MAXLOCKS;
+ DB_LOCK_MINLOCKS = DbConstants.DB_LOCK_MINLOCKS;
+ DB_LOCK_MINWRITE = DbConstants.DB_LOCK_MINWRITE;
+ DB_LOCK_NOWAIT = DbConstants.DB_LOCK_NOWAIT;
+ DB_LOCK_OLDEST = DbConstants.DB_LOCK_OLDEST;
+ DB_LOCK_PUT = DbConstants.DB_LOCK_PUT;
+ DB_LOCK_PUT_ALL = DbConstants.DB_LOCK_PUT_ALL;
+ DB_LOCK_PUT_OBJ = DbConstants.DB_LOCK_PUT_OBJ;
+ DB_LOCK_RANDOM = DbConstants.DB_LOCK_RANDOM;
+ DB_LOCK_READ = DbConstants.DB_LOCK_READ;
+ DB_LOCK_TIMEOUT = DbConstants.DB_LOCK_TIMEOUT;
+ DB_LOCK_WRITE = DbConstants.DB_LOCK_WRITE;
+ DB_LOCK_YOUNGEST = DbConstants.DB_LOCK_YOUNGEST;
+ DB_MULTIPLE = DbConstants.DB_MULTIPLE;
+ DB_MULTIPLE_KEY = DbConstants.DB_MULTIPLE_KEY;
+ DB_NEXT = DbConstants.DB_NEXT;
+ DB_NEXT_DUP = DbConstants.DB_NEXT_DUP;
+ DB_NEXT_NODUP = DbConstants.DB_NEXT_NODUP;
+ DB_NODUPDATA = DbConstants.DB_NODUPDATA;
+ DB_NOLOCKING = DbConstants.DB_NOLOCKING;
+ DB_NOMMAP = DbConstants.DB_NOMMAP;
+ DB_NOORDERCHK = DbConstants.DB_NOORDERCHK;
+ DB_NOOVERWRITE = DbConstants.DB_NOOVERWRITE;
+ DB_NOPANIC = DbConstants.DB_NOPANIC;
+ DB_NOSYNC = DbConstants.DB_NOSYNC;
+ DB_ODDFILESIZE = DbConstants.DB_ODDFILESIZE;
+ DB_ORDERCHKONLY = DbConstants.DB_ORDERCHKONLY;
+ DB_OVERWRITE = DbConstants.DB_OVERWRITE;
+ DB_PANIC_ENVIRONMENT = DbConstants.DB_PANIC_ENVIRONMENT;
+ DB_POSITION = DbConstants.DB_POSITION;
+ DB_PREV = DbConstants.DB_PREV;
+ DB_PREV_NODUP = DbConstants.DB_PREV_NODUP;
+ DB_PRINTABLE = DbConstants.DB_PRINTABLE;
+ DB_PRIORITY_DEFAULT = DbConstants.DB_PRIORITY_DEFAULT;
+ DB_PRIORITY_HIGH = DbConstants.DB_PRIORITY_HIGH;
+ DB_PRIORITY_LOW = DbConstants.DB_PRIORITY_LOW;
+ DB_PRIORITY_VERY_HIGH = DbConstants.DB_PRIORITY_VERY_HIGH;
+ DB_PRIORITY_VERY_LOW = DbConstants.DB_PRIORITY_VERY_LOW;
+ DB_PRIVATE = DbConstants.DB_PRIVATE;
+ DB_RDONLY = DbConstants.DB_RDONLY;
+ DB_RECNUM = DbConstants.DB_RECNUM;
+ DB_RECORDCOUNT = DbConstants.DB_RECORDCOUNT;
+ DB_RECOVER = DbConstants.DB_RECOVER;
+ DB_RECOVER_FATAL = DbConstants.DB_RECOVER_FATAL;
+ DB_REGION_INIT = DbConstants.DB_REGION_INIT;
+ DB_RENUMBER = DbConstants.DB_RENUMBER;
+ DB_REP_CLIENT = DbConstants.DB_REP_CLIENT;
+ DB_REP_LOGSONLY = DbConstants.DB_REP_LOGSONLY;
+ DB_REP_MASTER = DbConstants.DB_REP_MASTER;
+ DB_REP_PERMANENT = DbConstants.DB_REP_PERMANENT;
+ DB_REP_UNAVAIL = DbConstants.DB_REP_UNAVAIL;
+ DB_REVSPLITOFF = DbConstants.DB_REVSPLITOFF;
+ DB_RMW = DbConstants.DB_RMW;
+ DB_SALVAGE = DbConstants.DB_SALVAGE;
+ DB_SET = DbConstants.DB_SET;
+ DB_SET_LOCK_TIMEOUT = DbConstants.DB_SET_LOCK_TIMEOUT;
+ DB_SET_RANGE = DbConstants.DB_SET_RANGE;
+ DB_SET_RECNO = DbConstants.DB_SET_RECNO;
+ DB_SET_TXN_TIMEOUT = DbConstants.DB_SET_TXN_TIMEOUT;
+ DB_SNAPSHOT = DbConstants.DB_SNAPSHOT;
+ DB_STAT_CLEAR = DbConstants.DB_STAT_CLEAR;
+ DB_SYSTEM_MEM = DbConstants.DB_SYSTEM_MEM;
+ DB_THREAD = DbConstants.DB_THREAD;
+ DB_TRUNCATE = DbConstants.DB_TRUNCATE;
+ DB_TXN_NOSYNC = DbConstants.DB_TXN_NOSYNC;
+ DB_TXN_NOWAIT = DbConstants.DB_TXN_NOWAIT;
+ DB_TXN_SYNC = DbConstants.DB_TXN_SYNC;
+ DB_TXN_WRITE_NOSYNC = DbConstants.DB_TXN_WRITE_NOSYNC;
+ DB_UPGRADE = DbConstants.DB_UPGRADE;
+ DB_USE_ENVIRON = DbConstants.DB_USE_ENVIRON;
+ DB_USE_ENVIRON_ROOT = DbConstants.DB_USE_ENVIRON_ROOT;
+ DB_VERB_CHKPOINT = DbConstants.DB_VERB_CHKPOINT;
+ DB_VERB_DEADLOCK = DbConstants.DB_VERB_DEADLOCK;
+ DB_VERB_RECOVERY = DbConstants.DB_VERB_RECOVERY;
+ DB_VERB_REPLICATION = DbConstants.DB_VERB_REPLICATION;
+ DB_VERB_WAITSFOR = DbConstants.DB_VERB_WAITSFOR;
+ DB_VERIFY = DbConstants.DB_VERIFY;
+ DB_VERSION_MAJOR = DbConstants.DB_VERSION_MAJOR;
+ DB_VERSION_MINOR = DbConstants.DB_VERSION_MINOR;
+ DB_VERSION_PATCH = DbConstants.DB_VERSION_PATCH;
+ DB_WRITECURSOR = DbConstants.DB_WRITECURSOR;
+ DB_XA_CREATE = DbConstants.DB_XA_CREATE;
+ DB_XIDDATASIZE = DbConstants.DB_XIDDATASIZE;
+ DB_YIELDCPU = DbConstants.DB_YIELDCPU;
+ // END-JAVA-CONSTANT-INITIALIZATION
+
+ one_time_init();
+ }
+}
+// end of Db.java
diff --git a/libdb/java/src/com/sleepycat/db/DbAppDispatch.java b/libdb/java/src/com/sleepycat/db/DbAppDispatch.java
new file mode 100644
index 0000000..af0d9c7
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbAppDispatch.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_app_dispatch()
+ *
+ */
+public interface DbAppDispatch
+{
+ // The value of recops is one of the Db.DB_TXN_* constants
+ public abstract int app_dispatch(DbEnv env, Dbt dbt, DbLsn lsn, int recops);
+}
+
+// end of DbAppDispatch.java
diff --git a/libdb/java/src/com/sleepycat/db/DbAppendRecno.java b/libdb/java/src/com/sleepycat/db/DbAppendRecno.java
new file mode 100644
index 0000000..2e25557
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbAppendRecno.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_append_recno()
+ *
+ */
+public interface DbAppendRecno
+{
+ public abstract void db_append_recno(Db db, Dbt data, int recno)
+ throws DbException;
+}
+
+// end of DbAppendRecno.java
diff --git a/libdb/java/src/com/sleepycat/db/DbBtreeCompare.java b/libdb/java/src/com/sleepycat/db/DbBtreeCompare.java
new file mode 100644
index 0000000..0f20e66
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbBtreeCompare.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_bt_compare()
+ *
+ */
+public interface DbBtreeCompare
+{
+ public abstract int bt_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbBtreeCompare.java
diff --git a/libdb/java/src/com/sleepycat/db/DbBtreePrefix.java b/libdb/java/src/com/sleepycat/db/DbBtreePrefix.java
new file mode 100644
index 0000000..8665dbe
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbBtreePrefix.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_bt_prefix()
+ *
+ */
+public interface DbBtreePrefix
+{
+ public abstract int bt_prefix(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbBtreePrefix.java
diff --git a/libdb/java/src/com/sleepycat/db/DbBtreeStat.java b/libdb/java/src/com/sleepycat/db/DbBtreeStat.java
new file mode 100644
index 0000000..669afcf
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbBtreeStat.java
@@ -0,0 +1,28 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbBtreeStat
+{
+ public int bt_magic;
+ public int bt_version;
+ public int bt_metaflags;
+ public int bt_nkeys;
+ public int bt_ndata;
+ public int bt_pagesize;
+ public int bt_maxkey;
+ public int bt_minkey;
+ public int bt_re_len;
+ public int bt_re_pad;
+ public int bt_levels;
+ public int bt_int_pg;
+ public int bt_leaf_pg;
+ public int bt_dup_pg;
+ public int bt_over_pg;
+ public int bt_free;
+ public int bt_int_pgfree;
+ public int bt_leaf_pgfree;
+ public int bt_dup_pgfree;
+ public int bt_over_pgfree;
+}
+// end of DbBtreeStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbClient.java b/libdb/java/src/com/sleepycat/db/DbClient.java
new file mode 100644
index 0000000..29990fe
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbClient.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_rpc_server().
+ * It is a placeholder for a future capability.
+ *
+ */
+public interface DbClient
+{
+}
+
+// end of DbClient.java
diff --git a/libdb/java/src/com/sleepycat/db/DbConstants.java b/libdb/java/src/com/sleepycat/db/DbConstants.java
new file mode 100644
index 0000000..bad3442
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbConstants.java
@@ -0,0 +1,151 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+class DbConstants
+{
+ static final int DB_AFTER = 1;
+ static final int DB_AGGRESSIVE = 0x000001;
+ static final int DB_APPEND = 2;
+ static final int DB_ARCH_ABS = 0x001;
+ static final int DB_ARCH_DATA = 0x002;
+ static final int DB_ARCH_LOG = 0x004;
+ static final int DB_AUTO_COMMIT = 0x00800000;
+ static final int DB_BEFORE = 3;
+ static final int DB_CACHED_COUNTS = 4;
+ static final int DB_CDB_ALLDB = 0x000800;
+ static final int DB_CHKSUM_SHA1 = 0x000001;
+ static final int DB_CLIENT = 0x000001;
+ static final int DB_CONSUME = 6;
+ static final int DB_CONSUME_WAIT = 7;
+ static final int DB_CREATE = 0x000001;
+ static final int DB_CURRENT = 8;
+ static final int DB_CXX_NO_EXCEPTIONS = 0x000002;
+ static final int DB_DBT_MALLOC = 0x004;
+ static final int DB_DBT_PARTIAL = 0x008;
+ static final int DB_DBT_REALLOC = 0x010;
+ static final int DB_DBT_USERMEM = 0x020;
+ static final int DB_DIRECT = 0x000800;
+ static final int DB_DIRECT_DB = 0x001000;
+ static final int DB_DIRECT_LOG = 0x002000;
+ static final int DB_DIRTY_READ = 0x01000000;
+ static final int DB_DUP = 0x000002;
+ static final int DB_DUPSORT = 0x000004;
+ static final int DB_EID_BROADCAST = -1;
+ static final int DB_EID_INVALID = -2;
+ static final int DB_ENCRYPT = 0x000008;
+ static final int DB_ENCRYPT_AES = 0x000001;
+ static final int DB_EXCL = 0x000800;
+ static final int DB_FAST_STAT = 9;
+ static final int DB_FIRST = 10;
+ static final int DB_FLUSH = 0x02000000;
+ static final int DB_FORCE = 0x000004;
+ static final int DB_GET_BOTH = 11;
+ static final int DB_GET_BOTH_RANGE = 13;
+ static final int DB_GET_RECNO = 14;
+ static final int DB_INIT_CDB = 0x000800;
+ static final int DB_INIT_LOCK = 0x001000;
+ static final int DB_INIT_LOG = 0x002000;
+ static final int DB_INIT_MPOOL = 0x004000;
+ static final int DB_INIT_TXN = 0x008000;
+ static final int DB_JOINENV = 0x010000;
+ static final int DB_JOIN_ITEM = 15;
+ static final int DB_JOIN_NOSORT = 0x000001;
+ static final int DB_KEYFIRST = 16;
+ static final int DB_KEYLAST = 17;
+ static final int DB_LAST = 18;
+ static final int DB_LOCKDOWN = 0x020000;
+ static final int DB_LOCK_DEFAULT = 1;
+ static final int DB_LOCK_EXPIRE = 2;
+ static final int DB_LOCK_GET = 1;
+ static final int DB_LOCK_GET_TIMEOUT = 2;
+ static final int DB_LOCK_IREAD = 5;
+ static final int DB_LOCK_IWR = 6;
+ static final int DB_LOCK_IWRITE = 4;
+ static final int DB_LOCK_MAXLOCKS = 3;
+ static final int DB_LOCK_MINLOCKS = 4;
+ static final int DB_LOCK_MINWRITE = 5;
+ static final int DB_LOCK_NOWAIT = 0x002;
+ static final int DB_LOCK_OLDEST = 6;
+ static final int DB_LOCK_PUT = 4;
+ static final int DB_LOCK_PUT_ALL = 5;
+ static final int DB_LOCK_PUT_OBJ = 6;
+ static final int DB_LOCK_RANDOM = 7;
+ static final int DB_LOCK_READ = 1;
+ static final int DB_LOCK_TIMEOUT = 8;
+ static final int DB_LOCK_WRITE = 2;
+ static final int DB_LOCK_YOUNGEST = 8;
+ static final int DB_MULTIPLE = 0x04000000;
+ static final int DB_MULTIPLE_KEY = 0x08000000;
+ static final int DB_NEXT = 19;
+ static final int DB_NEXT_DUP = 20;
+ static final int DB_NEXT_NODUP = 21;
+ static final int DB_NODUPDATA = 22;
+ static final int DB_NOLOCKING = 0x004000;
+ static final int DB_NOMMAP = 0x000008;
+ static final int DB_NOORDERCHK = 0x000002;
+ static final int DB_NOOVERWRITE = 23;
+ static final int DB_NOPANIC = 0x008000;
+ static final int DB_NOSYNC = 24;
+ static final int DB_ODDFILESIZE = 0x002000;
+ static final int DB_ORDERCHKONLY = 0x000004;
+ static final int DB_OVERWRITE = 0x010000;
+ static final int DB_PANIC_ENVIRONMENT = 0x020000;
+ static final int DB_POSITION = 25;
+ static final int DB_PREV = 27;
+ static final int DB_PREV_NODUP = 28;
+ static final int DB_PRINTABLE = 0x000020;
+ static final int DB_PRIORITY_DEFAULT = 3;
+ static final int DB_PRIORITY_HIGH = 4;
+ static final int DB_PRIORITY_LOW = 2;
+ static final int DB_PRIORITY_VERY_HIGH = 5;
+ static final int DB_PRIORITY_VERY_LOW = 1;
+ static final int DB_PRIVATE = 0x040000;
+ static final int DB_RDONLY = 0x000010;
+ static final int DB_RECNUM = 0x000010;
+ static final int DB_RECORDCOUNT = 29;
+ static final int DB_RECOVER = 0x000020;
+ static final int DB_RECOVER_FATAL = 0x080000;
+ static final int DB_REGION_INIT = 0x040000;
+ static final int DB_RENUMBER = 0x000020;
+ static final int DB_REP_CLIENT = 0x001;
+ static final int DB_REP_LOGSONLY = 0x002;
+ static final int DB_REP_MASTER = 0x004;
+ static final int DB_REP_PERMANENT = 0x0001;
+ static final int DB_REP_UNAVAIL = -30983;
+ static final int DB_REVSPLITOFF = 0x000040;
+ static final int DB_RMW = 0x40000000;
+ static final int DB_SALVAGE = 0x000040;
+ static final int DB_SET = 30;
+ static final int DB_SET_LOCK_TIMEOUT = 31;
+ static final int DB_SET_RANGE = 32;
+ static final int DB_SET_RECNO = 33;
+ static final int DB_SET_TXN_TIMEOUT = 35;
+ static final int DB_SNAPSHOT = 0x000080;
+ static final int DB_STAT_CLEAR = 0x000001;
+ static final int DB_SYSTEM_MEM = 0x100000;
+ static final int DB_THREAD = 0x000040;
+ static final int DB_TRUNCATE = 0x000080;
+ static final int DB_TXN_NOSYNC = 0x000100;
+ static final int DB_TXN_NOWAIT = 0x000800;
+ static final int DB_TXN_SYNC = 0x001000;
+ static final int DB_TXN_WRITE_NOSYNC = 0x080000;
+ static final int DB_UPGRADE = 0x000001;
+ static final int DB_USE_ENVIRON = 0x000200;
+ static final int DB_USE_ENVIRON_ROOT = 0x000400;
+ static final int DB_VERB_CHKPOINT = 0x0001;
+ static final int DB_VERB_DEADLOCK = 0x0002;
+ static final int DB_VERB_RECOVERY = 0x0004;
+ static final int DB_VERB_REPLICATION = 0x0008;
+ static final int DB_VERB_WAITSFOR = 0x0010;
+ static final int DB_VERIFY = 0x000002;
+ static final int DB_VERSION_MAJOR = 4;
+ static final int DB_VERSION_MINOR = 1;
+ static final int DB_VERSION_PATCH = 24;
+ static final int DB_WRITECURSOR = 37;
+ static final int DB_XA_CREATE = 0x000001;
+ static final int DB_XIDDATASIZE = 128;
+ static final int DB_YIELDCPU = 0x100000;
+}
+
+// end of DbConstants.java
diff --git a/libdb/java/src/com/sleepycat/db/DbDeadlockException.java b/libdb/java/src/com/sleepycat/db/DbDeadlockException.java
new file mode 100644
index 0000000..2fdc537
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbDeadlockException.java
@@ -0,0 +1,28 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+public class DbDeadlockException extends DbException
+{
+ // methods
+ //
+
+ public DbDeadlockException(String s)
+ {
+ super(s);
+ }
+
+ public DbDeadlockException(String s, int errno)
+ {
+ super(s, errno);
+ }
+}
+
+// end of DbDeadlockException.java
diff --git a/libdb/java/src/com/sleepycat/db/DbDupCompare.java b/libdb/java/src/com/sleepycat/db/DbDupCompare.java
new file mode 100644
index 0000000..8809665
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbDupCompare.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_dup_compare()
+ *
+ */
+public interface DbDupCompare
+{
+ public abstract int dup_compare(Db db, Dbt dbt1, Dbt dbt2);
+}
+
+// end of DbDupCompare.java
diff --git a/libdb/java/src/com/sleepycat/db/DbEnv.java b/libdb/java/src/com/sleepycat/db/DbEnv.java
new file mode 100644
index 0000000..198e51f
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbEnv.java
@@ -0,0 +1,430 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+import java.io.OutputStream;
+import java.io.FileNotFoundException;
+import java.util.Date;
+import java.util.Enumeration;
+import java.util.Vector;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbEnv
+{
+ // methods
+ //
+
+ //
+ // After using this constructor, set any parameters via
+ // the set_* access methods below, and finally open
+ // the environment by calling open().
+ //
+ public DbEnv(int flags) throws DbException
+ {
+ constructor_flags_ = flags;
+ _init(errstream_, constructor_flags_);
+ }
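+
+ // A minimal sketch of that sequence (illustrative only; the flag
+ // combination, cache size and home directory are examples, not
+ // requirements):
+ //
+ // DbEnv env = new DbEnv(0);
+ // env.set_cachesize(0, 4 * 1024 * 1024, 1);
+ // env.open("/tmp/dbenv",
+ //     Db.DB_CREATE | Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ //     Db.DB_INIT_LOG | Db.DB_INIT_TXN, 0);
+ // ... use the environment ...
+ // env.close(0);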
+
+ //
+ // This constructor is purposely not public.
+ // It is used internally to create a DbEnv wrapper
+ // when an underlying environment already exists.
+ //
+ /*package*/ DbEnv(Db db)
+ {
+ _init_using_db(errstream_, db);
+ }
+
+ //
+ // When a Db is created, it is kept in a private list,
+ // so that Db's can be notified when the environment
+ // is closed. This allows us to detect and guard
+ // against the following situation:
+ // DbEnv env = new DbEnv(0);
+ // Db db = new Db(0);
+ // env.close();
+ // db.close();
+ //
+ // This *is* a programming error, but not protecting
+ // against it will crash the VM.
+ //
+ /*package*/ void _add_db(Db db)
+ {
+ dblist_.addElement(db);
+ }
+
+ //
+ // Remove from the private list of Db's.
+ //
+ /*package*/ void _remove_db(Db db)
+ {
+ dblist_.removeElement(db);
+ }
+
+ //
+ // Iterate all the Db's in the list, and
+ // notify them that the environment is closing,
+ // so they can clean up.
+ //
+ /*package*/ void _notify_dbs()
+ {
+ // Note: "enum" is a reserved word in later Java releases, so a
+ // plain identifier is used for the Enumeration here.
+ Enumeration e = dblist_.elements();
+ while (e.hasMoreElements()) {
+ Db db = (Db)e.nextElement();
+ db._notify_dbenv_close();
+ }
+ dblist_.removeAllElements();
+ }
+
+ // close discards any internal memory.
+ // After using close, the DbEnv can be reopened.
+ //
+ public synchronized void close(int flags)
+ throws DbException
+ {
+ _notify_dbs();
+ _close(flags);
+ }
+
+ // (Internal)
+ private native void _close(int flags)
+ throws DbException;
+
+ public native void dbremove(DbTxn txn, String name, String subdb,
+ int flags)
+ throws DbException;
+
+ public native void dbrename(DbTxn txn, String name, String subdb,
+ String newname, int flags)
+ throws DbException;
+
+ public native void err(int errcode, String message);
+
+ public native void errx(String message);
+
+ // overrides Object.finalize
+ protected void finalize()
+ throws Throwable
+ {
+ _notify_dbs();
+ _finalize(errcall_, errpfx_);
+ }
+
+ // (Internal)
+ protected native void _finalize(DbErrcall errcall, String errpfx)
+ throws Throwable;
+
+ // (Internal)
+ // called during constructor
+ private native void _init(DbErrcall errcall, int flags) throws DbException;
+
+ // (Internal)
+ // called when DbEnv is constructed as part of Db constructor.
+ private native void _init_using_db(DbErrcall errcall, Db db);
+
+ /*package*/ native void _notify_db_close();
+
+ public native void open(String db_home, int flags, int mode)
+ throws DbException, FileNotFoundException;
+
+ // remove removes any files and discards any internal memory.
+ // (i.e. implicitly it does a close, if the environment is open).
+ // After using remove, the DbEnv can no longer be used;
+ // create another one if needed.
+ //
+ public native synchronized void remove(String db_home, int flags)
+ throws DbException, FileNotFoundException;
+
+ ////////////////////////////////////////////////////////////////
+ // simple get/set access methods
+ //
+ // If you are calling set_ methods, you need to
+ // use the constructor with one argument along with open().
+
+ public native void set_cachesize(int gbytes, int bytes, int ncaches)
+ throws DbException;
+
+ // Encryption
+ public native void set_encrypt(String passwd, /*u_int32_t*/ int flags)
+ throws DbException;
+
+ // Error message callback.
+ public void set_errcall(DbErrcall errcall)
+ {
+ errcall_ = errcall;
+ _set_errcall(errcall);
+ }
+
+ public native void _set_errcall(DbErrcall errcall);
+
+ // Error stream.
+ public void set_error_stream(OutputStream s)
+ {
+ DbOutputStreamErrcall errcall = new DbOutputStreamErrcall(s);
+ set_errcall(errcall);
+ }
+
+ // Error message prefix.
+ public void set_errpfx(String errpfx)
+ {
+ errpfx_ = errpfx;
+ _set_errpfx(errpfx);
+ }
+
+ private native void _set_errpfx(String errpfx);
+
+ // Feedback
+ public void set_feedback(DbEnvFeedback feedback)
+ throws DbException
+ {
+ feedback_ = feedback;
+ feedback_changed(feedback);
+ }
+
+ // (Internal)
+ private native void feedback_changed(DbEnvFeedback feedback)
+ throws DbException;
+
+ // Generate debugging messages.
+ public native void set_verbose(int which, boolean onoff)
+ throws DbException;
+
+ public native void set_data_dir(String data_dir)
+ throws DbException;
+
+ // Log buffer size.
+ public native void set_lg_bsize(/*u_int32_t*/ int lg_bsize)
+ throws DbException;
+
+ // Log directory.
+ public native void set_lg_dir(String lg_dir)
+ throws DbException;
+
+ // Maximum log file size.
+ public native void set_lg_max(/*u_int32_t*/ int lg_max)
+ throws DbException;
+
+ // Log region size.
+ public native void set_lg_regionmax(/*u_int32_t*/ int lg_regionmax)
+ throws DbException;
+
+ // Two dimensional conflict matrix.
+ public native void set_lk_conflicts(byte[][] lk_conflicts)
+ throws DbException;
+
+ // Deadlock detect on every conflict.
+ public native void set_lk_detect(/*u_int32_t*/ int lk_detect)
+ throws DbException;
+
+ /**
+ * @deprecated DB 3.2.6, see the online documentation.
+ */
+ // Maximum number of locks.
+ public native void set_lk_max(/*unsigned*/ int lk_max)
+ throws DbException;
+
+ // Maximum number of lockers.
+ public native void set_lk_max_lockers(/*unsigned*/ int lk_max_lockers)
+ throws DbException;
+
+ // Maximum number of locks.
+ public native void set_lk_max_locks(/*unsigned*/ int lk_max_locks)
+ throws DbException;
+
+ // Maximum number of locked objects.
+ public native void set_lk_max_objects(/*unsigned*/ int lk_max_objects)
+ throws DbException;
+
+ // Maximum file size for mmap.
+ public native void set_mp_mmapsize(/*size_t*/ long mmapsize)
+ throws DbException;
+
+ public native void set_flags(int flags, boolean onoff)
+ throws DbException;
+
+ public native void set_rep_limit(int gbytes, int bytes) throws DbException;
+
+ public void set_rep_transport(int envid, DbRepTransport transport)
+ throws DbException
+ {
+ rep_transport_ = transport;
+ rep_transport_changed(envid, transport);
+ }
+
+ // (Internal)
+ private native void rep_transport_changed(int envid,
+ DbRepTransport transport)
+ throws DbException;
+
+ public native void set_rpc_server(DbClient client, String host,
+ long cl_timeout, long sv_timeout,
+ int flags)
+ throws DbException;
+
+ public native void set_shm_key(long shm_key)
+ throws DbException;
+
+ public native void set_tas_spins(int tas_spins)
+ throws DbException;
+
+ public native void set_timeout(/*db_timeout_t*/ long timeout,
+ /*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native void set_tmp_dir(String tmp_dir)
+ throws DbException;
+
+ // Application transaction-recovery dispatch callback.
+ public void set_app_dispatch(DbAppDispatch app_dispatch)
+ throws DbException
+ {
+ app_dispatch_ = app_dispatch;
+ app_dispatch_changed(app_dispatch);
+ }
+
+ // (Internal)
+ private native void app_dispatch_changed(DbAppDispatch app_dispatch)
+ throws DbException;
+
+ // Maximum number of transactions.
+ public native void set_tx_max(/*unsigned*/ int tx_max)
+ throws DbException;
+
+ // Note: only the seconds (not milliseconds) of the timestamp
+ // are used in this API.
+ public void set_tx_timestamp(Date timestamp)
+ throws DbException
+ {
+ _set_tx_timestamp(timestamp.getTime()/1000);
+ }
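+
+ // For example (illustrative): env.set_tx_timestamp(new Date()) asks
+ // recovery to stop at the current time, truncated to a whole second.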
+
+ // (Internal)
+ private native void _set_tx_timestamp(long seconds)
+ throws DbException;
+
+ // Versioning information
+ public native static int get_version_major();
+ public native static int get_version_minor();
+ public native static int get_version_patch();
+ public native static String get_version_string();
+
+ // Convert DB error codes to strings
+ public native static String strerror(int errcode);
+
+ public native int lock_detect(int flags, int atype)
+ throws DbException;
+
+ public native DbLock lock_get(/*u_int32_t*/ int locker,
+ int flags,
+ Dbt obj,
+ /*db_lockmode_t*/ int lock_mode)
+ throws DbException;
+
+ public native void lock_put(DbLock lock)
+ throws DbException;
+
+ public native /*u_int32_t*/ int lock_id()
+ throws DbException;
+
+ public native void lock_id_free(/*u_int32_t*/ int id)
+ throws DbException;
+
+ public native DbLockStat lock_stat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native void lock_vec(/*u_int32_t*/ int locker,
+ int flags,
+ DbLockRequest[] list,
+ int offset,
+ int count)
+ throws DbException;
+
+ public native String[] log_archive(int flags)
+ throws DbException;
+
+ public native static int log_compare(DbLsn lsn0, DbLsn lsn1);
+
+ public native DbLogc log_cursor(int flags)
+ throws DbException;
+
+ public native String log_file(DbLsn lsn)
+ throws DbException;
+
+ public native void log_flush(DbLsn lsn)
+ throws DbException;
+
+ public native void log_put(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ public native DbLogStat log_stat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native DbMpoolStat memp_stat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native DbMpoolFStat[] memp_fstat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native int memp_trickle(int pct)
+ throws DbException;
+
+ public native int rep_elect(int nsites, int pri, int timeout)
+ throws DbException;
+
+ public static class RepProcessMessage {
+ public int envid;
+ }
+ public native int rep_process_message(Dbt control, Dbt rec,
+ RepProcessMessage result)
+ throws DbException;
+
+ public native void rep_start(Dbt cookie, int flags)
+ throws DbException;
+
+ public native DbRepStat rep_stat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ public native DbTxn txn_begin(DbTxn pid, int flags)
+ throws DbException;
+
+ public native void txn_checkpoint(int kbyte, int min, int flags)
+ throws DbException;
+
+ public native DbPreplist[] txn_recover(int count, int flags)
+ throws DbException;
+
+ public native DbTxnStat txn_stat(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private long private_info_ = 0;
+ private int constructor_flags_ = 0;
+ private Vector dblist_ = new Vector(); // Db's that are open
+ private DbEnvFeedback feedback_ = null;
+ private DbRepTransport rep_transport_ = null;
+ private DbAppDispatch app_dispatch_ = null;
+ private DbOutputStreamErrcall errstream_ =
+ new DbOutputStreamErrcall(System.err);
+ /*package*/ DbErrcall errcall_ = errstream_;
+ /*package*/ String errpfx_;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbEnv.java
diff --git a/libdb/java/src/com/sleepycat/db/DbEnvFeedback.java b/libdb/java/src/com/sleepycat/db/DbEnvFeedback.java
new file mode 100644
index 0000000..236b909
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbEnvFeedback.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+public interface DbEnvFeedback
+{
+ // methods
+ //
+ public abstract void feedback(DbEnv env, int opcode, int pct);
+}
+
+// end of DbEnvFeedback.java
diff --git a/libdb/java/src/com/sleepycat/db/DbErrcall.java b/libdb/java/src/com/sleepycat/db/DbErrcall.java
new file mode 100644
index 0000000..4409142
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbErrcall.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public interface DbErrcall
+{
+ // methods
+ //
+ public abstract void errcall(String prefix, String buffer);
+}
+
+// end of DbErrcall.java
diff --git a/libdb/java/src/com/sleepycat/db/DbException.java b/libdb/java/src/com/sleepycat/db/DbException.java
new file mode 100644
index 0000000..58d39c7
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbException.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbException extends Exception
+{
+ // methods
+ //
+
+ public DbException(String s)
+ {
+ super(s);
+ }
+
+ public DbException(String s, int errno)
+ {
+ super(s);
+ this.errno_ = errno;
+ }
+
+ public String toString()
+ {
+ String s = super.toString();
+ if (errno_ == 0)
+ return s;
+ else
+ return s + ": " + DbEnv.strerror(errno_);
+
+ }
+
+ // get/set methods
+ //
+
+ public int get_errno()
+ {
+ return errno_;
+ }
+
+ // private data
+ //
+
+ private int errno_ = 0;
+}
+
+// end of DbException.java
diff --git a/libdb/java/src/com/sleepycat/db/DbFeedback.java b/libdb/java/src/com/sleepycat/db/DbFeedback.java
new file mode 100644
index 0000000..54909ba
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbFeedback.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public interface DbFeedback
+{
+ // methods
+ //
+ public abstract void feedback(Db db, int opcode, int pct);
+}
+
+// end of DbFeedback.java
diff --git a/libdb/java/src/com/sleepycat/db/DbHash.java b/libdb/java/src/com/sleepycat/db/DbHash.java
new file mode 100644
index 0000000..e911a20
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbHash.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by Db.set_h_hash()
+ *
+ */
+public interface DbHash
+{
+ public abstract int hash(Db db, byte[] data, int len);
+}
+
+// end of DbHash.java
diff --git a/libdb/java/src/com/sleepycat/db/DbHashStat.java b/libdb/java/src/com/sleepycat/db/DbHashStat.java
new file mode 100644
index 0000000..97de612
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbHashStat.java
@@ -0,0 +1,24 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbHashStat
+{
+ public int hash_magic;
+ public int hash_version;
+ public int hash_metaflags;
+ public int hash_nkeys;
+ public int hash_ndata;
+ public int hash_pagesize;
+ public int hash_ffactor;
+ public int hash_buckets;
+ public int hash_free;
+ public int hash_bfree;
+ public int hash_bigpages;
+ public int hash_big_bfree;
+ public int hash_overflows;
+ public int hash_ovfl_free;
+ public int hash_dup;
+ public int hash_dup_free;
+}
+// end of DbHashStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbKeyRange.java b/libdb/java/src/com/sleepycat/db/DbKeyRange.java
new file mode 100644
index 0000000..c85c6ee
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbKeyRange.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbKeyRange
+{
+ public double less;
+ public double equal;
+ public double greater;
+}
+
+// end of DbKeyRange.java
diff --git a/libdb/java/src/com/sleepycat/db/DbLock.java b/libdb/java/src/com/sleepycat/db/DbLock.java
new file mode 100644
index 0000000..0c14c00
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLock.java
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLock
+{
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLock.java
diff --git a/libdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java b/libdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java
new file mode 100644
index 0000000..5142bec
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+public class DbLockNotGrantedException extends DbException {
+ public DbLockNotGrantedException(String message,
+ int op, int mode, Dbt obj,
+ DbLock lock, int index)
+ {
+ super(message, Db.DB_LOCK_NOTGRANTED);
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ this.index = index;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public int get_index()
+ {
+ return index;
+ }
+
+ private int op;
+ private int mode;
+ private Dbt obj;
+ private DbLock lock;
+ private int index;
+
+}
+
diff --git a/libdb/java/src/com/sleepycat/db/DbLockRequest.java b/libdb/java/src/com/sleepycat/db/DbLockRequest.java
new file mode 100644
index 0000000..d9eb49c
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLockRequest.java
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+public class DbLockRequest
+{
+ public DbLockRequest(int op, int mode, Dbt obj, DbLock lock)
+ {
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public void set_op(int op)
+ {
+ this.op = op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public void set_mode(int mode)
+ {
+ this.mode = mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public void set_obj(Dbt obj)
+ {
+ this.obj = obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public void set_lock(DbLock lock)
+ {
+ this.lock = lock;
+ }
+
+ private /* db_lockop_t */ int op;
+ private /* db_lockmode_t */ int mode;
+ private /* db_timeout_t */ int timeout;
+ private Dbt obj;
+ private DbLock lock;
+}
diff --git a/libdb/java/src/com/sleepycat/db/DbLockStat.java b/libdb/java/src/com/sleepycat/db/DbLockStat.java
new file mode 100644
index 0000000..f0903f0
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLockStat.java
@@ -0,0 +1,32 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbLockStat
+{
+ public int st_id;
+ public int st_cur_maxid;
+ public int st_maxlocks;
+ public int st_maxlockers;
+ public int st_maxobjects;
+ public int st_nmodes;
+ public int st_nlocks;
+ public int st_maxnlocks;
+ public int st_nlockers;
+ public int st_maxnlockers;
+ public int st_nobjects;
+ public int st_maxnobjects;
+ public int st_nconflicts;
+ public int st_nrequests;
+ public int st_nreleases;
+ public int st_nnowaits;
+ public int st_ndeadlocks;
+ public int st_locktimeout;
+ public int st_nlocktimeouts;
+ public int st_txntimeout;
+ public int st_ntxntimeouts;
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_regsize;
+}
+// end of DbLockStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbLogStat.java b/libdb/java/src/com/sleepycat/db/DbLogStat.java
new file mode 100644
index 0000000..19e5be2
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLogStat.java
@@ -0,0 +1,29 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbLogStat
+{
+ public int st_magic;
+ public int st_version;
+ public int st_mode;
+ public int st_lg_bsize;
+ public int st_lg_size;
+ public int st_w_bytes;
+ public int st_w_mbytes;
+ public int st_wc_bytes;
+ public int st_wc_mbytes;
+ public int st_wcount;
+ public int st_wcount_fill;
+ public int st_scount;
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_cur_file;
+ public int st_cur_offset;
+ public int st_disk_file;
+ public int st_disk_offset;
+ public int st_regsize;
+ public int st_maxcommitperflush;
+ public int st_mincommitperflush;
+}
+// end of DbLogStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbLogc.java b/libdb/java/src/com/sleepycat/db/DbLogc.java
new file mode 100644
index 0000000..f9d0e4f
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLogc.java
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLogc
+{
+ // methods
+ //
+ public native void close(int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLogc.java
diff --git a/libdb/java/src/com/sleepycat/db/DbLsn.java b/libdb/java/src/com/sleepycat/db/DbLsn.java
new file mode 100644
index 0000000..eeec1dc
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbLsn.java
@@ -0,0 +1,42 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLsn
+{
+ // methods
+ //
+ public DbLsn()
+ {
+ init_lsn();
+ }
+
+ protected native void finalize()
+ throws Throwable;
+
+ private native void init_lsn();
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLsn.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMemoryException.java b/libdb/java/src/com/sleepycat/db/DbMemoryException.java
new file mode 100644
index 0000000..a996678
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMemoryException.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+public class DbMemoryException extends DbException
+{
+ // methods
+ //
+
+ public DbMemoryException(String s)
+ {
+ super(s);
+ }
+
+ public DbMemoryException(String s, int errno)
+ {
+ super(s, errno);
+ }
+
+ public void set_dbt(Dbt dbt)
+ {
+ this.dbt = dbt;
+ }
+
+ public Dbt get_dbt()
+ {
+ return dbt;
+ }
+
+ /* Override of DbException.toString():
+ * the extra verbiage that comes from DbEnv.strerror(ENOMEM)
+ * is not helpful.
+ */
+ public String toString()
+ {
+ return getMessage();
+ }
+
+ Dbt dbt = null;
+}
+
+// end of DbMemoryException.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMpoolFStat.java b/libdb/java/src/com/sleepycat/db/DbMpoolFStat.java
new file mode 100644
index 0000000..cc03b56
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMpoolFStat.java
@@ -0,0 +1,16 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbMpoolFStat
+{
+ public String file_name;
+ public int st_pagesize;
+ public int st_map;
+ public int st_cache_hit;
+ public int st_cache_miss;
+ public int st_page_create;
+ public int st_page_in;
+ public int st_page_out;
+}
+// end of DbMpoolFStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMpoolStat.java b/libdb/java/src/com/sleepycat/db/DbMpoolStat.java
new file mode 100644
index 0000000..03fc2d1
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMpoolStat.java
@@ -0,0 +1,38 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.memp_stat() method.
+ */
+public class DbMpoolStat
+{
+ public int st_cache_hit; // Pages found in the cache.
+ public int st_cache_miss; // Pages not found in the cache.
+ public int st_map; // Pages from mapped files.
+ public int st_page_create; // Pages created in the cache.
+ public int st_page_in; // Pages read in.
+ public int st_page_out; // Pages written out.
+ public int st_ro_evict; // Clean pages forced from the cache.
+ public int st_rw_evict; // Dirty pages forced from the cache.
+ public int st_hash_buckets; // Number of hash buckets.
+ public int st_hash_searches; // Total hash chain searches.
+ public int st_hash_longest; // Longest hash chain searched.
+ public int st_hash_examined; // Total hash entries searched.
+ public int st_page_clean; // Clean pages.
+ public int st_page_dirty; // Dirty pages.
+ public int st_page_trickle; // Pages written by memp_trickle.
+ public int st_region_wait; // Region lock granted after wait.
+ public int st_region_nowait; // Region lock granted without wait.
+ public int st_regsize; // Region size.
+}
+
+// end of DbMpoolStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java b/libdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java
new file mode 100644
index 0000000..cbfbe91
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt data)
+ {
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ // crack out the data offset and length.
+ if (dataoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
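+
+ // Illustrative sketch (not part of the original source): assuming
+ // "data" was filled by a Db.get() or Dbc.get() call made with the
+ // Db.DB_MULTIPLE flag into a user-supplied buffer (DB_DBT_USERMEM
+ // with a sufficiently large ulen), each returned item can be read
+ // back like this:
+ //
+ // DbMultipleDataIterator it = new DbMultipleDataIterator(data);
+ // Dbt element = new Dbt();
+ // while (it.next(element)) {
+ //     // element.get_data(), get_offset() and get_size() describe
+ //     // one returned data item within the shared buffer.
+ // }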
+}
+
+// end of DbMultipleDataIterator.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMultipleIterator.java b/libdb/java/src/com/sleepycat/db/DbMultipleIterator.java
new file mode 100644
index 0000000..c81ecf9
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMultipleIterator.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+// DbMultipleIterator is a shared package-private base class for the three
+// types of bulk-return Iterator; it should never be instantiated directly,
+// but it handles the functionality shared by its subclasses.
+class DbMultipleIterator
+{
+ // Package-private methods and members: used by our subclasses.
+
+ // Called implicitly by the subclass
+ DbMultipleIterator(Dbt data)
+ {
+ buf = data.get_data();
+ size = data.get_ulen();
+ // The offset will always be zero from the front of the buffer
+ // DB returns, and the buffer is opaque, so don't bother
+ // handling an offset.
+
+ // The initial position is pointing at the last u_int32_t
+ // in the buffer.
+ pos = size - int32sz;
+ }
+
+ // The C macros use sizeof(u_int32_t). Fortunately, Java ints
+ // are always four bytes. Make this a constant just for form's sake.
+ static final int int32sz = 4;
+
+ // Current position within the buffer; equivalent to "pointer"
+ // in the DB_MULTIPLE macros.
+ int pos;
+
+ // A reference to the encoded buffer returned from the original
+ // Db/Dbc.get call on the data Dbt, and its size.
+ byte[] buf;
+ int size;
+}
+
+// end of DbMultipleIterator.java
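+
+// Illustrative note (not part of the original source): these iterators are
+// meant to be paired with a Db/Dbc get call that passes a user-supplied
+// buffer and the Db.DB_MULTIPLE_KEY flag (see BulkAccessExample).  A minimal
+// sketch, assuming an already-open Db handle named "db":
+//
+//     Dbt key = new Dbt();
+//     Dbt bulk = new Dbt();
+//     bulk.set_flags(Db.DB_DBT_USERMEM);      // bulk gets need a user buffer
+//     bulk.set_data(new byte[1024 * 1024]);
+//     bulk.set_ulen(1024 * 1024);
+//
+//     Dbc cursor = db.cursor(null, 0);
+//     while (cursor.get(key, bulk, Db.DB_NEXT | Db.DB_MULTIPLE_KEY) == 0) {
+//         DbMultipleKeyDataIterator it = new DbMultipleKeyDataIterator(bulk);
+//         Dbt k = new Dbt();
+//         Dbt d = new Dbt();
+//         while (it.next(k, d))
+//             System.out.println(new String(k.get_data(), k.get_offset(),
+//                                           k.get_size()));
+//     }
+//     cursor.close();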
diff --git a/libdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java b/libdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java
new file mode 100644
index 0000000..3085c11
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleKeyDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleKeyDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt key, Dbt data)
+ {
+ int keyoff = DbUtil.array2int(buf, pos);
+
+ // crack out the key and data offsets and lengths.
+ if (keyoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int keysz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ key.set_data(buf);
+ key.set_size(keysz);
+ key.set_offset(keyoff);
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
+}
+
+// end of DbMultipleKeyDataIterator.java
diff --git a/libdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java b/libdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java
new file mode 100644
index 0000000..7611664
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleRecnoDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleRecnoDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt key, Dbt data)
+ {
+ int keyoff = DbUtil.array2int(buf, pos);
+
+ // crack out the key offset and the data offset and length.
+ if (keyoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ key.set_recno_key_from_buffer(buf, keyoff);
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
+}
+
+// end of DbMultipleRecnoDataIterator.java
diff --git a/libdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java b/libdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
new file mode 100644
index 0000000..b2a472e
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
@@ -0,0 +1,58 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+import java.io.OutputStream;
+import java.io.IOException;
+
+/**
+ *
+ * @author Donald D. Anderson
+ *
+ * This class is not public, as it is only used internally
+ * by Db to implement a default error handler.
+ */
+
+/*package*/ class DbOutputStreamErrcall implements DbErrcall
+{
+ DbOutputStreamErrcall(OutputStream stream)
+ {
+ this.stream_ = stream;
+ }
+
+ // errcall implements DbErrcall
+ //
+ public void errcall(String prefix, String buffer)
+ {
+ try {
+ if (prefix != null) {
+ stream_.write(prefix.getBytes());
+ stream_.write((new String(": ")).getBytes());
+ }
+ stream_.write(buffer.getBytes());
+ stream_.write((new String("\n")).getBytes());
+ }
+ catch (IOException e) {
+
+ // well, we tried.
+ // Do our best to report the problem by other means.
+ //
+ System.err.println("DbOutputStreamErrcall Exception: " + e);
+ if (prefix != null)
+ System.err.print(prefix + ": ");
+ System.err.println(buffer + "\n");
+ }
+ }
+
+ // private data
+ //
+ private OutputStream stream_;
+}
+
+// end of DbOutputStreamErrcall.java
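+
+// Illustrative note (not part of the original source): this is the sort of
+// handler that sits behind the set_error_stream() calls used in the example
+// programs, e.g.
+//
+//     Db table = new Db(null, 0);
+//     table.set_error_stream(System.err);  // errors reported via a DbErrcall
+//
+// so applications normally have no reason to construct one directly.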
diff --git a/libdb/java/src/com/sleepycat/db/DbPreplist.java b/libdb/java/src/com/sleepycat/db/DbPreplist.java
new file mode 100644
index 0000000..477d248
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbPreplist.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.txn_recover() method.
+ */
+public class DbPreplist
+{
+ public DbTxn txn;
+ public byte gid[];
+}
+
+// end of DbPreplist.java
diff --git a/libdb/java/src/com/sleepycat/db/DbQueueStat.java b/libdb/java/src/com/sleepycat/db/DbQueueStat.java
new file mode 100644
index 0000000..67d229a
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbQueueStat.java
@@ -0,0 +1,21 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbQueueStat
+{
+ public int qs_magic;
+ public int qs_version;
+ public int qs_metaflags;
+ public int qs_nkeys;
+ public int qs_ndata;
+ public int qs_pagesize;
+ public int qs_extentsize;
+ public int qs_pages;
+ public int qs_re_len;
+ public int qs_re_pad;
+ public int qs_pgfree;
+ public int qs_first_recno;
+ public int qs_cur_recno;
+}
+// end of DbQueueStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbRepStat.java b/libdb/java/src/com/sleepycat/db/DbRepStat.java
new file mode 100644
index 0000000..953d10e
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbRepStat.java
@@ -0,0 +1,43 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbRepStat
+{
+ public int st_status;
+ public DbLsn st_next_lsn;
+ public DbLsn st_waiting_lsn;
+ public int st_dupmasters;
+ public int st_env_id;
+ public int st_env_priority;
+ public int st_gen;
+ public int st_log_duplicated;
+ public int st_log_queued;
+ public int st_log_queued_max;
+ public int st_log_queued_total;
+ public int st_log_records;
+ public int st_log_requested;
+ public int st_master;
+ public int st_master_changes;
+ public int st_msgs_badgen;
+ public int st_msgs_processed;
+ public int st_msgs_recover;
+ public int st_msgs_send_failures;
+ public int st_msgs_sent;
+ public int st_newsites;
+ public int st_nsites;
+ public int st_nthrottles;
+ public int st_outdated;
+ public int st_txns_applied;
+ public int st_elections;
+ public int st_elections_won;
+ public int st_election_cur_winner;
+ public int st_election_gen;
+ public DbLsn st_election_lsn;
+ public int st_election_nsites;
+ public int st_election_priority;
+ public int st_election_status;
+ public int st_election_tiebreaker;
+ public int st_election_votes;
+}
+// end of DbRepStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbRepTransport.java b/libdb/java/src/com/sleepycat/db/DbRepTransport.java
new file mode 100644
index 0000000..d2c71c0
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbRepTransport.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is used as a callback by DbEnv.set_rep_transport.
+ */
+public interface DbRepTransport
+{
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException;
+}
diff --git a/libdb/java/src/com/sleepycat/db/DbRunRecoveryException.java b/libdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
new file mode 100644
index 0000000..76e9a63
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbRunRecoveryException extends DbException
+{
+ // methods
+ //
+
+ public DbRunRecoveryException(String s)
+ {
+ super(s);
+ }
+
+ public DbRunRecoveryException(String s, int errno)
+ {
+ super(s, errno);
+ }
+}
+
+// end of DbRunRecoveryException.java
diff --git a/libdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java b/libdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java
new file mode 100644
index 0000000..f8b8179
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is used as a callback by Db.associate.
+ */
+public interface DbSecondaryKeyCreate
+{
+ public int secondary_key_create(Db secondary, Dbt key,
+ Dbt data, Dbt result)
+ throws DbException;
+}
+
+// end of DbSecondaryKeyCreate.java
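+
+// Illustrative sketch (not part of the original source): an implementation
+// of this callback typically points "result" at some portion of the primary
+// record and returns 0 so that Db.associate() can index it.  For example, a
+// secondary key built from the first four bytes of the primary data:
+//
+//     public int secondary_key_create(Db secondary, Dbt key,
+//                                     Dbt data, Dbt result)
+//         throws DbException
+//     {
+//         result.set_data(data.get_data());
+//         result.set_offset(data.get_offset());
+//         result.set_size(4);
+//         return (0);
+//     }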
diff --git a/libdb/java/src/com/sleepycat/db/DbTxn.java b/libdb/java/src/com/sleepycat/db/DbTxn.java
new file mode 100644
index 0000000..01db938
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbTxn.java
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbTxn
+{
+ // methods
+ //
+ public native void abort()
+ throws DbException;
+
+ public native void commit(int flags)
+ throws DbException;
+
+ public native void discard(int flags)
+ throws DbException;
+
+ public native /*u_int32_t*/ int id()
+ throws DbException;
+
+ public native void prepare(byte[] gid)
+ throws DbException;
+
+ public native void set_timeout(/*db_timeout_t*/ long timeout,
+ /*u_int32_t*/ int flags)
+ throws DbException;
+
+ // We override Object.equals because it is possible for
+ // the Java API to create multiple DbTxns that reference
+ // the same underlying object. This can happen for example
+ // during DbEnv.txn_recover().
+ //
+ public boolean equals(Object obj)
+ {
+ if (this == obj)
+ return true;
+
+ if (obj != null && (obj instanceof DbTxn)) {
+ DbTxn that = (DbTxn)obj;
+ return (this.private_dbobj_ == that.private_dbobj_);
+ }
+ return false;
+ }
+
+ // We must override Object.hashCode whenever we override
+ // Object.equals() to enforce the maxim that equal objects
+ // have the same hashcode.
+ //
+ public int hashCode()
+ {
+ return ((int)private_dbobj_ ^ (int)(private_dbobj_ >> 32));
+ }
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbTxn.java
diff --git a/libdb/java/src/com/sleepycat/db/DbTxnStat.java b/libdb/java/src/com/sleepycat/db/DbTxnStat.java
new file mode 100644
index 0000000..78794ae
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbTxnStat.java
@@ -0,0 +1,27 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbTxnStat
+{
+ public static class Active {
+ public int txnid;
+ public int parentid;
+ public DbLsn lsn;
+ };
+ public DbLsn st_last_ckp;
+ public long st_time_ckp;
+ public int st_last_txnid;
+ public int st_maxtxns;
+ public int st_naborts;
+ public int st_nbegins;
+ public int st_ncommits;
+ public int st_nactive;
+ public int st_nrestores;
+ public int st_maxnactive;
+ public Active st_txnarray[];
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_regsize;
+}
+// end of DbTxnStat.java
diff --git a/libdb/java/src/com/sleepycat/db/DbUtil.java b/libdb/java/src/com/sleepycat/db/DbUtil.java
new file mode 100644
index 0000000..86af711
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/DbUtil.java
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+
+// DbUtil is a simple, package-private wrapper class that holds a few
+// static utility functions other parts of the package share and that don't
+// have a good home elsewhere. (For now, that's limited to byte-array-to-int
+// conversion and back.)
+
+class DbUtil
+{
+ // Get the u_int32_t stored beginning at offset "offset" into
+ // array "arr". We have to do the conversion manually since it's
+ // a C-native int, and we're not really supposed to make this kind of
+ // cast in Java.
+ static int array2int(byte[] arr, int offset)
+ {
+ int b1, b2, b3, b4;
+ int pos = offset;
+
+ // Get the component bytes; b4 is most significant, b1 least.
+ if (big_endian) {
+ b4 = arr[pos++];
+ b3 = arr[pos++];
+ b2 = arr[pos++];
+ b1 = arr[pos];
+ } else {
+ b1 = arr[pos++];
+ b2 = arr[pos++];
+ b3 = arr[pos++];
+ b4 = arr[pos];
+ }
+
+ // Bytes are signed. Convert [-128, -1] to [128, 255].
+ if (b1 < 0) { b1 += 256; }
+ if (b2 < 0) { b2 += 256; }
+ if (b3 < 0) { b3 += 256; }
+ if (b4 < 0) { b4 += 256; }
+
+ // Put the bytes in their proper places in an int.
+ b2 <<= 8;
+ b3 <<= 16;
+ b4 <<= 24;
+
+ // Return their sum.
+ return (b1 + b2 + b3 + b4);
+ }
+
+ // Store the specified u_int32_t, with endianness appropriate
+ // to the platform we're running on, into four consecutive bytes of
+ // the specified byte array, starting from the specified offset.
+ static void int2array(int n, byte[] arr, int offset)
+ {
+ int b1, b2, b3, b4;
+ int pos = offset;
+
+ b1 = n & 0xff;
+ b2 = (n >> 8) & 0xff;
+ b3 = (n >> 16) & 0xff;
+ b4 = (n >> 24) & 0xff;
+
+ // Bytes are signed. Convert [128, 255] to [-128, -1].
+ if (b1 >= 128) { b1 -= 256; }
+ if (b2 >= 128) { b2 -= 256; }
+ if (b3 >= 128) { b3 -= 256; }
+ if (b4 >= 128) { b4 -= 256; }
+
+ // Put the bytes in the appropriate place in the array.
+ if (big_endian) {
+ arr[pos++] = (byte)b4;
+ arr[pos++] = (byte)b3;
+ arr[pos++] = (byte)b2;
+ arr[pos] = (byte)b1;
+ } else {
+ arr[pos++] = (byte)b1;
+ arr[pos++] = (byte)b2;
+ arr[pos++] = (byte)b3;
+ arr[pos] = (byte)b4;
+ }
+ }
+
+ private static final boolean big_endian = is_big_endian();
+ private static native boolean is_big_endian();
+}
+
+// end of DbUtil.java
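+
+// Illustrative note (not part of the original source): array2int() and
+// int2array() are inverses of each other, both using the platform's native
+// byte order.  A minimal sketch (package-private, so only usable from within
+// com.sleepycat.db):
+//
+//     byte[] arr = new byte[4];
+//     DbUtil.int2array(0x01020304, arr, 0);   // store in native byte order
+//     int n = DbUtil.array2int(arr, 0);       // n == 0x01020304 again
+//
+// On a little-endian machine arr now holds { 0x04, 0x03, 0x02, 0x01 };
+// on a big-endian machine the four bytes appear in the opposite order.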
diff --git a/libdb/java/src/com/sleepycat/db/Dbc.java b/libdb/java/src/com/sleepycat/db/Dbc.java
new file mode 100644
index 0000000..6d3e207
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/Dbc.java
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Dbc
+{
+ // methods
+ //
+ public native void close()
+ throws DbException;
+
+ public native int count(int flags)
+ throws DbException;
+
+ // returns: 0, DB_KEYEMPTY, or throws error
+ public native int del(int flags)
+ throws DbException;
+
+ public native Dbc dup(int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int pget(Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
+
+ // returns: 0, DB_KEYEXIST, or throws error
+ public native int put(Dbt key, Dbt data, int flags)
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of Dbc.java
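+
+// Illustrative note (not part of the original source): the cursor methods
+// report "not found" conditions through their integer return value rather
+// than an exception, so a typical traversal loops until get() stops
+// returning 0.  A minimal sketch, assuming an already-open Db handle "db":
+//
+//     Dbc cursor = db.cursor(null, 0);
+//     Dbt key = new Dbt();
+//     Dbt data = new Dbt();
+//     while (cursor.get(key, data, Db.DB_NEXT) == 0) {
+//         // process key/data here
+//     }
+//     cursor.close();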
diff --git a/libdb/java/src/com/sleepycat/db/Dbt.java b/libdb/java/src/com/sleepycat/db/Dbt.java
new file mode 100644
index 0000000..ed4fac1
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/Dbt.java
@@ -0,0 +1,230 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class Dbt
+{
+ // methods
+ //
+
+ public Dbt(byte[] data)
+ {
+ init();
+ this.data = data;
+ if (data != null) {
+ this.size = data.length;
+ }
+ }
+
+ public Dbt(byte[] data, int off, int len)
+ {
+ init();
+ this.data = data;
+ this.offset = off;
+ this.size = len;
+ }
+
+ public Dbt()
+ {
+ init();
+ }
+
+ public Dbt(Object serialobj) throws java.io.IOException
+ {
+ init();
+ this.set_object(serialobj);
+ }
+
+ protected native void finalize()
+ throws Throwable;
+
+ // get/set methods
+ //
+
+ // key/data
+ public byte[] get_data()
+ {
+ // In certain circumstances, like callbacks to
+ // user code that have Dbt args, we do not create
+ // data arrays until the user explicitly does a get_data.
+ // This saves us from needlessly creating objects
+ // (potentially large arrays) that may never be accessed.
+ //
+ if (must_create_data) {
+ data = create_data();
+ must_create_data = false;
+ }
+ return data;
+ }
+
+ public void set_data(byte[] data)
+ {
+ this.data = data;
+ this.must_create_data = false;
+ }
+
+
+ // get_offset/set_offset is unique to the Java portion
+ // of the DB APIs. They can be used to get/set the offset
+ // into the attached byte array.
+ //
+ public int get_offset()
+ {
+ return offset;
+ }
+
+ public void set_offset(int offset)
+ {
+ this.offset = offset;
+ }
+
+ // key/data length
+ public /*u_int32_t*/ int get_size()
+ {
+ return size;
+ }
+
+ public void set_size(/*u_int32_t*/ int size)
+ {
+ this.size = size;
+ }
+
+ // RO: length of user buffer.
+ public /*u_int32_t*/ int get_ulen()
+ {
+ return ulen;
+ }
+
+ public void set_ulen(/*u_int32_t*/ int ulen)
+ {
+ this.ulen = ulen;
+ }
+
+
+ // RO: get/put record length.
+ public /*u_int32_t*/ int get_dlen()
+ {
+ return dlen;
+ }
+
+ public void set_dlen(/*u_int32_t*/ int dlen)
+ {
+ this.dlen = dlen;
+ }
+
+ // RO: get/put record offset.
+ public /*u_int32_t*/ int get_doff()
+ {
+ return doff;
+ }
+
+ public void set_doff(/*u_int32_t*/ int doff)
+ {
+ this.doff = doff;
+ }
+
+ // flags
+ public /*u_int32_t*/ int get_flags()
+ {
+ return flags;
+ }
+
+ public void set_flags(/*u_int32_t*/ int flags)
+ {
+ this.flags = flags;
+ }
+
+ // Helper methods to get/set a Dbt from a serializable object.
+ public Object get_object() throws java.io.IOException,
+ java.lang.ClassNotFoundException
+ {
+ ByteArrayInputStream bytestream = new ByteArrayInputStream(get_data());
+ ObjectInputStream ois = new ObjectInputStream(bytestream);
+ Object serialobj = ois.readObject();
+ ois.close();
+ bytestream.close();
+ return (serialobj);
+ }
+
+ public void set_object(Object serialobj) throws java.io.IOException
+ {
+ ByteArrayOutputStream bytestream = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream(bytestream);
+ oos.writeObject(serialobj);
+ oos.close();
+ byte[] buf = bytestream.toByteArray();
+ bytestream.close();
+ set_data(buf);
+ set_offset(0);
+ set_size(buf.length);
+ }
+
+ // These are not in the original DB interface.
+ // They can be used to set the recno key for a Dbt.
+    // Note: if data is shorter than (offset + 4) bytes, these methods
+    // may throw an ArrayIndexOutOfBoundsException; get_recno_key_data()
+    // will additionally throw a NullPointerException if data is null.
+ public void set_recno_key_data(int recno)
+ {
+ if (data == null) {
+ data = new byte[4];
+ size = 4;
+ offset = 0;
+ }
+ DbUtil.int2array(recno, data, offset);
+ }
+
+ public int get_recno_key_data()
+ {
+ return (DbUtil.array2int(data, offset));
+ }
+
+ // Used internally by DbMultipleRecnoIterator
+ //
+ /*package*/ void set_recno_key_from_buffer(byte[] data, int offset)
+ {
+ this.data = data;
+ this.offset = offset;
+ this.size = 4;
+ }
+
+ static {
+ Db.load_db();
+ }
+
+ // private methods
+ //
+ private native void init();
+ private native byte[] create_data();
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ private byte[] data = null;
+ private int offset = 0;
+ private int size = 0;
+ private int ulen = 0;
+ private int dlen = 0;
+ private int doff = 0;
+ private int flags = 0;
+ private boolean must_create_data = false;
+}
+
+// end of Dbt.java
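+
+// Illustrative note (not part of the original source): the get_object()/
+// set_object() helpers let any java.io.Serializable value be carried in a
+// Dbt, and the recno helpers encode a record number into the key bytes.
+// A minimal sketch (IOException/ClassNotFoundException handling omitted):
+//
+//     Dbt data = new Dbt("some serializable value");   // calls set_object()
+//     String s = (String)data.get_object();            // decodes the bytes
+//
+//     Dbt key = new Dbt();
+//     key.set_recno_key_data(42);                      // encodes recno 42
+//     int recno = key.get_recno_key_data();            // recno == 42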
diff --git a/libdb/java/src/com/sleepycat/db/xa/DbXAResource.java b/libdb/java/src/com/sleepycat/db/xa/DbXAResource.java
new file mode 100644
index 0000000..26bbdb5
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/xa/DbXAResource.java
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.xa;
+
+import com.sleepycat.db.Db;
+import com.sleepycat.db.DbEnv;
+import com.sleepycat.db.DbTxn;
+import javax.transaction.xa.XAResource;
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.Xid;
+
+public class DbXAResource implements XAResource
+{
+ public DbXAResource(String home, int rmid, int flags)
+ throws XAException
+ {
+ this.home = home;
+ this.rmid = rmid;
+
+ // We force single-threading for calls to _init/_close.
+ // This makes our internal code much easier, and
+ // should not be a performance burden.
+ synchronized (DbXAResource.class) {
+ _init(home, rmid, flags);
+ }
+ }
+
+ //
+ // Alternate constructor for convenience.
+ // Uses an rmid that is unique within this JVM,
+    // numbered starting at 0.
+ //
+ public DbXAResource(String home)
+ throws XAException
+ {
+ this(home, get_unique_rmid(), 0);
+ }
+
+ private native void _init(String home, int rmid, int flags);
+
+ public void close(int flags)
+ throws XAException
+ {
+ // We force single-threading for calls to _init/_close.
+ // This makes our internal code much easier, and
+ // should not be a performance burden.
+ synchronized (DbXAResource.class) {
+ _close(home, rmid, flags);
+ }
+ }
+
+ private native void _close(String home, int rmid, int flags);
+
+ public void commit(Xid xid, boolean onePhase)
+ throws XAException
+ {
+ _commit(xid, rmid, onePhase);
+ }
+
+ private native void _commit(Xid xid, int rmid, boolean onePhase);
+
+ public void end(Xid xid, int flags)
+ throws XAException
+ {
+ _end(xid, rmid, flags);
+ }
+
+ private native void _end(Xid xid, int rmid, int flags);
+
+ public void forget(Xid xid)
+ throws XAException
+ {
+ _forget(xid, rmid);
+ }
+
+ private native void _forget(Xid xid, int rmid);
+
+ public int getTransactionTimeout()
+ throws XAException
+ {
+ return transactionTimeout;
+ }
+
+ public boolean isSameRM(XAResource xares)
+ throws XAException
+ {
+ if (!(xares instanceof DbXAResource))
+ return false;
+ return (this.rmid == ((DbXAResource)xares).rmid);
+ }
+
+ public int prepare(Xid xid)
+ throws XAException
+ {
+ return _prepare(xid, rmid);
+ }
+
+ private native int _prepare(Xid xid, int rmid);
+
+ public Xid [] recover(int flag)
+ throws XAException
+ {
+ return _recover(rmid, flag);
+ }
+
+ private native Xid[] _recover(int rmid, int flags);
+
+ public void rollback(Xid xid)
+ throws XAException
+ {
+ _rollback(xid, rmid);
+ System.err.println("DbXAResource.rollback returned");
+ }
+
+ private native void _rollback(Xid xid, int rmid);
+
+ public boolean setTransactionTimeout(int seconds)
+ throws XAException
+ {
+ // XXX we are not using the transaction timeout.
+ transactionTimeout = seconds;
+ return true;
+ }
+
+ public void start(Xid xid, int flags)
+ throws XAException
+ {
+ _start(xid, rmid, flags);
+ }
+
+ private native void _start(Xid xid, int rmid, int flags);
+
+ private static synchronized int get_unique_rmid()
+ {
+ return unique_rmid++;
+ }
+
+ public interface DbAttach
+ {
+ public DbEnv get_env();
+ public DbTxn get_txn();
+ }
+
+ protected static class DbAttachImpl implements DbAttach
+ {
+ private DbEnv env;
+ private DbTxn txn;
+
+ DbAttachImpl(DbEnv env, DbTxn txn)
+ {
+ this.env = env;
+ this.txn = txn;
+ }
+
+ public DbTxn get_txn()
+ {
+ return txn;
+ }
+
+ public DbEnv get_env()
+ {
+ return env;
+ }
+ }
+
+ public static native DbAttach xa_attach(Xid xid, Integer rmid);
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private int transactionTimeout = 0;
+ private String home;
+ private int rmid;
+
+ private static int unique_rmid = 0;
+
+ static
+ {
+ Db.load_db();
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/db/xa/DbXid.java b/libdb/java/src/com/sleepycat/db/xa/DbXid.java
new file mode 100644
index 0000000..64a2469
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/db/xa/DbXid.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.xa;
+
+import com.sleepycat.db.DbException;
+import com.sleepycat.db.DbTxn;
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.Xid;
+
+public class DbXid implements Xid
+{
+ public DbXid(int formatId, byte[] gtrid, byte[] bqual)
+ throws XAException
+ {
+ this.formatId = formatId;
+ this.gtrid = gtrid;
+ this.bqual = bqual;
+ }
+
+ public int getFormatId()
+ {
+ return formatId;
+ }
+
+ public byte[] getGlobalTransactionId()
+ {
+ return gtrid;
+ }
+
+ public byte[] getBranchQualifier()
+ {
+ return bqual;
+ }
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private byte[] gtrid;
+ private byte[] bqual;
+ private int formatId;
+}
diff --git a/libdb/java/src/com/sleepycat/examples/AccessExample.java b/libdb/java/src/com/sleepycat/examples/AccessExample.java
new file mode 100644
index 0000000..858cb4d
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/AccessExample.java
@@ -0,0 +1,181 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+class AccessExample
+{
+ private static final String FileName = "access.db";
+
+ public AccessExample()
+ {
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ AccessExample app = new AccessExample();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("AccessExample: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("AccessExample: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+    // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("AccessExample");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/examples/BtRecExample.java b/libdb/java/src/com/sleepycat/examples/BtRecExample.java
new file mode 100644
index 0000000..74938b1
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/BtRecExample.java
@@ -0,0 +1,340 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class BtRecExample
+{
+ static final String progname = "BtRecExample"; // Program name.
+ static final String database = "access.db";
+ static final String wordlist = "../test/wordlist";
+
+ BtRecExample(BufferedReader reader)
+ throws DbException, IOException, FileNotFoundException
+ {
+ int ret;
+
+ // Remove the previous database.
+ File f = new File(database);
+ f.delete();
+
+ dbp = new Db(null, 0);
+
+ dbp.set_error_stream(System.err);
+ dbp.set_errpfx(progname);
+ dbp.set_pagesize(1024); // 1K page sizes.
+
+ dbp.set_flags(Db.DB_RECNUM); // Record numbers.
+ dbp.open(null, database, null, Db.DB_BTREE, Db.DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the word
+ // preceded by its record number, and the data is the same, but
+ // in reverse order.
+ //
+
+ for (int cnt = 1; cnt <= 1000; ++cnt) {
+ String numstr = String.valueOf(cnt);
+ while (numstr.length() < 4)
+ numstr = "0" + numstr;
+ String buf = numstr + '_' + reader.readLine();
+ StringBuffer rbuf = new StringBuffer(buf).reverse();
+
+ StringDbt key = new StringDbt(buf);
+ StringDbt data = new StringDbt(rbuf.toString());
+
+ if ((ret = dbp.put(null, key, data, Db.DB_NOOVERWRITE)) != 0) {
+ if (ret != Db.DB_KEYEXIST)
+                    throw new DbException("Db.put failed: " + ret);
+ }
+ }
+ }
+
+ void run()
+ throws DbException
+ {
+ int recno;
+ int ret;
+
+ // Acquire a cursor for the database.
+ dbcp = dbp.cursor(null, 0);
+
+ //
+ // Prompt the user for a record number, then retrieve and display
+ // that record.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ // Get a record number.
+ String line = askForLine(reader, System.out, "recno #> ");
+ if (line == null)
+ break;
+
+ try {
+ recno = Integer.parseInt(line);
+ }
+ catch (NumberFormatException nfe) {
+ System.err.println("Bad record number: " + nfe);
+ continue;
+ }
+
+ //
+            // Start with a fresh key each time; the dbp.get() routine returns
+ // the key and data pair, not just the key!
+ //
+ RecnoStringDbt key = new RecnoStringDbt(recno, 100);
+ RecnoStringDbt data = new RecnoStringDbt(100);
+
+ if ((ret = dbcp.get(key, data, Db.DB_SET_RECNO)) != 0) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+
+ // Display the key and data.
+ show("k/d\t", key, data);
+
+ // Move the cursor a record forward.
+ if ((ret = dbcp.get(key, data, Db.DB_NEXT)) != 0) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+
+ // Display the key and data.
+ show("next\t", key, data);
+
+ RecnoStringDbt datano = new RecnoStringDbt(100);
+
+ //
+ // Retrieve the record number for the following record into
+ // local memory.
+ //
+ if ((ret = dbcp.get(key, datano, Db.DB_GET_RECNO)) != 0) {
+ if (ret != Db.DB_NOTFOUND && ret != Db.DB_KEYEMPTY) {
+ throw new DbException("Dbc.get failed", ret);
+ }
+ }
+ else {
+ recno = datano.getRecno();
+ System.out.println("retrieved recno: " + recno);
+ }
+ }
+
+ dbcp.close();
+ dbcp = null;
+ }
+
+ //
+ // Print out the number of records in the database.
+ //
+ void stats()
+ throws DbException
+ {
+ DbBtreeStat statp;
+
+ statp = (DbBtreeStat)dbp.stat(0);
+ System.out.println(progname + ": database contains " +
+ statp.bt_ndata + " records");
+ }
+
+ void show(String msg, RecnoStringDbt key, RecnoStringDbt data)
+ throws DbException
+ {
+ System.out.println(msg + key.getString() + ": " + data.getString());
+ }
+
+ public void shutdown()
+ throws DbException
+ {
+ if (dbcp != null) {
+ dbcp.close();
+ dbcp = null;
+ }
+ if (dbp != null) {
+ dbp.close(0);
+ dbp = null;
+ }
+ }
+
+ public static void main(String argv[])
+ {
+
+ try {
+ // Open the word database.
+ FileReader freader = new FileReader(wordlist);
+
+ BtRecExample app = new BtRecExample(new BufferedReader(freader));
+
+ // Close the word database.
+ freader.close();
+ freader = null;
+
+ app.stats();
+ app.run();
+ } catch (FileNotFoundException fnfe) {
+ System.err.println(progname + ": unexpected open error " + fnfe);
+ System.exit (1);
+ } catch (IOException ioe) {
+ System.err.println(progname + ": open " + wordlist + ": " + ioe);
+ System.exit (1);
+ } catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(dbe.get_errno());
+ }
+
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+    // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ private Dbc dbcp;
+ private Db dbp;
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings.
+ // We've declared it as a static inner class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(byte[] arr)
+ {
+ set_flags(Db.DB_DBT_USERMEM);
+ set_data(arr);
+ set_size(arr.length);
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
+ // must set ulen because sometimes a string is returned
+ set_ulen(data.length);
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+
+ // Here's an example of how you can extend a Dbt to store
+ // (potentially) both recno's and strings in the same
+ // structure.
+ //
+ static /*inner*/
+ class RecnoStringDbt extends Dbt
+ {
+ RecnoStringDbt(int maxsize)
+ {
+ this(0, maxsize); // let other constructor do most of the work
+ }
+
+ RecnoStringDbt(int value, int maxsize)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[maxsize];
+ set_data(arr); // use our local array for data
+ set_ulen(maxsize); // size of return storage
+ setRecno(value);
+ }
+
+ RecnoStringDbt(String value, int maxsize)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[maxsize];
+ set_data(arr); // use our local array for data
+ set_ulen(maxsize); // size of return storage
+ setString(value);
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ void setString(String value)
+ {
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ byte arr[];
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/examples/BulkAccessExample.java b/libdb/java/src/com/sleepycat/examples/BulkAccessExample.java
new file mode 100644
index 0000000..8d2254b
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/BulkAccessExample.java
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+class BulkAccessExample
+{
+ private static final String FileName = "access.db";
+
+ public BulkAccessExample()
+ {
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ BulkAccessExample app = new BulkAccessExample();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("BulkAccessExample: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("BulkAccessExample: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+    // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("BulkAccessExample");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire a cursor for the table and two Dbts.
+ Dbc dbc = table.cursor(null, 0);
+ Dbt foo = new Dbt();
+ foo.set_flags(Db.DB_DBT_MALLOC);
+
+ Dbt bulk_data = new Dbt();
+
+ // Set Db.DB_DBT_USERMEM on the data Dbt; Db.DB_MULTIPLE_KEY requires
+ // it. Then allocate a byte array of a reasonable size; we'll
+ // go through the database in chunks this big.
+ bulk_data.set_flags(Db.DB_DBT_USERMEM);
+ bulk_data.set_data(new byte[1000000]);
+ bulk_data.set_ulen(1000000);
+
+
+ // Walk through the table, printing the key/data pairs.
+ //
+ while (dbc.get(foo, bulk_data, Db.DB_NEXT | Db.DB_MULTIPLE_KEY) == 0)
+ {
+ DbMultipleKeyDataIterator iterator;
+ iterator = new DbMultipleKeyDataIterator(bulk_data);
+
+ StringDbt key, data;
+ key = new StringDbt();
+ data = new StringDbt();
+
+ while (iterator.next(key, data)) {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ }
+ dbc.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
+ }
+
+ String getString()
+ {
+ return new String(get_data(), get_offset(), get_size());
+ }
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/examples/EnvExample.java b/libdb/java/src/com/sleepycat/examples/EnvExample.java
new file mode 100644
index 0000000..e7ead01
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/EnvExample.java
@@ -0,0 +1,128 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.io.OutputStream;
+
+/*
+ * An example of a program using DbEnv to configure its DB
+ * environment.
+ *
+ * For comparison purposes, this example uses a similar structure
+ * as examples/ex_env.c and examples_cxx/EnvExample.cpp.
+ */
+public class EnvExample
+{
+ private static final String progname = "EnvExample";
+ private static final String DATABASE_HOME = "/tmp/database";
+
+ private static void db_application()
+ throws DbException
+ {
+ // Do something interesting...
+ // Your application goes here.
+ }
+
+ private static void db_setup(String home, String data_dir,
+ OutputStream errs)
+ throws DbException, FileNotFoundException
+ {
+ //
+ // Create an environment object and initialize it for error
+ // reporting.
+ //
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.set_error_stream(errs);
+ dbenv.set_errpfx(progname);
+
+ //
+ // We want to specify the shared memory buffer pool cachesize,
+ // but everything else is the default.
+ //
+ dbenv.set_cachesize(0, 64 * 1024, 0);
+
+ // Databases are in a subdirectory.
+ dbenv.set_data_dir(data_dir);
+
+ // Open the environment with full transactional support.
+ //
+ // open() will throw a DbException if there is an error.
+ //
+ // open is declared to throw a FileNotFoundException, which normally
+ // shouldn't occur with the DB_CREATE option.
+ //
+ dbenv.open(DATABASE_HOME,
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL | Db.DB_INIT_TXN, 0);
+
+ try {
+
+ // Start your application.
+ db_application();
+
+ }
+ finally {
+
+ // Close the environment. Doing this in the
+ // finally block ensures it is done, even if
+ // an error is thrown.
+ //
+ dbenv.close(0);
+ }
+ }
+
+ private static void db_teardown(String home, String data_dir,
+ OutputStream errs)
+ throws DbException, FileNotFoundException
+ {
+ // Remove the shared database regions.
+
+ DbEnv dbenv = new DbEnv(0);
+
+ dbenv.set_error_stream(errs);
+ dbenv.set_errpfx(progname);
+ dbenv.set_data_dir(data_dir);
+ dbenv.remove(home, 0);
+ }
+
+ public static void main(String[] args)
+ {
+ //
+ // All of the shared database files live in /tmp/database,
+ // but data files live in /database.
+ //
+ // Using Berkeley DB in C/C++, we need to allocate two elements
+ // in the array and set config[1] to NULL. This is not
+ // necessary in Java.
+ //
+ String home = DATABASE_HOME;
+ String config = "/database/files";
+
+ try {
+ System.out.println("Setup env");
+ db_setup(home, config, System.err);
+
+ System.out.println("Teardown env");
+ db_teardown(home, config, System.err);
+ }
+ catch (DbException dbe) {
+ System.err.println(progname + ": environment open: " + dbe.toString());
+ System.exit (1);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println(progname +
+ ": unexpected open environment error " + fnfe);
+ System.exit (1);
+ }
+ }
+
+}
diff --git a/libdb/java/src/com/sleepycat/examples/LockExample.java b/libdb/java/src/com/sleepycat/examples/LockExample.java
new file mode 100644
index 0000000..181da86
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/LockExample.java
@@ -0,0 +1,235 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Vector;
+
+//
+// An example of a program using DbLock and related classes.
+//
+class LockExample extends DbEnv
+{
+ private static final String progname = "LockExample";
+ private static final String LOCK_HOME = "TESTDIR";
+
+ public LockExample(String home, int maxlocks, boolean do_unlink)
+ throws DbException, FileNotFoundException
+ {
+ super(0);
+ if (do_unlink) {
+ remove(home, Db.DB_FORCE);
+ }
+ else {
+ set_error_stream(System.err);
+ set_errpfx("LockExample");
+ if (maxlocks != 0)
+ set_lk_max_locks(maxlocks);
+ open(home, Db.DB_CREATE|Db.DB_INIT_LOCK, 0);
+ }
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+    // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException
+ {
+ long held;
+ int len = 0, locker;
+ int ret;
+ boolean did_get = false;
+ int lockid = 0;
+ InputStreamReader in = new InputStreamReader(System.in);
+ Vector locks = new Vector();
+
+ //
+ // Accept lock requests.
+ //
+ locker = lock_id();
+ for (held = 0;;) {
+ String opbuf = askForLine(in, System.out,
+ "Operation get/release [get]> ");
+ if (opbuf == null)
+ break;
+
+ try {
+ if (opbuf.equals("get")) {
+ // Acquire a lock.
+ String objbuf = askForLine(in, System.out,
+ "input object (text string) to lock> ");
+ if (objbuf == null)
+ break;
+
+ String lockbuf;
+ do {
+ lockbuf = askForLine(in, System.out,
+ "lock type read/write [read]> ");
+ if (lockbuf == null)
+ break;
+ len = lockbuf.length();
+ } while (len >= 1 &&
+ !lockbuf.equals("read") &&
+ !lockbuf.equals("write"));
+
+ int lock_type;
+ if (len <= 1 || lockbuf.equals("read"))
+ lock_type = Db.DB_LOCK_READ;
+ else
+ lock_type = Db.DB_LOCK_WRITE;
+
+ Dbt dbt = new Dbt(objbuf.getBytes());
+
+ DbLock lock;
+ did_get = true;
+ lock = lock_get(locker, Db.DB_LOCK_NOWAIT,
+ dbt, lock_type);
+ lockid = locks.size();
+ locks.addElement(lock);
+ } else {
+ // Release a lock.
+ String objbuf;
+ objbuf = askForLine(in, System.out,
+ "input lock to release> ");
+ if (objbuf == null)
+ break;
+
+ lockid = Integer.parseInt(objbuf, 16);
+ if (lockid < 0 || lockid >= locks.size()) {
+ System.out.println("Lock #" + lockid + " out of range");
+ continue;
+ }
+ did_get = false;
+ DbLock lock = (DbLock)locks.elementAt(lockid);
+ lock_put(lock);
+ }
+ System.out.println("Lock #" + lockid + " " +
+ (did_get ? "granted" : "released"));
+ held += did_get ? 1 : -1;
+ }
+ catch (DbException dbe) {
+ switch (dbe.get_errno()) {
+ case Db.DB_LOCK_NOTGRANTED:
+ System.out.println("Lock not granted");
+ break;
+ case Db.DB_LOCK_DEADLOCK:
+ System.err.println("LockExample: lock_" +
+ (did_get ? "get" : "put") +
+ ": returned DEADLOCK");
+ break;
+ default:
+ System.err.println("LockExample: lock_get: " + dbe.toString());
+ }
+ }
+ }
+ System.out.println();
+ System.out.println("Closing lock region " + String.valueOf(held) +
+ " locks held");
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: LockExample [-u] [-h home] [-m maxlocks]");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ String home = LOCK_HOME;
+ boolean do_unlink = false;
+ int maxlocks = 0;
+
+ for (int i = 0; i < argv.length; ++i) {
+ if (argv[i].equals("-h")) {
+ if (++i >= argv.length)
+ usage();
+ home = argv[i];
+ }
+ else if (argv[i].equals("-m")) {
+ if (++i >= argv.length)
+ usage();
+
+ try {
+ maxlocks = Integer.parseInt(argv[i]);
+ }
+ catch (NumberFormatException nfe) {
+ usage();
+ }
+ }
+ else if (argv[i].equals("-u")) {
+ do_unlink = true;
+ }
+ else {
+ usage();
+ }
+ }
+
+ try {
+ if (do_unlink) {
+ // Create an environment that immediately
+ // removes all files.
+ LockExample tmp = new LockExample(home, maxlocks, do_unlink);
+ }
+
+ LockExample app = new LockExample(home, maxlocks, do_unlink);
+ app.run();
+ app.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println(progname + ": " + dbe.toString());
+ }
+ catch (Throwable t) {
+ System.err.println(progname + ": " + t.toString());
+ }
+ System.out.println("LockExample completed");
+ }
+}
diff --git a/libdb/java/src/com/sleepycat/examples/TpcbExample.java b/libdb/java/src/com/sleepycat/examples/TpcbExample.java
new file mode 100644
index 0000000..a7b785b
--- /dev/null
+++ b/libdb/java/src/com/sleepycat/examples/TpcbExample.java
@@ -0,0 +1,843 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.Random;
+import java.util.GregorianCalendar;
+import java.math.BigDecimal;
+
+//
+// This program implements a basic TPC/B driver. To create the
+// TPC/B database, run with the -i (init) flag. The number of records
+// with which to populate the account, history, branch, and teller tables
+// is specified by the a, s, b, and t flags respectively. To run a TPC/B
+// test, use the n flag to indicate a number of transactions to run (note
+// that you can run many of these processes in parallel to simulate a
+// multiuser test run).
+//
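+// For example (a sketch only; the exact flag spellings -a/-b/-t/-s/-n are
+// assumed from the description above):
+//
+//     java com.sleepycat.examples.TpcbExample -i          # create/populate
+//     java com.sleepycat.examples.TpcbExample -n 1000     # run 1000 txns
+//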
+class TpcbExample extends DbEnv
+{
+ public static final int TELLERS_PER_BRANCH = 10;
+ public static final int ACCOUNTS_PER_TELLER = 10000;
+ public static final int HISTORY_PER_BRANCH = 2592000;
+
+ //
+ // The default configuration that adheres to TPCB scaling rules requires
+ // nearly 3 GB of space. To avoid requiring that much space for testing,
+ // we set the parameters much lower. If you want to run a valid 10 TPS
+ // configuration, uncomment the VALID_SCALING configuration
+ //
+
+ // VALID_SCALING configuration
+ /*
+ public static final int ACCOUNTS = 1000000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 25920000;
+ */
+
+ // TINY configuration
+ /*
+ public static final int ACCOUNTS = 1000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 10000;
+ */
+
+ // Default configuration
+ public static final int ACCOUNTS = 100000;
+ public static final int BRANCHES = 10;
+ public static final int TELLERS = 100;
+ public static final int HISTORY = 259200;
+
+ public static final int HISTORY_LEN = 100;
+ public static final int RECLEN = 100;
+ public static final int BEGID = 1000000;
+
+ // used by random_id()
+ public static final int ACCOUNT = 0;
+ public static final int BRANCH = 1;
+ public static final int TELLER = 2;
+
+ private static boolean verbose = false;
+ private static final String progname = "TpcbExample"; // Program name.
+
+ public TpcbExample(String home, int cachesize,
+ boolean initializing, int flags)
+ throws DbException, FileNotFoundException
+ {
+ super(0);
+ set_error_stream(System.err);
+ set_errpfx(progname);
+ set_cachesize(0, cachesize == 0 ? 4 * 1024 * 1024 : cachesize, 0);
+
+ if ((flags & (Db.DB_TXN_NOSYNC)) != 0)
+ set_flags(Db.DB_TXN_NOSYNC, true);
+ flags &= ~(Db.DB_TXN_NOSYNC);
+
+ int local_flags = flags | Db.DB_CREATE;
+ if (initializing)
+ local_flags |= Db.DB_INIT_MPOOL;
+ else
+ local_flags |= Db.DB_INIT_TXN | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_MPOOL;
+
+ open(home, local_flags, 0); // may throw DbException
+ }
+
+ //
+ // Initialize the database to the specified number of accounts, branches,
+ // history records, and tellers.
+ //
+ // Note: num_h was unused in the original ex_tpcb.c example.
+ //
+ public void
+ populate(int num_a, int num_b, int num_h, int num_t)
+ {
+ Db dbp = null;
+
+ int err;
+ int balance, idnum;
+ int end_anum, end_bnum, end_tnum;
+ int start_anum, start_bnum, start_tnum;
+ int h_nelem;
+
+ idnum = BEGID;
+ balance = 500000;
+
+ h_nelem = num_a;
+
+ try {
+ dbp = new Db(this, 0);
+ dbp.set_h_nelem(h_nelem);
+ dbp.open(null, "account", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e1) {
+ errExit(e1, "Open of account file failed");
+ }
+
+ start_anum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "account");
+ idnum += h_nelem;
+ end_anum = idnum - 1;
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e2) {
+ errExit(e2, "Account file close failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated accounts: "
+ + String.valueOf(start_anum) + " - " + String.valueOf(end_anum));
+
+ //
+ // Since the number of branches is very small, we want to use very
+ // small pages and only 1 key per page. This is the poor-man's way
+ // of getting key locking instead of page locking.
+ //
+ h_nelem = (int)num_b;
+
+ try {
+ dbp = new Db(this, 0);
+
+ dbp.set_h_nelem(h_nelem);
+ dbp.set_h_ffactor(1);
+ dbp.set_pagesize(512);
+
+ dbp.open(null, "branch", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e3) {
+ errExit(e3, "Branch file create failed");
+ }
+ start_bnum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "branch");
+ idnum += h_nelem;
+ end_bnum = idnum - 1;
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException dbe4) {
+ errExit(dbe4, "Close of branch file failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated branches: "
+ + String.valueOf(start_bnum) + " - " + String.valueOf(end_bnum));
+
+ //
+ // In the case of tellers, we also want small pages, but we'll let
+ // the fill factor dynamically adjust itself.
+ //
+ h_nelem = (int)num_t;
+
+ try {
+
+ dbp = new Db(this, 0);
+
+ dbp.set_h_nelem(h_nelem);
+ dbp.set_h_ffactor(0);
+ dbp.set_pagesize(512);
+
+ dbp.open(null, "teller", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e5) {
+ errExit(e5, "Teller file create failed");
+ }
+
+ start_tnum = idnum;
+ populateTable(dbp, idnum, balance, h_nelem, "teller");
+ idnum += h_nelem;
+ end_tnum = idnum - 1;
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e6) {
+ errExit(e6, "Close of teller file failed");
+ }
+
+ if (verbose)
+ System.out.println("Populated tellers: "
+ + String.valueOf(start_tnum) + " - " + String.valueOf(end_tnum));
+
+ try {
+ dbp = new Db(this, 0);
+ dbp.set_re_len(HISTORY_LEN);
+ dbp.open(null, "history", null, Db.DB_RECNO,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ }
+ // can be DbException or FileNotFoundException
+ catch (Exception e7) {
+ errExit(e7, "Create of history file failed");
+ }
+
+ populateHistory(dbp, num_h, num_a, num_b, num_t);
+
+ try {
+ dbp.close(0);
+ }
+ catch (DbException e8) {
+ errExit(e8, "Close of history file failed");
+ }
+ }
+
+ public void
+ populateTable(Db dbp,
+ int start_id, int balance,
+ int nrecs, String msg)
+ {
+ Defrec drec = new Defrec();
+
+ Dbt kdbt = new Dbt(drec.data);
+ kdbt.set_size(4); // sizeof(int)
+ Dbt ddbt = new Dbt(drec.data);
+ ddbt.set_size(drec.data.length); // uses whole array
+
+ try {
+ for (int i = 0; i < nrecs; i++) {
+ kdbt.set_recno_key_data(start_id + (int)i);
+ drec.set_balance(balance);
+ dbp.put(null, kdbt, ddbt, Db.DB_NOOVERWRITE);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Failure initializing " + msg + " file: " +
+ dbe.toString());
+ System.exit(1);
+ }
+ }
+
+ public void
+ populateHistory(Db dbp, int nrecs,
+ int anum, int bnum, int tnum)
+ {
+ Histrec hrec = new Histrec();
+ hrec.set_amount(10);
+
+ byte arr[] = new byte[4]; // sizeof(int)
+ int i;
+ Dbt kdbt = new Dbt(arr);
+ kdbt.set_size(arr.length);
+ Dbt ddbt = new Dbt(hrec.data);
+ ddbt.set_size(hrec.data.length);
+
+ try {
+ for (i = 1; i <= nrecs; i++) {
+ kdbt.set_recno_key_data(i);
+
+ hrec.set_aid(random_id(ACCOUNT, anum, bnum, tnum));
+ hrec.set_bid(random_id(BRANCH, anum, bnum, tnum));
+ hrec.set_tid(random_id(TELLER, anum, bnum, tnum));
+
+ dbp.put(null, kdbt, ddbt, Db.DB_APPEND);
+ }
+ }
+ catch (DbException dbe) {
+ errExit(dbe, "Failure initializing history file");
+ }
+ }
+
+ static Random rand = new Random();
+
+ public static int
+ random_int(int lo, int hi)
+ {
+ int ret;
+ int t;
+
+ t = rand.nextInt();
+ if (t < 0)
+ t = -t;
+ ret = (int)(((double)t / ((double)(Integer.MAX_VALUE) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ return (ret);
+ }
+
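+ //
+ // Return a random ID of the requested type. The ID space is laid out
+ // contiguously starting at BEGID: accounts first, then branches, then
+ // tellers, matching the order in which populate() assigns IDs. The
+ // switch below relies on fallthrough to accumulate the offsets.
+ //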
+ public static int
+ random_id(int type, int accounts, int branches, int tellers)
+ {
+ int min, max, num;
+
+ max = min = BEGID;
+ num = accounts;
+ switch(type) {
+ case TELLER:
+ min += branches;
+ num = tellers;
+ // Fallthrough
+ case BRANCH:
+ if (type == BRANCH)
+ num = branches;
+ min += accounts;
+ // Fallthrough
+ case ACCOUNT:
+ max = min + num - 1;
+ }
+ return (random_int(min, max));
+ }
+
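+ //
+ // Run 'n' TPC-B-style transactions against the already-populated
+ // account, branch, teller and history databases, printing gross and
+ // interval throughput every 5000 transactions.
+ //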
+ public void
+ run(int n, int accounts, int branches, int tellers)
+ {
+ Db adb = null;
+ Db bdb = null;
+ Db hdb = null;
+ Db tdb = null;
+ double gtps, itps;
+ int failed, ifailed, ret, txns;
+ long starttime, curtime, lasttime;
+
+ //
+ // Open the database files.
+ //
+ int err;
+ try {
+ adb = new Db(this, 0);
+ adb.open(null, "account", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
+ bdb = new Db(this, 0);
+ bdb.open(null, "branch", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
+ tdb = new Db(this, 0);
+ tdb.open(null, "teller", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
+ hdb = new Db(this, 0);
+ hdb.open(null, "history", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
+ }
+ catch (DbException dbe) {
+ errExit(dbe, "Open of db files failed");
+ }
+ catch (FileNotFoundException fnfe) {
+ errExit(fnfe, "Open of db files failed, missing file");
+ }
+
+ txns = failed = ifailed = 0;
+ starttime = (new Date()).getTime();
+ lasttime = starttime;
+ while (n-- > 0) {
+ txns++;
+ ret = txn(adb, bdb, tdb, hdb, accounts, branches, tellers);
+ if (ret != 0) {
+ failed++;
+ ifailed++;
+ }
+ if (n % 5000 == 0) {
+ curtime = (new Date()).getTime();
+ gtps = (double)(txns - failed) /
+ ((curtime - starttime) / 1000.0);
+ itps = (double)(5000 - ifailed) /
+ ((curtime - lasttime) / 1000.0);
+ System.out.print(String.valueOf(txns) + " txns " +
+ String.valueOf(failed) + " failed ");
+ System.out.println(showRounded(gtps, 2) + " TPS (gross) " +
+ showRounded(itps, 2) + " TPS (interval)");
+ lasttime = curtime;
+ ifailed = 0;
+ }
+ }
+
+ try {
+ adb.close(0);
+ bdb.close(0);
+ tdb.close(0);
+ hdb.close(0);
+ }
+ catch (DbException dbe2) {
+ errExit(dbe2, "Close of db files failed");
+ }
+
+ System.out.println((long)txns + " transactions begun "
+ + String.valueOf(failed) + " failed");
+
+ }
+
+ //
+ // XXX Figure out the appropriate way to pick out IDs.
+ //
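+ // A single TPC-B-style transaction: choose a random account, branch
+ // and teller, add the fixed delta of 10 to each balance, and append
+ // a matching history record, all inside one transaction. Returns 0
+ // on success, -1 if the transaction had to be aborted.
+ //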
+ public int
+ txn(Db adb, Db bdb, Db tdb, Db hdb,
+ int anum, int bnum, int tnum)
+ {
+ Dbc acurs = null;
+ Dbc bcurs = null;
+ Dbc hcurs = null;
+ Dbc tcurs = null;
+ DbTxn t = null;
+
+ Defrec rec = new Defrec();
+ Histrec hrec = new Histrec();
+ int account, branch, teller;
+
+ Dbt d_dbt = new Dbt();
+ Dbt d_histdbt = new Dbt();
+ Dbt k_dbt = new Dbt();
+ Dbt k_histdbt = new Dbt();
+
+ account = random_id(ACCOUNT, anum, bnum, tnum);
+ branch = random_id(BRANCH, anum, bnum, tnum);
+ teller = random_id(TELLER, anum, bnum, tnum);
+
+ // The history key will not actually be retrieved,
+ // but it does need to be set to something.
+ byte hist_key[] = new byte[4];
+ k_histdbt.set_data(hist_key);
+ k_histdbt.set_size(4 /* == sizeof(int)*/);
+
+ byte key_bytes[] = new byte[4];
+ k_dbt.set_data(key_bytes);
+ k_dbt.set_size(4 /* == sizeof(int)*/);
+
+ d_dbt.set_flags(Db.DB_DBT_USERMEM);
+ d_dbt.set_data(rec.data);
+ d_dbt.set_ulen(rec.length());
+
+ hrec.set_aid(account);
+ hrec.set_bid(branch);
+ hrec.set_tid(teller);
+ hrec.set_amount(10);
+ // Request 0 bytes since we're just positioning.
+ d_histdbt.set_flags(Db.DB_DBT_PARTIAL);
+
+ // START TIMING
+
+ try {
+ t = txn_begin(null, 0);
+
+ acurs = adb.cursor(t, 0);
+ bcurs = bdb.cursor(t, 0);
+ tcurs = tdb.cursor(t, 0);
+ hcurs = hdb.cursor(t, 0);
+
+ // Account record
+ k_dbt.set_recno_key_data(account);
+ if (acurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("acurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ acurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // Branch record
+ k_dbt.set_recno_key_data(branch);
+ if (bcurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("bcurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ bcurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // Teller record
+ k_dbt.set_recno_key_data(teller);
+ if (tcurs.get(k_dbt, d_dbt, Db.DB_SET) != 0)
+ throw new TpcbException("tcurs get failed");
+ rec.set_balance(rec.get_balance() + 10);
+ tcurs.put(k_dbt, d_dbt, Db.DB_CURRENT);
+
+ // History record
+ d_histdbt.set_flags(0);
+ d_histdbt.set_data(hrec.data);
+ d_histdbt.set_ulen(hrec.length());
+ if (hdb.put(t, k_histdbt, d_histdbt, Db.DB_APPEND) != 0)
+ throw(new DbException("put failed"));
+
+ acurs.close();
+ bcurs.close();
+ tcurs.close();
+ hcurs.close();
+
+ // null out t in advance; if the commit fails,
+ // we don't want to abort it in the catch clause.
+ DbTxn tmptxn = t;
+ t = null;
+ tmptxn.commit(0);
+
+ // END TIMING
+ return (0);
+
+ }
+ catch (Exception e) {
+ try {
+ if (acurs != null)
+ acurs.close();
+ if (bcurs != null)
+ bcurs.close();
+ if (tcurs != null)
+ tcurs.close();
+ if (hcurs != null)
+ hcurs.close();
+ if (t != null)
+ t.abort();
+ }
+ catch (DbException dbe) {
+ // not much we can do here.
+ }
+
+ if (verbose) {
+ System.out.println("Transaction A=" + String.valueOf(account)
+ + " B=" + String.valueOf(branch)
+ + " T=" + String.valueOf(teller) + " failed");
+ System.out.println("Reason: " + e.toString());
+ }
+ return (-1);
+ }
+ }
+
+ static void errExit(Exception err, String s)
+ {
+ System.err.print(progname + ": ");
+ if (s != null) {
+ System.err.print(s + ": ");
+ }
+ System.err.println(err.toString());
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ long seed;
+ int accounts, branches, tellers, history;
+ boolean iflag, txn_no_sync;
+ int mpool, ntxns;
+ String home, endarg;
+
+ home = "TESTDIR";
+ accounts = branches = history = tellers = 0;
+ txn_no_sync = false;
+ mpool = ntxns = 0;
+ verbose = false;
+ iflag = false;
+ seed = (new GregorianCalendar()).get(Calendar.SECOND);
+
+ for (int i = 0; i < argv.length; ++i)
+ {
+ if (argv[i].equals("-a")) {
+ // Number of account records
+ if ((accounts = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-b")) {
+ // Number of branch records
+ if ((branches = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-c")) {
+ // Cachesize in bytes
+ if ((mpool = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-f")) {
+ // Fast mode: no txn sync.
+ txn_no_sync = true;
+ }
+ else if (argv[i].equals("-h")) {
+ // DB home.
+ home = argv[++i];
+ }
+ else if (argv[i].equals("-i")) {
+ // Initialize the test.
+ iflag = true;
+ }
+ else if (argv[i].equals("-n")) {
+ // Number of transactions
+ if ((ntxns = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-S")) {
+ // Random number seed.
+ seed = Long.parseLong(argv[++i]);
+ if (seed <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-s")) {
+ // Number of history records
+ if ((history = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-t")) {
+ // Number of teller records
+ if ((tellers = Integer.parseInt(argv[++i])) <= 0)
+ invarg(argv[i]);
+ }
+ else if (argv[i].equals("-v")) {
+ // Verbose option.
+ verbose = true;
+ }
+ else
+ {
+ usage();
+ }
+ }
+
+ rand.setSeed((int)seed);
+
+ TpcbExample app = null;
+
+ // Initialize the database environment.
+ // Must be done within a try block.
+ //
+ try {
+ app = new TpcbExample(home, mpool, iflag,
+ txn_no_sync ? Db.DB_TXN_NOSYNC : 0);
+ }
+ catch (Exception e1) {
+ errExit(e1, "initializing environment failed");
+ }
+
+ accounts = accounts == 0 ? ACCOUNTS : accounts;
+ branches = branches == 0 ? BRANCHES : branches;
+ tellers = tellers == 0 ? TELLERS : tellers;
+ history = history == 0 ? HISTORY : history;
+
+ if (verbose)
+ System.out.println((long)accounts + " Accounts, "
+ + String.valueOf(branches) + " Branches, "
+ + String.valueOf(tellers) + " Tellers, "
+ + String.valueOf(history) + " History");
+
+ if (iflag) {
+ if (ntxns != 0)
+ usage();
+ app.populate(accounts, branches, history, tellers);
+ }
+ else {
+ if (ntxns == 0)
+ usage();
+ app.run(ntxns, accounts, branches, tellers);
+ }
+
+ // Shut down the application.
+
+ try {
+ app.close(0);
+ }
+ catch (DbException dbe2) {
+ errExit(dbe2, "appexit failed");
+ }
+
+ System.exit(0);
+ }
+
+ private static void invarg(String str)
+ {
+ System.err.println("TpcbExample: invalid argument: " + str);
+ System.exit(1);
+ }
+
+ private static void usage()
+ {
+ System.err.println(
+ "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n" +
+ " [-c cachesize] [-h home] [-n transactions ]\n" +
+ " [-S seed] [-s history] [-t tellers]");
+ System.exit(1);
+ }
+
+ // round 'd' to 'scale' digits, and return result as string
+ private String showRounded(double d, int scale)
+ {
+ return new BigDecimal(d).
+ setScale(scale, BigDecimal.ROUND_HALF_DOWN).toString();
+ }
+
+ // The byte order is our choice.
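+ // (Values are stored least-significant byte first, i.e. little-endian,
+ // regardless of the host byte order.)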
+ //
+ static long get_int_in_array(byte[] array, int offset)
+ {
+ return
+ ((0xff & array[offset+0]) << 0) |
+ ((0xff & array[offset+1]) << 8) |
+ ((0xff & array[offset+2]) << 16) |
+ ((0xff & array[offset+3]) << 24);
+ }
+
+ // Note: Value needs to be long to avoid sign extension
+ static void set_int_in_array(byte[] array, int offset, long value)
+ {
+ array[offset+0] = (byte)((value >> 0) & 0x0ff);
+ array[offset+1] = (byte)((value >> 8) & 0x0ff);
+ array[offset+2] = (byte)((value >> 16) & 0x0ff);
+ array[offset+3] = (byte)((value >> 24) & 0x0ff);
+ }
+
+};
+
+// Simulate the following C struct:
+// struct Defrec {
+// u_int32_t id;
+// u_int32_t balance;
+// u_int8_t pad[RECLEN - sizeof(int) - sizeof(int)];
+// };
+
+class Defrec
+{
+ public Defrec()
+ {
+ data = new byte[TpcbExample.RECLEN];
+ }
+
+ public int length()
+ {
+ return TpcbExample.RECLEN;
+ }
+
+ public long get_id()
+ {
+ return TpcbExample.get_int_in_array(data, 0);
+ }
+
+ public void set_id(long value)
+ {
+ TpcbExample.set_int_in_array(data, 0, value);
+ }
+
+ public long get_balance()
+ {
+ return TpcbExample.get_int_in_array(data, 4);
+ }
+
+ public void set_balance(long value)
+ {
+ TpcbExample.set_int_in_array(data, 4, value);
+ }
+
+ static {
+ Defrec d = new Defrec();
+ d.set_balance(500000);
+ }
+
+ public byte[] data;
+}
+
+// Simulate the following C struct:
+// struct Histrec {
+// u_int32_t aid;
+// u_int32_t bid;
+// u_int32_t tid;
+// u_int32_t amount;
+// u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
+// };
+
+class Histrec
+{
+ public Histrec()
+ {
+ data = new byte[TpcbExample.RECLEN];
+ }
+
+ public int length()
+ {
+ return TpcbExample.RECLEN;
+ }
+
+ public long get_aid()
+ {
+ return TpcbExample.get_int_in_array(data, 0);
+ }
+
+ public void set_aid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 0, value);
+ }
+
+ public long get_bid()
+ {
+ return TpcbExample.get_int_in_array(data, 4);
+ }
+
+ public void set_bid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 4, value);
+ }
+
+ public long get_tid()
+ {
+ return TpcbExample.get_int_in_array(data, 8);
+ }
+
+ public void set_tid(long value)
+ {
+ TpcbExample.set_int_in_array(data, 8, value);
+ }
+
+ public long get_amount()
+ {
+ return TpcbExample.get_int_in_array(data, 12);
+ }
+
+ public void set_amount(long value)
+ {
+ TpcbExample.set_int_in_array(data, 12, value);
+ }
+
+ public byte[] data;
+}
+
+class TpcbException extends Exception
+{
+ TpcbException()
+ {
+ super();
+ }
+
+ TpcbException(String s)
+ {
+ super(s);
+ }
+}
diff --git a/libdb/libdb_java/checkapi.prl b/libdb/libdb_java/checkapi.prl
new file mode 100644
index 0000000..a27b8ff
--- /dev/null
+++ b/libdb/libdb_java/checkapi.prl
@@ -0,0 +1,134 @@
+#
+# Released to public domain by Donald Anderson dda@world.std.com
+# No warranties.
+#
+# Perl script to check for matching of JNI interfaces to implementation.
+# We check all .cpp arguments and .h arguments and make sure that for
+# each .h declaration (marked by JNIEXPORT keyword), there is a .cpp
+# definition for the same function (also marked by JNIEXPORT keyword),
+# and vice versa. Definitions and declarations are determined solely
+# by whether they are in a .h or .cpp file - we don't do any further
+# analysis.
+#
+# Some additions made to help with Berkeley DB sources:
+#
+# Berkeley DB Java sources use JAVADB_*_ACCESS #defines
+# to quickly define routine access functions.
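+#
+# Typical invocation (illustrative only; adjust the file list as needed):
+#     perl checkapi.prl *.cpp *.h
+#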
+
+foreach $file (<@ARGV>) { # glob allows direct use from Win* makefiles
+ open (FILE, $file) || die "$file: cannot open\n";
+ $dot_h = 0;
+ if ($file =~ /.*[hH]$/) {
+ $dot_h = 1;
+ }
+ $in_def = 0;
+nextline:
+ while (<FILE>) {
+ chop;
+ if (/JNIEXPORT/ || /^JAVADB_.*_ACCESS/) {
+ $in_def = 1;
+ $def = "";
+ }
+ if ($in_def == 1) {
+ $def .= " $_";
+ }
+ if (/\)/) {
+ $line = "";
+ $in_def = 0;
+ if ($def eq "") {
+ next nextline;
+ }
+ $_ = $def;
+ # remove comments
+ s@/\*[^*]*\*/@@g;
+ s@[ ][ ]*@ @g;
+ s@^[ ]@@g;
+ s@[ ]$@@g;
+ s@JNIEnv *\* *@JNIEnv @g;
+ s@([,*()]) @\1@g;
+ s@ ([,*()])@\1@g;
+
+ s/JAVADB_WO_ACCESS_METHOD/JAVADB_WO_ACCESS/;
+
+ if (/^JAVADB_.*_ACCESS/) {
+ s@ *@ @g;
+ s@_ACCESS_STRING\(([^,]*),@_ACCESS(\1,jstring,@;
+ s@_ACCESS_BEFORE_APPINIT@_ACCESS@;
+ s@_ACCESS\(@,normal,@;
+ s@JAVADB_@@;
+ s@\)@,@;
+ @vars = split(/,/);
+ $get = 0;
+ $set = 0;
+ if (@vars[0] eq "RW") {
+ $get = 1;
+ $set = 1;
+ }
+ if (@vars[0] eq "RO") {
+ $get = 1;
+ }
+ if (@vars[0] eq "WO") {
+ $set = 1;
+ }
+ if ($get == 0 && $set == 0) {
+ print "Invalid use of JAVADB_ macro\n";
+ }
+ if ($set == 1) {
+ $line = "JNIEXPORT void JNICALL Java_com_sleepycat_db_@vars[2]_set_1@vars[4](JNIEnv,jobject,@vars[3])";
+ }
+ if ($get == 1) {
+ $line2 = "JNIEXPORT @vars[3] JNICALL Java_com_sleepycat_db_@vars[2]_get_1@vars[4](JNIEnv,jobject)";
+ }
+ }
+ else {
+ s@([,(][a-zA-Z0-9_]*) [a-zA-Z0-9_]*@\1@g;
+ s@;$@@g;
+ $line = $_;
+ }
+
+ $def = "";
+
+ if ($line ne "") {
+ if ($lines{$line} eq "") {
+ $lines{$line} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line} += 1;
+ }
+ else {
+ $lines{$line} -= 1;
+ }
+ $line = "";
+ }
+ if ($line2 ne "") {
+ if ($lines{$line2} eq "") {
+ $lines{$line2} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line2} += 1;
+ }
+ else {
+ $lines{$line2} -= 1;
+ }
+ $line2 = "";
+ }
+ }
+ }
+ close (FILE);
+}
+
+$status = 0;
+foreach $key (sort keys %lines) {
+ if ($lines{$key} != 0) {
+ if ($lines{$key} > 0) {
+ print "Missing .cpp implementation: $lines${key}\n";
+ $status = 1;
+ }
+ else {
+ print "Missing .h declaration: $lines${key}\n";
+ $status = 1;
+ }
+ }
+}
+
+exit ($status);
diff --git a/libdb/libdb_java/com_sleepycat_db_Db.h b/libdb/libdb_java/com_sleepycat_db_Db.h
new file mode 100644
index 0000000..0787ae8
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_Db.h
@@ -0,0 +1,598 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Db */
+
+#ifndef _Included_com_sleepycat_db_Db
+#define _Included_com_sleepycat_db_Db
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef com_sleepycat_db_Db_DB_BTREE
+#define com_sleepycat_db_Db_DB_BTREE 1L
+#undef com_sleepycat_db_Db_DB_DONOTINDEX
+#define com_sleepycat_db_Db_DB_DONOTINDEX -30999L
+#undef com_sleepycat_db_Db_DB_HASH
+#define com_sleepycat_db_Db_DB_HASH 2L
+#undef com_sleepycat_db_Db_DB_KEYEMPTY
+#define com_sleepycat_db_Db_DB_KEYEMPTY -30998L
+#undef com_sleepycat_db_Db_DB_KEYEXIST
+#define com_sleepycat_db_Db_DB_KEYEXIST -30997L
+#undef com_sleepycat_db_Db_DB_LOCK_DEADLOCK
+#define com_sleepycat_db_Db_DB_LOCK_DEADLOCK -30996L
+#undef com_sleepycat_db_Db_DB_LOCK_NOTGRANTED
+#define com_sleepycat_db_Db_DB_LOCK_NOTGRANTED -30995L
+#undef com_sleepycat_db_Db_DB_NOSERVER
+#define com_sleepycat_db_Db_DB_NOSERVER -30994L
+#undef com_sleepycat_db_Db_DB_NOSERVER_HOME
+#define com_sleepycat_db_Db_DB_NOSERVER_HOME -30993L
+#undef com_sleepycat_db_Db_DB_NOSERVER_ID
+#define com_sleepycat_db_Db_DB_NOSERVER_ID -30992L
+#undef com_sleepycat_db_Db_DB_NOTFOUND
+#define com_sleepycat_db_Db_DB_NOTFOUND -30991L
+#undef com_sleepycat_db_Db_DB_OLD_VERSION
+#define com_sleepycat_db_Db_DB_OLD_VERSION -30990L
+#undef com_sleepycat_db_Db_DB_PAGE_NOTFOUND
+#define com_sleepycat_db_Db_DB_PAGE_NOTFOUND -30989L
+#undef com_sleepycat_db_Db_DB_QUEUE
+#define com_sleepycat_db_Db_DB_QUEUE 4L
+#undef com_sleepycat_db_Db_DB_RECNO
+#define com_sleepycat_db_Db_DB_RECNO 3L
+#undef com_sleepycat_db_Db_DB_REP_DUPMASTER
+#define com_sleepycat_db_Db_DB_REP_DUPMASTER -30988L
+#undef com_sleepycat_db_Db_DB_REP_HOLDELECTION
+#define com_sleepycat_db_Db_DB_REP_HOLDELECTION -30987L
+#undef com_sleepycat_db_Db_DB_REP_NEWMASTER
+#define com_sleepycat_db_Db_DB_REP_NEWMASTER -30986L
+#undef com_sleepycat_db_Db_DB_REP_NEWSITE
+#define com_sleepycat_db_Db_DB_REP_NEWSITE -30985L
+#undef com_sleepycat_db_Db_DB_REP_OUTDATED
+#define com_sleepycat_db_Db_DB_REP_OUTDATED -30984L
+#undef com_sleepycat_db_Db_DB_RUNRECOVERY
+#define com_sleepycat_db_Db_DB_RUNRECOVERY -30982L
+#undef com_sleepycat_db_Db_DB_SECONDARY_BAD
+#define com_sleepycat_db_Db_DB_SECONDARY_BAD -30981L
+#undef com_sleepycat_db_Db_DB_TXN_ABORT
+#define com_sleepycat_db_Db_DB_TXN_ABORT 0L
+#undef com_sleepycat_db_Db_DB_TXN_APPLY
+#define com_sleepycat_db_Db_DB_TXN_APPLY 1L
+#undef com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL 3L
+#undef com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL 4L
+#undef com_sleepycat_db_Db_DB_TXN_PRINT
+#define com_sleepycat_db_Db_DB_TXN_PRINT 8L
+#undef com_sleepycat_db_Db_DB_UNKNOWN
+#define com_sleepycat_db_Db_DB_UNKNOWN 5L
+#undef com_sleepycat_db_Db_DB_VERIFY_BAD
+#define com_sleepycat_db_Db_DB_VERIFY_BAD -30980L
+/* Inaccessible static: DB_AFTER */
+/* Inaccessible static: DB_AGGRESSIVE */
+/* Inaccessible static: DB_APPEND */
+/* Inaccessible static: DB_ARCH_ABS */
+/* Inaccessible static: DB_ARCH_DATA */
+/* Inaccessible static: DB_ARCH_LOG */
+/* Inaccessible static: DB_AUTO_COMMIT */
+/* Inaccessible static: DB_BEFORE */
+/* Inaccessible static: DB_CACHED_COUNTS */
+/* Inaccessible static: DB_CDB_ALLDB */
+/* Inaccessible static: DB_CHKSUM_SHA1 */
+/* Inaccessible static: DB_CLIENT */
+/* Inaccessible static: DB_CONSUME */
+/* Inaccessible static: DB_CONSUME_WAIT */
+/* Inaccessible static: DB_CREATE */
+/* Inaccessible static: DB_CURRENT */
+/* Inaccessible static: DB_CXX_NO_EXCEPTIONS */
+/* Inaccessible static: DB_DBT_MALLOC */
+/* Inaccessible static: DB_DBT_PARTIAL */
+/* Inaccessible static: DB_DBT_REALLOC */
+/* Inaccessible static: DB_DBT_USERMEM */
+/* Inaccessible static: DB_DIRECT */
+/* Inaccessible static: DB_DIRECT_DB */
+/* Inaccessible static: DB_DIRECT_LOG */
+/* Inaccessible static: DB_DIRTY_READ */
+/* Inaccessible static: DB_DUP */
+/* Inaccessible static: DB_DUPSORT */
+/* Inaccessible static: DB_EID_BROADCAST */
+/* Inaccessible static: DB_EID_INVALID */
+/* Inaccessible static: DB_ENCRYPT */
+/* Inaccessible static: DB_ENCRYPT_AES */
+/* Inaccessible static: DB_EXCL */
+/* Inaccessible static: DB_FAST_STAT */
+/* Inaccessible static: DB_FIRST */
+/* Inaccessible static: DB_FLUSH */
+/* Inaccessible static: DB_FORCE */
+/* Inaccessible static: DB_GET_BOTH */
+/* Inaccessible static: DB_GET_BOTH_RANGE */
+/* Inaccessible static: DB_GET_RECNO */
+/* Inaccessible static: DB_INIT_CDB */
+/* Inaccessible static: DB_INIT_LOCK */
+/* Inaccessible static: DB_INIT_LOG */
+/* Inaccessible static: DB_INIT_MPOOL */
+/* Inaccessible static: DB_INIT_TXN */
+/* Inaccessible static: DB_JOINENV */
+/* Inaccessible static: DB_JOIN_ITEM */
+/* Inaccessible static: DB_JOIN_NOSORT */
+/* Inaccessible static: DB_KEYFIRST */
+/* Inaccessible static: DB_KEYLAST */
+/* Inaccessible static: DB_LAST */
+/* Inaccessible static: DB_LOCKDOWN */
+/* Inaccessible static: DB_LOCK_DEFAULT */
+/* Inaccessible static: DB_LOCK_EXPIRE */
+/* Inaccessible static: DB_LOCK_GET */
+/* Inaccessible static: DB_LOCK_GET_TIMEOUT */
+/* Inaccessible static: DB_LOCK_IREAD */
+/* Inaccessible static: DB_LOCK_IWR */
+/* Inaccessible static: DB_LOCK_IWRITE */
+/* Inaccessible static: DB_LOCK_MAXLOCKS */
+/* Inaccessible static: DB_LOCK_MINLOCKS */
+/* Inaccessible static: DB_LOCK_MINWRITE */
+/* Inaccessible static: DB_LOCK_NOWAIT */
+/* Inaccessible static: DB_LOCK_OLDEST */
+/* Inaccessible static: DB_LOCK_PUT */
+/* Inaccessible static: DB_LOCK_PUT_ALL */
+/* Inaccessible static: DB_LOCK_PUT_OBJ */
+/* Inaccessible static: DB_LOCK_RANDOM */
+/* Inaccessible static: DB_LOCK_READ */
+/* Inaccessible static: DB_LOCK_TIMEOUT */
+/* Inaccessible static: DB_LOCK_WRITE */
+/* Inaccessible static: DB_LOCK_YOUNGEST */
+/* Inaccessible static: DB_MULTIPLE */
+/* Inaccessible static: DB_MULTIPLE_KEY */
+/* Inaccessible static: DB_NEXT */
+/* Inaccessible static: DB_NEXT_DUP */
+/* Inaccessible static: DB_NEXT_NODUP */
+/* Inaccessible static: DB_NODUPDATA */
+/* Inaccessible static: DB_NOLOCKING */
+/* Inaccessible static: DB_NOMMAP */
+/* Inaccessible static: DB_NOORDERCHK */
+/* Inaccessible static: DB_NOOVERWRITE */
+/* Inaccessible static: DB_NOPANIC */
+/* Inaccessible static: DB_NOSYNC */
+/* Inaccessible static: DB_ODDFILESIZE */
+/* Inaccessible static: DB_ORDERCHKONLY */
+/* Inaccessible static: DB_OVERWRITE */
+/* Inaccessible static: DB_PANIC_ENVIRONMENT */
+/* Inaccessible static: DB_POSITION */
+/* Inaccessible static: DB_PREV */
+/* Inaccessible static: DB_PREV_NODUP */
+/* Inaccessible static: DB_PRINTABLE */
+/* Inaccessible static: DB_PRIORITY_DEFAULT */
+/* Inaccessible static: DB_PRIORITY_HIGH */
+/* Inaccessible static: DB_PRIORITY_LOW */
+/* Inaccessible static: DB_PRIORITY_VERY_HIGH */
+/* Inaccessible static: DB_PRIORITY_VERY_LOW */
+/* Inaccessible static: DB_PRIVATE */
+/* Inaccessible static: DB_RDONLY */
+/* Inaccessible static: DB_RECNUM */
+/* Inaccessible static: DB_RECORDCOUNT */
+/* Inaccessible static: DB_RECOVER */
+/* Inaccessible static: DB_RECOVER_FATAL */
+/* Inaccessible static: DB_REGION_INIT */
+/* Inaccessible static: DB_RENUMBER */
+/* Inaccessible static: DB_REP_CLIENT */
+/* Inaccessible static: DB_REP_LOGSONLY */
+/* Inaccessible static: DB_REP_MASTER */
+/* Inaccessible static: DB_REP_PERMANENT */
+/* Inaccessible static: DB_REP_UNAVAIL */
+/* Inaccessible static: DB_REVSPLITOFF */
+/* Inaccessible static: DB_RMW */
+/* Inaccessible static: DB_SALVAGE */
+/* Inaccessible static: DB_SET */
+/* Inaccessible static: DB_SET_LOCK_TIMEOUT */
+/* Inaccessible static: DB_SET_RANGE */
+/* Inaccessible static: DB_SET_RECNO */
+/* Inaccessible static: DB_SET_TXN_TIMEOUT */
+/* Inaccessible static: DB_SNAPSHOT */
+/* Inaccessible static: DB_STAT_CLEAR */
+/* Inaccessible static: DB_SYSTEM_MEM */
+/* Inaccessible static: DB_THREAD */
+/* Inaccessible static: DB_TRUNCATE */
+/* Inaccessible static: DB_TXN_NOSYNC */
+/* Inaccessible static: DB_TXN_NOWAIT */
+/* Inaccessible static: DB_TXN_SYNC */
+/* Inaccessible static: DB_TXN_WRITE_NOSYNC */
+/* Inaccessible static: DB_UPGRADE */
+/* Inaccessible static: DB_USE_ENVIRON */
+/* Inaccessible static: DB_USE_ENVIRON_ROOT */
+/* Inaccessible static: DB_VERB_CHKPOINT */
+/* Inaccessible static: DB_VERB_DEADLOCK */
+/* Inaccessible static: DB_VERB_RECOVERY */
+/* Inaccessible static: DB_VERB_REPLICATION */
+/* Inaccessible static: DB_VERB_WAITSFOR */
+/* Inaccessible static: DB_VERIFY */
+/* Inaccessible static: DB_VERSION_MAJOR */
+/* Inaccessible static: DB_VERSION_MINOR */
+/* Inaccessible static: DB_VERSION_PATCH */
+/* Inaccessible static: DB_WRITECURSOR */
+/* Inaccessible static: DB_XA_CREATE */
+/* Inaccessible static: DB_XIDDATASIZE */
+/* Inaccessible static: DB_YIELDCPU */
+/* Inaccessible static: already_loaded_ */
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbEnv;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _notify_internal
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _associate
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Db;Lcom/sleepycat/db/DbSecondaryKeyCreate;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _close
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: cursor
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: del
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: fd
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_byteswapped
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_type
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: join
+ * Signature: ([Lcom/sleepycat/db/Dbc;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *, jobject, jobjectArray, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: key_range
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbKeyRange;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _open
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _rename
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
+ (JNIEnv *, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _remove
+ * Signature: (Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
+ (JNIEnv *, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: append_recno_changed
+ * Signature: (Lcom/sleepycat/db/DbAppendRecno;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreeCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_maxkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1maxkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_minkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1minkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_prefix_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreePrefix;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_cache_priority
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cache_1priority
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: dup_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbDupCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_flags
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1flags
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_flags_raw
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1flags_1raw
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_ffactor
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1ffactor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: hash_changed
+ * Signature: (Lcom/sleepycat/db/DbHash;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_nelem
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1nelem
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_lorder
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1lorder
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_pagesize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1pagesize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_delim
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1delim
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_len
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1len
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_pad
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1pad
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_source
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1source
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_q_extentsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1q_1extentsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: stat
+ * Signature: (I)Ljava/lang/Object;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: sync
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_sync
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: truncate
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: upgrade
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: verify
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/io/OutputStream;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+ (JNIEnv *, jobject, jstring, jstring, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: one_time_init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbEnv.h b/libdb/libdb_java/com_sleepycat_db_DbEnv.h
new file mode 100644
index 0000000..f239dfc
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbEnv.h
@@ -0,0 +1,581 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbEnv */
+
+#ifndef _Included_com_sleepycat_db_DbEnv
+#define _Included_com_sleepycat_db_DbEnv
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: dbremove
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: dbrename
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+ (JNIEnv *, jobject, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbErrcall;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_db
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Lcom/sleepycat/db/Db;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+ (JNIEnv *, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_xa
+ * Signature: (Lcom/sleepycat/db/DbErrcall;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1xa
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _notify_db_close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: open
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: remove
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errcall
+ * Signature: (Lcom/sleepycat/db/DbErrcall;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errpfx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbEnvFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_verbose
+ * Signature: (IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1verbose
+ (JNIEnv *, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_data_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1data_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_bsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1bsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_regionmax
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1regionmax
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_conflicts
+ * Signature: ([[B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *, jobject, jobjectArray);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_detect
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1detect
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_lockers
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1lockers
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_locks
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1locks
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_objects
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1objects
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_mp_mmapsize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_flags
+ * Signature: (IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
+ (JNIEnv *, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_rep_limit
+ * Signature: (II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_transport_changed
+ * Signature: (ILcom/sleepycat/db/DbRepTransport;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+ (JNIEnv *, jobject, jint, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_rpc_server
+ * Signature: (Lcom/sleepycat/db/DbClient;Ljava/lang/String;JJI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+ (JNIEnv *, jobject, jobject, jstring, jlong, jlong, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_shm_key
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1shm_1key
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tas_spins
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tas_1spins
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_timeout
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tmp_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tmp_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: app_dispatch_changed
+ * Signature: (Lcom/sleepycat/db/DbAppDispatch;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tx_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tx_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_tx_timestamp
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_major
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_minor
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_patch
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_string
+ * Signature: ()Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: strerror
+ * Signature: (I)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_detect
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_get
+ * Signature: (IILcom/sleepycat/db/Dbt;I)Lcom/sleepycat/db/DbLock;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+ (JNIEnv *, jobject, jint, jint, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_put
+ * Signature: (Lcom/sleepycat/db/DbLock;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_id_free
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1id_1free
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_stat
+ * Signature: (I)Lcom/sleepycat/db/DbLockStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_vec
+ * Signature: (II[Lcom/sleepycat/db/DbLockRequest;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+ (JNIEnv *, jobject, jint, jint, jobjectArray, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_archive
+ * Signature: (I)[Ljava/lang/String;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_compare
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/DbLsn;)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+ (JNIEnv *, jclass, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_cursor
+ * Signature: (I)Lcom/sleepycat/db/DbLogc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_file
+ * Signature: (Lcom/sleepycat/db/DbLsn;)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_flush
+ * Signature: (Lcom/sleepycat/db/DbLsn;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_put
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_stat
+ * Signature: (I)Lcom/sleepycat/db/DbLogStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_stat
+ * Signature: (I)Lcom/sleepycat/db/DbMpoolStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_fstat
+ * Signature: (I)[Lcom/sleepycat/db/DbMpoolFStat;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_trickle
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_elect
+ * Signature: (III)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1elect
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_process_message
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbEnv$RepProcessMessage;)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1process_1message
+ (JNIEnv *, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_start
+ * Signature: (Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1start
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_stat
+ * Signature: (I)Lcom/sleepycat/db/DbRepStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_begin
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/DbTxn;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_checkpoint
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_recover
+ * Signature: (II)[Lcom/sleepycat/db/DbPreplist;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_stat
+ * Signature: (I)Lcom/sleepycat/db/DbTxnStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+ (JNIEnv *, jobject, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbLock.h b/libdb/libdb_java/com_sleepycat_db_DbLock.h
new file mode 100644
index 0000000..9f3d77d
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbLock.h
@@ -0,0 +1,21 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLock */
+
+#ifndef _Included_com_sleepycat_db_DbLock
+#define _Included_com_sleepycat_db_DbLock
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLock
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbLogc.h b/libdb/libdb_java/com_sleepycat_db_DbLogc.h
new file mode 100644
index 0000000..8d029c7
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbLogc.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLogc */
+
+#ifndef _Included_com_sleepycat_db_DbLogc
+#define _Included_com_sleepycat_db_DbLogc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbLsn.h b/libdb/libdb_java/com_sleepycat_db_DbLsn.h
new file mode 100644
index 0000000..080fa0a
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbLsn.h
@@ -0,0 +1,29 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLsn */
+
+#ifndef _Included_com_sleepycat_db_DbLsn
+#define _Included_com_sleepycat_db_DbLsn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: init_lsn
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbTxn.h b/libdb/libdb_java/com_sleepycat_db_DbTxn.h
new file mode 100644
index 0000000..59641c0
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbTxn.h
@@ -0,0 +1,61 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbTxn */
+
+#ifndef _Included_com_sleepycat_db_DbTxn
+#define _Included_com_sleepycat_db_DbTxn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: abort
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: commit
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: discard
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_discard
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: prepare
+ * Signature: ([B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+ (JNIEnv *, jobject, jbyteArray);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: set_timeout
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_DbUtil.h b/libdb/libdb_java/com_sleepycat_db_DbUtil.h
new file mode 100644
index 0000000..7f84955
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_DbUtil.h
@@ -0,0 +1,22 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbUtil */
+
+#ifndef _Included_com_sleepycat_db_DbUtil
+#define _Included_com_sleepycat_db_DbUtil
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: big_endian */
+/*
+ * Class: com_sleepycat_db_DbUtil
+ * Method: is_big_endian
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_DbUtil_is_1big_1endian
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_Dbc.h b/libdb/libdb_java/com_sleepycat_db_Dbc.h
new file mode 100644
index 0000000..447ab23
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_Dbc.h
@@ -0,0 +1,77 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbc */
+
+#ifndef _Included_com_sleepycat_db_Dbc
+#define _Included_com_sleepycat_db_Dbc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: count
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: del
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: dup
+ * Signature: (I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_Dbt.h b/libdb/libdb_java/com_sleepycat_db_Dbt.h
new file mode 100644
index 0000000..c09bd8e
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_Dbt.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbt */
+
+#ifndef _Included_com_sleepycat_db_Dbt
+#define _Included_com_sleepycat_db_Dbt
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: create_data
+ * Signature: ()[B
+ */
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h b/libdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
new file mode 100644
index 0000000..00e9e2e
--- /dev/null
+++ b/libdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
@@ -0,0 +1,95 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_xa_DbXAResource */
+
+#ifndef _Included_com_sleepycat_db_xa_DbXAResource
+#define _Included_com_sleepycat_db_xa_DbXAResource
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: unique_rmid */
+/* Inaccessible static: class_00024com_00024sleepycat_00024db_00024xa_00024DbXAResource */
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _init
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _close
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _commit
+ * Signature: (Ljavax/transaction/xa/Xid;IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+ (JNIEnv *, jobject, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _end
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _forget
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _prepare
+ * Signature: (Ljavax/transaction/xa/Xid;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _recover
+ * Signature: (II)[Ljavax/transaction/xa/Xid;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _rollback
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _start
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: xa_attach
+ * Signature: (Ljavax/transaction/xa/Xid;Ljava/lang/Integer;)Lcom/sleepycat/db/xa/DbXAResource$DbAttach;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+ (JNIEnv *, jclass, jobject, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/libdb/libdb_java/java_Db.c b/libdb/libdb_java/java_Db.c
new file mode 100644
index 0000000..d6dace9
--- /dev/null
+++ b/libdb/libdb_java/java_Db.c
@@ -0,0 +1,982 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc_auto/db_ext.h"
+#include "java_util.h"
+#include "java_stat_auto.h"
+#include "com_sleepycat_db_Db.h"
+
+/* This struct is used in Db.verify and its callback */
+struct verify_callback_struct {
+ JNIEnv *env;
+ jobject streamobj;
+ jbyteArray bytes;
+ int nbytes;
+ jmethodID writemid;
+};
+
+JAVADB_GET_FLD(Db, jint, flags_1raw, DB, flags)
+
+JAVADB_SET_METH(Db, jint, flags, DB, flags)
+JAVADB_SET_METH(Db, jint, h_1ffactor, DB, h_ffactor)
+JAVADB_SET_METH(Db, jint, h_1nelem, DB, h_nelem)
+JAVADB_SET_METH(Db, jint, lorder, DB, lorder)
+JAVADB_SET_METH(Db, jint, re_1delim, DB, re_delim)
+JAVADB_SET_METH(Db, jint, re_1len, DB, re_len)
+JAVADB_SET_METH(Db, jint, re_1pad, DB, re_pad)
+JAVADB_SET_METH(Db, jint, q_1extentsize, DB, q_extentsize)
+JAVADB_SET_METH(Db, jint, bt_1maxkey, DB, bt_maxkey)
+JAVADB_SET_METH(Db, jint, bt_1minkey, DB, bt_minkey)
+
+/*
+ * This only gets called once ever, at the beginning of execution
+ * and can be used to initialize unchanging methodIds, fieldIds, etc.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *jnienv, /*Db.class*/ jclass jthisclass)
+{
+ COMPQUIET(jthisclass, NULL);
+
+ one_time_init(jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbEnv*/ jobject jdbenv, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jdbenv);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ DB_ASSERT(dbinfo == NULL);
+
+ err = db_create(&db, dbenv, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB, jthis, db);
+ dbinfo = dbji_construct(jnienv, jthis, flags);
+ set_private_info(jnienv, name_DB, jthis, dbinfo);
+ db->api_internal = dbinfo;
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /* DbTxn */ jobject jtxn,
+ /*Db*/ jobject jsecondary, /*DbSecondaryKeyCreate*/ jobject jcallback,
+ jint flags)
+{
+ DB *db, *secondary;
+ DB_JAVAINFO *second_info;
+ DB_TXN *txn;
+
+ db = get_DB(jnienv, jthis);
+ txn = get_DB_TXN(jnienv, jtxn);
+ secondary = get_DB(jnienv, jsecondary);
+
+ second_info = (DB_JAVAINFO*)secondary->api_internal;
+ dbji_set_assoc_object(second_info, jnienv, db, txn, secondary,
+ jcallback, flags);
+
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ /*
+ * Null out the private data to indicate the DB is invalid.
+ * We do this in advance to help guard against multithreading
+ * issues.
+ */
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ err = db->close(db, flags);
+ verify_return(jnienv, err, 0);
+ dbji_dealloc(dbinfo, jnienv);
+
+ return (err);
+}
+
+/*
+ * We are being notified that the parent DbEnv has closed.
+ * Zero out the pointer to the DB, since it is no longer
+ * valid, to prevent mistakes. The user will get a null
+ * pointer exception if they try to use this Db again.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbAppendRecno*/ jobject jcallback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_append_recno_object(dbinfo, jnienv, db, jcallback);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreeCompare*/ jobject jbtcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_bt_compare_object(dbinfo, jnienv, db, jbtcompare);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreePrefix*/ jobject jbtprefix)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_bt_prefix_object(dbinfo, jnienv, db, jbtprefix);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid, jint flags)
+{
+ int err;
+ DBC *dbc;
+ DB *db = get_DB(jnienv, jthis);
+ DB_TXN *dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+ err = db->cursor(db, dbtxnid, &dbc, flags);
+ verify_return(jnienv, err, 0);
+ return (get_Dbc(jnienv, dbc));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, jint dbflags)
+{
+ int err;
+ DB_TXN *dbtxnid;
+ DB *db;
+ LOCKED_DBT lkey;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, key, inOp) != 0)
+ goto out;
+
+ err = db->del(db, dbtxnid, &lkey.javainfo->dbt, dbflags);
+ if (!DB_RETOK_DBDEL(err))
+ verify_return(jnienv, err, 0);
+
+ out:
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbDupCompare*/ jobject jdupcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_dup_compare_object(dbinfo, jnienv, db, jdupcompare);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint ecode, jstring msg)
+{
+ DB *db;
+ LOCKED_STRING ls_msg;
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ db->err(db, ecode, "%s", ls_msg.string);
+
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring msg)
+{
+ LOCKED_STRING ls_msg;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ db->errx(db, "%s", ls_msg.string);
+
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ int err;
+ int return_value = 0;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ err = db->fd(db, &return_value);
+ verify_return(jnienv, err, 0);
+
+ return (return_value);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring jpasswd, jint flags)
+{
+ int err;
+ DB *db;
+ LOCKED_STRING ls_passwd;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+ goto out;
+
+ err = db->set_encrypt(db, ls_passwd.string, flags);
+ verify_return(jnienv, err, 0);
+
+out: locked_string_put(&ls_passwd, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_feedback_object(dbinfo, jnienv, db, jfeedback);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ DB_ENV *dbenv;
+ OpKind keyop, dataop;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, ldata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out3;
+ dbenv = db->dbenv;
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->get(db,
+ dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ out3:
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbHash*/ jobject jhash)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_h_hash_object(dbinfo, jnienv, db, jhash);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*Dbc[]*/ jobjectArray curslist,
+ jint flags)
+{
+ int err;
+ DB *db;
+ int count;
+ DBC **newlist;
+ DBC *dbc;
+ int i;
+ int size;
+
+ db = get_DB(jnienv, jthis);
+ count = (*jnienv)->GetArrayLength(jnienv, curslist);
+ size = sizeof(DBC *) * (count+1);
+ if ((err = __os_malloc(db->dbenv, size, &newlist)) != 0) {
+ if (!verify_return(jnienv, err, 0))
+ return (NULL);
+ }
+
+ /* Convert the java array of Dbc's to a C array of DBC's. */
+ for (i = 0; i < count; i++) {
+ jobject jobj =
+ (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
+ if (jobj == 0) {
+ /*
+ * An embedded null in the array is treated
+ * as an endpoint.
+ */
+ newlist[i] = 0;
+ break;
+ }
+ else {
+ newlist[i] = get_DBC(jnienv, jobj);
+ }
+ }
+ newlist[count] = 0;
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+
+ err = db->join(db, newlist, &dbc, flags);
+ verify_return(jnienv, err, 0);
+ __os_free(db->dbenv, newlist);
+
+ return (get_Dbc(jnienv, dbc));
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject jkey, jobject /*DbKeyRange*/ range, jint flags)
+{
+ int err;
+ DB *db;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey;
+ DB_KEY_RANGE result;
+ jfieldID fid;
+ jclass krclass;
+
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (!verify_non_null(jnienv, range))
+ return;
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, jkey, inOp) != 0)
+ goto out;
+ err = db->key_range(db, dbtxnid, &lkey.javainfo->dbt, &result, flags);
+ if (verify_return(jnienv, err, 0)) {
+ /* fill in the values of the DbKeyRange structure */
+ if ((krclass = get_class(jnienv, "DbKeyRange")) == NULL)
+ return; /* An exception has been posted. */
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "less", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.less);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "equal", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.equal);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "greater", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.greater);
+ }
+ out:
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject rkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ DB_ENV *dbenv;
+ OpKind keyop, rkeyop, dataop;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, lrkey, ldata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out4;
+ dbenv = db->dbenv;
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ rkeyop = outOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ rkeyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lrkey, jnienv, dbenv, rkey, rkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->pget(db, dbtxnid, &lkey.javainfo->dbt,
+ &lrkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lrkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lrkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ out4:
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lrkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err;
+ DB *db;
+ DB_ENV *dbenv;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (!verify_non_null(jnienv, db))
+ return (0); /* error will be thrown, retval doesn't matter */
+ dbenv = db->dbenv;
+
+ /*
+ * For DB_APPEND, the key may be output-only; for all other flags,
+ * it's input-only.
+ */
+ if ((flags & DB_OPFLAGS_MASK) == DB_APPEND)
+ keyop = outOp;
+ else
+ keyop = inOp;
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, db))
+ goto out1;
+
+ err = db->put(db,
+ dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+ if (!DB_RETOK_DBPUT(err))
+ verify_return(jnienv, err, 0);
+
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
+ (JNIEnv *jnienv, /*Db*/ jobject jthis,
+ jstring file, jstring database, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
+ goto out2;
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
+ goto out1;
+ err = db->remove(db, ls_file.string, ls_database.string, flags);
+
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+
+ out1:
+ locked_string_put(&ls_database, jnienv);
+ out2:
+ locked_string_put(&ls_file, jnienv);
+
+ dbji_dealloc(dbinfo, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
+ (JNIEnv *jnienv, /*Db*/ jobject jthis,
+ jstring file, jstring database, jstring newname, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
+ LOCKED_STRING ls_newname;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
+ goto out3;
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
+ goto out2;
+ if (locked_string_get(&ls_newname, jnienv, newname) != 0)
+ goto out1;
+
+ err = db->rename(db, ls_file.string, ls_database.string,
+ ls_newname.string, flags);
+
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ out1:
+ locked_string_put(&ls_newname, jnienv);
+ out2:
+ locked_string_put(&ls_database, jnienv);
+ out3:
+ locked_string_put(&ls_file, jnienv);
+
+ dbji_dealloc(dbinfo, jnienv);
+}
+
+JAVADB_METHOD(Db_set_1pagesize, (JAVADB_ARGS, jlong pagesize), DB,
+ set_pagesize, (c_this, (u_int32_t)pagesize))
+JAVADB_METHOD(Db_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+JAVADB_METHOD(Db_set_1cache_1priority, (JAVADB_ARGS, jint priority), DB,
+ set_cache_priority, (c_this, (DB_CACHE_PRIORITY)priority))
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_Db_set_1re_1source
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring re_source)
+{
+ int err;
+ DB *db;
+
+ db = get_DB(jnienv, jthis);
+ if (verify_non_null(jnienv, db)) {
+
+ /* XXX does the string from get_c_string ever get freed? */
+ if (re_source != NULL)
+ err = db->set_re_source(db,
+ get_c_string(jnienv, re_source));
+ else
+ err = db->set_re_source(db, 0);
+
+ verify_return(jnienv, err, 0);
+ }
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ DB *db;
+ DB_BTREE_STAT *bstp;
+ DB_HASH_STAT *hstp;
+ DB_QUEUE_STAT *qstp;
+ DBTYPE dbtype;
+ jobject retval;
+ jclass dbclass;
+ size_t bytesize;
+ void *statp;
+
+ bytesize = 0;
+ retval = NULL;
+ statp = NULL;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+
+ if (verify_return(jnienv, db->stat(db, &statp, flags), 0) &&
+ verify_return(jnienv, db->get_type(db, &dbtype), 0)) {
+ switch (dbtype) {
+ /* Btree and recno share the same stat structure */
+ case DB_BTREE:
+ case DB_RECNO:
+ bstp = (DB_BTREE_STAT *)statp;
+ bytesize = sizeof(DB_BTREE_STAT);
+ retval = create_default_object(jnienv,
+ name_DB_BTREE_STAT);
+ if ((dbclass =
+ get_class(jnienv, name_DB_BTREE_STAT)) == NULL)
+ break; /* An exception has been posted. */
+
+ __jv_fill_bt_stat(jnienv, dbclass, retval, bstp);
+ break;
+
+ /* Hash stat structure */
+ case DB_HASH:
+ hstp = (DB_HASH_STAT *)statp;
+ bytesize = sizeof(DB_HASH_STAT);
+ retval = create_default_object(jnienv,
+ name_DB_HASH_STAT);
+ if ((dbclass =
+ get_class(jnienv, name_DB_HASH_STAT)) == NULL)
+ break; /* An exception has been posted. */
+
+ __jv_fill_h_stat(jnienv, dbclass, retval, hstp);
+ break;
+
+ case DB_QUEUE:
+ qstp = (DB_QUEUE_STAT *)statp;
+ bytesize = sizeof(DB_QUEUE_STAT);
+ retval = create_default_object(jnienv,
+ name_DB_QUEUE_STAT);
+ if ((dbclass =
+ get_class(jnienv, name_DB_QUEUE_STAT)) == NULL)
+ break; /* An exception has been posted. */
+
+ __jv_fill_qam_stat(jnienv, dbclass, retval, qstp);
+ break;
+
+ /* That's all the database types we're aware of! */
+ default:
+ report_exception(jnienv,
+ "Db.stat not implemented for types"
+ " other than BTREE, HASH, QUEUE,"
+ " and RECNO",
+ EINVAL, 0);
+ break;
+ }
+ if (bytesize != 0)
+ __os_ufree(db->dbenv, statp);
+ }
+ return (retval);
+}
+
+JAVADB_METHOD(Db_sync, (JAVADB_ARGS, jint flags), DB,
+ sync, (c_this, flags))
+
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ DB *db;
+ int err, isbyteswapped;
+
+ /* This value should never be seen, because of the exception. */
+ isbyteswapped = 0;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ err = db->get_byteswapped(db, &isbyteswapped);
+ (void)verify_return(jnienv, err, 0);
+
+ return ((jboolean)isbyteswapped);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ DB *db;
+ int err;
+ DBTYPE dbtype;
+
+ /* This value should never be seen, because of the exception. */
+ dbtype = DB_UNKNOWN;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ err = db->get_type(db, &dbtype);
+ (void)verify_return(jnienv, err, 0);
+
+ return ((jint)dbtype);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ jstring file, jstring database, jint type, jint flags, jint mode)
+{
+ int err;
+ DB *db;
+ DB_TXN *dbtxnid;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
+
+ /* Java is assumed to be threaded */
+ flags |= DB_THREAD;
+
+ db = get_DB(jnienv, jthis);
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
+ goto out2;
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
+ goto out1;
+ if (verify_non_null(jnienv, db)) {
+ err = db->open(db, dbtxnid, ls_file.string, ls_database.string,
+ (DBTYPE)type, flags, mode);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ }
+ out1:
+ locked_string_put(&ls_database, jnienv);
+ out2:
+ locked_string_put(&ls_file, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxnid, jint flags)
+{
+ int err;
+ DB *db;
+ u_int32_t count;
+ DB_TXN *dbtxnid;
+
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, jtxnid);
+ count = 0;
+ if (verify_non_null(jnienv, db)) {
+ err = db->truncate(db, dbtxnid, &count, flags);
+ verify_return(jnienv, err, 0);
+ }
+ return (jint)count;
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+ jint flags)
+{
+ int err;
+ DB *db = get_DB(jnienv, jthis);
+ LOCKED_STRING ls_name;
+
+ if (verify_non_null(jnienv, db)) {
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ goto out;
+ err = db->upgrade(db, ls_name.string, flags);
+ verify_return(jnienv, err, 0);
+ }
+ out:
+ locked_string_put(&ls_name, jnienv);
+}
+
+static int java_verify_callback(void *handle, const void *str_arg)
+{
+ char *str;
+ struct verify_callback_struct *vc;
+ int len;
+ JNIEnv *jnienv;
+
+ str = (char *)str_arg;
+ vc = (struct verify_callback_struct *)handle;
+ jnienv = vc->env;
+ len = strlen(str)+1;
+ if (len > vc->nbytes) {
+ vc->nbytes = len;
+ vc->bytes = (*jnienv)->NewByteArray(jnienv, len);
+ }
+
+ if (vc->bytes != NULL) {
+ (*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len,
+ (jbyte*)str);
+ (*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
+ vc->writemid, vc->bytes, 0, len-1);
+ }
+
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+ return (EIO);
+
+ return (0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+ jstring subdb, jobject stream, jint flags)
+{
+ int err;
+ DB *db;
+ LOCKED_STRING ls_name;
+ LOCKED_STRING ls_subdb;
+ struct verify_callback_struct vcs;
+ jclass streamclass;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ goto out2;
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+ goto out1;
+
+ /* set up everything we need for the callbacks */
+ vcs.env = jnienv;
+ vcs.streamobj = stream;
+ vcs.nbytes = 100;
+ if ((vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes)) == NULL)
+ goto out1;
+
+ /* get the method ID for OutputStream.write(byte[], int, int); */
+ streamclass = (*jnienv)->FindClass(jnienv, "java/io/OutputStream");
+ vcs.writemid = (*jnienv)->GetMethodID(jnienv, streamclass,
+ "write", "([BII)V");
+
+ /* invoke verify - this will invoke the callback repeatedly. */
+ err = __db_verify_internal(db, ls_name.string, ls_subdb.string,
+ &vcs, java_verify_callback, flags);
+ verify_return(jnienv, err, 0);
+
+out1:
+ locked_string_put(&ls_subdb, jnienv);
+out2:
+ locked_string_put(&ls_name, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+ (JNIEnv *jnienv, jobject jthis,
+ jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+ DB_JAVAINFO *dbinfo;
+ DB *db;
+
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ db = get_DB(jnienv, jthis);
+ DB_ASSERT(dbinfo != NULL);
+
+ /*
+ * Note: We can never be sure if the underlying DB is attached to
+ * a DB_ENV that was already closed. Sure, that's a user error,
+ * but it shouldn't crash the VM. Therefore, we cannot just
+ * automatically close if the handle indicates we are not yet
+ * closed. The best we can do is detect this and report it.
+ */
+ if (db != NULL) {
+ /* If this error occurs, this object was never closed. */
+ report_errcall(jnienv, errcall, errpfx,
+ "Db.finalize: open Db object destroyed");
+ }
+
+ /* Shouldn't see this object again, but just in case */
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+ set_private_info(jnienv, name_DB, jthis, 0);
+
+ dbji_destroy(dbinfo, jnienv);
+}
diff --git a/libdb/libdb_java/java_DbEnv.c b/libdb/libdb_java/java_DbEnv.c
new file mode 100644
index 0000000..561ae90
--- /dev/null
+++ b/libdb/libdb_java/java_DbEnv.c
@@ -0,0 +1,1450 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "java_stat_auto.h"
+#include "com_sleepycat_db_DbEnv.h"
+
+/* We keep these lined up, and alphabetical by field name,
+ * for comparison with C++'s list.
+ */
+JAVADB_SET_METH_STR(DbEnv, data_1dir, DB_ENV, data_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
+JAVADB_SET_METH_STR(DbEnv, lg_1dir, DB_ENV, lg_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1max, DB_ENV, lg_max)
+JAVADB_SET_METH(DbEnv, jint, lg_1regionmax, DB_ENV, lg_regionmax)
+JAVADB_SET_METH(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
+JAVADB_SET_METH(DbEnv, jint, lk_1max, DB_ENV, lk_max)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
+/* mp_mmapsize is declared below, it needs an extra cast */
+JAVADB_SET_METH_STR(DbEnv, tmp_1dir, DB_ENV, tmp_dir)
+JAVADB_SET_METH(DbEnv, jint, tx_1max, DB_ENV, tx_max)
+
+static void DbEnv_errcall_callback(const char *prefix, char *message)
+{
+ JNIEnv *jnienv;
+ DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
+ jstring pre;
+
+ /*
+ * Note: these error cases are "impossible", and would
+ * normally warrant an exception. However, without
+ * a jnienv, we cannot throw an exception...
+ * We don't want to trap or exit, since the point of
+ * this facility is for the user to completely control
+ * error situations.
+ */
+ if (envinfo == NULL) {
+ /*
+ * Something is *really* wrong here, the
+ * prefix is set in every environment created.
+ */
+ fprintf(stderr, "Error callback failed!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ /* Should always succeed... */
+ jnienv = dbjie_get_jnienv(envinfo);
+
+ if (jnienv == NULL) {
+
+ /* But just in case... */
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ pre = dbjie_get_errpfx(envinfo, jnienv);
+ report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
+}
+
+static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
+ /*DbEnv*/ jobject jenv,
+ /*DbErrcall*/ jobject jerrcall,
+ int is_dbopen)
+{
+ DB_ENV_JAVAINFO *envinfo;
+
+ envinfo = get_DB_ENV_JAVAINFO(jnienv, jenv);
+ DB_ASSERT(envinfo == NULL);
+ envinfo = dbjie_construct(jnienv, jenv, jerrcall, is_dbopen);
+ set_private_info(jnienv, name_DB_ENV, jenv, envinfo);
+ dbenv->set_errpfx(dbenv, (const char*)envinfo);
+ dbenv->set_errcall(dbenv, DbEnv_errcall_callback);
+ dbenv->api2_internal = envinfo;
+ set_private_dbobj(jnienv, name_DB_ENV, jenv, dbenv);
+}
+
+/*
+ * This is called when this DbEnv was made on behalf of a Db
+ * created directly (without a parent DbEnv), and the Db is
+ * being closed. We'll zero out the pointer to the DB_ENV,
+ * since it is no longer valid, to prevent mistakes.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+ /*DbEnvFeedback*/ jobject jfeedback)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ dbjie_set_feedback_object(dbenvinfo, jnienv, dbenv, jfeedback);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+ jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+
+ err = db_env_create(&dbenv, flags);
+ if (verify_return(jnienv, err, 0))
+ DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+ /*Db*/ jobject jdb)
+{
+ DB_ENV *dbenv;
+ DB *db;
+
+ db = get_DB(jnienv, jdb);
+ dbenv = db->dbenv;
+ DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home,
+ jint flags, jint mode)
+{
+ int err;
+ DB_ENV *dbenv;
+ LOCKED_STRING ls_home;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+ if (locked_string_get(&ls_home, jnienv, db_home) != 0)
+ goto out;
+
+ /* Java is assumed to be threaded. */
+ flags |= DB_THREAD;
+
+ err = dbenv->open(dbenv, ls_home.string, flags, mode);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ out:
+ locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home, jint flags)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+ LOCKED_STRING ls_home;
+ int err = 0;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ if (locked_string_get(&ls_home, jnienv, db_home) != 0)
+ goto out;
+
+ err = dbenv->remove(dbenv, ls_home.string, flags);
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+ verify_return(jnienv, err, 0);
+ out:
+ locked_string_put(&ls_home, jnienv);
+
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ err = dbenv->close(dbenv, flags);
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
+
+ /* Throw an exception if the close failed. */
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ jstring name, jstring subdb, jint flags)
+{
+ LOCKED_STRING ls_name, ls_subdb;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ txn = get_DB_TXN(jnienv, jtxn);
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ return;
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+ goto err1;
+
+ err = dbenv->dbremove(dbenv, txn, ls_name.string, ls_subdb.string,
+ flags);
+
+ /* Throw an exception if the dbremove failed. */
+ verify_return(jnienv, err, 0);
+
+ locked_string_put(&ls_subdb, jnienv);
+err1: locked_string_put(&ls_name, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ jstring name, jstring subdb, jstring newname, jint flags)
+{
+ LOCKED_STRING ls_name, ls_subdb, ls_newname;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ txn = get_DB_TXN(jnienv, jtxn);
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ return;
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+ goto err2;
+ if (locked_string_get(&ls_newname, jnienv, newname) != 0)
+ goto err1;
+
+ err = dbenv->dbrename(dbenv, txn, ls_name.string, ls_subdb.string,
+ ls_newname.string, flags);
+
+ /* Throw an exception if the dbrename failed. */
+ verify_return(jnienv, err, 0);
+
+ locked_string_put(&ls_newname, jnienv);
+err1: locked_string_put(&ls_subdb, jnienv);
+err2: locked_string_put(&ls_name, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint ecode, jstring msg)
+{
+ LOCKED_STRING ls_msg;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+
+ dbenv->err(dbenv, ecode, "%s", ls_msg.string);
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring msg)
+{
+ LOCKED_STRING ls_msg;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+
+ dbenv->errx(dbenv, "%s", ls_msg.string);
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+/*static*/
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+ (JNIEnv *jnienv, jclass jthis_class, jint ecode)
+{
+ const char *message;
+
+ COMPQUIET(jthis_class, NULL);
+ message = db_strerror(ecode);
+ return (get_java_string(jnienv, message));
+}
+
+JAVADB_METHOD(DbEnv_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB_ENV,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jpasswd, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ LOCKED_STRING ls_passwd;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+ goto out;
+
+ err = dbenv->set_encrypt(dbenv, ls_passwd.string, flags);
+ verify_return(jnienv, err, 0);
+
+out: locked_string_put(&ls_passwd, jnienv);
+}
+
+JAVADB_METHOD(DbEnv_set_1flags,
+ (JAVADB_ARGS, jint flags, jboolean onoff), DB_ENV,
+ set_flags, (c_this, flags, onoff ? 1 : 0))
+
+JAVADB_METHOD(DbEnv_set_1mp_1mmapsize, (JAVADB_ARGS, jlong value), DB_ENV,
+ set_mp_mmapsize, (c_this, (size_t)value))
+
+JAVADB_METHOD(DbEnv_set_1tas_1spins, (JAVADB_ARGS, jint spins), DB_ENV,
+ set_tas_spins, (c_this, (u_int32_t)spins))
+
+JAVADB_METHOD(DbEnv_set_1timeout,
+ (JAVADB_ARGS, jlong timeout, jint flags), DB_ENV,
+ set_timeout, (c_this, (u_int32_t)timeout, flags))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+ int err;
+ jsize i, len;
+ u_char *newarr;
+ int bytesize;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ len = (*jnienv)->GetArrayLength(jnienv, array);
+ bytesize = sizeof(u_char) * len * len;
+
+ if ((err = __os_malloc(dbenv, bytesize, &newarr)) != 0) {
+ if (!verify_return(jnienv, err, 0))
+ return;
+ }
+
+ for (i=0; i<len; i++) {
+ jobject subArray =
+ (*jnienv)->GetObjectArrayElement(jnienv, array, i);
+ (*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
+ 0, len,
+ (jbyte *)&newarr[i*len]);
+ }
+ dbjie_set_conflict(dbenvinfo, newarr, bytesize);
+ err = dbenv->set_lk_conflicts(dbenv, newarr, len);
+ verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT jint JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1elect
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint nsites, jint pri,
+ jint timeout)
+{
+ DB_ENV *dbenv;
+ int err, id;
+
+ if (!verify_non_null(jnienv, jthis))
+ return (DB_EID_INVALID);
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+
+ err = dbenv->rep_elect(dbenv, (int)nsites,
+ (int)pri, (u_int32_t)timeout, &id);
+ verify_return(jnienv, err, 0);
+
+ return ((jint)id);
+}
+
+JNIEXPORT jint JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1process_1message
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject control,
+ /* Dbt */ jobject rec, /* RepProcessMessage */ jobject result)
+{
+ DB_ENV *dbenv;
+ LOCKED_DBT cdbt, rdbt;
+ int err, envid;
+
+ if (!verify_non_null(jnienv, jthis) || !verify_non_null(jnienv, result))
+ return (-1);
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ err = 0;
+
+ /* The DBTs are always inputs. */
+ if (locked_dbt_get(&cdbt, jnienv, dbenv, control, inOp) != 0)
+ goto out2;
+ if (locked_dbt_get(&rdbt, jnienv, dbenv, rec, inOp) != 0)
+ goto out1;
+
+ envid = (*jnienv)->GetIntField(jnienv,
+ result, fid_RepProcessMessage_envid);
+
+ err = dbenv->rep_process_message(dbenv, &cdbt.javainfo->dbt,
+ &rdbt.javainfo->dbt, &envid);
+
+ if (err == DB_REP_NEWMASTER)
+ (*jnienv)->SetIntField(jnienv,
+ result, fid_RepProcessMessage_envid, envid);
+ else if (!DB_RETOK_REPPMSG(err))
+ verify_return(jnienv, err, 0);
+
+out1: locked_dbt_put(&rdbt, jnienv, dbenv);
+out2: locked_dbt_put(&cdbt, jnienv, dbenv);
+
+ return (err);
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1start
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject cookie,
+ jint flags)
+{
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ LOCKED_DBT ldbt;
+ int err;
+
+ if (!verify_non_null(jnienv, jthis))
+ return;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+
+ /* The Dbt cookie may be null; if so, pass in a NULL DBT. */
+ if (cookie != NULL) {
+ if (locked_dbt_get(&ldbt, jnienv, dbenv, cookie, inOp) != 0)
+ goto out;
+ dbtp = &ldbt.javainfo->dbt;
+ } else
+ dbtp = NULL;
+
+ err = dbenv->rep_start(dbenv, dbtp, flags);
+ verify_return(jnienv, err, 0);
+
+out: if (cookie != NULL)
+ locked_dbt_put(&ldbt, jnienv, dbenv);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_REP_STAT *statp = NULL;
+ jobject retval = NULL;
+ jclass dbclass;
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->rep_stat(dbenv, &statp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_REP_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_REP_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_rep_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
+}
+
+JNIEXPORT void JNICALL
+Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint gbytes, jint bytes)
+{
+ DB_ENV *dbenv;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv)) {
+ err = dbenv->set_rep_limit(dbenv,
+ (u_int32_t)gbytes, (u_int32_t)bytes);
+ verify_return(jnienv, err, 0);
+ }
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint envid,
+ /* DbRepTransport */ jobject jreptransport)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo) ||
+ !verify_non_null(jnienv, jreptransport))
+ return;
+
+ dbjie_set_rep_transport_object(dbenvinfo,
+ jnienv, dbenv, envid, jreptransport);
+}
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbClient*/ jobject jclient,
+ jstring jhost, jlong tsec, jlong ssec, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ const char *host = (*jnienv)->GetStringUTFChars(jnienv, jhost, NULL);
+
+ if (jclient != NULL) {
+ report_exception(jnienv, "DbEnv.set_rpc_server client arg "
+ "must be null; reserved for future use",
+ EINVAL, 0);
+ return;
+ }
+ if (verify_non_null(jnienv, dbenv)) {
+ err = dbenv->set_rpc_server(dbenv, NULL, host,
+ (long)tsec, (long)ssec, flags);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ }
+}
+
+JAVADB_METHOD(DbEnv_set_1shm_1key, (JAVADB_ARGS, jlong shm_key), DB_ENV,
+ set_shm_key, (c_this, (long)shm_key))
+
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong seconds)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ time_t time = seconds;
+
+ if (verify_non_null(jnienv, dbenv)) {
+ err = dbenv->set_tx_timestamp(dbenv, &time);
+
+ /* Throw an exception if the call failed. */
+ verify_return(jnienv, err, 0);
+ }
+}
+
+JAVADB_METHOD(DbEnv_set_1verbose,
+ (JAVADB_ARGS, jint which, jboolean onoff), DB_ENV,
+ set_verbose, (c_this, which, onoff ? 1 : 0))
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_MAJOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_MINOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+ (JNIEnv * jnienv, jclass this_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(this_class, NULL);
+
+ return (DB_VERSION_PATCH);
+}
+
+/*static*/
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+ (JNIEnv *jnienv, jclass this_class)
+{
+ COMPQUIET(this_class, NULL);
+
+ return ((*jnienv)->NewStringUTF(jnienv, DB_VERSION_STRING));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+ int err;
+ u_int32_t id;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (-1);
+ err = dbenv->lock_id(dbenv, &id);
+ verify_return(jnienv, err, 0);
+ return (id);
+}
+
+JAVADB_METHOD(DbEnv_lock_1id_1free, (JAVADB_ARGS, jint id), DB_ENV,
+ lock_id_free, (c_this, id))
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_LOCK_STAT *statp = NULL;
+ jobject retval = NULL;
+ jclass dbclass;
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->lock_stat(dbenv, &statp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_LOCK_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_LOCK_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_lock_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint atype, jint flags)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ int aborted;
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+ err = dbenv->lock_detect(dbenv, atype, flags, &aborted);
+ verify_return(jnienv, err, 0);
+ return (aborted);
+}
+
+JNIEXPORT /*DbLock*/ jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+ jint flags, /*const Dbt*/ jobject obj, /*db_lockmode_t*/ jint lock_mode)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOCK *dblock;
+ LOCKED_DBT lobj;
+ /*DbLock*/ jobject retval;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ if ((err = __os_malloc(dbenv, sizeof(DB_LOCK), &dblock)) != 0)
+ if (!verify_return(jnienv, err, 0))
+ return (NULL);
+
+ memset(dblock, 0, sizeof(DB_LOCK));
+ err = 0;
+ retval = NULL;
+ if (locked_dbt_get(&lobj, jnienv, dbenv, obj, inOp) != 0)
+ goto out;
+
+ err = dbenv->lock_get(dbenv, locker, flags, &lobj.javainfo->dbt,
+ (db_lockmode_t)lock_mode, dblock);
+
+ if (err == DB_LOCK_NOTGRANTED)
+ report_notgranted_exception(jnienv,
+ "DbEnv.lock_get not granted",
+ DB_LOCK_GET, lock_mode, obj,
+ NULL, -1);
+ else if (verify_return(jnienv, err, 0)) {
+ retval = create_default_object(jnienv, name_DB_LOCK);
+ set_private_dbobj(jnienv, name_DB_LOCK, retval, dblock);
+ }
+
+ out:
+ locked_dbt_put(&lobj, jnienv, dbenv);
+ return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+ jint flags, /*const Dbt*/ jobjectArray list, jint offset, jint count)
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ *lockreq;
+ DB_LOCKREQ *prereq; /* preprocessed requests */
+ DB_LOCKREQ *failedreq;
+ DB_LOCK *lockp;
+ LOCKED_DBT *locked_dbts;
+ int err;
+ int alloc_err;
+ int i;
+ size_t bytesize;
+ size_t ldbtsize;
+ jobject jlockreq;
+ db_lockop_t op;
+ jobject jobj;
+ jobject jlock;
+ int completed;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ goto out0;
+
+ if ((*jnienv)->GetArrayLength(jnienv, list) < offset + count) {
+ report_exception(jnienv,
+ "DbEnv.lock_vec array not large enough",
+ 0, 0);
+ goto out0;
+ }
+
+ bytesize = sizeof(DB_LOCKREQ) * count;
+ if ((err = __os_malloc(dbenv, bytesize, &lockreq)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out0;
+ }
+ memset(lockreq, 0, bytesize);
+
+ ldbtsize = sizeof(LOCKED_DBT) * count;
+ if ((err = __os_malloc(dbenv, ldbtsize, &locked_dbts)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out1;
+ }
+	memset(locked_dbts, 0, ldbtsize);
+ prereq = &lockreq[0];
+
+ /* fill in the lockreq array */
+ for (i = 0, prereq = &lockreq[0]; i < count; i++, prereq++) {
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv, list,
+ offset + i);
+ if (jlockreq == NULL) {
+ report_exception(jnienv,
+ "DbEnv.lock_vec list entry is null",
+ 0, 0);
+ goto out2;
+ }
+ op = (*jnienv)->GetIntField(jnienv, jlockreq,
+ fid_DbLockRequest_op);
+ prereq->op = op;
+
+ switch (op) {
+ case DB_LOCK_GET_TIMEOUT:
+ /* Needed: mode, timeout, obj. Returned: lock. */
+			prereq->timeout = (*jnienv)->GetIntField(jnienv, jlockreq,
+			    fid_DbLockRequest_timeout);
+ /* FALLTHROUGH */
+ case DB_LOCK_GET:
+ /* Needed: mode, obj. Returned: lock. */
+ prereq->mode = (*jnienv)->GetIntField(jnienv, jlockreq,
+ fid_DbLockRequest_mode);
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+ dbenv, jobj, inOp)) != 0)
+ goto out2;
+ prereq->obj = &locked_dbts[i].javainfo->dbt;
+ break;
+ case DB_LOCK_PUT:
+ /* Needed: lock. Ignored: mode, obj. */
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ if (!verify_non_null(jnienv, jlock))
+ goto out2;
+ lockp = get_DB_LOCK(jnienv, jlock);
+ if (!verify_non_null(jnienv, lockp))
+ goto out2;
+
+ prereq->lock = *lockp;
+ break;
+ case DB_LOCK_PUT_ALL:
+ case DB_LOCK_TIMEOUT:
+ /* Needed: (none). Ignored: lock, mode, obj. */
+ break;
+ case DB_LOCK_PUT_OBJ:
+ /* Needed: obj. Ignored: lock, mode. */
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+ dbenv, jobj, inOp)) != 0)
+ goto out2;
+ prereq->obj = &locked_dbts[i].javainfo->dbt;
+ break;
+ default:
+ report_exception(jnienv,
+ "DbEnv.lock_vec bad op value",
+ 0, 0);
+ goto out2;
+ }
+ }
+
+ err = dbenv->lock_vec(dbenv, locker, flags, lockreq, count, &failedreq);
+ if (err == 0)
+ completed = count;
+ else
+ completed = failedreq - lockreq;
+
+ /* do post processing for any and all requests that completed */
+ for (i = 0; i < completed; i++) {
+ op = lockreq[i].op;
+ if (op == DB_LOCK_PUT) {
+ /*
+ * After a successful put, the DbLock can no longer
+ * be used, so we release the storage related to it.
+ */
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ lockp = get_DB_LOCK(jnienv, jlock);
+ __os_free(NULL, lockp);
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+ }
+ else if (op == DB_LOCK_GET) {
+ /*
+ * Store the lock that was obtained.
+ * We need to create storage for it since
+ * the lockreq array only exists during this
+ * method call.
+ */
+ alloc_err = __os_malloc(dbenv, sizeof(DB_LOCK), &lockp);
+ if (!verify_return(jnienv, alloc_err, 0))
+ goto out2;
+
+ *lockp = lockreq[i].lock;
+
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jlock = create_default_object(jnienv, name_DB_LOCK);
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, lockp);
+ (*jnienv)->SetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock,
+ jlock);
+ }
+ }
+
+ /* If one of the locks was not granted, build the exception now. */
+ if (err == DB_LOCK_NOTGRANTED && i < count) {
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ report_notgranted_exception(jnienv,
+ "DbEnv.lock_vec incomplete",
+ lockreq[i].op,
+ lockreq[i].mode,
+ jobj,
+ jlock,
+ i);
+ }
+ else
+ verify_return(jnienv, err, 0);
+
+ out2:
+ /* Free the dbts that we have locked */
+ for (i = 0 ; i < (prereq - lockreq); i++) {
+ if ((op = lockreq[i].op) == DB_LOCK_GET ||
+ op == DB_LOCK_PUT_OBJ)
+ locked_dbt_put(&locked_dbts[i], jnienv, dbenv);
+ }
+ __os_free(dbenv, locked_dbts);
+
+ out1:
+ __os_free(dbenv, lockreq);
+
+ out0:
+ return;
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+ (JNIEnv *jnienv, jobject jthis, /*DbLock*/ jobject jlock)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOCK *dblock;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ dblock = get_DB_LOCK(jnienv, jlock);
+ if (!verify_non_null(jnienv, dblock))
+ return;
+
+ err = dbenv->lock_put(dbenv, dblock);
+ if (verify_return(jnienv, err, 0)) {
+ /*
+ * After a successful put, the DbLock can no longer
+ * be used, so we release the storage related to it
+ * (allocated in DbEnv.lock_get()).
+ */
+ __os_free(NULL, dblock);
+
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+ }
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err, len, i;
+	char **ret;
+ jclass stringClass;
+ jobjectArray strarray;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ strarray = NULL;
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+ err = dbenv->log_archive(dbenv, &ret, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+
+ if (ret != NULL) {
+ len = 0;
+ while (ret[len] != NULL)
+ len++;
+ stringClass = (*jnienv)->FindClass(jnienv, "java/lang/String");
+ if ((strarray = (*jnienv)->NewObjectArray(jnienv,
+ len, stringClass, 0)) == NULL)
+ goto out;
+ for (i=0; i<len; i++) {
+ jstring str = (*jnienv)->NewStringUTF(jnienv, ret[i]);
+ (*jnienv)->SetObjectArrayElement(jnienv, strarray,
+ i, str);
+ }
+ }
+out: return (strarray);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+ (JNIEnv *jnienv, jclass jthis_class,
+ /*DbLsn*/ jobject lsn0, /*DbLsn*/ jobject lsn1)
+{
+ DB_LSN *dblsn0;
+ DB_LSN *dblsn1;
+
+ COMPQUIET(jthis_class, NULL);
+ dblsn0 = get_DB_LSN(jnienv, lsn0);
+ dblsn1 = get_DB_LSN(jnienv, lsn1);
+
+ return (log_compare(dblsn0, dblsn1));
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_LOGC *dblogc;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+ err = dbenv->log_cursor(dbenv, &dblogc, flags);
+ verify_return(jnienv, err, 0);
+ return (get_DbLogc(jnienv, dblogc));
+}
+
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
+ char filename[FILENAME_MAX+1] = "";
+
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->log_file(dbenv, dblsn, filename, FILENAME_MAX);
+ verify_return(jnienv, err, 0);
+ filename[FILENAME_MAX] = '\0'; /* just to be sure */
+ return (get_java_string(jnienv, filename));
+}
+
+JAVADB_METHOD(DbEnv_log_1flush,
+ (JAVADB_ARGS, /*DbLsn*/ jobject lsn), DB_ENV,
+ log_flush, (c_this, get_DB_LSN(jnienv, lsn)))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
+ /*DbDbt*/ jobject data, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LSN *dblsn;
+ LOCKED_DBT ldata;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dblsn = get_DB_LSN(jnienv, lsn);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ /* log_put's DB_LSN argument may not be NULL. */
+ if (!verify_non_null(jnienv, dblsn))
+ return;
+
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+ goto out;
+
+ err = dbenv->log_put(dbenv, dblsn, &ldata.javainfo->dbt, flags);
+ verify_return(jnienv, err, 0);
+ out:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOG_STAT *statp;
+ jobject retval;
+ jclass dbclass;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->log_stat(dbenv, &statp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_LOG_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_LOG_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_log_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ jclass dbclass;
+ DB_ENV *dbenv;
+ DB_MPOOL_STAT *statp;
+ jobject retval;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->memp_stat(dbenv, &statp, 0, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_MPOOL_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_MPOOL_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_mpool_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err, i, len;
+ jclass fstat_class;
+ DB_ENV *dbenv;
+ DB_MPOOL_FSTAT **fstatp;
+ jobjectArray retval;
+ jfieldID filename_id;
+ jstring jfilename;
+
+ fstatp = NULL;
+ retval = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->memp_stat(dbenv, 0, &fstatp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ len = 0;
+ while (fstatp[len] != NULL)
+ len++;
+ if ((fstat_class =
+ get_class(jnienv, name_DB_MPOOL_FSTAT)) == NULL ||
+ (retval = (*jnienv)->NewObjectArray(jnienv, len,
+ fstat_class, 0)) == NULL)
+ goto err;
+ for (i=0; i<len; i++) {
+ jobject obj;
+ if ((obj = create_default_object(jnienv,
+ name_DB_MPOOL_FSTAT)) == NULL)
+ goto err;
+ (*jnienv)->SetObjectArrayElement(jnienv, retval,
+ i, obj);
+
+ /* Set the string field. */
+ filename_id = (*jnienv)->GetFieldID(jnienv,
+ fstat_class, "file_name", string_signature);
+ jfilename = get_java_string(jnienv,
+ fstatp[i]->file_name);
+ (*jnienv)->SetObjectField(jnienv, obj,
+ filename_id, jfilename);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_pagesize", fstatp[i]->st_pagesize);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_cache_hit", fstatp[i]->st_cache_hit);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_cache_miss", fstatp[i]->st_cache_miss);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_map", fstatp[i]->st_map);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_create", fstatp[i]->st_page_create);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_in", fstatp[i]->st_page_in);
+ set_int_field(jnienv, fstat_class, obj,
+ "st_page_out", fstatp[i]->st_page_out);
+ __os_ufree(dbenv, fstatp[i]);
+ }
+err: __os_ufree(dbenv, fstatp);
+ }
+ return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint pct)
+{
+ int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ int result = 0;
+
+ if (verify_non_null(jnienv, dbenv)) {
+ err = dbenv->memp_trickle(dbenv, pct, &result);
+ verify_return(jnienv, err, 0);
+ }
+ return (result);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject pid, jint flags)
+{
+ int err;
+ DB_TXN *dbpid, *result;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (0);
+
+ dbpid = get_DB_TXN(jnienv, pid);
+ result = 0;
+
+ err = dbenv->txn_begin(dbenv, dbpid, &result, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+ return (get_DbTxn(jnienv, result));
+}
+
+JAVADB_METHOD(DbEnv_txn_1checkpoint,
+ (JAVADB_ARGS, jint kbyte, jint min, jint flags), DB_ENV,
+ txn_checkpoint, (c_this, kbyte, min, flags))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jappdispatch)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ dbjie_set_app_dispatch_object(dbenvinfo, jnienv, dbenv, jappdispatch);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint count, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_PREPLIST *preps;
+ long retcount;
+ int i;
+ char signature[128];
+ size_t bytesize;
+ jobject retval;
+ jobject obj;
+ jobject txnobj;
+ jbyteArray bytearr;
+ jclass preplist_class;
+ jfieldID txn_fieldid;
+ jfieldID gid_fieldid;
+
+ retval = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ /*
+ * We need to allocate some local storage for the
+ * returned preplist, and that requires us to do
+ * our own argument validation.
+ */
+ if (count <= 0) {
+ verify_return(jnienv, EINVAL, 0);
+ goto out;
+ }
+
+ bytesize = sizeof(DB_PREPLIST) * count;
+ if ((err = __os_malloc(dbenv, bytesize, &preps)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out;
+ }
+
+ err = dbenv->txn_recover(dbenv, preps, count, &retcount, flags);
+
+ if (verify_return(jnienv, err, 0)) {
+ if ((preplist_class =
+ get_class(jnienv, name_DB_PREPLIST)) == NULL ||
+ (retval = (*jnienv)->NewObjectArray(jnienv, retcount,
+ preplist_class, 0)) == NULL)
+ goto err;
+
+ (void)snprintf(signature, sizeof(signature),
+ "L%s%s;", DB_PACKAGE_NAME, name_DB_TXN);
+ txn_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+ "txn", signature);
+ gid_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+ "gid", "[B");
+
+ for (i=0; i<retcount; i++) {
+ /*
+ * First, make a blank DbPreplist object
+ * and set the array entry.
+ */
+ if ((obj = create_default_object(jnienv,
+ name_DB_PREPLIST)) == NULL)
+ goto err;
+ (*jnienv)->SetObjectArrayElement(jnienv,
+ retval, i, obj);
+
+ /* Set the txn field. */
+ txnobj = get_DbTxn(jnienv, preps[i].txn);
+ (*jnienv)->SetObjectField(jnienv,
+ obj, txn_fieldid, txnobj);
+
+ /* Build the gid array and set the field. */
+ if ((bytearr = (*jnienv)->NewByteArray(jnienv,
+ sizeof(preps[i].gid))) == NULL)
+ goto err;
+ (*jnienv)->SetByteArrayRegion(jnienv, bytearr, 0,
+ sizeof(preps[i].gid), (jbyte *)&preps[i].gid[0]);
+ (*jnienv)->SetObjectField(jnienv, obj,
+ gid_fieldid, bytearr);
+ }
+ }
+err: __os_free(dbenv, preps);
+out: return (retval);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_TXN_STAT *statp;
+ jobject retval, obj;
+ jclass dbclass, active_class;
+ char active_signature[512];
+ jfieldID arrid;
+ jobjectArray actives;
+ unsigned int i;
+
+ retval = NULL;
+ statp = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->txn_stat(dbenv, &statp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_TXN_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_TXN_STAT)) == NULL)
+ goto err;
+
+ /* Set the individual fields */
+ __jv_fill_txn_stat(jnienv, dbclass, retval, statp);
+
+ if ((active_class =
+ get_class(jnienv, name_DB_TXN_STAT_ACTIVE)) == NULL ||
+ (actives = (*jnienv)->NewObjectArray(jnienv,
+ statp->st_nactive, active_class, 0)) == NULL)
+ goto err;
+
+ /*
+ * Set the st_txnarray field. This is a little more involved
+ * than other fields, since the type is an array, so none
+ * of our utility functions help.
+ */
+ (void)snprintf(active_signature, sizeof(active_signature),
+ "[L%s%s;", DB_PACKAGE_NAME, name_DB_TXN_STAT_ACTIVE);
+
+ arrid = (*jnienv)->GetFieldID(jnienv, dbclass, "st_txnarray",
+ active_signature);
+ (*jnienv)->SetObjectField(jnienv, retval, arrid, actives);
+
+		/* Now fill in the elements of st_txnarray. */
+ for (i=0; i<statp->st_nactive; i++) {
+ obj = create_default_object(jnienv,
+ name_DB_TXN_STAT_ACTIVE);
+ (*jnienv)->SetObjectArrayElement(jnienv,
+ actives, i, obj);
+
+ set_int_field(jnienv, active_class, obj,
+ "txnid", statp->st_txnarray[i].txnid);
+ set_int_field(jnienv, active_class, obj, "parentid",
+ statp->st_txnarray[i].parentid);
+ set_lsn_field(jnienv, active_class, obj,
+ "lsn", statp->st_txnarray[i].lsn);
+ }
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
+}
+
+/* See discussion on errpfx, errcall in DB_ENV_JAVAINFO */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject errcall)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv) &&
+ verify_non_null(jnienv, dbenvinfo)) {
+ dbjie_set_errcall(dbenvinfo, jnienv, errcall);
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring str)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+
+ if (verify_non_null(jnienv, dbenv) &&
+ verify_non_null(jnienv, dbenvinfo)) {
+ dbjie_set_errpfx(dbenvinfo, jnienv, str);
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+ jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+ DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *envinfo;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ DB_ASSERT(envinfo != NULL);
+
+ /* Note: We detect and report unclosed DbEnvs. */
+ if (dbenv != NULL && envinfo != NULL && !dbjie_is_dbopen(envinfo)) {
+
+ /* If this error occurs, this object was never closed. */
+ report_errcall(jnienv, errcall, errpfx,
+ "DbEnv.finalize: open DbEnv object destroyed");
+ }
+
+ /* Shouldn't see this object again, but just in case */
+ set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+ set_private_info(jnienv, name_DB_ENV, jthis, 0);
+
+ dbjie_destroy(envinfo, jnienv);
+}
diff --git a/libdb/libdb_java/java_DbLock.c b/libdb/libdb_java/java_DbLock.c
new file mode 100644
index 0000000..51aca62
--- /dev/null
+++ b/libdb/libdb_java/java_DbLock.c
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLock.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
+ if (dblock) {
+ /* Free any data related to DB_LOCK here */
+ __os_free(NULL, dblock);
+ }
+ set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0); /* paranoia */
+}
diff --git a/libdb/libdb_java/java_DbLogc.c b/libdb/libdb_java/java_DbLogc.c
new file mode 100644
index 0000000..f42667d
--- /dev/null
+++ b/libdb/libdb_java/java_DbLogc.c
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLogc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DB_LOGC *dblogc = get_DB_LOGC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dblogc))
+ return;
+ err = dblogc->close(dblogc, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB_LOGC, jthis, 0);
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*DbLsn*/ jobject lsn, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry;
+ DB_LOGC *dblogc;
+ DB_LSN *dblsn;
+ LOCKED_DBT ldata;
+ OpKind dataop;
+
+ /*
+	 * Depending on flags, the user may be supplying the LSN,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ dataop = outOp;
+
+	dblogc = get_DB_LOGC(jnienv, jthis);
+	dblsn = get_DB_LSN(jnienv, lsn);
+
+	/* Check the cursor before dereferencing dblogc->dbenv below. */
+	if (!verify_non_null(jnienv, dblogc))
+		return (0);
+
+	if (locked_dbt_get(&ldata, jnienv, dblogc->dbenv, data, dataop) != 0)
+		goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dblogc->get(dblogc, dblsn, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&ldata, jnienv, dblogc->dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dblogc->dbenv);
+ if (!DB_RETOK_LGGET(err)) {
+ if (verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /*
+ * Free any data related to DB_LOGC here.
+ * If we ever have java-only data embedded in the DB_LOGC
+ * and need to do this, we'll have to track DbLogc's
+ * according to which DbEnv owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DB_LOGC *dblogc;
+
+ dblogc = get_DB_LOGC(jnienv, jthis);
+ if (dblogc != NULL)
+ fprintf(stderr, "Java API: DbLogc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/libdb/libdb_java/java_DbLsn.c b/libdb/libdb_java/java_DbLsn.c
new file mode 100644
index 0000000..f4d5d62
--- /dev/null
+++ b/libdb/libdb_java/java_DbLsn.c
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h> /* needed for FILENAME_MAX */
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLsn.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+ (JNIEnv *jnienv, /*DbLsn*/ jobject jthis)
+{
+ /*
+ * Note: the DB_LSN object stored in the private_dbobj_
+ * is allocated in get_DbLsn() or get_DB_LSN().
+ */
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DB_LSN *dblsn;
+
+ dblsn = get_DB_LSN(jnienv, jthis);
+ if (dblsn) {
+ (void)__os_free(NULL, dblsn);
+ }
+}
diff --git a/libdb/libdb_java/java_DbTxn.c b/libdb/libdb_java/java_DbTxn.c
new file mode 100644
index 0000000..0a8b2af
--- /dev/null
+++ b/libdb/libdb_java/java_DbTxn.c
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbTxn.h"
+
+JAVADB_METHOD(DbTxn_abort, (JAVADB_ARGS), DB_TXN,
+ abort, (c_this))
+JAVADB_METHOD(DbTxn_commit, (JAVADB_ARGS, jint flags), DB_TXN,
+ commit, (c_this, flags))
+JAVADB_METHOD(DbTxn_discard, (JAVADB_ARGS, jint flags), DB_TXN,
+ discard, (c_this, flags))
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int retval = 0;
+ DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return (-1);
+
+ /* No error to check for from DB_TXN->id */
+ retval = dbtxn->id(dbtxn);
+ return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+ (JNIEnv *jnienv, jobject jthis, jbyteArray gid)
+{
+ int err;
+ DB_TXN *dbtxn;
+ jbyte *c_array;
+
+ dbtxn = get_DB_TXN(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbtxn))
+ return;
+
+ if (gid == NULL ||
+ (*jnienv)->GetArrayLength(jnienv, gid) < DB_XIDDATASIZE) {
+ report_exception(jnienv, "DbTxn.prepare gid array "
+ "must be >= 128 bytes", EINVAL, 0);
+ return;
+ }
+ c_array = (*jnienv)->GetByteArrayElements(jnienv, gid, NULL);
+ err = dbtxn->prepare(dbtxn, (u_int8_t *)c_array);
+ (*jnienv)->ReleaseByteArrayElements(jnienv, gid, c_array, 0);
+ verify_return(jnienv, err, 0);
+}
+
+JAVADB_METHOD(DbTxn_set_1timeout,
+ (JAVADB_ARGS, jlong timeout, jint flags), DB_TXN,
+ set_timeout, (c_this, (u_int32_t)timeout, flags))
diff --git a/libdb/libdb_java/java_DbUtil.c b/libdb/libdb_java/java_DbUtil.c
new file mode 100644
index 0000000..ed958dd
--- /dev/null
+++ b/libdb/libdb_java/java_DbUtil.c
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbUtil.h"
+
+JNIEXPORT jboolean JNICALL
+Java_com_sleepycat_db_DbUtil_is_1big_1endian (JNIEnv *jnienv,
+ jclass jthis_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis_class, NULL);
+
+ return (__db_isbigendian() ? JNI_TRUE : JNI_FALSE);
+}
diff --git a/libdb/libdb_java/java_DbXAResource.c b/libdb/libdb_java/java_DbXAResource.c
new file mode 100644
index 0000000..fba0900
--- /dev/null
+++ b/libdb/libdb_java/java_DbXAResource.c
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "com_sleepycat_db_xa_DbXAResource.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+ (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+ int err;
+ LOCKED_STRING ls_home;
+ jclass cl;
+ jmethodID mid;
+
+ COMPQUIET(jthis, NULL);
+ if (locked_string_get(&ls_home, jnienv, home) != 0)
+ goto out;
+ if ((err = __db_xa_open((char *)ls_home.string,
+ rmid, flags)) != XA_OK) {
+ verify_return(jnienv, err, EXCEPTION_XA);
+ }
+
+ /*
+	 * Now create the DbEnv object; it will get attached
+ * to the DB_ENV just made in __db_xa_open.
+ */
+ if ((cl = get_class(jnienv, name_DB_ENV)) == NULL)
+ goto out;
+
+ mid = (*jnienv)->GetStaticMethodID(jnienv, cl,
+ "_create_DbEnv_for_XA", "(II)V");
+ (*jnienv)->CallStaticVoidMethod(jnienv, cl, mid, 0, rmid);
+
+ out:
+ locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+ (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+ int err;
+ LOCKED_STRING ls_home;
+
+ COMPQUIET(jthis, NULL);
+ if (locked_string_get(&ls_home, jnienv, home) != 0)
+ goto out;
+ if ((err = __db_xa_close((char *)ls_home.string,
+ rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+ out:
+ locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid,
+ jboolean onePhase)
+{
+ XID xid;
+ long flags;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ flags = 0;
+ if (onePhase == JNI_TRUE)
+ flags |= TMONEPHASE;
+ if ((err = __db_xa_commit(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_end(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_forget(&xid, rmid, 0)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return (0);
+ err = __db_xa_prepare(&xid, rmid, 0);
+ if (err != XA_OK && err != XA_RDONLY)
+ verify_return(jnienv, err, EXCEPTION_XA);
+
+ return (err);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+ (JNIEnv *jnienv, jobject jthis, jint rmid, jint flags)
+{
+ XID *xids;
+ int err;
+ int total;
+ int cnt;
+ int i;
+ int curflags;
+ size_t nbytes;
+ jclass xid_class;
+ jmethodID mid;
+ jobject obj;
+ jobjectArray retval;
+
+ COMPQUIET(jthis, NULL);
+ total = 0;
+ cnt = 0;
+ xids = NULL;
+ flags &= ~(DB_FIRST | DB_LAST | DB_NEXT);
+
+ /* Repeatedly call __db_xa_recover to fill up an array of XIDs */
+ curflags = flags | DB_FIRST;
+ do {
+ total += cnt;
+ nbytes = sizeof(XID) * (total + 10);
+ if ((err = __os_realloc(NULL, nbytes, &xids)) != 0) {
+ if (xids != NULL)
+ __os_free(NULL, xids);
+ verify_return(jnienv, XAER_NOTA, EXCEPTION_XA);
+ return (NULL);
+ }
+ cnt = __db_xa_recover(&xids[total], 10, rmid, curflags);
+ curflags = flags | DB_NEXT;
+ } while (cnt > 0);
+
+	/*
+	 * Keep the xids array alive until its contents have been
+	 * copied into the java DbXid array below.
+	 */
+	if (cnt < 0) {
+		__os_free(NULL, xids);
+		verify_return(jnienv, cnt, EXCEPTION_XA);
+		return (NULL);
+	}
+
+ /* Create the java DbXid array and fill it up */
+	if ((xid_class = get_class(jnienv, name_DB_XID)) == NULL) {
+		__os_free(NULL, xids);
+		return (NULL);
+	}
+ mid = (*jnienv)->GetMethodID(jnienv, xid_class, "<init>",
+ "(I[B[B)V");
+ if ((retval = (*jnienv)->NewObjectArray(jnienv, total, xid_class, 0))
+ == NULL)
+ goto out;
+
+ for (i = 0; i < total; i++) {
+ jobject gtrid;
+ jobject bqual;
+ jsize gtrid_len;
+ jsize bqual_len;
+
+ gtrid_len = (jsize)xids[i].gtrid_length;
+ bqual_len = (jsize)xids[i].bqual_length;
+ gtrid = (*jnienv)->NewByteArray(jnienv, gtrid_len);
+ bqual = (*jnienv)->NewByteArray(jnienv, bqual_len);
+ if (gtrid == NULL || bqual == NULL)
+ goto out;
+ (*jnienv)->SetByteArrayRegion(jnienv, gtrid, 0, gtrid_len,
+ (jbyte *)&xids[i].data[0]);
+ (*jnienv)->SetByteArrayRegion(jnienv, bqual, 0, bqual_len,
+ (jbyte *)&xids[i].data[gtrid_len]);
+ if ((obj = (*jnienv)->NewObject(jnienv, xid_class, mid,
+ (jint)xids[i].formatID, gtrid, bqual)) == NULL)
+ goto out;
+ (*jnienv)->SetObjectArrayElement(jnienv, retval, i, obj);
+ }
+out:	__os_free(NULL, xids);
+	return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_rollback(&xid, rmid, 0)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+
+ if ((err = __db_xa_start(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+ (JNIEnv *jnienv, jclass jthisclass, jobject jxid, jobject jrmid)
+{
+ XID xid;
+ XID *xidp;
+ int ret;
+ DB_ENV *env;
+ DB_TXN *txn;
+ int rmid;
+ int *rmidp;
+ jobject jtxn;
+ jobject jenv;
+ jclass cl;
+ jmethodID mid;
+
+ COMPQUIET(jthisclass, NULL);
+ if (jxid == NULL) {
+ xidp = NULL;
+ }
+ else {
+ xidp = &xid;
+ if (!get_XID(jnienv, jxid, &xid))
+ return (NULL);
+ }
+ if (jrmid == NULL) {
+ rmidp = NULL;
+ }
+ else {
+ rmidp = &rmid;
+ rmid = (int)(*jnienv)->CallIntMethod(jnienv, jrmid,
+ mid_Integer_intValue);
+ }
+
+ if ((ret = db_env_xa_attach(rmidp, xidp, &env, &txn)) != 0) {
+ /*
+		 * DB_NOTFOUND is a normal return; it means we
+		 * have no current transaction.
+ */
+ if (ret != DB_NOTFOUND)
+ verify_return(jnienv, ret, 0);
+ return (NULL);
+ }
+
+ jenv = ((DB_ENV_JAVAINFO *)env->api2_internal)->jenvref;
+ jtxn = get_DbTxn(jnienv, txn);
+ if ((cl = get_class(jnienv, name_DB_XAATTACH)) == NULL)
+ return (NULL);
+ mid = (*jnienv)->GetMethodID(jnienv, cl, "<init>",
+ "(Lcom/sleepycat/db/DbEnv;Lcom/sleepycat/db/DbTxn;)V");
+ return (*jnienv)->NewObject(jnienv, cl, mid, jenv, jtxn);
+}
diff --git a/libdb/libdb_java/java_Dbc.c b/libdb/libdb_java/java_Dbc.c
new file mode 100644
index 0000000..025da40
--- /dev/null
+++ b/libdb/libdb_java/java_Dbc.c
@@ -0,0 +1,278 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbc))
+ return;
+ err = dbc->c_close(dbc);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DBC, jthis, 0);
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ db_recno_t count;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_count(dbc, &count, flags);
+ verify_return(jnienv, err, 0);
+ return (count);
+}
+
+JAVADB_METHOD_INT(Dbc_del, (JAVADB_ARGS, jint flags), DBC,
+ c_del, (c_this, flags), DB_RETOK_DBCDEL)
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ DBC *dbc_ret = NULL;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_dup(dbc, &dbc_ret, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+
+ return (get_Dbc(jnienv, dbc_ret));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop, dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_get(dbc,
+ &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv,
+ dbenv) && !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject pkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, lpkey, ldata;
+ OpKind keyop, pkeyop, dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ pkeyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
+ pkeyop = inOutOp;
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lpkey, jnienv, dbenv, pkey, pkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_pget(dbc, &lkey.javainfo->dbt,
+ &lpkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lpkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lpkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lpkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop;
+
+ err = 0;
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ keyop = (dbc->dbp->type == DB_RECNO &&
+ (flags == DB_BEFORE || flags == DB_AFTER)) ? outOp : inOp;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+ err = dbc->c_put(dbc, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+ if (!DB_RETOK_DBCPUT(err))
+ verify_return(jnienv, err, 0);
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /*
+ * Free any data related to DBC here.
+ * If we ever have java-only data embedded in the DBC
+ * and need to do this, we'll have to track Dbc's
+ * according to which Db owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DBC *dbc;
+
+ dbc = get_DBC(jnienv, jthis);
+ if (dbc != NULL)
+ fprintf(stderr, "Java API: Dbc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/libdb/libdb_java/java_Dbt.c b/libdb/libdb_java/java_Dbt.c
new file mode 100644
index 0000000..5d04c91
--- /dev/null
+++ b/libdb/libdb_java/java_Dbt.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbt.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = dbjit_construct();
+ set_private_dbobj(jnienv, name_DBT, jthis, dbtji);
+}
+
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *db_this;
+ jbyteArray arr = NULL;
+ int len;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+ len = db_this->dbt.size;
+ if ((arr = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ goto out;
+ (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
+ db_this->dbt.data);
+ }
+out: return (arr);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = get_DBT_JAVAINFO(jnienv, jthis);
+ if (dbtji) {
+ /* Free any data related to DBT here */
+ dbjit_destroy(dbtji);
+ }
+}
diff --git a/libdb/libdb_java/java_info.c b/libdb/libdb_java/java_info.c
new file mode 100644
index 0000000..a0369d8
--- /dev/null
+++ b/libdb/libdb_java/java_info.c
@@ -0,0 +1,1125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Callback functions
+ */
+
+static int Db_assoc_callback(DB *db,
+ const DBT *key,
+ const DBT *data,
+ DBT *retval)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_assoc(dbinfo, db, dbinfo->jdbref,
+ key, data, retval));
+}
+
+static void Db_feedback_callback(DB *db, int opcode, int percent)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ dbji_call_feedback(dbinfo, db, dbinfo->jdbref, opcode, percent);
+}
+
+static int Db_append_recno_callback(DB *db, DBT *dbt, db_recno_t recno)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref, dbt, recno));
+}
+
+static int Db_bt_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+static size_t Db_bt_prefix_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+static int Db_dup_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+static u_int32_t Db_h_hash_callback(DB *db, const void *data, u_int32_t len)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref, data, len));
+}
+
+static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref, opcode, percent);
+}
+
+static int DbEnv_rep_transport_callback(DB_ENV *dbenv,
+ const DBT *control, const DBT *rec,
+ int envid, u_int32_t flags)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_rep_transport(dbinfo, dbenv,
+ dbinfo->jenvref, control, rec, envid, (int)flags));
+}
+
+static int DbEnv_app_dispatch_callback(DB_ENV *dbenv, DBT *dbt,
+ DB_LSN *lsn, db_recops recops)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_app_dispatch(dbinfo, dbenv, dbinfo->jenvref, dbt,
+ lsn, recops));
+}
+
+/****************************************************************
+ *
+ * Implementation of class DBT_javainfo
+ */
+DBT_JAVAINFO *
+dbjit_construct()
+{
+ DBT_JAVAINFO *dbjit;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DBT_JAVAINFO), &dbjit)) != 0)
+ return (NULL);
+
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ return (dbjit);
+}
+
+void dbjit_destroy(DBT_JAVAINFO *dbjit)
+{
+ DB_ASSERT(!F_ISSET(dbjit, DBT_JAVAINFO_LOCKED));
+ /* Extra paranoia */
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ (void)__os_free(NULL, dbjit);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_ENV_JAVAINFO
+ */
+
+/* create/initialize an object */
+DB_ENV_JAVAINFO *
+dbjie_construct(JNIEnv *jnienv,
+ jobject jenv,
+ jobject default_errcall,
+ int is_dbopen)
+{
+ DB_ENV_JAVAINFO *dbjie;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_ENV_JAVAINFO), &dbjie)) != 0)
+ return (NULL);
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ dbjie->is_dbopen = is_dbopen;
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm) != 0) {
+ __os_free(NULL, dbjie);
+ report_exception(jnienv, "cannot get Java VM", 0, 0);
+ return (NULL);
+ }
+
+ /*
+ * The default error call just prints to the 'System.err'
+ * stream. If the user does set_errcall to null, we'll
+ * want to have a reference to set it back to.
+ *
+	 * Why do we always set db_errcall to our own callback?
+ * Because it makes the interaction between setting the
+ * error prefix, error stream, and user's error callback
+ * that much easier.
+ */
+ dbjie->default_errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->jenvref = NEW_GLOBAL_REF(jnienv, jenv);
+ return (dbjie);
+}
+
+/* release all objects held by this one */
+void dbjie_dealloc(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ if (dbjie->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
+ dbjie->feedback = NULL;
+ }
+ if (dbjie->app_dispatch != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
+ dbjie->app_dispatch = NULL;
+ }
+ if (dbjie->errcall != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+ dbjie->errcall = NULL;
+ }
+ if (dbjie->default_errcall != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall);
+ dbjie->default_errcall = NULL;
+ }
+ if (dbjie->jenvref != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->jenvref);
+ dbjie->jenvref = NULL;
+ }
+
+ if (dbjie->conflict != NULL) {
+ __os_free(NULL, dbjie->conflict);
+ dbjie->conflict = NULL;
+ dbjie->conflict_size = 0;
+ }
+ if (dbjie->errpfx != NULL) {
+ __os_free(NULL, dbjie->errpfx);
+ dbjie->errpfx = NULL;
+ }
+}
+
+/* free this object, releasing anything allocated on its behalf */
+void dbjie_destroy(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ dbjie_dealloc(dbjie, jnienv);
+
+ /* Extra paranoia */
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ (void)__os_free(NULL, dbjie);
+}
+
+/*
+ * Attach to the currently running thread and return its JNIEnv.
+ * We use the Java virtual machine that we saved in the
+ * constructor.
+ */
+JNIEnv *
+dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
+{
+ /*
+ * Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /*
+ * This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbjie->javavm)->AttachCurrentThread(dbjie->javavm, &attachret, 0)
+ != 0)
+ return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+jstring
+dbjie_get_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ return (get_java_string(jnienv, dbjie->errpfx));
+}
+
+void
+dbjie_set_errcall(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jobject new_errcall)
+{
+ /*
+ * If the new_errcall is null, we'll set the error call
+ * to the default one.
+ */
+ if (new_errcall == NULL)
+ new_errcall = dbjie->default_errcall;
+
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, new_errcall);
+}
+
+void
+dbjie_set_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jstring errpfx)
+{
+ if (dbjie->errpfx != NULL)
+ __os_free(NULL, dbjie->errpfx);
+
+ if (errpfx)
+ dbjie->errpfx = get_c_string(jnienv, errpfx);
+ else
+ dbjie->errpfx = NULL;
+}
+
+void
+dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, u_char *newarr, size_t size)
+{
+ if (dbjie->conflict != NULL)
+ (void)__os_free(NULL, dbjie->conflict);
+ dbjie->conflict = newarr;
+ dbjie->conflict_size = size;
+}
+
+void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject jfeedback)
+{
+ int err;
+
+ if (dbjie->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
+ }
+ if (jfeedback == NULL) {
+ if ((err = dbenv->set_feedback(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_feedback failed",
+ err, 0);
+ }
+ else {
+ if ((err = dbenv->set_feedback(dbenv,
+ DbEnv_feedback_callback)) != 0)
+ report_exception(jnienv, "set_feedback failed",
+ err, 0);
+ }
+
+ dbjie->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
+}
+
+void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+ int opcode, int percent)
+{
+ JNIEnv *jnienv;
+ jclass feedback_class;
+ jmethodID id;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return;
+ }
+
+ if ((feedback_class =
+ get_class(jnienv, name_DbEnvFeedback)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbEnvFeedback);
+ return; /* An exception has been posted. */
+ }
+ id = (*jnienv)->GetMethodID(jnienv, feedback_class,
+ "feedback",
+ "(Lcom/sleepycat/db/DbEnv;II)V");
+ if (!id) {
+ fprintf(stderr, "Cannot find callback method feedback\n");
+ return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, dbjie->feedback, id,
+ jenv, (jint)opcode, (jint)percent);
+}
+
+void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, int id, jobject jtransport)
+{
+ int err;
+
+ if (dbjie->rep_transport != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbjie->rep_transport);
+
+ err = dbenv->set_rep_transport(dbenv, id,
+ DbEnv_rep_transport_callback);
+ verify_return(jnienv, err, 0);
+
+ dbjie->rep_transport = NEW_GLOBAL_REF(jnienv, jtransport);
+}
+
+int dbjie_call_rep_transport(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
+ jobject jenv, const DBT *control,
+ const DBT *rec, int flags, int envid)
+{
+ JNIEnv *jnienv;
+ jclass rep_transport_class;
+ jmethodID jid;
+ jobject jcdbt, jrdbt;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ if ((rep_transport_class =
+ get_class(jnienv, name_DbRepTransport)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbRepTransport);
+ return (0); /* An exception has been posted. */
+ }
+ jid = (*jnienv)->GetMethodID(jnienv, rep_transport_class,
+ "send",
+ "(Lcom/sleepycat/db/DbEnv;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;II)I");
+
+ if (!jid) {
+ fprintf(stderr, "Cannot find callback method send\n");
+ return (0);
+ }
+
+ jcdbt = get_const_Dbt(jnienv, control, NULL);
+ jrdbt = get_const_Dbt(jnienv, rec, NULL);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->rep_transport, jid, jenv,
+ jcdbt, jrdbt, flags, envid);
+}
+
+void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject japp_dispatch)
+{
+ int err;
+
+ if (dbjie->app_dispatch != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
+ }
+ if (japp_dispatch == NULL) {
+ if ((err = dbenv->set_app_dispatch(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_app_dispatch failed",
+ err, 0);
+ }
+ else {
+ if ((err = dbenv->set_app_dispatch(dbenv,
+ DbEnv_app_dispatch_callback)) != 0)
+ report_exception(jnienv, "set_app_dispatch failed",
+ err, 0);
+ }
+
+ dbjie->app_dispatch = NEW_GLOBAL_REF(jnienv, japp_dispatch);
+}
+
+int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+ DBT *dbt, DB_LSN *lsn, int recops)
+{
+ JNIEnv *jnienv;
+ jclass app_dispatch_class;
+ jmethodID id;
+ jobject jdbt;
+ jobject jlsn;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ if ((app_dispatch_class =
+ get_class(jnienv, name_DbTxnRecover)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbTxnRecover);
+ return (0); /* An exception has been posted. */
+ }
+ id = (*jnienv)->GetMethodID(jnienv, app_dispatch_class,
+ "app_dispatch",
+ "(Lcom/sleepycat/db/DbEnv;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/DbLsn;"
+ "I)I");
+ if (!id) {
+ fprintf(stderr, "Cannot find callback method app_dispatch\n");
+ return (0);
+ }
+
+ jdbt = get_Dbt(jnienv, dbt, NULL);
+
+ if (lsn == NULL)
+ jlsn = NULL;
+ else
+ jlsn = get_DbLsn(jnienv, *lsn);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->app_dispatch, id, jenv,
+ jdbt, jlsn, recops);
+}
+
+jobject dbjie_get_errcall(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->errcall);
+}
+
+jint dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->is_dbopen);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_JAVAINFO
+ */
+
+DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags)
+{
+ DB_JAVAINFO *dbji;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_JAVAINFO), &dbji)) != 0)
+ return (NULL);
+
+ memset(dbji, 0, sizeof(DB_JAVAINFO));
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm) != 0) {
+ report_exception(jnienv, "cannot get Java VM", 0, 0);
+ (void)__os_free(NULL, dbji);
+ return (NULL);
+ }
+ dbji->jdbref = NEW_GLOBAL_REF(jnienv, jdb);
+ dbji->construct_flags = flags;
+ return (dbji);
+}
+
+void
+dbji_dealloc(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ if (dbji->append_recno != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
+ dbji->append_recno = NULL;
+ }
+ if (dbji->assoc != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+ dbji->assoc = NULL;
+ }
+ if (dbji->bt_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
+ dbji->bt_compare = NULL;
+ }
+ if (dbji->bt_prefix != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
+ dbji->bt_prefix = NULL;
+ }
+ if (dbji->dup_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
+ dbji->dup_compare = NULL;
+ }
+ if (dbji->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback);
+ dbji->feedback = NULL;
+ }
+ if (dbji->h_hash != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
+ dbji->h_hash = NULL;
+ }
+ if (dbji->jdbref != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->jdbref);
+ dbji->jdbref = NULL;
+ }
+}
+
+void
+dbji_destroy(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ dbji_dealloc(dbji, jnienv);
+ __os_free(NULL, dbji);
+}
+
+JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
+{
+ /*
+ * Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /*
+ * This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbji->javavm)->AttachCurrentThread(dbji->javavm, &attachret, 0)
+ != 0)
+ return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+jint dbji_get_flags(DB_JAVAINFO *dbji)
+{
+ return (dbji->construct_flags);
+}
+
+void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jfeedback)
+{
+ jclass feedback_class;
+
+ if (dbji->feedback_method_id == NULL) {
+ if ((feedback_class =
+ get_class(jnienv, name_DbFeedback)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->feedback_method_id =
+ (*jnienv)->GetMethodID(jnienv, feedback_class,
+ "feedback",
+ "(Lcom/sleepycat/db/Db;II)V");
+ if (dbji->feedback_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback);
+ }
+ if (jfeedback == NULL) {
+ db->set_feedback(db, NULL);
+ }
+ else {
+ db->set_feedback(db, Db_feedback_callback);
+ }
+
+	dbji->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
+}
+
+void dbji_call_feedback(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ int opcode, int percent)
+{
+ JNIEnv *jnienv;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return;
+ }
+
+ DB_ASSERT(dbji->feedback_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->feedback,
+ dbji->feedback_method_id,
+ jdb, (jint)opcode, (jint)percent);
+}
+
+void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcallback)
+{
+ jclass append_recno_class;
+
+ if (dbji->append_recno_method_id == NULL) {
+ if ((append_recno_class =
+ get_class(jnienv, name_DbAppendRecno)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->append_recno_method_id =
+ (*jnienv)->GetMethodID(jnienv, append_recno_class,
+ "db_append_recno",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;I)V");
+ if (dbji->append_recno_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->append_recno != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
+ }
+ if (jcallback == NULL) {
+ db->set_append_recno(db, NULL);
+ }
+ else {
+ db->set_append_recno(db, Db_append_recno_callback);
+ }
+
+ dbji->append_recno = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ DBT *dbt, jint recno)
+{
+ JNIEnv *jnienv;
+ jobject jresult;
+ DBT_JAVAINFO *dbtji;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ u_char *bytearray;
+ int err;
+
+ jnienv = dbji_get_jnienv(dbji);
+ dbenv = db->dbenv;
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jresult = get_Dbt(jnienv, dbt, &dbtji);
+
+ DB_ASSERT(dbji->append_recno_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->append_recno,
+ dbji->append_recno_method_id,
+ jdb, jresult, recno);
+
+ /*
+ * The underlying C API requires that an errno be returned
+ * on error. Java users know nothing of errnos, so we
+ * allow them to throw exceptions instead. We leave the
+ * exception in place and return DB_JAVA_CALLBACK to the C API
+ * that called us. Eventually the DB->get will fail and
+ * when java prepares to throw an exception in
+ * report_exception(), this will be spotted as a special case,
+ * and the original exception will be preserved.
+ *
+ * Note: we have sometimes noticed strange behavior with
+ * exceptions under Linux 1.1.7 JVM. (i.e. multiple calls
+ * to ExceptionOccurred() may report different results).
+ * Currently we don't know of any problems related to this
+ * in our code, but if it pops up in the future, users are
+	 * encouraged to get a more recent JVM.
+ */
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+ return (DB_JAVA_CALLBACK);
+
+ /*
+ * Now get the DBT back from java, because the user probably
+ * changed it. We'll have to copy back the array too and let
+ * our caller free it.
+ *
+ * We expect that the user *has* changed the DBT (why else would
+ * they set up an append_recno callback?) so we don't
+ * worry about optimizing the unchanged case.
+ */
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+ return (err);
+
+ memcpy(dbt, &lresult.javainfo->dbt, sizeof(DBT));
+ if ((err = __os_malloc(dbenv, dbt->size, &bytearray)) != 0)
+ goto out;
+
+ memcpy(bytearray, dbt->data, dbt->size);
+ dbt->data = bytearray;
+ dbt->flags |= DB_DBT_APPMALLOC;
+
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
+}
+
+void dbji_set_assoc_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, DB_TXN *txn, DB *second,
+ jobject jcallback, int flags)
+{
+ jclass assoc_class;
+ int err;
+
+ if (dbji->assoc_method_id == NULL) {
+ if ((assoc_class =
+ get_class(jnienv, name_DbSecondaryKeyCreate)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->assoc_method_id =
+ (*jnienv)->GetMethodID(jnienv, assoc_class,
+ "secondary_key_create",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->assoc_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->assoc != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+ dbji->assoc = NULL;
+ }
+
+ if (jcallback == NULL)
+ err = db->associate(db, txn, second, NULL, flags);
+ else
+ err = db->associate(db, txn, second, Db_assoc_callback, flags);
+
+ if (verify_return(jnienv, err, 0))
+ dbji->assoc = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+extern int dbji_call_assoc(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *key, const DBT *value, DBT *result)
+{
+ JNIEnv *jnienv;
+ jobject jresult;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ int err;
+ int sz;
+ u_char *bytearray;
+ jint retval;
+
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ DB_ASSERT(dbji->assoc_method_id != NULL);
+
+ dbenv = db->dbenv;
+ jresult = create_default_object(jnienv, name_DBT);
+
+ retval = (*jnienv)->CallIntMethod(jnienv, dbji->assoc,
+ dbji->assoc_method_id, jdb,
+ get_const_Dbt(jnienv, key, NULL),
+ get_const_Dbt(jnienv, value, NULL),
+ jresult);
+ if (retval != 0)
+ return (retval);
+
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+ return (DB_JAVA_CALLBACK);
+
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+ return (err);
+
+ sz = lresult.javainfo->dbt.size;
+ if (sz > 0) {
+ bytearray = (u_char *)lresult.javainfo->dbt.data;
+
+ /*
+ * If the byte array is in the range of one of the
+ * arrays passed to us we can use it directly.
+ * If not, we must create our own array and
+ * fill it in with the java array. Since
+ * the java array may disappear and we don't
+ * want to keep its memory locked indefinitely,
+ * we cannot just pin the array.
+ *
+ * XXX consider pinning the array, and having
+ * some way for the C layer to notify the java
+ * layer when it can be unpinned.
+ */
+ if ((bytearray < (u_char *)key->data ||
+ bytearray + sz > (u_char *)key->data + key->size) &&
+ (bytearray < (u_char *)value->data ||
+ bytearray + sz > (u_char *)value->data + value->size)) {
+
+ result->flags |= DB_DBT_APPMALLOC;
+ if ((err = __os_malloc(dbenv, sz, &bytearray)) != 0)
+ goto out;
+ memcpy(bytearray, lresult.javainfo->dbt.data, sz);
+ }
+ result->data = bytearray;
+ result->size = sz;
+ }
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
+}
+
+void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcompare)
+{
+ jclass bt_compare_class;
+
+ if (dbji->bt_compare_method_id == NULL) {
+ if ((bt_compare_class =
+ get_class(jnienv, name_DbBtreeCompare)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->bt_compare_method_id =
+ (*jnienv)->GetMethodID(jnienv, bt_compare_class,
+ "bt_compare",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->bt_compare_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->bt_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
+ }
+ if (jcompare == NULL) {
+ db->set_bt_compare(db, NULL);
+ }
+ else {
+ db->set_bt_compare(db, Db_bt_compare_callback);
+ }
+
+ dbji->bt_compare = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare,
+ dbji->bt_compare_method_id,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jprefix)
+{
+ jclass bt_prefix_class;
+
+ if (dbji->bt_prefix_method_id == NULL) {
+ if ((bt_prefix_class =
+ get_class(jnienv, name_DbBtreePrefix)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->bt_prefix_method_id =
+ (*jnienv)->GetMethodID(jnienv, bt_prefix_class,
+ "bt_prefix",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->bt_prefix_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->bt_prefix != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
+ }
+ if (jprefix == NULL) {
+ db->set_bt_prefix(db, NULL);
+ }
+ else {
+ db->set_bt_prefix(db, Db_bt_prefix_callback);
+ }
+
+ dbji->bt_prefix = NEW_GLOBAL_REF(jnienv, jprefix);
+}
+
+size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_prefix_method_id != NULL);
+ return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix,
+ dbji->bt_prefix_method_id,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcompare)
+{
+ jclass dup_compare_class;
+
+ if (dbji->dup_compare_method_id == NULL) {
+ if ((dup_compare_class =
+ get_class(jnienv, name_DbDupCompare)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->dup_compare_method_id =
+ (*jnienv)->GetMethodID(jnienv, dup_compare_class,
+ "dup_compare",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->dup_compare_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->dup_compare != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
+
+ if (jcompare == NULL)
+ db->set_dup_compare(db, NULL);
+ else
+ db->set_dup_compare(db, Db_dup_compare_callback);
+
+ dbji->dup_compare = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->dup_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare,
+ dbji->dup_compare_method_id,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jhash)
+{
+ jclass h_hash_class;
+
+ if (dbji->h_hash_method_id == NULL) {
+ if ((h_hash_class =
+ get_class(jnienv, name_DbHash)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->h_hash_method_id =
+ (*jnienv)->GetMethodID(jnienv, h_hash_class,
+ "hash",
+ "(Lcom/sleepycat/db/Db;"
+ "[BI)I");
+ if (dbji->h_hash_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->h_hash != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
+
+ if (jhash == NULL)
+ db->set_h_hash(db, NULL);
+ else
+ db->set_h_hash(db, Db_h_hash_callback);
+
+ dbji->h_hash = NEW_GLOBAL_REF(jnienv, jhash);
+}
+
+int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const void *data, int len)
+{
+ JNIEnv *jnienv;
+ jbyteArray jdata;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ DB_ASSERT(dbji->h_hash_method_id != NULL);
+
+ if ((jdata = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ return (0); /* An exception has been posted by the JVM */
+ (*jnienv)->SetByteArrayRegion(jnienv, jdata, 0, len, (void *)data);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash,
+ dbji->h_hash_method_id,
+ jdb, jdata, len);
+}
diff --git a/libdb/libdb_java/java_info.h b/libdb/libdb_java/java_info.h
new file mode 100644
index 0000000..a3356a7
--- /dev/null
+++ b/libdb/libdb_java/java_info.h
@@ -0,0 +1,221 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _JAVA_INFO_H_
+#define _JAVA_INFO_H_
+
+/*
+ * "Info" classes for Java implementation of Berkeley DB API.
+ * These classes hold extra information for which there is
+ * no room or counterpart in the base classes used in the C API.
+ * In the case of a DBT, the DBT_javainfo class is stored in the
+ * 'private' variable of the java Dbt, and the DBT_javainfo is subclassed
+ * from a DBT. In the case of DB and DB_ENV, the appropriate
+ * info objects are pointed to by the DB and DB_ENV objects.
+ * This is convenient for implementing callbacks.
+ */
+
+/****************************************************************
+ *
+ * Declaration of class DBT_javainfo
+ *
+ * A DBT_javainfo is created whenever a Dbt (java) object is created,
+ * and a pointer to it is stored in its private info storage.
+ * It is subclassed from DBT, because we must retain some extra
+ * information in it while it is in use. In particular, when
+ * a java array is associated with it, we need to keep a Globally
+ * Locked reference to it so it is not GC'd. This reference is
+ * destroyed when the Dbt is GC'd.
+ */
+typedef struct _dbt_javainfo
+{
+ DBT dbt;
+ DB *db; /* associated DB */
+ jobject dbtref; /* the java Dbt object */
+ jbyteArray array; /* the java array object -
+ this is only valid during the API call */
+ int offset; /* offset into the Java array */
+
+#define DBT_JAVAINFO_LOCKED 0x01 /* a LOCKED_DBT has been created */
+ u_int32_t flags;
+}
+DBT_JAVAINFO; /* used with all 'dbtji' functions */
+
+/* create/initialize a DBT_JAVAINFO object */
+extern DBT_JAVAINFO *dbjit_construct();
+
+/* free this DBT_JAVAINFO, releasing anything allocated on its behalf */
+extern void dbjit_destroy(DBT_JAVAINFO *dbjit);
+
+/****************************************************************
+ *
+ * Declaration of class DB_ENV_JAVAINFO
+ *
+ * A DB_ENV_JAVAINFO is allocated and stuffed into the cj_internal
+ * and the db_errpfx for every DB_ENV created. It holds a
+ * little extra info that is needed to support callbacks.
+ *
+ * There's a bit of trickery here, because we have built this
+ * above a layer that has a C function callback that gets
+ * invoked when an error occurs. One of the C callback's arguments
+ * is the prefix from the DB_ENV, but since we stuffed a pointer
+ * to our own DB_ENV_JAVAINFO into the prefix, we get that object as an
+ * argument to the C callback. Thus, the C callback can have
+ * access to much more than just the prefix, and it needs that
+ * to call back into the Java environment.
+ *
+ * The DB_ENV_JAVAINFO object holds a copy of the Java Virtual Machine,
+ * which is needed to attach to the current running thread
+ * whenever we need to make a callback. (This is more reliable
+ * than our previous approach, which was to save the thread
+ * that created the DbEnv). It also has the Java callback object,
+ * as well as a 'default' callback object that is used when the
+ * caller sets the callback to null. It also has the original
+ * error prefix, since we overwrote the one in the DB_ENV.
+ * There are also fields that are unrelated to the handling
+ * of callbacks, but are convenient to attach to a DB_ENV.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of db_errpfx, db_errcall, cj_internal
+ * for a DB_ENV that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB_ENV, this should be true.
+ */
+typedef struct _db_env_javainfo
+{
+ JavaVM *javavm;
+ int is_dbopen;
+ char *errpfx;
+ jobject jenvref; /* global reference */
+ jobject default_errcall; /* global reference */
+ jobject errcall; /* global reference */
+ jobject feedback; /* global reference */
+ jobject rep_transport; /* global reference */
+ jobject app_dispatch; /* global reference */
+ jobject recovery_init; /* global reference */
+ u_char *conflict;
+ size_t conflict_size;
+ jint construct_flags;
+}
+DB_ENV_JAVAINFO; /* used with all 'dbjie' functions */
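+
+/*
+ * Illustrative sketch (not part of the original sources): the error
+ * callback installed via DB_ENV->set_errcall has the form
+ *
+ *	void errcall(const char *prefix, char *message);
+ *
+ * and because the Java layer stores a DB_ENV_JAVAINFO pointer in the
+ * prefix slot (see the comment above), the callback can recover it
+ * with a cast:
+ *
+ *	DB_ENV_JAVAINFO *dbjie = (DB_ENV_JAVAINFO *)prefix;
+ */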
+
+/* create/initialize an object */
+extern DB_ENV_JAVAINFO *dbjie_construct(JNIEnv *jnienv,
+ jobject jenv,
+ jobject default_errcall,
+ int is_dbopen);
+
+/* release all objects held by this one */
+extern void dbjie_dealloc(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbjie_destroy(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbjie_get_jnienv(DB_ENV_JAVAINFO *);
+
+extern void dbjie_set_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jstring errpfx);
+extern jstring dbjie_get_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+extern void dbjie_set_errcall(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jobject new_errcall);
+extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, u_char *v, size_t sz);
+extern void dbjie_set_feedback_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern void dbjie_call_feedback(DB_ENV_JAVAINFO *, DB_ENV *dbenv, jobject jenv,
+ int opcode, int percent);
+extern void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_recovery_init(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv);
+extern void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, int id, jobject obj);
+extern int dbjie_call_rep_transport(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv, const DBT *control,
+ const DBT *rec, int envid, int flags);
+extern void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *,
+ DB_ENV *dbenv, jobject jenv,
+ DBT *dbt, DB_LSN *lsn, int recops);
+extern jobject dbjie_get_errcall(DB_ENV_JAVAINFO *);
+extern jint dbjie_is_dbopen(DB_ENV_JAVAINFO *);
+
+/****************************************************************
+ *
+ * Declaration of class DB_JAVAINFO
+ *
+ * A DB_JAVAINFO is allocated and stuffed into the cj_internal field
+ * for every DB created. It holds a little extra info that is needed
+ * to support callbacks.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of cj_internal
+ * for a DB that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB, this should be true.
+ */
+typedef struct _db_javainfo
+{
+ JavaVM *javavm;
+ jobject jdbref; /* global reference */
+ jobject append_recno; /* global reference */
+ jobject assoc; /* global reference */
+ jobject bt_compare; /* global reference */
+ jobject bt_prefix; /* global reference */
+ jobject dup_compare; /* global reference */
+ jobject feedback; /* global reference */
+ jobject h_hash; /* global reference */
+ jmethodID append_recno_method_id;
+ jmethodID assoc_method_id;
+ jmethodID bt_compare_method_id;
+ jmethodID bt_prefix_method_id;
+ jmethodID dup_compare_method_id;
+ jmethodID feedback_method_id;
+ jmethodID h_hash_method_id;
+ jint construct_flags;
+} DB_JAVAINFO;
+
+/* create/initialize an object */
+extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags);
+
+/* release all objects held by this one */
+extern void dbji_dealloc(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbji_destroy(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbji_get_jnienv(DB_JAVAINFO *);
+extern jint dbji_get_flags(DB_JAVAINFO *);
+
+extern void dbji_set_feedback_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern void dbji_call_feedback(DB_JAVAINFO *, DB *db, jobject jdb,
+ int opcode, int percent);
+
+extern void dbji_set_append_recno_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_append_recno(DB_JAVAINFO *, DB *db, jobject jdb,
+ DBT *dbt, jint recno);
+extern void dbji_set_assoc_object(DB_JAVAINFO *, JNIEnv *jnienv,
+ DB *db, DB_TXN *txn, DB *second,
+ jobject value, int flags);
+extern int dbji_call_assoc(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *key, const DBT* data, DBT *result);
+extern void dbji_set_bt_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_bt_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_bt_prefix_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern size_t dbji_call_bt_prefix(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_dup_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_dup_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_h_hash_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_h_hash(DB_JAVAINFO *, DB *db, jobject jdb,
+ const void *data, int len);
+
+#endif /* !_JAVA_INFO_H_ */
diff --git a/libdb/libdb_java/java_locked.c b/libdb/libdb_java/java_locked.c
new file mode 100644
index 0000000..7805f06
--- /dev/null
+++ b/libdb/libdb_java/java_locked.c
@@ -0,0 +1,321 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Implementation of functions to manipulate LOCKED_DBT.
+ */
+int
+locked_dbt_get(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv,
+ jobject jdbt, OpKind kind)
+{
+ DBT *dbt;
+
+ COMPQUIET(dbenv, NULL);
+ ldbt->jdbt = jdbt;
+ ldbt->java_array_len = 0;
+ ldbt->flags = 0;
+ ldbt->kind = kind;
+ ldbt->java_data = 0;
+ ldbt->before_data = 0;
+ ldbt->javainfo =
+ (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, jdbt);
+
+ if (!verify_non_null(jnienv, ldbt->javainfo)) {
+ report_exception(jnienv, "Dbt is gc'ed?", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
+ return (EINVAL);
+ }
+ if (F_ISSET(ldbt->javainfo, DBT_JAVAINFO_LOCKED)) {
+ report_exception(jnienv, "Dbt is already in use", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
+ return (EINVAL);
+ }
+ dbt = &ldbt->javainfo->dbt;
+
+ if ((*jnienv)->GetBooleanField(jnienv,
+ jdbt, fid_Dbt_must_create_data) != 0)
+ F_SET(ldbt, LOCKED_CREATE_DATA);
+ else
+ ldbt->javainfo->array =
+ (*jnienv)->GetObjectField(jnienv, jdbt, fid_Dbt_data);
+
+ dbt->size = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_size);
+ dbt->ulen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_ulen);
+ dbt->dlen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_dlen);
+ dbt->doff = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_doff);
+ dbt->flags = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_flags);
+ ldbt->javainfo->offset = (*jnienv)->GetIntField(jnienv, jdbt,
+ fid_Dbt_offset);
+
+ /*
+ * If no flags are set, use default behavior of DB_DBT_MALLOC.
+ * We can safely set dbt->flags because flags will never be copied
+ * back to the Java Dbt.
+ */
+ if (kind != inOp &&
+ !F_ISSET(dbt, DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC))
+ F_SET(dbt, DB_DBT_MALLOC);
+
+ /*
+ * If this is requested to be realloc with an existing array,
+ * we cannot use the underlying realloc, because the array we
+ * will pass in is allocated by the Java VM, not us, so it
+ * cannot be realloced. We simulate the reallocation by using
+ * USERMEM and reallocating the java array when a ENOMEM error
+ * occurs. We change the flags during the operation, and they
+ * are reset when the operation completes (in locked_dbt_put).
+ */
+ if (F_ISSET(dbt, DB_DBT_REALLOC) && ldbt->javainfo->array != NULL) {
+ F_CLR(dbt, DB_DBT_REALLOC);
+ F_SET(dbt, DB_DBT_USERMEM);
+ F_SET(ldbt, LOCKED_REALLOC_NONNULL);
+ }
+
+ if ((F_ISSET(dbt, DB_DBT_USERMEM) || kind != outOp) &&
+ !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
+
+ /*
+ * If writing with DB_DBT_USERMEM
+ * or it's a set (or get/set) operation,
+ * then the data should point to a java array.
+ * Note that outOp means data is coming out of the database
+ * (it's a get). inOp means data is going into the database
+ * (either a put, or a key input).
+ */
+ if (!ldbt->javainfo->array) {
+ report_exception(jnienv, "Dbt.data is null", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
+ return (EINVAL);
+ }
+
+ /* Verify other parameters */
+ ldbt->java_array_len = (*jnienv)->GetArrayLength(jnienv,
+ ldbt->javainfo->array);
+ if (ldbt->javainfo->offset < 0) {
+ report_exception(jnienv, "Dbt.offset illegal", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
+ return (EINVAL);
+ }
+ if (dbt->size + ldbt->javainfo->offset > ldbt->java_array_len) {
+ report_exception(jnienv,
+ "Dbt.size + Dbt.offset greater than array length",
+ 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
+ return (EINVAL);
+ }
+
+ ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
+ ldbt->javainfo->array,
+ (jboolean *)0);
+
+ dbt->data = ldbt->before_data = ldbt->java_data +
+ ldbt->javainfo->offset;
+ }
+ else if (!F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
+
+ /*
+ * If writing with DB_DBT_MALLOC or DB_DBT_REALLOC with
+ * a null array, then the data is allocated by DB.
+ */
+ dbt->data = ldbt->before_data = 0;
+ }
+
+ /*
+ * RPC makes the assumption that if dbt->size is non-zero, there
+ * is data to copy from dbt->data. We may have set dbt->size
+ * to a non-zero integer above but decided not to point
+ * dbt->data at anything. (One example is if we're doing an outOp
+ * with an already-used Dbt whose values we expect to just
+ * overwrite.)
+ *
+ * Clean up the dbt fields so we don't run into trouble.
+ * (Note that doff, dlen, and flags all may contain meaningful
+ * values.)
+ */
+ if (dbt->data == NULL)
+ dbt->size = dbt->ulen = 0;
+
+ F_SET(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
+ return (0);
+}
+
+/*
+ * locked_dbt_put must be called for any LOCKED_DBT struct before a
+ * java handler returns to the user. It can be thought of as the
+ * LOCKED_DBT destructor. It copies any information from temporary
+ * structures back to user accessible arrays, and of course must free
+ * memory and remove references. The LOCKED_DBT itself is not freed,
+ * as it is expected to be a stack variable.
+ *
+ * Note that after this call, the LOCKED_DBT can still be used in
+ * limited ways, e.g. to look at values in the C DBT.
+ */
+void
+locked_dbt_put(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
+{
+ DBT *dbt;
+
+ dbt = &ldbt->javainfo->dbt;
+
+ /*
+ * If the error flag was set, we never succeeded
+ * in allocating storage.
+ */
+ if (F_ISSET(ldbt, LOCKED_ERROR))
+ return;
+
+ if (((F_ISSET(dbt, DB_DBT_USERMEM) ||
+ F_ISSET(ldbt, LOCKED_REALLOC_NONNULL)) ||
+ ldbt->kind == inOp) && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
+
+ /*
+ * If writing with DB_DBT_USERMEM or it's a set
+ * (or get/set) operation, then the data may be already in
+ * the java array, in which case, we just need to release it.
+ * If DB didn't put it in the array (indicated by the
+ * dbt->data changing), we need to do that
+ */
+ if (ldbt->before_data != ldbt->java_data) {
+ (*jnienv)->SetByteArrayRegion(jnienv,
+ ldbt->javainfo->array,
+ ldbt->javainfo->offset,
+ dbt->ulen,
+ ldbt->before_data);
+ }
+ (*jnienv)->ReleaseByteArrayElements(jnienv,
+ ldbt->javainfo->array,
+ ldbt->java_data, 0);
+ dbt->data = 0;
+ }
+ else if (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC) &&
+ ldbt->kind != inOp && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
+
+ /*
+ * If writing with DB_DBT_MALLOC, or DB_DBT_REALLOC
+ * with a zero buffer, then the data was allocated by
+ * DB. If dbt->data is zero, it means an error
+ * occurred (and should have been already reported).
+ */
+ if (dbt->data) {
+
+ /*
+ * In the case of SET_RANGE, the key is inOutOp
+ * and when not found, its data will be left as
+ * its original value. Only copy and free it
+ * here if it has been allocated by DB
+ * (dbt->data has changed).
+ */
+ if (dbt->data != ldbt->before_data) {
+ jbyteArray newarr;
+
+ if ((newarr = (*jnienv)->NewByteArray(jnienv,
+ dbt->size)) == NULL) {
+ /* The JVM has posted an exception. */
+ F_SET(ldbt, LOCKED_ERROR);
+ return;
+ }
+ (*jnienv)->SetObjectField(jnienv, ldbt->jdbt,
+ fid_Dbt_data,
+ newarr);
+ ldbt->javainfo->offset = 0;
+ (*jnienv)->SetByteArrayRegion(jnienv,
+ newarr, 0, dbt->size,
+ (jbyte *)dbt->data);
+ (void)__os_ufree(dbenv, dbt->data);
+ dbt->data = 0;
+ }
+ }
+ }
+
+ /*
+ * The size field may have changed after a DB API call,
+ * so we set that back too.
+ */
+ (*jnienv)->SetIntField(jnienv, ldbt->jdbt, fid_Dbt_size, dbt->size);
+ ldbt->javainfo->array = NULL;
+ F_CLR(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
+}
+
+/*
+ * Realloc the java array to receive data if the DBT used
+ * DB_DBT_REALLOC flag with a non-null data array, and the last
+ * operation set the size field to an amount greater than ulen.
+ * Return 1 if these conditions are met, otherwise 0. This is used
+ * internally to simulate the operations needed for DB_DBT_REALLOC.
+ */
+int locked_dbt_realloc(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
+{
+ DBT *dbt;
+
+ COMPQUIET(dbenv, NULL);
+ dbt = &ldbt->javainfo->dbt;
+
+ if (!F_ISSET(ldbt, LOCKED_REALLOC_NONNULL) ||
+ F_ISSET(ldbt, LOCKED_ERROR) || dbt->size <= dbt->ulen)
+ return (0);
+
+ (*jnienv)->ReleaseByteArrayElements(jnienv, ldbt->javainfo->array,
+ ldbt->java_data, 0);
+
+ /*
+ * We allocate a new array of the needed size.
+ * We'll set the offset to 0, as the old offset
+ * really doesn't make any sense.
+ */
+ if ((ldbt->javainfo->array = (*jnienv)->NewByteArray(jnienv,
+ dbt->size)) == NULL) {
+ F_SET(ldbt, LOCKED_ERROR);
+ return (0);
+ }
+
+ ldbt->java_array_len = dbt->ulen = dbt->size;
+ ldbt->javainfo->offset = 0;
+ (*jnienv)->SetObjectField(jnienv, ldbt->jdbt, fid_Dbt_data,
+ ldbt->javainfo->array);
+ ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
+ ldbt->javainfo->array, (jboolean *)0);
+ memcpy(ldbt->java_data, ldbt->before_data, dbt->ulen);
+ dbt->data = ldbt->before_data = ldbt->java_data;
+ return (1);
+}
+
+/****************************************************************
+ *
+ * Implementation of functions to manipulate LOCKED_STRING.
+ */
+int
+locked_string_get(LOCKED_STRING *ls, JNIEnv *jnienv, jstring jstr)
+{
+ ls->jstr = jstr;
+
+ if (jstr == 0)
+ ls->string = 0;
+ else
+ ls->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
+ (jboolean *)0);
+ return (0);
+}
+
+void locked_string_put(LOCKED_STRING *ls, JNIEnv *jnienv)
+{
+ if (ls->jstr)
+ (*jnienv)->ReleaseStringUTFChars(jnienv, ls->jstr, ls->string);
+}
diff --git a/libdb/libdb_java/java_locked.h b/libdb/libdb_java/java_locked.h
new file mode 100644
index 0000000..60419ff
--- /dev/null
+++ b/libdb/libdb_java/java_locked.h
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _JAVA_LOCKED_H_
+#define _JAVA_LOCKED_H_
+
+/*
+ * Used as argument to locked_dbt_get().
+ */
+typedef enum _OpKind {
+ inOp, /* setting data in database (passing data in) */
+ outOp, /* getting data from database to user memory */
+ inOutOp /* both getting/setting data */
+} OpKind;
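+
+/*
+ * For example (illustrative): a Db.get passes its key with inOp and its
+ * data with outOp, while a cursor get with DB_SET_RANGE passes the key
+ * as inOutOp because DB may rewrite it.
+ */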
+
+/*
+ * LOCKED_DBT
+ *
+ * A stack variable LOCKED_DBT should be declared for each Dbt used in a
+ * native call to the DB API. Before the DBT can be used, locked_dbt_get()
+ * must be called to temporarily convert any java array found in the
+ * Dbt (which has a pointer to a DBT_JAVAINFO struct) to actual bytes
+ * in memory that remain locked in place. These bytes are used during
+ * the call to the DB C API, and are released and/or copied back when
+ * locked_dbt_put is called.
+ */
+typedef struct _locked_dbt
+{
+ /* these are accessed externally to locked_dbt_ functions */
+ DBT_JAVAINFO *javainfo;
+ unsigned int java_array_len;
+ jobject jdbt;
+
+ /* these are used internally by locked_dbt_ functions */
+ jbyte *java_data;
+ jbyte *before_data;
+ OpKind kind;
+
+#define LOCKED_ERROR 0x01 /* error occurred */
+#define LOCKED_CREATE_DATA 0x02 /* must create data on the fly */
+#define LOCKED_REALLOC_NONNULL 0x04 /* DB_DBT_REALLOC flag, nonnull data */
+ u_int32_t flags;
+} LOCKED_DBT;
+
+/* Fill the LOCKED_DBT struct and lock the Java byte array */
+extern int locked_dbt_get(LOCKED_DBT *, JNIEnv *, DB_ENV *, jobject, OpKind);
+
+/* unlock the Java byte array */
+extern void locked_dbt_put(LOCKED_DBT *, JNIEnv *, DB_ENV *);
+
+/* realloc the Java byte array */
+extern int locked_dbt_realloc(LOCKED_DBT *, JNIEnv *, DB_ENV *);
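+
+/*
+ * Minimal usage sketch (illustrative only; db, txn, jkey and flags are
+ * assumed to exist in the caller):
+ *
+ *	LOCKED_DBT lkey;
+ *	int err;
+ *
+ *	if ((err = locked_dbt_get(&lkey, jnienv, dbenv, jkey, inOp)) == 0) {
+ *		err = db->del(db, txn, &lkey.javainfo->dbt, flags);
+ *		locked_dbt_put(&lkey, jnienv, dbenv);
+ *	}
+ */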
+
+/*
+ * LOCKED_STRING
+ *
+ * A LOCKED_STRING exists temporarily to convert a java jstring object
+ * to a char *. Because the memory for the char * string is
+ * managed by the JVM, it must be released when we are done
+ * looking at it. Typically, locked_string_get() is called at the
+ * beginning of a function for each jstring object, and locked_string_put
+ * is called at the end of each function for each LOCKED_STRING.
+ */
+typedef struct _locked_string
+{
+ /* this is accessed externally to locked_string_ functions */
+ const char *string;
+
+ /* this is used internally by locked_string_ functions */
+ jstring jstr;
+} LOCKED_STRING;
+
+extern int locked_string_get(LOCKED_STRING *, JNIEnv *jnienv, jstring jstr);
+extern void locked_string_put(LOCKED_STRING *, JNIEnv *jnienv); /* this unlocks and frees mem */
+
+#endif /* !_JAVA_LOCKED_H_ */
diff --git a/libdb/libdb_java/java_stat_auto.c b/libdb/libdb_java/java_stat_auto.c
new file mode 100644
index 0000000..c141223
--- /dev/null
+++ b/libdb/libdb_java/java_stat_auto.c
@@ -0,0 +1,207 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+#include "java_util.h"
+int __jv_fill_bt_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_bt_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_maxkey);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_minkey);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_len);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_pad);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_levels);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pgfree);
+ return (0);
+}
+int __jv_fill_h_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_h_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ffactor);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bigpages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_big_bfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_overflows);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ovfl_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup_free);
+ return (0);
+}
+int __jv_fill_lock_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_lock_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_id);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_maxid);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nmodes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nconflicts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrequests);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nreleases);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nnowaits);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ndeadlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_locktimeout);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocktimeouts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txntimeout);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ntxntimeouts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ return (0);
+}
+int __jv_fill_log_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_log_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mode);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_bsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_size);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_mbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_mbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount_fill);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_scount);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_file);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_offset);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_file);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_offset);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxcommitperflush);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mincommitperflush);
+ return (0);
+}
+int __jv_fill_mpool_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_mpool_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncache);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_map);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_hit);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_miss);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_create);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_in);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_out);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ro_evict);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_rw_evict);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_trickle);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_clean);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_dirty);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_searches);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_longest);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_examined);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_max_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_pages);
+ return (0);
+}
+int __jv_fill_qam_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_qam_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_extentsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_len);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_pad);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_first_recno);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_cur_recno);
+ return (0);
+}
+int __jv_fill_rep_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_rep_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_status);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_next_lsn);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_waiting_lsn);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_dupmasters);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_id);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_priority);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gen);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_duplicated);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_max);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_total);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_records);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_requested);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master_changes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_badgen);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_processed);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_recover);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_send_failures);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_sent);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_newsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nthrottles);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_outdated);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txns_applied);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections_won);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_cur_winner);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_gen);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_election_lsn);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_nsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_priority);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_status);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_tiebreaker);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_votes);
+ return (0);
+}
+int __jv_fill_txn_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_txn_stat *statp) {
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_last_ckp);
+ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, st_time_ckp);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_last_txnid);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxtxns);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_naborts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nbegins);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncommits);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nactive);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrestores);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnactive);
+ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, st_txnarray);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ return (0);
+}
diff --git a/libdb/libdb_java/java_stat_auto.h b/libdb/libdb_java/java_stat_auto.h
new file mode 100644
index 0000000..20eecf1
--- /dev/null
+++ b/libdb/libdb_java/java_stat_auto.h
@@ -0,0 +1,9 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+extern int __jv_fill_bt_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_bt_stat *statp);
+extern int __jv_fill_h_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_h_stat *statp);
+extern int __jv_fill_lock_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_lock_stat *statp);
+extern int __jv_fill_log_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_log_stat *statp);
+extern int __jv_fill_mpool_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_mpool_stat *statp);
+extern int __jv_fill_qam_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_qam_stat *statp);
+extern int __jv_fill_rep_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_rep_stat *statp);
+extern int __jv_fill_txn_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_txn_stat *statp);
diff --git a/libdb/libdb_java/java_util.c b/libdb/libdb_java/java_util.c
new file mode 100644
index 0000000..0e83c64
--- /dev/null
+++ b/libdb/libdb_java/java_util.c
@@ -0,0 +1,890 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+#ifdef DB_WIN32
+#define sys_errlist _sys_errlist
+#define sys_nerr _sys_nerr
+#endif
+
+const char * const name_DB = "Db";
+const char * const name_DB_BTREE_STAT = "DbBtreeStat";
+const char * const name_DBC = "Dbc";
+const char * const name_DB_DEADLOCK_EX = "DbDeadlockException";
+const char * const name_DB_ENV = "DbEnv";
+const char * const name_DB_EXCEPTION = "DbException";
+const char * const name_DB_HASH_STAT = "DbHashStat";
+const char * const name_DB_LOCK = "DbLock";
+const char * const name_DB_LOCK_STAT = "DbLockStat";
+const char * const name_DB_LOCKNOTGRANTED_EX = "DbLockNotGrantedException";
+const char * const name_DB_LOGC = "DbLogc";
+const char * const name_DB_LOG_STAT = "DbLogStat";
+const char * const name_DB_LSN = "DbLsn";
+const char * const name_DB_MEMORY_EX = "DbMemoryException";
+const char * const name_DB_MPOOL_FSTAT = "DbMpoolFStat";
+const char * const name_DB_MPOOL_STAT = "DbMpoolStat";
+const char * const name_DB_PREPLIST = "DbPreplist";
+const char * const name_DB_QUEUE_STAT = "DbQueueStat";
+const char * const name_DB_REP_STAT = "DbRepStat";
+const char * const name_DB_RUNRECOVERY_EX = "DbRunRecoveryException";
+const char * const name_DBT = "Dbt";
+const char * const name_DB_TXN = "DbTxn";
+const char * const name_DB_TXN_STAT = "DbTxnStat";
+const char * const name_DB_TXN_STAT_ACTIVE = "DbTxnStat$Active";
+const char * const name_DB_UTIL = "DbUtil";
+const char * const name_DbAppendRecno = "DbAppendRecno";
+const char * const name_DbBtreeCompare = "DbBtreeCompare";
+const char * const name_DbBtreePrefix = "DbBtreePrefix";
+const char * const name_DbDupCompare = "DbDupCompare";
+const char * const name_DbEnvFeedback = "DbEnvFeedback";
+const char * const name_DbErrcall = "DbErrcall";
+const char * const name_DbHash = "DbHash";
+const char * const name_DbLockRequest = "DbLockRequest";
+const char * const name_DbFeedback = "DbFeedback";
+const char * const name_DbRecoveryInit = "DbRecoveryInit";
+const char * const name_DbRepTransport = "DbRepTransport";
+const char * const name_DbSecondaryKeyCreate = "DbSecondaryKeyCreate";
+const char * const name_DbTxnRecover = "DbTxnRecover";
+const char * const name_RepElectResult = "DbEnv$RepElectResult";
+const char * const name_RepProcessMessage = "DbEnv$RepProcessMessage";
+
+const char * const string_signature = "Ljava/lang/String;";
+
+jfieldID fid_Dbt_data;
+jfieldID fid_Dbt_offset;
+jfieldID fid_Dbt_size;
+jfieldID fid_Dbt_ulen;
+jfieldID fid_Dbt_dlen;
+jfieldID fid_Dbt_doff;
+jfieldID fid_Dbt_flags;
+jfieldID fid_Dbt_private_dbobj_;
+jfieldID fid_Dbt_must_create_data;
+jfieldID fid_DbLockRequest_op;
+jfieldID fid_DbLockRequest_mode;
+jfieldID fid_DbLockRequest_timeout;
+jfieldID fid_DbLockRequest_obj;
+jfieldID fid_DbLockRequest_lock;
+jfieldID fid_RepProcessMessage_envid;
+
+/****************************************************************
+ *
+ * Utility functions used by "glue" functions.
+ */
+
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ */
+void one_time_init(JNIEnv *jnienv)
+{
+ jclass cl;
+
+ if ((cl = get_class(jnienv, name_DBT)) == NULL)
+ return; /* An exception has been posted. */
+ fid_Dbt_data = (*jnienv)->GetFieldID(jnienv, cl, "data", "[B");
+ fid_Dbt_offset = (*jnienv)->GetFieldID(jnienv, cl, "offset", "I");
+ fid_Dbt_size = (*jnienv)->GetFieldID(jnienv, cl, "size", "I");
+ fid_Dbt_ulen = (*jnienv)->GetFieldID(jnienv, cl, "ulen", "I");
+ fid_Dbt_dlen = (*jnienv)->GetFieldID(jnienv, cl, "dlen", "I");
+ fid_Dbt_doff = (*jnienv)->GetFieldID(jnienv, cl, "doff", "I");
+ fid_Dbt_flags = (*jnienv)->GetFieldID(jnienv, cl, "flags", "I");
+ fid_Dbt_must_create_data = (*jnienv)->GetFieldID(jnienv, cl,
+ "must_create_data", "Z");
+ fid_Dbt_private_dbobj_ =
+ (*jnienv)->GetFieldID(jnienv, cl, "private_dbobj_", "J");
+
+ if ((cl = get_class(jnienv, name_DbLockRequest)) == NULL)
+ return; /* An exception has been posted. */
+ fid_DbLockRequest_op = (*jnienv)->GetFieldID(jnienv, cl, "op", "I");
+ fid_DbLockRequest_mode = (*jnienv)->GetFieldID(jnienv, cl, "mode", "I");
+ fid_DbLockRequest_timeout =
+ (*jnienv)->GetFieldID(jnienv, cl, "timeout", "I");
+ fid_DbLockRequest_obj = (*jnienv)->GetFieldID(jnienv, cl, "obj",
+ "Lcom/sleepycat/db/Dbt;");
+ fid_DbLockRequest_lock = (*jnienv)->GetFieldID(jnienv, cl, "lock",
+ "Lcom/sleepycat/db/DbLock;");
+
+ if ((cl = get_class(jnienv, name_RepProcessMessage)) == NULL)
+ return; /* An exception has been posted. */
+ fid_RepProcessMessage_envid =
+ (*jnienv)->GetFieldID(jnienv, cl, "envid", "I");
+}
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj)
+{
+ jclass dbClass;
+ jfieldID id;
+ long_to_ptr lp;
+
+ if (!obj)
+ return (0);
+
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
+ return (lp.ptr);
+}
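+
+/*
+ * For reference (illustrative; the real definition lives in java_util.h):
+ * the long/pointer conversion above relies on a union along the lines of
+ *
+ *	typedef union {
+ *		jlong java_long;
+ *		void *ptr;
+ *	} long_to_ptr;
+ *
+ * so a 64-bit Java long can round-trip a native pointer.
+ */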
+
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/*
+ * Get the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj)
+{
+ jclass dbClass;
+ jfieldID id;
+ long_to_ptr lp;
+
+ if (!obj)
+ return (NULL);
+
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
+ lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
+ return (lp.ptr);
+}
+
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo")
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname)
+{
+ /*
+ * Note: PERFORMANCE: It should be possible to cache jclass's.
+ * If we do a NewGlobalRef on each one, we can keep them
+ * around in a table. A jclass is a jobject, and
+ * since NewGlobalRef returns a jobject, it isn't
+ * technically right, but it would likely work with
+ * most implementations. Possibly make it configurable.
+ */
+ char fullname[128];
+
+ (void)snprintf(fullname, sizeof(fullname),
+ "%s%s", DB_PACKAGE_NAME, classname);
+ return ((*jnienv)->FindClass(jnienv, fullname));
+}
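+
+/*
+ * A minimal sketch of the caching idea mentioned above (not enabled here;
+ * illustrative only):
+ *
+ *	static jclass cached;
+ *	jclass local;
+ *
+ *	if (cached == NULL &&
+ *	    (local = (*jnienv)->FindClass(jnienv, fullname)) != NULL)
+ *		cached = (jclass)(*jnienv)->NewGlobalRef(jnienv, local);
+ *	return (cached);
+ */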
+
+/*
+ * Given a fully qualified name (e.g. "java.util.Hashtable")
+ * return the jclass object. If it can't be found, an
+ * exception is raised and NULL is returned.
+ * This is appropriate for classes that may not be present.
+ */
+jclass get_fully_qualified_class(JNIEnv *jnienv, const char *classname)
+{
+ jclass result;
+
+ result = ((*jnienv)->FindClass(jnienv, classname));
+ if (result == NULL) {
+ jclass cnfe;
+ char message[1024];
+
+ cnfe = (*jnienv)->FindClass(jnienv,
+ "java/lang/ClassNotFoundException");
+ (void)snprintf(message, sizeof(message),
+ "%s: class not found", classname);
+ (*jnienv)->ThrowNew(jnienv, cnfe, message);
+ }
+ return (result);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DB object type.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *object_classname,
+ const char *name_of_field, jobject obj)
+{
+ char signature[512];
+ jfieldID id;
+
+ (void)snprintf(signature, sizeof(signature),
+ "L%s%s;", DB_PACKAGE_NAME, object_classname);
+ id = (*jnienv)->GetFieldID(
+ jnienv, class_of_this, name_of_field, signature);
+ (*jnienv)->SetObjectField(jnienv, jthis, id, obj);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jint value)
+{
+ jfieldID id =
+ (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "I");
+ (*jnienv)->SetIntField(jnienv, jthis, id, value);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jlong value)
+{
+ jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this,
+ name_of_field, "J");
+ (*jnienv)->SetLongField(jnienv, jthis, id, value);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, DB_LSN value)
+{
+ set_object_field(jnienv, class_of_this, jthis, name_DB_LSN,
+ name_of_field, get_DbLsn(jnienv, value));
+}
+
+/*
+ * Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text,
+ int err, unsigned long expect_mask)
+{
+ jstring textString;
+ jclass dbexcept;
+ jclass javaexcept;
+ jthrowable obj;
+
+ textString = NULL;
+ dbexcept = NULL;
+ javaexcept = NULL;
+
+ switch (err) {
+ /*
+ * DB_JAVA_CALLBACK is returned by
+ * dbji_call_append_recno() (the append_recno callback)
+ * when the Java version of the callback has thrown
+ * an exception, and we want to pass the exception on.
+ * The exception has already been thrown, we
+ * don't want to throw a new one.
+ */
+ case DB_JAVA_CALLBACK:
+ break;
+ case ENOENT:
+ /*
+ * In this case there is a corresponding
+ * standard java exception type that we'll use.
+ * First we make sure that the calling function
+ * expected this kind of error, if not we give
+ * an 'internal error' DbException, since
+ * we must not throw an exception type that isn't
+ * declared in the signature.
+ *
+ * We'll make this a little more general if/when
+ * we add more java standard exceptions.
+ */
+ if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) != 0) {
+ javaexcept = (*jnienv)->FindClass(jnienv,
+ "java/io/FileNotFoundException");
+ }
+ else {
+ char errstr[1024];
+
+ snprintf(errstr, sizeof(errstr),
+ "internal error: unexpected errno: %s",
+ text);
+ textString = get_java_string(jnienv,
+ errstr);
+ dbexcept = get_class(jnienv,
+ name_DB_EXCEPTION);
+ }
+ break;
+ case DB_RUNRECOVERY:
+ dbexcept = get_class(jnienv,
+ name_DB_RUNRECOVERY_EX);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
+ break;
+ default:
+ dbexcept = get_class(jnienv, name_DB_EXCEPTION);
+ break;
+ }
+ if (dbexcept != NULL) {
+ if (textString == NULL)
+ textString = get_java_string(jnienv, text);
+ if ((obj = create_exception(jnienv, textString, err, dbexcept))
+ != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ /* Otherwise, an exception has been posted. */
+ }
+ else if (javaexcept != NULL)
+ (*jnienv)->ThrowNew(jnienv, javaexcept, text);
+ else
+ fprintf(stderr,
+ "report_exception: failed to create an exception\n");
+}
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, as more things are added to the
+ * constructor of this type of exception.
+ */
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+ db_lockop_t op, db_lockmode_t mode,
+ jobject jdbt, jobject jlock, int index)
+{
+ jstring textString;
+ jclass dbexcept;
+ jthrowable obj;
+ jmethodID mid;
+
+ if ((dbexcept = get_class(jnienv, name_DB_LOCKNOTGRANTED_EX)) == NULL)
+ return; /* An exception has been posted. */
+ textString = get_java_string(jnienv, text);
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;II"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/DbLock;I)V");
+ if ((obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
+ mid, textString, op, mode, jdbt, jlock, index)) != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ else
+ fprintf(stderr,
+ "report_notgranted_exception: failed to create an exception\n");
+}
+
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor with args
+ * (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
+ */
+jobject create_exception(JNIEnv *jnienv, jstring text,
+ int err, jclass dbexcept)
+{
+ jthrowable obj;
+ jmethodID mid;
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;I)V");
+ if (mid != NULL)
+ obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept, mid,
+ text, err);
+ else {
+ fprintf(stderr, "Cannot get exception init method ID!\n");
+ obj = NULL;
+ }
+
+ return (obj);
+}
+
+/*
+ * Report an error via the errcall mechanism.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+ jstring prefix, const char *message)
+{
+ jmethodID id;
+ jclass errcall_class;
+ jstring msg;
+
+ if ((errcall_class = get_class(jnienv, name_DbErrcall)) == NULL)
+ return; /* An exception has been posted. */
+ msg = get_java_string(jnienv, message);
+
+ id = (*jnienv)->GetMethodID(jnienv, errcall_class,
+ "errcall",
+ "(Ljava/lang/String;Ljava/lang/String;)V");
+ if (id == NULL) {
+ fprintf(stderr, "Cannot get errcall methodID!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, errcall, id, prefix, msg);
+}
+
+/*
+ * If the object is null, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj)
+{
+ if (obj == NULL) {
+ report_exception(jnienv, "null object", EINVAL, 0);
+ return (0);
+ }
+ return (1);
+}
+
+/*
+ * If the error code is non-zero, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask)
+{
+ if (err == 0)
+ return (1);
+
+ report_exception(jnienv, db_strerror(err), err, expect_mask);
+ return (0);
+}
+
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is, report a DbMemoryException with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *ldbt)
+{
+ DBT *dbt;
+ jobject exception;
+ jstring text;
+ jclass dbexcept;
+ jmethodID mid;
+
+ if (err != ENOMEM)
+ return (1);
+
+ dbt = &ldbt->javainfo->dbt;
+ if (!F_ISSET(dbt, DB_DBT_USERMEM) || dbt->size <= dbt->ulen)
+ return (1);
+
+ /* Create/throw an exception of type DbMemoryException */
+ if ((dbexcept = get_class(jnienv, name_DB_MEMORY_EX)) == NULL)
+ return (1); /* An exception has been posted. */
+ text = get_java_string(jnienv,
+ "Dbt not large enough for available data");
+ exception = create_exception(jnienv, text, ENOMEM, dbexcept);
+
+ /* Attach the dbt to the exception */
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "set_dbt",
+ "(L" DB_PACKAGE_NAME "Dbt;)V");
+ (*jnienv)->CallVoidMethod(jnienv, exception, mid, ldbt->jdbt);
+ (*jnienv)->Throw(jnienv, exception);
+ return (0);
+}
+
+/*
+ * Create an object of the given class, calling its default constructor.
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name)
+{
+ jmethodID id;
+ jclass dbclass;
+
+ if ((dbclass = get_class(jnienv, class_name)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetMethodID(jnienv, dbclass, "<init>", "()V");
+ return ((*jnienv)->NewObject(jnienv, dbclass, id));
+}
+
+/*
+ * Convert a DB object to a Java encapsulation of that object.
+ * Note: This implementation creates a new Java object on each call,
+ * so it is generally useful when a new DB object has just been created.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj)
+{
+ jobject jo;
+
+ if (!dbobj)
+ return (0);
+
+ jo = create_default_object(jnienv, class_name);
+ set_private_dbobj(jnienv, class_name, jo, dbobj);
+ return (jo);
+}
+
+/*
+ * Create a copy of the string
+ */
+char *dup_string(const char *str)
+{
+ int len;
+ char *retval;
+ int err;
+
+ len = strlen(str) + 1;
+ if ((err = __os_malloc(NULL, sizeof(char)*len, &retval)) != 0)
+ return (NULL);
+ strncpy(retval, str, len);
+ return (retval);
+}
+
+/*
+ * Create a java string from the given string
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string)
+{
+ if (string == 0)
+ return (0);
+ return ((*jnienv)->NewStringUTF(jnienv, string));
+}
+
+/*
+ * Create a copy of the java string using __os_malloc.
+ * Caller must free it.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr)
+{
+ const char *utf;
+ char *retval;
+
+ utf = (*jnienv)->GetStringUTFChars(jnienv, jstr, NULL);
+ retval = dup_string(utf);
+ (*jnienv)->ReleaseStringUTFChars(jnienv, jstr, utf);
+ return (retval);
+}
+
+/*
+ * Convert Java objects to the various C pointers they represent.
+ */
+DB *get_DB(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB *)get_private_dbobj(jnienv, name_DB, obj));
+}
+
+DB_BTREE_STAT *get_DB_BTREE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_BTREE_STAT *)
+ get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
+}
+
+DBC *get_DBC(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBC *)get_private_dbobj(jnienv, name_DBC, obj));
+}
+
+DB_ENV *get_DB_ENV(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV *)get_private_dbobj(jnienv, name_DB_ENV, obj));
+}
+
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV_JAVAINFO *)get_private_info(jnienv, name_DB_ENV, obj));
+}
+
+DB_HASH_STAT *get_DB_HASH_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_HASH_STAT *)
+ get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
+}
+
+DB_JAVAINFO *get_DB_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_JAVAINFO *)get_private_info(jnienv, name_DB, obj));
+}
+
+DB_LOCK *get_DB_LOCK(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOCK *)get_private_dbobj(jnienv, name_DB_LOCK, obj));
+}
+
+DB_LOGC *get_DB_LOGC(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOGC *)get_private_dbobj(jnienv, name_DB_LOGC, obj));
+}
+
+DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOG_STAT *)
+ get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
+}
+
+DB_LSN *get_DB_LSN(JNIEnv *jnienv, /* DbLsn */ jobject obj) {
+ /*
+ * DbLsns that are created from within java (new DbLsn()) rather
+ * than from within C (get_DbLsn()) may not have a "private" DB_LSN
+ * structure allocated for them yet. We can't do this in the
+ * actual constructor (init_lsn()), because there's no way to pass
+ * an initializing value in, and because the get_DbLsn()/
+ * convert_object() code path needs a copy of the pointer before
+ * the constructor gets called. Thus, get_DbLsn() allocates and
+ * fills a DB_LSN for the object it's about to create.
+ *
+ * Since "new DbLsn()" may reasonably be passed as an argument to
+ * functions such as DbEnv.log_put(), though, we need to make sure
+ * that DB_LSN's get allocated when the object was created from
+ * Java, too. Here, we lazily allocate a new private DB_LSN if
+ * and only if it turns out that we don't already have one.
+ *
+ * The only exception is if the DbLsn object is a Java null
+ * (in which case the jobject will also be NULL). Then a NULL
+ * DB_LSN is legitimate.
+ */
+ DB_LSN *lsnp;
+ int err;
+
+ if (obj == NULL)
+ return (NULL);
+
+ lsnp = (DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj);
+ if (lsnp == NULL) {
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+ memset(lsnp, 0, sizeof(DB_LSN));
+ set_private_dbobj(jnienv, name_DB_LSN, obj, lsnp);
+ }
+
+ return (lsnp);
+}
+
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_FSTAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
+}
+
+DB_MPOOL_STAT *get_DB_MPOOL_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_STAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
+}
+
+DB_QUEUE_STAT *get_DB_QUEUE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_QUEUE_STAT *)
+ get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
+}
+
+DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN *)get_private_dbobj(jnienv, name_DB_TXN, obj));
+}
+
+DB_TXN_STAT *get_DB_TXN_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN_STAT *)
+ get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
+}
+
+DBT *get_DBT(JNIEnv *jnienv, jobject obj)
+{
+ DBT_JAVAINFO *ji;
+
+ ji = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
+ if (ji == NULL)
+ return (NULL);
+ else
+ return (&ji->dbt);
+}
+
+DBT_JAVAINFO *get_DBT_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj));
+}
+
+/*
+ * Convert C pointers to the various Java objects they represent.
+ */
+jobject get_DbBtreeStat(JNIEnv *jnienv, DB_BTREE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_BTREE_STAT, dbobj));
+}
+
+jobject get_Dbc(JNIEnv *jnienv, DBC *dbobj)
+{
+ return (convert_object(jnienv, name_DBC, dbobj));
+}
+
+jobject get_DbHashStat(JNIEnv *jnienv, DB_HASH_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_HASH_STAT, dbobj));
+}
+
+jobject get_DbLogc(JNIEnv *jnienv, DB_LOGC *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOGC, dbobj));
+}
+
+jobject get_DbLogStat(JNIEnv *jnienv, DB_LOG_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOG_STAT, dbobj));
+}
+
+/*
+ * LSNs are different since they are normally
+ * treated as by-value objects. We actually create
+ * a pointer to the LSN and store that, deleting it
+ * when the LSN is GC'd.
+ */
+jobject get_DbLsn(JNIEnv *jnienv, DB_LSN dbobj)
+{
+ DB_LSN *lsnp;
+ int err;
+
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+
+ memset(lsnp, 0, sizeof(DB_LSN));
+ *lsnp = dbobj;
+ return (convert_object(jnienv, name_DB_LSN, lsnp));
+}
+
+/*
+ * Shared code for get_Dbt and get_const_Dbt.
+ *
+ * XXX
+ * Currently we make no distinction in implementation of these
+ * two kinds of Dbts, although in the future we may want to.
+ * (It's probably easier to make the optimizations listed below
+ * with readonly Dbts).
+ *
+ * Dbt's created via this function are only used for a short lifetime,
+ * during callback functions. In the future, we should consider taking
+ * advantage of this by having a pool of Dbt objects instead of creating
+ * new ones each time. Because of multithreading, we may need an
+ * arbitrary number. We might also have sharing of the byte arrays
+ * used by the Dbts.
+ */
+static jobject get_Dbt_shared(JNIEnv *jnienv, const DBT *dbt, int readonly,
+ DBT_JAVAINFO **ret_info)
+{
+ jobject jdbt;
+ DBT_JAVAINFO *dbtji;
+
+ COMPQUIET(readonly, 0);
+
+ /* A NULL DBT should become a null Dbt. */
+ if (dbt == NULL)
+ return (NULL);
+
+ /*
+ * Note that a side effect of creating a Dbt object
+ * is the creation of the attached DBT_JAVAINFO object
+ * (see the native implementation of Dbt.init())
+ * A DBT_JAVAINFO object contains its own DBT.
+ */
+ jdbt = create_default_object(jnienv, name_DBT);
+ dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
+ memcpy(&dbtji->dbt, dbt, sizeof(DBT));
+
+ /*
+ * Set the boolean indicator so that the Java side knows to
+ * call back when it wants to look at the array. This avoids
+ * needlessly creating/copying arrays that may never be looked at.
+ */
+ (*jnienv)->SetBooleanField(jnienv, jdbt, fid_Dbt_must_create_data, 1);
+ (*jnienv)->SetIntField(jnienv, jdbt, fid_Dbt_size, dbt->size);
+
+ if (ret_info != NULL)
+ *ret_info = dbtji;
+ return (jdbt);
+}
+
+/*
+ * Get a writeable Dbt.
+ *
+ * Currently we're sharing code with get_const_Dbt.
+ * It really shouldn't be this way: we should have a DBT that we
+ * can change, and some mechanism for copying back any changes to
+ * the original DBT.
+ */
+jobject get_Dbt(JNIEnv *jnienv, DBT *dbt,
+ DBT_JAVAINFO **ret_info)
+{
+ return (get_Dbt_shared(jnienv, dbt, 0, ret_info));
+}
+
+/*
+ * Get a Dbt that we promise not to change, or at least
+ * if there are changes, they don't matter and won't get
+ * seen by anyone.
+ */
+jobject get_const_Dbt(JNIEnv *jnienv, const DBT *dbt,
+ DBT_JAVAINFO **ret_info)
+{
+ return (get_Dbt_shared(jnienv, dbt, 1, ret_info));
+}
+
+jobject get_DbMpoolFStat(JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_FSTAT, dbobj));
+}
+
+jobject get_DbMpoolStat(JNIEnv *jnienv, DB_MPOOL_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_STAT, dbobj));
+}
+
+jobject get_DbQueueStat(JNIEnv *jnienv, DB_QUEUE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_QUEUE_STAT, dbobj));
+}
+
+jobject get_DbTxn(JNIEnv *jnienv, DB_TXN *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN, dbobj));
+}
+
+jobject get_DbTxnStat(JNIEnv *jnienv, DB_TXN_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN_STAT, dbobj));
+}
diff --git a/libdb/libdb_java/java_util.h b/libdb/libdb_java/java_util.h
new file mode 100644
index 0000000..9051a94
--- /dev/null
+++ b/libdb/libdb_java/java_util.h
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef _JAVA_UTIL_H_
+#define _JAVA_UTIL_H_
+
+#ifdef _MSC_VER
+
+/*
+ * These are level 4 warnings that are explicitly disabled.
+ * With Visual C++, by default you do not see above level 3 unless
+ * you use /W4. But we like to compile with the highest level
+ * warnings to catch other errors.
+ *
+ * 4201: nameless struct/union
+ * triggered by standard include file <winnt.h>
+ *
+ * 4244: '=' : convert from '__int64' to 'unsigned int', possible loss of data
+ * results from making size_t data members correspond to jlongs
+ *
+ * 4514: unreferenced inline function has been removed
+ * jni.h defines methods that are not called
+ *
+ * 4127: conditional expression is constant
+ * occurs because of arg in JAVADB_RW_ACCESS_STRING macro
+ */
+#pragma warning(disable: 4244 4201 4514 4127)
+
+#endif
+
+#include "db_config.h"
+#include "db.h"
+#include "db_int.h"
+#include <jni.h>
+#include "java_info.h"
+#include "java_locked.h"
+#include <string.h> /* needed for memset */
+
+#define DB_PACKAGE_NAME "com/sleepycat/db/"
+
+/* Union to convert longs to pointers (see {get,set}_private_dbobj). */
+typedef union {
+ jlong java_long;
+ void *ptr;
+} long_to_ptr;
+
+/****************************************************************
+ *
+ * Utility functions and definitions used by "glue" functions.
+ */
+
+#define NOT_IMPLEMENTED(str) \
+ report_exception(jnienv, str /*concatenate*/ ": not implemented", 0, 0)
+
+/*
+ * Get, delete a global reference.
+ * Making this operation a function call allows for
+ * easier tracking for debugging. Global references
+ * are mostly grabbed at 'open' and 'close' points,
+ * so there shouldn't be a big performance hit.
+ *
+ * Macro-izing this makes it easier to add debugging code
+ * to track unreleased references.
+ */
+#ifdef DBJAVA_DEBUG
+#include <unistd.h>
+static void wrdebug(const char *str)
+{
+ write(2, str, strlen(str));
+ write(2, "\n", 1);
+}
+
+static jobject debug_new_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+ wrdebug(s);
+ return ((*jnienv)->NewGlobalRef(jnienv, obj));
+}
+
+static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+ wrdebug(s);
+ (*jnienv)->DeleteGlobalRef(jnienv, obj);
+}
+
+#define NEW_GLOBAL_REF(jnienv, obj) \
+ debug_new_global_ref(jnienv, obj, "+Ref: " #obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) \
+ debug_delete_global_ref(jnienv, obj, "-Ref: " #obj)
+#else
+#define NEW_GLOBAL_REF(jnienv, obj) (*jnienv)->NewGlobalRef(jnienv, obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) (*jnienv)->DeleteGlobalRef(jnienv, obj)
+#define wrdebug(x)
+#endif
+
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ */
+void one_time_init(JNIEnv *jnienv);
+
+/*
+ * Get the current JNIEnv from the java VM.
+ * If the jvm argument is null, uses the default
+ * jvm stored during the first invocation.
+ */
+JNIEnv *get_jnienv(JavaVM *jvm);
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
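+
+/*
+ * A minimal sketch (not the actual implementation) of how the pair of
+ * calls above is expected to round-trip a C pointer through a Java
+ * "long" field, using the long_to_ptr union defined earlier in this
+ * file.  Here fid stands for the jfieldID of that long field; the
+ * existence and name of such a field is an assumption of this sketch:
+ *
+ *	long_to_ptr lp;
+ *
+ *	lp.java_long = 0;	(zero all 64 bits before storing a
+ *				 possibly 32-bit pointer)
+ *	lp.ptr = value;
+ *	(*jnienv)->SetLongField(jnienv, obj, fid, lp.java_long);
+ *
+ *	lp.java_long = (*jnienv)->GetLongField(jnienv, obj, fid);
+ *	return (lp.ptr);
+ */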
+
+/*
+ * Get the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo")
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DB object type.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *object_classname,
+ const char *name_of_field, jobject obj);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jint value);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a long type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jlong value);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DbLsn type.
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, DB_LSN value);
+
+/*
+ * Values of flags for verify_return() and report_exception().
+ * These indicate what sort of exceptions the method may throw
+ * (in addition to DbException).
+ */
+static const u_int32_t EXCEPTION_FILE_NOT_FOUND = 0x0001; /*FileNotFound*/
+
+/*
+ * Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text,
+ int err, unsigned long expect_mask);
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, as more things are added to the
+ * constructor of this type of exception.
+ */
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+ db_lockop_t op, db_lockmode_t mode,
+ jobject jdbt, jobject jlock, int index);
+
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor with args
+ * (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
+ */
+jobject create_exception(JNIEnv *jnienv, jstring text,
+ int err, jclass dbexcept);
+
+/*
+ * Report an error via the errcall mechanism.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+ jstring prefix, const char *message);
+
+/*
+ * If the object is null, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj);
+
+/*
+ * If the error code is non-zero, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long flags);
+
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is, report a DbMemoryException with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *locked_dbt);
+
+/*
+ * Create an object of the given class, calling its default constructor.
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name);
+
+/*
+ * Create a Dbt object, calling its default constructor.
+ */
+jobject create_dbt(JNIEnv *jnienv, const char *class_name);
+
+/*
+ * Convert a DB object to a Java encapsulation of that object.
+ * Note: This implementation creates a new Java object on each call,
+ * so it is generally useful when a new DB object has just been created.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj);
+
+/*
+ * Create a copy of the java string using __os_malloc.
+ * Caller must free it.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr);
+
+/*
+ * Create a java string from the given string
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string);
+
+/*
+ * Convert Java objects to the various C pointers they represent.
+ */
+DB *get_DB (JNIEnv *jnienv, jobject obj);
+DB_BTREE_STAT *get_DB_BTREE_STAT (JNIEnv *jnienv, jobject obj);
+DBC *get_DBC (JNIEnv *jnienv, jobject obj);
+DB_ENV *get_DB_ENV (JNIEnv *jnienv, jobject obj);
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_HASH_STAT *get_DB_HASH_STAT (JNIEnv *jnienv, jobject obj);
+DB_JAVAINFO *get_DB_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_LOCK *get_DB_LOCK (JNIEnv *jnienv, jobject obj);
+DB_LOGC *get_DB_LOGC (JNIEnv *jnienv, jobject obj);
+DB_LOG_STAT *get_DB_LOG_STAT (JNIEnv *jnienv, jobject obj);
+DB_LSN *get_DB_LSN (JNIEnv *jnienv, jobject obj);
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj);
+DB_MPOOL_STAT *get_DB_MPOOL_STAT (JNIEnv *jnienv, jobject obj);
+DB_QUEUE_STAT *get_DB_QUEUE_STAT (JNIEnv *jnienv, jobject obj);
+DB_TXN *get_DB_TXN (JNIEnv *jnienv, jobject obj);
+DB_TXN_STAT *get_DB_TXN_STAT (JNIEnv *jnienv, jobject obj);
+DBT *get_DBT (JNIEnv *jnienv, jobject obj);
+DBT_JAVAINFO *get_DBT_JAVAINFO (JNIEnv *jnienv, jobject obj);
+
+/*
+ * From a C object, create a Java object.
+ */
+jobject get_DbBtreeStat (JNIEnv *jnienv, DB_BTREE_STAT *dbobj);
+jobject get_Dbc (JNIEnv *jnienv, DBC *dbobj);
+jobject get_DbHashStat (JNIEnv *jnienv, DB_HASH_STAT *dbobj);
+jobject get_DbLogc (JNIEnv *jnienv, DB_LOGC *dbobj);
+jobject get_DbLogStat (JNIEnv *jnienv, DB_LOG_STAT *dbobj);
+jobject get_DbLsn (JNIEnv *jnienv, DB_LSN dbobj);
+jobject get_DbMpoolStat (JNIEnv *jnienv, DB_MPOOL_STAT *dbobj);
+jobject get_DbMpoolFStat (JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj);
+jobject get_DbQueueStat (JNIEnv *jnienv, DB_QUEUE_STAT *dbobj);
+jobject get_const_Dbt (JNIEnv *jnienv, const DBT *dbt, DBT_JAVAINFO **retp);
+jobject get_Dbt (JNIEnv *jnienv, DBT *dbt, DBT_JAVAINFO **retp);
+jobject get_DbTxn (JNIEnv *jnienv, DB_TXN *dbobj);
+jobject get_DbTxnStat (JNIEnv *jnienv, DB_TXN_STAT *dbobj);
+
+/* The java names of DB classes */
+extern const char * const name_DB;
+extern const char * const name_DB_BTREE_STAT;
+extern const char * const name_DBC;
+extern const char * const name_DB_DEADLOCK_EX;
+extern const char * const name_DB_ENV;
+extern const char * const name_DB_EXCEPTION;
+extern const char * const name_DB_HASH_STAT;
+extern const char * const name_DB_LOCK;
+extern const char * const name_DB_LOCK_STAT;
+extern const char * const name_DB_LOGC;
+extern const char * const name_DB_LOG_STAT;
+extern const char * const name_DB_LSN;
+extern const char * const name_DB_MEMORY_EX;
+extern const char * const name_DB_MPOOL_FSTAT;
+extern const char * const name_DB_MPOOL_STAT;
+extern const char * const name_DB_LOCKNOTGRANTED_EX;
+extern const char * const name_DB_PREPLIST;
+extern const char * const name_DB_QUEUE_STAT;
+extern const char * const name_DB_REP_STAT;
+extern const char * const name_DB_RUNRECOVERY_EX;
+extern const char * const name_DBT;
+extern const char * const name_DB_TXN;
+extern const char * const name_DB_TXN_STAT;
+extern const char * const name_DB_TXN_STAT_ACTIVE;
+extern const char * const name_DB_UTIL;
+extern const char * const name_DbAppendRecno;
+extern const char * const name_DbBtreeCompare;
+extern const char * const name_DbBtreePrefix;
+extern const char * const name_DbDupCompare;
+extern const char * const name_DbEnvFeedback;
+extern const char * const name_DbErrcall;
+extern const char * const name_DbFeedback;
+extern const char * const name_DbHash;
+extern const char * const name_DbRecoveryInit;
+extern const char * const name_DbRepTransport;
+extern const char * const name_DbSecondaryKeyCreate;
+extern const char * const name_DbTxnRecover;
+extern const char * const name_RepElectResult;
+extern const char * const name_RepProcessMessage;
+
+extern const char * const string_signature;
+
+extern jfieldID fid_Dbt_data;
+extern jfieldID fid_Dbt_offset;
+extern jfieldID fid_Dbt_size;
+extern jfieldID fid_Dbt_ulen;
+extern jfieldID fid_Dbt_dlen;
+extern jfieldID fid_Dbt_doff;
+extern jfieldID fid_Dbt_flags;
+extern jfieldID fid_Dbt_must_create_data;
+extern jfieldID fid_DbLockRequest_op;
+extern jfieldID fid_DbLockRequest_mode;
+extern jfieldID fid_DbLockRequest_timeout;
+extern jfieldID fid_DbLockRequest_obj;
+extern jfieldID fid_DbLockRequest_lock;
+extern jfieldID fid_RepProcessMessage_envid;
+
+#define JAVADB_ARGS JNIEnv *jnienv, jobject jthis
+
+#define JAVADB_GET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT j_fieldtype JNICALL \
+ Java_com_sleepycat_db_##j_class##_get_1##j_field \
+ (JAVADB_ARGS) \
+{ \
+ c_type *db= get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db)) \
+ return (db->c_field); \
+ return (0); \
+}
+
+#define JAVADB_SET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+ Java_com_sleepycat_db_##j_class##_set_1##j_field \
+ (JAVADB_ARGS, j_fieldtype value) \
+{ \
+ c_type *db= get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db)) \
+ db->c_field = value; \
+}
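+
+/*
+ * For illustration only, a hypothetical invocation (not necessarily
+ * one used in this tree): JAVADB_GET_FLD(Db, jint, flags, DB, flags)
+ * would expand to a JNI getter roughly like
+ *
+ *	JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1flags
+ *	    (JNIEnv *jnienv, jobject jthis)
+ *	{
+ *		DB *db = get_DB(jnienv, jthis);
+ *
+ *		if (verify_non_null(jnienv, db))
+ *			return (db->flags);
+ *		return (0);
+ *	}
+ *
+ * where "_1" is the JNI escape for the underscore in the Java method
+ * name get_flags.  JAVADB_SET_FLD generates the matching setter.
+ */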
+
+#define JAVADB_METHOD(_meth, _argspec, c_type, c_meth, _args) \
+JNIEXPORT void JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+ c_type *c_this = get_##c_type(jnienv, jthis); \
+ int ret; \
+ \
+ if (!verify_non_null(jnienv, c_this)) \
+ return; \
+ ret = c_this->c_meth _args; \
+ if (!DB_RETOK_STD(ret)) \
+ report_exception(jnienv, db_strerror(ret), ret, 0); \
+}
+
+#define JAVADB_METHOD_INT(_meth, _argspec, c_type, c_meth, _args, _retok) \
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+ c_type *c_this = get_##c_type(jnienv, jthis); \
+ int ret; \
+ \
+ if (!verify_non_null(jnienv, c_this)) \
+ return (0); \
+ ret = c_this->c_meth _args; \
+ if (!_retok(ret)) \
+ report_exception(jnienv, db_strerror(ret), ret, 0); \
+ return ((jint)ret); \
+}
+
+#define JAVADB_SET_METH(j_class, j_type, j_fld, c_type, c_field) \
+ JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, j_type val), c_type, \
+ set_##c_field, (c_this, val))
+
+#define JAVADB_SET_METH_STR(j_class, j_fld, c_type, c_field) \
+ JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, jstring val), c_type, \
+ set_##c_field, (c_this, (*jnienv)->GetStringUTFChars(jnienv, val, NULL)))
+
+
+/*
+ * These macros are used by code generated by the s_java script.
+ */
+#define JAVADB_STAT_INT(env, cl, jobj, statp, name) \
+ set_int_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LSN(env, cl, jobj, statp, name) \
+ set_lsn_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LONG(env, cl, jobj, statp, name) \
+ set_long_field(jnienv, cl, jobj, #name, statp->name)
+
+/*
+ * We build the active list separately.
+ */
+#define JAVADB_STAT_ACTIVE(env, cl, jobj, statp, name) \
+ do {} while(0)
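+
+/*
+ * For illustration only, with a hypothetical stat field st_nlocks:
+ * JAVADB_STAT_INT(env, cl, jobj, statp, st_nlocks) expands to
+ *
+ *	set_int_field(jnienv, cl, jobj, "st_nlocks", statp->st_nlocks)
+ *
+ * Note that the macro bodies use the surrounding jnienv variable
+ * rather than their "env" argument.
+ */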
+
+#endif /* !_JAVA_UTIL_H_ */
diff --git a/libdb/lock/Design b/libdb/lock/Design
new file mode 100644
index 0000000..e423ff7
--- /dev/null
+++ b/libdb/lock/Design
@@ -0,0 +1,301 @@
+# $Id$
+
+Synchronization in the Locking Subsystem
+
+This document describes how we implemented fine-grain locking
+in the lock manager (that is, locking on a hash bucket level instead of
+locking the entire region). We found that the increase in concurrency
+was not sufficient to warrant the increase in complexity or the additional
+cost of performing each lock operation. Therefore, we don't use this
+any more. Should we have to do fine-grain locking in a future release,
+this would be a reasonable starting point.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+1. Data structures
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+The lock manager maintains 3 different structures:
+
+Objects (__db_lockobj):
+ Describes an object that is locked. When used with DB, this consists
+ of a __db_ilock (a file identifier and a page number).
+
+Lockers (__db_locker):
+ Identifies a specific locker ID and maintains the head of a list of
+ locks held by a locker (for use during transaction commit/abort).
+
+Locks (__db_lock):
+ Describes a particular object lock held on behalf of a particular
+ locker id.
+
+Objects and Lockers reference Locks.
+
+These structures are organized via two synchronized hash tables. Each
+hash table consists of two physical arrays: the array of actual hash
+buckets and an array of mutexes so we can lock individual buckets, rather
+than the whole table.
+
+One hash table contains Objects and the other hash table contains Lockers.
+Objects contain two lists of locks, waiters and holders: holders currently
+hold a lock on the Object, waiters are locks waiting to be granted.
+Each Locker heads a singly linked list that connects the Locks held on
+behalf of the specific locker ID.
+
+In the diagram below:
+
+Locker ID #1 holds a lock on Object #1 (L1) and Object #2 (L5), and is
+waiting on a lock on Object #1 (L3).
+
+Locker ID #2 holds a lock on Object #1 (L2) and is waiting on a lock for
+Object #2 (L7).
+
+Locker ID #3 is waiting for a lock on Object #2 (L6).
+
+ OBJECT -----------------------
+ HASH | |
+ ----|------------- |
+ ________ _______ | | ________ | |
+ | |-->| O1 |--|---|-->| O2 | | |
+ |_______| |_____| | | |______| V |
+ | | W H--->L1->L2 W H--->L5 | holders
+ |_______| | | | | V
+ | | ------->L3 \ ------->L6------>L7 waiters
+ |_______| / \ \
+ . . / \ \
+ . . | \ \
+ . . | \ -----------
+ |_______| | -------------- |
+ | | ____|____ ___|_____ _|______
+ |_______| | | | | | |
+ | | | LID1 | | LID2 | | LID3 |
+ |_______| |_______| |_______| |______|
+ ^ ^ ^
+ | | |
+ ___|________________________|________|___
+ LOCKER | | | | | | | | |
+ HASH | | | | | | | | |
+ | | | | | | | | |
+ |____|____|____|____|____|____|____|____|
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+2. Synchronization
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+There are four types of mutexes in the subsystem.
+
+Object mutexes:
+ These map one-to-one to each bucket in the Object hash table.
+ Holding a mutex on an Object bucket secures all the Objects in
+ that bucket as well as the Lock structures linked from those
+ Objects. All fields in the Locks EXCEPT the Locker links (the
+ links that attach Locks by Locker ID) are protected by these
+ mutexes.
+
+Locker mutexes:
+ These map one-to-one to each bucket in the Locker hash table.
+ Holding a mutex on a Locker bucket secures the Locker structures
+ and the Locker links in the Locks.
+
+Memory mutex:
+ This mutex protects calls that allocate/free memory, i.e. calls to
+ __db_shalloc and __db_shalloc_free, as well as manipulation of
+ the Object, Locker and Lock free lists.
+
+Region mutex:
+ This mutex is currently only used to protect the locker ids.
+ It may also be needed later to provide exclusive access to
+ the region for deadlock detection.
+
+Creating or removing a Lock requires locking both the Object lock and the
+Locker lock (and eventually the shalloc lock to return the item to the
+free list).
+
+The locking hierarchy is as follows:
+
+ The Region mutex may never be acquired after any other mutex.
+
+ The Object mutex may be acquired after the Region mutex.
+
+ The Locker mutex may be acquired after the Region and Object
+ mutexes.
+
+ The Memory mutex may be acquired after any mutex.
+
+So, if both an Object mutex and a Locker mutex are going to be acquired,
+the Object mutex must be acquired first.
+
+The Memory mutex may be acquired after any other mutex, but no other mutexes
+can be acquired once the Memory mutex is held.
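+
+As a concrete example of the hierarchy (compare the "Get a lock"
+algorithm below), a lock_get call acquires the Object bucket mutex,
+then the Locker bucket mutex, then the Memory mutex to take structures
+off the free lists; it would be a violation to acquire either bucket
+mutex while still holding the Memory mutex.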
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+3. The algorithms:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+The locking subsystem supports four basic operations:
+ Get a Lock (lock_get)
+
+ Release a Lock (lock_put)
+
+ Release all the Locks on a specific Object (lock_vec)
+
+ Release all the Locks for a specific Locker (lock_vec)
+
+Get a lock:
+ Acquire Object bucket mutex.
+ Acquire Locker bucket mutex.
+
+ Acquire Memory mutex.
+ If the Object does not exist
+ Take an Object off the freelist.
+ If the Locker doesn't exist
+ Take a Locker off the freelist.
+ Take a Lock off the free list.
+ Release Memory mutex.
+
+ Add Lock to the Object list.
+ Add Lock to the Locker list.
+ Release Locker bucket mutex
+
+ If the lock cannot be granted
+ Release Object bucket mutex
+ Acquire lock mutex (blocks)
+
+ Acquire Object bucket mutex
+ If lock acquisition did not succeed (e.g., deadlock)
+ Acquire Locker bucket mutex
+ If locker should be destroyed
+ Remove locker from hash table
+ Acquire Memory mutex
+ Return locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex
+
+ If object should be released
+ Acquire Memory mutex
+ Return object to free list
+ Release Memory mutex
+
+ Release Object bucket mutex
+
+Release a lock:
+ Acquire Object bucket mutex.
+ (Requires that we be able to find the Object hash bucket
+ without looking inside the Lock itself.)
+
+ If releasing a single lock and the user provided generation number
+ doesn't match the Lock's generation number, the Lock has been reused
+ and we return failure.
+
+ Enter lock_put_internal:
+ if the Lock is still on the Object's lists:
+ Increment Lock's generation number.
+ Remove Lock from the Object's list (NULL link fields).
+ Promote locks for the Object.
+
+ Enter locker_list_removal
+ Acquire Locker bucket mutex.
+ If Locker doesn't exist:
+ Release Locker bucket mutex
+ Release Object bucket mutex
+ Return error.
+ Else if Locker marked as deleted:
+ dont_release = TRUE
+ Else
+ Remove Lock from Locker list.
+ If Locker has no more locks
+ Remove Locker from table.
+ Acquire Memory mutex.
+ Return Locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex.
+ Exit locker_list_removal
+
+ If (!dont_release)
+ Acquire Memory mutex
+ Return Lock to free list
+ Release Memory mutex
+
+ Exit lock_put_internal
+
+ Release Object bucket mutex
+
+Release all the Locks on a specific Object (lock_vec, DB_PUT_ALL_OBJ):
+
+ Acquire Object bucket mutex.
+
+ For each lock on the waiter list:
+ lock_put_internal
+ For each lock on the holder list:
+ lock_put_internal
+
+ Release Object bucket mutex.
+
+Release all the Locks for a specific Locker (lock_vec, DB_PUT_ALL):
+
+ Acquire Locker bucket mutex.
+ Mark Locker deleted.
+ Release Locker mutex.
+
+ For each lock on the Locker's list:
+ Remove from locker's list
+ (The lock could get put back on the free list in
+ lock_put and then could get reallocated and the
+ act of setting its locker links could clobber us.)
+ Perform "Release a Lock" above: skip locker_list_removal.
+
+ Acquire Locker bucket mutex.
+ Remove Locker
+ Release Locker mutex.
+
+ Acquire Memory mutex
+ Return Locker to free list
+ Release Memory mutex
+
+Deadlock detection (lock_detect):
+
+ For each bucket in Object table
+ Acquire the Object bucket mutex.
+ create waitsfor
+
+ For each bucket in Object table
+ Release the Object mutex.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+FAQ:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Q: Why do you need generation numbers?
+A: If a lock has been released due to a transaction abort (potentially in a
+ different process), and then the lock is released by a thread of control
+ unaware of the abort, the lock might have potentially been re-allocated
+ to a different object. The generation numbers detect this problem.
+
+ Note, we assume that reads/writes of lock generation numbers are atomic,
+ if they are not, it is theoretically possible that a re-allocated lock
+ could be mistaken for another lock.
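+
+   A sketch of the check (assuming the caller's DB_LOCK remembers the
+   generation number it was granted with):
+
+	If callers_lock.gen != shared_lock.gen
+		Return failure (the lock has been re-used)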
+
+Q: Why is it safe to walk the Locker list without holding any mutexes at
+ all?
+A: Locks are created with both the Object and Locker bucket mutexes held.
+ Once created, they are removed in two ways:
+
+ a) when a specific Lock is released, in which case, the Object and
+ Locker bucket mutexes are again held, and
+
+ b) when all Locks for a specific Locker Id are released.
+
+ In case b), the Locker bucket mutex is held while the Locker chain is
+ marked as "destroyed", which blocks any further access to the Locker
+ chain. Then, each individual Object bucket mutex is acquired when each
+ individual Lock is removed.
+
+Q: What are the implications of doing fine grain locking?
+
+A: Since we no longer globally lock the entire region, lock_vec will no
+ longer be atomic. We still execute the items in a lock_vec in order,
+ so things like lock-coupling still work, but you can't make any
+ guarantees about atomicity.
+
+Q: How do I configure for FINE_GRAIN locking?
+
+A: We currently do not support any automatic configuration for FINE_GRAIN
+ locking.  When we do, we will need to document the atomicity discussion
+ listed above (it is bug-report #553).
diff --git a/libdb/lock/lock.c b/libdb/lock/lock.c
new file mode 100644
index 0000000..66d4a2b
--- /dev/null
+++ b/libdb/lock/lock.c
@@ -0,0 +1,1874 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __lock_checklocker __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t));
+static void __lock_freelocker
+ __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
+static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
+ const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *));
+static int __lock_getobj
+ __P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **));
+static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
+static int __lock_put_internal __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
+static void __lock_remove_waiter __P((DB_LOCKTAB *,
+ DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));
+
+static const char __db_lock_err[] = "Lock table is out of available %s";
+static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
+static const char __db_locker_invalid[] = "Locker is not valid";
+
+/*
+ * __lock_id --
+ * Generate a unique locker id.
+ *
+ * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *));
+ */
+int
+__lock_id(dbenv, idp)
+ DB_ENV *dbenv;
+ u_int32_t *idp;
+{
+ DB_LOCKER *lk;
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ u_int32_t *ids, locker_ndx;
+ int nids, ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ ret = 0;
+
+ /*
+ * Allocate a new lock id. If we wrap around then we
+ * find the minimum currently in use and make sure we
+ * can stay below that. This code is similar to code
+ * in __txn_begin_int for recovering txn ids.
+ */
+ LOCKREGION(dbenv, lt);
+ /*
+ * Our current valid range can span the maximum valid value, so check
+ * for it and wrap manually.
+ */
+ if (region->stat.st_id == DB_LOCK_MAXID &&
+ region->stat.st_cur_maxid != DB_LOCK_MAXID)
+ region->stat.st_id = DB_LOCK_INVALIDID;
+ if (region->stat.st_id == region->stat.st_cur_maxid) {
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0)
+ goto err;
+ nids = 0;
+ for (lk = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+ lk != NULL;
+ lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker))
+ ids[nids++] = lk->id;
+ region->stat.st_id = DB_LOCK_INVALIDID;
+ region->stat.st_cur_maxid = DB_LOCK_MAXID;
+ if (nids != 0)
+ __db_idspace(ids, nids,
+ &region->stat.st_id, &region->stat.st_cur_maxid);
+ __os_free(dbenv, ids);
+ }
+ *idp = ++region->stat.st_id;
+
+ /* Allocate a locker for this id. */
+ LOCKER_LOCK(lt, region, *idp, locker_ndx);
+ ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);
+
+err: UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+/*
+ * __lock_id_free --
+ * Free a locker id.
+ *
+ * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_id_free(dbenv, id)
+ DB_ENV *dbenv;
+ u_int32_t id;
+{
+ DB_LOCKER *sh_locker;
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ LOCKER_LOCK(lt, region, id, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
+ goto err;
+ if (sh_locker == NULL) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (sh_locker->nlocks != 0) {
+ __db_err(dbenv, "Locker still has locks");
+ ret = EINVAL;
+ goto err;
+ }
+
+ __lock_freelocker(lt, region, sh_locker, locker_ndx);
+
+err: UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_vec --
+ * Vector lock routine. This function takes a set of operations
+ * and performs them all at once. In addition, lock_vec provides
+ * functionality for lock inheritance, releasing all locks for a
+ * given locker (used during transaction commit/abort), releasing
+ * all locks on a given object, and generating debugging information.
+ *
+ * PUBLIC: int __lock_vec __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ */
+int
+__lock_vec(dbenv, locker, flags, list, nlist, elistp)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ int nlist;
+ DB_LOCKREQ *list, **elistp;
+{
+ struct __db_lock *lp, *next_lock;
+ DB_LOCK lock;
+ DB_LOCKER *sh_locker, *sh_parent;
+ DB_LOCKOBJ *obj, *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t lndx, ndx;
+ int did_abort, i, ret, run_dd, upgrade, writes;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_vec", DB_INIT_LOCK);
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_vec",
+ flags, DB_LOCK_FREE_LOCKER | DB_LOCK_NOWAIT)) != 0)
+ return (ret);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ run_dd = 0;
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ for (i = 0, ret = 0; i < nlist && ret == 0; i++)
+ switch (list[i].op) {
+ case DB_LOCK_GET_TIMEOUT:
+ LF_SET(DB_LOCK_SET_TIMEOUT);
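+ /* FALL THROUGH */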
+ case DB_LOCK_GET:
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags, list[i].obj,
+ list[i].mode, list[i].timeout, &list[i].lock);
+ break;
+ case DB_LOCK_INHERIT:
+ /*
+ * Get the committing locker and mark it as deleted.
+ * This allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. However, if the locker doesn't
+ * exist, that just means that the child holds no
+ * locks, so inheritance is easy!
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ if (ret == 0 && sh_locker != NULL)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ break;
+ }
+
+ /* Make sure we are a child transaction. */
+ if (sh_locker->parent_locker == INVALID_ROFF) {
+ __db_err(dbenv, "Not a child transaction");
+ ret = EINVAL;
+ break;
+ }
+ sh_parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, sh_locker->parent_locker);
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /*
+ * Now, lock the parent locker; move locks from
+ * the committing list to the parent's list.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
+ if (ret == 0) {
+ __db_err(dbenv,
+ "Parent locker is not valid");
+ ret = EINVAL;
+ }
+ break;
+ }
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
+ SH_LIST_REMOVE(lp, locker_links, __db_lock);
+ SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp,
+ locker_links, __db_lock);
+ lp->holder = sh_parent->id;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+
+ (void)__lock_promote(lt, obj,
+ LF_ISSET(DB_LOCK_NOWAITERS));
+ }
+
+ /* Transfer child counts to parent. */
+ sh_parent->nlocks += sh_locker->nlocks;
+ sh_parent->nwrites += sh_locker->nwrites;
+
+ /* Now free the original locker. */
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL);
+ break;
+ case DB_LOCK_PUT:
+ ret = __lock_put_nolock(dbenv,
+ &list[i].lock, &run_dd, flags);
+ break;
+ case DB_LOCK_PUT_ALL:
+ case DB_LOCK_PUT_READ:
+ case DB_LOCK_UPGRADE_WRITE:
+ /*
+ * Get the locker and mark it as deleted. This
+ * allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. Since the locker may hold no
+ * locks (i.e., you could call abort before you've
+ * done any work), it's perfectly reasonable for there
+ * to be no locker; this is not an error.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ /*
+ * If ret is set, then we'll generate an
+ * error. If it's not set, we have nothing
+ * to do.
+ */
+ break;
+ upgrade = 0;
+ writes = 1;
+ if (list[i].op == DB_LOCK_PUT_READ)
+ writes = 0;
+ else if (list[i].op == DB_LOCK_UPGRADE_WRITE) {
+ if (F_ISSET(sh_locker, DB_LOCKER_DIRTY))
+ upgrade = 1;
+ writes = 0;
+ }
+
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /* Now traverse the locks, releasing each one. */
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;) {
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ if (writes == 1 || lp->mode == DB_LOCK_READ) {
+ SH_LIST_REMOVE(lp,
+ locker_links, __db_lock);
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ SHOBJECT_LOCK(lt, region, sh_obj, lndx);
+ /*
+ * We are not letting lock_put_internal
+ * unlink the lock, so we'll have to
+ * update counts here.
+ */
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites--;
+ ret = __lock_put_internal(lt, lp,
+ lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
+ if (ret != 0)
+ break;
+ lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ } else
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock);
+ }
+ switch (list[i].op) {
+ case DB_LOCK_UPGRADE_WRITE:
+ if (upgrade != 1)
+ goto up_done;
+ for (lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock)) {
+ if (ret != 0)
+ break;
+ lock.off = R_OFFSET(&lt->reginfo, lp);
+ lock.gen = lp->gen;
+ F_SET(sh_locker, DB_LOCKER_INABORT);
+ ret = __lock_get_internal(lt,
+ locker, DB_LOCK_UPGRADE,
+ NULL, DB_LOCK_WRITE, 0, &lock);
+ }
+ up_done:
+ /* FALL THROUGH */
+ case DB_LOCK_PUT_READ:
+ F_CLR(sh_locker, DB_LOCKER_DELETED);
+ break;
+
+ case DB_LOCK_PUT_ALL:
+ if (ret == 0)
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DB_LOCK_PUT_OBJ:
+ /* Remove all the locks associated with an object. */
+ OBJECT_LOCK(lt, region, list[i].obj, ndx);
+ if ((ret = __lock_getobj(lt, list[i].obj,
+ ndx, 0, &sh_obj)) != 0 || sh_obj == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Go through both waiters and holders. Don't bother
+ * to run promotion, because everyone is getting
+ * released. The processes waiting will still get
+ * awakened as their waiters are released.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+
+ /*
+ * On the last time around, the object will get
+ * reclaimed by __lock_put_internal, structure the
+ * loop carefully so we do not get bitten.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = next_lock) {
+ next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+ }
+ break;
+
+ case DB_LOCK_TIMEOUT:
+ ret = __lock_set_timeout(dbenv,
+ locker, 0, DB_SET_TXN_NOW);
+ region->need_dd = 1;
+ break;
+
+ case DB_LOCK_TRADE:
+ /*
+ * INTERNAL USE ONLY.
+ * Change the holder of the lock described in
+ * list[i].lock to the locker-id specified by
+ * the locker parameter.
+ */
+ /*
+ * You had better know what you're doing here.
+ * We are trading locker-id's on a lock to
+ * facilitate file locking on open DB handles.
+ * We do not do any conflict checking on this,
+ * so heaven help you if you use this flag under
+ * any other circumstances.
+ */
+ ret = __lock_trade(dbenv, &list[i].lock, locker);
+ break;
+#ifdef DEBUG
+ case DB_LOCK_DUMP:
+ /* Find the locker. */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ break;
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
+ __lock_printlock(lt, lp, 1);
+ }
+ break;
+#endif
+ default:
+ __db_err(dbenv,
+ "Invalid lock operation: %d", list[i].op);
+ ret = EINVAL;
+ break;
+ }
+
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
+ run_dd = 1;
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ if (run_dd)
+ (void)dbenv->lock_detect(dbenv, 0, region->detect, &did_abort);
+
+ if (ret != 0 && elistp != NULL)
+ *elistp = &list[i - 1];
+
+ return (ret);
+}
+
+/*
+ * Lock acquisition routines. There are two library interfaces:
+ *
+ * __lock_get --
+ * original lock get interface that takes a locker id.
+ *
+ * All the work for lock_get (and for the GET option of lock_vec) is done
+ * inside of lock_get_internal.
+ *
+ * PUBLIC: int __lock_get __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ */
+int
+__lock_get(dbenv, locker, flags, obj, lock_mode, lock)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ DB_LOCK *lock;
+{
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_get", DB_INIT_LOCK);
+
+ if (IS_RECOVERING(dbenv)) {
+ LOCK_INIT(*lock);
+ return (0);
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_get", flags,
+ DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
+ return (ret);
+
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags, obj, lock_mode, 0, lock);
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ return (ret);
+}
+
+static int
+__lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ db_timeout_t timeout;
+ DB_LOCK *lock;
+{
+ struct __db_lock *newl, *lp, *wwrite;
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx, obj_ndx;
+ int did_abort, ihold, on_locker_list, no_dd, ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ on_locker_list = no_dd = ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /*
+ * If we are not going to reuse this lock, initialize the offset to
+ * invalid so that if we fail it will not look like a valid lock.
+ */
+ if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
+ LOCK_INIT(*lock);
+
+ /* Check that the lock mode is valid. */
+ if ((u_int32_t)lock_mode >= region->stat.st_nmodes) {
+ __db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
+ (u_long)lock_mode);
+ return (EINVAL);
+ }
+
+ /* Allocate a new lock. Optimize for the common case of a grant. */
+ region->stat.st_nrequests++;
+ if ((newl = SH_TAILQ_FIRST(&region->free_locks, __db_lock)) != NULL)
+ SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);
+ if (newl == NULL) {
+ __db_err(dbenv, __db_lock_err, "locks");
+ return (ENOMEM);
+ }
+ if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
+ region->stat.st_maxnlocks = region->stat.st_nlocks;
+
+ if (obj == NULL) {
+ DB_ASSERT(LOCK_ISSET(*lock));
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ sh_obj = (DB_LOCKOBJ *) ((u_int8_t *)lp + lp->obj);
+ } else {
+ /* Allocate a shared memory new object. */
+ OBJECT_LOCK(lt, region, obj, lock->ndx);
+ if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
+ goto err;
+ }
+
+ /* Get the locker, we may need it to find our parent. */
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret = __lock_getlocker(lt, locker,
+ locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) {
+ /*
+ * XXX We cannot tell if we created the object or not,
+ * so we don't know if we should free it or not.
+ */
+ goto err;
+ }
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Now we have a lock and an object and we need to see if we should
+ * grant the lock. We use a FIFO ordering so we can only grant a
+ * new lock if it does not conflict with anyone on the holders list
+ * OR anyone on the waiters list. The reason that we don't grant if
+ * there's a conflict is that this can lead to starvation (a writer
+ * waiting on a popularly read item will never be granted). The
+ * downside of this is that a waiting reader can prevent an upgrade
+ * from reader to writer, which is not uncommon.
+ *
+ * There is one exception to the no-conflict rule. If a lock is held
+ * by the requesting locker AND the new lock does not conflict with
+ * any other holders, then we grant the lock. The most common place
+ * this happens is when the holder has a WRITE lock and a READ lock
+ * request comes in for the same locker. If we do not grant the read
+ * lock, then we guarantee deadlock.
+ *
+ * In case of conflict, we put the new lock on the end of the waiters
+ * list, unless we are upgrading in which case the locker goes on the
+ * front of the list.
+ */
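+ /*
+ * An illustrative scenario (not code): if locker A holds a READ
+ * lock on this object and locker B is already waiting for a WRITE
+ * lock, a new READ request from locker C conflicts with B's waiting
+ * request and therefore also waits, preserving FIFO order and
+ * preventing writer starvation.
+ */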
+ ihold = 0;
+ lp = NULL;
+ if (LF_ISSET(DB_LOCK_SWITCH))
+ goto put_lock;
+
+ wwrite = NULL;
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (locker == lp->holder) {
+ if (lp->mode == lock_mode &&
+ lp->status == DB_LSTAT_HELD) {
+ if (LF_ISSET(DB_LOCK_UPGRADE))
+ goto upgrade;
+
+ /*
+ * Lock is held, so we can increment the
+ * reference count and return this lock.
+ * We do not count reference increments
+ * towards the locks held by the locker.
+ */
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+ lock->mode = lp->mode;
+
+ ret = 0;
+ goto done;
+ } else {
+ ihold = 1;
+ if (lock_mode == DB_LOCK_WRITE &&
+ lp->mode == DB_LOCK_WWRITE)
+ wwrite = lp;
+ }
+ } else if (__lock_is_parent(lt, lp->holder, sh_locker))
+ ihold = 1;
+ else if (CONFLICTS(lt, region, lp->mode, lock_mode))
+ break;
+ }
+
+ /*
+ * If we are looking to upgrade a WWRITE to a WRITE lock
+ * and there were no conflicting locks then we can just
+ * upgrade this lock to the one we want.
+ */
+ if (wwrite != NULL && lp == NULL) {
+ lp = wwrite;
+ lp->mode = lock_mode;
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+ lock->mode = lp->mode;
+
+ ret = 0;
+ goto done;
+ }
+
+ /*
+ * Make the new lock point to the new object, initialize fields.
+ *
+ * This lock is not linked in anywhere, so we can muck with it
+ * without holding any mutexes.
+ */
+put_lock:
+ newl->holder = locker;
+ newl->refcount = 1;
+ newl->mode = lock_mode;
+ newl->obj = SH_PTR_TO_OFF(newl, sh_obj);
+ newl->status = DB_LSTAT_HELD;
+
+ /*
+ * If we are upgrading, then there are two scenarios. Either
+ * we had no conflicts, so we can do the upgrade. Or, there
+ * is a conflict and we should wait at the HEAD of the waiters
+ * list.
+ */
+ if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ if (lp == NULL)
+ goto upgrade;
+
+ /*
+ * There was a conflict, wait. If this is the first waiter,
+ * add the object to the deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ SH_TAILQ_INSERT_HEAD(&sh_obj->waiters, newl, links, __db_lock);
+ goto llist;
+ }
+
+ if (lp == NULL && !ihold)
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (CONFLICTS(lt, region, lp->mode, lock_mode) &&
+ locker != lp->holder)
+ break;
+ }
+ if (!LF_ISSET(DB_LOCK_SWITCH) && lp == NULL)
+ SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links);
+ else if (!LF_ISSET(DB_LOCK_NOWAIT)) {
+ /*
+ * If this is the first waiter, add the object to the
+ * deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
+ } else {
+ ret = DB_LOCK_NOTGRANTED;
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
+ __lock_freelocker(lt, region, sh_locker, locker_ndx);
+ region->stat.st_nnowaits++;
+ goto err;
+ }
+
+llist:
+ /*
+ * Now, insert the lock onto its locker's list. If the locker does
+	 * not currently hold any locks, there's no reason to run the
+	 * deadlock detector; save that information.
+ */
+ on_locker_list = 1;
+ no_dd = sh_locker->master_locker == INVALID_ROFF &&
+ SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL &&
+ SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
+
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
+
+ if (LF_ISSET(DB_LOCK_SWITCH) || lp != NULL) {
+ if (LF_ISSET(DB_LOCK_SWITCH) &&
+ (ret = __lock_put_nolock(dbenv,
+ lock, &ihold, DB_LOCK_NOWAITERS)) != 0)
+ goto err;
+ /*
+ * This is really a blocker for the thread. It should be
+ * initialized locked, so that when we try to acquire it, we
+ * block.
+ */
+ newl->status = DB_LSTAT_WAITING;
+ region->stat.st_nconflicts++;
+ region->need_dd = 1;
+ /*
+		 * First, check whether this txn has expired.
+		 * If not, see if the lock timeout is past
+		 * the expiration of the txn; if it is, use
+		 * the txn expiration time. lk_expire is passed
+ * to avoid an extra call to get the time.
+ */
+ if (__lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)) {
+ newl->status = DB_LSTAT_ABORTED;
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+
+ /*
+ * Remove the lock from the wait queue and if
+ * this was the only lock on the wait queue remove
+ * this object from the deadlock detector object
+ * list.
+ */
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ SH_TAILQ_REMOVE(
+ &sh_obj->waiters, newl, links, __db_lock);
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ /* Clear the timeout, we are done. */
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ goto expired;
+ }
+
+ /*
+ * If a timeout was specified in this call then it
+ * takes priority. If a lock timeout has been specified
+ * for this transaction then use that, otherwise use
+ * the global timeout value.
+ */
+ if (!LF_ISSET(DB_LOCK_SET_TIMEOUT)) {
+ if (F_ISSET(sh_locker, DB_LOCKER_TIMEOUT))
+ timeout = sh_locker->lk_timeout;
+ else
+ timeout = region->lk_timeout;
+ }
+ if (timeout != 0)
+ __lock_expires(dbenv, &sh_locker->lk_expire, timeout);
+ else
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ if (LOCK_TIME_ISVALID(&sh_locker->tx_expire) &&
+ (timeout == 0 || __lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)))
+ sh_locker->lk_expire = sh_locker->tx_expire;
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ /*
+ * We are about to wait; before waiting, see if the deadlock
+ * detector should be run.
+ */
+ if (region->detect != DB_LOCK_NORUN && !no_dd)
+ (void)dbenv->lock_detect(
+ dbenv, 0, region->detect, &did_abort);
+
+ MUTEX_LOCK(dbenv, &newl->mutex);
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+expired: /* Turn off lock timeout. */
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ if (newl->status != DB_LSTAT_PENDING) {
+ (void)__lock_checklocker(lt, newl, newl->holder, 0);
+ switch (newl->status) {
+ case DB_LSTAT_ABORTED:
+ on_locker_list = 0;
+ ret = DB_LOCK_DEADLOCK;
+ break;
+ case DB_LSTAT_NOTEXIST:
+ ret = DB_LOCK_NOTEXIST;
+ break;
+ case DB_LSTAT_EXPIRED:
+ SHOBJECT_LOCK(lt,
+ region, sh_obj, obj_ndx);
+ if ((ret = __lock_put_internal(
+ lt, newl, obj_ndx, 0) != 0))
+ goto err;
+ if (LOCK_TIME_EQUAL(
+ &sh_locker->lk_expire,
+ &sh_locker->tx_expire)) {
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+ return (DB_LOCK_DEADLOCK);
+ } else {
+ region->stat.st_nlocktimeouts++;
+ return (DB_LOCK_NOTGRANTED);
+ }
+ default:
+ ret = EINVAL;
+ break;
+ }
+ goto err;
+ } else if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ /*
+ * The lock that was just granted got put on the
+ * holders list. Since we're upgrading some other
+ * lock, we've got to remove it here.
+ */
+ SH_TAILQ_REMOVE(
+ &sh_obj->holders, newl, links, __db_lock);
+ /*
+			 * Ensure that the lock is not believed to be on
+			 * the object's lists, if we're traversing by locker.
+ */
+ newl->links.stqe_prev = -1;
+ goto upgrade;
+ } else
+ newl->status = DB_LSTAT_HELD;
+ }
+
+ lock->off = R_OFFSET(&lt->reginfo, newl);
+ lock->gen = newl->gen;
+ lock->mode = newl->mode;
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(newl->mode))
+ sh_locker->nwrites++;
+
+ return (0);
+
+upgrade:/*
+ * This was an upgrade, so return the new lock to the free list and
+ * upgrade the mode of the original lock.
+ */
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->mode = lock_mode;
+
+ ret = 0;
+ /* FALLTHROUGH */
+
+done:
+err: newl->status = DB_LSTAT_FREE;
+ region->stat.st_nlocks--;
+ if (on_locker_list) {
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ }
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, newl, links, __db_lock);
+ return (ret);
+}
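+
+/*
+ * Editorial note, not part of the original Berkeley DB source: a worked
+ * example of the FIFO grant policy implemented above, assuming the usual
+ * conflict matrix in which READ and WRITE conflict but two READ locks do
+ * not.
+ *
+ * 1. Locker A holds a WRITE lock on object O.
+ * 2. Locker B requests a READ lock on O; it conflicts with A's holder
+ *    entry, so B is queued at the tail of O's waiters list.
+ * 3. Locker A now requests a READ lock on O.  The request conflicts with
+ *    no holder other than A itself, so the "ihold" exception grants it
+ *    immediately; refusing it would deadlock A against B.
+ * 4. A third locker C requesting READ also conflicts with A's WRITE lock
+ *    and waits behind B, preserving the FIFO order.
+ */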
+
+/*
+ * Lock release routines.
+ *
+ * The user-callable one is lock_put and the three we use internally are
+ * __lock_put_nolock, __lock_put_internal and __lock_downgrade.
+ *
+ * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *));
+ */
+int
+__lock_put(dbenv, lock)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+{
+ DB_LOCKTAB *lt;
+ int ret, run_dd;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);
+
+ if (IS_RECOVERING(dbenv))
+ return (0);
+
+ lt = dbenv->lk_handle;
+
+ LOCKREGION(dbenv, lt);
+ ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
+ UNLOCKREGION(dbenv, lt);
+
+ /*
+ * Only run the lock detector if put told us to AND we are running
+ * in auto-detect mode. If we are not running in auto-detect, then
+ * a call to lock_detect here will 0 the need_dd bit, but will not
+ * actually abort anything.
+ */
+ if (ret == 0 && run_dd)
+ (void)dbenv->lock_detect(dbenv, 0,
+ ((DB_LOCKREGION *)lt->reginfo.primary)->detect, NULL);
+ return (ret);
+}
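+
+#if 0
+/*
+ * Editorial sketch, not part of the original Berkeley DB source and kept
+ * out of compilation: how an application is expected to drive the
+ * lock_get/lock_put pair serviced by the code in this file.  It assumes
+ * an environment opened with DB_INIT_LOCK.
+ */
+static int
+example_lock_usage(dbenv)
+	DB_ENV *dbenv;
+{
+	DBT obj;
+	DB_LOCK lock;
+	u_int32_t locker;
+	int ret;
+
+	/* Allocate a locker id for this thread of control. */
+	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
+		return (ret);
+
+	/* Describe the object being locked; any byte string will do. */
+	memset(&obj, 0, sizeof(obj));
+	obj.data = "example-object";
+	obj.size = sizeof("example-object") - 1;
+
+	/* Acquire a read lock, waiting if necessary. */
+	if ((ret = dbenv->lock_get(dbenv,
+	    locker, 0, &obj, DB_LOCK_READ, &lock)) != 0)
+		goto err;
+
+	/* ... use the protected resource ... */
+
+	/* Release the lock; lock_put may then run the deadlock detector. */
+	ret = dbenv->lock_put(dbenv, &lock);
+
+err:	(void)dbenv->lock_id_free(dbenv, locker);
+	return (ret);
+}
+#endif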
+
+static int
+__lock_put_nolock(dbenv, lock, runp, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ int *runp;
+ u_int32_t flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ int ret;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ LOCK_INIT(*lock);
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
+ return (EINVAL);
+ }
+
+ ret = __lock_put_internal(lt,
+ lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
+
+ *runp = 0;
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
+ *runp = 1;
+
+ return (ret);
+}
+
+/*
+ * __lock_downgrade --
+ * Used to downgrade locks. Currently this is used in two places,
+ * 1) by the concurrent access product to downgrade write locks
+ * back to iwrite locks and 2) to downgrade write-handle locks to read-handle
+ * locks at the end of an open/create.
+ *
+ * PUBLIC: int __lock_downgrade __P((DB_ENV *,
+ * PUBLIC: DB_LOCK *, db_lockmode_t, u_int32_t));
+ */
+int
+__lock_downgrade(dbenv, lock, new_mode, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ db_lockmode_t new_mode;
+ u_int32_t flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKER *sh_locker;
+ DB_LOCKOBJ *obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t indx;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "lock_downgrade");
+ ret = EINVAL;
+ goto out;
+ }
+
+ LOCKER_LOCK(lt, region, lockp->holder, indx);
+
+ if ((ret = __lock_getlocker(lt, lockp->holder,
+ indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ goto out;
+ }
+ if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
+ sh_locker->nwrites--;
+
+ if (new_mode == DB_LOCK_WWRITE)
+ F_SET(sh_locker, DB_LOCKER_DIRTY);
+
+ lockp->mode = new_mode;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
+
+out: UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+static int
+__lock_put_internal(lt, lockp, obj_ndx, flags)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t obj_ndx, flags;
+{
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int ret, state_changed;
+
+ region = lt->reginfo.primary;
+ ret = state_changed = 0;
+
+ if (!OBJ_LINKS_VALID(lockp)) {
+ /*
+ * Someone removed this lock while we were doing a release
+ * by locker id. We are trying to free this lock, but it's
+ * already been done; all we need to do is return it to the
+ * free list.
+ */
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ return (0);
+ }
+
+ if (LF_ISSET(DB_LOCK_DOALL))
+ region->stat.st_nreleases += lockp->refcount;
+ else
+ region->stat.st_nreleases++;
+
+ if (!LF_ISSET(DB_LOCK_DOALL) && lockp->refcount > 1) {
+ lockp->refcount--;
+ return (0);
+ }
+
+ /* Increment generation number. */
+ lockp->gen++;
+
+ /* Get the object associated with this lock. */
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+
+ /* Remove this lock from its holders/waitlist. */
+ if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
+ __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
+ else {
+ SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
+ lockp->links.stqe_prev = -1;
+ }
+
+ if (LF_ISSET(DB_LOCK_NOPROMOTE))
+ state_changed = 0;
+ else
+ state_changed = __lock_promote(lt,
+ sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));
+
+ if (LF_ISSET(DB_LOCK_UNLINK))
+ ret = __lock_checklocker(lt, lockp, lockp->holder, flags);
+
+ /* Check if object should be reclaimed. */
+ if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
+ SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
+ HASHREMOVE_EL(lt->obj_tab,
+ obj_ndx, __db_lockobj, links, sh_obj);
+ if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
+ __db_shalloc_free(lt->reginfo.addr,
+ SH_DBT_PTR(&sh_obj->lockobj));
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ region->stat.st_nobjects--;
+ state_changed = 1;
+ }
+
+ /* Free lock. */
+ if (!LF_ISSET(DB_LOCK_UNLINK) && LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ }
+
+ /*
+	 * If we did not promote anyone, we need to run the deadlock
+ * detector again.
+ */
+ if (state_changed == 0)
+ region->need_dd = 1;
+
+ return (ret);
+}
+
+/*
+ * Utility functions; listed alphabetically.
+ */
+
+/*
+ * __lock_checklocker --
+ * If a locker has no more locks, then we can free the object.
+ * Return a boolean indicating whether we freed the object or not.
+ *
+ * Must be called without the locker's lock set.
+ */
+static int
+__lock_checklocker(lt, lockp, locker, flags)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t locker, flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ ret = 0;
+
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ /* If the locker's list is NULL, free up the locker. */
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ goto freelock;
+ }
+
+ if (F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ LF_CLR(DB_LOCK_FREE);
+ if (!LF_ISSET(DB_LOCK_IGNOREDEL))
+ goto freelock;
+ }
+
+ if (LF_ISSET(DB_LOCK_UNLINK)) {
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+ if (lockp->status == DB_LSTAT_HELD) {
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lockp->mode))
+ sh_locker->nwrites--;
+ }
+ }
+
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
+ __lock_freelocker( lt, region, sh_locker, indx);
+
+freelock:
+ if (LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ }
+
+ return (ret);
+}
+
+/*
+ * __lock_addfamilylocker
+ * Put a locker entry in for a child transaction.
+ *
+ * PUBLIC: int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_addfamilylocker(dbenv, pid, id)
+ DB_ENV *dbenv;
+ u_int32_t pid, id;
+{
+ DB_LOCKER *lockerp, *mlockerp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ /* get/create the parent locker info */
+ LOCKER_LOCK(lt, region, pid, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ pid, ndx, 1, &mlockerp)) != 0)
+ goto err;
+
+ /*
+ * We assume that only one thread can manipulate
+ * a single transaction family.
+ * Therefore the master locker cannot go away while
+ * we manipulate it, nor can another child in the
+ * family be created at the same time.
+ */
+ LOCKER_LOCK(lt, region, id, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ id, ndx, 1, &lockerp)) != 0)
+ goto err;
+
+ /* Point to our parent. */
+ lockerp->parent_locker = R_OFFSET(&lt->reginfo, mlockerp);
+
+ /* See if this locker is the family master. */
+ if (mlockerp->master_locker == INVALID_ROFF)
+ lockerp->master_locker = R_OFFSET(&lt->reginfo, mlockerp);
+ else {
+ lockerp->master_locker = mlockerp->master_locker;
+ mlockerp = R_ADDR(&lt->reginfo, mlockerp->master_locker);
+ }
+
+ /*
+ * Link the child at the head of the master's list.
+	 * The guess is that, when looking for a deadlock,
+	 * the most recent child is the one that's blocked.
+ */
+ SH_LIST_INSERT_HEAD(
+ &mlockerp->child_locker, lockerp, child_link, __db_locker);
+
+err:
+ UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+/*
+ * __lock_freefamilylocker
+ * Remove a locker from the hash table and its family.
+ *
+ * This must be called without the locker bucket locked.
+ *
+ * PUBLIC: int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+ */
+int
+__lock_freefamilylocker(lt, locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL)
+ goto freelock;
+
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
+ ret = EINVAL;
+ __db_err(dbenv, "Freeing locker with locks");
+ goto freelock;
+ }
+
+ /* If this is part of a family, we must fix up its links. */
+ if (sh_locker->master_locker != INVALID_ROFF)
+ SH_LIST_REMOVE(sh_locker, child_link, __db_locker);
+
+ __lock_freelocker(lt, region, sh_locker, indx);
+
+freelock:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_freelocker
+ *	Common code for deleting a locker.
+ *
+ * This must be called with the locker bucket locked.
+ */
+static void
+__lock_freelocker(lt, region, sh_locker, indx)
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ DB_LOCKER *sh_locker;
+ u_int32_t indx;
+
+{
+ HASHREMOVE_EL(
+ lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ SH_TAILQ_REMOVE(&region->lockers, sh_locker, ulinks, __db_locker);
+ region->stat.st_nlockers--;
+}
+
+/*
+ * __lock_set_timeout
+ * -- set timeout values in shared memory.
+ * This is called from the transaction system.
+ * We either set the time that this transaction expires or the
+ * amount of time that a lock for this transaction is permitted
+ * to wait.
+ *
+ * PUBLIC: int __lock_set_timeout __P(( DB_ENV *,
+ * PUBLIC: u_int32_t, db_timeout_t, u_int32_t));
+ */
+int
+__lock_set_timeout(dbenv, locker, timeout, op)
+ DB_ENV *dbenv;
+ u_int32_t locker;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ ret = __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker);
+ UNLOCKREGION(dbenv, lt);
+ if (ret != 0)
+ return (ret);
+
+ if (op == DB_SET_TXN_TIMEOUT) {
+ if (timeout == 0)
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ else
+ __lock_expires(dbenv, &sh_locker->tx_expire, timeout);
+ } else if (op == DB_SET_LOCK_TIMEOUT) {
+ sh_locker->lk_timeout = timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ } else if (op == DB_SET_TXN_NOW) {
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ __lock_expires(dbenv, &sh_locker->tx_expire, 0);
+ sh_locker->lk_expire = sh_locker->tx_expire;
+ } else
+ return (EINVAL);
+
+ return (0);
+}
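+
+#if 0
+/*
+ * Editorial sketch, not part of the original Berkeley DB source and kept
+ * out of compilation: __lock_set_timeout is normally reached through the
+ * public timeout interfaces.  This uses the DB_ENV->set_timeout method
+ * installed in lock_method.c; timeouts are in microseconds, matching the
+ * arithmetic in __lock_expires.
+ */
+static int
+example_timeout_setup(dbenv)
+	DB_ENV *dbenv;
+{
+	int ret;
+
+	/* Any single lock request may wait at most one second. */
+	if ((ret = dbenv->set_timeout(dbenv,
+	    1000000, DB_SET_LOCK_TIMEOUT)) != 0)
+		return (ret);
+
+	/* Transactions expire five seconds after their locker is created. */
+	return (dbenv->set_timeout(dbenv, 5000000, DB_SET_TXN_TIMEOUT));
+}
+#endif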
+
+/*
+ * __lock_inherit_timeout
+ * -- inherit timeout values from parent locker.
+ * This is called from the transaction system. This will
+ * return EINVAL if the parent does not exist or did not
+ * have a current txn timeout set.
+ *
+ * PUBLIC: int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_inherit_timeout(dbenv, parent, locker)
+ DB_ENV *dbenv;
+ u_int32_t parent, locker;
+{
+ DB_LOCKER *parent_locker, *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ ret = 0;
+ LOCKREGION(dbenv, lt);
+
+ /* If the parent does not exist, we are done. */
+ LOCKER_LOCK(lt, region, parent, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ parent, locker_ndx, 0, &parent_locker)) != 0)
+ goto err;
+
+ /*
+	 * If the parent is not there yet, that's OK. If it
+	 * does not have any timeouts set, then avoid creating
+ * the child locker at this point.
+ */
+ if (parent_locker == NULL ||
+ (LOCK_TIME_ISVALID(&parent_locker->tx_expire) &&
+ !F_ISSET(parent_locker, DB_LOCKER_TIMEOUT))) {
+ ret = EINVAL;
+ goto done;
+ }
+
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, locker_ndx, 1, &sh_locker)) != 0)
+ goto err;
+
+ sh_locker->tx_expire = parent_locker->tx_expire;
+
+ if (F_ISSET(parent_locker, DB_LOCKER_TIMEOUT)) {
+ sh_locker->lk_timeout = parent_locker->lk_timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ if (!LOCK_TIME_ISVALID(&parent_locker->tx_expire))
+ ret = EINVAL;
+ }
+
+done:
+err:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_getlocker --
+ * Get a locker in the locker hash table. The create parameter
+ * indicates if the locker should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the locker bucket locked.
+ *
+ * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *,
+ * PUBLIC: u_int32_t, u_int32_t, int, DB_LOCKER **));
+ */
+int
+__lock_getlocker(lt, locker, indx, create, retp)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, indx;
+ int create;
+ DB_LOCKER **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ HASHLOOKUP(lt->locker_tab,
+ indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp);
+
+ /*
+ * If we found the locker, then we can just return it. If
+ * we didn't find the locker, then we need to create it.
+ */
+ if (sh_locker == NULL && create) {
+ /* Create new locker and then insert it into hash table. */
+ if ((sh_locker = SH_TAILQ_FIRST(
+ &region->free_lockers, __db_locker)) == NULL) {
+ __db_err(dbenv, __db_lock_err, "locker entries");
+ return (ENOMEM);
+ }
+ SH_TAILQ_REMOVE(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ if (++region->stat.st_nlockers > region->stat.st_maxnlockers)
+ region->stat.st_maxnlockers = region->stat.st_nlockers;
+
+ sh_locker->id = locker;
+ sh_locker->dd_id = 0;
+ sh_locker->master_locker = INVALID_ROFF;
+ sh_locker->parent_locker = INVALID_ROFF;
+ SH_LIST_INIT(&sh_locker->child_locker);
+ sh_locker->flags = 0;
+ SH_LIST_INIT(&sh_locker->heldby);
+ sh_locker->nlocks = 0;
+ sh_locker->nwrites = 0;
+ sh_locker->lk_timeout = 0;
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ if (locker < TXN_MINIMUM && region->tx_timeout != 0)
+ __lock_expires(dbenv,
+ &sh_locker->tx_expire, region->tx_timeout);
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(&region->lockers,
+ sh_locker, ulinks, __db_locker);
+ }
+
+ *retp = sh_locker;
+ return (0);
+}
+
+/*
+ * __lock_getobj --
+ * Get an object in the object hash table. The create parameter
+ * indicates if the object should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the object bucket locked.
+ */
+static int
+__lock_getobj(lt, obj, ndx, create, retp)
+ DB_LOCKTAB *lt;
+ const DBT *obj;
+ u_int32_t ndx;
+ int create;
+ DB_LOCKOBJ **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int ret;
+ void *p;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ /* Look up the object in the hash table. */
+ HASHLOOKUP(lt->obj_tab,
+ ndx, __db_lockobj, links, obj, sh_obj, __lock_cmp);
+
+ /*
+ * If we found the object, then we can just return it. If
+ * we didn't find the object, then we need to create it.
+ */
+ if (sh_obj == NULL && create) {
+ /* Create new object and then insert it into hash table. */
+ if ((sh_obj =
+ SH_TAILQ_FIRST(&region->free_objs, __db_lockobj)) == NULL) {
+ __db_err(lt->dbenv, __db_lock_err, "object entries");
+ ret = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * If we can fit this object in the structure, do so instead
+ * of shalloc-ing space for it.
+ */
+ if (obj->size <= sizeof(sh_obj->objdata))
+ p = sh_obj->objdata;
+ else if ((ret = __db_shalloc(
+ lt->reginfo.addr, obj->size, 0, &p)) != 0) {
+ __db_err(dbenv, "No space for lock object storage");
+ goto err;
+ }
+
+ memcpy(p, obj->data, obj->size);
+
+ SH_TAILQ_REMOVE(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ if (++region->stat.st_nobjects > region->stat.st_maxnobjects)
+ region->stat.st_maxnobjects = region->stat.st_nobjects;
+
+ SH_TAILQ_INIT(&sh_obj->waiters);
+ SH_TAILQ_INIT(&sh_obj->holders);
+ sh_obj->lockobj.size = obj->size;
+ sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p);
+
+ HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj);
+ }
+
+ *retp = sh_obj;
+ return (0);
+
+err: return (ret);
+}
+
+/*
+ * __lock_is_parent --
+ * Given a locker and a transaction, return 1 if the locker is
+ *	an ancestor of the designated transaction. This is used to determine
+ * if we should grant locks that appear to conflict, but don't because
+ * the lock is already held by an ancestor.
+ */
+static int
+__lock_is_parent(lt, locker, sh_locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ DB_LOCKER *parent;
+
+ parent = sh_locker;
+ while (parent->parent_locker != INVALID_ROFF) {
+ parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, parent->parent_locker);
+ if (parent->id == locker)
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_promote --
+ *
+ * Look through the waiters and holders lists and decide which (if any)
+ * locks can be promoted. Promote any that are eligible.
+ *
+ * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+ */
+int
+__lock_promote(lt, obj, flags)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *obj;
+ u_int32_t flags;
+{
+ struct __db_lock *lp_w, *lp_h, *next_waiter;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int had_waiters, state_changed;
+
+ region = lt->reginfo.primary;
+ had_waiters = 0;
+
+ /*
+ * We need to do lock promotion. We also need to determine if we're
+ * going to need to run the deadlock detector again. If we release
+ * locks, and there are waiters, but no one gets promoted, then we
+ * haven't fundamentally changed the lockmgr state, so we may still
+ * have a deadlock and we have to run again. However, if there were
+ * no waiters, or we actually promoted someone, then we are OK and we
+ * don't have to run it immediately.
+ *
+ * During promotion, we look for state changes so we can return this
+ * information to the caller.
+ */
+
+ for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock),
+ state_changed = lp_w == NULL;
+ lp_w != NULL;
+ lp_w = next_waiter) {
+ had_waiters = 1;
+ next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
+
+ /* Waiter may have aborted or expired. */
+ if (lp_w->status != DB_LSTAT_WAITING)
+ continue;
+ /* Are we switching locks? */
+ if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
+ continue;
+
+ if (LF_ISSET(DB_LOCK_REMOVE)) {
+ __lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
+ continue;
+ }
+ for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
+ lp_h != NULL;
+ lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
+ if (lp_h->holder != lp_w->holder &&
+ CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
+ LOCKER_LOCK(lt,
+ region, lp_w->holder, locker_ndx);
+ if ((__lock_getlocker(lt, lp_w->holder,
+ locker_ndx, 0, &sh_locker)) != 0) {
+ DB_ASSERT(0);
+ break;
+ }
+ if (!__lock_is_parent(lt,
+ lp_h->holder, sh_locker))
+ break;
+ }
+ }
+ if (lp_h != NULL) /* Found a conflict. */
+ break;
+
+ /* No conflict, promote the waiting lock. */
+ SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock);
+ lp_w->status = DB_LSTAT_PENDING;
+ SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
+
+ /* Wake up waiter. */
+ MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
+ state_changed = 1;
+ }
+
+ /*
+ * If this object had waiters and doesn't any more, then we need
+ * to remove it from the dd_obj list.
+ */
+ if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
+ return (state_changed);
+}
+
+/*
+ * __lock_remove_waiter --
+ * Any lock on the waitlist has a process waiting for it. Therefore,
+ * we can't return the lock to the freelist immediately. Instead, we can
+ * remove the lock from the list of waiters, set the status field of the
+ * lock, and then let the process waking up return the lock to the
+ * free list.
+ *
+ * This must be called with the object bucket locked.
+ */
+static void
+__lock_remove_waiter(lt, sh_obj, lockp, status)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *sh_obj;
+ struct __db_lock *lockp;
+ db_status_t status;
+{
+ DB_LOCKREGION *region;
+ int do_wakeup;
+
+ region = lt->reginfo.primary;
+
+ do_wakeup = lockp->status == DB_LSTAT_WAITING;
+
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+ lockp->links.stqe_prev = -1;
+ lockp->status = status;
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(
+ &region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ /*
+ * Wake whoever is waiting on this lock.
+ *
+ * The MUTEX_UNLOCK macro normally resolves to a single argument,
+ * keep the compiler quiet.
+ */
+ if (do_wakeup)
+ MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
+}
+
+/*
+ * __lock_expires -- set the expire time given the time to live.
+ * We assume that if timevalp is set then it contains "now".
+ * This avoids repeated system calls to get the time.
+ */
+static void
+__lock_expires(dbenv, timevalp, timeout)
+ DB_ENV *dbenv;
+ db_timeval_t *timevalp;
+ db_timeout_t timeout;
+{
+ if (!LOCK_TIME_ISVALID(timevalp))
+ __os_clock(dbenv, &timevalp->tv_sec, &timevalp->tv_usec);
+ if (timeout > 1000000) {
+ timevalp->tv_sec += timeout / 1000000;
+ timevalp->tv_usec += timeout % 1000000;
+ } else
+ timevalp->tv_usec += timeout;
+
+ if (timevalp->tv_usec > 1000000) {
+ timevalp->tv_sec++;
+ timevalp->tv_usec -= 1000000;
+ }
+}
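+
+/*
+ * Editorial note, not part of the original Berkeley DB source: a worked
+ * example of the arithmetic above, with the timeout in microseconds.  If
+ * "now" is { tv_sec = 100, tv_usec = 700000 } and timeout is 2500000
+ * (2.5 seconds), the first branch adds 2 seconds and 500000 microseconds,
+ * giving { 102, 1200000 }; the final carry then normalizes the result to
+ * { 103, 200000 }.
+ */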
+
+/*
+ * __lock_expired -- determine if a lock has expired.
+ *
+ * PUBLIC: int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *));
+ */
+int
+__lock_expired(dbenv, now, timevalp)
+ DB_ENV *dbenv;
+ db_timeval_t *now, *timevalp;
+{
+ if (!LOCK_TIME_ISVALID(timevalp))
+ return (0);
+
+ if (!LOCK_TIME_ISVALID(now))
+ __os_clock(dbenv, &now->tv_sec, &now->tv_usec);
+
+ return (now->tv_sec > timevalp->tv_sec ||
+ (now->tv_sec == timevalp->tv_sec &&
+ now->tv_usec >= timevalp->tv_usec));
+}
+
+/*
+ * __lock_trade --
+ *
+ * Trade locker ids on a lock. This is used to reassign file locks from
+ * a transactional locker id to a long-lived locker id. This should be
+ * called with the region mutex held.
+ */
+static int
+__lock_trade(dbenv, lock, new_locker)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ u_int32_t new_locker;
+{
+ struct __db_lock *lp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCKER *sh_locker;
+ int ret;
+ u_int32_t locker_ndx;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+
+ /* If the lock is already released, simply return. */
+ if (lp->gen != lock->gen)
+ return (DB_NOTFOUND);
+
+ /* Make sure that we can get new locker and add this lock to it. */
+ LOCKER_LOCK(lt, region, new_locker, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, new_locker, locker_ndx, 0, &sh_locker)) != 0)
+ return (ret);
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ return (EINVAL);
+ }
+
+ /* Remove the lock from its current locker. */
+ if ((ret = __lock_checklocker(lt, lp, lp->holder, DB_LOCK_UNLINK)) != 0)
+ return (ret);
+
+ /* Add lock to its new locker. */
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, lp, locker_links, __db_lock);
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->holder = new_locker;
+
+ return (0);
+}
diff --git a/libdb/lock/lock_deadlock.c b/libdb/lock/lock_deadlock.c
new file mode 100644
index 0000000..6ec9ebe
--- /dev/null
+++ b/libdb/lock/lock_deadlock.c
@@ -0,0 +1,886 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+#include "dbinc/rep.h"
+
+#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << (N) % 32))
+
+#define CLEAR_MAP(M, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ (M)[__i] = 0; \
+}
+
+#define SET_MAP(M, B) ((M)[(B) / 32] |= (1 << ((B) % 32)))
+#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1 << ((B) % 32)))
+
+#define OR_MAP(D, S, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ D[__i] |= S[__i]; \
+}
+#define BAD_KILLID 0xffffffff
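+
+/*
+ * Editorial note, not part of the original Berkeley DB source: the maps
+ * above pack one bit per locker into 32-bit words, so locker N lives in
+ * word N / 32 at bit N % 32.  For example, SET_MAP(M, 37) sets bit 5 of
+ * M[1] and ISSET_MAP(M, 37) tests that same bit.
+ */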
+
+typedef struct {
+ int valid;
+ int self_wait;
+ u_int32_t count;
+ u_int32_t id;
+ u_int32_t last_lock;
+ u_int32_t last_locker_id;
+ db_pgno_t pgno;
+} locker_info;
+
+static int __dd_abort __P((DB_ENV *, locker_info *));
+static int __dd_build __P((DB_ENV *,
+ u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
+static int __dd_find __P((DB_ENV *,
+ u_int32_t *, locker_info *, u_int32_t, u_int32_t, u_int32_t ***));
+static int __dd_isolder __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+static int __dd_verify __P((locker_info *, u_int32_t *, u_int32_t *,
+ u_int32_t *, u_int32_t, u_int32_t, u_int32_t));
+
+#ifdef DIAGNOSTIC
+static void __dd_debug
+ __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t, u_int32_t));
+#endif
+
+/*
+ * lock_detect --
+ *
+ * PUBLIC: int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ */
+int
+__lock_detect(dbenv, flags, atype, abortp)
+ DB_ENV *dbenv;
+ u_int32_t flags, atype;
+ int *abortp;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_TXNMGR *tmgr;
+ locker_info *idmap;
+ u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
+ u_int32_t i, keeper, killid, limit, nalloc, nlockers;
+ u_int32_t lock_max, txn_max;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_detect", DB_INIT_LOCK);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_detect", flags, 0)) != 0)
+ return (ret);
+ switch (atype) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->lock_detect: unknown deadlock detection mode specified");
+ return (EINVAL);
+ }
+
+ /*
+ * If this environment is a replication client, then we must use the
+ * MINWRITE detection discipline.
+ */
+ if (__rep_is_client(dbenv))
+ atype = DB_LOCK_MINWRITE;
+
+ free_me = NULL;
+
+ lt = dbenv->lk_handle;
+ if (abortp != NULL)
+ *abortp = 0;
+
+ /* Check if a detector run is necessary. */
+ LOCKREGION(dbenv, lt);
+
+ /* Make a pass only if auto-detect would run. */
+ region = lt->reginfo.primary;
+
+ if (region->need_dd == 0) {
+ UNLOCKREGION(dbenv, lt);
+ return (0);
+ }
+
+ /* Reset need_dd, so we know we've run the detector. */
+ region->need_dd = 0;
+
+ /* Build the waits-for bitmap. */
+ ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
+ lock_max = region->stat.st_cur_maxid;
+ UNLOCKREGION(dbenv, lt);
+
+ /*
+ * We need the cur_maxid from the txn region as well. In order
+ * to avoid tricky synchronization between the lock and txn
+ * regions, we simply unlock the lock region and then lock the
+ * txn region. This introduces a small window during which the
+ * transaction system could then wrap. We're willing to return
+ * the wrong answer for "oldest" or "youngest" in those rare
+ * circumstances.
+ */
+ tmgr = dbenv->tx_handle;
+ if (tmgr != NULL) {
+ R_LOCK(dbenv, &tmgr->reginfo);
+ txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
+ R_UNLOCK(dbenv, &tmgr->reginfo);
+ } else
+ txn_max = TXN_MAXIMUM;
+ if (ret != 0 || atype == DB_LOCK_EXPIRE)
+ return (ret);
+
+ if (nlockers == 0)
+ return (0);
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
+ __dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
+#endif
+ /* Now duplicate the bitmaps so we can verify deadlock participants. */
+ if ((ret = __os_calloc(dbenv, (size_t)nlockers,
+ sizeof(u_int32_t) * nalloc, &copymap)) != 0)
+ goto err;
+ memcpy(copymap, bitmap, nlockers * sizeof(u_int32_t) * nalloc);
+
+ if ((ret = __os_calloc(dbenv, sizeof(u_int32_t), nalloc, &tmpmap)) != 0)
+ goto err1;
+
+ /* Find a deadlock. */
+ if ((ret =
+ __dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
+ return (ret);
+
+ killid = BAD_KILLID;
+ free_me = deadp;
+ for (; *deadp != NULL; deadp++) {
+ if (abortp != NULL)
+ ++*abortp;
+ killid = (u_int32_t)((*deadp - bitmap) / nalloc);
+ limit = killid;
+ keeper = BAD_KILLID;
+
+ if (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM)
+ goto dokill;
+ /*
+ * It's conceivable that under XA, the locker could
+ * have gone away.
+ */
+ if (killid == BAD_KILLID)
+ break;
+
+ /*
+ * Start with the id that we know is deadlocked
+ * and then examine all other set bits and see
+		 * if any is a better candidate to abort and
+		 * whether it is genuinely part of the
+ * deadlock. The definition of "best":
+ * OLDEST: smallest id
+ * YOUNGEST: largest id
+ * MAXLOCKS: maximum count
+ * MINLOCKS: minimum count
+ * MINWRITE: minimum count
+ */
+
+ for (i = (killid + 1) % nlockers;
+ i != limit;
+ i = (i + 1) % nlockers) {
+ if (!ISSET_MAP(*deadp, i))
+ continue;
+ switch (atype) {
+ case DB_LOCK_OLDEST:
+ if (__dd_isolder(idmap[killid].id,
+ idmap[i].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_YOUNGEST:
+ if (__dd_isolder(idmap[i].id,
+ idmap[killid].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MAXLOCKS:
+ if (idmap[i].count < idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ if (idmap[i].count > idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ default:
+ killid = BAD_KILLID;
+ ret = EINVAL;
+ goto dokill;
+ }
+ if (__dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, i))
+ killid = i;
+ }
+
+dokill: if (killid == BAD_KILLID)
+ continue;
+
+ /*
+ * There are cases in which our general algorithm will
+ * fail. Returning 1 from verify indicates that the
+ * particular locker is not only involved in a deadlock,
+ * but that killing him will allow others to make forward
+ * progress. Unfortunately, there are cases where we need
+ * to abort someone, but killing them will not necessarily
+ * ensure forward progress (imagine N readers all trying to
+ * acquire a write lock). In such a scenario, we'll have
+ * gotten all the way through the loop, we will have found
+ * someone to keep (keeper will be valid), but killid will
+ * still be the initial deadlocker. In this case, if the
+ * initial killid satisfies __dd_verify, kill it, else abort
+ * keeper and indicate that we need to run deadlock detection
+ * again.
+ */
+
+ if (keeper != BAD_KILLID && killid == limit &&
+ __dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, killid) == 0) {
+ LOCKREGION(dbenv, lt);
+ region->need_dd = 1;
+ UNLOCKREGION(dbenv, lt);
+ killid = keeper;
+ }
+
+ /* Kill the locker with lockid idmap[killid]. */
+ if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
+ /*
+ * It's possible that the lock was already aborted;
+ * this isn't necessarily a problem, so do not treat
+ * it as an error.
+ */
+ if (ret == DB_ALREADY_ABORTED)
+ ret = 0;
+ else
+ __db_err(dbenv,
+ "warning: unable to abort locker %lx",
+ (u_long)idmap[killid].id);
+ } else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv,
+ "Aborting locker %lx", (u_long)idmap[killid].id);
+ }
+ __os_free(dbenv, tmpmap);
+err1: __os_free(dbenv, copymap);
+
+err: if (free_me != NULL)
+ __os_free(dbenv, free_me);
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, idmap);
+
+ return (ret);
+}
+
+/*
+ * ========================================================================
+ * Utilities
+ */
+
+# define DD_INVALID_ID ((u_int32_t) -1)
+
+static int
+__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
+ DB_ENV *dbenv;
+ u_int32_t atype, **bmp, *nlockers, *allocp;
+ locker_info **idmap;
+{
+ struct __db_lock *lp;
+ DB_LOCKER *lip, *lockerp, *child;
+ DB_LOCKOBJ *op, *lo;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ locker_info *id_array;
+ db_timeval_t now;
+ u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
+ u_int8_t *pptr;
+ int expire_only, is_first, need_timeout, ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCK_SET_TIME_INVALID(&now);
+ need_timeout = 0;
+ expire_only = atype == DB_LOCK_EXPIRE;
+
+ /*
+ * While we always check for expired timeouts, if we are called
+ * with DB_LOCK_EXPIRE, then we are only checking for timeouts
+ * (i.e., not doing deadlock detection at all). If we aren't
+	 * doing real deadlock detection, then we can skip a significant
+	 * amount of the processing. In particular, we do not build
+ * the conflict array and our caller needs to expect this.
+ */
+ if (expire_only) {
+ count = 0;
+ nentries = 0;
+ goto obj_loop;
+ }
+
+ /*
+ * We'll check how many lockers there are, add a few more in for
+ * good measure and then allocate all the structures. Then we'll
+ * verify that we have enough room when we go back in and get the
+ * mutex the second time.
+ */
+retry: count = region->stat.st_nlockers;
+
+ if (count == 0) {
+ *nlockers = 0;
+ return (0);
+ }
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv, "%lu lockers", (u_long)count);
+
+ count += 20;
+ nentries = ALIGN(count, 32) / 32;
+
+ /*
+ * Allocate enough space for a count by count bitmap matrix.
+ *
+ * XXX
+	 * We could probably save the mallocs between iterations by
+	 * reallocating only when count has grown too much.
+ */
+ if ((ret = __os_calloc(dbenv, (size_t)count,
+ sizeof(u_int32_t) * nentries, &bitmap)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv,
+ sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
+ __os_free(dbenv, bitmap);
+ return (ret);
+ }
+
+ if ((ret = __os_calloc(dbenv,
+ (size_t)count, sizeof(locker_info), &id_array)) != 0) {
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
+ return (ret);
+ }
+
+ /*
+ * Now go back in and actually fill in the matrix.
+ */
+ if (region->stat.st_nlockers > count) {
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
+ __os_free(dbenv, id_array);
+ goto retry;
+ }
+
+ /*
+ * First we go through and assign each locker a deadlock detector id.
+ */
+ for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
+ if (F_ISSET(lip, DB_LOCKER_INABORT))
+ continue;
+ if (lip->master_locker == INVALID_ROFF) {
+ lip->dd_id = id++;
+ id_array[lip->dd_id].id = lip->id;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[lip->dd_id].count = lip->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[lip->dd_id].count = lip->nwrites;
+ } else
+ lip->dd_id = DD_INVALID_ID;
+
+ }
+
+ /*
+	 * We only need to consider objects that have waiters, so we use
+ * the list of objects with waiters (dd_objs) instead of traversing
+ * the entire hash table. For each object, we traverse the waiters
+ * list and add an entry in the waitsfor matrix for each waiter/holder
+ * combination.
+ */
+obj_loop:
+ for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
+ op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
+ if (expire_only)
+ goto look_waiters;
+ CLEAR_MAP(tmpmap, nentries);
+
+ /*
+ * First we go through and create a bit map that
+ * represents all the holders of this object.
+ */
+ for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (F_ISSET(lockerp, DB_LOCKER_INABORT))
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+
+ } else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the holder has already been aborted, then
+ * we should ignore it for now.
+ */
+ if (lp->status == DB_LSTAT_HELD)
+ SET_MAP(tmpmap, dd);
+ }
+
+ /*
+ * Next, for each waiter, we set its row in the matrix
+ * equal to the map of holders we set up above.
+ */
+look_waiters:
+ for (is_first = 1,
+ lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ is_first = 0,
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (lp->status == DB_LSTAT_WAITING) {
+ if (__lock_expired(dbenv,
+ &now, &lockerp->lk_expire)) {
+ lp->status = DB_LSTAT_EXPIRED;
+ MUTEX_UNLOCK(dbenv, &lp->mutex);
+ continue;
+ }
+ need_timeout =
+ LOCK_TIME_ISVALID(&lockerp->lk_expire);
+ }
+
+ if (expire_only)
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+ } else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the transaction is pending abortion, then
+ * ignore it on this iteration.
+ */
+ if (lp->status != DB_LSTAT_WAITING)
+ continue;
+
+ entryp = bitmap + (nentries * dd);
+ OR_MAP(entryp, tmpmap, nentries);
+ /*
+ * If this is the first waiter on the queue,
+ * then we remove the waitsfor relationship
+ * with oneself. However, if it's anywhere
+ * else on the queue, then we have to keep
+ * it and we have an automatic deadlock.
+ */
+ if (is_first) {
+ if (ISSET_MAP(entryp, dd))
+ id_array[dd].self_wait = 1;
+ CLR_MAP(entryp, dd);
+ }
+ }
+ }
+
+ if (expire_only) {
+ region->need_dd = need_timeout;
+ return (0);
+ }
+
+ /* Now for each locker; record its last lock. */
+ for (id = 0; id < count; id++) {
+ if (!id_array[id].valid)
+ continue;
+ LOCKER_LOCK(lt, region, id_array[id].id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ id_array[id].id, ndx, 0, &lockerp)) != 0) {
+ __db_err(dbenv,
+ "No locks for locker %lu", (u_long)id_array[id].id);
+ continue;
+ }
+
+ /*
+ * If this is a master transaction, try to
+ * find one of its children's locks first,
+ * as they are probably more recent.
+ */
+ child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
+ if (child != NULL) {
+ do {
+ lp = SH_LIST_FIRST(&child->heldby, __db_lock);
+ if (lp != NULL &&
+ lp->status == DB_LSTAT_WAITING) {
+ id_array[id].last_locker_id = child->id;
+ goto get_lock;
+ }
+ child = SH_LIST_NEXT(
+ child, child_link, __db_locker);
+ } while (child != NULL);
+ }
+ lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
+ if (lp != NULL) {
+ id_array[id].last_locker_id = lockerp->id;
+ get_lock: id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
+ lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ pptr = SH_DBT_PTR(&lo->lockobj);
+ if (lo->lockobj.size >= sizeof(db_pgno_t))
+ memcpy(&id_array[id].pgno,
+ pptr, sizeof(db_pgno_t));
+ else
+ id_array[id].pgno = 0;
+ }
+ }
+
+ /*
+ * Pass complete, reset the deadlock detector bit,
+ * unless we have pending timeouts.
+ */
+ region->need_dd = need_timeout;
+
+ /*
+ * Now we can release everything except the bitmap matrix that we
+ * created.
+ */
+ *nlockers = id;
+ *idmap = id_array;
+ *bmp = bitmap;
+ *allocp = nentries;
+ __os_free(dbenv, tmpmap);
+ return (0);
+}
+
+static int
+__dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
+ DB_ENV *dbenv;
+ u_int32_t *bmp, nlockers, nalloc;
+ locker_info *idmap;
+ u_int32_t ***deadp;
+{
+ u_int32_t i, j, k, *mymap, *tmpmap;
+ u_int32_t **retp;
+ int ndead, ndeadalloc, ret;
+
+#undef INITIAL_DEAD_ALLOC
+#define INITIAL_DEAD_ALLOC 8
+
+ ndeadalloc = INITIAL_DEAD_ALLOC;
+ ndead = 0;
+ if ((ret = __os_malloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t *), &retp)) != 0)
+ return (ret);
+
+ /*
+ * For each locker, OR in the bits from the lockers on which that
+ * locker is waiting.
+ */
+ for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) {
+ if (!idmap[i].valid)
+ continue;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(mymap, j))
+ continue;
+
+ /* Find the map for this bit. */
+ tmpmap = bmp + (nalloc * j);
+ OR_MAP(mymap, tmpmap, nalloc);
+ if (!ISSET_MAP(mymap, i))
+ continue;
+
+ /* Make sure we leave room for NULL. */
+ if (ndead + 2 >= ndeadalloc) {
+ ndeadalloc <<= 1;
+ /*
+ * If the alloc fails, then simply return the
+ * deadlocks that we already have.
+ */
+ if (__os_realloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t),
+ &retp) != 0) {
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+ }
+ }
+ retp[ndead++] = mymap;
+
+ /* Mark all participants in this deadlock invalid. */
+ for (k = 0; k < nlockers; k++)
+ if (ISSET_MAP(mymap, k))
+ idmap[k].valid = 0;
+ break;
+ }
+ }
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+}
+
+static int
+__dd_abort(dbenv, info)
+ DB_ENV *dbenv;
+ locker_info *info;
+{
+ struct __db_lock *lockp;
+ DB_LOCKER *lockerp;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+
+ /* Find the locker's last lock. */
+ LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ info->last_locker_id, ndx, 0, &lockerp)) != 0 || lockerp == NULL) {
+ if (ret == 0)
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ /* It's possible that this locker was already aborted. */
+ if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+ if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
+ lockp->status != DB_LSTAT_WAITING) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+
+	/* Abort the lock, take it off the waiters list, and wake its waiter. */
+ SHOBJECT_LOCK(lt, region, sh_obj, ndx);
+ lockp->status = DB_LSTAT_ABORTED;
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+
+ /*
+ * Either the waiters list is now empty, in which case we remove
+ * it from dd_objs, or it is not empty, in which case we need to
+ * do promotion.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ else
+ ret = __lock_promote(lt, sh_obj, 0);
+ MUTEX_UNLOCK(dbenv, &lockp->mutex);
+
+ region->stat.st_ndeadlocks++;
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+
+out: UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+#ifdef DIAGNOSTIC
+static void
+__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc)
+ DB_ENV *dbenv;
+ locker_info *idmap;
+ u_int32_t *bitmap, nlockers, nalloc;
+{
+ u_int32_t i, j, *mymap;
+ char *msgbuf;
+
+ __db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
+
+ /* Allocate space to print 10 bytes per item waited on. */
+#undef MSGBUF_LEN
+#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
+ if (__os_malloc(dbenv, MSGBUF_LEN, &msgbuf) != 0)
+ return;
+
+ for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) {
+ if (!idmap[i].valid)
+ continue;
+ sprintf(msgbuf, /* Waiter. */
+ "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
+ for (j = 0; j < nlockers; j++)
+ if (ISSET_MAP(mymap, j))
+ sprintf(msgbuf, "%s %lx", msgbuf,
+ (u_long)idmap[j].id);
+ (void)sprintf(msgbuf,
+ "%s %lu", msgbuf, (u_long)idmap[i].last_lock);
+ __db_err(dbenv, msgbuf);
+ }
+
+ __os_free(dbenv, msgbuf);
+}
+#endif
+
+/*
+ * Given a bitmap that contains a deadlock, verify that the bit
+ * specified in the which parameter indicates a transaction that
+ * is actually deadlocked. Return 1 if really deadlocked, 0 otherwise.
+ * deadmap is the array that identified the deadlock.
+ * tmpmap is a scratch bit map into which we can OR things
+ * origmap is a copy of the initial bitmaps from the dd_build phase
+ * nlockers is the number of actual lockers under consideration
+ * nalloc is the number of words allocated for the bitmap
+ * which is the locker in question
+ */
+static int
+__dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which)
+ locker_info *idmap;
+ u_int32_t *deadmap, *tmpmap, *origmap;
+ u_int32_t nlockers, nalloc, which;
+{
+ u_int32_t *tmap;
+ u_int32_t j;
+ int count;
+
+ memset(tmpmap, 0, sizeof(u_int32_t) * nalloc);
+
+ /*
+ * In order for "which" to be actively involved in
+ * the deadlock, removing him from the evaluation
+ * must remove the deadlock. So, we OR together everyone
+ * except which; if all the participants still have their
+ * bits set, then the deadlock persists and which does
+ * not participate. If the deadlock does not persist
+ * then "which" does participate.
+ */
+ count = 0;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+
+ /* Find the map for this bit. */
+ tmap = origmap + (nalloc * j);
+
+ /*
+ * We special case the first waiter who is also a holder, so
+ * we don't automatically call that a deadlock. However, if
+ * it really is a deadlock, we need the bit set now so that
+ * we treat the first waiter like other waiters.
+ */
+ if (idmap[j].self_wait)
+ SET_MAP(tmap, j);
+ OR_MAP(tmpmap, tmap, nalloc);
+ count++;
+ }
+
+ if (count == 1)
+ return (1);
+
+ /*
+ * Now check the resulting map and see whether
+ * all participants still have their bit set.
+ */
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+ if (!ISSET_MAP(tmpmap, j))
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * __dd_isolder --
+ *
+ * Figure out the relative age of two lockers. We make all lockers
+ * older than all transactions, because that's how it's worked
+ * historically (because lockers are lower ids).
+ */
+static int
+__dd_isolder(a, b, lock_max, txn_max)
+ u_int32_t a, b;
+ u_int32_t lock_max, txn_max;
+{
+ u_int32_t max;
+
+ /* Check for comparing lock-id and txnid. */
+ if (a <= DB_LOCK_MAXID && b > DB_LOCK_MAXID)
+ return (1);
+ if (b <= DB_LOCK_MAXID && a > DB_LOCK_MAXID)
+ return (0);
+
+ /* In the same space; figure out which one. */
+ max = txn_max;
+ if (a <= DB_LOCK_MAXID)
+ max = lock_max;
+
+ /*
+ * We can't get a 100% correct ordering, because we don't know
+	 * where the current interval started or whether there were older
+ * lockers outside the interval. We do the best we can.
+ */
+
+ /*
+ * Check for a wrapped case with ids above max.
+ */
+ if (a > max && b < max)
+ return (1);
+ if (b > max && a < max)
+ return (0);
+
+ return (a < b);
+}
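+
+/*
+ * Editorial note, not part of the original Berkeley DB source: a worked
+ * example of the wrap handling above.  Suppose both ids are lock ids and
+ * lock_max, the current maximum allocated id, is 1000.  An id such as
+ * 2000000005 can only have been handed out before the id space wrapped,
+ * so __dd_isolder(2000000005, 7, 1000, txn_max) returns 1 (older); two
+ * unwrapped ids such as 5 and 7 fall through to the plain a < b test.
+ */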
diff --git a/libdb/lock/lock_method.c b/libdb/lock/lock_method.c
new file mode 100644
index 0000000..529a420
--- /dev/null
+++ b/libdb/lock/lock_method.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
+static int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+static int __lock_set_env_timeout __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+/*
+ * __lock_dbenv_create --
+ * Lock specific creation of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ */
+void
+__lock_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->lk_max = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
+ dbenv->set_lk_detect = __dbcl_set_lk_detect;
+ dbenv->set_lk_max = __dbcl_set_lk_max;
+ dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
+ dbenv->lock_detect = __dbcl_lock_detect;
+ dbenv->lock_dump_region = NULL;
+ dbenv->lock_get = __dbcl_lock_get;
+ dbenv->lock_id = __dbcl_lock_id;
+ dbenv->lock_id_free = __dbcl_lock_id_free;
+ dbenv->lock_put = __dbcl_lock_put;
+ dbenv->lock_stat = __dbcl_lock_stat;
+ dbenv->lock_vec = __dbcl_lock_vec;
+ } else
+#endif
+ {
+ dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
+ dbenv->set_lk_detect = __lock_set_lk_detect;
+ dbenv->set_lk_max = __lock_set_lk_max;
+ dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
+ dbenv->set_timeout = __lock_set_env_timeout;
+ dbenv->lock_detect = __lock_detect;
+ dbenv->lock_dump_region = __lock_dump_region;
+ dbenv->lock_get = __lock_get;
+ dbenv->lock_id = __lock_id;
+ dbenv->lock_id_free = __lock_id_free;
+#ifdef CONFIG_TEST
+ dbenv->lock_id_set = __lock_id_set;
+#endif
+ dbenv->lock_put = __lock_put;
+ dbenv->lock_stat = __lock_stat;
+ dbenv->lock_vec = __lock_vec;
+ dbenv->lock_downgrade = __lock_downgrade;
+ }
+}
+
+/*
+ * __lock_dbenv_close --
+ * Lock specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_close __P((DB_ENV *));
+ */
+void
+__lock_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv, dbenv->lk_conflicts);
+ dbenv->lk_conflicts = NULL;
+ }
+}
+
+/*
+ * __lock_set_lk_conflicts
+ * Set the conflicts matrix.
+ */
+static int
+__lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
+ DB_ENV *dbenv;
+ u_int8_t *lk_conflicts;
+ int lk_modes;
+{
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_conflicts");
+
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv, dbenv->lk_conflicts);
+ dbenv->lk_conflicts = NULL;
+ }
+ if ((ret = __os_malloc(dbenv,
+ lk_modes * lk_modes, &dbenv->lk_conflicts)) != 0)
+ return (ret);
+ memcpy(dbenv->lk_conflicts, lk_conflicts, lk_modes * lk_modes);
+ dbenv->lk_modes = lk_modes;
+
+ return (0);
+}
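
/*
 * Illustrative usage sketch, not part of the patch: an application supplying
 * its own conflict matrix through the method installed above, before the
 * environment is opened.  The 3x3 matrix and mode numbering are invented for
 * the example; by convention the row is the mode held and the column the
 * mode requested.
 */
#include <db.h>

static u_int8_t my_conflicts[] = {
	/*	  N  R  W */
	/* N */	  0, 0, 0,
	/* R */	  0, 0, 1,
	/* W */	  0, 1, 1
};

int
configure_lock_modes(DB_ENV *dbenv)
{
	/* Must be called before DB_ENV->open. */
	return (dbenv->set_lk_conflicts(dbenv, my_conflicts, 3));
}
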
+
+/*
+ * __lock_set_lk_detect
+ * Set the automatic deadlock detection.
+ */
+static int
+__lock_set_lk_detect(dbenv, lk_detect)
+ DB_ENV *dbenv;
+ u_int32_t lk_detect;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_detect");
+
+ switch (lk_detect) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->set_lk_detect: unknown deadlock detection mode specified");
+ return (EINVAL);
+ }
+ dbenv->lk_detect = lk_detect;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max
+ * Set the lock table size.
+ */
+static int
+__lock_set_lk_max(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max");
+
+ dbenv->lk_max = lk_max;
+ dbenv->lk_max_objects = lk_max;
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_locks
+ *	Set the maximum number of locks.
+ */
+static int
+__lock_set_lk_max_locks(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_locks");
+
+ dbenv->lk_max = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_lockers
+ *	Set the maximum number of lockers.
+ */
+static int
+__lock_set_lk_max_lockers(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_lockers");
+
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_objects
+ *	Set the maximum number of lock objects.
+ */
+static int
+__lock_set_lk_max_objects(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_objects");
+
+ dbenv->lk_max_objects = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_env_timeout
+ * Set the lock environment timeout.
+ */
+static int
+__lock_set_env_timeout(dbenv, timeout, flags)
+ DB_ENV *dbenv;
+ db_timeout_t timeout;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+
+ region = NULL;
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (!LOCKING_ON(dbenv))
+ return (__db_env_config(
+ dbenv, "set_timeout", DB_INIT_LOCK));
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ }
+
+ switch (flags) {
+ case DB_SET_LOCK_TIMEOUT:
+ dbenv->lk_timeout = timeout;
+ if (region != NULL)
+ region->lk_timeout = timeout;
+ break;
+ case DB_SET_TXN_TIMEOUT:
+ dbenv->tx_timeout = timeout;
+ if (region != NULL)
+ region->tx_timeout = timeout;
+ break;
+ default:
+ return (__db_ferr(dbenv, "DB_ENV->set_timeout", 0));
+ /* NOTREACHED */
+ }
+
+ return (0);
+}
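
/*
 * Illustrative usage sketch, not part of the patch: setting the two timeout
 * classes handled by __lock_set_env_timeout above.  Timeouts are expressed
 * in microseconds; the values chosen here are arbitrary examples.
 */
#include <db.h>

int
configure_timeouts(DB_ENV *dbenv)
{
	int ret;

	/* Time out a lock request that has waited more than one second. */
	if ((ret = dbenv->set_timeout(dbenv, 1000000, DB_SET_LOCK_TIMEOUT)) != 0)
		return (ret);

	/* Consider a transaction expired after five seconds. */
	return (dbenv->set_timeout(dbenv, 5000000, DB_SET_TXN_TIMEOUT));
}
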
diff --git a/libdb/lock/lock_region.c b/libdb/lock/lock_region.c
new file mode 100644
index 0000000..9a53d58
--- /dev/null
+++ b/libdb/lock/lock_region.c
@@ -0,0 +1,417 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __lock_init __P((DB_ENV *, DB_LOCKTAB *));
+static size_t
+ __lock_region_size __P((DB_ENV *));
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static size_t __lock_region_maint __P((DB_ENV *));
+#endif
+
+/*
+ * The conflict arrays are set up such that the row is the lock you are
+ * holding and the column is the lock that is desired.
+ */
+#define DB_LOCK_RIW_N 9
+static const u_int8_t db_riw_conflicts[] = {
+/* N R W WT IW IR RIW DR WW */
+/* N */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* R */ 0, 0, 1, 0, 1, 0, 1, 0, 1,
+/* W */ 0, 1, 1, 1, 1, 1, 1, 1, 1,
+/* WT */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* IW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* IR */ 0, 0, 1, 0, 0, 0, 0, 0, 1,
+/* RIW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* DR */ 0, 0, 1, 0, 1, 0, 1, 0, 0,
+/* WW */ 0, 1, 1, 0, 1, 1, 1, 0, 1
+};
+
+/*
+ * This conflict array is used for concurrent db access (CDB). It uses
+ * the same locks as the db_riw_conflicts array, but adds an IW mode to
+ * be used for write cursors.
+ */
+#define DB_LOCK_CDB_N 5
+static const u_int8_t db_cdb_conflicts[] = {
+ /* N R W WT IW */
+ /* N */ 0, 0, 0, 0, 0,
+ /* R */ 0, 0, 1, 0, 0,
+ /* W */ 0, 1, 1, 1, 1,
+ /* WT */ 0, 0, 0, 0, 0,
+ /* IW */ 0, 0, 1, 0, 1
+};
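
/*
 * Illustrative sketch, not part of the patch: how a flat conflict array like
 * the two above is consulted.  The byte at [held * nmodes + requested] is
 * non-zero when the requested mode conflicts with the held mode and the
 * request must wait.  The 3-mode matrix and names are hypothetical.
 */
#include <stdio.h>

static int
modes_conflict(const unsigned char *matrix, int nmodes, int held, int requested)
{
	return (matrix[held * nmodes + requested] != 0);
}

int
main(void)
{
	/* Modes: 0 = no lock, 1 = read, 2 = write. */
	static const unsigned char m[] = {
		0, 0, 0,
		0, 0, 1,
		0, 1, 1
	};

	printf("read vs. write: %d\n", modes_conflict(m, 3, 1, 2));	/* 1 */
	printf("read vs. read:  %d\n", modes_conflict(m, 3, 1, 1));	/* 0 */
	return (0);
}
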
+
+/*
+ * __lock_open --
+ * Internal version of lock_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __lock_open __P((DB_ENV *));
+ */
+int
+__lock_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ size_t size;
+ int ret;
+
+ /* Create the lock table structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOCKTAB), &lt)) != 0)
+ return (ret);
+ lt->dbenv = dbenv;
+
+ /* Join/create the lock region. */
+ lt->reginfo.type = REGION_TYPE_LOCK;
+ lt->reginfo.id = INVALID_REGION_ID;
+ lt->reginfo.mode = dbenv->db_mode;
+ lt->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&lt->reginfo, REGION_CREATE_OK);
+ size = __lock_region_size(dbenv);
+ if ((ret = __db_r_attach(dbenv, &lt->reginfo, size)) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ if ((ret = __lock_init(dbenv, lt)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ region = lt->reginfo.primary =
+ R_ADDR(&lt->reginfo, lt->reginfo.rp->primary);
+
+ /* Check for incompatible automatic deadlock detection requests. */
+ if (dbenv->lk_detect != DB_LOCK_NORUN) {
+ if (region->detect != DB_LOCK_NORUN &&
+ dbenv->lk_detect != DB_LOCK_DEFAULT &&
+ region->detect != dbenv->lk_detect) {
+ __db_err(dbenv,
+ "lock_open: incompatible deadlock detector mode");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Upgrade if our caller wants automatic detection, and it
+ * was not currently being done, whether or not we created
+ * the region.
+ */
+ if (region->detect == DB_LOCK_NORUN)
+ region->detect = dbenv->lk_detect;
+ }
+
+ /*
+ * A process joining the region may have reset the lock and transaction
+ * timeouts.
+ */
+ if (dbenv->lk_timeout != 0)
+ region->lk_timeout = dbenv->lk_timeout;
+ if (dbenv->tx_timeout != 0)
+ region->tx_timeout = dbenv->tx_timeout;
+
+ /* Set remaining pointers into region. */
+ lt->conflicts = (u_int8_t *)R_ADDR(&lt->reginfo, region->conf_off);
+ lt->obj_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->obj_off);
+ lt->locker_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->locker_off);
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ dbenv->lk_handle = lt;
+ return (0);
+
+err: if (lt->reginfo.addr != NULL) {
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &lt->reginfo);
+ (void)__db_r_detach(dbenv, &lt->reginfo, 0);
+ }
+ __os_free(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_init --
+ * Initialize the lock region.
+ */
+static int
+__lock_init(dbenv, lt)
+ DB_ENV *dbenv;
+ DB_LOCKTAB *lt;
+{
+ const u_int8_t *lk_conflicts;
+ struct __db_lock *lp;
+ DB_LOCKER *lidp;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *region;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ size_t maint_size;
+#endif
+ u_int32_t i, lk_modes;
+ u_int8_t *addr;
+ int ret;
+
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKREGION), 0, &lt->reginfo.primary)) != 0)
+ goto mem_err;
+ lt->reginfo.rp->primary = R_OFFSET(&lt->reginfo, lt->reginfo.primary);
+ region = lt->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ /* Select a conflict matrix if none specified. */
+ if (dbenv->lk_modes == 0)
+ if (CDB_LOCKING(dbenv)) {
+ lk_modes = DB_LOCK_CDB_N;
+ lk_conflicts = db_cdb_conflicts;
+ } else {
+ lk_modes = DB_LOCK_RIW_N;
+ lk_conflicts = db_riw_conflicts;
+ }
+ else {
+ lk_modes = dbenv->lk_modes;
+ lk_conflicts = dbenv->lk_conflicts;
+ }
+
+ region->need_dd = 0;
+ region->detect = DB_LOCK_NORUN;
+ region->lk_timeout = dbenv->lk_timeout;
+ region->tx_timeout = dbenv->tx_timeout;
+ region->locker_t_size = __db_tablesize(dbenv->lk_max_lockers);
+ region->object_t_size = __db_tablesize(dbenv->lk_max_objects);
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_id = 0;
+ region->stat.st_cur_maxid = DB_LOCK_MAXID;
+ region->stat.st_maxlocks = dbenv->lk_max;
+ region->stat.st_maxlockers = dbenv->lk_max_lockers;
+ region->stat.st_maxobjects = dbenv->lk_max_objects;
+ region->stat.st_nmodes = lk_modes;
+
+ /* Allocate room for the conflict matrix and initialize it. */
+ if ((ret =
+ __db_shalloc(lt->reginfo.addr, lk_modes * lk_modes, 0, &addr)) != 0)
+ goto mem_err;
+ memcpy(addr, lk_conflicts, lk_modes * lk_modes);
+ region->conf_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the object hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->object_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->object_t_size);
+ region->obj_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the locker hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->locker_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->locker_t_size);
+ region->locker_off = R_OFFSET(&lt->reginfo, addr);
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ maint_size = __lock_region_maint(dbenv);
+ /* Allocate room for the locker maintenance info and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(REGMAINT) + maint_size, 0, &addr)) != 0)
+ goto mem_err;
+ __db_maintinit(&lt->reginfo, addr, maint_size);
+ region->maint_off = R_OFFSET(&lt->reginfo, addr);
+#endif
+
+ /*
+ * Initialize locks onto a free list. Initialize and lock the mutex
+ * so that when we need to block, all we need do is try to acquire
+ * the mutex.
+ */
+ SH_TAILQ_INIT(&region->free_locks);
+ for (i = 0; i < region->stat.st_maxlocks; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
+ goto mem_err;
+ lp->status = DB_LSTAT_FREE;
+ lp->gen = 0;
+ if ((ret = __db_mutex_setup(dbenv, &lt->reginfo, &lp->mutex,
+ MUTEX_NO_RLOCK | MUTEX_SELF_BLOCK)) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, &lp->mutex);
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, lp, links, __db_lock);
+ }
+
+ /* Initialize objects onto a free list. */
+ SH_TAILQ_INIT(&region->dd_objs);
+ SH_TAILQ_INIT(&region->free_objs);
+ for (i = 0; i < region->stat.st_maxobjects; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKOBJ), 0, &op)) != 0)
+ goto mem_err;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, op, links, __db_lockobj);
+ }
+
+ /* Initialize lockers onto a free list. */
+ SH_TAILQ_INIT(&region->lockers);
+ SH_TAILQ_INIT(&region->free_lockers);
+ for (i = 0; i < region->stat.st_maxlockers; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKER), 0, &lidp)) != 0) {
+mem_err: __db_err(dbenv,
+ "Unable to allocate memory for the lock table");
+ return (ret);
+ }
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, lidp, links, __db_locker);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_dbenv_refresh --
+ * Clean up after the lock system on a close or failed open. Called
+ * only from __dbenv_refresh. (Formerly called __lock_close.)
+ *
+ * PUBLIC: int __lock_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__lock_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKTAB *lt;
+ int ret;
+
+ lt = dbenv->lk_handle;
+
+ /* Detach from the region. */
+ ret = __db_r_detach(dbenv, &lt->reginfo, 0);
+
+ __os_free(dbenv, lt);
+
+ dbenv->lk_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __lock_region_size --
+ * Return the region size.
+ */
+static size_t
+__lock_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t retval;
+
+ /*
+ * Figure out how much space we're going to need. This list should
+ * map one-to-one with the __db_shalloc calls in __lock_init.
+ */
+ retval = 0;
+ retval += __db_shalloc_size(sizeof(DB_LOCKREGION), sizeof(db_align_t));
+ retval += __db_shalloc_size(dbenv->lk_modes * dbenv->lk_modes, sizeof(db_align_t));
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), sizeof(db_align_t));
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), sizeof(db_align_t));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ retval +=
+ __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), sizeof(db_align_t));
+#endif
+ retval += __db_shalloc_size(
+ sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKOBJ), sizeof(db_align_t)) * dbenv->lk_max_objects;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKER), sizeof(db_align_t)) * dbenv->lk_max_lockers;
+
+ /*
+ * Include 16 bytes of string space per lock. DB doesn't use it
+ * because we pre-allocate lock space for DBTs in the structure.
+ */
+ retval += __db_shalloc_size(dbenv->lk_max * 16, sizeof(size_t));
+
+ /* And we keep getting this wrong, let's be generous. */
+ retval += retval / 4;
+
+ return (retval);
+}
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __lock_region_maint --
+ * Return the amount of space needed for region maintenance info.
+ */
+static size_t
+__lock_region_maint(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = sizeof(DB_MUTEX *) * dbenv->lk_max;
+ return (s);
+}
+#endif
+
+/*
+ * __lock_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__lock_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_LOCKREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __lock_id_set --
+ * Set the current locker ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_id_set(dbenv, cur_id, max_id)
+ DB_ENV *dbenv;
+ u_int32_t cur_id, max_id;
+{
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_id_set", DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ region->stat.st_id = cur_id;
+ region->stat.st_cur_maxid = max_id;
+
+ return (0);
+}
+#endif
diff --git a/libdb/lock/lock_stat.c b/libdb/lock/lock_stat.c
new file mode 100644
index 0000000..72c2490
--- /dev/null
+++ b/libdb/lock/lock_stat.c
@@ -0,0 +1,398 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/db_am.h"
+
+static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKER *, FILE *));
+static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
+static void __lock_printheader __P((void));
+
+/*
+ * __lock_stat --
+ * Return LOCK statistics.
+ *
+ * PUBLIC: int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ */
+int
+__lock_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_LOCK_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCK_STAT *stats, tmp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_stat", DB_INIT_LOCK);
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->lock_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ lt = dbenv->lk_handle;
+
+ if ((ret = __os_umalloc(dbenv, sizeof(*stats), &stats)) != 0)
+ return (ret);
+
+ /* Copy out the global statistics. */
+ R_LOCK(dbenv, &lt->reginfo);
+
+ region = lt->reginfo.primary;
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_locktimeout = region->lk_timeout;
+ stats->st_txntimeout = region->tx_timeout;
+
+ stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = lt->reginfo.rp->size;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ tmp = region->stat;
+ memset(&region->stat, 0, sizeof(region->stat));
+ lt->reginfo.rp->mutex.mutex_set_wait = 0;
+ lt->reginfo.rp->mutex.mutex_set_nowait = 0;
+
+ region->stat.st_id = tmp.st_id;
+ region->stat.st_cur_maxid = tmp.st_cur_maxid;
+ region->stat.st_maxlocks = tmp.st_maxlocks;
+ region->stat.st_maxlockers = tmp.st_maxlockers;
+ region->stat.st_maxobjects = tmp.st_maxobjects;
+ region->stat.st_nlocks =
+ region->stat.st_maxnlocks = tmp.st_nlocks;
+ region->stat.st_nlockers =
+ region->stat.st_maxnlockers = tmp.st_nlockers;
+ region->stat.st_nobjects =
+ region->stat.st_maxnobjects = tmp.st_nobjects;
+ region->stat.st_nmodes = tmp.st_nmodes;
+ }
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ *statp = stats;
+ return (0);
+}
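
/*
 * Illustrative usage sketch, not part of the patch: reading, printing and
 * clearing the lock statistics returned by __lock_stat above.  The statistics
 * structure is allocated on the caller's behalf and must be freed; the fields
 * printed are a small, arbitrary selection.
 */
#include <stdio.h>
#include <stdlib.h>
#include <db.h>

int
print_lock_stats(DB_ENV *dbenv)
{
	DB_LOCK_STAT *sp;
	int ret;

	if ((ret = dbenv->lock_stat(dbenv, &sp, DB_STAT_CLEAR)) != 0)
		return (ret);

	printf("locks in use: %lu (high water %lu)\n",
	    (unsigned long)sp->st_nlocks, (unsigned long)sp->st_maxnlocks);
	printf("deadlocks detected: %lu\n", (unsigned long)sp->st_ndeadlocks);

	free(sp);
	return (0);
}
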
+
+#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */
+#define LOCK_DUMP_LOCKERS 0x002 /* Display lockers. */
+#define LOCK_DUMP_MEM 0x004 /* Display region memory. */
+#define LOCK_DUMP_OBJECTS 0x008 /* Display objects. */
+#define LOCK_DUMP_PARAMS 0x010 /* Display params. */
+#define LOCK_DUMP_ALL /* All */ \
+ (LOCK_DUMP_CONF | LOCK_DUMP_LOCKERS | LOCK_DUMP_MEM | \
+ LOCK_DUMP_OBJECTS | LOCK_DUMP_PARAMS)
+
+/*
+ * __lock_dump_region --
+ *
+ * PUBLIC: int __lock_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+int
+__lock_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ DB_LOCKER *lip;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *lrp;
+ DB_LOCKTAB *lt;
+ u_int32_t flags, i, j;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_dump_region", DB_INIT_LOCK);
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(LOCK_DUMP_ALL);
+ break;
+ case 'c':
+ LF_SET(LOCK_DUMP_CONF);
+ break;
+ case 'l':
+ LF_SET(LOCK_DUMP_LOCKERS);
+ break;
+ case 'm':
+ LF_SET(LOCK_DUMP_MEM);
+ break;
+ case 'o':
+ LF_SET(LOCK_DUMP_OBJECTS);
+ break;
+ case 'p':
+ LF_SET(LOCK_DUMP_PARAMS);
+ break;
+ }
+
+ lt = dbenv->lk_handle;
+ lrp = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ if (LF_ISSET(LOCK_DUMP_PARAMS)) {
+ fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
+ fprintf(fp,
+ "%s: %lu, %s: %lu, %s: %lu,\n%s: %lu, %s: %lu, %s: %lu, %s: %lu\n",
+ "locker table size", (u_long)lrp->locker_t_size,
+ "object table size", (u_long)lrp->object_t_size,
+ "obj_off", (u_long)lrp->obj_off,
+ "osynch_off", (u_long)lrp->osynch_off,
+ "locker_off", (u_long)lrp->locker_off,
+ "lsynch_off", (u_long)lrp->lsynch_off,
+ "need_dd", (u_long)lrp->need_dd);
+ }
+
+ if (LF_ISSET(LOCK_DUMP_CONF)) {
+ fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
+ for (i = 0; i < lrp->stat.st_nmodes; i++) {
+ for (j = 0; j < lrp->stat.st_nmodes; j++)
+ fprintf(fp, "%lu\t", (u_long)
+ lt->conflicts[i * lrp->stat.st_nmodes + j]);
+ fprintf(fp, "\n");
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_LOCKERS)) {
+ fprintf(fp, "%s\nLocks grouped by lockers\n", DB_LINE);
+ __lock_printheader();
+ for (i = 0; i < lrp->locker_t_size; i++)
+ for (lip =
+ SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
+ __lock_dump_locker(lt, lip, fp);
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_OBJECTS)) {
+ fprintf(fp, "%s\nLocks grouped by object\n", DB_LINE);
+ __lock_printheader();
+ for (i = 0; i < lrp->object_t_size; i++) {
+ for (op = SH_TAILQ_FIRST(&lt->obj_tab[i], __db_lockobj);
+ op != NULL;
+ op = SH_TAILQ_NEXT(op, links, __db_lockobj))
+ __lock_dump_object(lt, op, fp);
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_MEM))
+ __db_shalloc_dump(lt->reginfo.addr, fp);
+
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+}
+
+static void
+__lock_dump_locker(lt, lip, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKER *lip;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+ time_t s;
+ char buf[64];
+
+ fprintf(fp, "%8lx dd=%2ld locks held %-4d write locks %-4d",
+ (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites);
+ fprintf(fp, " %s ", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " ");
+ if (LOCK_TIME_ISVALID(&lip->tx_expire)) {
+ s = lip->tx_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " expires %s.%lu", buf, (u_long)lip->tx_expire.tv_usec);
+ }
+ if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
+ fprintf(fp, " lk timeout %u", lip->lk_timeout);
+ if (LOCK_TIME_ISVALID(&lip->lk_expire)) {
+ s = lip->lk_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " lk expires %s.%lu", buf, (u_long)lip->lk_expire.tv_usec);
+ }
+ fprintf(fp, "\n");
+
+ lp = SH_LIST_FIRST(&lip->heldby, __db_lock);
+ if (lp != NULL) {
+ for (; lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ fprintf(fp, "\n");
+ }
+}
+
+static void
+__lock_dump_object(lt, op, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *op;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+
+ for (lp =
+ SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ for (lp =
+ SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+
+ fprintf(fp, "\n");
+}
+
+/*
+ * __lock_printheader --
+ */
+static void
+__lock_printheader()
+{
+ printf("%-8s %-6s %-6s %-10s %s\n",
+ "Locker", "Mode",
+ "Count", "Status", "----------- Object ----------");
+}
+
+/*
+ * __lock_printlock --
+ *
+ * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ */
+void
+__lock_printlock(lt, lp, ispgno)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lp;
+ int ispgno;
+{
+ DB_LOCKOBJ *lockobj;
+ db_pgno_t pgno;
+ u_int32_t *fidp, type;
+ u_int8_t *ptr;
+ char *namep;
+ const char *mode, *status;
+
+ switch (lp->mode) {
+ case DB_LOCK_DIRTY:
+ mode = "DIRTY_READ";
+ break;
+ case DB_LOCK_IREAD:
+ mode = "IREAD";
+ break;
+ case DB_LOCK_IWR:
+ mode = "IWR";
+ break;
+ case DB_LOCK_IWRITE:
+ mode = "IWRITE";
+ break;
+ case DB_LOCK_NG:
+ mode = "NG";
+ break;
+ case DB_LOCK_READ:
+ mode = "READ";
+ break;
+ case DB_LOCK_WRITE:
+ mode = "WRITE";
+ break;
+ case DB_LOCK_WWRITE:
+ mode = "WAS_WRITE";
+ break;
+ case DB_LOCK_WAIT:
+ mode = "WAIT";
+ break;
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+ switch (lp->status) {
+ case DB_LSTAT_ABORTED:
+ status = "ABORT";
+ break;
+ case DB_LSTAT_ERR:
+ status = "ERROR";
+ break;
+ case DB_LSTAT_FREE:
+ status = "FREE";
+ break;
+ case DB_LSTAT_HELD:
+ status = "HELD";
+ break;
+ case DB_LSTAT_WAITING:
+ status = "WAIT";
+ break;
+ case DB_LSTAT_PENDING:
+ status = "PENDING";
+ break;
+ case DB_LSTAT_EXPIRED:
+ status = "EXPIRED";
+ break;
+ default:
+ status = "UNKNOWN";
+ break;
+ }
+ printf("%8lx %-6s %6lu %-10s ",
+ (u_long)lp->holder, mode, (u_long)lp->refcount, status);
+
+ lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ ptr = SH_DBT_PTR(&lockobj->lockobj);
+ if (ispgno && lockobj->lockobj.size == sizeof(struct __db_ilock)) {
+ /* Assume this is a DBT lock. */
+ memcpy(&pgno, ptr, sizeof(db_pgno_t));
+ fidp = (u_int32_t *)(ptr + sizeof(db_pgno_t));
+ type = *(u_int32_t *)(ptr + sizeof(db_pgno_t) + DB_FILE_ID_LEN);
+ if (__dbreg_get_name(lt->dbenv, (u_int8_t *)fidp, &namep) != 0)
+ namep = NULL;
+ if (namep == NULL)
+ printf("(%lx %lx %lx %lx %lx)",
+ (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2],
+ (u_long)fidp[3], (u_long)fidp[4]);
+ else
+ printf("%-20s", namep);
+ printf("%-7s %lu\n",
+ type == DB_PAGE_LOCK ? "page" :
+ type == DB_RECORD_LOCK ? "record" : "handle",
+ (u_long)pgno);
+ } else {
+ printf("0x%lx ", (u_long)R_OFFSET(&lt->reginfo, lockobj));
+ __db_pr(ptr, lockobj->lockobj.size, stdout);
+ printf("\n");
+ }
+}
diff --git a/libdb/lock/lock_util.c b/libdb/lock/lock_util.c
new file mode 100644
index 0000000..1fa2b93
--- /dev/null
+++ b/libdb/lock/lock_util.c
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+/*
+ * __lock_cmp --
+ * This function is used to compare a DBT that is about to be entered
+ * into a hash table with an object already in the hash table. Note
+ * that it just returns true on equal and 0 on not-equal. Therefore
+ * this function cannot be used as a sort function; its purpose is to
+ * be used as a hash comparison function.
+ *
+ * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+ */
+int
+__lock_cmp(dbt, lock_obj)
+ const DBT *dbt;
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+ return (dbt->size == lock_obj->lockobj.size &&
+ memcmp(dbt->data, obj_data, dbt->size) == 0);
+}
+
+/*
+ * PUBLIC: int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+ */
+int
+__lock_locker_cmp(locker, sh_locker)
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ return (locker == sh_locker->id);
+}
+
+/*
+ * The next two functions are the hash functions used to store objects in the
+ * lock hash tables. They are hashing the same items, but one (__lock_ohash)
+ * takes a DBT (used for hashing a parameter passed from the user) and the
+ * other (__lock_lhash) takes a DB_LOCKOBJ (used for hashing something that is
+ * already in the lock manager). In both cases, we have a special check to
+ * fast path the case where we think we are doing a hash on a DB page/fileid
+ * pair. If the size is right, then we do the fast hash.
+ *
+ * We know that DB uses DB_LOCK_ILOCK types for its lock objects. The first
+ * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes
+ * are a unique file id, where the first 4 bytes on UNIX systems are the file
+ * inode number, and the first 4 bytes on Windows systems are the FileIndexLow
+ * bytes. So, we use the XOR of the page number and the first four bytes of
+ * the file id to produce a 32-bit hash value.
+ *
+ * We have no particular reason to believe that this algorithm will produce
+ * a good hash, but we want a fast hash more than we want a good one, when
+ * we're coming through this code path.
+ */
+#define FAST_HASH(P) { \
+ u_int32_t __h; \
+ u_int8_t *__cp, *__hp; \
+ __hp = (u_int8_t *)&__h; \
+ __cp = (u_int8_t *)(P); \
+ __hp[0] = __cp[0] ^ __cp[4]; \
+ __hp[1] = __cp[1] ^ __cp[5]; \
+ __hp[2] = __cp[2] ^ __cp[6]; \
+ __hp[3] = __cp[3] ^ __cp[7]; \
+ return (__h); \
+}
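
/*
 * Illustrative sketch, not part of the patch: the byte-wise XOR fast hash
 * described above, applied to a toy lock object laid out as a 4-byte page
 * number followed by a file id.  The struct and names are hypothetical; only
 * the first eight bytes of the object take part in the hash.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_ilock {
	uint32_t pgno;		/* Page number.            */
	uint8_t  fileid[20];	/* Unique file identifier. */
};

static uint32_t
fast_hash(const void *p)
{
	const uint8_t *cp = p;
	uint8_t hp[4];
	uint32_t h;

	hp[0] = cp[0] ^ cp[4];
	hp[1] = cp[1] ^ cp[5];
	hp[2] = cp[2] ^ cp[6];
	hp[3] = cp[3] ^ cp[7];
	memcpy(&h, hp, sizeof(h));
	return (h);
}

int
main(void)
{
	struct toy_ilock ilock;

	memset(&ilock, 0, sizeof(ilock));
	ilock.pgno = 42;
	memcpy(ilock.fileid, "\x01\x02\x03\x04", 4);
	printf("hash = %#lx\n", (unsigned long)fast_hash(&ilock));
	return (0);
}
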
+
+/*
+ * __lock_ohash --
+ *
+ * PUBLIC: u_int32_t __lock_ohash __P((const DBT *));
+ */
+u_int32_t
+__lock_ohash(dbt)
+ const DBT *dbt;
+{
+ if (dbt->size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(dbt->data);
+
+ return (__ham_func5(NULL, dbt->data, dbt->size));
+}
+
+/*
+ * __lock_lhash --
+ *
+ * PUBLIC: u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+ */
+u_int32_t
+__lock_lhash(lock_obj)
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+
+ if (lock_obj->lockobj.size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(obj_data);
+
+ return (__ham_func5(NULL, obj_data, lock_obj->lockobj.size));
+}
+
+/*
+ * __lock_locker_hash --
+ * Hash function for entering lockers into the locker hash table.
+ * Since these are simply 32-bit unsigned integers, just return
+ * the locker value.
+ *
+ * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
+ */
+u_int32_t
+__lock_locker_hash(locker)
+ u_int32_t locker;
+{
+ return (locker);
+}
diff --git a/libdb/log/log.c b/libdb/log/log.c
new file mode 100644
index 0000000..033f77e
--- /dev/null
+++ b/libdb/log/log.c
@@ -0,0 +1,1084 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __log_init __P((DB_ENV *, DB_LOG *));
+static int __log_recover __P((DB_LOG *));
+static size_t __log_region_size __P((DB_ENV *));
+static int __log_zero __P((DB_ENV *, DB_LSN *, DB_LSN *));
+
+/*
+ * __log_open --
+ * Internal version of log_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __log_open __P((DB_ENV *));
+ */
+int
+__log_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int ret;
+
+ /* Create/initialize the DB_LOG structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOG), &dblp)) != 0)
+ return (ret);
+ dblp->dbenv = dbenv;
+
+ /* Join/create the log region. */
+ dblp->reginfo.type = REGION_TYPE_LOG;
+ dblp->reginfo.id = INVALID_REGION_ID;
+ dblp->reginfo.mode = dbenv->db_mode;
+ dblp->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&dblp->reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(
+ dbenv, &dblp->reginfo, __log_region_size(dbenv))) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE))
+ if ((ret = __log_init(dbenv, dblp)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ lp = dblp->reginfo.primary =
+ R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary);
+
+ /*
+ * If the region is threaded, then we have to lock both the handles
+ * and the region, and we need to allocate a mutex for that purpose.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, &dblp->reginfo, &dblp->mutexp,
+ MUTEX_ALLOC | MUTEX_NO_RLOCK)) != 0)
+ goto err;
+
+ /* Initialize the rest of the structure. */
+ dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
+
+ /*
+ * Set the handle -- we may be about to run recovery, which allocates
+ * log cursors. Log cursors require logging be already configured,
+ * and the handle being set is what demonstrates that.
+ *
+ * If we created the region, run recovery. If that fails, make sure
+ * we reset the log handle before cleaning up, otherwise we will try
+ * and clean up again in the mainline DB_ENV initialization code.
+ */
+ dbenv->lg_handle = dblp;
+
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE)) {
+ if ((ret = __log_recover(dblp)) != 0) {
+ dbenv->lg_handle = NULL;
+ goto err;
+ }
+
+ /*
+ * We first take the log file size from the environment, if
+ * specified. If that wasn't set, recovery may have set it
+ * from the persistent information in a log file header. If
+ * that didn't set it either, we default.
+ */
+ if (lp->log_size == 0)
+ lp->log_size = lp->log_nsize = LG_MAX_DEFAULT;
+ } else {
+ /*
+ * A process joining the region may have reset the log file
+ * size, too. If so, it only affects the next log file we
+ * create.
+ */
+ if (dbenv->lg_size != 0)
+ lp->log_nsize = dbenv->lg_size;
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (0);
+
+err: if (dblp->reginfo.addr != NULL) {
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ (void)__db_r_detach(dbenv, &dblp->reginfo, 0);
+ }
+
+ if (dblp->mutexp != NULL)
+ __db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
+
+ __os_free(dbenv, dblp);
+
+ return (ret);
+}
+
+/*
+ * __log_init --
+ * Initialize a log region in shared memory.
+ */
+static int
+__log_init(dbenv, dblp)
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+{
+ DB_MUTEX *flush_mutexp;
+ LOG *region;
+ int ret;
+ void *p;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ u_int8_t *addr;
+#endif
+
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(*region), 0, &dblp->reginfo.primary)) != 0)
+ goto mem_err;
+ dblp->reginfo.rp->primary =
+ R_OFFSET(&dblp->reginfo, dblp->reginfo.primary);
+ region = dblp->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ region->fid_max = 0;
+ SH_TAILQ_INIT(&region->fq);
+ region->free_fid_stack = INVALID_ROFF;
+ region->free_fids = region->free_fids_alloced = 0;
+
+ /* Initialize LOG LSNs. */
+ INIT_LSN(region->lsn);
+ INIT_LSN(region->ready_lsn);
+ INIT_LSN(region->t_lsn);
+
+ /*
+ * It's possible to be waiting for an LSN of [1][0], if a replication
+ * client gets the first log record out of order. An LSN of [0][0]
+ * signifies that we're not waiting.
+ */
+ ZERO_LSN(region->waiting_lsn);
+
+ /*
+ * Log makes note of the fact that it ran into a checkpoint on
+ * startup if it did so, as a recovery optimization. A zero
+ * LSN signifies that it hasn't found one [yet].
+ */
+ ZERO_LSN(region->cached_ckp_lsn);
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Allocate room for the log maintenance info and initialize it. */
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(REGMAINT) + LG_MAINT_SIZE, 0, &addr)) != 0)
+ goto mem_err;
+ __db_maintinit(&dblp->reginfo, addr, LG_MAINT_SIZE);
+ region->maint_off = R_OFFSET(&dblp->reginfo, addr);
+#endif
+
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &region->fq_mutex,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+
+ /*
+ * We must create a place for the flush mutex separately; mutexes have
+ * to be aligned to MUTEX_ALIGN, and the only way to guarantee that is
+ * to make sure they're at the beginning of a shalloc'ed chunk.
+ */
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(DB_MUTEX), MUTEX_ALIGN, &flush_mutexp)) != 0)
+ goto mem_err;
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, flush_mutexp,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+ region->flush_mutex_off = R_OFFSET(&dblp->reginfo, flush_mutexp);
+
+ /* Initialize the buffer. */
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, dbenv->lg_bsize, 0, &p)) != 0) {
+mem_err: __db_err(dbenv, "Unable to allocate memory for the log buffer");
+ return (ret);
+ }
+ region->buffer_size = dbenv->lg_bsize;
+ region->buffer_off = R_OFFSET(&dblp->reginfo, p);
+ region->log_size = region->log_nsize = dbenv->lg_size;
+
+	/* Initialize the commit queue. */
+ SH_TAILQ_INIT(&region->free_commits);
+ SH_TAILQ_INIT(&region->commits);
+ region->ncommit = 0;
+
+ /*
+ * Fill in the log's persistent header. Don't fill in the log file
+ * sizes, as they may change at any time and so have to be filled in
+ * as each log file is created.
+ */
+ region->persist.magic = DB_LOGMAGIC;
+ region->persist.version = DB_LOGVERSION;
+ region->persist.mode = (u_int32_t)dbenv->db_mode;
+
+ return (0);
+}
+
+/*
+ * __log_recover --
+ * Recover a log.
+ */
+static int
+__log_recover(dblp)
+ DB_LOG *dblp;
+{
+ DBT dbt;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ LOG *lp;
+ u_int32_t cnt, rectype;
+ int ret;
+ logfile_validity status;
+
+ logc = NULL;
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Find a log file. If none exist, we simply return, leaving
+ * everything initialized to a new log.
+ */
+ if ((ret = __log_find(dblp, 0, &cnt, &status)) != 0)
+ return (ret);
+ if (cnt == 0)
+ return (0);
+
+ /*
+ * If the last file is an old version, readable or no, start a new
+ * file. Don't bother finding the end of the last log file;
+ * we assume that it's valid in its entirety, since the user
+ * should have shut down cleanly or run recovery before upgrading.
+ */
+ if (status == DB_LV_OLD_READABLE || status == DB_LV_OLD_UNREADABLE) {
+ lp->lsn.file = lp->s_lsn.file = cnt + 1;
+ lp->lsn.offset = lp->s_lsn.offset = 0;
+ goto skipsearch;
+ }
+ DB_ASSERT(status == DB_LV_NORMAL);
+
+ /*
+ * We have the last useful log file and we've loaded any persistent
+ * information. Set the end point of the log past the end of the last
+ * file. Read the last file, looking for the last checkpoint and
+ * the log's end.
+ */
+ lp->lsn.file = cnt + 1;
+ lp->lsn.offset = 0;
+ lsn.file = cnt;
+ lsn.offset = 0;
+
+ /*
+ * Allocate a cursor and set it to the first record. This shouldn't
+ * fail, leave error messages on.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ F_SET(logc, DB_LOG_LOCKED);
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = logc->get(logc, &lsn, &dbt, DB_SET)) != 0)
+ goto err;
+
+ /*
+ * Read to the end of the file. This may fail at some point, so
+ * turn off error messages.
+ */
+ F_SET(logc, DB_LOG_SILENT_ERR);
+ while (logc->get(logc, &lsn, &dbt, DB_NEXT) == 0) {
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&rectype, dbt.data, sizeof(u_int32_t));
+ if (rectype == DB___txn_ckp)
+ /*
+ * If we happen to run into a checkpoint, cache its
+ * LSN so that the transaction system doesn't have
+ * to walk this log file again looking for it.
+ */
+ lp->cached_ckp_lsn = lsn;
+ }
+ F_CLR(logc, DB_LOG_SILENT_ERR);
+
+ /*
+ * We now know where the end of the log is. Set the first LSN that
+ * we want to return to an application and the LSN of the last known
+ * record on disk.
+ */
+ lp->lsn = lsn;
+ lp->s_lsn = lsn;
+ lp->lsn.offset += logc->c_len;
+ lp->s_lsn.offset += logc->c_len;
+
+ /* Set up the current buffer information, too. */
+ lp->len = logc->c_len;
+ lp->b_off = 0;
+ lp->w_off = lp->lsn.offset;
+
+skipsearch:
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dbenv,
+ "Finding last valid log LSN: file: %lu offset %lu",
+ (u_long)lp->lsn.file, (u_long)lp->lsn.offset);
+
+err: if (logc != NULL)
+ (void)logc->close(logc, 0);
+
+ return (ret);
+}
+
+/*
+ * __log_find --
+ * Try to find a log file. If find_first is set, valp will contain
+ * the number of the first readable log file, else it will contain the number
+ * of the last log file (which may be too old to read).
+ *
+ * PUBLIC: int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *));
+ */
+int
+__log_find(dblp, find_first, valp, statusp)
+ DB_LOG *dblp;
+ int find_first;
+ u_int32_t *valp;
+ logfile_validity *statusp;
+{
+ DB_ENV *dbenv;
+ logfile_validity logval_status, status;
+ u_int32_t clv, logval;
+ int cnt, fcnt, ret;
+ const char *dir;
+ char *c, **names, *p, *q, savech;
+
+ dbenv = dblp->dbenv;
+ logval_status = status = DB_LV_NONEXISTENT;
+
+ /* Return a value of 0 as the log file number on failure. */
+ *valp = 0;
+
+ /* Find the directory name. */
+ if ((ret = __log_name(dblp, 1, &p, NULL, 0)) != 0)
+ return (ret);
+ if ((q = __db_rpath(p)) == NULL) {
+ COMPQUIET(savech, 0);
+ dir = PATH_DOT;
+ } else {
+ savech = *q;
+ *q = '\0';
+ dir = p;
+ }
+
+ /* Get the list of file names. */
+ ret = __os_dirlist(dbenv, dir, &names, &fcnt);
+
+ /*
+ * !!!
+ * We overwrote a byte in the string with a nul. Restore the string
+ * so that the diagnostic checks in the memory allocation code work
+ * and any error messages display the right file name.
+ */
+ if (q != NULL)
+ *q = savech;
+
+ if (ret != 0) {
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+ __os_free(dbenv, p);
+ return (ret);
+ }
+
+ /* Search for a valid log file name. */
+ for (cnt = fcnt, clv = logval = 0; --cnt >= 0;) {
+ if (strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1) != 0)
+ continue;
+
+ /*
+ * Names of the form log\.[0-9]* are reserved for DB. Other
+ * names sharing LFPREFIX, such as "log.db", are legal.
+ */
+ for (c = names[cnt] + sizeof(LFPREFIX) - 1; *c != '\0'; c++)
+ if (!isdigit((int)*c))
+ break;
+ if (*c != '\0')
+ continue;
+
+ /*
+ * Use atol, not atoi; if an "int" is 16-bits, the largest
+ * log file name won't fit.
+ */
+ clv = atol(names[cnt] + (sizeof(LFPREFIX) - 1));
+
+ /*
+ * If searching for the first log file, we want to return the
+ * oldest log file we can read, or, if no readable log files
+ * exist, the newest log file we can't read (the crossover
+ * point between the old and new versions of the log file).
+ *
+ * If we're searching for the last log file, we want to return
+ * the newest log file, period.
+ *
+		 * Readable log files should never precede unreadable log
+		 * files; that would mean the admin seriously screwed up.
+ */
+ if (find_first) {
+ if (logval != 0 &&
+ status != DB_LV_OLD_UNREADABLE && clv > logval)
+ continue;
+ } else
+ if (logval != 0 && clv < logval)
+ continue;
+
+ if ((ret = __log_valid(dblp, clv, 1, &status)) != 0) {
+ __db_err(dbenv, "Invalid log file: %s: %s",
+ names[cnt], db_strerror(ret));
+ goto err;
+ }
+ switch (status) {
+ case DB_LV_NONEXISTENT:
+ /* __log_valid never returns DB_LV_NONEXISTENT. */
+ DB_ASSERT(0);
+ break;
+ case DB_LV_INCOMPLETE:
+ /*
+ * The last log file may not have been initialized --
+ * it's possible to create a log file but not write
+ * anything to it. If performing recovery (that is,
+ * if find_first isn't set), ignore the file, it's
+ * not interesting. If we're searching for the first
+ * log record, return the file (assuming we don't find
+ * something better), as the "real" first log record
+ * is likely to be in the log buffer, and we want to
+ * set the file LSN for our return.
+ */
+ if (find_first)
+ goto found;
+ break;
+ case DB_LV_OLD_UNREADABLE:
+ /*
+ * If we're searching for the first log file, then we
+ * only want this file if we don't yet have a file or
+ * already have an unreadable file and this one is
+ * newer than that one. If we're searching for the
+ * last log file, we always want this file because we
+ * wouldn't be here if it wasn't newer than our current
+ * choice.
+ */
+ if (!find_first || logval == 0 ||
+ (status == DB_LV_OLD_UNREADABLE && clv > logval))
+ goto found;
+ break;
+ case DB_LV_NORMAL:
+ case DB_LV_OLD_READABLE:
+found: logval = clv;
+ logval_status = status;
+ break;
+ }
+ }
+
+ *valp = logval;
+
+err: __os_dirfree(dbenv, names, fcnt);
+ __os_free(dbenv, p);
+ *statusp = logval_status;
+
+ return (ret);
+}
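
/*
 * Illustrative sketch, not part of the patch: picking the highest-numbered
 * "log.NNNNNNNNNN" entry out of a directory listing, the same filtering the
 * search loop above performs.  The prefix macro, helper and sample names are
 * hypothetical.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LOG_PREFIX "log."

static long
newest_log(const char **names, int n)
{
	const char *c;
	long best, v;
	int i;

	best = 0;
	for (i = 0; i < n; i++) {
		if (strncmp(names[i], LOG_PREFIX, sizeof(LOG_PREFIX) - 1) != 0)
			continue;
		/* Only digits may follow the prefix; "log.db" is not ours. */
		for (c = names[i] + sizeof(LOG_PREFIX) - 1; *c != '\0'; c++)
			if (!isdigit((unsigned char)*c))
				break;
		if (*c != '\0')
			continue;
		/* Use atol, not atoi, in case int is only 16 bits. */
		v = atol(names[i] + sizeof(LOG_PREFIX) - 1);
		if (v > best)
			best = v;
	}
	return (best);
}

int
main(void)
{
	const char *names[] = { "log.0000000001", "log.db", "log.0000000003" };

	printf("newest: %ld\n", newest_log(names, 3));	/* prints 3 */
	return (0);
}
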
+
+/*
+ * log_valid --
+ * Validate a log file. Returns an error code in the event of
+ *	a fatal flaw in the specified log file; returns success with
+ * a code indicating the currentness and completeness of the specified
+ * log file if it is not unexpectedly flawed (that is, if it's perfectly
+ * normal, if it's zero-length, or if it's an old version).
+ *
+ * PUBLIC: int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+ */
+int
+__log_valid(dblp, number, set_persist, statusp)
+ DB_LOG *dblp;
+ u_int32_t number;
+ int set_persist;
+ logfile_validity *statusp;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_FH fh;
+ HDR *hdr;
+ LOG *region;
+ LOGP *persist;
+ logfile_validity status;
+ size_t hdrsize, nw, recsize;
+ int is_hmac, need_free, ret;
+ u_int8_t *tmp;
+ char *fname;
+
+ dbenv = dblp->dbenv;
+ db_cipher = dbenv->crypto_handle;
+ persist = NULL;
+ status = DB_LV_NORMAL;
+
+ /* Try to open the log file. */
+ if ((ret = __log_name(dblp,
+ number, &fname, &fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ __os_free(dbenv, fname);
+ return (ret);
+ }
+
+ need_free = 0;
+ hdrsize = HDR_NORMAL_SZ;
+ is_hmac = 0;
+ recsize = sizeof(LOGP);
+ if (CRYPTO_ON(dbenv)) {
+ hdrsize = HDR_CRYPTO_SZ;
+ recsize = sizeof(LOGP);
+ recsize += db_cipher->adj_size(recsize);
+ is_hmac = 1;
+ }
+ if ((ret = __os_calloc(dbenv, 1, recsize + hdrsize, &tmp)) != 0)
+ return (ret);
+ need_free = 1;
+ hdr = (HDR *)tmp;
+ persist = (LOGP *)(tmp + hdrsize);
+ /* Try to read the header. */
+ if ((ret = __os_read(dbenv, &fh, tmp, recsize + hdrsize, &nw)) != 0 ||
+ nw != recsize + hdrsize) {
+ if (ret == 0)
+ status = DB_LV_INCOMPLETE;
+ else
+ /*
+ * The error was a fatal read error, not just an
+ * incompletely initialized log file.
+ */
+ __db_err(dbenv, "Ignoring log file: %s: %s",
+ fname, db_strerror(ret));
+
+ (void)__os_closehandle(dbenv, &fh);
+ goto err;
+ }
+ (void)__os_closehandle(dbenv, &fh);
+
+ /*
+ * Now we have to validate the persistent record. We have
+ * several scenarios we have to deal with:
+ *
+ * 1. User has crypto turned on:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading a current, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . After decryption we'll fail the version check. [NOT YET]
+ * - They're reading a current, encrypted log file
+ * . We should proceed as usual.
+ * 2. User has crypto turned off:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the version check.
+ * - They're reading a current, unencrypted log file
+ * . We should proceed as usual.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . We'll fail the magic number check (it is encrypted).
+ * - They're reading a current, encrypted log file
+ * . We'll fail the magic number check (it is encrypted).
+ */
+ if (CRYPTO_ON(dbenv)) {
+ /*
+ * If we are trying to decrypt an unencrypted log
+ * we can only detect that by having an unreasonable
+ * data length for our persistent data.
+ */
+ if ((hdr->len - hdrsize) != sizeof(LOGP)) {
+ __db_err(dbenv, "log record size mismatch");
+ goto err;
+ }
+ /* Check the checksum and decrypt. */
+ if ((ret = __db_check_chksum(dbenv, db_cipher, &hdr->chksum[0],
+ (u_int8_t *)persist, hdr->len - hdrsize, is_hmac)) != 0) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], (u_int8_t *)persist, hdr->len - hdrsize)) != 0)
+ goto err;
+ }
+
+ /* Validate the header. */
+ if (persist->magic != DB_LOGMAGIC) {
+ __db_err(dbenv,
+ "Ignoring log file: %s: magic number %lx, not %lx",
+ fname, (u_long)persist->magic, (u_long)DB_LOGMAGIC);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Set our status code to indicate whether the log file
+ * belongs to an unreadable or readable old version; leave it
+ * alone if and only if the log file version is the current one.
+ */
+ if (persist->version > DB_LOGVERSION) {
+ /* This is a fatal error--the log file is newer than DB. */
+ __db_err(dbenv,
+ "Ignoring log file: %s: unsupported log version %lu",
+ fname, (u_long)persist->version);
+ ret = EINVAL;
+ goto err;
+ } else if (persist->version < DB_LOGOLDVER) {
+ status = DB_LV_OLD_UNREADABLE;
+ /*
+ * We don't want to set persistent info based on an
+ * unreadable region, so jump to "err".
+ */
+ goto err;
+ } else if (persist->version < DB_LOGVERSION)
+ status = DB_LV_OLD_READABLE;
+
+ /*
+ * Only if we have a current log do we verify the checksum.
+ * We could not check the checksum before checking the magic
+ * and version because old log hdrs have the length and checksum
+ * in a different location.
+ */
+ if (!CRYPTO_ON(dbenv) && ((ret = __db_check_chksum(dbenv,
+ db_cipher, &hdr->chksum[0], (u_int8_t *)persist,
+ hdr->len - hdrsize, is_hmac)) != 0)) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+
+ /*
+ * If the log is readable so far and we're doing system initialization,
+ * set the region's persistent information based on the headers.
+ *
+ * Always set the current log file size. Only set the next log file's
+ * size if the application hasn't set it already.
+ *
+ * XXX
+ * Always use the persistent header's mode, regardless of what was set
+ * in the current environment. We've always done it this way, but it's
+ * probably a bug -- I can't think of a way not-changing the mode would
+ * be a problem, though.
+ */
+ if (set_persist) {
+ region = dblp->reginfo.primary;
+ region->log_size = persist->log_size;
+ if (region->log_nsize == 0)
+ region->log_nsize = persist->log_size;
+ region->persist.mode = persist->mode;
+ }
+
+err: __os_free(dbenv, fname);
+ if (need_free)
+ __os_free(dbenv, tmp);
+ *statusp = status;
+ return (ret);
+}
+
+/*
+ * __log_dbenv_refresh --
+ * Clean up after the log system on a close or failed open. Called only
+ * from __dbenv_refresh. (Formerly called __log_close.)
+ *
+ * PUBLIC: int __log_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__log_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ int ret, t_ret;
+
+ dblp = dbenv->lg_handle;
+
+ /* We may have opened files as part of XA; if so, close them. */
+ F_SET(dblp, DBLOG_RECOVER);
+ ret = __dbreg_close_files(dbenv);
+
+ /* Discard the per-thread lock. */
+ if (dblp->mutexp != NULL)
+ __db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
+
+ /* Detach from the region. */
+ if ((t_ret =
+ __db_r_detach(dbenv, &dblp->reginfo, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Close open files, release allocated memory. */
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, &dblp->lfh)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dblp->dbentry != NULL)
+ __os_free(dbenv, dblp->dbentry);
+
+ __os_free(dbenv, dblp);
+
+ dbenv->lg_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __log_stat --
+ * Return log statistics.
+ *
+ * PUBLIC: int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+ */
+int
+__log_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_LOG_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LOG_STAT *stats;
+ LOG *region;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_stat", DB_INIT_LOG);
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ dblp = dbenv->lg_handle;
+ region = dblp->reginfo.primary;
+
+ if ((ret = __os_umalloc(dbenv, sizeof(DB_LOG_STAT), &stats)) != 0)
+ return (ret);
+
+ /* Copy out the global statistics. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ *stats = region->stat;
+ if (LF_ISSET(DB_STAT_CLEAR))
+ memset(&region->stat, 0, sizeof(region->stat));
+
+ stats->st_magic = region->persist.magic;
+ stats->st_version = region->persist.version;
+ stats->st_mode = region->persist.mode;
+ stats->st_lg_bsize = region->buffer_size;
+ stats->st_lg_size = region->log_nsize;
+
+ stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ dblp->reginfo.rp->mutex.mutex_set_wait = 0;
+ dblp->reginfo.rp->mutex.mutex_set_nowait = 0;
+ }
+ stats->st_regsize = dblp->reginfo.rp->size;
+
+ stats->st_cur_file = region->lsn.file;
+ stats->st_cur_offset = region->lsn.offset;
+ stats->st_disk_file = region->s_lsn.file;
+ stats->st_disk_offset = region->s_lsn.offset;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ *statp = stats;
+ return (0);
+}
+
+/*
+ * __log_get_cached_ckp_lsn --
+ * Retrieve any last checkpoint LSN that we may have found on startup.
+ *
+ * PUBLIC: void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+ */
+void
+__log_get_cached_ckp_lsn(dbenv, ckp_lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *ckp_lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ *ckp_lsnp = lp->cached_ckp_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+}
+
+/*
+ * __log_region_size --
+ * Return the amount of space needed for the log region.
+ * Make the region large enough to hold txn_max transaction
+ * detail structures plus some space to hold thread handles
+ * and the beginning of the shalloc region and anything we
+ * need for mutex system resource recording.
+ */
+static size_t
+__log_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = dbenv->lg_regionmax + dbenv->lg_bsize;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + LG_MAINT_SIZE;
+#endif
+ return (s);
+}
+
+/*
+ * __log_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __log_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__log_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+/*
+ * __log_vtruncate
+ * This is a virtual truncate. We set up the log indicators to
+ * make everyone believe that the given record is the last one in the
+ * log. Returns with the next valid LSN (i.e., the LSN of the next
+ * record to be written). This is used in replication to discard records
+ * in the log file that do not agree with the master.
+ *
+ * PUBLIC: int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *));
+ */
+int
+__log_vtruncate(dbenv, lsn, ckplsn)
+ DB_ENV *dbenv;
+ DB_LSN *lsn, *ckplsn;
+{
+ DBT log_dbt;
+ DB_FH fh;
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN end_lsn;
+ LOG *lp;
+ u_int32_t bytes, c_len;
+ int fn, ret, t_ret;
+ char *fname;
+
+ /* Need to find out the length of this soon-to-be-last record. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ ret = logc->get(logc, lsn, &log_dbt, DB_SET);
+ c_len = logc->c_len;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+
+ /* Now do the truncate. */
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ end_lsn = lp->lsn;
+ lp->lsn = *lsn;
+ lp->len = c_len;
+ lp->lsn.offset += lp->len;
+
+ /*
+ * I am going to assume that the number of bytes written since
+ * the last checkpoint doesn't exceed a 32-bit number.
+ */
+ DB_ASSERT(lp->lsn.file >= ckplsn->file);
+ bytes = 0;
+ if (ckplsn->file != lp->lsn.file) {
+ bytes = lp->log_size - ckplsn->offset;
+ if (lp->lsn.file > ckplsn->file + 1)
+ bytes += lp->log_size *
+ (lp->lsn.file - ckplsn->file - 1);
+ bytes += lp->lsn.offset;
+ } else
+ bytes = lp->lsn.offset - ckplsn->offset;
+
+ lp->stat.st_wc_mbytes += bytes / MEGABYTE;
+ lp->stat.st_wc_bytes += bytes % MEGABYTE;
+
+ /*
+ * If the saved lsn is greater than our new end of log, reset it
+ * to our current end of log.
+ */
+ if (log_compare(&lp->s_lsn, lsn) > 0)
+ lp->s_lsn = lp->lsn;
+
+ /*
+ * If the new end of log is in the middle of the buffer,
+ * don't change the w_off or f_lsn. If the new end is
+ * before the w_off then reset w_off and f_lsn to the new
+ * end of log.
+ */
+ if (lp->w_off >= lp->lsn.offset) {
+ lp->f_lsn = lp->lsn;
+ lp->w_off = lp->lsn.offset;
+ lp->b_off = 0;
+ } else
+ lp->b_off = lp->lsn.offset - lp->w_off;
+
+ ZERO_LSN(lp->waiting_lsn);
+ lp->ready_lsn = lp->lsn;
+ lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+
+ /* Now throw away any extra log files that we have around. */
+ for (fn = lp->lsn.file + 1;; fn++) {
+ if (__log_name(dblp, fn, &fname, &fh, DB_OSO_RDONLY) != 0) {
+ __os_free(dbenv, fname);
+ break;
+ }
+ (void)__os_closehandle(dbenv, &fh);
+ ret = __os_unlink(dbenv, fname);
+ __os_free(dbenv, fname);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Truncate the log to the new point. */
+ if ((ret = __log_zero(dbenv, &lp->lsn, &end_lsn)) != 0)
+ goto err;
+
+err: R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
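The bytes-since-checkpoint arithmetic above spans log file boundaries using a fixed per-file size. A minimal standalone sketch of that calculation, assuming a hypothetical lsn struct and a constant per-file size rather than the library's DB_LSN and LOG region fields:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for DB_LSN: a file number and a byte offset. */
struct lsn { uint32_t file; uint32_t offset; };

/*
 * Bytes logged between ckp (earlier) and cur (later), assuming every log
 * file holds log_size bytes -- the same shape as the calculation above.
 */
static uint64_t
bytes_between(struct lsn ckp, struct lsn cur, uint32_t log_size)
{
	uint64_t bytes;

	if (ckp.file == cur.file)
		return (cur.offset - ckp.offset);

	bytes = log_size - ckp.offset;		/* tail of the checkpoint file */
	bytes += (uint64_t)(cur.file - ckp.file - 1) * log_size; /* whole files in between */
	bytes += cur.offset;			/* head of the current file */
	return (bytes);
}

int
main(void)
{
	struct lsn ckp = { 3, 1000 }, cur = { 5, 200 };

	/* (10000 - 1000) + 1 * 10000 + 200 = 19200 */
	printf("%llu\n", (unsigned long long)bytes_between(ckp, cur, 10000));
	return (0);
}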
+
+/*
+ * __log_is_outdated --
+ * Used by the replication system to identify if a client's logs
+ * are too old. The log represented by dbenv is compared to the file
+ * number passed in fnum. If the log file fnum does not exist and is
+ * lower-numbered than the current logs, then we return *outdatedp
+ * non-zero; otherwise we return it zero.
+ *
+ * PUBLIC: int __log_is_outdated __P((DB_ENV *dbenv,
+ * PUBLIC: u_int32_t fnum, int *outdatedp));
+ */
+int
+__log_is_outdated(dbenv, fnum, outdatedp)
+ DB_ENV *dbenv;
+ u_int32_t fnum;
+ int *outdatedp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ char *name;
+ int ret;
+ u_int32_t cfile;
+
+ dblp = dbenv->lg_handle;
+ *outdatedp = 0;
+
+ if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+ return (ret);
+
+ /* If the file exists, we're just fine. */
+ if (__os_exists(name, NULL) == 0)
+ goto out;
+
+ /*
+ * It didn't exist, decide if the file number is too big or
+ * too little. If it's too little, then we need to indicate
+ * that the LSN is outdated.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = (LOG *)dblp->reginfo.primary;
+ cfile = lp->lsn.file;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (cfile > fnum)
+ *outdatedp = 1;
+out: __os_free(dbenv, name);
+ return (ret);
+}
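The outdated test is simply "the file is missing and its number is lower than the current log file". A plain POSIX sketch of the same check; the log.%010u name pattern and the directory argument are assumptions for illustration, since the real code builds names through __log_name():

#include <stdio.h>
#include <unistd.h>

/* Return 1 if log file fnum is missing and older than the current file. */
static int
logfile_is_outdated(const char *dir, unsigned fnum, unsigned current_fnum)
{
	char path[1024];

	snprintf(path, sizeof(path), "%s/log.%010u", dir, fnum);
	if (access(path, F_OK) == 0)
		return (0);		/* still present: not outdated */
	return (current_fnum > fnum);	/* missing and lower-numbered */
}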
+
+/*
+ * __log_zero --
+ * Zero out the tail of a log after a truncate.
+ */
+static int
+__log_zero(dbenv, from_lsn, to_lsn)
+ DB_ENV *dbenv;
+ DB_LSN *from_lsn, *to_lsn;
+{
+ char *lname;
+ DB_LOG *dblp;
+ LOG *lp;
+ int ret;
+ size_t nbytes, len, nw;
+ u_int8_t buf[4096];
+ u_int32_t mbytes, bytes;
+
+ dblp = dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+ lname = NULL;
+
+ if (dblp->lfname != lp->lsn.file) {
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &dblp->lfh);
+ dblp->lfname = lp->lsn.file;
+ }
+
+ if (from_lsn->file != to_lsn->file) {
+ /* We removed some log files; have to 0 to end of file. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) && (ret =
+ __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ return (ret);
+ if ((ret = __os_ioinfo(dbenv,
+ NULL, &dblp->lfh, &mbytes, &bytes, NULL)) != 0)
+ goto err;
+ len = mbytes * MEGABYTE + bytes - from_lsn->offset;
+ } else if (to_lsn->offset <= from_lsn->offset)
+ return (0);
+ else
+ len = to_lsn->offset - from_lsn->offset;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* Initialize the write position. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) &&
+ (ret = __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_seek(dbenv,
+ &dblp->lfh, 0, 0, from_lsn->offset, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+
+ while (len > 0) {
+ nbytes = len > sizeof(buf) ? sizeof(buf) : len;
+ if ((ret =
+ __os_write(dbenv, &dblp->lfh, buf, nbytes, &nw)) != 0)
+ return (ret);
+ len -= nbytes;
+ }
+err: if (lname != NULL)
+ __os_free(dbenv, lname);
+
+ return (0);
+}
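__log_zero fills the truncated tail with zero bytes from a small stack buffer. The same chunked-write loop, reduced to raw POSIX calls on an assumed file path instead of the library's DB_FH handles, looks roughly like this:

#include <sys/types.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/*
 * Zero out the byte range [from, to) in the named file, writing at most
 * 4KB per call -- a simplified analogue of __log_zero using raw POSIX I/O.
 */
static int
zero_file_range(const char *path, off_t from, off_t to)
{
	char buf[4096];
	ssize_t nw;
	size_t nbytes;
	off_t len;
	int fd;

	if (to <= from)
		return (0);
	if ((fd = open(path, O_WRONLY)) == -1)
		return (-1);
	if (lseek(fd, from, SEEK_SET) == (off_t)-1) {
		(void)close(fd);
		return (-1);
	}
	memset(buf, 0, sizeof(buf));
	for (len = to - from; len > 0; len -= nw) {
		nbytes = len > (off_t)sizeof(buf) ? sizeof(buf) : (size_t)len;
		if ((nw = write(fd, buf, nbytes)) <= 0) {
			(void)close(fd);
			return (-1);
		}
	}
	return (close(fd));
}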
diff --git a/libdb/log/log_archive.c b/libdb/log/log_archive.c
new file mode 100644
index 0000000..f52403e
--- /dev/null
+++ b/libdb/log/log_archive.c
@@ -0,0 +1,486 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __absname __P((DB_ENV *, char *, char *, char **));
+static int __build_data __P((DB_ENV *, char *, char ***));
+static int __cmpfunc __P((const void *, const void *));
+static int __usermem __P((DB_ENV *, char ***));
+
+/*
+ * __log_archive --
+ * Supporting function for db_archive(1).
+ *
+ * PUBLIC: int __log_archive __P((DB_ENV *, char **[], u_int32_t));
+ */
+int
+__log_archive(dbenv, listp, flags)
+ DB_ENV *dbenv;
+ char ***listp;
+ u_int32_t flags;
+{
+ DBT rec;
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN stable_lsn;
+ __txn_ckp_args *ckp_args;
+ char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN];
+ int array_size, db_arch_abs, n, ret;
+ u_int32_t fnum;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_archive", DB_INIT_LOG);
+
+ name = NULL;
+ dblp = dbenv->lg_handle;
+ COMPQUIET(fnum, 0);
+
+#define OKFLAGS (DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)
+ if (flags != 0) {
+ if ((ret = __db_fchk(
+ dbenv, "DB_ENV->log_archive", flags, OKFLAGS)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive",
+ flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0)
+ return (ret);
+ }
+
+ if (LF_ISSET(DB_ARCH_ABS)) {
+ db_arch_abs = 1;
+ LF_CLR(DB_ARCH_ABS);
+ } else
+ db_arch_abs = 0;
+
+ if (flags == 0 || flags == DB_ARCH_DATA)
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->tx_handle, "DB_ENV->log_archive", DB_INIT_TXN);
+
+ /*
+ * Get the absolute pathname of the current directory. It would
+ * be nice to get the shortest pathname of the database directory,
+ * but that's just not possible.
+ *
+ * XXX
+ * Can't trust getcwd(3) to set a valid errno. If it doesn't, just
+ * guess that we ran out of memory.
+ */
+ if (db_arch_abs) {
+ __os_set_errno(0);
+ if ((pref = getcwd(buf, sizeof(buf))) == NULL) {
+ if (__os_get_errno() == 0)
+ __os_set_errno(ENOMEM);
+ return (__os_get_errno());
+ }
+ } else
+ pref = NULL;
+
+ switch (flags) {
+ case DB_ARCH_DATA:
+ return (__build_data(dbenv, pref, listp));
+ case DB_ARCH_LOG:
+ memset(&rec, 0, sizeof(rec));
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+#ifdef UMRW
+ ZERO_LSN(stable_lsn);
+#endif
+ ret = logc->get(logc, &stable_lsn, &rec, DB_LAST);
+ (void)logc->close(logc, 0);
+ if (ret != 0)
+ return (ret);
+ fnum = stable_lsn.file;
+ break;
+ case 0:
+ memset(&rec, 0, sizeof(rec));
+ if (__txn_getckp(dbenv, &stable_lsn) != 0) {
+ /*
+ * A failure return means that there's no checkpoint
+ * in the log (so we are not going to be deleting
+ * any log files).
+ */
+ *listp = NULL;
+ return (0);
+ }
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ if ((ret = logc->get(logc, &stable_lsn, &rec, DB_SET)) != 0 ||
+ (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+ /*
+ * A return of DB_NOTFOUND may only mean that the
+ * checkpoint LSN is before the beginning of the
+ * log files that we still have. This is not
+ * an error; it just means our work is done.
+ */
+ if (ret == DB_NOTFOUND) {
+ *listp = NULL;
+ ret = 0;
+ }
+ (void)logc->close(logc, 0);
+ return (ret);
+ }
+ if ((ret = logc->close(logc, 0)) != 0)
+ return (ret);
+ stable_lsn = ckp_args->ckp_lsn;
+ __os_free(dbenv, ckp_args);
+
+ /* Remove any log files before the last stable LSN. */
+ fnum = stable_lsn.file - 1;
+ break;
+ }
+
+#define LIST_INCREMENT 64
+ /* Get some initial space. */
+ array_size = 64;
+ if ((ret = __os_malloc(dbenv,
+ sizeof(char *) * array_size, &array)) != 0)
+ return (ret);
+ array[0] = NULL;
+
+ /* Build an array of the file names. */
+ for (n = 0; fnum > 0; --fnum) {
+ if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+ goto err;
+ if (__os_exists(name, NULL) != 0) {
+ if (LF_ISSET(DB_ARCH_LOG) && fnum == stable_lsn.file)
+ continue;
+ __os_free(dbenv, name);
+ name = NULL;
+ break;
+ }
+
+ if (n >= array_size - 2) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) * array_size, &array)) != 0)
+ goto err;
+ }
+
+ if (db_arch_abs) {
+ if ((ret = __absname(dbenv,
+ pref, name, &array[n])) != 0)
+ goto err;
+ __os_free(dbenv, name);
+ } else if ((p = __db_rpath(name)) != NULL) {
+ if ((ret = __os_strdup(dbenv, p + 1, &array[n])) != 0)
+ goto err;
+ __os_free(dbenv, name);
+ } else
+ array[n] = name;
+
+ name = NULL;
+ array[++n] = NULL;
+ }
+
+ /* If there's nothing to return, we're done. */
+ if (n == 0) {
+ *listp = NULL;
+ ret = 0;
+ goto err;
+ }
+
+ /* Sort the list. */
+ qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+ /* Rework the memory. */
+ if ((ret = __usermem(dbenv, &array)) != 0)
+ goto err;
+
+ *listp = array;
+ return (0);
+
+err: if (array != NULL) {
+ for (arrayp = array; *arrayp != NULL; ++arrayp)
+ __os_free(dbenv, *arrayp);
+ __os_free(dbenv, array);
+ }
+ if (name != NULL)
+ __os_free(dbenv, name);
+ return (ret);
+}
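Both __log_archive above and __build_data below grow a NULL-terminated array of names in LIST_INCREMENT steps, always leaving room for the terminator. A stripped-down sketch of that pattern with plain realloc/strdup (list_append and its parameters are illustrative, not library API):

#include <stdlib.h>
#include <string.h>

#define LIST_INCREMENT	64

/*
 * Append a copy of name to a NULL-terminated string array, growing the
 * array as needed.  *np is the used-slot count, *sizep the allocated count.
 */
static int
list_append(char ***arrayp, int *np, int *sizep, const char *name)
{
	char **array = *arrayp, *copy;

	if (*np >= *sizep - 2) {		/* keep room for entry + NULL */
		*sizep += LIST_INCREMENT;
		if ((array = realloc(array, sizeof(char *) * *sizep)) == NULL)
			return (-1);
		*arrayp = array;
	}
	if ((copy = strdup(name)) == NULL)
		return (-1);
	array[(*np)++] = copy;
	array[*np] = NULL;
	return (0);
}

The caller seeds the array with an initial allocation, array[0] = NULL and a zero count, mirroring the setup at the top of both functions; on realloc failure the old array is still owned by the caller, as in the originals' error paths.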
+
+/*
+ * __build_data --
+ * Build a list of datafiles for return.
+ */
+static int
+__build_data(dbenv, pref, listp)
+ DB_ENV *dbenv;
+ char *pref, ***listp;
+{
+ DBT rec;
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ __dbreg_register_args *argp;
+ u_int32_t rectype;
+ int array_size, last, n, nxt, ret, t_ret;
+ char **array, **arrayp, **list, **lp, *p, *real_name;
+
+ /* Get some initial space. */
+ array_size = 64;
+ if ((ret = __os_malloc(dbenv,
+ sizeof(char *) * array_size, &array)) != 0)
+ return (ret);
+ array[0] = NULL;
+
+ memset(&rec, 0, sizeof(rec));
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ for (n = 0; (ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0;) {
+ if (rec.size < sizeof(rectype)) {
+ ret = EINVAL;
+ __db_err(dbenv, "DB_ENV->log_archive: bad log record");
+ goto free_continue;
+ }
+
+ memcpy(&rectype, rec.data, sizeof(rectype));
+ if (rectype != DB___dbreg_register)
+ continue;
+ if ((ret =
+ __dbreg_register_read(dbenv, rec.data, &argp)) != 0) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "DB_ENV->log_archive: unable to read log record");
+ goto free_continue;
+ }
+
+ if (n >= array_size - 2) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) * array_size, &array)) != 0)
+ goto free_continue;
+ }
+
+ if ((ret = __os_strdup(dbenv,
+ argp->name.data, &array[n++])) != 0)
+ goto free_continue;
+ array[n] = NULL;
+
+ if (argp->ftype == DB_QUEUE) {
+ if ((ret = __qam_extent_names(dbenv,
+ argp->name.data, &list)) != 0)
+ goto q_err;
+ for (lp = list;
+ lp != NULL && *lp != NULL; lp++) {
+ if (n >= array_size - 2) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) *
+ array_size, &array)) != 0)
+ goto q_err;
+ }
+ if ((ret =
+ __os_strdup(dbenv, *lp, &array[n++])) != 0)
+ goto q_err;
+ array[n] = NULL;
+ }
+q_err: if (list != NULL)
+ __os_free(dbenv, list);
+ }
+free_continue: __os_free(dbenv, argp);
+ if (ret != 0)
+ break;
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err1;
+
+ /* If there's nothing to return, we're done. */
+ if (n == 0) {
+ ret = 0;
+ *listp = NULL;
+ goto err1;
+ }
+
+ /* Sort the list. */
+ qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+ /*
+ * Build the real pathnames, discarding nonexistent files and
+ * duplicates.
+ */
+ for (last = nxt = 0; nxt < n;) {
+ /*
+ * Discard duplicates. Last is the next slot we're going
+ * to return to the user, nxt is the next slot that we're
+ * going to consider.
+ */
+ if (last != nxt) {
+ array[last] = array[nxt];
+ array[nxt] = NULL;
+ }
+ for (++nxt; nxt < n &&
+ strcmp(array[last], array[nxt]) == 0; ++nxt) {
+ __os_free(dbenv, array[nxt]);
+ array[nxt] = NULL;
+ }
+
+ /* Get the real name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, array[last], 0, NULL, &real_name)) != 0)
+ goto err2;
+
+ /* If the file doesn't exist, ignore it. */
+ if (__os_exists(real_name, NULL) != 0) {
+ __os_free(dbenv, real_name);
+ __os_free(dbenv, array[last]);
+ array[last] = NULL;
+ continue;
+ }
+
+ /* Rework the name as requested by the user. */
+ __os_free(dbenv, array[last]);
+ array[last] = NULL;
+ if (pref != NULL) {
+ ret = __absname(dbenv, pref, real_name, &array[last]);
+ __os_free(dbenv, real_name);
+ if (ret != 0)
+ goto err2;
+ } else if ((p = __db_rpath(real_name)) != NULL) {
+ ret = __os_strdup(dbenv, p + 1, &array[last]);
+ __os_free(dbenv, real_name);
+ if (ret != 0)
+ goto err2;
+ } else
+ array[last] = real_name;
+ ++last;
+ }
+
+ /* NULL-terminate the list. */
+ array[last] = NULL;
+
+ /* Rework the memory. */
+ if ((ret = __usermem(dbenv, &array)) != 0)
+ goto err1;
+
+ *listp = array;
+ return (0);
+
+err2: /*
+ * XXX
+ * We've possibly inserted NULLs into the array list, so clean up a
+ * bit so that the other error processing works.
+ */
+ if (array != NULL)
+ for (; nxt < n; ++nxt)
+ __os_free(dbenv, array[nxt]);
+ /* FALLTHROUGH */
+
+err1: if (array != NULL) {
+ for (arrayp = array; *arrayp != NULL; ++arrayp)
+ __os_free(dbenv, *arrayp);
+ __os_free(dbenv, array);
+ }
+ return (ret);
+}
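The last/nxt loop above compacts a qsort'ed array by freeing adjacent duplicates before the survivors are converted to real pathnames. Just that compaction step, as a self-contained helper using free/strcmp directly rather than the __os_* wrappers; the array is assumed to have room for a trailing NULL:

#include <stdlib.h>
#include <string.h>

/*
 * Remove adjacent duplicates from a qsort'ed array of n strings, freeing
 * the dropped copies; returns the new entry count.
 */
static int
dedupe_sorted(char **array, int n)
{
	int last, nxt;

	for (last = nxt = 0; nxt < n;) {
		if (last != nxt) {
			array[last] = array[nxt];
			array[nxt] = NULL;
		}
		for (++nxt;
		    nxt < n && strcmp(array[last], array[nxt]) == 0; ++nxt) {
			free(array[nxt]);
			array[nxt] = NULL;
		}
		++last;
	}
	array[last] = NULL;
	return (last);
}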
+
+/*
+ * __absname --
+ * Return an absolute path name for the file.
+ */
+static int
+__absname(dbenv, pref, name, newnamep)
+ DB_ENV *dbenv;
+ char *pref, *name, **newnamep;
+{
+ size_t l_pref, l_name;
+ int isabspath, ret;
+ char *newname;
+
+ l_name = strlen(name);
+ isabspath = __os_abspath(name);
+ l_pref = isabspath ? 0 : strlen(pref);
+
+ /* Malloc space for concatenating the two. */
+ if ((ret = __os_malloc(dbenv,
+ l_pref + l_name + 2, &newname)) != 0)
+ return (ret);
+ *newnamep = newname;
+
+ /* Build the name. If `name' is an absolute path, ignore any prefix. */
+ if (!isabspath) {
+ memcpy(newname, pref, l_pref);
+ if (strchr(PATH_SEPARATOR, newname[l_pref - 1]) == NULL)
+ newname[l_pref++] = PATH_SEPARATOR[0];
+ }
+ memcpy(newname + l_pref, name, l_name + 1);
+
+ return (0);
+}
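A standalone version of the same join logic, simplified to a single '/' separator and a plain malloc; the real routine consults __os_abspath() and the platform's PATH_SEPARATOR set, so treat this only as an illustration:

#include <stdlib.h>
#include <string.h>

/*
 * Concatenate pref and name into a freshly malloc'd string, unless name
 * is already absolute, in which case pref is ignored.
 */
static char *
abs_name(const char *pref, const char *name)
{
	size_t l_pref, l_name;
	char *newname;

	l_name = strlen(name);
	l_pref = (name[0] == '/') ? 0 : strlen(pref);

	if ((newname = malloc(l_pref + l_name + 2)) == NULL)
		return (NULL);

	if (l_pref != 0) {
		memcpy(newname, pref, l_pref);
		if (newname[l_pref - 1] != '/')
			newname[l_pref++] = '/';
	}
	memcpy(newname + l_pref, name, l_name + 1);
	return (newname);
}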
+
+/*
+ * __usermem --
+ * Create a single chunk of memory that holds the returned information.
+ * If the user has their own malloc routine, use it.
+ */
+static int
+__usermem(dbenv, listp)
+ DB_ENV *dbenv;
+ char ***listp;
+{
+ size_t len;
+ int ret;
+ char **array, **arrayp, **orig, *strp;
+
+ /* Find out how much space we need. */
+ for (len = 0, orig = *listp; *orig != NULL; ++orig)
+ len += sizeof(char *) + strlen(*orig) + 1;
+ len += sizeof(char *);
+
+ /* Allocate it and set up the pointers. */
+ if ((ret = __os_umalloc(dbenv, len, &array)) != 0)
+ return (ret);
+
+ strp = (char *)(array + (orig - *listp) + 1);
+
+ /* Copy the original information into the new memory. */
+ for (orig = *listp, arrayp = array; *orig != NULL; ++orig, ++arrayp) {
+ len = strlen(*orig);
+ memcpy(strp, *orig, len + 1);
+ *arrayp = strp;
+ strp += len + 1;
+
+ __os_free(dbenv, *orig);
+ }
+
+ /* NULL-terminate the list. */
+ *arrayp = NULL;
+
+ __os_free(dbenv, *listp);
+ *listp = array;
+
+ return (0);
+}
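__usermem packs the pointer array and the string bytes into one allocation so the caller can release the whole result with a single free (or the application's own free routine). A plain-malloc sketch of the same layout -- count+1 pointers followed by the string data -- that copies rather than consuming its input:

#include <stdlib.h>
#include <string.h>

/*
 * Copy a NULL-terminated list of strings into a single malloc'd block:
 * (count + 1) pointers followed by the string data, freed with one free().
 */
static char **
pack_list(char **list)
{
	size_t count, len, slen;
	char **array, **arrayp, **orig, *strp;

	count = len = 0;
	for (orig = list; *orig != NULL; ++orig) {
		++count;
		len += strlen(*orig) + 1;
	}
	len += (count + 1) * sizeof(char *);

	if ((array = malloc(len)) == NULL)
		return (NULL);
	strp = (char *)(array + count + 1);

	for (orig = list, arrayp = array; *orig != NULL; ++orig, ++arrayp) {
		slen = strlen(*orig) + 1;
		memcpy(strp, *orig, slen);
		*arrayp = strp;
		strp += slen;
	}
	*arrayp = NULL;
	return (array);
}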
+
+static int
+__cmpfunc(p1, p2)
+ const void *p1, *p2;
+{
+ return (strcmp(*((char * const *)p1), *((char * const *)p2)));
+}
diff --git a/libdb/log/log_compare.c b/libdb/log/log_compare.c
new file mode 100644
index 0000000..887ba0e
--- /dev/null
+++ b/libdb/log/log_compare.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * log_compare --
+ * Compare two LSN's; return 1, 0, -1 if first is >, == or < second.
+ *
+ * EXTERN: int log_compare __P((const DB_LSN *, const DB_LSN *));
+ */
+int
+log_compare(lsn0, lsn1)
+ const DB_LSN *lsn0, *lsn1;
+{
+ if (lsn0->file != lsn1->file)
+ return (lsn0->file < lsn1->file ? -1 : 1);
+
+ if (lsn0->offset != lsn1->offset)
+ return (lsn0->offset < lsn1->offset ? -1 : 1);
+
+ return (0);
+}
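A quick usage sketch of the same two-field ordering, against a locally defined stand-in for DB_LSN (the real type comes from db.h): earlier files sort first, then earlier offsets within a file.

#include <assert.h>
#include <stdint.h>

struct my_lsn { uint32_t file; uint32_t offset; };	/* stand-in for DB_LSN */

static int
my_lsn_compare(const struct my_lsn *a, const struct my_lsn *b)
{
	if (a->file != b->file)
		return (a->file < b->file ? -1 : 1);
	if (a->offset != b->offset)
		return (a->offset < b->offset ? -1 : 1);
	return (0);
}

int
main(void)
{
	struct my_lsn a = { 2, 500 }, b = { 3, 0 }, c = { 2, 500 };

	assert(my_lsn_compare(&a, &b) < 0);	/* earlier file sorts first */
	assert(my_lsn_compare(&b, &a) > 0);
	assert(my_lsn_compare(&a, &c) == 0);
	return (0);
}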
diff --git a/libdb/log/log_get.c b/libdb/log/log_get.c
new file mode 100644
index 0000000..faf0be1
--- /dev/null
+++ b/libdb/log/log_get.c
@@ -0,0 +1,1058 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
+
+typedef enum { L_ALREADY, L_ACQUIRED, L_NONE } RLOCK;
+
+static int __log_c_close __P((DB_LOGC *, u_int32_t));
+static int __log_c_get __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_get_int __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_hdrchk __P((DB_LOGC *, HDR *, int *));
+static int __log_c_incursor __P((DB_LOGC *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_inregion __P((DB_LOGC *,
+ DB_LSN *, RLOCK *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_io __P((DB_LOGC *,
+ u_int32_t, u_int32_t, void *, size_t *, int *));
+static int __log_c_ondisk __P((DB_LOGC *,
+ DB_LSN *, DB_LSN *, int, HDR *, u_int8_t **, int *));
+static int __log_c_set_maxrec __P((DB_LOGC *, char *));
+static int __log_c_shortread __P((DB_LOGC *, int));
+
+/*
+ * __log_cursor --
+ * Create a log cursor.
+ *
+ * PUBLIC: int __log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ */
+int
+__log_cursor(dbenv, logcp, flags)
+ DB_ENV *dbenv;
+ DB_LOGC **logcp;
+ u_int32_t flags;
+{
+ DB_LOGC *logc;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_cursor", DB_INIT_LOG);
+
+ *logcp = NULL;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->log_cursor", flags, 0)) != 0)
+ return (ret);
+
+ /* Allocate memory for the cursor. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOGC), &logc)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &logc->c_fh)) != 0)
+ goto err;
+
+ logc->bp_size = DB_LOGC_BUF_SIZE;
+ if ((ret = __os_malloc(dbenv, logc->bp_size, &logc->bp)) != 0)
+ goto err;
+
+ logc->dbenv = dbenv;
+ logc->close = __log_c_close;
+ logc->get = __log_c_get;
+
+ *logcp = logc;
+ return (0);
+
+err: if (logc != NULL) {
+ if (logc->c_fh != NULL)
+ __os_free(dbenv, logc->c_fh);
+ __os_free(dbenv, logc);
+ }
+
+ return (ret);
+}
+
+/*
+ * __log_c_close --
+ * Close a log cursor.
+ */
+static int
+__log_c_close(logc, flags)
+ DB_LOGC *logc;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = logc->dbenv;
+
+ PANIC_CHECK(dbenv);
+ if ((ret = __db_fchk(dbenv, "DB_LOGC->close", flags, 0)) != 0)
+ return (ret);
+
+ if (F_ISSET(logc->c_fh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, logc->c_fh);
+
+ if (logc->c_dbt.data != NULL)
+ __os_free(dbenv, logc->c_dbt.data);
+
+ __os_free(dbenv, logc->bp);
+ __os_free(dbenv, logc->c_fh);
+ __os_free(dbenv, logc);
+
+ return (0);
+}
+
+/*
+ * __log_c_get --
+ * Get a log record.
+ */
+static int
+__log_c_get(logc, alsn, dbt, flags)
+ DB_LOGC *logc;
+ DB_LSN *alsn;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_LSN saved_lsn;
+ int ret;
+
+ dbenv = logc->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ switch (flags) {
+ case DB_CURRENT:
+ case DB_FIRST:
+ case DB_LAST:
+ case DB_NEXT:
+ case DB_PREV:
+ break;
+ case DB_SET:
+ if (IS_ZERO_LSN(*alsn)) {
+ __db_err(dbenv, "DB_LOGC->get: invalid LSN");
+ return (EINVAL);
+ }
+ break;
+ default:
+ return (__db_ferr(dbenv, "DB_LOGC->get", 1));
+ }
+
+ /*
+ * On error, we take care not to overwrite the caller's LSN. This
+ * is because callers looking for the end of the log loop using the
+ * DB_NEXT flag, and expect to take the last successful lsn out of
+ * the passed-in structure after DB_LOGC->get fails with DB_NOTFOUND.
+ *
+ * !!!
+ * This line is often flagged an uninitialized memory read during a
+ * Purify or similar tool run, as the application didn't initialize
+ * *alsn. If the application isn't setting the DB_SET flag, there is
+ * no reason it should have initialized *alsn, but we can't know that
+ * and we want to make sure we never overwrite whatever the application
+ * put in there.
+ */
+ saved_lsn = *alsn;
+
+ /*
+ * If we get one of the log's header records as a result of doing a
+ * DB_FIRST, DB_NEXT, DB_LAST or DB_PREV, repeat the operation; log
+ * file header records aren't useful to applications.
+ */
+ if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+ *alsn = saved_lsn;
+ return (ret);
+ }
+ if (alsn->offset == 0 && (flags == DB_FIRST ||
+ flags == DB_NEXT || flags == DB_LAST || flags == DB_PREV)) {
+ switch (flags) {
+ case DB_FIRST:
+ flags = DB_NEXT;
+ break;
+ case DB_LAST:
+ flags = DB_PREV;
+ break;
+ }
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ __os_free(dbenv, dbt->data);
+ dbt->data = NULL;
+ }
+ if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+ *alsn = saved_lsn;
+ return (ret);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __log_c_get_int --
+ * Get a log record; internal version.
+ */
+static int
+__log_c_get_int(logc, alsn, dbt, flags)
+ DB_LOGC *logc;
+ DB_LSN *alsn;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN last_lsn, nlsn;
+ HDR hdr;
+ LOG *lp;
+ RLOCK rlock;
+ logfile_validity status;
+ u_int32_t cnt;
+ u_int8_t *rp;
+ int eof, is_hmac, ret;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ is_hmac = 0;
+
+ /*
+ * We don't acquire the log region lock until we need it, and we
+ * release it as soon as we're done.
+ */
+ rlock = F_ISSET(logc, DB_LOG_LOCKED) ? L_ALREADY : L_NONE;
+
+ nlsn = logc->c_lsn;
+ switch (flags) {
+ case DB_NEXT: /* Next log record. */
+ if (!IS_ZERO_LSN(nlsn)) {
+ /* Increment the cursor by the cursor record size. */
+ nlsn.offset += logc->c_len;
+ break;
+ }
+ flags = DB_FIRST;
+ /* FALLTHROUGH */
+ case DB_FIRST: /* First log record. */
+ /* Find the first log file. */
+ if ((ret = __log_find(dblp, 1, &cnt, &status)) != 0)
+ goto err;
+
+ /*
+ * DB_LV_INCOMPLETE:
+ * Theoretically, the log file we want could be created
+ * but not yet written; the "first" log record must be
+ * in the log buffer.
+ * DB_LV_NORMAL:
+ * DB_LV_OLD_READABLE:
+ * We found a log file we can read.
+ * DB_LV_NONEXISTENT:
+ * No log files exist, the "first" log record must be in
+ * the log buffer.
+ * DB_LV_OLD_UNREADABLE:
+ * No readable log files exist, we're at the cross-over
+ * point between two versions. The "first" log record
+ * must be in the log buffer.
+ */
+ switch (status) {
+ case DB_LV_INCOMPLETE:
+ DB_ASSERT(lp->lsn.file == cnt);
+ /* FALLTHROUGH */
+ case DB_LV_NORMAL:
+ case DB_LV_OLD_READABLE:
+ nlsn.file = cnt;
+ break;
+ case DB_LV_NONEXISTENT:
+ nlsn.file = 1;
+ DB_ASSERT(lp->lsn.file == nlsn.file);
+ break;
+ case DB_LV_OLD_UNREADABLE:
+ nlsn.file = cnt + 1;
+ DB_ASSERT(lp->lsn.file == nlsn.file);
+ break;
+ }
+ nlsn.offset = 0;
+ break;
+ case DB_CURRENT: /* Current log record. */
+ break;
+ case DB_PREV: /* Previous log record. */
+ if (!IS_ZERO_LSN(nlsn)) {
+ /* If at start-of-file, move to the previous file. */
+ if (nlsn.offset == 0) {
+ if (nlsn.file == 1 ||
+ __log_valid(dblp,
+ nlsn.file - 1, 0, &status) != 0) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if (status != DB_LV_NORMAL &&
+ status != DB_LV_OLD_READABLE) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ --nlsn.file;
+ }
+ nlsn.offset = logc->c_prev;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST: /* Last log record. */
+ if (rlock == L_NONE) {
+ rlock = L_ACQUIRED;
+ R_LOCK(dbenv, &dblp->reginfo);
+ }
+ nlsn.file = lp->lsn.file;
+ nlsn.offset = lp->lsn.offset - lp->len;
+ break;
+ case DB_SET: /* Set log record. */
+ nlsn = *alsn;
+ break;
+ }
+
+ if (0) { /* Move to the next file. */
+next_file: ++nlsn.file;
+ nlsn.offset = 0;
+ }
+
+ /*
+ * The above switch statement should have set nlsn to the lsn of
+ * the requested record.
+ */
+
+ if (CRYPTO_ON(dbenv)) {
+ hdr.size = HDR_CRYPTO_SZ;
+ is_hmac = 1;
+ } else {
+ hdr.size = HDR_NORMAL_SZ;
+ is_hmac = 0;
+ }
+ /* Check to see if the record is in the cursor's buffer. */
+ if ((ret = __log_c_incursor(logc, &nlsn, &hdr, &rp)) != 0)
+ goto err;
+ if (rp != NULL)
+ goto cksum;
+
+ /*
+ * Look to see if we're moving backward in the log with the last record
+ * coming from the disk -- it means the record can't be in the region's
+ * buffer. Else, check the region's buffer.
+ *
+ * If the record isn't in the region's buffer, we're going to have to
+ * read the record from disk. We want to make a point of not reading
+ * past the end of the logical log (after recovery, there may be data
+ * after the end of the logical log, not to mention the log file may
+ * have been pre-allocated). So, zero out last_lsn, and initialize it
+ * inside __log_c_inregion -- if it's still zero when we check it in
+ * __log_c_ondisk, that's OK, it just means the logical end of the log
+ * isn't an issue for this request.
+ */
+ ZERO_LSN(last_lsn);
+ if (!F_ISSET(logc, DB_LOG_DISK) ||
+ log_compare(&nlsn, &logc->c_lsn) > 0) {
+ F_CLR(logc, DB_LOG_DISK);
+
+ if ((ret = __log_c_inregion(logc,
+ &nlsn, &rlock, &last_lsn, &hdr, &rp)) != 0)
+ goto err;
+ if (rp != NULL)
+ goto cksum;
+ }
+
+ /*
+ * We have to read from an on-disk file to retrieve the record.
+ * If we ever can't retrieve the record at offset 0, we're done,
+ * return EOF/DB_NOTFOUND.
+ *
+ * Discard the region lock if we're still holding it, the on-disk
+ * reading routines don't need it.
+ */
+ if (rlock == L_ACQUIRED) {
+ rlock = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+ if ((ret = __log_c_ondisk(
+ logc, &nlsn, &last_lsn, flags, &hdr, &rp, &eof)) != 0)
+ goto err;
+ if (eof == 1) {
+ /*
+ * Only DB_NEXT automatically moves to the next file, and
+ * it only happens once.
+ */
+ if (flags != DB_NEXT || nlsn.offset == 0)
+ return (DB_NOTFOUND);
+ goto next_file;
+ }
+ F_SET(logc, DB_LOG_DISK);
+
+cksum: /*
+ * Discard the region lock if we're still holding it. (The path to
+ * get here is that we acquired the lock because of the caller's
+ * flag argument, but we found the record in the cursor's buffer.
+ * Improbable, but it's easy to avoid.)
+ */
+ if (rlock == L_ACQUIRED) {
+ rlock = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ /*
+ * Checksum: there are two types of errors -- a configuration error
+ * or a checksum mismatch. The former is always bad. The latter is
+ * OK if we're searching for the end of the log, and very, very bad
+ * if we're reading random log records.
+ */
+ db_cipher = dbenv->crypto_handle;
+ if ((ret = __db_check_chksum(dbenv, db_cipher,
+ hdr.chksum, rp + hdr.size, hdr.len - hdr.size, is_hmac)) != 0) {
+ if (F_ISSET(logc, DB_LOG_SILENT_ERR)) {
+ if (ret == 0 || ret == -1)
+ ret = EIO;
+ } else if (ret == -1) {
+ __db_err(dbenv,
+ "DB_LOGC->get: log record checksum mismatch");
+ __db_err(dbenv,
+ "DB_LOGC->get: catastrophic recovery may be required");
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+ goto err;
+ }
+
+ /*
+ * If we got a 0-length record, that means we're in the midst of
+ * some bytes that got 0'd as the result of a vtruncate. We're
+ * going to have to retry.
+ */
+ if (hdr.len == 0) {
+ switch (flags) {
+ case DB_FIRST:
+ case DB_NEXT:
+ /* Zero'd records always indicate the end of a file. */
+ goto next_file;
+
+ case DB_LAST:
+ case DB_PREV:
+ /*
+ * We should never get here. If we recover a log
+ * file with 0's at the end, we'll treat the 0'd
+ * headers as the end of log and ignore them. If
+ * we're reading backwards from another file, then
+ * the first record in that new file should have its
+ * prev field set correctly.
+ */
+ __db_err(dbenv,
+ "Encountered zero length records while traversing backwards");
+ DB_ASSERT(0);
+ case DB_SET:
+ default:
+ /* Return the 0-length record. */
+ break;
+ }
+ }
+
+ /* Copy the record into the user's DBT. */
+ if ((ret = __db_retcopy(dbenv, dbt, rp + hdr.size,
+ (u_int32_t)(hdr.len - hdr.size),
+ &logc->c_dbt.data, &logc->c_dbt.ulen)) != 0)
+ goto err;
+
+ if (CRYPTO_ON(dbenv)) {
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ hdr.iv, dbt->data, hdr.len - hdr.size)) != 0) {
+ ret = EAGAIN;
+ goto err;
+ }
+ /*
+ * Return the original log record size to the user,
+ * even though we've allocated more than that, possibly.
+ * The log record is decrypted in the user dbt, not in
+ * the buffer, so we must do this here after decryption,
+ * not adjust the len passed to the __db_retcopy call.
+ */
+ dbt->size = hdr.orig_size;
+ }
+
+ /* Update the cursor and the returned LSN. */
+ *alsn = nlsn;
+ logc->c_lsn = nlsn;
+ logc->c_len = hdr.len;
+ logc->c_prev = hdr.prev;
+
+err: if (rlock == L_ACQUIRED)
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * __log_c_incursor --
+ * Check to see if the requested record is in the cursor's buffer.
+ */
+static int
+__log_c_incursor(logc, lsn, hdr, pp)
+ DB_LOGC *logc;
+ DB_LSN *lsn;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ u_int8_t *p;
+
+ *pp = NULL;
+
+ /*
+ * Test to see if the requested LSN could be part of the cursor's
+ * buffer.
+ *
+ * The record must be part of the same file as the cursor's buffer.
+ * The record must start at a byte offset equal to or greater than
+ * the cursor buffer.
+ * The record must not start at a byte offset after the cursor
+ * buffer's end.
+ */
+ if (logc->bp_lsn.file != lsn->file)
+ return (0);
+ if (logc->bp_lsn.offset > lsn->offset)
+ return (0);
+ if (logc->bp_lsn.offset + logc->bp_rlen <= lsn->offset + hdr->size)
+ return (0);
+
+ /*
+ * Read the record's header and check if the record is entirely held
+ * in the buffer. If the record is not entirely held, get it again.
+ * (The only advantage in having part of the record locally is that
+ * we might avoid a system call because we already have the HDR in
+ * memory.)
+ *
+ * If the header check fails for any reason, it must be because the
+ * LSN is bogus. Fail hard.
+ */
+ p = logc->bp + (lsn->offset - logc->bp_lsn.offset);
+ memcpy(hdr, p, hdr->size);
+ if (__log_c_hdrchk(logc, hdr, NULL))
+ return (DB_NOTFOUND);
+ if (logc->bp_lsn.offset + logc->bp_rlen <= lsn->offset + hdr->len)
+ return (0);
+
+ *pp = p; /* Success. */
+
+ return (0);
+}
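The three tests above amount to asking whether the requested record lies inside the half-open byte window the cursor buffer currently caches. As a tiny generic predicate (single file assumed, and without the slightly stricter boundary handling of the real checks):

#include <stdint.h>

/*
 * Does [off, off + need) fall entirely inside the cached window
 * [win_off, win_off + win_len)?  Mirrors the cursor-buffer checks.
 */
static int
in_window(uint32_t win_off, uint32_t win_len, uint32_t off, uint32_t need)
{
	if (off < win_off)
		return (0);
	if (win_off + win_len < off + need)
		return (0);
	return (1);
}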
+
+/*
+ * __log_c_inregion --
+ * Check to see if the requested record is in the region's buffer.
+ */
+static int
+__log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
+ DB_LOGC *logc;
+ DB_LSN *lsn, *last_lsn;
+ RLOCK *rlockp;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ LOG *lp;
+ size_t len, nr;
+ u_int32_t b_disk, b_region;
+ int ret;
+ u_int8_t *p;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = ((DB_LOG *)logc->dbenv->lg_handle)->reginfo.primary;
+
+ ret = 0;
+ *pp = NULL;
+
+ /* If we haven't yet acquired the log region lock, do so. */
+ if (*rlockp == L_NONE) {
+ *rlockp = L_ACQUIRED;
+ R_LOCK(dbenv, &dblp->reginfo);
+ }
+
+ /*
+ * The routines to read from disk must avoid reading past the logical
+ * end of the log, so pass that information back to it.
+ *
+ * Since they're reading directly from the disk, they must also avoid
+ * reading past the offset we've written out. If the log was
+ * truncated, it's possible that there are zeroes or garbage on
+ * disk after this offset, and the logical end of the log can
+ * come later than this point if the log buffer isn't empty.
+ */
+ *last_lsn = lp->lsn;
+ if (last_lsn->offset > lp->w_off)
+ last_lsn->offset = lp->w_off;
+
+ /*
+ * Test to see if the requested LSN could be part of the region's
+ * buffer.
+ *
+ * During recovery, we read the log files getting the information to
+ * initialize the region. In that case, the region's lsn field will
+ * not yet have been filled in, use only the disk.
+ *
+ * The record must not start at a byte offset after the region buffer's
+ * end, since that means the request is for a record after the end of
+ * the log. Do this test even if the region's buffer is empty -- after
+ * recovery, the log files may continue past the declared end-of-log,
+ * and the disk reading routine will incorrectly attempt to read the
+ * remainder of the log.
+ *
+ * Otherwise, test to see if the region's buffer actually has what we
+ * want:
+ *
+ * The buffer must have some useful content.
+ * The record must be in the same file as the region's buffer and must
+ * start at a byte offset equal to or greater than the region's buffer.
+ */
+ if (IS_ZERO_LSN(lp->lsn))
+ return (0);
+ if (lsn->file > lp->lsn.file ||
+ (lsn->file == lp->lsn.file && lsn->offset >= lp->lsn.offset))
+ return (DB_NOTFOUND);
+ if (lp->b_off == 0)
+ return (0);
+ if (lsn->file < lp->f_lsn.file || lsn->offset < lp->f_lsn.offset)
+ return (0);
+
+ /*
+ * The current contents of the cursor's buffer will be useless for a
+ * future call -- trash it rather than try and make it look correct.
+ */
+ ZERO_LSN(logc->bp_lsn);
+
+ /*
+ * If the requested LSN is greater than the region buffer's first
+ * byte, we know the entire record is in the buffer.
+ *
+ * If the header check fails for any reason, it must be because the
+ * LSN is bogus. Fail hard.
+ */
+ if (lsn->offset > lp->f_lsn.offset) {
+ p = dblp->bufp + (lsn->offset - lp->w_off);
+ memcpy(hdr, p, hdr->size);
+ if (__log_c_hdrchk(logc, hdr, NULL))
+ return (DB_NOTFOUND);
+ if (logc->bp_size <= hdr->len) {
+ len = ALIGN(hdr->len * 2, 128);
+ if ((ret =
+ __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
+ }
+ memcpy(logc->bp, p, hdr->len);
+ *pp = logc->bp;
+ return (0);
+ }
+
+ /*
+ * There's a partial record, that is, the requested record starts
+ * in a log file and finishes in the region buffer. We have to
+ * find out how many bytes of the record are in the region buffer
+ * so we can copy them out into the cursor buffer. First, check
+ * to see if the requested record is the only record in the region
+ * buffer, in which case we should copy the entire region buffer.
+ *
+ * Else, walk back through the region's buffer to find the first LSN
+ * after the record that crosses the buffer boundary -- we can detect
+ * that LSN, because its "prev" field will reference the record we
+ * want. The bytes we need to copy from the region buffer are the
+ * bytes up to the record we find. The bytes we'll need to allocate
+ * to hold the log record are the bytes between the two offsets.
+ */
+ b_disk = lp->w_off - lsn->offset;
+ if (lp->b_off <= lp->len)
+ b_region = (u_int32_t)lp->b_off;
+ else
+ for (p = dblp->bufp + (lp->b_off - lp->len);;) {
+ memcpy(hdr, p, hdr->size);
+ if (hdr->prev == lsn->offset) {
+ b_region = (u_int32_t)(p - dblp->bufp);
+ break;
+ }
+ p = dblp->bufp + (hdr->prev - lp->w_off);
+ }
+
+ /*
+ * If we don't have enough room for the record, we have to allocate
+ * space. We have to do it while holding the region lock, which is
+ * truly annoying, but there's no way around it. This call is why
+ * we allocate cursor buffer space when allocating the cursor instead
+ * of waiting.
+ */
+ if (logc->bp_size <= b_region + b_disk) {
+ len = ALIGN((b_region + b_disk) * 2, 128);
+ if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
+ }
+
+ /* Copy the region's bytes to the end of the cursor's buffer. */
+ p = (logc->bp + logc->bp_size) - b_region;
+ memcpy(p, dblp->bufp, b_region);
+
+ /* Release the region lock. */
+ if (*rlockp == L_ACQUIRED) {
+ *rlockp = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ /*
+ * Read the rest of the information from disk. Neither short reads
+ * nor EOF is acceptable; the bytes we want had better be there.
+ */
+ if (b_disk != 0) {
+ p -= b_disk;
+ nr = b_disk;
+ if ((ret = __log_c_io(
+ logc, lsn->file, lsn->offset, p, &nr, NULL)) != 0)
+ return (ret);
+ if (nr < b_disk)
+ return (__log_c_shortread(logc, 0));
+ }
+
+ /* Copy the header information into the caller's structure. */
+ memcpy(hdr, p, hdr->size);
+
+ *pp = p;
+ return (0);
+}
+
+/*
+ * __log_c_ondisk --
+ * Read a record off disk.
+ */
+static int
+__log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp)
+ DB_LOGC *logc;
+ DB_LSN *lsn, *last_lsn;
+ int flags, *eofp;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ DB_ENV *dbenv;
+ size_t len, nr;
+ u_int32_t offset;
+ int ret;
+
+ dbenv = logc->dbenv;
+ *eofp = 0;
+
+ nr = hdr->size;
+ if ((ret =
+ __log_c_io(logc, lsn->file, lsn->offset, hdr, &nr, eofp)) != 0)
+ return (ret);
+ if (*eofp)
+ return (0);
+
+ /* If we read 0 bytes, assume we've hit EOF. */
+ if (nr == 0) {
+ *eofp = 1;
+ return (0);
+ }
+
+ /* Check the HDR. */
+ if ((ret = __log_c_hdrchk(logc, hdr, eofp)) != 0)
+ return (ret);
+ if (*eofp)
+ return (0);
+
+ /* Otherwise, we should have gotten the bytes we wanted. */
+ if (nr < hdr->size)
+ return (__log_c_shortread(logc, 0));
+
+ /*
+ * Regardless of how we return, the previous contents of the cursor's
+ * buffer are useless -- trash it.
+ */
+ ZERO_LSN(logc->bp_lsn);
+
+ /*
+ * Otherwise, we now (finally!) know how big the record is. (Maybe
+ * we should have just stuck the length of the record into the LSN!?)
+ * Make sure we have enough space.
+ */
+ if (logc->bp_size <= hdr->len) {
+ len = ALIGN(hdr->len * 2, 128);
+ if ((ret = __os_realloc(dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
+ }
+
+ /*
+ * If we're moving forward in the log file, read this record in at the
+ * beginning of the buffer. Otherwise, read this record in at the end
+ * of the buffer, making sure we don't try and read before the start
+ * of the file. (We prefer positioning at the end because transaction
+ * aborts use DB_SET to move backward through the log and we might get
+ * lucky.)
+ *
+ * Read a buffer's worth, without reading past the logical EOF. The
+ * last_lsn may be a zero LSN, but that's OK, the test works anyway.
+ */
+ if (flags == DB_FIRST || flags == DB_NEXT)
+ offset = lsn->offset;
+ else if (lsn->offset + hdr->len < logc->bp_size)
+ offset = 0;
+ else
+ offset = (lsn->offset + hdr->len) - logc->bp_size;
+
+ nr = logc->bp_size;
+ if (lsn->file == last_lsn->file && offset + nr >= last_lsn->offset)
+ nr = last_lsn->offset - offset;
+
+ if ((ret =
+ __log_c_io(logc, lsn->file, offset, logc->bp, &nr, eofp)) != 0)
+ return (ret);
+
+ /*
+ * We should have at least gotten the bytes up-to-and-including the
+ * record we're reading.
+ */
+ if (nr < (lsn->offset + hdr->len) - offset)
+ return (__log_c_shortread(logc, 1));
+
+ /* Set up the return information. */
+ logc->bp_rlen = (u_int32_t)nr;
+ logc->bp_lsn.file = lsn->file;
+ logc->bp_lsn.offset = offset;
+
+ *pp = logc->bp + (lsn->offset - offset);
+
+ return (0);
+}
+
+/*
+ * __log_c_hdrchk --
+ *
+ * Check for corrupted HDRs before we use them to allocate memory or find
+ * records.
+ *
+ * If the log files were pre-allocated, a zero-filled HDR structure is the
+ * logical file end. However, we can see buffers filled with 0's during
+ * recovery, too (because multiple log buffers were written asynchronously,
+ * and one made it to disk before a different one that logically precedes
+ * it in the log file).
+ *
+ * XXX
+ * I think there's a potential pre-allocation recovery flaw here -- if we
+ * fail to write a buffer at the end of a log file (by scheduling its
+ * write asynchronously, and it never making it to disk), then succeed in
+ * writing a log file block to a subsequent log file, I don't think we will
+ * detect that the buffer of 0's should have marked the end of the log files
+ * during recovery. I think we may need to always write some garbage after
+ * each block write if we pre-allocate log files. (At the moment, we do not
+ * pre-allocate, so this isn't currently an issue.)
+ *
+ * Check for impossibly large records. The malloc should fail later, but we
+ * have customers that run mallocs that treat all allocation failures as fatal
+ * errors.
+ *
+ * Note that none of this is necessarily something awful happening. We let
+ * the application hand us any LSN they want, and it could be a pointer into
+ * the middle of a log record, there's no way to tell.
+ */
+static int
+__log_c_hdrchk(logc, hdr, eofp)
+ DB_LOGC *logc;
+ HDR *hdr;
+ int *eofp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = logc->dbenv;
+
+ /* Sanity check the log record's size. */
+ if (hdr->len <= hdr->size)
+ goto err;
+ /*
+ * If the cursor's max-record value isn't yet set, it means we aren't
+ * reading these records from a log file and no check is necessary.
+ */
+ if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec) {
+ /*
+ * If we fail the check, there's the pathological case that
+ * we're reading the last file, it's growing, and our initial
+ * check information was wrong. Get it again, to be sure.
+ */
+ if ((ret = __log_c_set_maxrec(logc, NULL)) != 0) {
+ __db_err(dbenv, "DB_LOGC->get: %s", db_strerror(ret));
+ return (ret);
+ }
+ if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec)
+ goto err;
+ }
+
+ if (eofp != NULL) {
+ if (hdr->prev == 0 && hdr->chksum[0] == 0 && hdr->len == 0) {
+ *eofp = 1;
+ return (0);
+ }
+ *eofp = 0;
+ }
+ return (0);
+
+err: if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv, "DB_LOGC->get: invalid log record header");
+ return (EIO);
+}
+
+/*
+ * __log_c_io --
+ * Read records from a log file.
+ */
+static int
+__log_c_io(logc, fnum, offset, p, nrp, eofp)
+ DB_LOGC *logc;
+ u_int32_t fnum, offset;
+ void *p;
+ size_t *nrp;
+ int *eofp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ int ret;
+ char *np;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+
+ /*
+ * If we've switched files, discard the current file handle and acquire
+ * a new one.
+ */
+ if (F_ISSET(logc->c_fh, DB_FH_VALID) && logc->bp_lsn.file != fnum)
+ if ((ret = __os_closehandle(dbenv, logc->c_fh)) != 0)
+ return (ret);
+ if (!F_ISSET(logc->c_fh, DB_FH_VALID)) {
+ if ((ret = __log_name(dblp, fnum,
+ &np, logc->c_fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ /*
+ * If we're allowed to return EOF, assume that's the
+ * problem, set the EOF status flag and return 0.
+ */
+ if (eofp != NULL) {
+ *eofp = 1;
+ ret = 0;
+ } else if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv, "DB_LOGC->get: %s: %s",
+ np, db_strerror(ret));
+ __os_free(dbenv, np);
+ return (ret);
+ }
+
+ if ((ret = __log_c_set_maxrec(logc, np)) != 0) {
+ __db_err(dbenv,
+ "DB_LOGC->get: %s: %s", np, db_strerror(ret));
+ __os_free(dbenv, np);
+ return (ret);
+ }
+ __os_free(dbenv, np);
+ }
+
+ /* Seek to the record's offset. */
+ if ((ret = __os_seek(dbenv,
+ logc->c_fh, 0, 0, offset, 0, DB_OS_SEEK_SET)) != 0) {
+ if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv,
+ "DB_LOGC->get: seek: %s", db_strerror(ret));
+ return (ret);
+ }
+
+ /* Read the data. */
+ if ((ret = __os_read(dbenv, logc->c_fh, p, *nrp, nrp)) != 0) {
+ if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv,
+ "DB_LOGC->get: read: %s", db_strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __log_c_shortread --
+ * Read was short -- return a consistent error message and error.
+ */
+static int
+__log_c_shortread(logc, silent)
+ DB_LOGC *logc;
+ int silent;
+{
+ if (!silent || !F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(logc->dbenv, "DB_LOGC->get: short read");
+ return (EIO);
+}
+
+/*
+ * __log_c_set_maxrec --
+ * Bound the maximum log record size in a log file.
+ */
+static int
+__log_c_set_maxrec(logc, np)
+ DB_LOGC *logc;
+ char *np;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+
+ /*
+ * We don't want to try and allocate huge chunks of memory because
+ * applications with error-checking malloc's often consider that a
+ * hard failure. If we're about to look at a corrupted record with
+ * a bizarre size, we need to know before trying to allocate space
+ * to hold it. We could read the persistent data at the beginning
+ * of the file but that's hard -- we may have to decrypt it, checksum
+ * it and so on. Stat the file instead.
+ */
+ if ((ret =
+ __os_ioinfo(dbenv, np, logc->c_fh, &mbytes, &bytes, NULL)) != 0)
+ return (ret);
+
+ logc->bp_maxrec = mbytes * MEGABYTE + bytes;
+
+ /*
+ * If reading from the log file currently being written, we could get
+ * an incorrect size: if the cursor was opened on the file when it
+ * had only a few hundred bytes and is then used to move forward
+ * after more log records were written, the original stat value
+ * would be wrong. Use the maximum of the current
+ * log file size and the size of the buffer -- that should represent
+ * the max of any log record currently in the file.
+ *
+ * The log buffer size is set when the environment is opened and never
+ * changed, we don't need a lock on it.
+ */
+ lp = dblp->reginfo.primary;
+ logc->bp_maxrec += lp->buffer_size;
+
+ return (0);
+}
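The point of the routine is to bound any length field read from the file by the file's own size before trusting it for an allocation. A bare-bones equivalent using fstat, without the log-buffer adjustment or the __os_* wrappers:

#include <sys/stat.h>
#include <stdlib.h>

/* Reject a claimed record length that exceeds the file size, then allocate. */
static void *
alloc_bounded(int fd, size_t claimed_len)
{
	struct stat sb;

	if (fstat(fd, &sb) != 0)
		return (NULL);
	if (claimed_len > (size_t)sb.st_size)
		return (NULL);	/* header is corrupt or the LSN was bogus */
	return (malloc(claimed_len));
}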
diff --git a/libdb/log/log_method.c b/libdb/log/log_method.c
new file mode 100644
index 0000000..96c80c8
--- /dev/null
+++ b/libdb/log/log_method.c
@@ -0,0 +1,188 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __log_set_lg_bsize __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_dir __P((DB_ENV *, const char *));
+static int __log_set_lg_max __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_regionmax __P((DB_ENV *, u_int32_t));
+
+/*
+ * __log_dbenv_create --
+ * Log specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __log_dbenv_create __P((DB_ENV *));
+ */
+void
+__log_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->lg_bsize = LG_BSIZE_DEFAULT;
+ dbenv->lg_regionmax = LG_BASE_REGION_SIZE;
+
+#ifdef HAVE_RPC
+ /*
+ * If we have a client, overwrite what we just setup to
+ * point to client functions.
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lg_bsize = __dbcl_set_lg_bsize;
+ dbenv->set_lg_dir = __dbcl_set_lg_dir;
+ dbenv->set_lg_max = __dbcl_set_lg_max;
+ dbenv->set_lg_regionmax = __dbcl_set_lg_regionmax;
+ dbenv->log_archive = __dbcl_log_archive;
+ dbenv->log_cursor = __dbcl_log_cursor;
+ dbenv->log_file = __dbcl_log_file;
+ dbenv->log_flush = __dbcl_log_flush;
+ dbenv->log_put = __dbcl_log_put;
+ dbenv->log_stat = __dbcl_log_stat;
+ } else
+#endif
+ {
+ dbenv->set_lg_bsize = __log_set_lg_bsize;
+ dbenv->set_lg_dir = __log_set_lg_dir;
+ dbenv->set_lg_max = __log_set_lg_max;
+ dbenv->set_lg_regionmax = __log_set_lg_regionmax;
+ dbenv->log_archive = __log_archive;
+ dbenv->log_cursor = __log_cursor;
+ dbenv->log_file = __log_file;
+ dbenv->log_flush = __log_flush;
+ dbenv->log_put = __log_put;
+ dbenv->log_stat = __log_stat;
+ }
+}
+
+/*
+ * __log_set_lg_bsize --
+ * Set the log buffer size.
+ */
+static int
+__log_set_lg_bsize(dbenv, lg_bsize)
+ DB_ENV *dbenv;
+ u_int32_t lg_bsize;
+{
+ u_int32_t lg_max;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_bsize");
+
+ if (lg_bsize == 0)
+ lg_bsize = LG_BSIZE_DEFAULT;
+
+ /* Let's not be silly. */
+ lg_max = dbenv->lg_size == 0 ? LG_MAX_DEFAULT : dbenv->lg_size;
+ if (lg_bsize > lg_max / 4) {
+ __db_err(dbenv, "log buffer size must be <= log file size / 4");
+ return (EINVAL);
+ }
+
+ dbenv->lg_bsize = lg_bsize;
+ return (0);
+}
+
+/*
+ * __log_set_lg_max --
+ * Set the maximum log file size.
+ */
+static int
+__log_set_lg_max(dbenv, lg_max)
+ DB_ENV *dbenv;
+ u_int32_t lg_max;
+{
+ LOG *region;
+
+ if (lg_max == 0)
+ lg_max = LG_MAX_DEFAULT;
+
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (!LOGGING_ON(dbenv))
+ return (__db_env_config(
+ dbenv, "set_lg_max", DB_INIT_LOG));
+ region = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary;
+
+ /* Let's not be silly. */
+ if (lg_max < region->buffer_size * 4)
+ goto err;
+ region->log_nsize = lg_max;
+ } else {
+ /* Let's not be silly. */
+ if (lg_max < dbenv->lg_bsize * 4)
+ goto err;
+ dbenv->lg_size = lg_max;
+ }
+
+ return (0);
+
+err: __db_err(dbenv, "log file size must be >= log buffer size * 4");
+ return (EINVAL);
+}
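set_lg_bsize and set_lg_max enforce the same invariant from opposite directions: the in-memory log buffer may be at most a quarter of the log file size. A small validation sketch of that rule (check_log_sizes is illustrative, not part of the API):

#include <stdint.h>

/* Return 0 if the buffer/file size pair satisfies the 4x rule, -1 if not. */
static int
check_log_sizes(uint32_t lg_bsize, uint32_t lg_max)
{
	if (lg_bsize == 0 || lg_max == 0)
		return (-1);		/* callers substitute defaults first */
	return (lg_bsize > lg_max / 4 ? -1 : 0);
}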
+
+/*
+ * __log_set_lg_regionmax --
+ * Set the region size.
+ */
+static int
+__log_set_lg_regionmax(dbenv, lg_regionmax)
+ DB_ENV *dbenv;
+ u_int32_t lg_regionmax;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_regionmax");
+
+ /* Let's not be silly. */
+ if (lg_regionmax != 0 && lg_regionmax < LG_BASE_REGION_SIZE) {
+ __db_err(dbenv,
+ "log file size must be >= %d", LG_BASE_REGION_SIZE);
+ return (EINVAL);
+ }
+
+ dbenv->lg_regionmax = lg_regionmax;
+ return (0);
+}
+
+/*
+ * __log_set_lg_dir --
+ * Set the log file directory.
+ */
+static int
+__log_set_lg_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_log_dir != NULL)
+ __os_free(dbenv, dbenv->db_log_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_log_dir));
+}
diff --git a/libdb/log/log_put.c b/libdb/log/log_put.c
new file mode 100644
index 0000000..c4bd5e8
--- /dev/null
+++ b/libdb/log/log_put.c
@@ -0,0 +1,1247 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __log_encrypt_record __P((DB_ENV *, DBT *, HDR *, u_int32_t));
+static int __log_fill __P((DB_LOG *, DB_LSN *, void *, u_int32_t));
+static int __log_flush_commit __P((DB_ENV *, const DB_LSN *, u_int32_t));
+static int __log_flush_int __P((DB_LOG *, const DB_LSN *, int));
+static int __log_newfh __P((DB_LOG *));
+static int __log_put_next __P((DB_ENV *,
+ DB_LSN *, const DBT *, HDR *, DB_LSN *));
+static int __log_putr __P((DB_LOG *,
+ DB_LSN *, const DBT *, u_int32_t, HDR *));
+static int __log_write __P((DB_LOG *, void *, u_int32_t));
+
+/*
+ * __log_put --
+ * Write a log record. This is the public interface, DB_ENV->log_put.
+ *
+ * PUBLIC: int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__log_put(dbenv, lsnp, udbt, flags)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ const DBT *udbt;
+ u_int32_t flags;
+{
+ DB_CIPHER *db_cipher;
+ DBT *dbt, t;
+ DB_LOG *dblp;
+ DB_LSN lsn, old_lsn;
+ HDR hdr;
+ LOG *lp;
+ u_int32_t do_flush, op, writeonly;
+ int lock_held, need_free, ret;
+ u_int8_t *key;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_put", DB_INIT_LOG);
+
+ /* Validate arguments. */
+ op = DB_OPFLAGS_MASK & flags;
+ if (op != 0 && op != DB_COMMIT)
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+ /* Check for allowed bit-flags. */
+ if (LF_ISSET(~(DB_OPFLAGS_MASK |
+ DB_FLUSH | DB_NOCOPY | DB_PERMANENT | DB_WRNOSYNC)))
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+ /* DB_WRNOSYNC and DB_FLUSH are mutually exclusive. */
+ if (LF_ISSET(DB_WRNOSYNC) && LF_ISSET(DB_FLUSH))
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 1));
+
+ /* Replication clients should never write log records. */
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT) ||
+ F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ __db_err(dbenv,
+ "DB_ENV->log_put is illegal on replication clients");
+ return (EINVAL);
+ }
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ db_cipher = dbenv->crypto_handle;
+ dbt = &t;
+ t = *udbt;
+ lock_held = need_free = 0;
+ do_flush = LF_ISSET(DB_FLUSH);
+ writeonly = LF_ISSET(DB_WRNOSYNC);
+
+ /*
+ * If we are coming from the logging code, we use an internal
+ * flag, DB_NOCOPY, because we know we can overwrite/encrypt
+ * the log record in place. Otherwise, if a user called log_put
+ * then we must copy it to new memory so that we know we can
+ * write it.
+ *
+ * We also must copy it to new memory if we are a replication
+ * master so that we retain an unencrypted copy of the log
+ * record to send to clients.
+ */
+ if (!LF_ISSET(DB_NOCOPY) || F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ if (CRYPTO_ON(dbenv))
+ t.size += db_cipher->adj_size(udbt->size);
+ if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+ goto err;
+ need_free = 1;
+ memcpy(t.data, udbt->data, udbt->size);
+ }
+ if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, udbt->size)) != 0)
+ goto err;
+ if (CRYPTO_ON(dbenv))
+ key = db_cipher->mac_key;
+ else
+ key = NULL;
+ /* Otherwise, we actually have a record to put. Put it. */
+
+ /* Before we grab the region lock, calculate the record's checksum. */
+ __db_chksum(dbt->data, dbt->size, key, hdr.chksum);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lock_held = 1;
+
+ ZERO_LSN(old_lsn);
+ if ((ret = __log_put_next(dbenv, &lsn, dbt, &hdr, &old_lsn)) != 0)
+ goto err;
+
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ /*
+ * Replication masters need to drop the lock to send
+ * messages, but we want to drop and reacquire it a minimal
+ * number of times.
+ */
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ lock_held = 0;
+
+ /*
+ * If we changed files and we're in a replicated
+ * environment, we need to inform our clients now that
+ * we've dropped the region lock.
+ *
+ * Note that a failed NEWFILE send is a dropped message
+ * that our client can handle, so we can ignore it. It's
+ * possible that the record we already put is a commit, so
+ * we don't just want to return failure.
+ */
+ if (!IS_ZERO_LSN(old_lsn))
+ (void)__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWFILE, &old_lsn, NULL, 0);
+
+ /*
+ * Then send the log record itself on to our clients.
+ *
+ * If the send fails and we're a commit or checkpoint,
+ * there's nothing we can do; the record's in the log.
+ * Flush it, even if we're running with TXN_NOSYNC, on the
+ * grounds that it should be in durable form somewhere.
+ */
+ /*
+ * !!!
+ * In the crypto case, we MUST send the udbt, not the
+ * now-encrypted dbt. Clients have no way to decrypt
+ * without the header.
+ */
+ if ((__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_LOG, &lsn, udbt, flags) != 0) &&
+ LF_ISSET(DB_PERMANENT))
+ do_flush |= DB_FLUSH;
+ }
+
+ /*
+ * If needed, do a flush. Note that failures at this point
+ * are only permissible if we know we haven't written a commit
+ * record; __log_flush_commit is responsible for enforcing this.
+ *
+ * If a flush is not needed, see if WRITE_NOSYNC was set and we
+ * need to write out the log buffer.
+ */
+ if (do_flush || writeonly) {
+ if (!lock_held) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lock_held = 1;
+ }
+ if (do_flush)
+ ret = __log_flush_commit(dbenv, &lsn, flags);
+ else if (lp->b_off != 0)
+ /*
+ * writeonly: if there's anything in the current
+ * log buffer, we need to write it out.
+ */
+ if ((ret = __log_write(dblp,
+ dblp->bufp, (u_int32_t)lp->b_off)) == 0)
+ lp->b_off = 0;
+ }
+
+err: if (lock_held)
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (need_free)
+ __os_free(dbenv, dbt->data);
+
+ if (ret == 0)
+ *lsnp = lsn;
+
+ return (ret);
+}
+
+/*
+ * __log_txn_lsn --
+ *
+ * PUBLIC: void __log_txn_lsn
+ * PUBLIC: __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
+ */
+void
+__log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ u_int32_t *mbytesp, *bytesp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * We are trying to get the LSN of the last entry in the log. We use
+ * this in two places: 1) DB_ENV->txn_checkpoint uses it as a first
+ * value when trying to compute an LSN such that all transactions begun
+ * before it are complete. 2) DB_ENV->txn_begin uses it as the
+ * begin_lsn.
+ *
+ * Typically, it's easy to get the last written LSN, you simply look
+ * at the current log pointer and back up the number of bytes of the
+ * last log record. However, if the last thing we did was write the
+ * log header of a new log file, then, this doesn't work, so we return
+ * the first log record that will be written in this new file.
+ */
+ *lsnp = lp->lsn;
+ if (lp->lsn.offset > lp->len)
+ lsnp->offset -= lp->len;
+
+ /*
+ * Since we're holding the log region lock, return the bytes put into
+ * the log since the last checkpoint, transaction checkpoint needs it.
+ *
+ * We add the current buffer offset so as to count bytes that have not
+ * yet been written, but are sitting in the log buffer.
+ */
+ if (mbytesp != NULL) {
+ *mbytesp = lp->stat.st_wc_mbytes;
+ *bytesp = (u_int32_t)(lp->stat.st_wc_bytes + lp->b_off);
+
+ lp->stat.st_wc_mbytes = lp->stat.st_wc_bytes = 0;
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+}
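+
+/*
+ * Worked example for __log_txn_lsn (illustrative values only): if lp->lsn
+ * is {file 3, offset 1000} and the last record written was lp->len == 200
+ * bytes long, the LSN returned is {3, 800}, the start of that record.  If
+ * the last thing written was the persistent header of a brand-new file,
+ * lp->lsn.offset == lp->len, no adjustment is made, and the caller sees
+ * the LSN at which the first real record of the new file will be written.
+ * A hypothetical caller with an open environment and no log locks held:
+ *
+ *	DB_LSN last;
+ *	u_int32_t mbytes, bytes;
+ *	__log_txn_lsn(dbenv, &last, &mbytes, &bytes);
+ */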
+
+/*
+ * __log_put_next --
+ * Put the given record as the next in the log, wherever that may
+ * turn out to be.
+ */
+static int
+__log_put_next(dbenv, lsn, dbt, hdr, old_lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsn;
+ const DBT *dbt;
+ HDR *hdr;
+ DB_LSN *old_lsnp;
+{
+ DB_LOG *dblp;
+ DB_LSN old_lsn;
+ LOG *lp;
+ int newfile, ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Save a copy of lp->lsn before we might decide to switch log
+ * files and change it. If we do switch log files, and we're
+ * doing replication, we'll need to tell our clients about the
+ * switch, and they need to receive a NEWFILE message
+ * with this "would-be" LSN in order to know they're not
+ * missing any log records.
+ */
+ old_lsn = lp->lsn;
+ newfile = 0;
+
+ /*
+ * If this information won't fit in the file, or if we're a
+ * replication client environment and have been told to do so,
+ * swap files.
+ */
+ if (lp->lsn.offset == 0 ||
+ lp->lsn.offset + hdr->size + dbt->size > lp->log_size) {
+ if (hdr->size + sizeof(LOGP) + dbt->size > lp->log_size) {
+ __db_err(dbenv,
+ "DB_ENV->log_put: record larger than maximum file size");
+ return (EINVAL);
+ }
+
+ if ((ret = __log_newfile(dblp, NULL)) != 0)
+ return (ret);
+
+ /*
+ * Flag that we switched files, in case we're a master
+ * and need to send this information to our clients.
+ * We postpone doing the actual send until we can
+ * safely release the log region lock and are doing so
+ * anyway.
+ */
+ newfile = 1;
+ }
+
+ /*
+ * The offset into the log file at this point is the LSN where
+ * we're about to put this record, and is the LSN the caller wants.
+ */
+ *lsn = lp->lsn;
+
+ /* If we switched log files, let our caller know where. */
+ if (newfile)
+ *old_lsnp = old_lsn;
+
+ /* Actually put the record. */
+ return (__log_putr(dblp, lsn, dbt, lp->lsn.offset - lp->len, hdr));
+}
+
+/*
+ * __log_flush_commit --
+ * Flush a record for which the DB_FLUSH flag to log_put has been set.
+ */
+static int
+__log_flush_commit(dbenv, lsnp, flags)
+ DB_ENV *dbenv;
+ const DB_LSN *lsnp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN flush_lsn;
+ LOG *lp;
+ int ret;
+ u_int32_t op;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ flush_lsn = *lsnp;
+ op = DB_OPFLAGS_MASK & flags;
+
+ if ((ret = __log_flush_int(dblp, &flush_lsn, 1)) == 0)
+ return (0);
+
+ /*
+ * If a flush supporting a transaction commit fails, we must abort the
+	 * transaction.  (If we aren't doing a commit, return the failure; if
+	 * the commit we care about made it to disk successfully, we just
+ * ignore the failure, because there's no way to undo the commit.)
+ */
+ if (op != DB_COMMIT)
+ return (ret);
+
+ if (flush_lsn.file != lp->lsn.file || flush_lsn.offset < lp->w_off)
+ return (0);
+
+ /*
+ * Else, make sure that the commit record does not get out after we
+ * abort the transaction. Do this by overwriting the commit record
+	 * in the buffer.  (Note that other commits in this buffer will wait
+	 * until a successful write happens; we do not wake them.)  We
+	 * point at the right part of the buffer and write an abort record
+	 * over the commit.  We must then try to flush the buffer again,
+	 * since the interesting part of the buffer may have actually made
+	 * it out to disk before the failure; we can't know for sure.
+ */
+ if (__txn_force_abort(dbenv,
+ dblp->bufp + flush_lsn.offset - lp->w_off) == 0)
+ (void)__log_flush_int(dblp, &flush_lsn, 0);
+
+ return (ret);
+}
+
+/*
+ * __log_newfile --
+ * Initialize and switch to a new log file. (Note that this is
+ * called both when no log yet exists and when we fill a log file.)
+ *
+ * PUBLIC: int __log_newfile __P((DB_LOG *, DB_LSN *));
+ */
+int
+__log_newfile(dblp, lsnp)
+ DB_LOG *dblp;
+ DB_LSN *lsnp;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DBT t;
+ HDR hdr;
+ LOG *lp;
+ int need_free, ret;
+ u_int32_t lastoff;
+ size_t tsize;
+ u_int8_t *tmp;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /* If we're not at the beginning of a file already, start a new one. */
+ if (lp->lsn.offset != 0) {
+ /*
+ * Flush the log so this file is out and can be closed. We
+ * cannot release the region lock here because we need to
+ * protect the end of the file while we switch. In
+ * particular, a thread with a smaller record than ours
+ * could detect that there is space in the log. Even
+ * blocking that event by declaring the file full would
+ * require all threads to wait here so that the lsn.file
+ * can be moved ahead after the flush completes. This
+ * probably can be changed if we had an lsn for the
+		 * previous file and one for the current, but it does not
+ * seem like this would get much more throughput, if any.
+ */
+ if ((ret = __log_flush_int(dblp, NULL, 0)) != 0)
+ return (ret);
+
+ DB_ASSERT(lp->b_off == 0);
+ /*
+		 * Save the last known offset from the previous file; we'll
+		 * need it to initialize the persistent header information.
+ */
+ lastoff = lp->lsn.offset;
+
+ /* Point the current LSN to the new file. */
+ ++lp->lsn.file;
+ lp->lsn.offset = 0;
+
+ /* Reset the file write offset. */
+ lp->w_off = 0;
+ } else
+ lastoff = 0;
+
+ /*
+ * Insert persistent information as the first record in every file.
+ * Note that the previous length is wrong for the very first record
+	 * of the log, but that's okay; we check for it during retrieval.
+ */
+ DB_ASSERT(lp->b_off == 0);
+
+ memset(&t, 0, sizeof(t));
+ memset(&hdr, 0, sizeof(HDR));
+
+ need_free = 0;
+ tsize = sizeof(LOGP);
+ db_cipher = dbenv->crypto_handle;
+ if (CRYPTO_ON(dbenv))
+ tsize += db_cipher->adj_size(tsize);
+ if ((ret = __os_calloc(dbenv, 1, tsize, &tmp)) != 0)
+ return (ret);
+ lp->persist.log_size = lp->log_size = lp->log_nsize;
+ memcpy(tmp, &lp->persist, sizeof(LOGP));
+ t.data = tmp;
+ t.size = (u_int32_t)tsize;
+ need_free = 1;
+
+ if ((ret =
+ __log_encrypt_record(dbenv, &t, &hdr, (u_int32_t)tsize)) != 0)
+ goto err;
+ __db_chksum(t.data, t.size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+ lsn = lp->lsn;
+ if ((ret = __log_putr(dblp, &lsn,
+ &t, lastoff == 0 ? 0 : lastoff - lp->len, &hdr)) != 0)
+ goto err;
+
+ /* Update the LSN information returned to the caller. */
+ if (lsnp != NULL)
+ *lsnp = lp->lsn;
+
+err:
+ if (need_free)
+ __os_free(dbenv, tmp);
+ return (ret);
+}
+
+/*
+ * __log_putr --
+ * Actually put a record into the log.
+ */
+static int
+__log_putr(dblp, lsn, dbt, prev, h)
+ DB_LOG *dblp;
+ DB_LSN *lsn;
+ const DBT *dbt;
+ u_int32_t prev;
+ HDR *h;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_LSN f_lsn;
+ LOG *lp;
+ HDR tmp, *hdr;
+ int ret, t_ret;
+ size_t b_off, nr;
+ u_int32_t w_off;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * If we weren't given a header, use a local one.
+ */
+ db_cipher = dbenv->crypto_handle;
+ if (h == NULL) {
+ hdr = &tmp;
+ memset(hdr, 0, sizeof(HDR));
+ if (CRYPTO_ON(dbenv))
+ hdr->size = HDR_CRYPTO_SZ;
+ else
+ hdr->size = HDR_NORMAL_SZ;
+ } else
+ hdr = h;
+
+ /* Save our position in case we fail. */
+ b_off = lp->b_off;
+ w_off = lp->w_off;
+ f_lsn = lp->f_lsn;
+
+ /*
+ * Initialize the header. If we just switched files, lsn.offset will
+ * be 0, and what we really want is the offset of the previous record
+ * in the previous file. Fortunately, prev holds the value we want.
+ */
+ hdr->prev = prev;
+ hdr->len = (u_int32_t)hdr->size + dbt->size;
+
+ /*
+ * If we were passed in a nonzero checksum, our caller calculated
+ * the checksum before acquiring the log mutex, as an optimization.
+ *
+ * If our caller calculated a real checksum of 0, we'll needlessly
+ * recalculate it. C'est la vie; there's no out-of-bounds value
+ * here.
+ */
+ if (hdr->chksum[0] == 0)
+ __db_chksum(dbt->data, dbt->size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL,
+ hdr->chksum);
+
+ if ((ret = __log_fill(dblp, lsn, hdr, (u_int32_t)hdr->size)) != 0)
+ goto err;
+
+ if ((ret = __log_fill(dblp, lsn, dbt->data, dbt->size)) != 0)
+ goto err;
+
+ lp->len = (u_int32_t)(hdr->size + dbt->size);
+ lp->lsn.offset += (u_int32_t)(hdr->size + dbt->size);
+ return (0);
+err:
+ /*
+ * If we wrote more than one buffer before failing, get the
+ * first one back. The extra buffers will fail the checksums
+ * and be ignored.
+ */
+ if (w_off + lp->buffer_size < lp->w_off) {
+ if ((t_ret =
+ __os_seek(dbenv,
+ &dblp->lfh, 0, 0, w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+ (t_ret = __os_read(dbenv, &dblp->lfh, dblp->bufp,
+ b_off, &nr)) != 0)
+ return (__db_panic(dbenv, t_ret));
+ if (nr != b_off) {
+ __db_err(dbenv, "Short read while restoring log");
+ return (__db_panic(dbenv, EIO));
+ }
+ }
+
+ /* Reset to where we started. */
+ lp->w_off = w_off;
+ lp->b_off = b_off;
+ lp->f_lsn = f_lsn;
+
+ return (ret);
+}
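+
+/*
+ * Sketch of the checksum optimization described in __log_putr (a
+ * hypothetical, non-crypto caller; the real callers are elsewhere in this
+ * file): the checksum may be computed into hdr.chksum before the log
+ * region lock is taken, and __log_putr skips recomputing it when
+ * hdr->chksum[0] is nonzero.
+ *
+ *	HDR hdr;
+ *	memset(&hdr, 0, sizeof(hdr));
+ *	hdr.size = HDR_NORMAL_SZ;
+ *	__db_chksum(dbt->data, dbt->size, NULL, hdr.chksum);
+ *	R_LOCK(dbenv, &dblp->reginfo);
+ *	ret = __log_putr(dblp, &lsn, dbt, lp->lsn.offset - lp->len, &hdr);
+ *	R_UNLOCK(dbenv, &dblp->reginfo);
+ */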
+
+/*
+ * __log_flush --
+ * Write all records less than or equal to the specified LSN.
+ *
+ * PUBLIC: int __log_flush __P((DB_ENV *, const DB_LSN *));
+ */
+int
+__log_flush(dbenv, lsn)
+ DB_ENV *dbenv;
+ const DB_LSN *lsn;
+{
+ DB_LOG *dblp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_flush", DB_INIT_LOG);
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_flush_int(dblp, lsn, 1);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
+
+/*
+ * __log_flush_int --
+ * Write all records less than or equal to the specified LSN; internal
+ * version.
+ */
+static int
+__log_flush_int(dblp, lsnp, release)
+ DB_LOG *dblp;
+ const DB_LSN *lsnp;
+ int release;
+{
+ DB_ENV *dbenv;
+ DB_LSN flush_lsn, f_lsn;
+ DB_MUTEX *flush_mutexp;
+ LOG *lp;
+ int current, do_flush, first, ret;
+ size_t b_off;
+ struct __db_commit *commit;
+ u_int32_t ncommit, w_off;
+
+ ret = 0;
+ ncommit = 0;
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+ flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
+
+ /*
+ * If no LSN specified, flush the entire log by setting the flush LSN
+	 * to the last LSN written in the log.  Otherwise, check that the LSN
+	 * isn't past the current end of the log.
+ */
+ if (lsnp == NULL) {
+ flush_lsn.file = lp->lsn.file;
+ flush_lsn.offset = lp->lsn.offset - lp->len;
+ } else if (lsnp->file > lp->lsn.file ||
+ (lsnp->file == lp->lsn.file &&
+ lsnp->offset > lp->lsn.offset - lp->len)) {
+ __db_err(dbenv,
+ "DB_ENV->log_flush: LSN past current end-of-log");
+ return (EINVAL);
+ } else {
+ /*
+ * See if we need to wait. s_lsn is not locked so some
+ * care is needed. The sync point can only move forward.
+ * If the file we want is in the past we are done.
+ * If the file numbers are the same check the offset.
+ * If this fails check the file numbers again since the
+ * offset might have changed while we were looking.
+ * This all assumes we can read an integer in one
+ * state or the other, not in transition.
+ */
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ if (lp->s_lsn.file == lsnp->file &&
+ lp->s_lsn.offset > lsnp->offset)
+ return (0);
+
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ flush_lsn = *lsnp;
+ }
+
+ /*
+ * If a flush is in progress and we're allowed to do so, drop
+ * the region lock and block waiting for the next flush.
+ */
+ if (release && lp->in_flush != 0) {
+ if ((commit = SH_TAILQ_FIRST(
+ &lp->free_commits, __db_commit)) == NULL) {
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr,
+ sizeof(struct __db_commit),
+ MUTEX_ALIGN, &commit)) != 0)
+ goto flush;
+ memset(commit, 0, sizeof(*commit));
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo,
+ &commit->mutex, MUTEX_SELF_BLOCK |
+ MUTEX_NO_RLOCK)) != 0) {
+ __db_shalloc_free(dblp->reginfo.addr, commit);
+ return (ret);
+ }
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ } else
+ SH_TAILQ_REMOVE(
+ &lp->free_commits, commit, links, __db_commit);
+
+ lp->ncommit++;
+
+ /*
+ * Flushes may be requested out of LSN order; be
+ * sure we only move lp->t_lsn forward.
+ */
+ if (log_compare(&lp->t_lsn, &flush_lsn) < 0)
+ lp->t_lsn = flush_lsn;
+
+ commit->lsn = flush_lsn;
+ SH_TAILQ_INSERT_HEAD(
+ &lp->commits, commit, links, __db_commit);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ /* Wait here for the in-progress flush to finish. */
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->ncommit--;
+ /*
+ * Grab the flag before freeing the struct to see if
+ * we need to flush the log to commit. If so,
+ * use the maximal lsn for any committing thread.
+ */
+ do_flush = F_ISSET(commit, DB_COMMIT_FLUSH);
+ F_CLR(commit, DB_COMMIT_FLUSH);
+ SH_TAILQ_INSERT_HEAD(
+ &lp->free_commits, commit, links, __db_commit);
+ if (do_flush) {
+ lp->in_flush--;
+ flush_lsn = lp->t_lsn;
+ } else
+ return (0);
+ }
+
+ /*
+ * Protect flushing with its own mutex so we can release
+ * the region lock except during file switches.
+ */
+flush: MUTEX_LOCK(dbenv, flush_mutexp);
+
+ /*
+ * If the LSN is less than or equal to the last-sync'd LSN, we're done.
+ * Note, the last-sync LSN saved in s_lsn is the LSN of the first byte
+ * after the byte we absolutely know was written to disk, so the test
+ * is <, not <=.
+ */
+ if (flush_lsn.file < lp->s_lsn.file ||
+ (flush_lsn.file == lp->s_lsn.file &&
+ flush_lsn.offset < lp->s_lsn.offset)) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ /*
+ * We may need to write the current buffer. We have to write the
+ * current buffer if the flush LSN is greater than or equal to the
+ * buffer's starting LSN.
+ */
+ current = 0;
+ if (lp->b_off != 0 && log_compare(&flush_lsn, &lp->f_lsn) >= 0) {
+ if ((ret = __log_write(dblp,
+ dblp->bufp, (u_int32_t)lp->b_off)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ lp->b_off = 0;
+ current = 1;
+ }
+
+ /*
+ * It's possible that this thread may never have written to this log
+ * file. Acquire a file descriptor if we don't already have one.
+ * One last check -- if we're not writing anything from the current
+ * buffer, don't bother. We have nothing to write and nothing to
+ * sync.
+ */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if (!current || (ret = __log_newfh(dblp)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ /*
+	 * We are going to flush, so release the region lock.
+	 * First record the current state of the buffer, since
+	 * another write may come in that we will not flush.
+ */
+ b_off = lp->b_off;
+ w_off = lp->w_off;
+ f_lsn = lp->f_lsn;
+ lp->in_flush++;
+ if (release)
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Sync all writes to disk. */
+ if ((ret = __os_fsync(dbenv, &dblp->lfh)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __db_panic(dbenv, ret);
+ return (ret);
+ }
+
+ /*
+ * Set the last-synced LSN.
+ * This value must be set to the LSN past the last complete
+ * record that has been flushed. This is at least the first
+ * lsn, f_lsn. If the buffer is empty, b_off == 0, then
+	 * we can move up to the write point, since the first lsn is not
+ * set for the new buffer.
+ */
+ lp->s_lsn = f_lsn;
+ if (b_off == 0)
+ lp->s_lsn.offset = w_off;
+
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->in_flush--;
+ ++lp->stat.st_scount;
+
+ /*
+ * How many flush calls (usually commits) did this call actually sync?
+ * At least one, if it got here.
+ */
+ ncommit = 1;
+done:
+ if (lp->ncommit != 0) {
+ first = 1;
+ for (commit = SH_TAILQ_FIRST(&lp->commits, __db_commit);
+ commit != NULL;
+ commit = SH_TAILQ_NEXT(commit, links, __db_commit))
+ if (log_compare(&lp->s_lsn, &commit->lsn) > 0) {
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ ncommit++;
+ } else if (first == 1) {
+ F_SET(commit, DB_COMMIT_FLUSH);
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ /*
+ * This thread will wake and flush.
+ * If another thread commits and flushes
+			 * first we will waste a trip through the
+ * mutex.
+ */
+ lp->in_flush++;
+ first = 0;
+ }
+ }
+ if (lp->stat.st_maxcommitperflush < ncommit)
+ lp->stat.st_maxcommitperflush = ncommit;
+ if (lp->stat.st_mincommitperflush > ncommit ||
+ lp->stat.st_mincommitperflush == 0)
+ lp->stat.st_mincommitperflush = ncommit;
+
+ return (ret);
+}
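+
+/*
+ * Usage sketch for the flush interfaces above (illustrative; assumes an
+ * environment opened with DB_INIT_LOG, and rec is a DBT the caller has
+ * already filled in).  After a successful flush, every record whose LSN is
+ * strictly less than lp->s_lsn is stable on disk, since s_lsn names the
+ * first byte past the data we absolutely know was written.
+ *
+ *	DB_LSN lsn;
+ *	if ((ret = dbenv->log_put(dbenv, &lsn, &rec, 0)) == 0)
+ *		ret = dbenv->log_flush(dbenv, &lsn);
+ */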
+
+/*
+ * __log_fill --
+ * Write information into the log.
+ */
+static int
+__log_fill(dblp, lsn, addr, len)
+ DB_LOG *dblp;
+ DB_LSN *lsn;
+ void *addr;
+ u_int32_t len;
+{
+ LOG *lp;
+ u_int32_t bsize, nrec;
+ size_t nw, remain;
+ int ret;
+
+ lp = dblp->reginfo.primary;
+ bsize = lp->buffer_size;
+
+ while (len > 0) { /* Copy out the data. */
+ /*
+ * If we're beginning a new buffer, note the user LSN to which
+ * the first byte of the buffer belongs. We have to know this
+ * when flushing the buffer so that we know if the in-memory
+ * buffer needs to be flushed.
+ */
+ if (lp->b_off == 0)
+ lp->f_lsn = *lsn;
+
+ /*
+ * If we're on a buffer boundary and the data is big enough,
+ * copy as many records as we can directly from the data.
+ */
+ if (lp->b_off == 0 && len >= bsize) {
+ nrec = len / bsize;
+ if ((ret = __log_write(dblp, addr, nrec * bsize)) != 0)
+ return (ret);
+ addr = (u_int8_t *)addr + nrec * bsize;
+ len -= nrec * bsize;
+ ++lp->stat.st_wcount_fill;
+ continue;
+ }
+
+ /* Figure out how many bytes we can copy this time. */
+ remain = bsize - lp->b_off;
+ nw = remain > len ? len : remain;
+ memcpy(dblp->bufp + lp->b_off, addr, nw);
+ addr = (u_int8_t *)addr + nw;
+ len -= (u_int32_t)nw;
+ lp->b_off += nw;
+
+ /* If we fill the buffer, flush it. */
+ if (lp->b_off == bsize) {
+ if ((ret = __log_write(dblp, dblp->bufp, bsize)) != 0)
+ return (ret);
+ lp->b_off = 0;
+ ++lp->stat.st_wcount_fill;
+ }
+ }
+ return (0);
+}
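+
+/*
+ * Worked example for __log_fill (illustrative numbers, assuming a 32KB
+ * in-memory log buffer): with lp->b_off == 0 and a 100KB record, three
+ * full buffer-sized chunks (96KB) are written straight from the caller's
+ * memory by __log_write, and the remaining 4KB is copied into the buffer,
+ * leaving lp->b_off at 4KB.  A later 28KB record then fills the buffer
+ * exactly, so the buffer is written and lp->b_off returns to 0.
+ */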
+
+/*
+ * __log_write --
+ * Write the log buffer to disk.
+ */
+static int
+__log_write(dblp, addr, len)
+ DB_LOG *dblp;
+ void *addr;
+ u_int32_t len;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ size_t nw;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * If we haven't opened the log file yet or the current one
+ * has changed, acquire a new log file.
+ */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if ((ret = __log_newfh(dblp)) != 0)
+ return (ret);
+
+ /*
+ * Seek to the offset in the file (someone may have written it
+ * since we last did).
+ */
+ if ((ret =
+ __os_seek(dbenv,
+ &dblp->lfh, 0, 0, lp->w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_write(dbenv, &dblp->lfh, addr, len, &nw)) != 0)
+ return (ret);
+
+ /* Reset the buffer offset and update the seek offset. */
+ lp->w_off += len;
+
+ /* Update written statistics. */
+ if ((lp->stat.st_w_bytes += len) >= MEGABYTE) {
+ lp->stat.st_w_bytes -= MEGABYTE;
+ ++lp->stat.st_w_mbytes;
+ }
+ if ((lp->stat.st_wc_bytes += len) >= MEGABYTE) {
+ lp->stat.st_wc_bytes -= MEGABYTE;
+ ++lp->stat.st_wc_mbytes;
+ }
+ ++lp->stat.st_wcount;
+
+ return (0);
+}
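+
+/*
+ * Worked example for the statistics rollover above (illustrative numbers):
+ * with st_w_bytes at 1,048,000 and a 1,000-byte write, st_w_bytes becomes
+ * 1,049,000, which is >= MEGABYTE (1,048,576), so it is reduced to 424 and
+ * st_w_mbytes is incremented.  The pair always encodes the running total
+ * as st_w_mbytes * MEGABYTE + st_w_bytes.
+ */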
+
+/*
+ * __log_file --
+ * Map a DB_LSN to a file name.
+ *
+ * PUBLIC: int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ */
+int
+__log_file(dbenv, lsn, namep, len)
+ DB_ENV *dbenv;
+ const DB_LSN *lsn;
+ char *namep;
+ size_t len;
+{
+ DB_LOG *dblp;
+ int ret;
+ char *name;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_file", DB_INIT_LOG);
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_name(dblp, lsn->file, &name, NULL, 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (ret != 0)
+ return (ret);
+
+ /* Check to make sure there's enough room and copy the name. */
+ if (len < strlen(name) + 1) {
+ *namep = '\0';
+ __db_err(dbenv, "DB_ENV->log_file: name buffer is too short");
+ return (EINVAL);
+ }
+ (void)strcpy(namep, name);
+ __os_free(dbenv, name);
+
+ return (0);
+}
+
+/*
+ * __log_newfh --
+ * Acquire a file handle for the current log file.
+ */
+static int
+__log_newfh(dblp)
+ DB_LOG *dblp;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ int ret;
+ char *name;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /* Close any previous file descriptor. */
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &dblp->lfh);
+
+ /*
+ * Get the path of the new file and open it.
+ *
+ * Adding DB_OSO_LOG to the flags may add additional platform-specific
+	 * optimizations.  On WinNT, the logfile is preallocated, which may
+	 * incur a time penalty at startup, but gives better overall throughput.
+ * We are not certain that this works reliably, so enable at your own
+ * risk.
+ *
+ * XXX:
+ * Initialize the log file size. This is a hack to push the log's
+ * maximum size down into the Windows __os_open routine, because it
+ * wants to pre-allocate it.
+ */
+ dblp->lfname = lp->lsn.file;
+ dblp->lfh.log_size = lp->log_size;
+ if ((ret = __log_name(dblp, dblp->lfname,
+ &name, &dblp->lfh,
+ DB_OSO_CREATE |/* DB_OSO_LOG |*/ DB_OSO_SEQ |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0))) != 0)
+ __db_err(dbenv,
+ "DB_ENV->log_put: %s: %s", name, db_strerror(ret));
+
+ __os_free(dbenv, name);
+ return (ret);
+}
+
+/*
+ * __log_name --
+ * Return the log name for a particular file, and optionally open it.
+ *
+ * PUBLIC: int __log_name __P((DB_LOG *,
+ * PUBLIC: u_int32_t, char **, DB_FH *, u_int32_t));
+ */
+int
+__log_name(dblp, filenumber, namep, fhp, flags)
+ DB_LOG *dblp;
+ u_int32_t filenumber, flags;
+ char **namep;
+ DB_FH *fhp;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ int ret;
+ char *oname;
+ char old[sizeof(LFPREFIX) + 5 + 20], new[sizeof(LFPREFIX) + 10 + 20];
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * !!!
+ * The semantics of this routine are bizarre.
+ *
+ * The reason for all of this is that we need a place where we can
+ * intercept requests for log files, and, if appropriate, check for
+ * both the old-style and new-style log file names. The trick is
+ * that all callers of this routine that are opening the log file
+ * read-only want to use an old-style file name if they can't find
+ * a match using a new-style name. The only down-side is that some
+ * callers may check for the old-style when they really don't need
+ * to, but that shouldn't mess up anything, and we only check for
+ * the old-style name when we've already failed to find a new-style
+ * one.
+ *
+ * Create a new-style file name, and if we're not going to open the
+ * file, return regardless.
+ */
+ (void)snprintf(new, sizeof(new), LFNAME, filenumber);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_LOG, new, 0, NULL, namep)) != 0 || fhp == NULL)
+ return (ret);
+
+ /* Open the new-style file -- if we succeed, we're done. */
+ if ((ret = __os_open(dbenv, *namep, flags, lp->persist.mode, fhp)) == 0)
+ return (0);
+
+ /*
+	 * The open failed.  If the DB_OSO_RDONLY flag isn't set, we're done;
+	 * the caller isn't interested in old-style files.
+ */
+ if (!LF_ISSET(DB_OSO_RDONLY)) {
+ __db_err(dbenv,
+ "%s: log file open failed: %s", *namep, db_strerror(ret));
+ return (__db_panic(dbenv, ret));
+ }
+
+ /* Create an old-style file name. */
+ (void)snprintf(old, sizeof(old), LFNAME_V1, filenumber);
+ if ((ret = __db_appname(dbenv, DB_APP_LOG, old, 0, NULL, &oname)) != 0)
+ goto err;
+
+ /*
+ * Open the old-style file -- if we succeed, we're done. Free the
+ * space allocated for the new-style name and return the old-style
+ * name to the caller.
+ */
+ if ((ret = __os_open(dbenv,
+ oname, flags, lp->persist.mode, fhp)) == 0) {
+ __os_free(dbenv, *namep);
+ *namep = oname;
+ return (0);
+ }
+
+ /*
+ * Couldn't find either style of name -- return the new-style name
+ * for the caller's error message. If it's an old-style name that's
+ * actually missing we're going to confuse the user with the error
+ * message, but that implies that not only were we looking for an
+ * old-style name, but we expected it to exist and we weren't just
+ * looking for any log file. That's not a likely error.
+ */
+err: __os_free(dbenv, oname);
+ return (ret);
+}
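+
+/*
+ * Example of the naming fallback above (illustrative; the exact formats
+ * come from LFNAME and LFNAME_V1 -- new-style names are zero-padded wider
+ * than old-style ones): a read-only caller asking for file number 27 first
+ * tries the new-style name, e.g. "log.0000000027", and only if that open
+ * fails does it fall back to the old-style name, e.g. "log.00027".  A
+ * caller that only wants the name passes a NULL file handle:
+ *
+ *	char *name;
+ *	if ((ret = __log_name(dblp, 27, &name, NULL, 0)) == 0) {
+ *		... use name ...
+ *		__os_free(dbenv, name);
+ *	}
+ */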
+
+/*
+ * __log_rep_put --
+ * Short-circuit way for replication clients to put records into the
+ * log. Replication clients' logs need to be laid out exactly their masters'
+ * are, so we let replication take responsibility for when the log gets
+ * flushed, when log switches files, etc. This is just a thin PUBLIC wrapper
+ * for __log_putr with a slightly prettier interface.
+ *
+ * Note that the log region mutex should be held when this is called.
+ *
+ * PUBLIC: int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
+ */
+int
+__log_rep_put(dbenv, lsnp, rec)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ const DBT *rec;
+{
+ DB_CIPHER *db_cipher;
+ DB_LOG *dblp;
+ HDR hdr;
+ DBT *dbt, t;
+ LOG *lp;
+ int need_free, ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ memset(&hdr, 0, sizeof(HDR));
+ t = *rec;
+ dbt = &t;
+ need_free = 0;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ if (CRYPTO_ON(dbenv))
+ t.size += db_cipher->adj_size(rec->size);
+ if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+ goto err;
+ need_free = 1;
+ memcpy(t.data, rec->data, rec->size);
+
+ if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, rec->size)) != 0)
+ goto err;
+ __db_chksum(t.data, t.size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+
+ DB_ASSERT(log_compare(lsnp, &lp->lsn) == 0);
+ ret = __log_putr(dblp, lsnp, dbt, lp->lsn.offset - lp->len, &hdr);
+err:
+ if (need_free)
+ __os_free(dbenv, t.data);
+ return (ret);
+}
+
+static int
+__log_encrypt_record(dbenv, dbt, hdr, orig)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ HDR *hdr;
+ u_int32_t orig;
+{
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ if (CRYPTO_ON(dbenv)) {
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ hdr->size = HDR_CRYPTO_SZ;
+ hdr->orig_size = orig;
+ if ((ret = db_cipher->encrypt(dbenv, db_cipher->data,
+ hdr->iv, dbt->data, dbt->size)) != 0)
+ return (ret);
+ } else {
+ hdr->size = HDR_NORMAL_SZ;
+ }
+ return (0);
+}
diff --git a/libdb/mp/mp_alloc.c b/libdb/mp/mp_alloc.c
new file mode 100644
index 0000000..3672150
--- /dev/null
+++ b/libdb/mp/mp_alloc.c
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+typedef struct {
+ DB_MPOOL_HASH *bucket;
+ u_int32_t priority;
+} HS;
+
+static void __memp_bad_buffer __P((DB_MPOOL_HASH *));
+static void __memp_reset_lru __P((DB_ENV *, REGINFO *, MPOOL *));
+
+/*
+ * __memp_alloc --
+ * Allocate some space from a cache region.
+ *
+ * PUBLIC: int __memp_alloc __P((DB_MPOOL *,
+ * PUBLIC: REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+ */
+int
+__memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
+ DB_MPOOL *dbmp;
+ REGINFO *memreg;
+ MPOOLFILE *mfp;
+ size_t len;
+ roff_t *offsetp;
+ void *retp;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL_HASH *dbht, *hp, *hp_end, *hp_tmp;
+ DB_MUTEX *mutexp;
+ MPOOL *c_mp;
+ MPOOLFILE *bh_mfp;
+ size_t freed_space;
+ u_int32_t buckets, buffers, high_priority, max_na, priority;
+ int aggressive, ret;
+ void *p;
+
+ dbenv = dbmp->dbenv;
+ c_mp = memreg->primary;
+ dbht = R_ADDR(memreg, c_mp->htab);
+ hp_end = &dbht[c_mp->htab_buckets];
+
+ buckets = buffers = 0;
+ aggressive = 0;
+
+ c_mp->stat.st_alloc++;
+
+ /*
+	 * Get aggressive if we've tried to flush as many pages as are
+	 * in the system without finding space.
+ */
+ max_na = 5 * c_mp->htab_buckets;
+
+ /*
+ * If we're allocating a buffer, and the one we're discarding is the
+ * same size, we don't want to waste the time to re-integrate it into
+ * the shared memory free list. If the DB_MPOOLFILE argument isn't
+ * NULL, we'll compare the underlying page sizes of the two buffers
+ * before free-ing and re-allocating buffers.
+ */
+ if (mfp != NULL)
+ len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
+
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * On every buffer allocation we update the buffer generation number
+ * and check for wraparound.
+ */
+ if (++c_mp->lru_count == UINT32_T_MAX)
+ __memp_reset_lru(dbenv, memreg, c_mp);
+
+ /*
+ * Anything newer than 1/10th of the buffer pool is ignored during
+ * allocation (unless allocation starts failing).
+ */
+ DB_ASSERT(c_mp->lru_count > c_mp->stat.st_pages / 10);
+ high_priority = c_mp->lru_count - c_mp->stat.st_pages / 10;
+
+ /*
+ * First we try to allocate from free memory. If that fails, scan the
+ * buffer pool to find buffers with low priorities. We consider small
+ * sets of hash buckets each time to limit the amount of work needing
+ * to be done. This approximates LRU, but not very well. We either
+ * find a buffer of the same size to use, or we will free 3 times what
+ * we need in the hopes it will coalesce into a contiguous chunk of the
+ * right size. In the latter case we branch back here and try again.
+ */
+alloc: if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) {
+ if (mfp != NULL)
+ c_mp->stat.st_pages++;
+ R_UNLOCK(dbenv, memreg);
+
+found: if (offsetp != NULL)
+ *offsetp = R_OFFSET(memreg, p);
+ *(void **)retp = p;
+
+ /*
+ * Update the search statistics.
+ *
+		 * We're not holding the region locked here, so these statistics
+		 * can't be trusted.
+ */
+ if (buckets != 0) {
+ if (buckets > c_mp->stat.st_alloc_max_buckets)
+ c_mp->stat.st_alloc_max_buckets = buckets;
+ c_mp->stat.st_alloc_buckets += buckets;
+ }
+ if (buffers != 0) {
+ if (buffers > c_mp->stat.st_alloc_max_pages)
+ c_mp->stat.st_alloc_max_pages = buffers;
+ c_mp->stat.st_alloc_pages += buffers;
+ }
+ return (0);
+ }
+
+ /*
+ * We re-attempt the allocation every time we've freed 3 times what
+ * we need. Reset our free-space counter.
+ */
+ freed_space = 0;
+
+ /*
+ * Walk the hash buckets and find the next two with potentially useful
+ * buffers. Free the buffer with the lowest priority from the buckets'
+ * chains.
+ */
+ for (hp_tmp = NULL;;) {
+ /* Check for wrap around. */
+ hp = &dbht[c_mp->last_checked++];
+ if (hp >= hp_end) {
+ c_mp->last_checked = 0;
+
+ /*
+ * If we've gone through all of the hash buckets, try
+ * an allocation. If the cache is small, the old page
+ * size is small, and the new page size is large, we
+ * might have freed enough memory (but not 3 times the
+ * memory).
+ */
+ goto alloc;
+ }
+
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ /*
+ * The failure mode is when there are too many buffers we can't
+ * write or there's not enough memory in the system. We don't
+ * have a metric for deciding if allocation has no possible way
+		 * to succeed, so we don't ever fail; we assume memory will be
+ * available if we wait long enough.
+ *
+		 * Get aggressive if we've tried to flush 5 times as many
+		 * hash buckets as there are in the system -- it's possible we have
+ * been repeatedly trying to flush the same buffers, although
+ * it's unlikely. Aggressive means:
+ *
+ * a: set a flag to attempt to flush high priority buffers as
+ * well as other buffers.
+ * b: sync the mpool to force out queue extent pages. While we
+ * might not have enough space for what we want and flushing
+ * is expensive, why not?
+ * c: sleep for a second -- hopefully someone else will run and
+ * free up some memory. Try to allocate memory too, in case
+ * the other thread returns its memory to the region.
+ * d: look at a buffer in every hash bucket rather than choose
+ * the more preferable of two.
+ *
+ * !!!
+ * This test ignores pathological cases like no buffers in the
+ * system -- that shouldn't be possible.
+ */
+ if ((++buckets % max_na) == 0) {
+ aggressive = 1;
+
+ R_UNLOCK(dbenv, memreg);
+
+ (void)__memp_sync_int(
+ dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
+
+ (void)__os_sleep(dbenv, 1, 0);
+
+ R_LOCK(dbenv, memreg);
+ goto alloc;
+ }
+
+ if (!aggressive) {
+ /* Skip high priority buckets. */
+ if (hp->hash_priority > high_priority)
+ continue;
+
+ /*
+ * Find two buckets and select the one with the lowest
+ * priority. Performance testing shows that looking
+ * at two improves the LRUness and looking at more only
+ * does a little better.
+ */
+ if (hp_tmp == NULL) {
+ hp_tmp = hp;
+ continue;
+ }
+ if (hp->hash_priority > hp_tmp->hash_priority)
+ hp = hp_tmp;
+ hp_tmp = NULL;
+ }
+
+ /* Remember the priority of the buffer we're looking for. */
+ priority = hp->hash_priority;
+
+ /* Unlock the region and lock the hash bucket. */
+ R_UNLOCK(dbenv, memreg);
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
+ /*
+ * The lowest priority page is first in the bucket, as they are
+ * maintained in sorted order.
+ *
+ * The buffer may have been freed or its priority changed while
+ * we switched from the region lock to the hash lock. If so,
+ * we have to restart. We will still take the first buffer on
+ * the bucket's list, though, if it has a low enough priority.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL ||
+ bhp->ref != 0 || bhp->priority > priority)
+ goto next_hb;
+
+ buffers++;
+
+ /* Find the associated MPOOLFILE. */
+ bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /* If the page is dirty, pin it and write it. */
+ ret = 0;
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ ++bhp->ref;
+ ret = __memp_bhwrite(dbmp, hp, bh_mfp, bhp, 0);
+ --bhp->ref;
+ if (ret == 0)
+ ++c_mp->stat.st_rw_evict;
+ } else
+ ++c_mp->stat.st_ro_evict;
+
+ /*
+ * If a write fails for any reason, we can't proceed.
+ *
+ * We released the hash bucket lock while doing I/O, so another
+ * thread may have acquired this buffer and incremented the ref
+ * count after we wrote it, in which case we can't have it.
+ *
+ * If there's a write error, avoid selecting this buffer again
+ * by making it the bucket's least-desirable buffer.
+ */
+ if (ret != 0 || bhp->ref != 0) {
+ if (ret != 0 && aggressive)
+ __memp_bad_buffer(hp);
+ goto next_hb;
+ }
+
+ /*
+ * Check to see if the buffer is the size we're looking for.
+ * If so, we can simply reuse it. Else, free the buffer and
+ * its space and keep looking.
+ */
+ if (mfp != NULL &&
+ mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
+ __memp_bhfree(dbmp, hp, bhp, 0);
+
+ p = bhp;
+ goto found;
+ }
+
+ freed_space += __db_shsizeof(bhp);
+ __memp_bhfree(dbmp, hp, bhp, 1);
+
+ /*
+ * Unlock this hash bucket and re-acquire the region lock. If
+ * we're reaching here as a result of calling memp_bhfree, the
+ * hash bucket lock has already been discarded.
+ */
+ if (0) {
+next_hb: MUTEX_UNLOCK(dbenv, mutexp);
+ }
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * Retry the allocation as soon as we've freed up sufficient
+		 * space.  We're likely to have to coalesce memory to
+		 * satisfy the request, so don't try until it's likely (possible?)
+		 * that we'll succeed.
+ */
+ if (freed_space >= 3 * len)
+ goto alloc;
+ }
+ /* NOTREACHED */
+}
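+
+/*
+ * Worked example of the priority cutoff above (illustrative numbers): with
+ * lru_count at 1,000,000 and st_pages at 50,000, high_priority is 995,000,
+ * so buffers touched within the most recent 5,000 allocations -- roughly
+ * the newest tenth of the pool -- are skipped until the search turns
+ * aggressive.
+ */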
+
+/*
+ * __memp_bad_buffer --
+ * Make the first buffer in a hash bucket the least desirable buffer.
+ */
+static void
+__memp_bad_buffer(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp, *t_bhp;
+ u_int32_t priority;
+
+ /* Remove the first buffer from the bucket. */
+ bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+
+ /*
+ * Find the highest priority buffer in the bucket. Buffers are
+ * sorted by priority, so it's the last one in the bucket.
+ *
+ * XXX
+ * Should use SH_TAILQ_LAST, but I think that macro is broken.
+ */
+ priority = bhp->priority;
+ for (t_bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ t_bhp != NULL; t_bhp = SH_TAILQ_NEXT(t_bhp, hq, __bh))
+ priority = t_bhp->priority;
+
+ /*
+ * Set our buffer's priority to be just as bad, and append it to
+ * the bucket.
+ */
+ bhp->priority = priority;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+
+ /* Reset the hash bucket's priority. */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+}
+
+/*
+ * __memp_reset_lru --
+ * Reset the cache LRU counter.
+ */
+static void
+__memp_reset_lru(dbenv, memreg, c_mp)
+ DB_ENV *dbenv;
+ REGINFO *memreg;
+ MPOOL *c_mp;
+{
+ BH *bhp;
+ DB_MPOOL_HASH *hp;
+ int bucket;
+
+ /*
+ * Update the counter so all future allocations will start at the
+ * bottom.
+ */
+ c_mp->lru_count -= MPOOL_BASE_DECREMENT;
+
+ /* Release the region lock. */
+ R_UNLOCK(dbenv, memreg);
+
+ /* Adjust the priority of every buffer in the system. */
+ for (hp = R_ADDR(memreg, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority != UINT32_T_MAX &&
+ bhp->priority > MPOOL_BASE_DECREMENT)
+ bhp->priority -= MPOOL_BASE_DECREMENT;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+
+ /* Reacquire the region lock. */
+ R_LOCK(dbenv, memreg);
+}
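+
+/*
+ * Worked example for the wraparound handling above (illustrative; the
+ * actual MPOOL_BASE_DECREMENT value is an implementation constant): if
+ * MPOOL_BASE_DECREMENT were 0x40000000, a buffer at priority 0x80000000
+ * would drop to 0x40000000, a buffer already at or below 0x40000000 would
+ * be left alone, and a buffer pinned at UINT32_T_MAX would keep that
+ * value, so the relative ordering of buffers is preserved while the
+ * counter gets room to keep growing.
+ */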
+
+#ifdef DIAGNOSTIC
+/*
+ * __memp_check_order --
+ * Verify the priority ordering of a hash bucket chain.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __memp_check_order __P((DB_MPOOL_HASH *));
+ * PUBLIC: #endif
+ */
+void
+__memp_check_order(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp;
+ u_int32_t priority;
+
+ /*
+ * Assumes the hash bucket is locked.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL)
+ return;
+
+ DB_ASSERT(bhp->priority == hp->hash_priority);
+
+ for (priority = bhp->priority;
+ (bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) != NULL;
+ priority = bhp->priority)
+ DB_ASSERT(priority <= bhp->priority);
+}
+#endif
diff --git a/libdb/mp/mp_bh.c b/libdb/mp/mp_bh.c
new file mode 100644
index 0000000..34df0a8
--- /dev/null
+++ b/libdb/mp/mp_bh.c
@@ -0,0 +1,646 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+
+static int __memp_pgwrite
+ __P((DB_MPOOL *, DB_MPOOLFILE *, DB_MPOOL_HASH *, BH *));
+static int __memp_upgrade __P((DB_MPOOL *, DB_MPOOLFILE *, MPOOLFILE *));
+
+/*
+ * __memp_bhwrite --
+ * Write the page associated with a given buffer header.
+ *
+ * PUBLIC: int __memp_bhwrite __P((DB_MPOOL *,
+ * PUBLIC: DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+ */
+int
+__memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOLFILE *mfp;
+ BH *bhp;
+ int open_extents;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ int local_open, incremented, ret;
+
+ dbenv = dbmp->dbenv;
+ local_open = incremented = 0;
+
+ /*
+ * If the file has been removed or is a closed temporary file, jump
+ * right ahead and pretend that we've found the file we want -- the
+ * page-write function knows how to handle the fact that we don't have
+ * (or need!) any real file descriptor information.
+ */
+ if (F_ISSET(mfp, MP_DEADFILE)) {
+ dbmfp = NULL;
+ goto found;
+ }
+
+ /*
+ * Walk the process' DB_MPOOLFILE list and find a file descriptor for
+ * the file. We also check that the descriptor is open for writing.
+ * If we find a descriptor on the file that's not open for writing, we
+ * try and upgrade it to make it writeable. If that fails, we're done.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ if (F_ISSET(dbmfp, MP_READONLY) &&
+ !F_ISSET(dbmfp, MP_UPGRADE) &&
+ (F_ISSET(dbmfp, MP_UPGRADE_FAIL) ||
+ __memp_upgrade(dbmp, dbmfp, mfp))) {
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ return (EPERM);
+ }
+
+ /*
+ * Increment the reference count -- see the comment in
+ * __memp_fclose_int().
+ */
+ ++dbmfp->ref;
+ incremented = 1;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (dbmfp != NULL)
+ goto found;
+
+ /*
+ * !!!
+ * It's the caller's choice if we're going to open extent files.
+ */
+ if (!open_extents && F_ISSET(mfp, MP_EXTENT))
+ return (EPERM);
+
+ /*
+ * !!!
+ * Don't try to attach to temporary files. There are two problems in
+ * trying to do that. First, if we have different privileges than the
+ * process that "owns" the temporary file, we might create the backing
+ * disk file such that the owning process couldn't read/write its own
+ * buffers, e.g., memp_trickle running as root creating a file owned
+ * as root, mode 600. Second, if the temporary file has already been
+ * created, we don't have any way of finding out what its real name is,
+ * and, even if we did, it was already unlinked (so that it won't be
+ * left if the process dies horribly). This decision causes a problem,
+ * however: if the temporary file consumes the entire buffer cache,
+ * and the owner doesn't flush the buffers to disk, we could end up
+ * with resource starvation, and the memp_trickle thread couldn't do
+ * anything about it. That's a pretty unlikely scenario, though.
+ *
+ * Note we should never get here when the temporary file in question
+ * has already been closed in another process, in which case it should
+ * be marked MP_DEADFILE.
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ return (EPERM);
+
+ /*
+ * It's not a page from a file we've opened. If the file requires
+ * input/output processing, see if this process has ever registered
+ * information as to how to write this type of file. If not, there's
+ * nothing we can do.
+ */
+ if (mfp->ftype != 0) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == mfp->ftype)
+ break;
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (mpreg == NULL)
+ return (EPERM);
+ }
+
+ /*
+ * Try and open the file, attaching to the underlying shared area.
+ * Ignore any error, assume it's a permissions problem.
+ *
+ * XXX
+ * There's no negative cache, so we may repeatedly try and open files
+ * that we have previously tried (and failed) to open.
+ */
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ return (ret);
+ if ((ret = __memp_fopen_int(dbmfp, mfp,
+ R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize)) != 0) {
+ (void)dbmfp->close(dbmfp, 0);
+ return (ret);
+ }
+ local_open = 1;
+
+found: ret = __memp_pgwrite(dbmp, dbmfp, hp, bhp);
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (incremented)
+ --dbmfp->ref;
+ else if (local_open)
+ F_SET(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __memp_pgread --
+ * Read a page from a file.
+ *
+ * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
+ */
+int
+__memp_pgread(dbmfp, mutexp, bhp, can_create)
+ DB_MPOOLFILE *dbmfp;
+ DB_MUTEX *mutexp;
+ BH *bhp;
+ int can_create;
+{
+ DB_IO db_io;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ size_t len, nr, pagesize;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+ pagesize = mfp->stat.st_pagesize;
+
+ /* We should never be called with a dirty or a locked buffer. */
+ DB_ASSERT(!F_ISSET(bhp, BH_DIRTY | BH_DIRTY_CREATE | BH_LOCKED));
+
+ /* Lock the buffer and swap the hash bucket lock for the buffer lock. */
+ F_SET(bhp, BH_LOCKED | BH_TRASH);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ /*
+ * Temporary files may not yet have been created. We don't create
+	 * them now; we create them when the pages have to be flushed.
+ */
+ nr = 0;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+
+ /*
+ * The page may not exist; if it doesn't, nr may well be 0,
+ * but we expect the underlying OS calls not to return an
+ * error code in this case.
+ */
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr)) != 0)
+ goto err;
+ }
+
+ if (nr < pagesize) {
+ /*
+ * Don't output error messages for short reads. In particular,
+ * DB recovery processing may request pages never written to
+ * disk or for which only some part have been written to disk,
+ * in which case we won't find the page. The caller must know
+ * how to handle the error.
+ */
+ if (can_create == 0) {
+ ret = DB_PAGE_NOTFOUND;
+ goto err;
+ }
+
+ /* Clear any bytes that need to be cleared. */
+ len = mfp->clear_len == 0 ? pagesize : mfp->clear_len;
+ memset(bhp->buf, 0, len);
+
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ /*
+ * If we're running in diagnostic mode, corrupt any bytes on
+ * the page that are unknown quantities for the caller.
+ */
+ if (len < pagesize)
+ memset(bhp->buf + len, CLEAR_BYTE, pagesize - len);
+#endif
+ ++mfp->stat.st_page_create;
+ } else
+ ++mfp->stat.st_page_in;
+
+ /* Call any pgin function. */
+ ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp, 1);
+
+ /* Unlock the buffer and reacquire the hash bucket lock. */
+err: MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, mutexp);
+
+ /*
+	 * If no errors occurred, the data is now valid, so clear the BH_TRASH
+ * flag; regardless, clear the lock bit and let other threads proceed.
+ */
+ F_CLR(bhp, BH_LOCKED);
+ if (ret == 0)
+ F_CLR(bhp, BH_TRASH);
+
+ return (ret);
+}
+
+/*
+ * __memp_pgwrite --
+ * Write a page to a file.
+ */
+static int
+__memp_pgwrite(dbmp, dbmfp, hp, bhp)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPOOL_HASH *hp;
+ BH *bhp;
+{
+ DB_ENV *dbenv;
+ DB_IO db_io;
+ DB_LSN lsn;
+ MPOOLFILE *mfp;
+ size_t nw;
+ int callpgin, ret;
+
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp == NULL ? NULL : dbmfp->mfp;
+ callpgin = ret = 0;
+
+ /*
+ * We should never be called with a clean or trash buffer.
+ * The sync code does call us with already locked buffers.
+ */
+ DB_ASSERT(F_ISSET(bhp, BH_DIRTY));
+ DB_ASSERT(!F_ISSET(bhp, BH_TRASH));
+
+ /*
+ * If we have not already traded the hash bucket lock for the buffer
+ * lock, do so now.
+ */
+ if (!F_ISSET(bhp, BH_LOCKED)) {
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+
+ /*
+ * It's possible that the underlying file doesn't exist, either
+ * because of an outright removal or because it was a temporary
+ * file that's been closed.
+ *
+ * !!!
+ * Once we pass this point, we know that dbmfp and mfp aren't NULL,
+ * and that we have a valid file reference.
+ */
+ if (mfp == NULL || F_ISSET(mfp, MP_DEADFILE))
+ goto file_dead;
+
+ /*
+ * If the page is in a file for which we have LSN information, we have
+ * to ensure the appropriate log records are on disk.
+ */
+ if (LOGGING_ON(dbenv) && mfp->lsn_off != -1) {
+ memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
+ if ((ret = dbenv->log_flush(dbenv, &lsn)) != 0)
+ goto err;
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * Verify write-ahead logging semantics.
+ *
+ * !!!
+ * One special case. There is a single field on the meta-data page,
+ * the last-page-number-in-the-file field, for which we do not log
+ * changes. If the page was originally created in a database that
+ * didn't have logging turned on, we can see a page marked dirty but
+ * for which no corresponding log record has been written. However,
+ * the only way that a page can be created for which there isn't a
+ * previous log record and valid LSN is when the page was created
+ * without logging turned on, and so we check for that special-case
+ * LSN value.
+ */
+ if (LOGGING_ON(dbenv) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf))) {
+ /*
+ * There is a potential race here. If we are in the midst of
+ * switching log files, it's possible we could test against the
+ * old file and the new offset in the log region's LSN. If we
+ * fail the first test, acquire the log mutex and check again.
+ */
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ if (!IS_NOT_LOGGED_LSN(LSN(bhp->buf)) &&
+ log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ DB_ASSERT(log_compare(&lp->s_lsn, &LSN(bhp->buf)) > 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+ }
+#endif
+
+ /*
+	 * Call any pgout function.  We set the callpgin flag to record
+ * that the contents of the buffer will need to be passed through pgin
+ * before they are reused.
+ */
+ if (mfp->ftype != 0) {
+ callpgin = 1;
+ if ((ret = __memp_pg(dbmfp, bhp, 0)) != 0)
+ goto err;
+ }
+
+ /* Temporary files may not yet have been created. */
+ if (!F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ ret = F_ISSET(dbmfp->fhp, DB_FH_VALID) ? 0 :
+ __db_appname(dbenv, DB_APP_TMP, NULL,
+ F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_OSO_DIRECT : 0,
+ dbmfp->fhp, NULL);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0) {
+ __db_err(dbenv,
+ "unable to create temporary backing file");
+ goto err;
+ }
+ }
+
+ /* Write the page. */
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ __db_err(dbenv, "%s: write failed for page %lu",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ goto err;
+ }
+ ++mfp->stat.st_page_out;
+
+err:
+file_dead:
+ /*
+ * !!!
+ * Once we pass this point, dbmfp and mfp may be NULL, we may not have
+ * a valid file reference.
+ *
+ * Unlock the buffer and reacquire the hash lock.
+ */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * If we rewrote the page, it will need processing by the pgin
+ * routine before reuse.
+ */
+ if (callpgin)
+ F_SET(bhp, BH_CALLPGIN);
+
+ /*
+ * Update the hash bucket statistics, reset the flags.
+ * If we were successful, the page is no longer dirty.
+ */
+ if (ret == 0) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+
+ F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ }
+
+ /* Regardless, clear any sync wait-for count and remove our lock. */
+ bhp->ref_sync = 0;
+ F_CLR(bhp, BH_LOCKED);
+
+ return (ret);
+}
+
+/*
+ * __memp_pg --
+ * Call the pgin/pgout routine.
+ *
+ * PUBLIC: int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+ */
+int
+__memp_pg(dbmfp, bhp, is_pgin)
+ DB_MPOOLFILE *dbmfp;
+ BH *bhp;
+ int is_pgin;
+{
+ DBT dbt, *dbtp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ MPOOLFILE *mfp;
+ int ftype, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+
+ ftype = mfp->ftype;
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q)) {
+ if (ftype != mpreg->ftype)
+ continue;
+ if (mfp->pgcookie_len == 0)
+ dbtp = NULL;
+ else {
+ dbt.size = mfp->pgcookie_len;
+ dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
+ dbtp = &dbt;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (is_pgin) {
+ if (mpreg->pgin != NULL &&
+ (ret = mpreg->pgin(dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ } else
+ if (mpreg->pgout != NULL &&
+ (ret = mpreg->pgout(dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ break;
+ }
+
+ if (mpreg == NULL)
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), is_pgin ? "pgin" : "pgout", (u_long)bhp->pgno);
+ return (ret);
+}
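+
+/*
+ * Usage sketch for the pgin/pgout hooks consulted above (illustrative;
+ * byteswap_pgin is a hypothetical callback): applications register the
+ * per-file-type conversion routines through the public memp_register
+ * method, and __memp_pg() then applies them as pages move into and out
+ * of the cache.
+ *
+ *	static int
+ *	byteswap_pgin(dbenv, pgno, pgaddr, pgcookie)
+ *		DB_ENV *dbenv;
+ *		db_pgno_t pgno;
+ *		void *pgaddr;
+ *		DBT *pgcookie;
+ *	{
+ *		... convert the page at pgaddr to host byte order ...
+ *		return (0);
+ *	}
+ *
+ *	ret = dbenv->memp_register(dbenv, ftype, byteswap_pgin, NULL);
+ */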
+
+/*
+ * __memp_bhfree --
+ * Free a bucket header and its referenced data.
+ *
+ * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int));
+ */
+void
+__memp_bhfree(dbmp, hp, bhp, free_mem)
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ BH *bhp;
+ int free_mem;
+{
+ DB_ENV *dbenv;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t n_cache;
+
+ /*
+ * Assumes the hash bucket is locked and the MPOOL is not.
+ */
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ n_cache = NCACHE(mp, bhp->mf_offset, bhp->pgno);
+
+ /*
+ * Delete the buffer header from the hash bucket queue and reset
+ * the hash bucket's priority, if necessary.
+ */
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ if (bhp->priority == hp->hash_priority)
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL ?
+ 0 : SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+ /*
+ * Discard the hash bucket's mutex, it's no longer needed, and
+ * we don't want to be holding it when acquiring other locks.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * Find the underlying MPOOLFILE and decrement its reference count.
+ * If this is its last reference, remove it.
+ */
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
+ __memp_mf_discard(dbmp, mfp);
+ else
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+
+ /*
+ * Clear the mutex this buffer recorded; requires the region lock
+ * be held.
+ */
+ __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
+ (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
+
+ /*
+ * If we're not reusing the buffer immediately, free the buffer header
+ * and data for real.
+ */
+ if (free_mem) {
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, bhp);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ c_mp->stat.st_pages--;
+ }
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+}
+
+/*
+ * __memp_upgrade --
+ * Upgrade a file descriptor from read-only to read-write.
+ */
+static int
+__memp_upgrade(dbmp, dbmfp, mfp)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+{
+ DB_ENV *dbenv;
+ DB_FH *fhp, *tfhp;
+ int ret;
+ char *rpath;
+
+ dbenv = dbmp->dbenv;
+ fhp = NULL;
+ rpath = NULL;
+
+ /*
+ * Calculate the real name for this file and try to open it read/write.
+ * We know we have a valid pathname for the file because it's the only
+ * way we could have gotten a file descriptor of any kind.
+ */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &fhp)) != 0)
+ goto err;
+
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0)
+ goto err;
+
+ if (__os_open(dbenv, rpath,
+ F_ISSET(mfp, MP_DIRECT) ? DB_OSO_DIRECT : 0, 0, fhp) != 0) {
+ F_SET(dbmfp, MP_UPGRADE_FAIL);
+ goto err;
+ }
+
+ /*
+ * Swap the descriptors and set the upgrade flag.
+ *
+ * XXX
+ * There is a race here. If another process schedules a read using the
+ * existing file descriptor and is swapped out before making the system
+ * call, this code could theoretically close the file descriptor out
+ * from under it. While it's very unlikely, this code should still be
+ * rewritten.
+ */
+ tfhp = dbmfp->fhp;
+ dbmfp->fhp = fhp;
+ fhp = tfhp;
+
+ (void)__os_closehandle(dbenv, fhp);
+ F_SET(dbmfp, MP_UPGRADE);
+
+ ret = 0;
+ if (0) {
+err: ret = 1;
+ }
+ if (fhp != NULL)
+ __os_free(dbenv, fhp);
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
+
+ return (ret);
+}
diff --git a/libdb/mp/mp_fget.c b/libdb/mp/mp_fget.c
new file mode 100644
index 0000000..0e1346e
--- /dev/null
+++ b/libdb/mp/mp_fget.c
@@ -0,0 +1,654 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+static int __memp_fs_notzero
+ __P((DB_ENV *, DB_MPOOLFILE *, MPOOLFILE *, db_pgno_t *));
+#endif
+
+/*
+ * __memp_fget --
+ * Get a page from the file.
+ *
+ * PUBLIC: int __memp_fget
+ * PUBLIC: __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ */
+int
+__memp_fget(dbmfp, pgnoaddr, flags, addrp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+ u_int32_t flags;
+ void *addrp;
+{
+ enum { FIRST_FOUND, FIRST_MISS, SECOND_FOUND, SECOND_MISS } state;
+ BH *alloc_bhp, *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ roff_t mf_offset;
+ u_int32_t n_cache, st_hsearch;
+ int b_incr, extending, first, ret;
+
+ *(void **)addrp = NULL;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ mp = dbmp->reginfo[0].primary;
+ mfp = dbmfp->mfp;
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ alloc_bhp = bhp = NULL;
+ hp = NULL;
+ b_incr = extending = ret = 0;
+
+ /*
+ * Validate arguments.
+ *
+ * !!!
+ * Don't test for DB_MPOOL_CREATE and DB_MPOOL_NEW flags for readonly
+ * files here, and create non-existent pages in readonly files if the
+ * flags are set, later. The reason is that the hash access method
+ * wants to get empty pages that don't really exist in readonly files.
+ * The only alternative is for hash to write the last "bucket" all the
+ * time, which we don't want to do because one of our big goals in life
+ * is to keep database files small. It's sleazy as hell, but we catch
+ * any attempt to actually write the file in memp_fput().
+ */
+#define OKFLAGS (DB_MPOOL_CREATE | DB_MPOOL_LAST | DB_MPOOL_NEW)
+ if (flags != 0) {
+ if ((ret = __db_fchk(dbenv, "memp_fget", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ switch (flags) {
+ case DB_MPOOL_CREATE:
+ break;
+ case DB_MPOOL_LAST:
+ /* Get the last page number in the file. */
+ if (flags == DB_MPOOL_LAST) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+ break;
+ case DB_MPOOL_NEW:
+ /*
+ * If always creating a page, skip the first search
+ * of the hash bucket.
+ */
+ if (flags == DB_MPOOL_NEW)
+ goto alloc;
+ break;
+ default:
+ return (__db_ferr(dbenv, "memp_fget", 1));
+ }
+ }
+
+ /*
+ * If mmap'ing the file and the page is not past the end of the file,
+ * just return a pointer.
+ *
+ * The page may be past the end of the file, so check the page number
+ * argument against the original length of the file. If we previously
+ * returned pages past the original end of the file, last_pgno will
+ * have been updated to match the "new" end of the file, and checking
+ * against it would return pointers past the end of the mmap'd region.
+ *
+ * If another process has opened the file for writing since we mmap'd
+ * it, we will start playing the game by their rules, i.e. everything
+ * goes through the cache. All pages previously returned will be safe,
+ * as long as the correct locking protocol was observed.
+ *
+ * We don't discard the map because we don't know when all of the
+ * pages will have been discarded from the process' address space.
+ * It would be possible to do so by reference counting the open
+ * pages from the mmap, but it's unclear to me that it's worth it.
+ */
+ if (dbmfp->addr != NULL &&
+ F_ISSET(mfp, MP_CAN_MMAP) && *pgnoaddr <= mfp->orig_last_pgno) {
+ *(void **)addrp =
+ R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
+ ++mfp->stat.st_map;
+ return (0);
+ }
+
+hb_search:
+ /*
+ * Determine the cache and hash bucket where this page lives and get
+ * local pointers to them. Reset these on each pass through this code,
+ * because the page number can change.
+ */
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, mf_offset, *pgnoaddr)];
+
+ /* Search the hash chain for the page. */
+retry: st_hsearch = 0;
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ ++st_hsearch;
+ if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
+ continue;
+
+ /*
+ * Increment the reference count. We may discard the hash
+ * bucket lock as we evaluate and/or read the buffer, so we
+ * need to ensure it doesn't move and its contents remain
+ * unchanged.
+ */
+ if (bhp->ref == UINT16_T_MAX) {
+ __db_err(dbenv,
+ "%s: page %lu: reference count overflow",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ ret = EINVAL;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ goto err;
+ }
+ ++bhp->ref;
+ b_incr = 1;
+
+ /*
+ * BH_LOCKED --
+ * I/O is in progress or sync is waiting on the buffer to write
+ * it. Because we've incremented the buffer reference count,
+ * we know the buffer can't move. Unlock the bucket lock, wait
+ * for the buffer to become available, reacquire the bucket.
+ */
+ for (first = 1; F_ISSET(bhp, BH_LOCKED) &&
+ !F_ISSET(dbenv, DB_ENV_NOLOCKING); first = 0) {
+ /*
+ * If someone is trying to sync this buffer and the
+ * buffer is hot, they may never get in. Give up
+ * and try again.
+ */
+ if (!first && bhp->ref_sync != 0) {
+ --bhp->ref;
+ b_incr = 0;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ __os_yield(dbenv, 1);
+ goto retry;
+ }
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ /*
+ * Explicitly yield the processor if not the first pass
+ * through this loop -- if we don't, we might run to the
+ * end of our CPU quantum as we will simply be swapping
+ * between the two locks.
+ */
+ if (!first)
+ __os_yield(dbenv, 1);
+
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ /* Wait for I/O to finish... */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ }
+
+ ++mfp->stat.st_cache_hit;
+ break;
+ }
+
+ /*
+ * Update the hash bucket search statistics -- do now because our next
+ * search may be for a different bucket.
+ */
+ ++c_mp->stat.st_hash_searches;
+ if (st_hsearch > c_mp->stat.st_hash_longest)
+ c_mp->stat.st_hash_longest = st_hsearch;
+ c_mp->stat.st_hash_examined += st_hsearch;
+
+ /*
+ * There are 4 possible paths to this location:
+ *
+ * FIRST_MISS:
+ * Didn't find the page in the hash bucket on our first pass:
+ * bhp == NULL, alloc_bhp == NULL
+ *
+ * FIRST_FOUND:
+ * Found the page in the hash bucket on our first pass:
+ * bhp != NULL, alloc_bhp == NULL
+ *
+ * SECOND_FOUND:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and found the page in the hash bucket on
+ * our second pass:
+ * bhp != NULL, alloc_bhp != NULL
+ *
+ * SECOND_MISS:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and didn't find the page in the hash bucket
+ * on our second pass:
+ * bhp == NULL, alloc_bhp != NULL
+ */
+ state = bhp == NULL ?
+ (alloc_bhp == NULL ? FIRST_MISS : SECOND_MISS) :
+ (alloc_bhp == NULL ? FIRST_FOUND : SECOND_FOUND);
+ switch (state) {
+ case FIRST_FOUND:
+ /* We found the buffer in our first check -- we're done. */
+ break;
+ case FIRST_MISS:
+ /*
+ * We didn't find the buffer in our first check. Figure out
+ * if the page exists, and allocate structures so we can add
+ * the page to the buffer pool.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+alloc: /*
+ * If DB_MPOOL_NEW is set, we have to allocate a page number.
+ * If neither DB_MPOOL_CREATE nor DB_MPOOL_NEW is set, then it's
+ * an error to try to get a page past the end of the file.
+ */
+ COMPQUIET(n_cache, 0);
+
+ extending = ret = 0;
+ R_LOCK(dbenv, dbmp->reginfo);
+ switch (flags) {
+ case DB_MPOOL_NEW:
+ extending = 1;
+ *pgnoaddr = mfp->last_pgno + 1;
+ break;
+ case DB_MPOOL_CREATE:
+ extending = *pgnoaddr > mfp->last_pgno;
+ break;
+ default:
+ ret = *pgnoaddr > mfp->last_pgno ? DB_PAGE_NOTFOUND : 0;
+ break;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+
+ /*
+ * !!!
+ * In the DB_MPOOL_NEW code path, mf_offset and n_cache have
+ * not yet been initialized.
+ */
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+
+ /* Allocate a new buffer header and data space. */
+ if ((ret = __memp_alloc(dbmp,
+ &dbmp->reginfo[n_cache], mfp, 0, NULL, &alloc_bhp)) != 0)
+ goto err;
+#ifdef DIAGNOSTIC
+ if ((db_alignp_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
+ __db_err(dbenv,
+ "Error: buffer data is NOT size_t aligned");
+ ret = EINVAL;
+ goto err;
+ }
+#endif
+ /*
+ * If we are extending the file, we'll need the region lock
+ * again.
+ */
+ if (extending)
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control. (That guarantee is interesting
+ * for DB_MPOOL_NEW, unlike DB_MPOOL_CREATE, because the caller
+ * did not specify the page number, and so, may reasonably not
+ * have any way to lock the page outside of mpool.) Regardless,
+ * if we allocate the page, and some other thread of control
+ * requests the page by number, we will not detect that and the
+ * thread of control that allocated using DB_MPOOL_NEW may not
+ * have a chance to initialize the page. (Note: we *could*
+ * detect this case if we set a flag in the buffer header which
+ * guaranteed that no gets of the page would succeed until the
+ * reference count went to 0, that is, until the creating page
+ * put the page.) What we do guarantee is that if two threads
+ * of control are both doing DB_MPOOL_NEW calls, they won't
+ * collide, that is, they won't both get the same page.
+ *
+ * There's a possibility that another thread allocated the page
+ * we were planning to allocate while we were off doing buffer
+ * allocation. We detect that by checking whether the page number
+ * we were going to use is still available. If it's not, then
+ * we check to see if the next available page number hashes to
+ * the same mpool region as the old one -- if it does, we can
+ * continue, otherwise, we have to start over.
+ */
+ if (flags == DB_MPOOL_NEW && *pgnoaddr != mfp->last_pgno + 1) {
+ *pgnoaddr = mfp->last_pgno + 1;
+ if (n_cache != NCACHE(mp, mf_offset, *pgnoaddr)) {
+ __db_shalloc_free(
+ dbmp->reginfo[n_cache].addr, alloc_bhp);
+ /*
+ * flags == DB_MPOOL_NEW, so extending is set
+ * and we're holding the region locked.
+ */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ alloc_bhp = NULL;
+ goto alloc;
+ }
+ }
+
+ /*
+ * We released the region lock, so another thread might have
+ * extended the file. Update the last_pgno and initialize
+ * the file, as necessary, if we extended the file.
+ */
+ if (extending) {
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ if (*pgnoaddr > mfp->last_pgno &&
+ __os_fs_notzero() &&
+ F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ ret = __memp_fs_notzero(
+ dbenv, dbmfp, mfp, pgnoaddr);
+ else
+ ret = 0;
+#endif
+ if (ret == 0 && *pgnoaddr > mfp->last_pgno)
+ mfp->last_pgno = *pgnoaddr;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+ }
+ goto hb_search;
+ case SECOND_FOUND:
+ /*
+ * We allocated buffer space for the requested page, but then
+ * found the page in the buffer cache on our second check.
+ * That's OK -- we can use the page we found in the pool,
+ * unless DB_MPOOL_NEW is set.
+ *
+ * Free the allocated memory, we no longer need it. Since we
+ * can't acquire the region lock while holding the hash bucket
+ * lock, we have to release the hash bucket and re-acquire it.
+ * That's OK, because we have the buffer pinned down.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
+ alloc_bhp = NULL;
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * We can't use the page we found in the pool if DB_MPOOL_NEW
+ * was set. (For details, see the above comment beginning
+ * "DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control".) If DB_MPOOL_NEW is set, we
+ * release our pin on this particular buffer, and try to get
+ * another one.
+ */
+ if (flags == DB_MPOOL_NEW) {
+ --bhp->ref;
+ b_incr = 0;
+ goto alloc;
+ }
+ break;
+ case SECOND_MISS:
+ /*
+ * We allocated buffer space for the requested page, and found
+ * the page still missing on our second pass through the buffer
+ * cache. Instantiate the page.
+ */
+ bhp = alloc_bhp;
+ alloc_bhp = NULL;
+
+ /*
+ * Initialize all the BH and hash bucket fields so we can call
+ * __memp_bhfree if an error occurs.
+ *
+ * Append the buffer to the tail of the bucket list and update
+ * the hash bucket's priority.
+ */
+ b_incr = 1;
+
+ memset(bhp, 0, sizeof(BH));
+ bhp->ref = 1;
+ bhp->priority = UINT32_T_MAX;
+ bhp->pgno = *pgnoaddr;
+ bhp->mf_offset = mf_offset;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+ /* If we extended the file, make sure the page is never lost. */
+ if (extending) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ }
+
+ /*
+ * If we created the page, zero it out. If we didn't create
+ * the page, read from the backing file.
+ *
+ * !!!
+ * DB_MPOOL_NEW doesn't call the pgin function.
+ *
+ * If DB_MPOOL_CREATE is used, then the application's pgin
+ * function has to be able to handle pages of 0's -- if it
+ * uses DB_MPOOL_NEW, it can detect all of its page creates,
+ * and not bother.
+ *
+ * If we're running in diagnostic mode, smash any bytes on the
+ * page that are unknown quantities for the caller.
+ *
+ * Otherwise, read the page into memory, optionally creating it
+ * if DB_MPOOL_CREATE is set.
+ */
+ if (extending) {
+ if (mfp->clear_len == 0)
+ memset(bhp->buf, 0, mfp->stat.st_pagesize);
+ else {
+ memset(bhp->buf, 0, mfp->clear_len);
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
+ mfp->stat.st_pagesize - mfp->clear_len);
+#endif
+ }
+
+ if (flags == DB_MPOOL_CREATE && mfp->ftype != 0)
+ F_SET(bhp, BH_CALLPGIN);
+
+ ++mfp->stat.st_page_create;
+ } else {
+ F_SET(bhp, BH_TRASH);
+ ++mfp->stat.st_cache_miss;
+ }
+
+ /* Increment buffer count referenced by MPOOLFILE. */
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->block_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /*
+ * Initialize the mutex. This is the last initialization step,
+ * because it's the only one that can fail, and everything else
+ * must be set up or we can't jump to the err label because it
+ * will call __memp_bhfree.
+ */
+ if ((ret = __db_mutex_setup(dbenv,
+ &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
+ goto err;
+ }
+
+ DB_ASSERT(bhp->ref != 0);
+
+ /*
+ * If we're the only reference, update buffer and bucket priorities.
+ * We may be about to release the hash bucket lock, so everything
+ * should be correct first. (We've already done this if we created
+ * the buffer, so there is no need to do it again.)
+ */
+ if (state != SECOND_MISS && bhp->ref == 1) {
+ bhp->priority = UINT32_T_MAX;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+ }
+
+ /*
+ * BH_TRASH --
+ * The buffer we found may need to be filled from the disk.
+ *
+ * It's possible for the read function to fail, which means we fail as
+ * well. Note, the __memp_pgread() function discards and reacquires
+ * the hash lock, so the buffer must be pinned down so that it cannot
+ * move and its contents are unchanged. Discard the buffer on failure
+ * unless another thread is waiting on our I/O to complete. It's OK to
+ * leave the buffer around, as the waiting thread will see the BH_TRASH
+ * flag set, and will also attempt to discard it. If there's a waiter,
+ * we need to decrement our reference count.
+ */
+ if (F_ISSET(bhp, BH_TRASH) &&
+ (ret = __memp_pgread(dbmfp,
+ &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
+ goto err;
+
+ /*
+ * BH_CALLPGIN --
+ * The buffer was processed for being written to disk, and now has
+ * to be re-converted for use.
+ */
+ if (F_ISSET(bhp, BH_CALLPGIN)) {
+ if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
+ goto err;
+ F_CLR(bhp, BH_CALLPGIN);
+ }
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+#ifdef DIAGNOSTIC
+ /* Update the file's pinned reference count. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ++dbmfp->pinref;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * We want to switch threads as often as possible, and at awkward
+ * times. Yield every time we get a new page to ensure contention.
+ */
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+ __os_yield(dbenv, 1);
+#endif
+
+ *(void **)addrp = bhp->buf;
+ return (0);
+
+err: /*
+ * Discard our reference. If ours is the only reference, discard the
+ * buffer entirely. If we held a reference to a buffer, we are
+ * also still holding the hash bucket mutex.
+ */
+ if (b_incr) {
+ if (bhp->ref == 1)
+ (void)__memp_bhfree(dbmp, hp, bhp, 1);
+ else {
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+ }
+
+ /* If alloc_bhp is set, free the memory. */
+ if (alloc_bhp != NULL)
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
+
+ return (ret);
+}
+
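+/*
+ * A minimal usage sketch of the get/put pairing implemented above, assuming
+ * an already-opened DB_MPOOLFILE handle "mpf" and its page size in
+ * "pagesize" (both names are placeholders, not part of this file):
+ *
+ *	db_pgno_t pgno;
+ *	void *page;
+ *	int ret;
+ *
+ *	(allocate a brand-new page at the end of the file)
+ *	if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &page)) == 0) {
+ *		memset(page, 0, pagesize);
+ *		ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);
+ *	}
+ *
+ * DB_MPOOL_NEW allocates the next page number and returns it through pgno;
+ * DB_MPOOL_LAST instead returns the file's current last page.
+ */
+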
+#ifdef HAVE_FILESYSTEM_NOTZERO
+/*
+ * __memp_fs_notzero --
+ * Initialize the underlying allocated pages in the file.
+ */
+static int
+__memp_fs_notzero(dbenv, dbmfp, mfp, pgnoaddr)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_IO db_io;
+ u_int32_t i, npages;
+ size_t nw;
+ int ret;
+ u_int8_t *page;
+ char *fail;
+
+ /*
+ * Pages allocated by writing pages past end-of-file are not zeroed,
+ * on some systems. Recovery could theoretically be fooled by a page
+ * showing up that contained garbage. In order to avoid this, we
+ * have to write the pages out to disk, and flush them. The reason
+ * for the flush is because if we don't sync, the allocation of another
+ * page subsequent to this one might reach the disk first, and if we
+ * crashed at the right moment, leave us with this page as the one
+ * allocated by writing a page past it in the file.
+ *
+ * Hash is the only access method that allocates groups of pages. We
+ * know that it will use the existence of the last page in a group to
+ * signify that the entire group is OK; so, write all the pages but
+ * the last one in the group, flush them to disk, and then write the
+ * last one to disk and flush it.
+ */
+ if ((ret = __os_calloc(dbenv, 1, mfp->stat.st_pagesize, &page)) != 0)
+ return (ret);
+
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.buf = page;
+
+ npages = *pgnoaddr - mfp->last_pgno;
+ for (i = 1; i < npages; ++i) {
+ db_io.pgno = mfp->last_pgno + i;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ }
+ if (i != 1 && (ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+ goto err;
+ }
+
+ db_io.pgno = mfp->last_pgno + npages;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+err: __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), fail, (u_long)db_io.pgno);
+ }
+
+ __os_free(dbenv, page);
+ return (ret);
+}
+#endif
diff --git a/libdb/mp/mp_fopen.c b/libdb/mp/mp_fopen.c
new file mode 100644
index 0000000..0b9a8b2
--- /dev/null
+++ b/libdb/mp/mp_fopen.c
@@ -0,0 +1,1018 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+static int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_fopen __P((DB_MPOOLFILE *,
+ const char *, u_int32_t, int, size_t));
+static void __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *));
+static void __memp_refcnt __P((DB_MPOOLFILE *, db_pgno_t *));
+static int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static int __memp_set_ftype __P((DB_MPOOLFILE *, int));
+static int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t));
+static int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *));
+static int __memp_set_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+static void __memp_set_unlink __P((DB_MPOOLFILE *, int));
+
+/* Initialization methods cannot be called after open is called. */
+#define MPF_ILLEGAL_AFTER_OPEN(dbmfp, name) \
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED)) \
+ return (__db_mi_open((dbmfp)->dbmp->dbenv, name, 1));
+
+/*
+ * __memp_fcreate --
+ * Create a DB_MPOOLFILE handle.
+ *
+ * PUBLIC: int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ */
+int
+__memp_fcreate(dbenv, retp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE **retp;
+ u_int32_t flags;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_fcreate", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fcreate", flags, 0)) != 0)
+ return (ret);
+
+ /* Allocate and initialize the per-process structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0)
+ return (ret);
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &dbmfp->fhp)) != 0)
+ goto err;
+
+ /* Allocate and initialize a mutex if necessary. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmfp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+
+ dbmfp->ref = 1;
+ dbmfp->lsn_offset = -1;
+ dbmfp->dbmp = dbmp;
+ dbmfp->mfp = INVALID_ROFF;
+
+ dbmfp->close = __memp_fclose;
+ dbmfp->get = __memp_fget;
+ dbmfp->get_fileid = __memp_get_fileid;
+ dbmfp->last_pgno = __memp_last_pgno;
+ dbmfp->open = __memp_fopen;
+ dbmfp->put = __memp_fput;
+ dbmfp->refcnt = __memp_refcnt;
+ dbmfp->set = __memp_fset;
+ dbmfp->set_clear_len = __memp_set_clear_len;
+ dbmfp->set_fileid = __memp_set_fileid;
+ dbmfp->set_ftype = __memp_set_ftype;
+ dbmfp->set_lsn_offset = __memp_set_lsn_offset;
+ dbmfp->set_pgcookie = __memp_set_pgcookie;
+ dbmfp->set_priority = __memp_set_priority;
+ dbmfp->set_unlink = __memp_set_unlink;
+ dbmfp->sync = __memp_fsync;
+
+ *retp = dbmfp;
+ return (0);
+
+err: if (dbmfp != NULL) {
+ if (dbmfp->fhp != NULL)
+ (void)__os_free(dbenv, dbmfp->fhp);
+ (void)__os_free(dbenv, dbmfp);
+ }
+ return (ret);
+}
+
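+/*
+ * A short, hedged sketch of the handle lifecycle wired up by the method
+ * table below, assuming a DB_ENV "dbenv" opened with DB_INIT_MPOOL (the
+ * file name and page size are only examples):
+ *
+ *	DB_MPOOLFILE *mpf;
+ *	int ret;
+ *
+ *	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
+ *		return (ret);
+ *	if ((ret = mpf->open(mpf, "example.db", 0, 0, 8 * 1024)) != 0) {
+ *		(void)mpf->close(mpf, 0);
+ *		return (ret);
+ *	}
+ *	(use mpf->get and mpf->put here)
+ *	ret = mpf->close(mpf, 0);
+ */
+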
+/*
+ * __memp_set_clear_len --
+ * Set the clear length.
+ */
+static int
+__memp_set_clear_len(dbmfp, clear_len)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t clear_len;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_clear_len");
+
+ dbmfp->clear_len = clear_len;
+ return (0);
+}
+
+/*
+ * __memp_set_fileid --
+ * Set the file ID.
+ */
+static int
+__memp_set_fileid(dbmfp, fileid)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fileid;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_fileid");
+
+ /*
+ * XXX
+ * This is dangerous -- we're saving the caller's pointer instead
+ * of allocating memory and copying the contents.
+ */
+ dbmfp->fileid = fileid;
+ return (0);
+}
+
+/*
+ * __memp_set_ftype --
+ * Set the file type (as registered).
+ */
+static int
+__memp_set_ftype(dbmfp, ftype)
+ DB_MPOOLFILE *dbmfp;
+ int ftype;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_ftype");
+
+ dbmfp->ftype = ftype;
+ return (0);
+}
+
+/*
+ * __memp_set_lsn_offset --
+ * Set the page's LSN offset.
+ */
+static int
+__memp_set_lsn_offset(dbmfp, lsn_offset)
+ DB_MPOOLFILE *dbmfp;
+ int32_t lsn_offset;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_lsn_offset");
+
+ dbmfp->lsn_offset = lsn_offset;
+ return (0);
+}
+
+/*
+ * __memp_set_pgcookie --
+ * Set the pgin/pgout cookie.
+ */
+static int
+__memp_set_pgcookie(dbmfp, pgcookie)
+ DB_MPOOLFILE *dbmfp;
+ DBT *pgcookie;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_pgcookie");
+
+ dbmfp->pgcookie = pgcookie;
+ return (0);
+}
+
+/*
+ * __memp_set_priority --
+ * Set the cache priority for pages from this file.
+ */
+static int
+__memp_set_priority(dbmfp, priority)
+ DB_MPOOLFILE *dbmfp;
+ DB_CACHE_PRIORITY priority;
+{
+ switch (priority) {
+ case DB_PRIORITY_VERY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_LOW;
+ break;
+ case DB_PRIORITY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_LOW;
+ break;
+ case DB_PRIORITY_DEFAULT:
+ dbmfp->mfp->priority = MPOOL_PRI_DEFAULT;
+ break;
+ case DB_PRIORITY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_HIGH;
+ break;
+ case DB_PRIORITY_VERY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_HIGH;
+ break;
+ default:
+ __db_err(dbmfp->dbmp->dbenv,
+ "Unknown priority value: %d", priority);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __memp_fopen --
+ * Open a backing file for the memory pool.
+ */
+static int
+__memp_fopen(dbmfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fopen", flags,
+ DB_CREATE | DB_DIRECT | DB_EXTENT |
+ DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE)) != 0)
+ return (ret);
+
+ /*
+ * Require a non-zero, power-of-two pagesize that is no smaller
+ * than the clear length.
+ */
+ if (pagesize == 0 || !POWER_OF_TWO(pagesize)) {
+ __db_err(dbenv,
+ "memp_fopen: page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+ if (dbmfp->clear_len > pagesize) {
+ __db_err(dbenv,
+ "memp_fopen: clear length larger than page size");
+ return (EINVAL);
+ }
+
+ /* Read-only checks, and local flag. */
+ if (LF_ISSET(DB_RDONLY) && path == NULL) {
+ __db_err(dbenv,
+ "memp_fopen: temporary files can't be readonly");
+ return (EINVAL);
+ }
+
+ return (__memp_fopen_int(dbmfp, NULL, path, flags, mode, pagesize));
+}
+
+/*
+ * __memp_fopen_int --
+ * Open a backing file for the memory pool; internal version.
+ *
+ * PUBLIC: int __memp_fopen_int __P((DB_MPOOLFILE *,
+ * PUBLIC: MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ */
+int
+__memp_fopen_int(dbmfp, mfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ db_pgno_t last_pgno;
+ size_t maxmap;
+ u_int32_t mbytes, bytes, oflags;
+ int mfp_alloc, ret;
+ u_int8_t idbuf[DB_FILE_ID_LEN];
+ char *rpath;
+ void *p;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ mfp_alloc = ret = 0;
+ rpath = NULL;
+
+ /*
+ * Set the page size so os_open can decide whether to turn buffering
+ * off if the DB_DIRECT_DB flag is set.
+ */
+ dbmfp->fhp->pagesize = (u_int32_t)pagesize;
+
+ /*
+ * If it's a temporary file, delay the open until we actually need
+ * to write the file, and we know we can't join any existing files.
+ */
+ if (path == NULL)
+ goto alloc;
+
+ /*
+ * Get the real name for this file and open it. If it's a Queue extent
+ * file, it may not exist, and that's OK.
+ */
+ oflags = 0;
+ if (LF_ISSET(DB_CREATE))
+ oflags |= DB_OSO_CREATE;
+ if (LF_ISSET(DB_DIRECT))
+ oflags |= DB_OSO_DIRECT;
+ if (LF_ISSET(DB_RDONLY)) {
+ F_SET(dbmfp, MP_READONLY);
+ oflags |= DB_OSO_RDONLY;
+ }
+ if ((ret =
+ __db_appname(dbenv, DB_APP_DATA, path, 0, NULL, &rpath)) != 0)
+ goto err;
+ if ((ret = __os_open(dbenv, rpath, oflags, mode, dbmfp->fhp)) != 0) {
+ if (!LF_ISSET(DB_EXTENT))
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Figure out the file's size.
+ *
+ * !!!
+ * We can't use off_t's here, or in any code in the mainline library
+ * for that matter. (We have to use them in the os stubs, of course,
+ * as there are system calls that take them as arguments.) The reason
+ * is some customers build in environments where an off_t is 32-bits,
+ * but still run where offsets are 64-bits, and they pay us a lot of
+ * money.
+ */
+ if ((ret = __os_ioinfo(
+ dbenv, rpath, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Get the file id if we weren't given one. Generated file id's
+ * don't use timestamps; otherwise there'd be no chance of any
+ * other process joining the party.
+ */
+ if (dbmfp->fileid == NULL) {
+ if ((ret = __os_fileid(dbenv, rpath, 0, idbuf)) != 0)
+ goto err;
+ dbmfp->fileid = idbuf;
+ }
+
+ /*
+ * If our caller knows what mfp we're using, increment the ref count,
+ * no need to search.
+ *
+ * We don't need to acquire a lock other than the mfp itself, because
+ * we know there's another reference and it's not going away.
+ */
+ if (mfp != NULL) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ goto check_map;
+ }
+
+ /*
+ * If not creating a temporary file, walk the list of MPOOLFILE's,
+ * looking for a matching file. Files backed by temporary files
+ * or previously removed files can't match.
+ *
+ * DB_TRUNCATE support.
+ *
+ * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
+ * pair) plus a timestamp. If files are removed and created in less
+ * than a second, the fileID can be repeated. The problem with
+ * repetition happens when the file that previously had the fileID
+ * value still has pages in the pool, since we don't want to use them
+ * to satisfy requests for the new file.
+ *
+ * Because the DB_TRUNCATE flag reuses the dev/inode pair, repeated
+ * opens with that flag set guarantee matching fileIDs when the
+ * machine can open a file and then re-open with truncate within a
+ * second. For this reason, we pass that flag down, and, if we find
+ * a matching entry, we ensure that it's never found again, and we
+ * create a new entry for the current request.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ /* Skip dead files and temporary files. */
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Skip non-matching files. */
+ if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo,
+ mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+ continue;
+
+ /*
+ * If the file is being truncated, remove it from the system
+ * and create a new entry.
+ *
+ * !!!
+ * We should be able to set mfp to NULL and break out of the
+ * loop, but I like the idea of checking all the entries.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ MPOOLFILE_IGNORE(mfp);
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ continue;
+ }
+
+ /*
+ * Some things about a file cannot be changed: the clear length,
+ * page size, or LSN location.
+ *
+ * The file type can change if the application's pre- and post-
+ * processing needs change. For example, an application that
+ * created a hash subdatabase in a database that was previously
+ * all btree.
+ *
+ * XXX
+ * We do not check to see if the pgcookie information changed,
+ * or update it if it has; this might be a bug.
+ */
+ if (dbmfp->clear_len != mfp->clear_len ||
+ pagesize != mfp->stat.st_pagesize ||
+ dbmfp->lsn_offset != mfp->lsn_off) {
+ __db_err(dbenv,
+ "%s: clear length, page size or LSN location changed",
+ path);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (dbmfp->ftype != 0)
+ mfp->ftype = dbmfp->ftype;
+
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ break;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (mfp != NULL)
+ goto check_map;
+
+alloc: /* Allocate and initialize a new MPOOLFILE. */
+ if ((ret = __memp_alloc(
+ dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
+ goto err;
+ mfp_alloc = 1;
+ memset(mfp, 0, sizeof(MPOOLFILE));
+ mfp->mpf_cnt = 1;
+ mfp->ftype = dbmfp->ftype;
+ mfp->stat.st_pagesize = pagesize;
+ mfp->lsn_off = dbmfp->lsn_offset;
+ mfp->clear_len = dbmfp->clear_len;
+
+ if (LF_ISSET(DB_DIRECT))
+ F_SET(mfp, MP_DIRECT);
+ if (LF_ISSET(DB_EXTENT))
+ F_SET(mfp, MP_EXTENT);
+ F_SET(mfp, MP_CAN_MMAP);
+
+ if (path == NULL)
+ F_SET(mfp, MP_TEMP);
+ else {
+ /*
+ * Don't permit files that aren't a multiple of the pagesize,
+ * and find the number of the last page in the file, all the
+ * time being careful not to overflow 32 bits.
+ *
+ * During verify or recovery, we might have to cope with a
+ * truncated file; if the file size is not a multiple of the
+ * page size, round down to a page, we'll take care of the
+ * partial page outside the mpool system.
+ */
+ if (bytes % pagesize != 0) {
+ if (LF_ISSET(DB_ODDFILESIZE))
+ bytes -= (u_int32_t)(bytes % pagesize);
+ else {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize", rpath);
+ ret = EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+ * If the user specifies DB_MPOOL_LAST or DB_MPOOL_NEW on a
+ * page get, we need to know the last page in the file.
+ * Figure it out and save it away.
+ *
+ * Note correction: page numbers are zero-based, not 1-based.
+ */
+ last_pgno = (db_pgno_t)(mbytes * (MEGABYTE / pagesize));
+ last_pgno += (db_pgno_t)(bytes / pagesize);
+ if (last_pgno != 0)
+ --last_pgno;
+ mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
+
+ /* Copy the file path into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+ goto err;
+ memcpy(p, path, strlen(path) + 1);
+
+ /* Copy the file identification string into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
+ goto err;
+ memcpy(p, dbmfp->fileid, DB_FILE_ID_LEN);
+ }
+
+ /* Copy the page cookie into shared memory. */
+ if (dbmfp->pgcookie == NULL || dbmfp->pgcookie->size == 0) {
+ mfp->pgcookie_len = 0;
+ mfp->pgcookie_off = 0;
+ } else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, dbmfp->pgcookie->size, &mfp->pgcookie_off, &p)) != 0)
+ goto err;
+ memcpy(p, dbmfp->pgcookie->data, dbmfp->pgcookie->size);
+ mfp->pgcookie_len = dbmfp->pgcookie->size;
+ }
+
+ /*
+ * Prepend the MPOOLFILE to the list of MPOOLFILE's.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ret = __db_mutex_setup(dbenv, dbmp->reginfo, &mfp->mutex,
+ MUTEX_NO_RLOCK);
+ if (ret == 0)
+ SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+
+check_map:
+ /*
+ * If a file:
+ * + isn't temporary
+ * + is read-only
+ * + doesn't require any pgin/pgout support
+ * + the DB_NOMMAP flag wasn't set (in either the file open or
+ * the environment in which it was opened)
+ * + and is less than mp_mmapsize bytes in size
+ *
+ * we can mmap it instead of reading/writing buffers. Don't do error
+ * checking based on the mmap call failure. We want to do normal I/O
+ * on the file if the reason we failed was that the file was on an
+ * NFS-mounted partition, and we can fail in buffer I/O just as easily
+ * as here.
+ *
+ * We'd like to test to see if the file is too big to mmap. Since we
+ * don't know what size or type off_t's or size_t's are, or what the
+ * largest unsigned integral type is, or what random insanity the local C
+ * compiler will perpetrate, doing the comparison in a portable way is
+ * flatly impossible. Hope that mmap fails if the file is too large.
+ */
+#define DB_MAXMMAPSIZE (10 * 1024 * 1024) /* 10 MB. */
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ if (path == NULL)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (!F_ISSET(dbmfp, MP_READONLY))
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (dbmfp->ftype != 0)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP))
+ F_CLR(mfp, MP_CAN_MMAP);
+ maxmap = dbenv->mp_mmapsize == 0 ?
+ DB_MAXMMAPSIZE : dbenv->mp_mmapsize;
+ if (mbytes > maxmap / MEGABYTE ||
+ (mbytes == maxmap / MEGABYTE && bytes >= maxmap % MEGABYTE))
+ F_CLR(mfp, MP_CAN_MMAP);
+
+ dbmfp->addr = NULL;
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ dbmfp->len = (size_t)mbytes * MEGABYTE + bytes;
+ if (__os_mapfile(dbenv, rpath,
+ dbmfp->fhp, dbmfp->len, 1, &dbmfp->addr) != 0) {
+ dbmfp->addr = NULL;
+ F_CLR(mfp, MP_CAN_MMAP);
+ }
+ }
+ }
+
+ dbmfp->mfp = mfp;
+
+ F_SET(dbmfp, MP_OPEN_CALLED);
+
+ /* Add the file to the process' list of DB_MPOOLFILEs. */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ TAILQ_INSERT_TAIL(&dbmp->dbmfq, dbmfp, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (0) {
+err: if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, dbmfp->fhp);
+
+ if (mfp_alloc) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ }
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
+ return (ret);
+}
+
+/*
+ * __memp_get_fileid --
+ * Return the file ID.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_get_fileid(dbmfp, fidp)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fidp;
+{
+ /*
+ * No lock needed -- we're using the handle, it had better not
+ * be going away.
+ *
+ * !!!
+ * Get the fileID out of the region, not out of the DB_MPOOLFILE
+ * structure because the DB_MPOOLFILE reference is possibly short
+ * lived, and isn't to be trusted.
+ */
+ memcpy(fidp, R_ADDR(
+ dbmfp->dbmp->reginfo, dbmfp->mfp->fileid_off), DB_FILE_ID_LEN);
+}
+
+/*
+ * __memp_last_pgno --
+ * Return the page number of the last page in the file.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_last_pgno(dbmfp, pgnoaddr)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = dbmfp->mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+}
+
+/*
+ * __memp_refcnt --
+ * Return the current reference count.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_refcnt(dbmfp, cntp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *cntp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbmfp->dbmp->dbenv;
+
+ MUTEX_LOCK(dbenv, &dbmfp->mfp->mutex);
+ *cntp = dbmfp->mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &dbmfp->mfp->mutex);
+}
+
+/*
+ * __memp_set_unlink --
+ * Set unlink on last close flag.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_set_unlink(dbmpf, set)
+ DB_MPOOLFILE *dbmpf;
+ int set;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbmpf->dbmp->dbenv;
+
+ MUTEX_LOCK(dbenv, &dbmpf->mfp->mutex);
+ if (set)
+ F_SET(dbmpf->mfp, MP_UNLINK);
+ else
+ F_CLR(dbmpf->mfp, MP_UNLINK);
+ MUTEX_UNLOCK(dbenv, &dbmpf->mfp->mutex);
+}
+
+/*
+ * __memp_fclose --
+ * Close a backing file for the memory pool.
+ */
+static int
+__memp_fclose(dbmfp, flags)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbmfp->dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * XXX
+ * DB_MPOOL_DISCARD: Undocumented flag: DB private.
+ */
+ ret = __db_fchk(dbenv, "DB_MPOOLFILE->close", flags, DB_MPOOL_DISCARD);
+
+ if ((t_ret = __memp_fclose_int(dbmfp, flags)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __memp_fclose_int --
+ * Internal version of __memp_fclose.
+ *
+ * PUBLIC: int __memp_fclose_int __P((DB_MPOOLFILE *, u_int32_t));
+ */
+int
+__memp_fclose_int(dbmfp, flags)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ char *rpath;
+ int deleted, ret, t_ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ ret = 0;
+
+ /*
+ * We have to reference count DB_MPOOLFILE structures as other threads
+ * in the process may be using them. Here's the problem:
+ *
+ * Thread A opens a database.
+ * Thread B uses thread A's DB_MPOOLFILE to write a buffer
+ * in order to free up memory in the mpool cache.
+ * Thread A closes the database while thread B is using the
+ * DB_MPOOLFILE structure.
+ *
+ * By opening all databases before creating any threads, and closing
+ * the databases after all the threads have exited, applications get
+ * better performance and avoid the problem path entirely.
+ *
+ * Regardless, holding the DB_MPOOLFILE to flush a dirty buffer is a
+ * short-term lock, even in the worst case, since we had better be the only
+ * thread of control using the DB_MPOOLFILE structure to read pages
+ * *into* the cache. Wait until we're the only reference holder and
+ * remove the DB_MPOOLFILE structure from the list, so nobody else can
+ * find it. We do this, rather than have the last reference holder
+ * (whoever that might be) discard the DB_MPOOLFILE structure, because
+ * we'd rather write error messages to the application in the close
+ * routine, not in the checkpoint/sync routine.
+ *
+ * !!!
+ * It's possible the DB_MPOOLFILE was never added to the DB_MPOOLFILE
+ * list; check the MP_OPEN_CALLED flag to be sure.
+ */
+ for (deleted = 0;;) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (dbmfp->ref == 1) {
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED))
+ TAILQ_REMOVE(&dbmp->dbmfq, dbmfp, q);
+ deleted = 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (deleted)
+ break;
+ __os_sleep(dbenv, 1, 0);
+ }
+
+ /* Complain if pinned blocks were never returned. */
+ if (dbmfp->pinref != 0) {
+ __db_err(dbenv, "%s: close: %lu blocks left pinned",
+ __memp_fn(dbmfp), (u_long)dbmfp->pinref);
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+
+ /* Discard any mmap information. */
+ if (dbmfp->addr != NULL &&
+ (ret = __os_unmapfile(dbenv, dbmfp->addr, dbmfp->len)) != 0)
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(ret));
+
+ /* Close the file; temporary files may not yet have been created. */
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbmfp->fhp)) != 0) {
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(t_ret));
+ if (ret == 0)
+ ret = t_ret;
+ }
+
+ /* Discard the thread mutex. */
+ if (dbmfp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmfp->mutexp);
+
+ /*
+ * Discard our reference on the underlying MPOOLFILE, and close
+ * it if it's no longer useful to anyone. It's possible the open of
+ * the file never happened or wasn't successful, in which case, mfp
+ * will be NULL.
+ */
+ if ((mfp = dbmfp->mfp) == NULL)
+ goto done;
+
+ /*
+ * If it's a temp file, all outstanding references belong to unflushed
+ * buffers. (A temp file can only be referenced by one DB_MPOOLFILE).
+ * We don't care about preserving any of those buffers, so mark the
+ * MPOOLFILE as dead so that even the dirty ones just get discarded
+ * when we try to flush them.
+ */
+ deleted = 0;
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
+ if (LF_ISSET(DB_MPOOL_DISCARD) ||
+ F_ISSET(mfp, MP_TEMP | MP_UNLINK))
+ MPOOLFILE_IGNORE(mfp);
+ if (F_ISSET(mfp, MP_UNLINK)) {
+ if ((t_ret = __db_appname(dbmp->dbenv,
+ DB_APP_DATA, R_ADDR(dbmp->reginfo,
+ mfp->path_off), 0, NULL, &rpath)) != 0 && ret == 0)
+ ret = t_ret;
+ if (t_ret == 0) {
+ if ((t_ret = __os_unlink(
+ dbmp->dbenv, rpath) != 0) && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, rpath);
+ }
+ }
+ if (mfp->block_cnt == 0) {
+ if ((t_ret =
+ __memp_mf_discard(dbmp, mfp)) != 0 && ret == 0)
+ ret = t_ret;
+ deleted = 1;
+ }
+ }
+ if (deleted == 0)
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /* Discard the DB_MPOOLFILE structure. */
+done: __os_free(dbenv, dbmfp->fhp);
+ __os_free(dbenv, dbmfp);
+
+ return (ret);
+}
+
+/*
+ * __memp_mf_discard --
+ * Discard an MPOOLFILE.
+ *
+ * PUBLIC: int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+ */
+int
+__memp_mf_discard(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ DB_MPOOL_STAT *sp;
+ MPOOL *mp;
+ char *rpath;
+ int ret;
+
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ ret = 0;
+
+ /*
+ * Expects caller to be holding the MPOOLFILE mutex.
+ *
+ * When discarding a file, we have to flush writes from it to disk.
+ * The scenario is that dirty buffers from this file need to be
+ * flushed to satisfy a future checkpoint, but when the checkpoint
+ * calls mpool sync, the sync code won't know anything about them.
+ */
+ if (!F_ISSET(mfp, MP_DEADFILE) &&
+ (ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) == 0) {
+ if ((ret = __os_open(dbenv, rpath, 0, 0, &fh)) == 0) {
+ ret = __os_fsync(dbenv, &fh);
+ (void)__os_closehandle(dbenv, &fh);
+ }
+ __os_free(dbenv, rpath);
+ }
+
+ /*
+ * We have to release the MPOOLFILE lock before acquiring the region
+ * lock so that we don't deadlock. Make sure nobody ever looks at
+ * this structure again.
+ */
+ MPOOLFILE_IGNORE(mfp);
+
+ /* Discard the mutex we're holding. */
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /* Delete from the list of MPOOLFILEs. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile);
+
+ /* Copy the statistics into the region. */
+ sp = &mp->stat;
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_map += mfp->stat.st_map;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+
+ /* Clear the mutex this MPOOLFILE recorded. */
+ __db_shlocks_clear(&mfp->mutex, dbmp->reginfo,
+ (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off));
+
+ /* Free the space. */
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ if (mfp->pgcookie_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->pgcookie_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * __memp_fn --
+ * On errors we print whatever is available as the file name.
+ *
+ * PUBLIC: char * __memp_fn __P((DB_MPOOLFILE *));
+ */
+char *
+__memp_fn(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ return (__memp_fns(dbmfp->dbmp, dbmfp->mfp));
+}
+
+/*
+ * __memp_fns --
+ * On errors we print whatever is available as the file name.
+ *
+ * PUBLIC: char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+ *
+ */
+char *
+__memp_fns(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ if (mfp->path_off == 0)
+ return ((char *)"temporary");
+
+ return ((char *)R_ADDR(dbmp->reginfo, mfp->path_off));
+}
diff --git a/libdb/mp/mp_fput.c b/libdb/mp/mp_fput.c
new file mode 100644
index 0000000..64cda27
--- /dev/null
+++ b/libdb/mp/mp_fput.c
@@ -0,0 +1,202 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_fput --
+ * Mpool file put function.
+ *
+ * PUBLIC: int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+ */
+int
+__memp_fput(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *argbhp, *bhp, *prev;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
+ int adjust, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags) {
+ if ((ret = __db_fchk(dbenv, "memp_fput", flags,
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fput",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv,
+ "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+ }
+
+ /*
+ * If we're mapping the file, there's nothing to do. Because we can
+ * stop mapping the file at any time, we have to check on each buffer
+ * to see if the address we gave the application was part of the map
+ * region.
+ */
+ if (dbmfp->addr != NULL && pgaddr >= dbmfp->addr &&
+ (u_int8_t *)pgaddr <= (u_int8_t *)dbmfp->addr + dbmfp->len)
+ return (0);
+
+#ifdef DIAGNOSTIC
+ /*
+ * Decrement the per-file pinned buffer count (mapped pages aren't
+ * counted).
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (dbmfp->pinref == 0) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "%s: more pages returned than retrieved", __memp_fn(dbmfp));
+ } else {
+ ret = 0;
+ --dbmfp->pinref;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ return (ret);
+#endif
+
+ /* Convert a page address to a buffer header and hash bucket. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ /*
+ * Check for a reference count going to zero. This can happen if the
+ * application returns a page twice.
+ */
+ if (bhp->ref == 0) {
+ __db_err(dbenv, "%s: page %lu: unpinned page returned",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (EINVAL);
+ }
+
+ /*
+ * If there's more than one reference to the page, or the one remaining
+ * reference isn't a thread waiting to flush the buffer to disk, we're
+ * done. Ignore the
+ * discard flags (for now) and leave the buffer's priority alone.
+ */
+ if (--bhp->ref > 1 || (bhp->ref == 1 && !F_ISSET(bhp, BH_LOCKED))) {
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (0);
+ }
+
+ /* Update priority values. */
+ if (F_ISSET(bhp, BH_DISCARD) ||
+ dbmfp->mfp->priority == MPOOL_PRI_VERY_LOW)
+ bhp->priority = 0;
+ else {
+ /*
+ * We don't lock the LRU counter or the stat.st_pages field; if
+ * we get garbage (which won't happen on a 32-bit machine), it
+ * only means a buffer has the wrong priority.
+ */
+ bhp->priority = c_mp->lru_count;
+
+ adjust = 0;
+ if (dbmfp->mfp->priority != 0)
+ adjust =
+ (int)c_mp->stat.st_pages / dbmfp->mfp->priority;
+ if (F_ISSET(bhp, BH_DIRTY))
+ adjust += c_mp->stat.st_pages / MPOOL_PRI_DIRTY;
+
+ if (adjust > 0) {
+ if (UINT32_T_MAX - bhp->priority >= (u_int32_t)adjust)
+ bhp->priority += adjust;
+ } else if (adjust < 0)
+ if (bhp->priority > (u_int32_t)-adjust)
+ bhp->priority += adjust;
+ }
+
+ /*
+ * Buffers on hash buckets are sorted by priority -- move the buffer
+ * to the correct position in the list.
+ */
+ argbhp = bhp;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, argbhp, hq, __bh);
+
+ prev = NULL;
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; prev = bhp, bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority > argbhp->priority)
+ break;
+ if (prev == NULL)
+ SH_TAILQ_INSERT_HEAD(&hp->hash_bucket, argbhp, hq, __bh);
+ else
+ SH_TAILQ_INSERT_AFTER(&hp->hash_bucket, prev, argbhp, hq, __bh);
+
+ /* Reset the hash bucket's priority. */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
+
+ /*
+ * The sync code has a separate counter for buffers on which it waits.
+ * It reads that value without holding a lock so we update it as the
+ * last thing we do. Once that value goes to 0, we won't see another
+ * reference to that buffer being returned to the cache until the sync
+ * code has finished, so we're safe as long as we don't let the value
+ * go to 0 before we finish with the buffer.
+ */
+ if (F_ISSET(argbhp, BH_LOCKED) && argbhp->ref_sync != 0)
+ --argbhp->ref_sync;
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ return (0);
+}
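+/*
+ * A hedged illustration of the flag handling above, assuming a DB_MPOOLFILE
+ * handle "mpf" and a page "page" pinned by an earlier get call:
+ *
+ *	ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);	(schedule a write)
+ *	ret = mpf->put(mpf, page, DB_MPOOL_DISCARD);	(evict this page first)
+ *	ret = mpf->put(mpf, page, 0);			(unpin, recalculate priority)
+ *
+ * DB_MPOOL_CLEAN clears the dirty bit only when the page was not created by
+ * extending the file (BH_DIRTY_CREATE), so newly allocated pages still reach
+ * the disk.
+ */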
diff --git a/libdb/mp/mp_fset.c b/libdb/mp/mp_fset.c
new file mode 100644
index 0000000..a8e012d
--- /dev/null
+++ b/libdb/mp/mp_fset.c
@@ -0,0 +1,89 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_fset --
+ * Mpool page set-flag routine.
+ *
+ * PUBLIC: int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+ */
+int
+__memp_fset(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags == 0)
+ return (__db_ferr(dbenv, "memp_fset", 1));
+
+ if ((ret = __db_fchk(dbenv, "memp_fset", flags,
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fset",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv, "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+
+ /* Convert the page address to a buffer header and hash bucket. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (0);
+}
diff --git a/libdb/mp/mp_method.c b/libdb/mp/mp_method.c
new file mode 100644
index 0000000..0a78d5e
--- /dev/null
+++ b/libdb/mp/mp_method.c
@@ -0,0 +1,156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+static int __memp_set_mp_mmapsize __P((DB_ENV *, size_t));
+
+/*
+ * __memp_dbenv_create --
+ * Mpool specific creation of the DB_ENV structure.
+ *
+ * PUBLIC: void __memp_dbenv_create __P((DB_ENV *));
+ */
+void
+__memp_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * We default to 32 8K pages. We don't default to a flat 256K, because
+ * some systems require significantly more memory to hold 32 pages than
+ * others. For example, HP-UX with POSIX pthreads needs 88 bytes for
+ * a POSIX pthread mutex and almost 200 bytes per buffer header, while
+ * Solaris needs 24 and 52 bytes for the same structures. The minimum
+ * number of hash buckets is 37. These contain a mutex also.
+ */
+ dbenv->mp_bytes =
+ 32 * ((8 * 1024) + sizeof(BH)) + 37 * sizeof(DB_MPOOL_HASH);
+ dbenv->mp_ncache = 1;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_cachesize = __dbcl_env_cachesize;
+ dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize;
+ dbenv->memp_dump_region = NULL;
+ dbenv->memp_fcreate = __dbcl_memp_fcreate;
+ dbenv->memp_nameop = NULL;
+ dbenv->memp_register = __dbcl_memp_register;
+ dbenv->memp_stat = __dbcl_memp_stat;
+ dbenv->memp_sync = __dbcl_memp_sync;
+ dbenv->memp_trickle = __dbcl_memp_trickle;
+ } else
+#endif
+ {
+ dbenv->set_cachesize = __memp_set_cachesize;
+ dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
+ dbenv->memp_dump_region = __memp_dump_region;
+ dbenv->memp_fcreate = __memp_fcreate;
+ dbenv->memp_nameop = __memp_nameop;
+ dbenv->memp_register = __memp_register;
+ dbenv->memp_stat = __memp_stat;
+ dbenv->memp_sync = __memp_sync;
+ dbenv->memp_trickle = __memp_trickle;
+ }
+}
+
+/*
+ * __memp_set_cachesize --
+ * Initialize the cache size.
+ */
+static int
+__memp_set_cachesize(dbenv, gbytes, bytes, ncache)
+ DB_ENV *dbenv;
+ u_int32_t gbytes, bytes;
+ int ncache;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_cachesize");
+
+ /* Normalize the values. */
+ if (ncache == 0)
+ ncache = 1;
+
+ /*
+ * You can only store 4GB-1 in an unsigned 32-bit value, so correct for
+ * applications that specify 4GB cache sizes -- we know what they meant.
+ */
+ if (gbytes / ncache == 4 && bytes == 0) {
+ --gbytes;
+ bytes = GIGABYTE - 1;
+ } else {
+ gbytes += bytes / GIGABYTE;
+ bytes %= GIGABYTE;
+ }
+
+ /* Avoid too-large cache sizes, they result in a region size of zero. */
+ if (gbytes / ncache > 4 || (gbytes / ncache == 4 && bytes != 0)) {
+ __db_err(dbenv, "individual cache size too large");
+ return (EINVAL);
+ }
+
+ /*
+ * If the application requested less than 500MB, increase the cachesize
+ * by 25% and factor in the size of the hash buckets to account for our
+ * overhead. (I'm guessing caches over 500MB are specifically sized,
+ * that is, it's a large server and the application actually knows how
+ * much memory is available. We only document the 25% overhead number,
+ * not the hash buckets, but I don't see a reason to confuse the issue,
+ * it shouldn't matter to an application.)
+ *
+ * There is a minimum cache size, regardless.
+ */
+ if (gbytes == 0) {
+ if (bytes < 500 * MEGABYTE)
+ bytes += (bytes / 4) + 37 * sizeof(DB_MPOOL_HASH);
+ if (bytes / ncache < DB_CACHESIZE_MIN)
+ bytes = ncache * DB_CACHESIZE_MIN;
+ }
+
+ dbenv->mp_gbytes = gbytes;
+ dbenv->mp_bytes = bytes;
+ dbenv->mp_ncache = ncache;
+
+ return (0);
+}
+
+/*
+ * __memp_set_mp_mmapsize --
+ * Set the maximum mapped file size.
+ */
+static int
+__memp_set_mp_mmapsize(dbenv, mp_mmapsize)
+ DB_ENV *dbenv;
+ size_t mp_mmapsize;
+{
+ dbenv->mp_mmapsize = mp_mmapsize;
+ return (0);
+}
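
Seen from an application, __memp_set_cachesize above is reached through the DB_ENV->set_cachesize method installed by __memp_dbenv_create. A minimal sketch of the caller's side, assuming the usual db.h handle interface (the function name and the 64MB/2-region choice are illustrative, not from this patch); because the request is under 500MB, the code above grows it by 25% plus the hash-bucket overhead before storing it:

    #include <db.h>

    /* Ask for a 64MB cache split across two regions; this must be done
     * before DB_ENV->open, per the ENV_ILLEGAL_AFTER_OPEN check above. */
    int
    configure_cache(DB_ENV *dbenv)
    {
        return (dbenv->set_cachesize(dbenv, 0, 64 * 1024 * 1024, 2));
    }

A request of 4 gigabytes with zero bytes and one region is instead clamped to 4GB-1, exactly as the "we know what they meant" comment describes.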
diff --git a/libdb/mp/mp_region.c b/libdb/mp/mp_region.c
new file mode 100644
index 0000000..fd0299a
--- /dev/null
+++ b/libdb/mp/mp_region.c
@@ -0,0 +1,466 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+static int __mpool_init __P((DB_ENV *, DB_MPOOL *, int, int));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static size_t __mpool_region_maint __P((REGINFO *));
+#endif
+
+/*
+ * __memp_open --
+ * Internal version of memp_open: only called from DB_ENV->open.
+ *
+ * PUBLIC: int __memp_open __P((DB_ENV *));
+ */
+int
+__memp_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ REGINFO reginfo;
+ roff_t reg_size, *regids;
+ u_int32_t i;
+ int htab_buckets, ret;
+
+ /* Figure out how big each cache region is. */
+ reg_size = (dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE;
+ reg_size += ((dbenv->mp_gbytes %
+ dbenv->mp_ncache) * GIGABYTE) / dbenv->mp_ncache;
+ reg_size += dbenv->mp_bytes / dbenv->mp_ncache;
+
+ /*
+ * Figure out how many hash buckets each region will have. Assume we
+ * want to keep the hash chains with under 10 pages on each chain. We
+ * don't know the pagesize in advance, and it may differ for different
+ * files. Use a pagesize of 1K for the calculation -- we walk these
+ * chains a lot, they must be kept short.
+ */
+ htab_buckets = __db_tablesize((reg_size / (1 * 1024)) / 10);
+
+ /* Create and initialize the DB_MPOOL structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbmp), &dbmp)) != 0)
+ return (ret);
+ LIST_INIT(&dbmp->dbregq);
+ TAILQ_INIT(&dbmp->dbmfq);
+ dbmp->dbenv = dbenv;
+
+ /* Join/create the first mpool region. */
+ memset(&reginfo, 0, sizeof(REGINFO));
+ reginfo.type = REGION_TYPE_MPOOL;
+ reginfo.id = INVALID_REGION_ID;
+ reginfo.mode = dbenv->db_mode;
+ reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(dbenv, &reginfo, reg_size)) != 0)
+ goto err;
+
+ /*
+ * If we created the region, initialize it. Create or join any
+ * additional regions.
+ */
+ if (F_ISSET(&reginfo, REGION_CREATE)) {
+ /*
+ * We define how many regions there are going to be, allocate
+ * the REGINFO structures and create them. Make sure we don't
+ * clear the wrong entries on error.
+ */
+ dbmp->nreg = dbenv->mp_ncache;
+ if ((ret = __os_calloc(dbenv,
+ dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
+ goto err;
+ /* Make sure we don't clear the wrong entries on error. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[0] = reginfo;
+
+ /* Initialize the first region. */
+ if ((ret = __mpool_init(dbenv, dbmp, 0, htab_buckets)) != 0)
+ goto err;
+
+ /*
+ * Create/initialize remaining regions and copy their IDs into
+ * the first region.
+ */
+ mp = R_ADDR(dbmp->reginfo, dbmp->reginfo[0].rp->primary);
+ regids = R_ADDR(dbmp->reginfo, mp->regids);
+ for (i = 1; i < dbmp->nreg; ++i) {
+ dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[i].mode = dbenv->db_mode;
+ dbmp->reginfo[i].flags = REGION_CREATE_OK;
+ if ((ret = __db_r_attach(
+ dbenv, &dbmp->reginfo[i], reg_size)) != 0)
+ goto err;
+ if ((ret =
+ __mpool_init(dbenv, dbmp, i, htab_buckets)) != 0)
+ goto err;
+ R_UNLOCK(dbenv, &dbmp->reginfo[i]);
+
+ regids[i] = dbmp->reginfo[i].id;
+ }
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ } else {
+ /*
+ * Determine how many regions there are going to be, allocate
+ * the REGINFO structures and fill in local copies of that
+ * information.
+ */
+ mp = R_ADDR(&reginfo, reginfo.rp->primary);
+ dbmp->nreg = mp->nreg;
+ if ((ret = __os_calloc(dbenv,
+ dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
+ goto err;
+ /* Make sure we don't clear the wrong entries on error. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].id = INVALID_REGION_ID;
+ dbmp->reginfo[0] = reginfo;
+
+ /*
+ * We have to unlock the primary mpool region before we attempt
+ * to join the additional mpool regions. If we don't, we can
+ * deadlock. The scenario is that we hold the primary mpool
+ * region lock. We then try to attach to an additional mpool
+ * region, which requires the acquisition/release of the main
+ * region lock (to search the list of regions). If another
+ * thread of control already holds the main region lock and is
+ * waiting on our primary mpool region lock, we'll deadlock.
+ * See [#4696] for more information.
+ */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /* Join remaining regions. */
+ regids = R_ADDR(dbmp->reginfo, mp->regids);
+ for (i = 1; i < dbmp->nreg; ++i) {
+ dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
+ dbmp->reginfo[i].id = regids[i];
+ dbmp->reginfo[i].mode = 0;
+ dbmp->reginfo[i].flags = REGION_JOIN_OK;
+ if ((ret = __db_r_attach(
+ dbenv, &dbmp->reginfo[i], 0)) != 0)
+ goto err;
+ R_UNLOCK(dbenv, &dbmp->reginfo[i]);
+ }
+ }
+
+ /* Set the local addresses for the regions. */
+ for (i = 0; i < dbmp->nreg; ++i)
+ dbmp->reginfo[i].primary =
+ R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary);
+
+ /* If the region is threaded, allocate a mutex to lock the handles. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+
+ dbenv->mp_handle = dbmp;
+ return (0);
+
+err: if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
+ if (F_ISSET(dbmp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ for (i = 0; i < dbmp->nreg; ++i)
+ if (dbmp->reginfo[i].id != INVALID_REGION_ID)
+ (void)__db_r_detach(
+ dbenv, &dbmp->reginfo[i], 0);
+ __os_free(dbenv, dbmp->reginfo);
+ }
+ if (dbmp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+ __os_free(dbenv, dbmp);
+ return (ret);
+}
+
+/*
+ * __mpool_init --
+ * Initialize a MPOOL structure in shared memory.
+ */
+static int
+__mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ int reginfo_off, htab_buckets;
+{
+ DB_MPOOL_HASH *htab;
+ MPOOL *mp;
+ REGINFO *reginfo;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ size_t maint_size;
+#endif
+ int i, ret;
+ void *p;
+
+ mp = NULL;
+
+ reginfo = &dbmp->reginfo[reginfo_off];
+ if ((ret = __db_shalloc(reginfo->addr,
+ sizeof(MPOOL), MUTEX_ALIGN, &reginfo->primary)) != 0)
+ goto mem_err;
+ reginfo->rp->primary = R_OFFSET(reginfo, reginfo->primary);
+ mp = reginfo->primary;
+ memset(mp, 0, sizeof(*mp));
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ maint_size = __mpool_region_maint(reginfo);
+ /* Allocate room for the maintenance info and initialize it. */
+ if ((ret = __db_shalloc(reginfo->addr,
+ sizeof(REGMAINT) + maint_size, 0, &p)) != 0)
+ goto mem_err;
+ __db_maintinit(reginfo, p, maint_size);
+ mp->maint_off = R_OFFSET(reginfo, p);
+#endif
+
+ if (reginfo_off == 0) {
+ SH_TAILQ_INIT(&mp->mpfq);
+
+ ZERO_LSN(mp->lsn);
+
+ mp->nreg = dbmp->nreg;
+ if ((ret = __db_shalloc(dbmp->reginfo[0].addr,
+ dbmp->nreg * sizeof(int), 0, &p)) != 0)
+ goto mem_err;
+ mp->regids = R_OFFSET(dbmp->reginfo, p);
+ }
+
+ /* Allocate hash table space and initialize it. */
+ if ((ret = __db_shalloc(reginfo->addr,
+ htab_buckets * sizeof(DB_MPOOL_HASH), 0, &htab)) != 0)
+ goto mem_err;
+ mp->htab = R_OFFSET(reginfo, htab);
+ for (i = 0; i < htab_buckets; i++) {
+ if ((ret = __db_mutex_setup(dbenv,
+ reginfo, &htab[i].hash_mutex,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+ SH_TAILQ_INIT(&htab[i].hash_bucket);
+ htab[i].hash_page_dirty = htab[i].hash_priority = 0;
+ }
+ mp->htab_buckets = mp->stat.st_hash_buckets = htab_buckets;
+
+ /*
+ * Only the environment creator knows the total cache size, fill in
+ * those statistics now.
+ */
+ mp->stat.st_gbytes = dbenv->mp_gbytes;
+ mp->stat.st_bytes = dbenv->mp_bytes;
+ return (0);
+
+mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region");
+ return (ret);
+}
+
+/*
+ * __memp_dbenv_refresh --
+ * Clean up after the mpool system on a close or failed open.
+ *
+ * PUBLIC: int __memp_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__memp_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ u_int32_t i;
+ int ret, t_ret;
+
+ ret = 0;
+ dbmp = dbenv->mp_handle;
+
+ /* Discard DB_MPREGs. */
+ while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) {
+ LIST_REMOVE(mpreg, q);
+ __os_free(dbenv, mpreg);
+ }
+
+ /* Discard DB_MPOOLFILEs. */
+ while ((dbmfp = TAILQ_FIRST(&dbmp->dbmfq)) != NULL)
+ if ((t_ret = __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the thread mutex. */
+ if (dbmp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+
+ /* Detach from the region(s). */
+ for (i = 0; i < dbmp->nreg; ++i)
+ if ((t_ret = __db_r_detach(
+ dbenv, &dbmp->reginfo[i], 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, dbmp->reginfo);
+ __os_free(dbenv, dbmp);
+
+ dbenv->mp_handle = NULL;
+ return (ret);
+}
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __mpool_region_maint --
+ * Return the amount of space needed for region maintenance info.
+ *
+ */
+static size_t
+__mpool_region_maint(infop)
+ REGINFO *infop;
+{
+ size_t s;
+ int numlocks;
+
+ /*
+ * For mutex maintenance we need one mutex per possible page.
+ * Compute the maximum number of pages this cache can have.
+ * Also add in an mpool mutex and mutexes for all dbenv and db
+ * handles.
+ */
+ numlocks = ((infop->rp->size / DB_MIN_PGSIZE) + 1);
+ numlocks += DB_MAX_HANDLES;
+ s = sizeof(roff_t) * numlocks;
+ return (s);
+}
+#endif
+
+/*
+ * __mpool_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__mpool_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+/*
+ * __memp_nameop
+ * Remove or rename a file in the pool.
+ *
+ * PUBLIC: int __memp_nameop __P((DB_ENV *,
+ * PUBLIC: u_int8_t *, const char *, const char *, const char *));
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+int
+__memp_nameop(dbenv, fileid, newname, fullold, fullnew)
+ DB_ENV *dbenv;
+ u_int8_t *fileid;
+ const char *newname, *fullold, *fullnew;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ roff_t newname_off;
+ int locked, ret;
+ void *p;
+
+ locked = 0;
+ dbmp = NULL;
+
+ if (!MPOOL_ON(dbenv))
+ goto fsop;
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /*
+ * Remove or rename a file that the mpool might know about. We assume
+ * that the fop layer has the file locked for exclusive access, so we
+ * don't worry about locking except for the mpool mutexes. Checkpoint
+ * can happen at any time, independent of file locking, so we have to
+ * do the actual unlink or rename system call to avoid any race.
+ *
+ * If this is a rename, allocate first, because we can't recursively
+ * grab the region lock.
+ */
+ if (newname == NULL)
+ p = NULL;
+ else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(newname) + 1, &newname_off, &p)) != 0)
+ return (ret);
+ memcpy(p, newname, strlen(newname) + 1);
+ }
+
+ locked = 1;
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Find the file -- if mpool doesn't know about this file, that's not
+	 * an error -- we may not have it open.
+ */
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ /* Ignore non-active files. */
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Ignore non-matching files. */
+ if (memcmp(fileid, R_ADDR(
+ dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+ continue;
+
+ /* If newname is NULL, we're removing the file. */
+ if (newname == NULL) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ MPOOLFILE_IGNORE(mfp);
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ } else {
+ /*
+ * Else, it's a rename. We've allocated memory
+ * for the new name. Swap it with the old one.
+ */
+ p = R_ADDR(dbmp->reginfo, mfp->path_off);
+ mfp->path_off = newname_off;
+ }
+ break;
+ }
+
+ /* Delete the memory we no longer need. */
+ if (p != NULL)
+ __db_shalloc_free(dbmp->reginfo[0].addr, p);
+
+fsop: if (newname == NULL)
+ (void)__os_unlink(dbenv, fullold);
+ else
+ (void)__os_rename(dbenv, fullold, fullnew, 1);
+
+ if (locked)
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (0);
+}
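
To make the sizing arithmetic at the top of __memp_open concrete, here is one worked case (the numbers are illustrative, not from the source): with mp_gbytes == 0, mp_bytes == 64MB and mp_ncache == 4, each region works out to

    reg_size = (0 / 4) * GIGABYTE             /* whole gigabytes per region */
             + ((0 % 4) * GIGABYTE) / 4       /* leftover gigabytes, spread */
             + 67108864 / 4                   /* bytes per region           */
             = 16777216                       /* 16MB per region            */

    htab_buckets = __db_tablesize((16777216 / 1024) / 10)   /* __db_tablesize(1638) */

so each region's hash table is sized so that, at the assumed 1KB page size, an average chain stays under ten buffers; __db_tablesize only rounds that estimate to a suitable table size.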
diff --git a/libdb/mp/mp_register.c b/libdb/mp/mp_register.c
new file mode 100644
index 0000000..89b6b60
--- /dev/null
+++ b/libdb/mp/mp_register.c
@@ -0,0 +1,76 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * memp_register --
+ * Register a file type's pgin, pgout routines.
+ *
+ * PUBLIC: int __memp_register __P((DB_ENV *, int,
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ */
+int
+__memp_register(dbenv, ftype, pgin, pgout)
+ DB_ENV *dbenv;
+ int ftype;
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+{
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "DB_ENV->memp_register", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /*
+ * Chances are good that the item has already been registered, as the
+ * DB access methods are the folks that call this routine. If already
+ * registered, just update the entry, although it's probably unchanged.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == ftype) {
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (mpreg != NULL)
+ return (0);
+
+ /* New entry. */
+ if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), &mpreg)) != 0)
+ return (ret);
+
+ mpreg->ftype = ftype;
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ LIST_INSERT_HEAD(&dbmp->dbregq, mpreg, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+}
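
The public face of this routine is DB_ENV->memp_register, with the pgin/pgout signatures shown in the PUBLIC comment above. A minimal sketch of an application registering conversion callbacks for its own file type (the names, the do-nothing bodies, and the ftype value 1 are illustrative, not part of this code):

    #include <db.h>

    static int
    my_pgin(DB_ENV *dbenv, db_pgno_t pgno, void *page, DBT *cookie)
    {
        /* Convert the page from its on-disk format after it is read. */
        return (0);
    }

    static int
    my_pgout(DB_ENV *dbenv, db_pgno_t pgno, void *page, DBT *cookie)
    {
        /* Convert the page to its on-disk format before it is written. */
        return (0);
    }

    int
    register_my_type(DB_ENV *dbenv)
    {
        /* ftype is an application-chosen tag identifying the file type. */
        return (dbenv->memp_register(dbenv, 1, my_pgin, my_pgout));
    }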
diff --git a/libdb/mp/mp_stat.c b/libdb/mp/mp_stat.c
new file mode 100644
index 0000000..23779b3
--- /dev/null
+++ b/libdb/mp/mp_stat.c
@@ -0,0 +1,491 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/mp.h"
+
+static void __memp_dumpcache __P((DB_ENV *,
+ DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t));
+static void __memp_pbh __P((DB_MPOOL *, BH *, size_t *, FILE *));
+static void __memp_stat_wait __P((REGINFO *, MPOOL *, DB_MPOOL_STAT *, int));
+
+/*
+ * __memp_stat --
+ * Display MPOOL statistics.
+ *
+ * PUBLIC: int __memp_stat
+ * PUBLIC: __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ */
+int
+__memp_stat(dbenv, gspp, fspp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOL_STAT **gspp;
+ DB_MPOOL_FSTAT ***fspp;
+ u_int32_t flags;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOL_FSTAT **tfsp, *tstruct;
+ DB_MPOOL_STAT *sp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ size_t len, nlen, pagesize;
+ u_int32_t pages, i;
+ int ret;
+ char *name, *tname;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_stat", DB_INIT_MPOOL);
+
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->memp_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /* Global statistics. */
+ if (gspp != NULL) {
+ *gspp = NULL;
+
+ if ((ret = __os_umalloc(dbenv, sizeof(**gspp), gspp)) != 0)
+ return (ret);
+ memset(*gspp, 0, sizeof(**gspp));
+ sp = *gspp;
+
+ /*
+ * Initialization and information that is not maintained on
+ * a per-cache basis.
+ */
+ c_mp = dbmp->reginfo[0].primary;
+ sp->st_gbytes = c_mp->stat.st_gbytes;
+ sp->st_bytes = c_mp->stat.st_bytes;
+ sp->st_ncache = dbmp->nreg;
+ sp->st_regsize = dbmp->reginfo[0].rp->size;
+
+ /* Walk the cache list and accumulate the global information. */
+ for (i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+
+ sp->st_map += c_mp->stat.st_map;
+ sp->st_cache_hit += c_mp->stat.st_cache_hit;
+ sp->st_cache_miss += c_mp->stat.st_cache_miss;
+ sp->st_page_create += c_mp->stat.st_page_create;
+ sp->st_page_in += c_mp->stat.st_page_in;
+ sp->st_page_out += c_mp->stat.st_page_out;
+ sp->st_ro_evict += c_mp->stat.st_ro_evict;
+ sp->st_rw_evict += c_mp->stat.st_rw_evict;
+ sp->st_page_trickle += c_mp->stat.st_page_trickle;
+ sp->st_pages += c_mp->stat.st_pages;
+ /*
+ * st_page_dirty calculated by __memp_stat_hash
+ * st_page_clean calculated here
+ */
+ __memp_stat_hash(
+ &dbmp->reginfo[i], c_mp, &sp->st_page_dirty);
+ sp->st_page_clean = sp->st_pages - sp->st_page_dirty;
+ sp->st_hash_buckets += c_mp->stat.st_hash_buckets;
+ sp->st_hash_searches += c_mp->stat.st_hash_searches;
+ sp->st_hash_longest += c_mp->stat.st_hash_longest;
+ sp->st_hash_examined += c_mp->stat.st_hash_examined;
+ /*
+ * st_hash_nowait calculated by __memp_stat_wait
+ * st_hash_wait
+ */
+ __memp_stat_wait(&dbmp->reginfo[i], c_mp, sp, flags);
+ sp->st_region_nowait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait;
+ sp->st_region_wait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait;
+ sp->st_alloc += c_mp->stat.st_alloc;
+ sp->st_alloc_buckets += c_mp->stat.st_alloc_buckets;
+ if (sp->st_alloc_max_buckets <
+ c_mp->stat.st_alloc_max_buckets)
+ sp->st_alloc_max_buckets =
+ c_mp->stat.st_alloc_max_buckets;
+ sp->st_alloc_pages += c_mp->stat.st_alloc_pages;
+ if (sp->st_alloc_max_pages <
+ c_mp->stat.st_alloc_max_pages)
+ sp->st_alloc_max_pages =
+ c_mp->stat.st_alloc_max_pages;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait = 0;
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait = 0;
+ pages = c_mp->stat.st_pages;
+ memset(&c_mp->stat, 0, sizeof(c_mp->stat));
+ c_mp->stat.st_hash_buckets = c_mp->htab_buckets;
+ c_mp->stat.st_pages = pages;
+ }
+ }
+
+ /*
+ * We have duplicate statistics fields in per-file structures
+ * and the cache. The counters are only incremented in the
+ * per-file structures, except if a file is flushed from the
+ * mpool, at which time we copy its information into the cache
+ * statistics. We added the cache information above, now we
+ * add the per-file information.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ sp->st_map += mfp->stat.st_map;
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+ if (fspp == NULL && LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ /* Per-file statistics. */
+ if (fspp != NULL) {
+ *fspp = NULL;
+
+ /* Count the MPOOLFILE structures. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (i = 0, len = 0,
+ mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL;
+ ++i, mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
+ len += sizeof(DB_MPOOL_FSTAT *) +
+ sizeof(DB_MPOOL_FSTAT) +
+ strlen(__memp_fns(dbmp, mfp)) + 1;
+ len += sizeof(DB_MPOOL_FSTAT *); /* Trailing NULL */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (i == 0)
+ return (0);
+
+ /* Allocate space */
+ if ((ret = __os_umalloc(dbenv, len, fspp)) != 0)
+ return (ret);
+
+ /*
+ * Build each individual entry. We assume that an array of
+		 * pointers is aligned correctly to be followed by an array
+ * of structures, which should be safe (in this particular
+ * case, the first element of the structure is a pointer, so
+ * we're doubly safe). The array is followed by space for
+ * the text file names.
+ *
+ * Add 1 to i because we need to skip over the NULL.
+ */
+ tfsp = *fspp;
+ tstruct = (DB_MPOOL_FSTAT *)(tfsp + i + 1);
+ tname = (char *)(tstruct + i);
+
+ /*
+ * Files may have been opened since we counted, don't walk
+ * off the end of the allocated space.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL && i-- > 0;
+ ++tfsp, ++tstruct, tname += nlen,
+ mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ name = __memp_fns(dbmp, mfp);
+ nlen = strlen(name) + 1;
+ *tfsp = tstruct;
+ *tstruct = mfp->stat;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
+ tstruct->file_name = tname;
+ memcpy(tname, name, nlen);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ *tfsp = NULL;
+ }
+ return (0);
+}
+
+#define FMAP_ENTRIES 200 /* Files we map. */
+
+#define MPOOL_DUMP_HASH 0x01 /* Debug hash chains. */
+#define MPOOL_DUMP_MEM 0x04 /* Debug region memory. */
+#define MPOOL_DUMP_ALL 0x07 /* Debug all. */
+
+/*
+ * __memp_dump_region --
+ * Display MPOOL structures.
+ *
+ * PUBLIC: int __memp_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+int
+__memp_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { MP_CAN_MMAP, "mmapped" },
+ { MP_DEADFILE, "dead" },
+ { MP_DIRECT, "no buffer" },
+ { MP_EXTENT, "extent" },
+ { MP_TEMP, "temporary" },
+ { MP_UNLINK, "unlink" },
+ { 0, NULL }
+ };
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ size_t fmap[FMAP_ENTRIES + 1];
+ u_int32_t i, flags;
+ int cnt;
+ u_int8_t *p;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_dump_region", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(MPOOL_DUMP_ALL);
+ break;
+ case 'h':
+ LF_SET(MPOOL_DUMP_HASH);
+ break;
+ case 'm':
+ LF_SET(MPOOL_DUMP_MEM);
+ break;
+ }
+
+ mp = dbmp->reginfo[0].primary;
+
+ /* Display MPOOL structures. */
+ (void)fprintf(fp, "%s\nPool (region addr 0x%lx)\n",
+ DB_LINE, P_TO_ULONG(dbmp->reginfo[0].addr));
+
+ /* Display the MPOOLFILE structures. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (cnt = 0, mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: pagesize %lu\n", cnt + 1,
+ __memp_fns(dbmp, mfp), (u_long)mfp->stat.st_pagesize);
+ (void)fprintf(fp, "\t type %ld; ref %lu; blocks %lu; last %lu;",
+ (long)mfp->ftype, (u_long)mfp->mpf_cnt,
+ (u_long)mfp->block_cnt, (u_long)mfp->last_pgno);
+ __db_prflags(mfp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n\t UID: ");
+ p = R_ADDR(dbmp->reginfo, mfp->fileid_off);
+ for (i = 0; i < DB_FILE_ID_LEN; ++i, ++p) {
+ (void)fprintf(fp, "%x", (u_int)*p);
+ if (i < DB_FILE_ID_LEN - 1)
+ (void)fprintf(fp, " ");
+ }
+ (void)fprintf(fp, "\n");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: per-process, %s\n",
+ cnt + 1, __memp_fn(dbmfp),
+ F_ISSET(dbmfp, MP_READONLY) ? "readonly" : "read/write");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = INVALID_ROFF;
+ else
+ fmap[FMAP_ENTRIES] = INVALID_ROFF;
+
+ /* Dump the memory pools. */
+ for (i = 0; i < mp->nreg; ++i) {
+ (void)fprintf(fp, "%s\nCache #%d:\n", DB_LINE, i + 1);
+ __memp_dumpcache(
+ dbenv, dbmp, &dbmp->reginfo[i], fmap, fp, flags);
+ }
+
+ /* Flush in case we're debugging. */
+ (void)fflush(fp);
+
+ return (0);
+}
+
+/*
+ * __memp_dumpcache --
+ * Display statistics for a cache.
+ */
+static void
+__memp_dumpcache(dbenv, dbmp, reginfo, fmap, fp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ REGINFO *reginfo;
+ size_t *fmap;
+ FILE *fp;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ int bucket;
+
+ c_mp = reginfo->primary;
+
+ /* Display the hash table list of BH's. */
+ if (LF_ISSET(MPOOL_DUMP_HASH)) {
+ (void)fprintf(fp,
+ "%s\nBH hash table (%lu hash slots)\nbucket (priority):\n",
+ DB_LINE, (u_long)c_mp->htab_buckets);
+ (void)fprintf(fp,
+ "\tpageno, file, ref, address [LSN] priority\n");
+
+ for (hp = R_ADDR(reginfo, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ if ((bhp =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL)
+ (void)fprintf(fp, "%lu (%u):\n",
+ (u_long)bucket, hp->hash_priority);
+ for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ __memp_pbh(dbmp, bhp, fmap, fp);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+ }
+
+ /* Dump the memory pool. */
+ if (LF_ISSET(MPOOL_DUMP_MEM))
+ __db_shalloc_dump(reginfo->addr, fp);
+}
+
+/*
+ * __memp_pbh --
+ * Display a BH structure.
+ */
+static void
+__memp_pbh(dbmp, bhp, fmap, fp)
+ DB_MPOOL *dbmp;
+ BH *bhp;
+ size_t *fmap;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { BH_CALLPGIN, "callpgin" },
+ { BH_DIRTY, "dirty" },
+ { BH_DIRTY_CREATE, "created" },
+ { BH_DISCARD, "discard" },
+ { BH_LOCKED, "locked" },
+ { BH_TRASH, "trash" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; i < FMAP_ENTRIES; ++i)
+ if (fmap[i] == INVALID_ROFF || fmap[i] == bhp->mf_offset)
+ break;
+
+ if (fmap[i] == INVALID_ROFF)
+ (void)fprintf(fp, "\t%5lu, %lu, %2lu, %8lu [%lu,%lu] %lu",
+ (u_long)bhp->pgno, (u_long)bhp->mf_offset,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
+ else
+ (void)fprintf(fp, "\t%5lu, #%d, %2lu, %8lu [%lu,%lu] %lu",
+ (u_long)bhp->pgno, i + 1,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
+
+ __db_prflags(bhp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n");
+}
+
+/*
+ * __memp_stat_hash --
+ * Total hash bucket stats (other than mutex wait) into the region.
+ *
+ * PUBLIC: void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *));
+ */
+void
+__memp_stat_hash(reginfo, mp, dirtyp)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ u_int32_t *dirtyp;
+{
+ DB_MPOOL_HASH *hp;
+ u_int32_t dirty;
+ int i;
+
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0, dirty = 0; i < mp->htab_buckets; i++, hp++)
+ dirty += hp->hash_page_dirty;
+ *dirtyp = dirty;
+}
+
+/*
+ * __memp_stat_wait --
+ * Total hash bucket wait stats into the region.
+ */
+static void
+__memp_stat_wait(reginfo, mp, mstat, flags)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ DB_MPOOL_STAT *mstat;
+ int flags;
+{
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
+ int i;
+
+ mstat->st_hash_max_wait = 0;
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0; i < mp->htab_buckets; i++, hp++) {
+ mutexp = &hp->hash_mutex;
+ mstat->st_hash_nowait += mutexp->mutex_set_nowait;
+ mstat->st_hash_wait += mutexp->mutex_set_wait;
+ if (mutexp->mutex_set_wait > mstat->st_hash_max_wait)
+ mstat->st_hash_max_wait = mutexp->mutex_set_wait;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mutexp->mutex_set_wait = 0;
+ mutexp->mutex_set_nowait = 0;
+ }
+ }
+}
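
Seen from the application, the statistics gathered above arrive through DB_ENV->memp_stat. A small sketch of a caller reporting the global counters (the report itself is illustrative; passing NULL for the per-file array asks for global statistics only, as the code above allows):

    #include <db.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    report_cache(DB_ENV *dbenv)
    {
        DB_MPOOL_STAT *gsp;
        int ret;

        if ((ret = dbenv->memp_stat(dbenv, &gsp, NULL, 0)) != 0)
            return (ret);
        printf("%lu caches, %lu hits, %lu misses\n",
            (unsigned long)gsp->st_ncache,
            (unsigned long)gsp->st_cache_hit,
            (unsigned long)gsp->st_cache_miss);
        free(gsp);      /* the struct is allocated for the caller */
        return (0);
    }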
diff --git a/libdb/mp/mp_sync.c b/libdb/mp/mp_sync.c
new file mode 100644
index 0000000..0ffc1cc
--- /dev/null
+++ b/libdb/mp/mp_sync.c
@@ -0,0 +1,627 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+typedef struct {
+ DB_MPOOL_HASH *track_hp; /* Hash bucket. */
+
+ roff_t track_off; /* Page file offset. */
+ db_pgno_t track_pgno; /* Page number. */
+} BH_TRACK;
+
+static int __bhcmp __P((const void *, const void *));
+static int __memp_close_flush_files __P((DB_ENV *, DB_MPOOL *));
+static int __memp_sync_files __P((DB_ENV *, DB_MPOOL *));
+
+/*
+ * __memp_sync --
+ * Mpool sync function.
+ *
+ * PUBLIC: int __memp_sync __P((DB_ENV *, DB_LSN *));
+ */
+int
+__memp_sync(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_sync", DB_INIT_MPOOL);
+
+ /*
+ * If no LSN is provided, flush the entire cache (reasonable usage
+ * even if there's no log subsystem configured).
+ */
+ if (lsnp != NULL)
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "memp_sync", DB_INIT_LOG);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /* If we've flushed to the requested LSN, return that information. */
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) <= 0) {
+ *lsnp = mp->lsn;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ if ((ret = __memp_sync_int(dbenv, NULL, 0, DB_SYNC_CACHE, NULL)) != 0)
+ return (ret);
+
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) > 0)
+ mp->lsn = *lsnp;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ return (0);
+}
+
+/*
+ * __memp_fsync --
+ * Mpool file sync function.
+ *
+ * PUBLIC: int __memp_fsync __P((DB_MPOOLFILE *));
+ */
+int
+__memp_fsync(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * If this handle doesn't have a file descriptor that's open for
+ * writing, or if the file is a temporary, there's no reason to
+ * proceed further.
+ */
+ if (F_ISSET(dbmfp, MP_READONLY))
+ return (0);
+
+ if (F_ISSET(dbmfp->mfp, MP_TEMP))
+ return (0);
+
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
+}
+
+/*
+ * __mp_xxx_fh --
+ * Return a file descriptor for DB 1.85 compatibility locking.
+ *
+ * PUBLIC: int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+ */
+int
+__mp_xxx_fh(dbmfp, fhp)
+ DB_MPOOLFILE *dbmfp;
+ DB_FH **fhp;
+{
+ DB_ENV *dbenv;
+ /*
+ * This is a truly spectacular layering violation, intended ONLY to
+ * support compatibility for the DB 1.85 DB->fd call.
+ *
+ * Sync the database file to disk, creating the file as necessary.
+ *
+ * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
+ * The MP_READONLY test isn't interesting because we will either
+ * already have a file descriptor (we opened the database file for
+ * reading) or we aren't readonly (we created the database which
+ * requires write privileges). The MP_TEMP test isn't interesting
+ * because we want to write to the backing file regardless so that
+ * we get a file descriptor to return.
+ */
+ *fhp = dbmfp->fhp;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ return (0);
+ dbenv = dbmfp->dbmp->dbenv;
+
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
+}
+
+/*
+ * __memp_sync_int --
+ * Mpool sync internal function.
+ *
+ * PUBLIC: int __memp_sync_int
+ * PUBLIC: __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *));
+ */
+int
+__memp_sync_int(dbenv, dbmfp, ar_max, op, wrotep)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ int ar_max, *wrotep;
+ db_sync_op op;
+{
+ BH *bhp;
+ BH_TRACK *bharray;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t n_cache;
+ int ar_cnt, hb_lock, i, pass, remaining, ret, t_ret, wait_cnt, wrote;
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+ pass = wrote = 0;
+
+ /*
+	 * If the caller does not specify how many pages, assume one
+ * per bucket.
+ */
+ if (ar_max == 0)
+ ar_max = mp->nreg * mp->htab_buckets;
+
+ if ((ret =
+ __os_malloc(dbenv, ar_max * sizeof(BH_TRACK), &bharray)) != 0)
+ return (ret);
+
+ /*
+ * Walk each cache's list of buffers and mark all dirty buffers to be
+ * written and all pinned buffers to be potentially written, depending
+ * on our flags.
+ */
+ for (ar_cnt = 0, n_cache = 0; n_cache < mp->nreg; ++n_cache) {
+ c_mp = dbmp->reginfo[n_cache].primary;
+
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ for (i = 0; i < c_mp->htab_buckets; i++, hp++) {
+ /*
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero. We
+ * can ignore empty buckets because we only need write
+ * buffers that were dirty before we started.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ /* Always ignore unreferenced, clean pages. */
+ if (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ /*
+ * Checkpoints have to wait on all pinned pages,
+ * as pages may be marked dirty when returned to
+ * the cache.
+ *
+ * File syncs only wait on pages both pinned and
+ * dirty. (We don't care if pages are marked
+ * dirty when returned to the cache, that means
+ * there's another writing thread and flushing
+ * the cache for this handle is meaningless.)
+ */
+ if (op == DB_SYNC_FILE &&
+ !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /*
+ * Ignore temporary files -- this means you
+ * can't even flush temporary files by handle.
+ * (Checkpoint doesn't require temporary files
+ * be flushed and the underlying buffer write
+				 * routine may not be able to write it
+ * anyway.)
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ continue;
+
+ /*
+ * If we're flushing a specific file, see if
+ * this page is from that file.
+ */
+ if (dbmfp != NULL && mfp != dbmfp->mfp)
+ continue;
+
+ /*
+ * Ignore files that aren't involved in DB's
+ * transactional operations during checkpoints.
+ */
+ if (dbmfp == NULL && mfp->lsn_off == -1)
+ continue;
+
+ /* Track the buffer, we want it. */
+ bharray[ar_cnt].track_hp = hp;
+ bharray[ar_cnt].track_pgno = bhp->pgno;
+ bharray[ar_cnt].track_off = bhp->mf_offset;
+ ar_cnt++;
+
+ if (ar_cnt >= ar_max) {
+ if ((ret = __os_realloc(dbenv,
+ (ar_max * 2) * sizeof(BH_TRACK),
+ &bharray)) != 0)
+ break;
+ ar_max *= 2;
+ }
+ }
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ if (ret != 0)
+ goto err;
+ }
+ }
+
+	/* If there are no buffers to write, we're done. */
+ if (ar_cnt == 0)
+ goto done;
+
+ /*
+ * Write the buffers in file/page order, trying to reduce seeks by the
+ * filesystem and, when pages are smaller than filesystem block sizes,
+ * reduce the actual number of writes.
+ */
+ if (ar_cnt > 1)
+ qsort(bharray, ar_cnt, sizeof(BH_TRACK), __bhcmp);
+
+ /*
+ * If we're trickling buffers, only write enough to reach the correct
+ * percentage for this region. We may not write enough if the dirty
+ * buffers have an unbalanced distribution among the regions, but that
+ * seems unlikely.
+ */
+ if (op == DB_SYNC_TRICKLE && ar_cnt > ar_max / (int)mp->nreg)
+ ar_cnt = ar_max / (int)mp->nreg;
+
+ /*
+ * Flush the log. We have to ensure the log records reflecting the
+ * changes on the database pages we're writing have already made it
+ * to disk. We still have to check the log each time we write a page
+ * (because pages we are about to write may be modified after we have
+ * flushed the log), but in general this will at least avoid any I/O
+ * on the log's part.
+ */
+ if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
+ goto err;
+
+ /*
+ * Walk the array, writing buffers. When we write a buffer, we NULL
+ * out its hash bucket pointer so we don't process a slot more than
+ * once.
+ */
+ for (remaining = ar_cnt, i = pass = 0; remaining > 0; ++i) {
+ if (i >= ar_cnt) {
+ i = 0;
+ ++pass;
+ __os_sleep(dbenv, 1, 0);
+ }
+ if ((hp = bharray[i].track_hp) == NULL)
+ continue;
+
+ /* Lock the hash bucket and find the buffer. */
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->pgno == bharray[i].track_pgno &&
+ bhp->mf_offset == bharray[i].track_off)
+ break;
+
+ /*
+ * If we can't find the buffer we're done, somebody else had
+ * to have written it.
+ *
+ * If the buffer isn't pinned or dirty, we're done, there's
+ * no work needed.
+ */
+ if (bhp == NULL || (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ --remaining;
+ bharray[i].track_hp = NULL;
+ continue;
+ }
+
+ /*
+ * If the buffer is locked by another thread, ignore it, we'll
+ * come back to it.
+ *
+ * If the buffer is pinned and it's only the first or second
+ * time we have looked at it, ignore it, we'll come back to
+ * it.
+ *
+ * In either case, skip the buffer if we're not required to
+ * write it.
+ */
+ if (F_ISSET(bhp, BH_LOCKED) || (bhp->ref != 0 && pass < 2)) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ if (op != DB_SYNC_CACHE && op != DB_SYNC_FILE) {
+ --remaining;
+ bharray[i].track_hp = NULL;
+ }
+ continue;
+ }
+
+ /*
+ * The buffer is either pinned or dirty.
+ *
+ * Set the sync wait-for count, used to count down outstanding
+ * references to this buffer as they are returned to the cache.
+ */
+ bhp->ref_sync = bhp->ref;
+
+ /* Pin the buffer into memory and lock it. */
+ ++bhp->ref;
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+
+ /*
+ * Unlock the hash bucket and wait for the wait-for count to
+ * go to 0. No new thread can acquire the buffer because we
+ * have it locked.
+ *
+ * If a thread attempts to re-pin a page, the wait-for count
+ * will never go to 0 (the thread spins on our buffer lock,
+ * while we spin on the thread's ref count). Give up if we
+ * don't get the buffer in 3 seconds, we can try again later.
+ *
+ * If, when the wait-for count goes to 0, the buffer is found
+ * to be dirty, write it.
+ */
+ MUTEX_UNLOCK(dbenv, mutexp);
+ for (wait_cnt = 1;
+ bhp->ref_sync != 0 && wait_cnt < 4; ++wait_cnt)
+ __os_sleep(dbenv, 1, 0);
+ MUTEX_LOCK(dbenv, mutexp);
+ hb_lock = 1;
+
+ /*
+ * If the ref_sync count has gone to 0, we're going to be done
+ * with this buffer no matter what happens.
+ */
+ if (bhp->ref_sync == 0) {
+ --remaining;
+ bharray[i].track_hp = NULL;
+ }
+
+ /*
+ * If the ref_sync count has gone to 0 and the buffer is still
+ * dirty, we write it. We only try to write the buffer once.
+ * Any process checkpointing or trickle-flushing the pool
+ * must be able to write any underlying file -- if the write
+ * fails, error out. It would be very strange if file sync
+ * failed to write, but we don't care if it happens.
+ */
+ if (bhp->ref_sync == 0 && F_ISSET(bhp, BH_DIRTY)) {
+ hb_lock = 0;
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ if ((ret = __memp_bhwrite(dbmp, hp, mfp, bhp, 1)) == 0)
+ ++wrote;
+ else if (op == DB_SYNC_CACHE || op == DB_SYNC_TRICKLE)
+ __db_err(dbenv, "%s: unable to flush page: %lu",
+ __memp_fns(dbmp, mfp), (u_long)bhp->pgno);
+ else
+ ret = 0;
+ }
+
+ /*
+ * If ref_sync count never went to 0, the buffer was written
+ * by another thread, or the write failed, we still have the
+ * buffer locked.
+ *
+ * We may or may not currently hold the hash bucket mutex. If
+ * the __memp_bhwrite -> __memp_pgwrite call was successful,
+ * then __memp_pgwrite will have swapped the buffer lock for
+ * the hash lock. All other call paths will leave us without
+ * the hash bucket lock.
+ *
+ * The order of mutexes above was to acquire the buffer lock
+ * while holding the hash bucket lock. Don't deadlock here,
+ * release the buffer lock and then acquire the hash bucket
+ * lock.
+ */
+ if (F_ISSET(bhp, BH_LOCKED)) {
+ F_CLR(bhp, BH_LOCKED);
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+
+ if (!hb_lock)
+ MUTEX_LOCK(dbenv, mutexp);
+ }
+
+ /*
+ * Reset the ref_sync count regardless of our success, we're
+ * done with this buffer for now.
+ */
+ bhp->ref_sync = 0;
+
+ /* Discard our reference and unlock the bucket. */
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ if (ret != 0)
+ break;
+ }
+
+done: /* If we've opened files to flush pages, close them. */
+ if ((t_ret = __memp_close_flush_files(dbenv, dbmp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * If doing a checkpoint or flushing a file for the application, we
+ * have to force the pages to disk. We don't do this as we go along
+ * because we want to give the OS as much time as possible to lazily
+ * flush, and because we have to flush files that might not even have
+ * had dirty buffers in the cache, so we have to walk the files list.
+ */
+ if (ret == 0 && (op == DB_SYNC_CACHE || op == DB_SYNC_FILE)) {
+ if (dbmfp == NULL)
+ ret = __memp_sync_files(dbenv, dbmp);
+ else
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ }
+
+err: __os_free(dbenv, bharray);
+ if (wrotep != NULL)
+ *wrotep = wrote;
+
+ return (ret);
+}
+
+/*
+ * __memp_sync_files --
+ * Sync all the files in the environment, open or not.
+ */
+static int
+__memp_sync_files(dbenv, dbmp)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+{
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ int ret, t_ret;
+
+ ret = 0;
+ mp = dbmp->reginfo[0].primary;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ if (mfp->stat.st_page_out == 0 ||
+ F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Look for an already open handle. */
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0)
+ goto err;
+
+ /* If we don't find one, open one. */
+ if (dbmfp == NULL) {
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ goto err;
+ ret = __memp_fopen_int(
+ dbmfp, mfp, R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize);
+ if (ret == 0)
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ if ((t_ret =
+ __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+ }
+
+ if (0) {
+err: __db_err(dbenv, "%s: cannot sync: %s",
+ R_ADDR(dbmp->reginfo, mfp->path_off), db_strerror(ret));
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * __memp_close_flush_files --
+ * Close files opened only to flush buffers.
+ */
+static int
+__memp_close_flush_files(dbenv, dbmp)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+{
+ DB_MPOOLFILE *dbmfp;
+ int ret;
+
+ /*
+ * The routine exists because we must close files opened by sync to
+ * flush buffers. There are two cases: first, extent files have to
+ * be closed so they may be removed when empty. Second, regular
+ * files have to be closed so we don't run out of descriptors (for
+	 * example, an application partitioning its data into databases
+ * based on timestamps, so there's a continually increasing set of
+ * files).
+ *
+ * We mark files opened in the __memp_bhwrite() function with the
+ * MP_FLUSH flag. Here we walk through our file descriptor list,
+ * and, if a file was opened by __memp_bhwrite(), we close it.
+ */
+retry: MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (F_ISSET(dbmfp, MP_FLUSH)) {
+ F_CLR(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if ((ret = __memp_fclose_int(dbmfp, 0)) != 0)
+ return (ret);
+ goto retry;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+}
+
+static int
+__bhcmp(p1, p2)
+ const void *p1, *p2;
+{
+ BH_TRACK *bhp1, *bhp2;
+
+ bhp1 = (BH_TRACK *)p1;
+ bhp2 = (BH_TRACK *)p2;
+
+ /* Sort by file (shared memory pool offset). */
+ if (bhp1->track_off < bhp2->track_off)
+ return (-1);
+ if (bhp1->track_off > bhp2->track_off)
+ return (1);
+
+ /*
+ * !!!
+ * Defend against badly written quicksort code calling the comparison
+ * function with two identical pointers (e.g., WATCOM C++ (Power++)).
+ */
+ if (bhp1->track_pgno < bhp2->track_pgno)
+ return (-1);
+ if (bhp1->track_pgno > bhp2->track_pgno)
+ return (1);
+ return (0);
+}
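
From the application side the entry point above is DB_ENV->memp_sync; as the comment in __memp_sync notes, a NULL LSN flushes the entire cache. A minimal sketch (the wrapper name is illustrative):

    #include <db.h>

    int
    flush_cache(DB_ENV *dbenv)
    {
        /* NULL LSN: write every dirty buffer in every cache region. */
        return (dbenv->memp_sync(dbenv, NULL));
    }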
diff --git a/libdb/mp/mp_trickle.c b/libdb/mp/mp_trickle.c
new file mode 100644
index 0000000..8f78a2b
--- /dev/null
+++ b/libdb/mp/mp_trickle.c
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_trickle --
+ * Keep a specified percentage of the buffers clean.
+ *
+ * PUBLIC: int __memp_trickle __P((DB_ENV *, int, int *));
+ */
+int
+__memp_trickle(dbenv, pct, nwrotep)
+ DB_ENV *dbenv;
+ int pct, *nwrotep;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ u_int32_t clean, dirty, i, total, dtmp;
+ int ret, wrote;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_trickle", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ if (nwrotep != NULL)
+ *nwrotep = 0;
+
+ if (pct < 1 || pct > 100)
+ return (EINVAL);
+
+ /*
+ * If there are sufficient clean buffers, no buffers or no dirty
+ * buffers, we're done.
+ *
+ * XXX
+ * Using hash_page_dirty is our only choice at the moment, but it's not
+ * as correct as we might like in the presence of pools having more
+ * than one page size, as a free 512B buffer isn't the same as a free
+ * 8KB buffer.
+ *
+ * Loop through the caches counting total/dirty buffers.
+ */
+ for (ret = 0, i = dirty = total = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ total += c_mp->stat.st_pages;
+ __memp_stat_hash(&dbmp->reginfo[i], c_mp, &dtmp);
+ dirty += dtmp;
+ }
+
+ clean = total - dirty;
+ if (clean == total || (clean * 100) / total >= (u_long)pct)
+ return (0);
+
+ if (nwrotep == NULL)
+ nwrotep = &wrote;
+ ret = __memp_sync_int(dbenv, NULL,
+ ((total * pct) / 100) - clean, DB_SYNC_TRICKLE, nwrotep);
+
+ mp->stat.st_page_trickle += *nwrotep;
+
+ return (ret);
+}
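
The corresponding application call is DB_ENV->memp_trickle, matching the PUBLIC signature above. A sketch of a background task keeping at least 20% of the buffers clean (the percentage and wrapper name are illustrative):

    #include <db.h>

    int
    trickle_once(DB_ENV *dbenv, int *nwrotep)
    {
        /* Write just enough dirty buffers so 20% of the cache is clean;
         * *nwrotep is set to the number of buffers written. */
        return (dbenv->memp_trickle(dbenv, 20, nwrotep));
    }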
diff --git a/libdb/mutex/README b/libdb/mutex/README
new file mode 100644
index 0000000..1e86268
--- /dev/null
+++ b/libdb/mutex/README
@@ -0,0 +1,108 @@
+# $Id$
+
+Note: this only applies to locking using test-and-set and fcntl calls;
+pthreads were added after this was written.
+
+Resource locking routines: lock based on a db_mutex_t. All this gunk
+(including trying to make assembly code portable) is necessary because
+System V semaphores require system calls for uncontested locks and we
+don't want to make two system calls per resource lock.
+
+First, this is how it works. The db_mutex_t structure contains a resource
+test-and-set lock (tsl), a file offset, a pid for debugging and statistics
+information.
+
+If HAVE_MUTEX_THREADS is defined (i.e. we know how to do test-and-sets
+for this compiler/architecture combination), we try to lock the resource
+tsl __os_spin() times. If we can't acquire the lock that way, we use a
+system call to sleep for 1ms, 2ms, 4ms, etc. (The time is bounded at 1
+second, just in case.) Using the timer backoff means that there are two
+assumptions: that locks are held for brief periods (never over system
+calls or I/O) and that locks are not hotly contested.
+
+If HAVE_MUTEX_THREADS is not defined, i.e. we can't do test-and-sets, we
+use a file descriptor to do byte locking on a file at a specified offset.
+In this case, ALL of the locking is done in the kernel. Because file
+descriptors are allocated per process, we have to provide the file
+descriptor as part of the lock call. We still have to do timer backoff
+because we need to be able to block ourselves, i.e. the lock manager
+causes processes to wait by having the process acquire a mutex and then
+attempting to re-acquire the mutex. There's no way to use kernel locking
+to block yourself, i.e. if you hold a lock and attempt to re-acquire it,
+the attempt will succeed.
+
+Next, let's talk about why it doesn't work the way a reasonable person
+would think it should work.
+
+Ideally, we'd have the ability to try to lock the resource tsl, and if
+that fails, increment a counter of waiting processes, then block in the
+kernel until the tsl is released. The process holding the resource tsl
+would see the wait counter when it went to release the resource tsl, and
+would wake any waiting processes up after releasing the lock. This would
+actually require both another tsl (call it the mutex tsl) and
+synchronization between the call that blocks in the kernel and the actual
+resource tsl. The mutex tsl would be used to protect accesses to the
+db_mutex_t itself. Locking the mutex tsl would be done by a busy loop,
+which is safe because processes would never block holding that tsl (all
+they would do is try to obtain the resource tsl and set/check the wait
+count). The problem in this model is that the blocking call into the
+kernel requires a blocking semaphore, i.e. one whose normal state is
+locked.
+
+The only portable forms of locking under UNIX are fcntl(2) on a file
+descriptor/offset, and System V semaphores. Neither of these locking
+methods are sufficient to solve the problem.
+
+The problem with fcntl locking is that only the process that obtained the
+lock can release it. Remember, we want the normal state of the kernel
+semaphore to be locked. So, if the creator of the db_mutex_t were to
+initialize the lock to "locked", then a second process locks the resource
+tsl, and then a third process needs to block, waiting for the resource
+tsl, when the second process wants to wake up the third process, it can't
+because it's not the holder of the lock! For the second process to be
+the holder of the lock, we would have to make a system call per
+uncontested lock, which is what we were trying to get away from in the
+first place.
+
+There are some hybrid schemes, such as signaling the holder of the lock,
+or using a different blocking offset depending on which process is
+holding the lock, but it gets complicated fairly quickly. I'm open to
+suggestions, but I'm not holding my breath.
+
+Regardless, we use this form of locking when HAVE_SPINLOCKS is not
+defined (i.e. we're locking in the kernel), because it doesn't have the
+limitations found in System V semaphores, and because the normal state of
+the kernel object in that case is unlocked, so the process releasing the
+lock is also the holder of the lock.
+
+The System V semaphore design has a number of other limitations that make
+it inappropriate for this task. Namely:
+
+First, the semaphore key name space is separate from the file system name
+space (although there exist methods for using file names to create
+semaphore keys). If we use a well-known key, there's no reason to believe
+that any particular key will not already be in use, either by another
+instance of the DB application or some other application, in which case
+the DB application will fail. If we create a key, then we have to use a
+file system name to rendezvous and pass around the key.
+
+Second, System V semaphores traditionally have compile-time, system-wide
+limits on the number of semaphore keys that you can have. Typically, that
+number is far too low for any practical purpose. Since the semaphores
+permit more than a single slot per semaphore key, we could try and get
+around that limit by using multiple slots, but that means that the file
+that we're using for rendezvous is going to have to contain slot
+information as well as semaphore key information, and we're going to be
+reading/writing it on every db_mutex_t init or destroy operation. Anyhow,
+similar compile-time, system-wide limits on the numbers of slots per
+semaphore key kick in, and you're right back where you started.
+
+My fantasy is that once POSIX.1 standard mutexes are in widespread use,
+we can switch to them. My guess is that it won't happen, because the
+POSIX semaphores are only required to work for threads within a process,
+and not independent processes.
+
+Note: there are races in the statistics code, but since it's just that,
+I didn't bother fixing them. (The fix requires a mutex tsl, so, when/if
+this code is fixed to do rational locking (see above), then change the
+statistics update code to acquire/release the mutex tsl.)
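
After the initial __os_spin() attempts, the spin-then-sleep behaviour the README describes amounts to the following shape (a sketch only; try_tsl() stands in for the architecture-specific test-and-set and usleep() for the real sleep primitive -- neither name comes from this code):

    #include <unistd.h>

    extern int try_tsl(volatile long *tslp);   /* hypothetical: nonzero on success */

    void
    acquire_with_backoff(volatile long *tslp)
    {
        int ms;

        for (ms = 1; !try_tsl(tslp);) {
            usleep(ms * 1000);        /* sleep 1ms, 2ms, 4ms, ... */
            if ((ms <<= 1) > 1000)    /* bounded at one second    */
                ms = 1000;
        }
    }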
diff --git a/libdb/mutex/mut_fcntl.c b/libdb/mutex/mut_fcntl.c
new file mode 100644
index 0000000..687ca6b
--- /dev/null
+++ b/libdb/mutex/mut_fcntl.c
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_fcntl_mutex_init --
+ * Initialize a DB mutex structure.
+ *
+ * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_fcntl_mutex_init(dbenv, mutexp, offset)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t offset;
+{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flags is in the mutex
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application is private, we don't need any locks.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+
+ mutexp->off = offset;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_lock
+ * Lock on a mutex, blocking if necessary.
+ *
+ * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ struct flock k_lock;
+ int locked, ms, waited;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /* Initialize the lock. */
+ k_lock.l_whence = SEEK_SET;
+ k_lock.l_start = mutexp->off;
+ k_lock.l_len = 1;
+
+ for (locked = waited = 0;;) {
+ /*
+ * Wait for the lock to become available; wait 1ms initially,
+ * up to 1 second.
+ */
+ for (ms = 1; mutexp->pid != 0;) {
+ waited = 1;
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+ }
+
+ /* Acquire an exclusive kernel lock. */
+ k_lock.l_type = F_WRLCK;
+ if (fcntl(dbenv->lockfhp->fd, F_SETLKW, &k_lock))
+ return (__os_get_errno());
+
+ /* If the resource is still available, it's ours. */
+ if (mutexp->pid == 0) {
+ locked = 1;
+ __os_id(&mutexp->pid);
+ }
+
+ /* Release the kernel lock. */
+ k_lock.l_type = F_UNLCK;
+ if (fcntl(dbenv->lockfhp->fd, F_SETLK, &k_lock))
+ return (__os_get_errno());
+
+ /*
+ * If we got the resource lock we're done.
+ *
+ * !!!
+ * We can't check to see if the lock is ours, because we may
+ * be trying to block ourselves in the lock manager, and so
+ * the holder of the lock that's preventing us from getting
+ * the lock may be us! (Seriously.)
+ */
+ if (locked)
+ break;
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+#ifdef DIAGNOSTIC
+#define MSG "mutex_unlock: ERROR: released lock that was unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ if (mutexp->pid == 0)
+ write(STDERR_FILENO, MSG, sizeof(MSG) - 1);
+#endif
+
+ /*
+ * Release the resource. We don't have to acquire any locks because
+ * processes trying to acquire the lock are checking for a pid set to
+ * 0/non-0, not to any specific value.
+ */
+ mutexp->pid = 0;
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ COMPQUIET(mutexp, NULL);
+
+ return (0);
+}
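
The primitive underneath __db_fcntl_mutex_lock/unlock is an exclusive
fcntl(2) byte-range lock on the environment's lock file.  Stripped of the
DB bookkeeping, the idea is roughly the sketch below; the descriptor and
offset are placeholders, not the ones DB uses.

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

/* Sketch: take or release an exclusive lock on one byte of a file. */
static int
lock_byte(int fd, off_t offset, int lock_it)
{
    struct flock fl;

    fl.l_whence = SEEK_SET;             /* Offsets are absolute. */
    fl.l_start = offset;                /* The single byte to lock. */
    fl.l_len = 1;
    fl.l_type = lock_it ? F_WRLCK : F_UNLCK;

    /* F_SETLKW blocks until the lock is granted. */
    return (fcntl(fd, lock_it ? F_SETLKW : F_SETLK, &fl) == -1 ? -1 : 0);
}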
diff --git a/libdb/mutex/mut_pthread.c b/libdb/mutex/mut_pthread.c
new file mode 100644
index 0000000..04d4b08
--- /dev/null
+++ b/libdb/mutex/mut_pthread.c
@@ -0,0 +1,361 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+#undef MSG1
+#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
+#undef MSG2
+#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+#endif
+
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+#define pthread_cond_signal _lwp_cond_signal
+#define pthread_cond_wait _lwp_cond_wait
+#define pthread_mutex_lock _lwp_mutex_lock
+#define pthread_mutex_trylock _lwp_mutex_trylock
+#define pthread_mutex_unlock _lwp_mutex_unlock
+/*
+ * _lwp_self returns the LWP process ID, which isn't a unique per-thread
+ * identifier.  Use pthread_self instead; it appears to work even if we
+ * are not a pthreads application.
+ */
+#define pthread_mutex_destroy(x) 0
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+#define pthread_cond_signal cond_signal
+#define pthread_cond_wait cond_wait
+#define pthread_mutex_lock mutex_lock
+#define pthread_mutex_trylock mutex_trylock
+#define pthread_mutex_unlock mutex_unlock
+#define pthread_self thr_self
+#define pthread_mutex_destroy mutex_destroy
+#endif
+
+#define PTHREAD_UNLOCK_ATTEMPTS 5
+
+/*
+ * __db_pthread_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_pthread_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+ int ret;
+
+ ret = 0;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
+ * allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ }
+
+#ifdef HAVE_MUTEX_PTHREADS
+ {
+ pthread_condattr_t condattr, *condattrp = NULL;
+ pthread_mutexattr_t mutexattr, *mutexattrp = NULL;
+
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_mutexattr_init(&mutexattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
+ if (ret == 0)
+ ret = pthread_mutexattr_setpshared(
+ &mutexattr, PTHREAD_PROCESS_SHARED);
+#endif
+ mutexattrp = &mutexattr;
+ }
+
+ if (ret == 0)
+ ret = pthread_mutex_init(&mutexp->mutex, mutexattrp);
+ if (mutexattrp != NULL)
+ pthread_mutexattr_destroy(mutexattrp);
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_condattr_init(&condattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
+ if (ret == 0) {
+ condattrp = &condattr;
+ ret = pthread_condattr_setpshared(
+ &condattr, PTHREAD_PROCESS_SHARED);
+ }
+#endif
+ }
+
+ if (ret == 0)
+ ret = pthread_cond_init(&mutexp->cond, condattrp);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ if (condattrp != NULL)
+ (void)pthread_condattr_destroy(condattrp);
+ }
+
+ }
+#endif
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+ /*
+ * XXX
+ * Gcc complains about missing braces in the static initializations of
+ * lwp_cond_t and lwp_mutex_t structures because the structures contain
+ * sub-structures/unions and the Solaris include file that defines the
+ * initialization values doesn't have surrounding braces. There's not
+ * much we can do.
+ */
+ if (LF_ISSET(MUTEX_THREAD)) {
+ static lwp_mutex_t mi = DEFAULTMUTEX;
+
+ mutexp->mutex = mi;
+ } else {
+ static lwp_mutex_t mi = SHAREDMUTEX;
+
+ mutexp->mutex = mi;
+ }
+ if (LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (LF_ISSET(MUTEX_THREAD)) {
+ static lwp_cond_t ci = DEFAULTCV;
+
+ mutexp->cond = ci;
+ } else {
+ static lwp_cond_t ci = SHAREDCV;
+
+ mutexp->cond = ci;
+ }
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+ {
+ int type;
+
+ type = LF_ISSET(MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
+
+ ret = mutex_init(&mutexp->mutex, type, NULL);
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ ret = cond_init(&mutexp->cond, type, NULL);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }}
+#endif
+
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ if (ret == 0)
+ F_SET(mutexp, MUTEX_INITED);
+ else
+ __db_err(dbenv,
+ "unable to initialize mutex: %s", strerror(ret));
+
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ u_int32_t nspins;
+ int i, ret, waited;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins)
+ if (pthread_mutex_trylock(&mutexp->mutex) == 0)
+ break;
+
+ if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ goto err;
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ for (waited = 0; mutexp->locked != 0; waited = 1) {
+ ret = pthread_cond_wait(&mutexp->cond, &mutexp->mutex);
+ /*
+ * !!!
+ * Solaris bug workaround:
+ * pthread_cond_wait() sometimes returns ETIME -- out
+ * of sheer paranoia, check both ETIME and ETIMEDOUT.
+ * We believe this happens when the application uses
+ * SIGALRM for some purpose, e.g., the C library sleep
+ * call, and Solaris delivers the signal to the wrong
+ * LWP.
+ */
+ if (ret != 0 && ret != EINTR &&
+#ifdef ETIME
+ ret != ETIME &&
+#endif
+ ret != ETIMEDOUT) {
+ (void)pthread_mutex_unlock(&mutexp->mutex);
+ return (ret);
+ }
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+
+#ifdef DIAGNOSTIC
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ /*
+ * According to HP-UX engineers contacted by Netscape,
+ * pthread_mutex_unlock() will occasionally return EFAULT
+ * for no good reason on mutexes in shared memory regions,
+ * and the correct caller behavior is to try again. Do
+ * so, up to PTHREAD_UNLOCK_ATTEMPTS consecutive times.
+ * Note that we don't bother to restrict this to HP-UX;
+ * it should be harmless elsewhere. [#2471]
+ */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ if (ret != 0)
+ goto err;
+ } else {
+ if (nspins == mutexp->spins)
+ ++mutexp->mutex_set_nowait;
+ else if (nspins > 0) {
+ ++mutexp->mutex_set_spin;
+ mutexp->mutex_set_spins += mutexp->spins - nspins;
+ } else
+ ++mutexp->mutex_set_wait;
+#ifdef DIAGNOSTIC
+ if (mutexp->locked) {
+ char msgbuf[128];
+ (void)snprintf(msgbuf,
+ sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
+ (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
+ }
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ }
+ return (0);
+
+err: __db_err(dbenv, "unable to lock mutex: %s", strerror(ret));
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ int i, ret;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
+#endif
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ goto err;
+
+ mutexp->locked = 0;
+
+ if ((ret = pthread_cond_signal(&mutexp->cond)) != 0)
+ return (ret);
+
+ } else
+ mutexp->locked = 0;
+
+ /* See comment above; workaround for [#2471]. */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ return (ret);
+
+err: __db_err(dbenv, "unable to unlock mutex: %s", strerror(ret));
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ int ret;
+
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ if ((ret = pthread_mutex_destroy(&mutexp->mutex)) != 0)
+ __db_err(NULL, "unable to destroy mutex: %s", strerror(ret));
+ return (ret);
+}
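
The MUTEX_SELF_BLOCK path above boils down to a flag guarded by a pthread
mutex and condition variable: lockers wait on the condition while the flag
is set, and the unlocker clears the flag and signals.  A minimal sketch of
that pattern, with the structure and function names invented for
illustration:

#include <pthread.h>

struct self_block {
    pthread_mutex_t mtx;
    pthread_cond_t cond;
    int locked;                         /* 0 = available, 1 = held. */
};

static int
self_block_lock(struct self_block *sb)
{
    int ret;

    if ((ret = pthread_mutex_lock(&sb->mtx)) != 0)
        return (ret);
    /* Loop: pthread_cond_wait may return spuriously. */
    while (sb->locked)
        if ((ret = pthread_cond_wait(&sb->cond, &sb->mtx)) != 0)
            break;
    if (ret == 0)
        sb->locked = 1;
    (void)pthread_mutex_unlock(&sb->mtx);
    return (ret);
}

static int
self_block_unlock(struct self_block *sb)
{
    int ret;

    if ((ret = pthread_mutex_lock(&sb->mtx)) != 0)
        return (ret);
    sb->locked = 0;
    ret = pthread_cond_signal(&sb->cond);
    (void)pthread_mutex_unlock(&sb->mtx);
    return (ret);
}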
diff --git a/libdb/mutex/mut_tas.c b/libdb/mutex/mut_tas.c
new file mode 100644
index 0000000..8ad8a4a
--- /dev/null
+++ b/libdb/mutex/mut_tas.c
@@ -0,0 +1,211 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#ifdef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
+#include "alphalinux.gcc"
+#endif
+#ifdef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
+#include "sparc32linux.gcc"
+#endif
+#ifdef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
+#include "sparc64linux.gcc"
+#endif
+#ifdef HAVE_MUTEX_S390_LINUX_ASSEMBLY
+#include "s390linux.gcc"
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+/*
+ * __db_tas_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_tas_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+
+ /* Check alignment. */
+ DB_ASSERT(((db_alignp_t)mutexp & (MUTEX_ALIGN - 1)) == 0);
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
+ * allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ }
+
+ /* Initialize the lock. */
+ if (MUTEX_INIT(&mutexp->tas))
+ return (__os_get_errno());
+
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_tas_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ u_long ms;
+ int nspins;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ ms = 1;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+relock:
+#endif
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+ /*
+ * HP semaphores are unlocked automatically when a holding
+ * process exits. If the mutex appears to be locked
+ * (mutexp->locked != 0) but we got here, assume this has
+ * happened. Stick our own pid into mutexp->locked and
+ * lock again. (The default state of the mutexes used to
+ * block in __lock_get_internal is locked, so exiting with
+ * a locked mutex is reasonable behavior for a process that
+ * happened to initialize or use one of them.)
+ */
+ if (mutexp->locked != 0) {
+ __os_id(&mutexp->locked);
+ goto relock;
+ }
+ /*
+ * If we make it here, locked == 0, the diagnostic won't fire,
+ * and we were really unlocked by someone calling the
+ * DB mutex unlock function.
+ */
+#endif
+#ifdef DIAGNOSTIC
+ if (mutexp->locked != 0)
+ __db_err(dbenv,
+ "__db_tas_mutex_lock: ERROR: lock currently in use: ID: %lu",
+ (u_long)mutexp->locked);
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ __os_id(&mutexp->locked);
+#endif
+ if (ms == 1)
+ ++mutexp->mutex_set_nowait;
+ else
+ ++mutexp->mutex_set_wait;
+ return (0);
+ }
+
+ /* Yield the processor; wait 1ms initially, up to 1 second. */
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_tas_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_tas_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ __db_err(dbenv,
+ "__db_tas_mutex_unlock: ERROR: lock already unlocked");
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ mutexp->locked = 0;
+#endif
+
+ MUTEX_UNSET(&mutexp->tas);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_tas_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ MUTEX_DESTROY(&mutexp->tas);
+
+ return (0);
+}
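
MUTEX_SET, MUTEX_UNSET and MUTEX_INIT expand to per-architecture code
pulled in by LOAD_ACTUAL_MUTEX_CODE.  As a rough illustration of the
spin-then-yield strategy -- written against the GCC __sync builtins as a
stand-in, not the macros this port actually uses -- with the 1ms-to-1s
exponential backoff:

#include <time.h>

#define SPINS   50                      /* Placeholder spin count. */

/* Sketch: acquire a test-and-set lock, spinning before backing off. */
static void
tas_lock(volatile int *tas)
{
    struct timespec ts;
    unsigned int ms, n;

    for (ms = 1;;) {
        /* Spin: __sync_lock_test_and_set returns the previous value. */
        for (n = 0; n < SPINS; ++n)
            if (__sync_lock_test_and_set(tas, 1) == 0)
                return;

        /* Yield the processor; wait 1ms initially, up to 1 second. */
        ts.tv_sec = ms / 1000;
        ts.tv_nsec = (long)(ms % 1000) * 1000000;
        (void)nanosleep(&ts, NULL);
        if ((ms <<= 1) > 1000)
            ms = 1000;
    }
}

static void
tas_unlock(volatile int *tas)
{
    __sync_lock_release(tas);           /* Stores 0 with release semantics. */
}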
diff --git a/libdb/mutex/mut_win32.c b/libdb/mutex/mut_win32.c
new file mode 100644
index 0000000..fef57f0
--- /dev/null
+++ b/libdb/mutex/mut_win32.c
@@ -0,0 +1,227 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+/* We don't want to run this code even in "ordinary" diagnostic mode. */
+#undef MUTEX_DIAG
+
+#define GET_HANDLE(mutexp, event) do { \
+ char idbuf[13]; \
+ \
+ snprintf(idbuf, sizeof idbuf, "db.m%08x", mutexp->id); \
+ event = CreateEvent(NULL, FALSE, FALSE, idbuf); \
+ if (event == NULL) \
+ return (__os_win32_errno()); \
+} while (0)
+
+/*
+ * __db_win32_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_win32_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
+ * allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, and the application isn't
+ * threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ }
+
+ mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
+ mutexp->spins = __os_spin(dbenv);
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_win32_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ HANDLE event;
+ int ret, ms, nspins;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ event = NULL;
+ ms = 50;
+ ret = 0;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+
+#ifdef DIAGNOSTIC
+ if (mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_lock: mutex double-locked!");
+
+ __os_id(&mutexp->locked);
+#endif
+
+ if (event == NULL)
+ ++mutexp->mutex_set_nowait;
+ else {
+ ++mutexp->mutex_set_wait;
+ CloseHandle(event);
+ InterlockedDecrement(&mutexp->nwaiters);
+#ifdef MUTEX_DIAG
+ if (ret != WAIT_OBJECT_0) {
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Lost signal on mutex %p, "
+ "id %d, ms %d\n",
+ now.QuadPart, mutexp, mutexp->id, ms);
+ }
+#endif
+ }
+
+ return (0);
+ }
+
+ /*
+ * Yield the processor; wait 50 ms initially, up to 1 second. This
+ * loop is needed to work around a race where the signal from the
+ * unlocking thread gets lost. We start at 50 ms because it's unlikely
+ * to happen often and we want to avoid wasting CPU.
+ */
+ if (event == NULL) {
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Waiting on mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ InterlockedIncrement(&mutexp->nwaiters);
+ GET_HANDLE(mutexp, event);
+ }
+ if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED)
+ return (__os_win32_errno());
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_win32_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ int ret;
+ HANDLE event;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->tas || !mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: lock already unlocked");
+
+ mutexp->locked = 0;
+#endif
+ MUTEX_UNSET(&mutexp->tas);
+
+ ret = 0;
+
+ if (mutexp->nwaiters > 0) {
+ GET_HANDLE(mutexp, event);
+
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Signalling mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ if (!PulseEvent(event))
+ ret = __os_win32_errno();
+
+ CloseHandle(event);
+ }
+
+#ifdef DIAGNOSTIC
+ if (ret != 0)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: unlock failed");
+#endif
+
+ return (ret);
+}
+
+/*
+ * __db_win32_mutex_destroy --
+ * Destroy a DB_MUTEX - noop with this implementation.
+ *
+ * PUBLIC: int __db_win32_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_win32_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ return (0);
+}
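
The Win32 path parks waiters on a named auto-reset event derived from the
mutex id (see GET_HANDLE above).  The waiter/waker handshake, stripped of
the DB specifics, looks roughly like the sketch below; it assumes an ANSI
build (hence CreateEventA), and the event name and timeout are
placeholders.

#include <windows.h>

/* Sketch: waiter side -- open/create the named event and block on it. */
static DWORD
wait_for_wakeup(const char *name, DWORD timeout_ms)
{
    HANDLE event;
    DWORD ret;

    /* Auto-reset, initially non-signaled; both sides use the same name. */
    if ((event = CreateEventA(NULL, FALSE, FALSE, name)) == NULL)
        return (GetLastError());
    ret = WaitForSingleObject(event, timeout_ms);
    CloseHandle(event);
    return (ret);
}

/* Sketch: waker side -- signal whoever is currently blocked. */
static int
wake_waiter(const char *name)
{
    HANDLE event;
    BOOL ok;

    if ((event = CreateEventA(NULL, FALSE, FALSE, name)) == NULL)
        return (-1);
    ok = PulseEvent(event);             /* Releases a waiter, if any. */
    CloseHandle(event);
    return (ok ? 0 : -1);
}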
diff --git a/libdb/mutex/mutex.c b/libdb/mutex/mutex.c
new file mode 100644
index 0000000..97fec83
--- /dev/null
+++ b/libdb/mutex/mutex.c
@@ -0,0 +1,395 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/txn.h"
+#endif
+
+static int __db_mutex_alloc_int __P((DB_ENV *, REGINFO *, DB_MUTEX **));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static REGMAINT * __db_mutex_maint __P((DB_ENV *, REGINFO *));
+#endif
+
+/*
+ * __db_mutex_setup --
+ * External interface to allocate, and/or initialize, record
+ * mutexes.
+ *
+ * PUBLIC: int __db_mutex_setup __P((DB_ENV *, REGINFO *, void *, u_int32_t));
+ */
+int
+__db_mutex_setup(dbenv, infop, ptr, flags)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ void *ptr;
+ u_int32_t flags;
+{
+ DB_MUTEX *mutex;
+ REGMAINT *maint;
+ u_int32_t iflags, offset;
+ int ret;
+
+ ret = 0;
+ /*
+ * If they indicated the region is not locked, then lock it.
+ * This is only needed when we have unusual mutex resources.
+ * (i.e., MUTEX_NO_MALLOC_LOCKS or HAVE_MUTEX_SYSTEM_RESOURCES).
+ */
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_LOCK(dbenv, infop);
+#endif
+ /*
+ * Allocate the mutex if they asked us to.
+ */
+ mutex = NULL;
+ if (LF_ISSET(MUTEX_ALLOC)) {
+ if ((ret = __db_mutex_alloc_int(dbenv, infop, ptr)) != 0)
+ goto err;
+ mutex = *(DB_MUTEX **)ptr;
+ } else
+ mutex = (DB_MUTEX *)ptr;
+
+ /*
+ * Set up to initialize the mutex.
+ */
+ iflags = LF_ISSET(MUTEX_THREAD | MUTEX_SELF_BLOCK);
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_LOCK;
+ break;
+ case REGION_TYPE_MPOOL:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_MPOOL;
+ break;
+ default:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_GEN;
+ break;
+ }
+ maint = NULL;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (!LF_ISSET(MUTEX_NO_RECORD))
+ maint = (REGMAINT *)__db_mutex_maint(dbenv, infop);
+#endif
+
+ ret = __db_mutex_init(dbenv, mutex, offset, iflags, infop, maint);
+err:
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_UNLOCK(dbenv, infop);
+#endif
+ /*
+ * If we allocated the mutex but failed to initialize it,
+ * we must free it before returning.
+ * !!!
+ * Free must be done after releasing region lock.
+ */
+ if (ret != 0 && LF_ISSET(MUTEX_ALLOC) && mutex != NULL) {
+ __db_mutex_free(dbenv, infop, mutex);
+ *(DB_MUTEX **)ptr = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __db_mutex_alloc_int --
+ * Allocate and initialize a mutex.
+ */
+static int
+__db_mutex_alloc_int(dbenv, infop, storep)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ DB_MUTEX **storep;
+{
+ int ret;
+
+ /*
+ * If the architecture supports mutexes in heap memory, use heap memory.
+ * If it doesn't, we have to allocate space in a region. If allocation
+ * in the region fails, fall back to allocating from the mpool region:
+ * it's big, it almost always exists, and if it's entirely dirty we can
+ * free buffers until memory is available.
+ */
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX), MUTEX_ALIGN, storep);
+
+ if (ret == ENOMEM && MPOOL_ON(dbenv)) {
+ DB_MPOOL *dbmp;
+
+ dbmp = dbenv->mp_handle;
+ if ((ret = __memp_alloc(dbmp,
+ dbmp->reginfo, NULL, sizeof(DB_MUTEX), NULL, storep)) == 0)
+ (*storep)->flags = MUTEX_MPOOL;
+ } else
+ (*storep)->flags = 0;
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ ret = __os_calloc(dbenv, 1, sizeof(DB_MUTEX), storep);
+#endif
+ if (ret != 0)
+ __db_err(dbenv, "Unable to allocate memory for mutex");
+ return (ret);
+}
+
+/*
+ * __db_mutex_free --
+ * Free a mutex.
+ *
+ * PUBLIC: void __db_mutex_free __P((DB_ENV *, REGINFO *, DB_MUTEX *));
+ */
+void
+__db_mutex_free(dbenv, infop, mutexp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ DB_MUTEX *mutexp;
+{
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ R_LOCK(dbenv, infop);
+#if defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (F_ISSET(mutexp, MUTEX_INITED))
+ __db_shlocks_clear(mutexp, infop, NULL);
+#endif
+ if (F_ISSET(mutexp, MUTEX_MPOOL)) {
+ DB_MPOOL *dbmp;
+
+ dbmp = dbenv->mp_handle;
+ R_LOCK(dbenv, dbmp->reginfo);
+ __db_shalloc_free(dbmp->reginfo[0].addr, mutexp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ } else
+ __db_shalloc_free(infop->addr, mutexp);
+ R_UNLOCK(dbenv, infop);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ __os_free(dbenv, mutexp);
+#endif
+}
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __db_shreg_locks_record --
+ * Record an entry in the shared locks area.
+ * Region lock must be held in caller.
+ */
+static int
+__db_shreg_locks_record(dbenv, mutexp, infop, rp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int i;
+
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return (0);
+ DB_ASSERT(mutexp->reg_off == INVALID_ROFF);
+ rp->stat.st_records++;
+ i = (roff_t *)R_ADDR(infop, rp->regmutex_hint) - &rp->regmutexes[0];
+ if (rp->regmutexes[i] != INVALID_ROFF) {
+ /*
+ * Our hint failed, search for an open slot.
+ */
+ rp->stat.st_hint_miss++;
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] == INVALID_ROFF)
+ break;
+ if (i == rp->reglocks) {
+ rp->stat.st_max_locks++;
+ __db_err(dbenv,
+ "Region mutexes: Exceeded maximum lock slots %lu",
+ (u_long)rp->reglocks);
+ return (ENOMEM);
+ }
+ } else
+ rp->stat.st_hint_hit++;
+ /*
+ * When we get here, i is an empty slot. Record this
+ * mutex, set hint to point to the next slot and we are done.
+ */
+ rp->regmutexes[i] = R_OFFSET(infop, mutexp);
+ mutexp->reg_off = R_OFFSET(infop, &rp->regmutexes[i]);
+ rp->regmutex_hint = (i < rp->reglocks - 1) ?
+ R_OFFSET(infop, &rp->regmutexes[i+1]) :
+ R_OFFSET(infop, &rp->regmutexes[0]);
+ return (0);
+}
+
+/*
+ * __db_shreg_locks_clear --
+ * Erase an entry in the shared locks area.
+ *
+ * PUBLIC: void __db_shreg_locks_clear __P((DB_MUTEX *, REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_clear(mutexp, infop, rp)
+ DB_MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ /*
+ * !!!
+ * Assumes the caller's region lock is held.
+ */
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return;
+ /*
+ * This function is generally only called on a forcible remove of an
+ * environment. We recorded our index in the mutex, find and clear it.
+ */
+ DB_ASSERT(mutexp->reg_off != INVALID_ROFF);
+ DB_ASSERT(*(roff_t *)R_ADDR(infop, mutexp->reg_off) == \
+ R_OFFSET(infop, mutexp));
+ *(roff_t *)R_ADDR(infop, mutexp->reg_off) = 0;
+ if (rp != NULL) {
+ rp->regmutex_hint = mutexp->reg_off;
+ rp->stat.st_clears++;
+ }
+ mutexp->reg_off = INVALID_ROFF;
+ __db_mutex_destroy(mutexp);
+}
+
+/*
+ * __db_shreg_locks_destroy --
+ * Destroy all mutexes in a region's range.
+ *
+ * PUBLIC: void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_destroy(infop, rp)
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int32_t i;
+
+ /*
+ * Go through the list of all mutexes and destroy them.
+ */
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] != 0) {
+ rp->stat.st_destroys++;
+ __db_mutex_destroy((DB_MUTEX *)R_ADDR(infop,
+ rp->regmutexes[i]));
+ }
+}
+
+/*
+ * __db_shreg_mutex_init --
+ * Initialize a shared memory mutex.
+ *
+ * PUBLIC: int __db_shreg_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t,
+ * PUBLIC: u_int32_t, REGINFO *, REGMAINT *));
+ */
+int
+__db_shreg_mutex_init(dbenv, mutexp, offset, flags, infop, rp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t offset;
+ u_int32_t flags;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ int ret;
+
+ if ((ret = __db_mutex_init_int(dbenv, mutexp, offset, flags)) != 0)
+ return (ret);
+ /*
+ * Some mutexes cannot be recorded, but we want one interface.
+ * So, if we have no REGMAINT, then just return.
+ */
+ if (rp == NULL)
+ return (ret);
+ /*
+ * !!!
+ * Since __db_mutex_init_int is a macro, we may not be
+ * using the 'offset' as it is only used for one type
+ * of mutex. We COMPQUIET it here, after the call above.
+ */
+ COMPQUIET(offset, 0);
+ ret = __db_shreg_locks_record(dbenv, mutexp, infop, rp);
+
+ /*
+ * If we couldn't record it and we are returning an error,
+ * we need to destroy the mutex we just created.
+ */
+ if (ret)
+ __db_mutex_destroy(mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_shreg_maintinit --
+ * Initialize a region's maintenance information.
+ *
+ * PUBLIC: void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+ */
+void
+__db_shreg_maintinit(infop, addr, size)
+ REGINFO *infop;
+ void *addr;
+ size_t size;
+{
+ REGMAINT *rp;
+ u_int32_t i;
+
+ rp = (REGMAINT *)addr;
+ memset(addr, 0, sizeof(REGMAINT));
+ rp->reglocks = size / sizeof(roff_t);
+ rp->regmutex_hint = R_OFFSET(infop, &rp->regmutexes[0]);
+ for (i = 0; i < rp->reglocks; i++)
+ rp->regmutexes[i] = INVALID_ROFF;
+}
+
+static REGMAINT *
+__db_mutex_maint(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ roff_t moff;
+
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ moff = ((DB_LOCKREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_LOG:
+ moff = ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_MPOOL:
+ moff = ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_TXN:
+ moff = ((DB_TXNREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ default:
+ __db_err(dbenv,
+ "Attempting to record mutex in a region not set up to do so");
+ return (NULL);
+ }
+ return ((REGMAINT *)R_ADDR(infop, moff));
+}
+#endif /* HAVE_MUTEX_SYSTEM_RESOURCES */
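
__db_shreg_locks_record above implements a simple hint-based free-slot
search: try the slot the hint points at, fall back to a linear scan, and
leave the hint at the next slot.  The idea in isolation, with the offset
and maintenance types simplified to plain integers for illustration:

#include <stddef.h>

#define SLOT_EMPTY      0               /* Stand-in for INVALID_ROFF. */

/*
 * Sketch: record "value" in the first empty slot, preferring the hinted
 * slot.  Returns the slot index, or -1 if the table is full.
 */
static int
record_with_hint(unsigned long *slots, size_t nslots, size_t *hintp,
    unsigned long value)
{
    size_t i;

    i = *hintp;
    if (slots[i] != SLOT_EMPTY) {
        /* The hint missed -- search for an open slot. */
        for (i = 0; i < nslots && slots[i] != SLOT_EMPTY; ++i)
            ;
        if (i == nslots)
            return (-1);
    }
    slots[i] = value;
    /* Point the hint at the next slot, wrapping at the end. */
    *hintp = (i + 1 < nslots) ? i + 1 : 0;
    return ((int)i);
}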
diff --git a/libdb/mutex/tm.c b/libdb/mutex/tm.c
new file mode 100644
index 0000000..4af1b19
--- /dev/null
+++ b/libdb/mutex/tm.c
@@ -0,0 +1,627 @@
+/*
+ * Standalone mutex tester for Berkeley DB mutexes.
+ */
+#include "db_config.h"
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+#include <pthread.h>
+#endif
+
+#include "db_int.h"
+
+void exec_proc();
+void tm_file_init();
+void map_file();
+void run_proc();
+void *run_thread();
+void *run_thread_wake();
+void tm_mutex_destroy();
+void tm_mutex_init();
+void tm_mutex_stats();
+void unmap_file();
+
+#define MUTEX_WAKEME 0x80 /* Wake-me flag. */
+
+DB_ENV dbenv; /* Fake out DB. */
+size_t len; /* Backing file size. */
+int align; /* Mutex alignment in file. */
+int quit; /* End-of-test flag. */
+char *file = "mutex.file"; /* Backing file. */
+
+int maxlocks = 20; /* -l: Backing locks. */
+int nlocks = 10000; /* -n: Locks per processes. */
+int nprocs = 20; /* -p: Processes. */
+int child; /* -s: Slave. */
+int nthreads = 1; /* -t: Threads. */
+int verbose; /* -v: Verbosity. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind;
+ extern char *optarg;
+ pid_t pid;
+ int ch, eval, i, status;
+ char *tmpath;
+
+ tmpath = argv[0];
+ while ((ch = getopt(argc, argv, "l:n:p:st:v")) != EOF)
+ switch(ch) {
+ case 'l':
+ maxlocks = atoi(optarg);
+ break;
+ case 'n':
+ nlocks = atoi(optarg);
+ break;
+ case 'p':
+ nprocs = atoi(optarg);
+ break;
+ case 's':
+ child = 1;
+ break;
+ case 't':
+ nthreads = atoi(optarg);
+#if !defined(HAVE_MUTEX_PTHREADS) && !defined(BUILD_PTHREADS_ANYWAY)
+ if (nthreads != 1) {
+ (void)fprintf(stderr,
+ "tm: pthreads not available or not compiled for this platform.\n");
+ return (EXIT_FAILURE);
+ }
+#endif
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ (void)fprintf(stderr,
+ "usage: tm [-v] [-l maxlocks] [-n locks] [-p procs] [-t threads]\n");
+ return (EXIT_FAILURE);
+ }
+ argc -= optind;
+ argv += optind;
+
+ /*
+ * The file layout:
+ * DB_MUTEX[1] per-thread mutex array lock
+ * DB_MUTEX[nthreads] per-thread mutex array
+ * DB_MUTEX[maxlocks] per-lock mutex array
+ * u_long[maxlocks][2] per-lock ID array
+ */
+ align = ALIGN(sizeof(DB_MUTEX) * 2, MUTEX_ALIGN);
+ len =
+ align * (1 + nthreads + maxlocks) + sizeof(u_long) * maxlocks * 2;
+ printf(
+ "mutex alignment %d, structure alignment %d, backing file %lu bytes\n",
+ MUTEX_ALIGN, align, (u_long)len);
+
+ if (child) {
+ run_proc();
+ return (EXIT_SUCCESS);
+ }
+
+ tm_file_init();
+ tm_mutex_init();
+
+ printf(
+ "%d proc, %d threads/proc, %d lock requests from %d locks:\n",
+ nprocs, nthreads, nlocks, maxlocks);
+ for (i = 0; i < nprocs; ++i)
+ switch (fork()) {
+ case -1:
+ perror("fork");
+ return (EXIT_FAILURE);
+ case 0:
+ exec_proc(tmpath);
+ break;
+ default:
+ break;
+ }
+
+ eval = EXIT_SUCCESS;
+ while ((pid = wait(&status)) != (pid_t)-1) {
+ fprintf(stderr,
+ "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status));
+ if (WEXITSTATUS(status) != 0)
+ eval = EXIT_FAILURE;
+ }
+
+ tm_mutex_stats();
+ tm_mutex_destroy();
+
+ printf("tm: exit status: %s\n",
+ eval == EXIT_SUCCESS ? "success" : "failed!");
+ return (eval);
+}
+
+void
+exec_proc(tmpath)
+ char *tmpath;
+{
+ char *argv[10], **ap, b_l[10], b_n[10], b_t[10];
+
+ ap = &argv[0];
+ *ap++ = "tm";
+ sprintf(b_l, "-l%d", maxlocks);
+ *ap++ = b_l;
+ sprintf(b_n, "-n%d", nlocks);
+ *ap++ = b_n;
+ *ap++ = "-s";
+ sprintf(b_t, "-t%d", nthreads);
+ *ap++ = b_t;
+ if (verbose)
+ *ap++ = "-v";
+
+ *ap = NULL;
+ execvp(tmpath, argv);
+
+ fprintf(stderr, "%s: %s\n", tmpath, strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+void
+run_proc()
+{
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ pthread_t *kidsp, wakep;
+ int i, status;
+ void *retp;
+#endif
+ __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */
+
+ srand((u_int)time(NULL) / getpid()); /* Initialize random numbers. */
+
+ if (nthreads == 1) /* Simple case. */
+ exit((int)run_thread((void *)0));
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Spawn off threads. We have nthreads all locking and going to
+ * sleep, and one other thread cycling through and waking them up.
+ */
+ if ((kidsp =
+ (pthread_t *)calloc(sizeof(pthread_t), nthreads)) == NULL) {
+ fprintf(stderr, "tm: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i < nthreads; i++)
+ if ((errno = pthread_create(
+ &kidsp[i], NULL, run_thread, (void *)i)) != 0) {
+ fprintf(stderr, "tm: failed spawning thread %d: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if ((errno = pthread_create(
+ &wakep, NULL, run_thread_wake, (void *)0)) != 0) {
+ fprintf(stderr, "tm: failed spawning wakeup thread: %s\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* Wait for the threads to exit. */
+ status = 0;
+ for (i = 0; i < nthreads; i++) {
+ pthread_join(kidsp[i], &retp);
+ if (retp != NULL) {
+ fprintf(stderr,
+ "tm: thread %d exited with error\n", i);
+ status = EXIT_FAILURE;
+ }
+ }
+ free(kidsp);
+
+ /* Signal wakeup thread to stop. */
+ quit = 1;
+ pthread_join(wakep, &retp);
+ if (retp != NULL) {
+ fprintf(stderr, "tm: wakeup thread exited with error\n");
+ status = EXIT_FAILURE;
+ }
+
+ exit(status);
+#endif
+}
+
+void *
+run_thread(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *lm_addr, *tm_addr, *mp;
+ u_long gid1, gid2, *id_addr;
+ int fd, i, lock, id, nl, remap;
+
+ /* Set local and global per-thread ID. */
+ id = (int)arg;
+ gid1 = (u_long)getpid();
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ gid2 = (u_long)pthread_self();
+#else
+ gid2 = 0;
+#endif
+ printf("\tPID: %lu; TID: %lx; ID: %d\n", gid1, gid2, id);
+
+ nl = nlocks;
+ for (gm_addr = NULL, remap = 0;;) {
+ /* Map in the file as necessary. */
+ if (gm_addr == NULL) {
+ map_file(&gm_addr, &tm_addr, &lm_addr, &id_addr, &fd);
+ remap = (rand() % 100) + 35;
+ }
+
+ /* Select and acquire a data lock. */
+ lock = rand() % maxlocks;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (verbose)
+ printf("%lu/%lx: %03d\n", gid1, gid2, lock);
+
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: never got lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (id_addr[lock * 2] != 0) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx granted lock %d held by %lu/%lx)\n",
+ gid1, gid2,
+ lock, id_addr[lock * 2], id_addr[lock * 2 + 1]);
+ return ((void *)EXIT_FAILURE);
+ }
+ id_addr[lock * 2] = gid1;
+ id_addr[lock * 2 + 1] = gid2;
+
+ /*
+ * Pretend to do some work, periodically checking to see if
+ * we still hold the mutex.
+ */
+ for (i = 0; i < 3; ++i) {
+ __os_sleep(&dbenv, 0, rand() % 3);
+ if (id_addr[lock * 2] != gid1 ||
+ id_addr[lock * 2 + 1] != gid2) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx stole lock %d from %lu/%lx)\n",
+ id_addr[lock * 2],
+ id_addr[lock * 2 + 1], lock, gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Test self-blocking and unlocking by other threads/processes:
+ *
+ * acquire the global lock
+ * set our wakeup flag
+ * release the global lock
+ * acquire our per-thread lock
+ *
+ * The wakeup thread will wake us up.
+ */
+ if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "%lu/%lx: global lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ F_SET(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ /* Time passes... */
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ fprintf(stderr, "%lu/%lx: %03d wakeup flag still set\n",
+ gid1, gid2, id);
+ return ((void *)EXIT_FAILURE);
+ }
+#endif
+
+ /* Release the data lock. */
+ id_addr[lock * 2] = id_addr[lock * 2 + 1] = 0;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "%lu/%lx: wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if (--nl % 100 == 0)
+ fprintf(stderr, "%lu/%lx: %d\n", gid1, gid2, nl);
+
+ if (nl == 0 || --remap == 0) {
+ unmap_file((void *)gm_addr, fd);
+ gm_addr = NULL;
+
+ if (nl == 0)
+ break;
+
+ __os_sleep(&dbenv, rand() % 3, 0);
+ }
+ }
+
+ return (NULL);
+}
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+/*
+ * run_thread_wake --
+ * Thread to wake up other threads that are sleeping.
+ */
+void *
+run_thread_wake(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *tm_addr, *mp;
+ int fd, id;
+
+ arg = NULL;
+ map_file(&gm_addr, &tm_addr, NULL, NULL, &fd);
+
+ /* Loop, waking up sleepers and periodically sleeping ourselves. */
+ while (!quit) {
+ id = 0;
+
+ /* Acquire the global lock. */
+retry: if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global lock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+next: mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ F_CLR(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "wt: wakeup failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+ if (++id < nthreads && id % 3 != 0)
+ goto next;
+
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global unlock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ __os_sleep(&dbenv, 0, 500);
+
+ if (id < nthreads)
+ goto retry;
+ }
+ return (NULL);
+}
+#endif
+
+/*
+ * tm_file_init --
+ * Initialize the backing file.
+ */
+void
+tm_file_init()
+{
+ int fd;
+
+ /* Initialize the backing file. */
+ printf("Create the backing file...\n");
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+ if ((fd = shm_open(file, O_CREAT | O_RDWR | O_TRUNC,
+#else
+ (void)remove(file);
+ if ((fd = open(file, O_CREAT | O_RDWR | O_TRUNC,
+#endif
+
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) == -1) {
+ (void)fprintf(stderr, "%s: open: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (lseek(fd, (off_t)len, SEEK_SET) != len || write(fd, &fd, 1) != 1) {
+ (void)fprintf(stderr,
+ "%s: seek/write: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ (void)close(fd);
+}
+
+/*
+ * tm_mutex_init --
+ * Initialize the mutexes.
+ */
+void
+tm_mutex_init()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Initialize the global mutex...\n");
+ if (__db_mutex_init_int(&dbenv, gm_addr, 0, 0)) {
+ fprintf(stderr,
+ "__db_mutex_init (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Initialize the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_init_int(&dbenv, mp, 0, MUTEX_SELF_BLOCK)) {
+ fprintf(stderr, "__db_mutex_init (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "__db_mutex_init (per-thread %d) lock: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Initialize the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_init_int(&dbenv, mp, 0, 0)) {
+ fprintf(stderr, "__db_mutex_init (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * tm_mutex_destroy --
+ * Destroy the mutexes.
+ */
+void
+tm_mutex_destroy()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Destroy the global mutex...\n");
+ if (__db_mutex_destroy(gm_addr)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Destroy the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Destroy the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+#endif
+}
+
+/*
+ * tm_mutex_stats --
+ * Display mutex statistics.
+ */
+void
+tm_mutex_stats()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp;
+ int fd, i;
+
+ map_file(&gm_addr, NULL, &lm_addr, NULL, &fd);
+
+ printf("Per-lock mutex statistics...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ printf("mutex %2d: wait: %lu; no wait %lu\n", i,
+ (u_long)mp->mutex_set_wait, (u_long)mp->mutex_set_nowait);
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * map_file --
+ * Map in the backing file.
+ */
+void
+map_file(gm_addrp, tm_addrp, lm_addrp, id_addrp, fdp)
+ DB_MUTEX **gm_addrp, **tm_addrp, **lm_addrp;
+ u_long **id_addrp;
+ int *fdp;
+{
+ void *maddr;
+ int fd;
+
+#ifndef MAP_FAILED
+#define MAP_FAILED (void *)-1
+#endif
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+#ifdef HAVE_QNX
+ if ((fd = shm_open(file, O_RDWR, 0)) == -1) {
+#else
+ if ((fd = open(file, O_RDWR, 0)) == -1) {
+#endif
+ fprintf(stderr, "%s: open %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ maddr = mmap(NULL, len,
+ PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, (off_t)0);
+ if (maddr == MAP_FAILED) {
+ fprintf(stderr, "%s: mmap: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (gm_addrp != NULL)
+ *gm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align;
+ if (tm_addrp != NULL)
+ *tm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * nthreads;
+ if (lm_addrp != NULL)
+ *lm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * maxlocks;
+ if (id_addrp != NULL)
+ *id_addrp = (u_long *)maddr;
+ if (fdp != NULL)
+ *fdp = fd;
+}
+
+/*
+ * unmap_file --
+ * Discard backing file map.
+ */
+void
+unmap_file(maddr, fd)
+ void *maddr;
+ int fd;
+{
+ if (munmap(maddr, len) != 0) {
+ fprintf(stderr, "munmap: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (close(fd) != 0) {
+ fprintf(stderr, "close: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+}
diff --git a/libdb/mutex/uts4_cc.s b/libdb/mutex/uts4_cc.s
new file mode 100644
index 0000000..bacd81a
--- /dev/null
+++ b/libdb/mutex/uts4_cc.s
@@ -0,0 +1,27 @@
+ / See the file LICENSE for redistribution information.
+ /
+ / Copyright (c) 1997-2002
+ / Sleepycat Software. All rights reserved.
+ /
+ / $Id$
+ /
+ / int uts_lock ( int *p, int i );
+ / Update the lock word pointed to by p with the
+ / value i, using compare-and-swap.
+ / Returns 0 if update was successful.
+ / Returns 1 if update failed.
+ /
+ entry uts_lock
+ uts_lock:
+ using .,r15
+ st r2,8(sp) / Save R2
+ l r2,64+0(sp) / R2 -> word to update
+ slr r0, r0 / R0 = current lock value must be 0
+ l r1,64+4(sp) / R1 = new lock value
+ cs r0,r1,0(r2) / Try the update ...
+ be x / ... Success. Return 0
+ la r0,1 / ... Failure. Return 1
+ x: /
+ l r2,8(sp) / Restore R2
+ b 2(,r14) / Return to caller
+ drop r15
diff --git a/libdb/os/os_abs.c b/libdb/os/os_abs.c
new file mode 100644
index 0000000..538e335
--- /dev/null
+++ b/libdb/os/os_abs.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ *
+ * PUBLIC: int __os_abspath __P((const char *));
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ return (path[0] == '/');
+}
diff --git a/libdb/os/os_alloc.c b/libdb/os/os_alloc.c
new file mode 100644
index 0000000..388cf9f
--- /dev/null
+++ b/libdb/os/os_alloc.c
@@ -0,0 +1,458 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+static void __os_guard __P((DB_ENV *));
+
+union __db_alloc {
+ size_t size;
+ double align;
+};
+#endif
+
+/*
+ * !!!
+ * Correct for systems that return NULL when you allocate 0 bytes of memory.
+ * There are several places in DB where we allocate the number of bytes held
+ * by the key/data item, and it can be 0. Correct here so that malloc never
+ * returns a NULL for that reason (which behavior is permitted by ANSI). We
+ * could make these calls macros on non-Alpha architectures (that's where we
+ * saw the problem), but it's probably not worth the autoconf complexity.
+ *
+ * !!!
+ * Correct for systems that don't set errno when malloc and friends fail.
+ *
+ * !!!
+ * There is no circumstance in which we can call __os_umalloc, __os_urealloc
+ * or __os_ufree without an environment handle, as we need one to determine
+ * whether or not to use an application-specified malloc function. If we
+ * don't have an environment handle, we should be calling __os_XXX instead.
+ * Make DIAGNOSTIC blow up if we get this wrong.
+ *
+ * Out of memory.
+ * We wish to hold the whole sky,
+ * But we never will.
+ */
+
+/*
+ * __os_umalloc --
+ * A malloc(3) function that will use, in order of preference,
+ * the allocation function specified to the DB handle, the DB_ENV
+ * handle, or __os_malloc.
+ *
+ * PUBLIC: int __os_umalloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_umalloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_malloc == NULL) {
+ if (DB_GLOBAL(j_malloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_malloc)(size);
+ else
+ *(void **)storep = malloc(size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct error return, see __os_malloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_malloc(size)) == NULL) {
+ __db_err(dbenv, "User-specified malloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_urealloc --
+ * realloc(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_urealloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_urealloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *ptr;
+
+ ptr = *(void **)storep;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_realloc == NULL) {
+ if (ptr == NULL)
+ return (__os_umalloc(dbenv, size, storep));
+
+ if (DB_GLOBAL(j_realloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_realloc)(ptr, size);
+ else
+ *(void **)storep = realloc(ptr, size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct errno, see __os_realloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_realloc(ptr, size)) == NULL) {
+ __db_err(dbenv,
+ "User-specified realloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_ufree --
+ * free(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_ufree __P((DB_ENV *, void *));
+ */
+int
+__os_ufree(dbenv, ptr)
+ DB_ENV *dbenv;
+ void *ptr;
+{
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ if (dbenv != NULL && dbenv->db_free != NULL)
+ dbenv->db_free(ptr);
+ else if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
+ else
+ free(ptr);
+
+ return (0);
+}
+
+/*
+ * __os_strdup --
+ * The strdup(3) function for DB.
+ *
+ * PUBLIC: int __os_strdup __P((DB_ENV *, const char *, void *));
+ */
+int
+__os_strdup(dbenv, str, storep)
+ DB_ENV *dbenv;
+ const char *str;
+ void *storep;
+{
+ size_t size;
+ int ret;
+ void *p;
+
+ *(void **)storep = NULL;
+
+ size = strlen(str) + 1;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
+ return (ret);
+
+ memcpy(p, str, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_calloc --
+ * The calloc(3) function for DB.
+ *
+ * PUBLIC: int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+ */
+int
+__os_calloc(dbenv, num, size, storep)
+ DB_ENV *dbenv;
+ size_t num, size;
+ void *storep;
+{
+ void *p;
+ int ret;
+
+ size *= num;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
+ return (ret);
+
+ memset(p, 0, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_malloc --
+ * The malloc(3) function for DB.
+ *
+ * PUBLIC: int __os_malloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_malloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *p;
+
+ *(void **)storep = NULL;
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+#ifdef DIAGNOSTIC
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
+#endif
+
+ if (DB_GLOBAL(j_malloc) != NULL)
+ p = DB_GLOBAL(j_malloc)(size);
+ else
+ p = malloc(size);
+ if (p == NULL) {
+ /*
+ * Some C libraries don't correctly set errno when malloc(3)
+ * fails. We'd like to 0 out errno before calling malloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * Guard bytes: if DIAGNOSTIC is defined, we allocate an additional
+ * byte after the memory and set it to a special value that we check
+ * for when the memory is freed.
+ */
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE;
+
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
+#endif
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_realloc --
+ * The realloc(3) function for DB.
+ *
+ * PUBLIC: int __os_realloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_realloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *p, *ptr;
+
+ ptr = *(void **)storep;
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ /* If we haven't allocated anything yet, simply call malloc. */
+ if (ptr == NULL)
+ return (__os_malloc(dbenv, size, storep));
+
+#ifdef DIAGNOSTIC
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
+
+ /* Back up to the real beginning. */
+ ptr = &((union __db_alloc *)ptr)[-1];
+#endif
+
+ /*
+ * Don't overwrite the original pointer, there are places in DB we
+ * try to continue after realloc fails.
+ */
+ if (DB_GLOBAL(j_realloc) != NULL)
+ p = DB_GLOBAL(j_realloc)(ptr, size);
+ else
+ p = realloc(ptr, size);
+ if (p == NULL) {
+ /*
+ * Some C libraries don't correctly set errno when realloc(3)
+ * fails.  We'd like to 0 out errno before calling realloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+#ifdef DIAGNOSTIC
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE; /* Initialize guard byte. */
+
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
+#endif
+
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_free --
+ * The free(3) function for DB.
+ *
+ * PUBLIC: void __os_free __P((DB_ENV *, void *));
+ */
+void
+__os_free(dbenv, ptr)
+ DB_ENV *dbenv;
+ void *ptr;
+{
+#ifdef DIAGNOSTIC
+ int size;
+ /*
+ * Check that the guard byte (one past the end of the memory) is
+ * still CLEAR_BYTE.
+ */
+ if (ptr == NULL)
+ return;
+
+ ptr = &((union __db_alloc *)ptr)[-1];
+ size = ((union __db_alloc *)ptr)->size;
+ if (((u_int8_t *)ptr)[size - 1] != CLEAR_BYTE)
+ __os_guard(dbenv);
+
+ /* Clear memory. */
+ if (size != 0)
+ memset(ptr, CLEAR_BYTE, size);
+#endif
+ COMPQUIET(dbenv, NULL);
+
+ if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
+ else
+ free(ptr);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __os_guard --
+ * Complain and abort.
+ */
+static void
+__os_guard(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "Guard byte incorrect during free");
+ abort();
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __ua_memcpy --
+ * Copy memory to memory without relying on any kind of alignment.
+ *
+ * There are places in DB where we have unaligned data, for example,
+ * when we've stored a structure in a log record as a DBT, and now
+ * we want to look at it. Unfortunately, if you have code like:
+ *
+ * struct a {
+ * int x;
+ * } *p;
+ *
+ * void *func_argument;
+ * int local;
+ *
+ * p = (struct a *)func_argument;
+ * memcpy(&local, &p->x, sizeof(local));
+ *
+ * compilers may optimize the copy into inline instructions that require
+ * alignment, and records in the log don't have any particular
+ * alignment.  (This isn't a compiler bug; because the source is a
+ * structure, the compiler is allowed to assume it is aligned.)
+ *
+ * Casting the memcpy arguments to (u_int8_t *) appears to work most
+ * of the time, but we've seen examples where it wasn't sufficient
+ * and there's nothing in ANSI C that requires that it work.
+ *
+ * PUBLIC: void *__ua_memcpy __P((void *, const void *, size_t));
+ */
+void *
+__ua_memcpy(dst, src, len)
+ void *dst;
+ const void *src;
+ size_t len;
+{
+ return ((void *)memcpy(dst, src, len));
+}
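[Editorial note, not part of the patch: the __ua_memcpy comment above describes the
unaligned-access problem in general terms. A minimal, hypothetical sketch of the usual
portable fix is to copy the bytes into a properly aligned local rather than dereference
a possibly misaligned typed pointer; the name read_u32_unaligned is illustrative.]

#include <stdint.h>
#include <string.h>

/* Sketch only: read a 32-bit value from an arbitrarily aligned buffer. */
static uint32_t
read_u32_unaligned(const void *src)
{
	uint32_t v;

	memcpy(&v, src, sizeof(v));	/* memcpy itself has no alignment requirement. */
	return (v);
}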
diff --git a/libdb/os/os_clock.c b/libdb/os/os_clock.c
new file mode 100644
index 0000000..6dc5700
--- /dev/null
+++ b/libdb/os/os_clock.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH_SYS_TIME */
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ *
+ * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+#if defined(HAVE_GETTIMEOFDAY)
+ struct timeval tp;
+ int ret;
+
+retry: if (gettimeofday(&tp, NULL) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "gettimeofday: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_usec;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && defined(HAVE_CLOCK_GETTIME)
+ struct timespec tp;
+ int ret;
+
+retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "clock_gettime: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_nsec / 1000;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && !defined(HAVE_CLOCK_GETTIME)
+ time_t now;
+ int ret;
+
+ if (time(&now) == (time_t)-1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "time: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = now;
+ if (usecsp != NULL)
+ *usecsp = 0;
+#endif
+ return (0);
+}
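[Editorial note, not part of the patch: as a point of reference, the first branch of
__os_clock reduces to the following standalone sketch, assuming a POSIX host with
gettimeofday(2); the function name wallclock is illustrative.]

#include <sys/time.h>

/* Sketch only: current wall-clock time as seconds and microseconds. */
static int
wallclock(unsigned long *secsp, unsigned long *usecsp)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		return (-1);
	if (secsp != NULL)
		*secsp = (unsigned long)tv.tv_sec;
	if (usecsp != NULL)
		*usecsp = (unsigned long)tv.tv_usec;
	return (0);
}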
diff --git a/libdb/os/os_config.c b/libdb/os/os_config.c
new file mode 100644
index 0000000..bf791e3
--- /dev/null
+++ b/libdb/os/os_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /* Most filesystems zero out implicitly created pages. */
+ return (0);
+}
diff --git a/libdb/os/os_dir.c b/libdb/os/os_dir.c
new file mode 100644
index 0000000..aed7add
--- /dev/null
+++ b/libdb/os/os_dir.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory.
+ *
+ * PUBLIC: int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct dirent *dp;
+ DIR *dirp;
+ int arraysz, cnt, ret;
+ char **names;
+
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
+
+#ifdef HAVE_VXWORKS
+ if ((dirp = opendir((char *)dir)) == NULL)
+#else
+ if ((dirp = opendir(dir)) == NULL)
+#endif
+ return (__os_get_errno());
+ names = NULL;
+ for (arraysz = cnt = 0; (dp = readdir(dirp)) != NULL; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, dp->d_name, &names[cnt])) != 0)
+ goto nomem;
+ }
+ (void)closedir(dirp);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+nomem: if (names != NULL)
+ __os_dirfree(dbenv, names, cnt);
+ if (dirp != NULL)
+ (void)closedir(dirp);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free the list of files.
+ *
+ * PUBLIC: void __os_dirfree __P((DB_ENV *, char **, int));
+ */
+void
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
+ char **names;
+ int cnt;
+{
+ if (DB_GLOBAL(j_dirfree) != NULL)
+ DB_GLOBAL(j_dirfree)(names, cnt);
+ else {
+ while (cnt > 0)
+ __os_free(dbenv, names[--cnt]);
+ __os_free(dbenv, names);
+ }
+}
diff --git a/libdb/os/os_errno.c b/libdb/os/os_errno.c
new file mode 100644
index 0000000..ad7c8dd
--- /dev/null
+++ b/libdb/os/os_errno.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno_ret_zero --
+ * Return the value of errno, even if it's zero.
+ *
+ * PUBLIC: int __os_get_errno_ret_zero __P((void));
+ */
+int
+__os_get_errno_ret_zero()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
+ * __os_get_errno --
+ * Return the value of errno, or EAGAIN if errno is zero.
+ *
+ * PUBLIC: int __os_get_errno __P((void));
+ */
+int
+__os_get_errno()
+{
+ /*
+ * This routine must be able to return the same value repeatedly.
+ *
+ * We've seen cases where system calls failed but errno was never set.
+ * This version of __os_get_errno() sets errno to EAGAIN if it's not
+ * already set, to work around that problem. For obvious reasons, we
+ * can only call this function if we know an error has occurred, that
+ * is, we can't test errno for a non-zero value after this call.
+ */
+ if (errno == 0)
+ __os_set_errno(EAGAIN);
+
+ return (errno);
+}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno.
+ *
+ * PUBLIC: void __os_set_errno __P((int));
+ */
+void
+__os_set_errno(evalue)
+ int evalue;
+{
+ errno = evalue;
+}
diff --git a/libdb/os/os_fid.c b/libdb/os/os_fid.c
new file mode 100644
index 0000000..bd2ffc7
--- /dev/null
+++ b/libdb/os/os_fid.c
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file. The structure
+ * of a fileid is: ino(4) dev(4) time(4) pid(4) extra(4).
+ * For real files, which have a backing inode and device, the first
+ * 16 bytes are filled in and the extra bytes are left 0. For
+ * temporary files, the inode and device fields are left blank and
+ * the extra four bytes are filled in with a random value.
+ *
+ * PUBLIC: int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+ */
+int
+__os_fileid(dbenv, fname, unique_okay, fidp)
+ DB_ENV *dbenv;
+ const char *fname;
+ int unique_okay;
+ u_int8_t *fidp;
+{
+ struct stat sb;
+ size_t i;
+ int ret;
+ u_int32_t tmp;
+ u_int8_t *p;
+
+ /* Clear the buffer. */
+ memset(fidp, 0, DB_FILE_ID_LEN);
+
+ /* On POSIX/UNIX, use a dev/inode pair. */
+retry:
+#ifdef HAVE_VXWORKS
+ if (stat((char *)fname, &sb) != 0) {
+#else
+ if (stat(fname, &sb) != 0) {
+#endif
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "%s: %s", fname, strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Initialize/increment the serial number we use to help avoid
+ * fileid collisions. Note that we don't bother with locking;
+ * it's unpleasant to do from down in here, and if we race on
+ * this no real harm will be done, since the finished fileid
+ * has so many other components.
+ *
+ * We increment by 100000 on each call as a simple way of
+ * randomizing; simply incrementing seems potentially less useful
+ * if pids are also simply incremented, since this is process-local
+ * and we may be one of a set of processes starting up. 100000
+ * pushes us out of pid space on most platforms, and has few
+ * interesting properties in base 2.
+ */
+ if (fid_serial == SERIAL_INIT)
+ __os_id(&fid_serial);
+ else
+ fid_serial += 100000;
+
+ /*
+ * !!!
+ * Nothing is ever big enough -- on Sparc V9, st_ino, st_dev and the
+ * time_t types are all 8 bytes. As DB_FILE_ID_LEN is only 20 bytes,
+ * we convert to a (potentially) smaller fixed-size type and use it.
+ *
+ * We don't worry about byte sexing or the actual variable sizes.
+ *
+ * When this routine is called from the DB access methods, it's only
+ * called once -- whatever ID is generated when a database is created
+ * is stored in the database file's metadata, and that is what is
+ * saved in the mpool region's information to uniquely identify the
+ * file.
+ *
+ * When called from the mpool layer this routine will be called each
+ * time a new thread of control wants to share the file, which makes
+ * things tougher. As far as byte sexing goes, since the mpool region
+ * lives on a single host, there's no issue of that -- the entire
+ * region is byte sex dependent. As far as variable sizes go, we make
+ * the simplifying assumption that 32-bit and 64-bit processes will
+ * get the same 32-bit values if we truncate any returned 64-bit value
+ * to a 32-bit value. When we're called from the mpool layer, though,
+ * we need to be careful not to include anything that isn't
+ * reproducible for a given file, such as the timestamp or serial
+ * number.
+ */
+ tmp = (u_int32_t)sb.st_ino;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ tmp = (u_int32_t)sb.st_dev;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ if (unique_okay) {
+ /*
+ * We want the number of seconds, not the high-order 0 bits,
+ * so convert the returned time_t to a (potentially) smaller
+ * fixed-size type.
+ */
+ tmp = (u_int32_t)time(NULL);
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
+ i > 0; --i)
+ *fidp++ = *p++;
+ }
+
+ return (0);
+}
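[Editorial note, not part of the patch: the byte-at-a-time copies above avoid alignment
and byte-order assumptions when packing 32-bit values into the fileid buffer. A
hypothetical standalone sketch of the same idiom; the name pack_u32 is illustrative.]

#include <stddef.h>
#include <stdint.h>

/* Sketch only: append a 32-bit value to a byte buffer, byte by byte. */
static unsigned char *
pack_u32(unsigned char *dst, uint32_t v)
{
	const unsigned char *p;
	size_t i;

	for (p = (const unsigned char *)&v, i = 0; i < sizeof(v); ++i)
		*dst++ = *p++;
	return (dst);
}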
diff --git a/libdb/os/os_fsync.c b/libdb/os/os_fsync.c
new file mode 100644
index 0000000..4bcb4f6
--- /dev/null
+++ b/libdb/os/os_fsync.c
@@ -0,0 +1,89 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_VXWORKS
+#include "ioLib.h"
+
+#define fsync(fd) __vx_fsync(fd);
+
+int
+__vx_fsync(fd)
+ int fd;
+{
+ int ret;
+
+ /*
+ * The results of ioctl are driver dependent. Some will return the
+ * number of bytes sync'ed. Only if it returns 'ERROR' should we
+ * flag it.
+ */
+ if ((ret = ioctl(fd, FIOSYNC, 0)) != ERROR)
+ return (0);
+ return (ret);
+}
+#endif
+
+#ifdef __hp3000s900
+#define fsync(fd) __mpe_fsync(fd);
+
+int
+__mpe_fsync(fd)
+ int fd;
+{
+ extern FCONTROL(short, short, void *);
+
+ FCONTROL(_MPE_FILENO(fd), 2, NULL); /* Flush the buffers */
+ FCONTROL(_MPE_FILENO(fd), 6, NULL); /* Write the EOF */
+ return (0);
+}
+#endif
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_fsync(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ int ret;
+
+ /*
+ * Do nothing if the file descriptor has been marked as not requiring
+ * any sync to disk.
+ */
+ if (F_ISSET(fhp, DB_FH_NOSYNC))
+ return (0);
+
+ do {
+ ret = DB_GLOBAL(j_fsync) != NULL ?
+ DB_GLOBAL(j_fsync)(fhp->fd) : fsync(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "fsync %s", strerror(ret));
+ return (ret);
+}
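[Editorial note, not part of the patch: the do/while in __os_fsync is the usual
retry-on-EINTR idiom. A minimal sketch of the same pattern, assuming a POSIX fsync(2);
the name fsync_retry is illustrative.]

#include <errno.h>
#include <unistd.h>

/* Sketch only: fsync a descriptor, retrying if interrupted by a signal. */
static int
fsync_retry(int fd)
{
	while (fsync(fd) != 0) {
		if (errno != EINTR)
			return (errno);
	}
	return (0);
}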
diff --git a/libdb/os/os_handle.c b/libdb/os/os_handle.c
new file mode 100644
index 0000000..5341f65
--- /dev/null
+++ b/libdb/os/os_handle.c
@@ -0,0 +1,185 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+#ifdef HAVE_VXWORKS
+ int newflags;
+#endif
+
+ memset(fhp, 0, sizeof(*fhp));
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+#ifdef HAVE_VXWORKS
+ /*
+ * VxWorks does not support O_CREAT on open, you have to use
+ * creat() instead. (It does not support O_EXCL or O_TRUNC
+ * either, even though they are defined "for future support".)
+ * We really want the POSIX behavior that if O_CREAT is set,
+ * we open if it exists, or create it if it doesn't exist.
+		 * If O_CREAT is specified, single-thread and try to open the
+		 * file.  If the open succeeds and O_EXCL was specified, return
+		 * EEXIST.  If the open fails, call creat() and then end the
+		 * single-threading.
+ */
+ if (LF_ISSET(O_CREAT)) {
+ DB_BEGIN_SINGLE_THREAD;
+ newflags = flags & ~(O_CREAT | O_EXCL);
+ if ((fhp->fd =
+ open(name, newflags, mode)) != -1) {
+ if (LF_ISSET(O_EXCL)) {
+ /*
+ * If we get here, we want O_EXCL
+ * create, and it exists. Close and
+					 * return EEXIST.
+ */
+ (void)close(fhp->fd);
+ DB_END_SINGLE_THREAD;
+ return (EEXIST);
+ }
+ /*
+ * XXX
+ * Assume any error means non-existence.
+ * Unfortunately return values (even for
+ * non-existence) are driver specific so
+ * there is no single error we can use to
+ * verify we truly got the equivalent of
+ * ENOENT.
+ */
+ } else
+ fhp->fd = creat(name, newflags);
+ DB_END_SINGLE_THREAD;
+ } else
+
+ /* FALLTHROUGH */
+#endif
+#ifdef __VMS
+ /*
+ * !!!
+ * Open with full sharing on VMS.
+ *
+ * We use these flags because they are the ones set by the VMS
+ * CRTL mmap() call when it opens a file, and we have to be
+ * able to open files that mmap() has previously opened, e.g.,
+ * when we're joining already existing DB regions.
+ */
+ fhp->fd = open(name, flags, mode, "shr=get,put,upd,del,upi");
+#else
+ fhp->fd = open(name, flags, mode);
+#endif
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
+ } else {
+#if defined(HAVE_FCNTL_F_SETFD)
+ /* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s",
+ strerror(ret));
+ (void)__os_closehandle(dbenv, fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_closehandle --
+ * Close a file.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_closehandle(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ int ret;
+
+ /* Don't close file descriptors that were never opened. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
+
+ do {
+ ret = DB_GLOBAL(j_close) != NULL ?
+ DB_GLOBAL(j_close)(fhp->fd) : close(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ /* Unlink the file if we haven't already done so. */
+ if (F_ISSET(fhp, DB_FH_UNLINK)) {
+ (void)__os_unlink(dbenv, fhp->name);
+ (void)__os_free(dbenv, fhp->name);
+ }
+
+ /*
+ * Smash the POSIX file descriptor -- it's never tested, but we want
+ * to catch any mistakes.
+ */
+ fhp->fd = -1;
+ F_CLR(fhp, DB_FH_VALID);
+
+ return (ret);
+}
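[Editorial note, not part of the patch: the retry policy in __os_openhandle (up to three
attempts with a growing sleep for descriptor/space exhaustion, unlimited immediate
retries on EINTR) can be sketched on its own as below; open_with_retry is an
illustrative name, not a library routine.]

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Sketch only: open with a simple backoff on transient failures. */
static int
open_with_retry(const char *name, int flags, mode_t mode)
{
	int fd, attempt;

	for (attempt = 1; attempt <= 3; ++attempt) {
		if ((fd = open(name, flags, mode)) != -1)
			return (fd);
		if (errno == EINTR) {		/* Retry immediately. */
			--attempt;
			continue;
		}
		if (errno != ENFILE && errno != EMFILE && errno != ENOSPC)
			break;			/* Not a transient error. */
		sleep((unsigned int)(2 * attempt));
	}
	return (-1);
}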
diff --git a/libdb/os/os_id.c b/libdb/os/os_id.c
new file mode 100644
index 0000000..78221d9
--- /dev/null
+++ b/libdb/os/os_id.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_id --
+ * Return a 32-bit value identifying the current thread of control.
+ *
+ * PUBLIC: void __os_id __P((u_int32_t *));
+ */
+void
+__os_id(idp)
+ u_int32_t *idp;
+{
+ /*
+ * By default, use the process ID.
+ *
+ * getpid() returns a pid_t which we convert to a u_int32_t. I have
+ * not yet seen a system where a pid_t has 64-bits, but I'm sure they
+ * exist. Since we're returning only the bottom 32-bits, you cannot
+ * use the return of __os_id to reference a process (for example, you
+ * cannot send a signal to the value returned by __os_id). To send a
+ * signal to the current process, use raise(3) instead.
+ */
+#ifdef HAVE_VXWORKS
+ *idp = taskIdSelf();
+#else
+ *idp = getpid();
+#endif
+}
diff --git a/libdb/os/os_map.c b/libdb/os/os_map.c
new file mode 100644
index 0000000..ed3da9d
--- /dev/null
+++ b/libdb/os/os_map.c
@@ -0,0 +1,443 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#ifdef HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_SHMGET
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_MMAP
+static int __os_map __P((DB_ENV *, char *, DB_FH *, size_t, int, int, void **));
+#endif
+#ifndef HAVE_SHMGET
+static int __db_nosystemmem __P((DB_ENV *));
+#endif
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+ /*
+ * If the region is in system memory on UNIX, we use shmget(2).
+ *
+ * !!!
+ * There exist spinlocks that don't work in shmget memory, e.g.,
+ * the HP/UX msemaphore interface. If we don't have locks that
+ * will work in shmget memory, we better be private and not be
+ * threaded. If we reach this point, we know we're public, so
+ * it's an error.
+ */
+#if defined(MUTEX_NO_SHMGET_LOCKS)
+ __db_err(dbenv,
+ "architecture does not support locks inside system shared memory");
+ return (EINVAL);
+#endif
+#if defined(HAVE_SHMGET)
+ {
+ key_t segid;
+ int id, ret;
+
+ /*
+ * We could potentially create based on REGION_CREATE_OK, but
+ * that's dangerous -- we might get crammed in sideways if
+ * some of the expected regions exist but others do not. Also,
+ * if the requested size differs from an existing region's
+ * actual size, then all sorts of nasty things can happen.
+ * Basing create solely on REGION_CREATE is much safer -- a
+ * recovery will get us straightened out.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ /*
+ * The application must give us a base System V IPC key
+ * value. Adjust that value based on the region's ID,
+ * and correct so the user's original value appears in
+ * the ipcs output.
+ */
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv,
+ "no base system shared memory ID specified");
+ return (EINVAL);
+ }
+ segid = (key_t)(dbenv->shm_key + (infop->id - 1));
+
+ /*
+ * If map to an existing region, assume the application
+ * crashed and we're restarting. Delete the old region
+ * and re-try. If that fails, return an error, the
+ * application will have to select a different segment
+ * ID or clean up some other way.
+ */
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ (void)shmctl(id, IPC_RMID, NULL);
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ __db_err(dbenv,
+ "shmget: key: %ld: shared system memory region already exists",
+ (long)segid);
+ return (EAGAIN);
+ }
+ }
+ if ((id =
+ shmget(segid, rp->size, IPC_CREAT | 0600)) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmget: key: %ld: unable to create shared system memory region: %s",
+ (long)segid, strerror(ret));
+ return (ret);
+ }
+ rp->segid = id;
+ } else
+ id = rp->segid;
+
+ if ((infop->addr = shmat(id, NULL, 0)) == (void *)-1) {
+ infop->addr = NULL;
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmat: id %d: unable to attach to shared system memory region: %s",
+ id, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+ }
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+ {
+ DB_FH fh;
+ int ret;
+
+ /*
+ * Try to open/create the shared region file. We DO NOT need to ensure
+ * that multiple threads/processes attempting to simultaneously create
+ * the region are properly ordered, our caller has already taken care
+ * of that.
+ */
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_REGION | DB_OSO_DIRECT |
+ (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
+ infop->mode, &fh)) != 0)
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+
+ /*
+ * If we created the file, grow it to its full size before mapping
+ * it in. We really want to avoid touching the buffer cache after
+ * mmap(2) is called, doing anything else confuses the hell out of
+ * systems without merged VM/buffer cache systems, or, more to the
+ * point, *badly* merged VM/buffer cache systems.
+ */
+ if (ret == 0 && F_ISSET(infop, REGION_CREATE))
+ ret = __db_fileinit(dbenv,
+ &fh, rp->size, F_ISSET(dbenv, DB_ENV_REGION_INIT) ? 1 : 0);
+
+ /* Map the file in. */
+ if (ret == 0)
+ ret = __os_map(dbenv,
+ infop->name, &fh, rp->size, 1, 0, &infop->addr);
+
+ if (F_ISSET(&fh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &fh);
+
+ return (ret);
+ }
+#else
+ COMPQUIET(infop, NULL);
+ COMPQUIET(rp, NULL);
+ __db_err(dbenv,
+ "architecture lacks mmap(2), shared environments not possible");
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+#ifdef HAVE_SHMGET
+ int ret, segid;
+
+ /*
+ * We may be about to remove the memory referenced by rp,
+ * save the segment ID, and (optionally) wipe the original.
+ */
+ segid = rp->segid;
+ if (destroy)
+ rp->segid = INVALID_REGION_SEGID;
+
+ if (shmdt(infop->addr) != 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "shmdt: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (destroy && shmctl(segid, IPC_RMID,
+ NULL) != 0 && (ret = __os_get_errno()) != EINVAL) {
+ __db_err(dbenv,
+ "shmctl: id %ld: unable to delete system shared memory region: %s",
+ segid, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ (void)munlock(infop->addr, rp->size);
+#endif
+ if (munmap(infop->addr, rp->size) != 0) {
+ int ret;
+
+ ret = __os_get_errno();
+ __db_err(dbenv, "munmap: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (destroy && __os_region_unlink(dbenv, infop->name) != 0)
+ return (__os_get_errno());
+
+ return (0);
+#else
+ COMPQUIET(destroy, 0);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+#if defined(HAVE_MMAP) && !defined(HAVE_QNX)
+ return (__os_map(dbenv, path, fhp, len, 0, is_rdonly, addrp));
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ while (munlock(addr, len) != 0 && __os_get_errno() == EINTR)
+ ;
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+ {
+ int ret;
+
+ while ((ret = munmap(addr, len)) != 0 &&
+ __os_get_errno() == EINTR)
+ ;
+ return (ret ? __os_get_errno() : 0);
+ }
+#else
+ COMPQUIET(dbenv, NULL);
+
+ return (EINVAL);
+#endif
+}
+
+#ifdef HAVE_MMAP
+/*
+ * __os_map --
+ * Call the mmap(2) function.
+ */
+static int
+__os_map(dbenv, path, fhp, len, is_region, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_region, is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ void *p;
+ int flags, prot, ret;
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)
+ (path, len, is_region, is_rdonly, addrp));
+
+ /*
+ * If it's read-only, it's private, and if it's not, it's shared.
+ * Don't bother with an additional parameter.
+ */
+ flags = is_rdonly ? MAP_PRIVATE : MAP_SHARED;
+
+#ifdef MAP_FILE
+ /*
+ * Historically, MAP_FILE was required for mapping regular files,
+ * even though it was the default. Some systems have it, some
+ * don't, some that have it set it to 0.
+ */
+ flags |= MAP_FILE;
+#endif
+
+ /*
+ * I know of no systems that implement the flag to tell the system
+ * that the region contains semaphores, but it's not an unreasonable
+ * thing to do, and has been part of the design since forever. I
+ * don't think anyone will object, but don't set it for read-only
+ * files, it doesn't make sense.
+ */
+#ifdef MAP_HASSEMAPHORE
+ if (is_region && !is_rdonly)
+ flags |= MAP_HASSEMAPHORE;
+#else
+ COMPQUIET(is_region, 0);
+#endif
+
+ prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE);
+
+ /*
+ * XXX
+ * Work around a bug in the VMS V7.1 mmap() implementation. To map
+ * a file into memory on VMS it needs to be opened in a certain way,
+ * originally. To get the file opened in that certain way, the VMS
+ * mmap() closes the file and re-opens it. When it does this, it
+ * doesn't flush any caches out to disk before closing. The problem
+ * this causes us is that when the memory cache doesn't get written
+ * out, the file isn't big enough to match the memory chunk and the
+ * mmap() call fails. This call to fsync() fixes the problem. DEC
+ * thinks this isn't a bug because of language in XPG5 discussing user
+ * responsibility for on-disk and in-memory synchronization.
+ */
+#ifdef VMS
+ if (__os_fsync(dbenv, fhp) == -1)
+ return (__os_get_errno());
+#endif
+
+ /* MAP_FAILED was not defined in early mmap implementations. */
+#ifndef MAP_FAILED
+#define MAP_FAILED -1
+#endif
+ if ((p = mmap(NULL,
+ len, prot, flags, fhp->fd, (off_t)0)) == (void *)MAP_FAILED) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "mmap: %s", strerror(ret));
+ return (ret);
+ }
+
+#ifdef HAVE_MLOCK
+ /*
+ * If it's a region, we want to make sure that the memory isn't paged.
+ * For example, Solaris will page large mpools because it thinks that
+ * I/O buffer memory is more important than we are. The mlock system
+ * call may or may not succeed (mlock is restricted to the super-user
+ * on some systems). Currently, the only other use of mmap in DB is
+ * to map read-only databases -- we don't want them paged, either, so
+ * the call isn't conditional.
+ */
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN) && mlock(p, len) != 0) {
+ ret = __os_get_errno();
+ (void)munmap(p, len);
+ __db_err(dbenv, "mlock: %s", strerror(ret));
+ return (ret);
+ }
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+
+ *addrp = p;
+ return (0);
+}
+#endif
+
+#ifndef HAVE_SHMGET
+/*
+ * __db_nosystemmem --
+ * No system memory environments error message.
+ */
+static int
+__db_nosystemmem(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "architecture doesn't support environments in system memory");
+ return (__db_eopnotsup(dbenv));
+}
+#endif
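[Editorial note, not part of the patch: the plain read-only case handled by __os_map
reduces to a PROT_READ/MAP_PRIVATE mapping. A minimal, hypothetical sketch assuming
POSIX mmap(2); the name map_readonly is illustrative.]

#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

/* Sketch only: map an existing file read-only; returns NULL on error. */
static void *
map_readonly(const char *path, size_t *lenp)
{
	struct stat sb;
	void *p;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		return (NULL);
	if (fstat(fd, &sb) != 0 || sb.st_size == 0) {
		(void)close(fd);
		return (NULL);
	}
	p = mmap(NULL, (size_t)sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	(void)close(fd);		/* The mapping persists after close. */
	if (p == MAP_FAILED)
		return (NULL);
	*lenp = (size_t)sb.st_size;
	return (p);
}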
diff --git a/libdb/os/os_method.c b/libdb/os/os_method.c
new file mode 100644
index 0000000..2db73f6
--- /dev/null
+++ b/libdb/os/os_method.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * EXTERN: int db_env_set_func_close __P((int (*)(int)));
+ */
+int
+db_env_set_func_close(func_close)
+ int (*func_close) __P((int));
+{
+ DB_GLOBAL(j_close) = func_close;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_dirfree __P((void (*)(char **, int)));
+ */
+int
+db_env_set_func_dirfree(func_dirfree)
+ void (*func_dirfree) __P((char **, int));
+{
+ DB_GLOBAL(j_dirfree) = func_dirfree;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_dirlist
+ * EXTERN: __P((int (*)(const char *, char ***, int *)));
+ */
+int
+db_env_set_func_dirlist(func_dirlist)
+ int (*func_dirlist) __P((const char *, char ***, int *));
+{
+ DB_GLOBAL(j_dirlist) = func_dirlist;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_exists __P((int (*)(const char *, int *)));
+ */
+int
+db_env_set_func_exists(func_exists)
+ int (*func_exists) __P((const char *, int *));
+{
+ DB_GLOBAL(j_exists) = func_exists;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_free __P((void (*)(void *)));
+ */
+int
+db_env_set_func_free(func_free)
+ void (*func_free) __P((void *));
+{
+ DB_GLOBAL(j_free) = func_free;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_fsync __P((int (*)(int)));
+ */
+int
+db_env_set_func_fsync(func_fsync)
+ int (*func_fsync) __P((int));
+{
+ DB_GLOBAL(j_fsync) = func_fsync;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_ioinfo __P((int (*)(const char *,
+ * EXTERN: int, u_int32_t *, u_int32_t *, u_int32_t *)));
+ */
+int
+db_env_set_func_ioinfo(func_ioinfo)
+ int (*func_ioinfo)
+ __P((const char *, int, u_int32_t *, u_int32_t *, u_int32_t *));
+{
+ DB_GLOBAL(j_ioinfo) = func_ioinfo;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_malloc __P((void *(*)(size_t)));
+ */
+int
+db_env_set_func_malloc(func_malloc)
+ void *(*func_malloc) __P((size_t));
+{
+ DB_GLOBAL(j_malloc) = func_malloc;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_map
+ * EXTERN: __P((int (*)(char *, size_t, int, int, void **)));
+ */
+int
+db_env_set_func_map(func_map)
+ int (*func_map) __P((char *, size_t, int, int, void **));
+{
+ DB_GLOBAL(j_map) = func_map;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+ */
+int
+db_env_set_func_open(func_open)
+ int (*func_open) __P((const char *, int, ...));
+{
+ DB_GLOBAL(j_open) = func_open;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+ */
+int
+db_env_set_func_read(func_read)
+ ssize_t (*func_read) __P((int, void *, size_t));
+{
+ DB_GLOBAL(j_read) = func_read;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+ */
+int
+db_env_set_func_realloc(func_realloc)
+ void *(*func_realloc) __P((void *, size_t));
+{
+ DB_GLOBAL(j_realloc) = func_realloc;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_rename
+ * EXTERN: __P((int (*)(const char *, const char *)));
+ */
+int
+db_env_set_func_rename(func_rename)
+ int (*func_rename) __P((const char *, const char *));
+{
+ DB_GLOBAL(j_rename) = func_rename;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_seek
+ * EXTERN: __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+ */
+int
+db_env_set_func_seek(func_seek)
+ int (*func_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+{
+ DB_GLOBAL(j_seek) = func_seek;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+ */
+int
+db_env_set_func_sleep(func_sleep)
+ int (*func_sleep) __P((u_long, u_long));
+{
+ DB_GLOBAL(j_sleep) = func_sleep;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_unlink __P((int (*)(const char *)));
+ */
+int
+db_env_set_func_unlink(func_unlink)
+ int (*func_unlink) __P((const char *));
+{
+ DB_GLOBAL(j_unlink) = func_unlink;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+ */
+int
+db_env_set_func_unmap(func_unmap)
+ int (*func_unmap) __P((void *, size_t));
+{
+ DB_GLOBAL(j_unmap) = func_unmap;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_write
+ * EXTERN: __P((ssize_t (*)(int, const void *, size_t)));
+ */
+int
+db_env_set_func_write(func_write)
+ ssize_t (*func_write) __P((int, const void *, size_t));
+{
+ DB_GLOBAL(j_write) = func_write;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_yield __P((int (*)(void)));
+ */
+int
+db_env_set_func_yield(func_yield)
+ int (*func_yield) __P((void));
+{
+ DB_GLOBAL(j_yield) = func_yield;
+ return (0);
+}
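[Editorial note, not part of the patch: these db_env_set_func_* entry points let an
application route DB's OS calls through its own wrappers. A hypothetical
application-side example follows; the wrapper names are made up, while the setter names
and signatures are the ones defined above and declared in db.h.]

#include <stdlib.h>
#include <db.h>

static void *app_malloc(size_t len) { return (malloc(len)); }
static void *app_realloc(void *p, size_t len) { return (realloc(p, len)); }
static void app_free(void *p) { free(p); }

/* Install allocator hooks before any DB environment is created. */
int
app_install_db_allocators(void)
{
	int ret;

	if ((ret = db_env_set_func_malloc(app_malloc)) != 0)
		return (ret);
	if ((ret = db_env_set_func_realloc(app_realloc)) != 0)
		return (ret);
	return (db_env_set_func_free(app_free));
}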
diff --git a/libdb/os/os_oflags.c b/libdb/os/os_oflags.c
new file mode 100644
index 0000000..b5c68c1
--- /dev/null
+++ b/libdb/os/os_oflags.c
@@ -0,0 +1,118 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <fcntl.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_oflags --
+ * Convert open(2) flags to DB flags.
+ *
+ * PUBLIC: u_int32_t __db_oflags __P((int));
+ */
+u_int32_t
+__db_oflags(oflags)
+ int oflags;
+{
+ u_int32_t dbflags;
+
+ dbflags = 0;
+
+ if (oflags & O_CREAT)
+ dbflags |= DB_CREATE;
+
+ if (oflags & O_TRUNC)
+ dbflags |= DB_TRUNCATE;
+
+ /*
+ * !!!
+ * Convert POSIX 1003.1 open(2) mode flags to DB flags. This isn't
+ * an exact science as few POSIX implementations have a flag value
+ * for O_RDONLY, it's simply the lack of a write flag.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE (O_RDONLY | O_RDWR | O_WRONLY)
+#endif
+ switch (oflags & O_ACCMODE) {
+ case O_RDWR:
+ case O_WRONLY:
+ break;
+ default:
+ dbflags |= DB_RDONLY;
+ break;
+ }
+ return (dbflags);
+}
+
+/*
+ * __db_omode --
+ * Convert a permission string to the correct open(2) flags.
+ *
+ * PUBLIC: int __db_omode __P((const char *));
+ */
+int
+__db_omode(perm)
+ const char *perm;
+{
+ int mode;
+
+#ifdef DB_WIN32
+#ifndef S_IRUSR
+#define S_IRUSR S_IREAD /* R for owner */
+#endif
+#ifndef S_IWUSR
+#define S_IWUSR S_IWRITE /* W for owner */
+#endif
+#ifndef S_IRGRP
+#define S_IRGRP 0 /* R for group */
+#endif
+#ifndef S_IWGRP
+#define S_IWGRP 0 /* W for group */
+#endif
+#ifndef S_IROTH
+#define S_IROTH 0 /* R for other */
+#endif
+#ifndef S_IWOTH
+#define S_IWOTH 0 /* W for other */
+#endif
+#else
+#ifndef S_IRUSR
+#define S_IRUSR 0000400 /* R for owner */
+#define S_IWUSR 0000200 /* W for owner */
+#define S_IRGRP 0000040 /* R for group */
+#define S_IWGRP 0000020 /* W for group */
+#define S_IROTH 0000004 /* R for other */
+#define S_IWOTH 0000002 /* W for other */
+#endif
+#endif /* DB_WIN32 */
+ mode = 0;
+ if (perm[0] == 'r')
+ mode |= S_IRUSR;
+ if (perm[1] == 'w')
+ mode |= S_IWUSR;
+ if (perm[2] == 'r')
+ mode |= S_IRGRP;
+ if (perm[3] == 'w')
+ mode |= S_IWGRP;
+ if (perm[4] == 'r')
+ mode |= S_IROTH;
+ if (perm[5] == 'w')
+ mode |= S_IWOTH;
+ return (mode);
+}
diff --git a/libdb/os/os_open.c b/libdb/os/os_open.c
new file mode 100644
index 0000000..08749eb
--- /dev/null
+++ b/libdb/os/os_open.c
@@ -0,0 +1,257 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_QNX
+static int __os_region_open __P((DB_ENV *, const char *, int, int, DB_FH *));
+#endif
+
+/*
+ * __os_open --
+ * Open a file.
+ *
+ * PUBLIC: int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ int oflags, ret;
+
+ oflags = 0;
+
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
+
+#if defined(O_BINARY)
+ /*
+ * If there's a binary-mode open flag, set it, we never want any
+ * kind of translation. Some systems do translations by default,
+ * e.g., with Cygwin, the default mode for an open() is set by the
+ * mode of the mount that underlies the file.
+ */
+ oflags |= O_BINARY;
+#endif
+
+ /*
+ * DB requires the POSIX 1003.1 semantic that two files opened at the
+ * same time with DB_OSO_CREATE/O_CREAT and DB_OSO_EXCL/O_EXCL flags
+ * set return an EEXIST failure in at least one.
+ */
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+#if defined(O_DSYNC) && defined(XXX_NEVER_SET)
+ /*
+ * !!!
+ * We should get better performance if we push the log files to disk
+ * immediately instead of waiting for the sync. However, Solaris
+ * (and likely any other system based on the 4BSD filesystem releases),
+ * doesn't implement O_DSYNC correctly, only flushing data blocks and
+ * not inode or indirect blocks.
+ */
+ if (LF_ISSET(DB_OSO_LOG))
+ oflags |= O_DSYNC;
+#endif
+
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+#ifdef HAVE_O_DIRECT
+ if (LF_ISSET(DB_OSO_DIRECT))
+ oflags |= O_DIRECT;
+#endif
+
+#ifdef HAVE_QNX
+ if (LF_ISSET(DB_OSO_REGION))
+ return (__os_region_open(dbenv, name, oflags, mode, fhp));
+#endif
+ /* Open the file. */
+ if ((ret = __os_openhandle(dbenv, name, oflags, mode, fhp)) != 0)
+ return (ret);
+
+#ifdef HAVE_DIRECTIO
+ if (LF_ISSET(DB_OSO_DIRECT))
+ (void)directio(fhp->fd, DIRECTIO_ON);
+#endif
+
+ /*
+ * Delete any temporary file.
+ *
+ * !!!
+ * There's a race here, where we've created a file and we crash before
+ * we can unlink it. Temporary files aren't common in DB, regardless,
+ * it's not a security problem because the file is empty. There's no
+ * reasonable way to avoid the race (playing signal games isn't worth
+ * the portability nightmare), so we just live with it.
+ */
+ if (LF_ISSET(DB_OSO_TEMP)) {
+#if defined(HAVE_UNLINK_WITH_OPEN_FAILURE) || defined(CONFIG_TEST)
+ if ((ret = __os_strdup(dbenv, name, &fhp->name)) != 0) {
+ (void)__os_closehandle(dbenv, fhp);
+ (void)__os_unlink(dbenv, name);
+ return (ret);
+ }
+ F_SET(fhp, DB_FH_UNLINK);
+#else
+ (void)__os_unlink(dbenv, name);
+#endif
+ }
+
+ return (0);
+}
+
+#ifdef HAVE_QNX
+/*
+ * __os_region_open --
+ * Open a shared memory region file using POSIX shm_open.
+ */
+static int
+__os_region_open(dbenv, name, oflags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int oflags;
+ int mode;
+ DB_FH *fhp;
+{
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, name, &newname)) != 0)
+ goto err;
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = shm_open(newname, oflags, mode);
+ if (fhp->fd == -1)
+ ret = __os_get_errno();
+ else {
+#ifdef HAVE_FCNTL_F_SETFD
+		/* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s", strerror(ret));
+ __os_closehandle(dbenv, fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ /*
+ * Once we have created the object, we don't need the name
+ * anymore. Other callers of this will convert themselves.
+ */
+err:
+ if (newname != NULL)
+ __os_free(dbenv, newname);
+ return (ret);
+}
+
+/*
+ * __os_shmname --
+ * Translate a pathname into a shm_open memory object name.
+ *
+ * PUBLIC: #ifdef HAVE_QNX
+ * PUBLIC: int __os_shmname __P((DB_ENV *, const char *, char **));
+ * PUBLIC: #endif
+ */
+int
+__os_shmname(dbenv, name, newnamep)
+ DB_ENV *dbenv;
+ const char *name;
+ char **newnamep;
+{
+ int ret;
+ size_t size;
+ char *p, *q, *tmpname;
+
+ *newnamep = NULL;
+
+ /*
+ * POSIX states that the name for a shared memory object
+ * may begin with a slash '/' and support for subsequent
+ * slashes is implementation-dependent. The one implementation
+ * we know of right now, QNX, forbids subsequent slashes.
+ * We don't want to be parsing pathnames for '.' and '..' in
+ * the middle. In order to allow easy conversion, just take
+ * the last component as the shared memory name. This limits
+ * the namespace a bit, but makes our job a lot easier.
+ *
+ * We should not be modifying user memory, so we use our own.
+ * Caller is responsible for freeing the memory we give them.
+ */
+ if ((ret = __os_strdup(dbenv, name, &tmpname)) != 0)
+ return (ret);
+ /*
+ * Skip over filename component.
+	 * We set that separator to '\0' so that we can do another
+	 * __db_rpath, then immediately change it to ':' so that we
+	 * end up with the trailing directory:filename.
+ * We require a home directory component. Return an error
+ * if there isn't one.
+ */
+ p = __db_rpath(tmpname);
+ if (p == NULL)
+ return (EINVAL);
+ if (p != tmpname) {
+ *p = '\0';
+ q = p;
+ p = __db_rpath(tmpname);
+ *q = ':';
+ }
+ if (p != NULL) {
+ /*
+ * If we have a path component, copy and return it.
+ */
+ ret = __os_strdup(dbenv, p, newnamep);
+ __os_free(dbenv, tmpname);
+ return (ret);
+ }
+
+ /*
+ * We were given just a directory name with no path components.
+ * Add a leading slash, and copy the remainder.
+ */
+ size = strlen(tmpname) + 2;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
+ return (ret);
+ p[0] = '/';
+ memcpy(&p[1], tmpname, size-1);
+ __os_free(dbenv, tmpname);
+ *newnamep = p;
+ return (0);
+}
+#endif
diff --git a/libdb/os/os_region.c b/libdb/os/os_region.c
new file mode 100644
index 0000000..d810e8a
--- /dev/null
+++ b/libdb/os/os_region.c
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_r_attach --
+ * Attach to a shared memory region.
+ *
+ * PUBLIC: int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_attach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+ /* Round off the requested size for the underlying VM. */
+ OS_VMROUNDOFF(rp->size);
+
+#ifdef DB_REGIONSIZE_MAX
+ /* Some architectures have hard limits on the maximum region size. */
+ if (rp->size > DB_REGIONSIZE_MAX) {
+ __db_err(dbenv, "region size %lu is too large; maximum is %lu",
+ (u_long)rp->size, (u_long)DB_REGIONSIZE_MAX);
+ return (EINVAL);
+ }
+#endif
+
+ /*
+ * If a region is private, malloc the memory.
+ *
+ * !!!
+ * If this fails because the region is too large to malloc, mmap(2)
+ * using the MAP_ANON or MAP_ANONYMOUS flags would be an alternative.
+ * I don't know of any architectures (yet!) where malloc is a problem.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+#if defined(MUTEX_NO_MALLOC_LOCKS)
+ /*
+ * !!!
+ * There exist spinlocks that don't work in malloc memory, e.g.,
+ * the HP/UX msemaphore interface. If we don't have locks that
+		 * will work in malloc memory, we had better not be both
+		 * private and threaded.
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __db_err(dbenv, "%s",
+ "architecture does not support locks inside process-local (malloc) memory");
+ __db_err(dbenv, "%s",
+ "application may not specify both DB_PRIVATE and DB_THREAD");
+ return (EINVAL);
+ }
+#endif
+ if ((ret =
+ __os_malloc(dbenv, rp->size, &infop->addr)) != 0)
+ return (ret);
+#if defined(UMRW) && !defined(DIAGNOSTIC)
+ memset(infop->addr, CLEAR_BYTE, rp->size);
+#endif
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(infop->name,
+ rp->size, 1, 0, &infop->addr));
+
+ return (__os_r_sysattach(dbenv, infop, rp));
+}
+
+/*
+ * __os_r_detach --
+ * Detach from a shared memory region.
+ *
+ * PUBLIC: int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ /* If a region is private, free the memory. */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ __os_free(dbenv, infop->addr);
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(infop->addr, rp->size));
+
+ return (__os_r_sysdetach(dbenv, infop, destroy));
+}
diff --git a/libdb/os/os_rename.c b/libdb/os/os_rename.c
new file mode 100644
index 0000000..94ed6fc
--- /dev/null
+++ b/libdb/os/os_rename.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_rename --
+ * Rename a file. If flags is non-zero, then errors are OK and we
+ * should not output an error message.
+ *
+ * PUBLIC: int __os_rename __P((DB_ENV *,
+ * PUBLIC: const char *, const char *, u_int32_t));
+ */
+int
+__os_rename(dbenv, old, new, flags)
+ DB_ENV *dbenv;
+ const char *old, *new;
+ u_int32_t flags;
+{
+ int ret;
+
+ do {
+ ret = DB_GLOBAL(j_rename) != NULL ?
+ DB_GLOBAL(j_rename)(old, new) : rename(old, new);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ if (ret != 0 && flags == 0)
+ __db_err(dbenv, "rename %s %s: %s", old, new, strerror(ret));
+ return (ret);
+}
diff --git a/libdb/os/os_root.c b/libdb/os/os_root.c
new file mode 100644
index 0000000..5dc63c9
--- /dev/null
+++ b/libdb/os/os_root.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_isroot --
+ * Return if user has special permissions.
+ *
+ * PUBLIC: int __os_isroot __P((void));
+ */
+int
+__os_isroot()
+{
+#ifdef HAVE_GETUID
+ return (getuid() == 0);
+#else
+ return (0);
+#endif
+}
diff --git a/libdb/os/os_rpath.c b/libdb/os/os_rpath.c
new file mode 100644
index 0000000..7b34913
--- /dev/null
+++ b/libdb/os/os_rpath.c
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#ifdef HAVE_VXWORKS
+#include "iosLib.h"
+#endif
+
+/*
+ * __db_rpath --
+ * Return the last path separator in the path or NULL if none found.
+ *
+ * PUBLIC: char *__db_rpath __P((const char *));
+ */
+char *
+__db_rpath(path)
+ const char *path;
+{
+ const char *s, *last;
+#ifdef HAVE_VXWORKS
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name. We want to
+ * skip over the device name and not take into account any
+ * PATH_SEPARATOR characters that might be in that name.
+ *
+ * XXX [#2393]
+ * VxWorks supports having a filename directly follow a device
+ * name with no separator. I.e. to access a file 'xxx' in
+ * the top level directory of a device mounted at "mydrive"
+ * you could say "mydrivexxx" or "mydrive/xxx" or "mydrive\xxx".
+ * We do not support the first usage here.
+ * XXX
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ s = path;
+ else
+ s = ptail;
+#else
+ s = path;
+#endif
+
+ last = NULL;
+ if (PATH_SEPARATOR[1] != '\0') {
+ for (; s[0] != '\0'; ++s)
+ if (strchr(PATH_SEPARATOR, s[0]) != NULL)
+ last = s;
+ } else
+ for (; s[0] != '\0'; ++s)
+ if (s[0] == PATH_SEPARATOR[0])
+ last = s;
+ return ((char *)last);
+}
diff --git a/libdb/os/os_rw.c b/libdb/os/os_rw.c
new file mode 100644
index 0000000..c21abb7
--- /dev/null
+++ b/libdb/os/os_rw.c
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+static int __os_zerofill __P((DB_ENV *, DB_FH *));
+#endif
+static int __os_physwrite __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+ /* Check for illegal usage. */
+ DB_ASSERT(F_ISSET(db_iop->fhp, DB_FH_VALID) && db_iop->fhp->fd != -1);
+
+#if defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+ switch (op) {
+ case DB_IO_READ:
+ if (DB_GLOBAL(j_read) != NULL)
+ goto slow;
+ *niop = pread(db_iop->fhp->fd, db_iop->buf,
+ db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ case DB_IO_WRITE:
+ if (DB_GLOBAL(j_write) != NULL)
+ goto slow;
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ if (__os_fs_notzero())
+ goto slow;
+#endif
+ *niop = pwrite(db_iop->fhp->fd, db_iop->buf,
+ db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ }
+ if (*niop == (size_t)db_iop->bytes)
+ return (0);
+slow:
+#endif
+ MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+ db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nrp;
+{
+ size_t offset;
+ ssize_t nr;
+ int ret;
+ u_int8_t *taddr;
+
+ /* Check for illegal usage. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nr, offset += nr) {
+retry: if ((nr = DB_GLOBAL(j_read) != NULL ?
+ DB_GLOBAL(j_read)(fhp->fd, taddr, len - offset) :
+ read(fhp->fd, taddr, len - offset)) < 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "read: 0x%x, %lu: %s", taddr,
+			    (u_long)(len - offset), strerror(ret));
+ return (ret);
+ }
+ if (nr == 0)
+ break;
+ }
+ *nrp = taddr - (u_int8_t *)addr;
+ return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ /* Check for illegal usage. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ /* Zero-fill as necessary. */
+ if (__os_fs_notzero()) {
+ int ret;
+ if ((ret = __os_zerofill(dbenv, fhp)) != 0)
+ return (ret);
+ }
+#endif
+ return (__os_physwrite(dbenv, fhp, addr, len, nwp));
+}
+
+/*
+ * __os_physwrite --
+ * Physical write to a file handle.
+ */
+static int
+__os_physwrite(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ size_t offset;
+ ssize_t nw;
+ int ret;
+ u_int8_t *taddr;
+
+#if defined(HAVE_FILESYSTEM_NOTZERO) && defined(DIAGNOSTIC)
+ if (__os_fs_notzero()) {
+ struct stat sb;
+ off_t cur_off;
+
+ DB_ASSERT(fstat(fhp->fd, &sb) != -1 &&
+ (cur_off = lseek(fhp->fd, (off_t)0, SEEK_CUR)) != -1 &&
+ cur_off <= sb.st_size);
+ }
+#endif
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nw, offset += nw)
+retry: if ((nw = DB_GLOBAL(j_write) != NULL ?
+ DB_GLOBAL(j_write)(fhp->fd, taddr, len - offset) :
+ write(fhp->fd, taddr, len - offset)) < 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "write: 0x%x, %lu: %s", taddr,
+			    (u_long)(len - offset), strerror(ret));
+ return (ret);
+ }
+ *nwp = len;
+ return (0);
+}
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+/*
+ * __os_zerofill --
+ * Zero out bytes in the file.
+ *
+ *	On some systems, pages allocated by writing past end-of-file are
+ *	not zeroed.  Recovery could theoretically be fooled by a page
+ *	showing up that contained garbage.  To avoid this, we have to
+ *	write the pages out to disk and flush them.  The flush is needed
+ *	because, without a sync, a later page might reach the disk first;
+ *	if we crashed at that moment, this page would appear allocated
+ *	(the file already extends past it) even though it was never
+ *	written and may contain garbage.
+ */
+static int
+__os_zerofill(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ off_t stat_offset, write_offset;
+ size_t blen, nw;
+ u_int32_t bytes, mbytes;
+ int group_sync, need_free, ret;
+ u_int8_t buf[8 * 1024], *bp;
+
+ /* Calculate the byte offset of the next write. */
+ write_offset = (off_t)fhp->pgno * fhp->pgsize + fhp->offset;
+
+ /* Stat the file. */
+ if ((ret = __os_ioinfo(dbenv, NULL, fhp, &mbytes, &bytes, NULL)) != 0)
+ return (ret);
+ stat_offset = (off_t)mbytes * MEGABYTE + bytes;
+
+ /* Check if the file is large enough. */
+ if (stat_offset >= write_offset)
+ return (0);
+
+ /* Get a large buffer if we're writing lots of data. */
+#undef ZF_LARGE_WRITE
+#define ZF_LARGE_WRITE (64 * 1024)
+ if (write_offset - stat_offset > ZF_LARGE_WRITE) {
+ if ((ret = __os_calloc(dbenv, 1, ZF_LARGE_WRITE, &bp)) != 0)
+ return (ret);
+ blen = ZF_LARGE_WRITE;
+ need_free = 1;
+ } else {
+ bp = buf;
+ blen = sizeof(buf);
+ need_free = 0;
+ memset(buf, 0, sizeof(buf));
+ }
+
+ /* Seek to the current end of the file. */
+ if ((ret = __os_seek(
+ dbenv, fhp, MEGABYTE, mbytes, bytes, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ /*
+ * Hash is the only access method that allocates groups of pages. Hash
+ * uses the existence of the last page in a group to signify the entire
+ * group is OK; so, write all the pages but the last one in the group,
+ * flush them to disk, then write the last one to disk and flush it.
+ */
+ for (group_sync = 0; stat_offset < write_offset; group_sync = 1) {
+ if (write_offset - stat_offset <= blen) {
+ blen = (size_t)(write_offset - stat_offset);
+ if (group_sync && (ret = __os_fsync(dbenv, fhp)) != 0)
+ goto err;
+ }
+ if ((ret = __os_physwrite(dbenv, fhp, bp, blen, &nw)) != 0)
+ goto err;
+ stat_offset += blen;
+ }
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+ goto err;
+
+ /* Seek back to where we started. */
+ mbytes = (u_int32_t)(write_offset / MEGABYTE);
+ bytes = (u_int32_t)(write_offset % MEGABYTE);
+ ret = __os_seek(dbenv, fhp, MEGABYTE, mbytes, bytes, 0, DB_OS_SEEK_SET);
+
+err: if (need_free)
+ __os_free(dbenv, bp);
+ return (ret);
+}
+#endif
diff --git a/libdb/os/os_seek.c b/libdb/os/os_seek.c
new file mode 100644
index 0000000..339836d
--- /dev/null
+++ b/libdb/os/os_seek.c
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ *
+ * PUBLIC: int __os_seek __P((DB_ENV *,
+ * PUBLIC: DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ off_t offset;
+ int ret, whence;
+
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (DB_GLOBAL(j_seek) != NULL)
+ ret = DB_GLOBAL(j_seek)(fhp->fd,
+ pgsize, pageno, relative, isrewind, whence);
+ else {
+ offset = (off_t)pgsize * pageno + relative;
+ if (isrewind)
+ offset = -offset;
+ do {
+ ret = lseek(fhp->fd, offset, whence) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
+ }
+
+ if (ret == 0) {
+ fhp->pgsize = pgsize;
+ fhp->pgno = pageno;
+ fhp->offset = relative;
+ } else
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+
+ return (ret);
+}
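+
+/*
+ * Worked example: with pgsize == 4096, pageno == 3 and relative == 128,
+ * the byte offset is 3 * 4096 + 128 == 12416, so
+ *
+ *	__os_seek(dbenv, fhp, 4096, 3, 128, 0, DB_OS_SEEK_SET);
+ *
+ * behaves like lseek(fhp->fd, (off_t)12416, SEEK_SET), and the
+ * pgsize/pgno/offset fields cached in the DB_FH let later callers (for
+ * example __os_zerofill) recompute the same position.
+ */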
diff --git a/libdb/os/os_sleep.c b/libdb/os/os_sleep.c
new file mode 100644
index 0000000..fb1b68d
--- /dev/null
+++ b/libdb/os/os_sleep.c
@@ -0,0 +1,80 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#ifdef HAVE_VXWORKS
+#include <sys/times.h>
+#include <time.h>
+#include <selectLib.h>
+#else
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH_SYS_TIME */
+#endif /* HAVE_VXWORKS */
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ *
+ * PUBLIC: int __os_sleep __P((DB_ENV *, u_long, u_long));
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ struct timeval t;
+ int ret;
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; usecs -= 1000000)
+ ++secs;
+
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
+
+ /*
+ * It's important that we yield the processor here so that other
+ * processes or threads are permitted to run.
+ */
+ t.tv_sec = secs;
+ t.tv_usec = usecs;
+ do {
+ ret = select(0, NULL, NULL, NULL, &t) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "select: %s", strerror(ret));
+
+ return (ret);
+}
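+
+/*
+ * Example: __os_sleep(dbenv, 0, 250000) blocks for roughly a quarter of
+ * a second, and a denormalized call such as __os_sleep(dbenv, 0, 3500000)
+ * is folded by the loop above into 3 seconds and 500000 microseconds, so
+ * callers need not normalize the pair themselves.
+ */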
diff --git a/libdb/os/os_spin.c b/libdb/os/os_spin.c
new file mode 100644
index 0000000..872f523
--- /dev/null
+++ b/libdb/os/os_spin.c
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+#include <sys/pstat.h>
+#endif
+
+#include <limits.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+static int __os_pstat_getdynamic __P((void));
+
+/*
+ * __os_pstat_getdynamic --
+ * HP/UX.
+ */
+static int
+__os_pstat_getdynamic()
+{
+ struct pst_dynamic psd;
+
+ return (pstat_getdynamic(&psd,
+ sizeof(psd), (size_t)1, 0) == -1 ? 1 : psd.psd_proc_cnt);
+}
+#endif
+
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+static int __os_sysconf __P((void));
+
+/*
+ * __os_sysconf --
+ * Solaris, Linux.
+ */
+static int
+__os_sysconf()
+{
+ long nproc;
+
+ return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? (int)nproc : 1);
+}
+#endif
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ *
+ * PUBLIC: int __os_spin __P((DB_ENV *));
+ */
+int
+__os_spin(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * If the application specified a value or we've already figured it
+ * out, return it.
+ *
+ * XXX
+ * We don't want to repeatedly call the underlying function because
+ * it can be expensive (e.g., requiring multiple filesystem accesses
+ * under Debian Linux).
+ */
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
+
+ dbenv->tas_spins = 1;
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+ dbenv->tas_spins = __os_pstat_getdynamic();
+#endif
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+ dbenv->tas_spins = __os_sysconf();
+#endif
+
+ /*
+ * Spin 50 times per processor, we have anecdotal evidence that this
+ * is a reasonable value.
+ */
+ if (dbenv->tas_spins != 1)
+ dbenv->tas_spins *= 50;
+
+ return (dbenv->tas_spins);
+}
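+
+/*
+ * Worked example: on a host where sysconf(_SC_NPROCESSORS_ONLN) reports
+ * 4 processors, the first call sets dbenv->tas_spins to 4 * 50 == 200 and
+ * caches it; later calls simply return 200. On a uniprocessor the
+ * multiplier is skipped and the value stays 1, so the mutex code falls
+ * back to blocking almost immediately.
+ */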
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ *
+ * PUBLIC: void __os_yield __P((DB_ENV*, u_long));
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
+ return;
+ (void)__os_sleep(dbenv, 0, usecs);
+}
diff --git a/libdb/os/os_stat.c b/libdb/os/os_stat.c
new file mode 100644
index 0000000..6d0426b
--- /dev/null
+++ b/libdb/os/os_stat.c
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ int ret;
+ struct stat sb;
+
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+
+ do {
+ ret =
+#ifdef HAVE_VXWORKS
+ stat((char *)path, &sb);
+#else
+ stat(path, &sb);
+#endif
+ if (ret != 0)
+ ret = __os_get_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
+
+#if !defined(S_ISDIR) || defined(STAT_MACROS_BROKEN)
+#undef S_ISDIR
+#ifdef _S_IFDIR
+#define S_ISDIR(m) (_S_IFDIR & (m))
+#else
+#define S_ISDIR(m) (((m) & 0170000) == 0040000)
+#endif
+#endif
+ if (isdirp != NULL)
+ *isdirp = S_ISDIR(sb.st_mode);
+
+ return (0);
+}
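+
+/*
+ * A minimal usage sketch for a caller that only cares whether a path is
+ * a usable directory (the same pattern __os_tmpdir uses when it walks its
+ * static list of candidate directories):
+ *
+ *	int isdir;
+ *
+ *	if (__os_exists("/var/tmp", &isdir) == 0 && isdir != 0)
+ *		... the path exists and is a directory ...
+ */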
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ struct stat sb;
+
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+retry:
+ if (fstat(fhp->fd, &sb) == -1) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "fstat: %s", strerror(ret));
+ return (ret);
+ }
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t)(sb.st_size / MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t)(sb.st_size % MEGABYTE);
+
+ /*
+ * Return the underlying filesystem blocksize, if available.
+ *
+ * XXX
+ * Check for a 0 size -- the HP MPE/iX architecture has st_blksize,
+ * but it's always 0.
+ */
+#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
+ if (iosizep != NULL && (*iosizep = sb.st_blksize) == 0)
+ *iosizep = DB_DEF_IOSIZE;
+#else
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+#endif
+ return (0);
+}
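+
+/*
+ * Worked example: a file of exactly 5 megabytes (5,242,880 bytes, with
+ * MEGABYTE == 1048576) is reported as *mbytesp == 5, *bytesp == 0, while
+ * a 5,242,881-byte file is reported as *mbytesp == 5, *bytesp == 1.
+ * Callers such as __os_zerofill rebuild the size as
+ * (off_t)mbytes * MEGABYTE + bytes, which keeps the arithmetic safe where
+ * off_t is wider than u_int32_t.
+ */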
diff --git a/libdb/os/os_tmpdir.c b/libdb/os/os_tmpdir.c
new file mode 100644
index 0000000..e09385e
--- /dev/null
+++ b/libdb/os/os_tmpdir.c
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef macintosh
+#include <TFileSpec.h>
+#endif
+
+/*
+ * __os_tmpdir --
+ * Set the temporary directory path.
+ *
+ * The order of items in the list structure and the order of checks in
+ * the environment are documented.
+ *
+ * PUBLIC: int __os_tmpdir __P((DB_ENV *, u_int32_t));
+ */
+int
+__os_tmpdir(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ int isdir;
+
+ /*
+ * !!!
+ * Don't change this to:
+ *
+ * static const char * const list[]
+ *
+ * because it creates a text relocation in position independent code.
+ */
+ static const char * list[] = {
+ "/var/tmp",
+ "/usr/tmp",
+ "/temp", /* Windows. */
+ "/tmp",
+ "C:/temp", /* Windows. */
+ "C:/tmp", /* Windows. */
+ NULL
+ };
+ const char * const *lp, *p;
+
+ /* Use the environment if it's permitted and initialized. */
+ if (LF_ISSET(DB_USE_ENVIRON) ||
+ (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) {
+ if ((p = getenv("TMPDIR")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TMPDIR environment variable");
+ return (EINVAL);
+ }
+ /* Windows */
+ if (p == NULL && (p = getenv("TEMP")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TEMP environment variable");
+ return (EINVAL);
+ }
+ /* Windows */
+ if (p == NULL && (p = getenv("TMP")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal TMP environment variable");
+ return (EINVAL);
+ }
+ /* Macintosh */
+ if (p == NULL &&
+ (p = getenv("TempFolder")) != NULL && p[0] == '\0') {
+ __db_err(dbenv,
+ "illegal TempFolder environment variable");
+ return (EINVAL);
+ }
+ if (p != NULL)
+ return (__os_strdup(dbenv, p, &dbenv->db_tmp_dir));
+ }
+
+#ifdef macintosh
+ /* Get the path to the temporary folder. */
+ {FSSpec spec;
+
+ if (!Special2FSSpec(kTemporaryFolderType,
+ kOnSystemDisk, 0, &spec))
+ return (__os_strdup(dbenv,
+ FSp2FullPath(&spec), &dbenv->db_tmp_dir));
+ }
+#endif
+#ifdef DB_WIN32
+ /* Get the path to the temporary directory. */
+ {int len;
+ char *eos, temp[MAXPATHLEN + 1];
+
+ if ((len = GetTempPath(sizeof(temp) - 1, temp)) > 2) {
+ eos = &temp[len];
+ *eos-- = '\0';
+ if (*eos == '\\' || *eos == '/')
+ *eos = '\0';
+ if (__os_exists(temp, &isdir) == 0 && isdir != 0)
+ return (__os_strdup(dbenv,
+ temp, &dbenv->db_tmp_dir));
+ }
+ }
+#endif
+
+ /* Step through the static list looking for a possibility. */
+ for (lp = list; *lp != NULL; ++lp)
+ if (__os_exists(*lp, &isdir) == 0 && isdir != 0)
+ return (__os_strdup(dbenv, *lp, &dbenv->db_tmp_dir));
+ return (0);
+}
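+
+/*
+ * Example: with DB_USE_ENVIRON set and TMPDIR=/home/user/scratch in the
+ * environment, dbenv->db_tmp_dir becomes "/home/user/scratch" and the
+ * static list is never consulted; with no relevant environment variables
+ * set, the first existing directory in the list (typically /var/tmp on
+ * Unix systems) is chosen instead. The path shown is illustrative only.
+ */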
diff --git a/libdb/os/os_unlink.c b/libdb/os/os_unlink.c
new file mode 100644
index 0000000..ea3a8a8
--- /dev/null
+++ b/libdb/os/os_unlink.c
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_region_unlink --
+ * Remove a shared memory object file.
+ *
+ * PUBLIC: int __os_region_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_region_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+#ifdef HAVE_QNX
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, path, &newname)) != 0)
+ goto err;
+
+ if ((ret = shm_unlink(newname)) != 0) {
+ ret = __os_get_errno();
+ if (ret != ENOENT)
+ __db_err(dbenv, "shm_unlink: %s: %s",
+ newname, strerror(ret));
+ }
+err:
+ if (newname != NULL)
+ __os_free(dbenv, newname);
+ return (ret);
+#else
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
+
+ return (__os_unlink(dbenv, path));
+#endif
+}
+
+/*
+ * __os_unlink --
+ * Remove a file.
+ *
+ * PUBLIC: int __os_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+ int ret;
+
+retry: ret = DB_GLOBAL(j_unlink) != NULL ?
+ DB_GLOBAL(j_unlink)(path) :
+#ifdef HAVE_VXWORKS
+ unlink((char *)path);
+#else
+ unlink(path);
+#endif
+ if (ret == -1) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ /*
+ * XXX
+ * We really shouldn't be looking at this value ourselves,
+ * but ENOENT usually signals that a file is missing, and
+ * we attempt to unlink things (such as v. 2.x environment
+ * regions, in DB_ENV->remove) that we're expecting not to
+ * be there. Reporting errors in these cases is annoying.
+ */
+#ifdef HAVE_VXWORKS
+ /*
+ * XXX
+ * The results of unlink are file system driver specific
+ * on VxWorks. In the case of removing a file that did
+ * not exist, some, at least, return an error, but with
+ * an errno of 0, not ENOENT.
+ *
+ * Code below falls through to original if-statement only
+ * we didn't get a "successful" error.
+ */
+ if (ret != 0)
+ /* FALLTHROUGH */
+#endif
+ if (ret != ENOENT)
+ __db_err(dbenv, "unlink: %s: %s", path, strerror(ret));
+ }
+
+ return (ret);
+}
diff --git a/libdb/os_vxworks/os_vx_abs.c b/libdb/os_vxworks/os_vx_abs.c
new file mode 100644
index 0000000..7d9bad4
--- /dev/null
+++ b/libdb/os_vxworks/os_vx_abs.c
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "iosLib.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name at all.
+ * Use iosDevFind() to see if name matches any of our devices.
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ return (0);
+ /*
+ * If the routine used a device, then ptail points to the
+ * rest and we are an abs path.
+ */
+ if (ptail != path)
+ return (1);
+ /*
+ * If the path starts with a '/', then we are an absolute path,
+ * using the host machine, otherwise we are not.
+ */
+ return (path[0] == '/');
+}
diff --git a/libdb/os_vxworks/os_vx_config.c b/libdb/os_vxworks/os_vx_config.c
new file mode 100644
index 0000000..6ccc12d
--- /dev/null
+++ b/libdb/os_vxworks/os_vx_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /*
+ * Some VxWorks FS drivers do not zero-fill pages that were never
+ * explicitly written to the file; they give you random garbage,
+ * and that breaks Berkeley DB.
+ */
+ return (1);
+}
diff --git a/libdb/os_vxworks/os_vx_map.c b/libdb/os_vxworks/os_vx_map.c
new file mode 100644
index 0000000..c4ae7fe
--- /dev/null
+++ b/libdb/os_vxworks/os_vx_map.c
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * This code is derived from software contributed to Sleepycat Software by
+ * Frederick G.M. Roeber of Netscape Communications Corp.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * DB uses memory-mapped files for two things:
+ * faster access of read-only databases, and
+ * shared memory for process synchronization and locking.
+ * The code carefully does not mix the two uses. The first-case uses are
+ * actually written such that memory-mapping isn't really required -- it's
+ * merely a convenience -- so we don't have to worry much about it. In the
+ * second case, it's solely used as a shared memory mechanism, so that's
+ * all we have to replace.
+ *
+ * All memory in VxWorks is shared, and a task can allocate memory and keep
+ * notes. So I merely have to allocate memory, remember the "filename" for
+ * that memory, and issue small-integer segment IDs which index the list of
+ * these shared-memory segments. Subsequent opens are checked against the
+ * list of already open segments.
+ */
+typedef struct {
+ void *segment; /* Segment address. */
+ u_int32_t size; /* Segment size. */
+ char *name; /* Segment name. */
+ long segid; /* Segment ID. */
+} os_segdata_t;
+
+static os_segdata_t *__os_segdata; /* Segment table. */
+static int __os_segdata_size; /* Segment table size. */
+
+#define OS_SEGDATA_STARTING_SIZE 16
+#define OS_SEGDATA_INCREMENT 16
+
+static int __os_segdata_allocate
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_find_byname
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_init __P((DB_ENV *));
+static int __os_segdata_new __P((DB_ENV *, int *));
+static int __os_segdata_release __P((DB_ENV *, REGION *, int));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+
+ if (__os_segdata == NULL)
+ __os_segdata_init(dbenv);
+
+ DB_BEGIN_SINGLE_THREAD;
+
+ /* Try to find an already existing segment. */
+ ret = __os_segdata_find_byname(dbenv, infop->name, infop, rp);
+
+ /*
+ * If we are trying to join a region, it is easy, either we
+ * found it and we return, or we didn't find it and we return
+ * an error that it doesn't exist.
+ */
+ if (!F_ISSET(infop, REGION_CREATE)) {
+ if (ret != 0) {
+ __db_err(dbenv, "segment %s does not exist",
+ infop->name);
+ ret = EAGAIN;
+ }
+ goto out;
+ }
+
+ /*
+ * If we get here, we are trying to create the region.
+ * There are several things to consider:
+ * - if we have an error (not a found or not-found value), return.
+ * - they better have shm_key set.
+ * - if the region is already there (ret == 0 from above),
+ * assume the application crashed and we're restarting.
+ * Delete the old region.
+ * - try to create the region.
+ */
+ if (ret != 0 && ret != ENOENT)
+ goto out;
+
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "no base shared memory ID specified");
+ ret = EAGAIN;
+ goto out;
+ }
+ if (ret == 0 && __os_segdata_release(dbenv, rp, 1) != 0) {
+ __db_err(dbenv,
+ "key: %ld: shared memory region already exists",
+ dbenv->shm_key + (infop->id - 1));
+ ret = EAGAIN;
+ goto out;
+ }
+
+ ret = __os_segdata_allocate(dbenv, infop->name, infop, rp);
+out:
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
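+
+/*
+ * Example: with dbenv->shm_key == 10, the primary region (infop->id == 1)
+ * is created under segment id 10, the next region under 11, and so on.
+ * A second task joining the environment finds the matching entry by name
+ * and segment id in __os_segdata and is handed the same allocated block
+ * through infop->addr, which is what stands in for shared memory on
+ * VxWorks.
+ */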
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ /*
+ * If just detaching, there is no mapping to discard.
+ * If destroying, remove the region.
+ */
+ if (destroy)
+ return (__os_segdata_release(dbenv, infop->rp, 0));
+ return (0);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(addr, NULL);
+ COMPQUIET(len, 0);
+ return (EINVAL);
+}
+
+/*
+ * __os_segdata_init --
+ * Initializes the library's table of shared memory segments.
+ * Called once on the first time through __os_segdata_new().
+ */
+static int
+__os_segdata_init(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (__os_segdata != NULL) {
+ __db_err(dbenv, "shared memory segment already exists");
+ return (EEXIST);
+ }
+
+ /*
+ * The lock init call returns a locked lock.
+ */
+ DB_BEGIN_SINGLE_THREAD;
+ __os_segdata_size = OS_SEGDATA_STARTING_SIZE;
+ ret = __os_calloc(dbenv,
+ __os_segdata_size, sizeof(os_segdata_t), &__os_segdata);
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
+
+/*
+ * __os_segdata_destroy --
+ * Destroys the library's table of shared memory segments. It also
+ * frees all linked data: the segments themselves, and their names.
+ * Currently not called. This function should be called if the
+ * user creates a function to unload or shutdown.
+ *
+ * PUBLIC: int __os_segdata_destroy __P((DB_ENV *));
+ */
+int
+__os_segdata_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ os_segdata_t *p;
+ int i;
+
+ if (__os_segdata == NULL)
+ return (0);
+
+ DB_BEGIN_SINGLE_THREAD;
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL) {
+ __os_free(dbenv, p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(dbenv, p->segment);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ }
+
+ __os_free(dbenv, __os_segdata);
+ __os_segdata = NULL;
+ __os_segdata_size = 0;
+ DB_END_SINGLE_THREAD;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_allocate --
+ * Creates a new segment of the specified size, optionally with the
+ * specified name.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_allocate(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ int id, ret;
+
+ if ((ret = __os_segdata_new(dbenv, &id)) != 0)
+ return (ret);
+
+ p = &__os_segdata[id];
+ if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
+ return (ret);
+ if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
+ __os_free(dbenv, p->segment);
+ p->segment = NULL;
+ return (ret);
+ }
+ p->size = rp->size;
+ p->segid = dbenv->shm_key + infop->id - 1;
+
+ infop->addr = p->segment;
+ rp->segid = id;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_new --
+ * Finds a new segdata slot. Does not initialise it, so the id returned
+ * is only valid until you call this again.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_new(dbenv, segidp)
+ DB_ENV *dbenv;
+ int *segidp;
+{
+ os_segdata_t *p;
+ int i, newsize, ret;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->segment == NULL) {
+ *segidp = i;
+ return (0);
+ }
+ }
+
+ /*
+ * No more free slots, expand.
+ */
+ newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
+ if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
+ &__os_segdata)) != 0)
+ return (ret);
+ memset(&__os_segdata[__os_segdata_size],
+ 0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));
+
+ *segidp = __os_segdata_size;
+ __os_segdata_size = newsize;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_find_byname --
+ * Finds a segment by its name and shm_key.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ *
+ * PUBLIC: __os_segdata_find_byname
+ * PUBLIC: __P((DB_ENV *, const char *, REGINFO *, REGION *));
+ */
+static int
+__os_segdata_find_byname(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ long segid;
+ int i;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (name == NULL) {
+ __db_err(dbenv, "no segment name given");
+ return (EAGAIN);
+ }
+
+ /*
+ * If we are creating the region, compute the segid.
+ * If we are joining the region, we use the segid in the
+ * index we are given.
+ */
+ if (F_ISSET(infop, REGION_CREATE))
+ segid = dbenv->shm_key + (infop->id - 1);
+ else {
+ if (rp->segid >= __os_segdata_size ||
+ rp->segid == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "Invalid segment id given");
+ return (EAGAIN);
+ }
+ segid = __os_segdata[rp->segid].segid;
+ }
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL && strcmp(name, p->name) == 0 &&
+ p->segid == segid) {
+ infop->addr = p->segment;
+ rp->segid = i;
+ return (0);
+ }
+ }
+ return (ENOENT);
+}
+
+/*
+ * __os_segdata_release --
+ * Free a segdata entry.
+ */
+static int
+__os_segdata_release(dbenv, rp, is_locked)
+ DB_ENV *dbenv;
+ REGION *rp;
+ int is_locked;
+{
+ os_segdata_t *p;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (rp->segid < 0 || rp->segid >= __os_segdata_size) {
+ __db_err(dbenv, "segment id %ld out of range", rp->segid);
+ return (EINVAL);
+ }
+
+ if (is_locked == 0)
+ DB_BEGIN_SINGLE_THREAD;
+ p = &__os_segdata[rp->segid];
+ if (p->name != NULL) {
+ __os_free(dbenv, p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(dbenv, p->segment);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ if (is_locked == 0)
+ DB_END_SINGLE_THREAD;
+
+ /* Any shrink-table logic could go here */
+
+ return (0);
+}
diff --git a/libdb/os_win32/os_abs.c b/libdb/os_win32/os_abs.c
new file mode 100644
index 0000000..b31cce9
--- /dev/null
+++ b/libdb/os_win32/os_abs.c
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ /*
+ * !!!
+ * Check for drive specifications, e.g., "C:". In addition, the path
+ * separator used by the win32 DB (PATH_SEPARATOR) is \; look for both
+ * / and \ since these are user-input paths.
+ */
+ if (isalpha(path[0]) && path[1] == ':')
+ path += 2;
+ return (path[0] == '/' || path[0] == '\\');
+}
diff --git a/libdb/os_win32/os_clock.c b/libdb/os_win32/os_clock.c
new file mode 100644
index 0000000..472f7a0
--- /dev/null
+++ b/libdb/os_win32/os_clock.c
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include <sys/types.h>
+#include <sys/timeb.h>
+#include <string.h>
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+ struct _timeb now;
+
+ _ftime(&now);
+ if (secsp != NULL)
+ *secsp = (u_int32_t)now.time;
+ if (usecsp != NULL)
+ *usecsp = now.millitm * 1000;
+ return (0);
+}
diff --git a/libdb/os_win32/os_config.c b/libdb/os_win32/os_config.c
new file mode 100644
index 0000000..dd9493b
--- /dev/null
+++ b/libdb/os_win32/os_config.c
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ */
+int
+__os_fs_notzero()
+{
+ /*
+ * Windows/NT zero-fills pages that were never explicitly written to
+ * the file. Windows 95/98 gives you random garbage, and that breaks
+ * Berkeley DB.
+ */
+ return (__os_is_winnt() ? 0 : 1);
+}
diff --git a/libdb/os_win32/os_dir.c b/libdb/os_win32/os_dir.c
new file mode 100644
index 0000000..034c4b5
--- /dev/null
+++ b/libdb/os_win32/os_dir.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory.
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct _finddata_t fdata;
+#ifdef _WIN64
+ intptr_t dirhandle;
+#else
+ long dirhandle;
+#endif
+ int arraysz, cnt, finished, ret;
+ char **names, filespec[MAXPATHLEN];
+
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
+
+ (void)snprintf(filespec, sizeof(filespec), "%s/*", dir);
+ if ((dirhandle = _findfirst(filespec, &fdata)) == -1)
+ return (__os_get_errno());
+
+ names = NULL;
+ finished = 0;
+ for (arraysz = cnt = 0; finished != 1; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, fdata.name, &names[cnt])) != 0)
+ goto nomem;
+ if (_findnext(dirhandle, &fdata) != 0)
+ finished = 1;
+ }
+ _findclose(dirhandle);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+nomem: if (names != NULL)
+ __os_dirfree(dbenv, names, cnt);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free the list of files.
+ */
+void
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
+ char **names;
+ int cnt;
+{
+ if (DB_GLOBAL(j_dirfree) != NULL) {
+ DB_GLOBAL(j_dirfree)(names, cnt);
+ return;
+ }
+
+ while (cnt > 0)
+ __os_free(dbenv, names[--cnt]);
+ __os_free(dbenv, names);
+}
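+
+/*
+ * A minimal usage sketch; the two routines are meant to be paired, with
+ * __os_dirfree releasing exactly what __os_dirlist allocated ("C:/db_home"
+ * is an illustrative path only):
+ *
+ *	char **names;
+ *	int cnt, i, ret;
+ *
+ *	if ((ret = __os_dirlist(dbenv, "C:/db_home", &names, &cnt)) != 0)
+ *		return (ret);
+ *	for (i = 0; i < cnt; ++i)
+ *		... examine names[i] ...
+ *	__os_dirfree(dbenv, names, cnt);
+ */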
diff --git a/libdb/os_win32/os_errno.c b/libdb/os_win32/os_errno.c
new file mode 100644
index 0000000..a030925
--- /dev/null
+++ b/libdb/os_win32/os_errno.c
@@ -0,0 +1,145 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno --
+ * Return the value of errno.
+ */
+int
+__os_get_errno()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno.
+ */
+void
+__os_set_errno(evalue)
+ int evalue;
+{
+ errno = evalue;
+}
+
+/*
+ * __os_win32_errno --
+ * Return the last Windows error as an errno.
+ * We give generic error returns:
+ *
+ * EFAULT means Win* call failed,
+ * and GetLastError provided no extra info.
+ *
+ * EIO means error on Win* call,
+ * and we were unable to provide a meaningful errno for this Windows
+ * error. More information is only available by setting a breakpoint
+ * here.
+ *
+ * PUBLIC: #if defined(DB_WIN32)
+ * PUBLIC: int __os_win32_errno __P((void));
+ * PUBLIC: #endif
+ */
+int
+__os_win32_errno(void)
+{
+ DWORD last_error;
+ int ret;
+
+ /* Ignore errno - we used to check it here. */
+
+ last_error = GetLastError();
+
+ /*
+ * Take our best guess at translating some of the Windows error
+ * codes. We really care about only a few of these.
+ */
+ switch (last_error) {
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_PATH_NOT_FOUND:
+ ret = ENOENT;
+ break;
+
+ case ERROR_NO_MORE_FILES:
+ case ERROR_TOO_MANY_OPEN_FILES:
+ ret = EMFILE;
+ break;
+
+ case ERROR_ACCESS_DENIED:
+ ret = EPERM;
+ break;
+
+ case ERROR_INVALID_HANDLE:
+ ret = EBADF;
+ break;
+
+ case ERROR_NOT_ENOUGH_MEMORY:
+ ret = ENOMEM;
+ break;
+
+ case ERROR_DISK_FULL:
+ ret = ENOSPC;
+ break;
+
+ case ERROR_ARENA_TRASHED:
+ case ERROR_BAD_COMMAND:
+ case ERROR_BAD_ENVIRONMENT:
+ case ERROR_BAD_FORMAT:
+ case ERROR_GEN_FAILURE:
+ case ERROR_INVALID_ACCESS:
+ case ERROR_INVALID_BLOCK:
+ case ERROR_INVALID_DATA:
+ case ERROR_READ_FAULT:
+ case ERROR_WRITE_FAULT:
+ ret = EFAULT;
+ break;
+
+ case ERROR_FILE_EXISTS:
+ case ERROR_ALREADY_EXISTS:
+ ret = EEXIST;
+ break;
+
+ case ERROR_NOT_SAME_DEVICE:
+ ret = EXDEV;
+ break;
+
+ case ERROR_WRITE_PROTECT:
+ ret = EACCES;
+ break;
+
+ case ERROR_NOT_READY:
+ ret = EBUSY;
+ break;
+
+ case ERROR_LOCK_VIOLATION:
+ case ERROR_SHARING_VIOLATION:
+ ret = EBUSY;
+ break;
+
+ case ERROR_RETRY:
+ ret = EINTR;
+ break;
+
+ case 0:
+ ret = EFAULT;
+ break;
+
+ default:
+ ret = EIO; /* Generic error. */
+ break;
+ }
+
+ return (ret);
+}
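+
+/*
+ * A typical calling pattern in this directory: make the Win32 call, and
+ * on failure translate GetLastError() into a POSIX-style errno before
+ * reporting it. FlushFileBuffers is just one example of such a call:
+ *
+ *	if (!FlushFileBuffers(fhp->handle)) {
+ *		ret = __os_win32_errno();
+ *		__db_err(dbenv, "FlushFileBuffers: %s", strerror(ret));
+ *	}
+ *
+ * Note that the mapping is intentionally lossy: several distinct Windows
+ * errors (e.g. ERROR_LOCK_VIOLATION and ERROR_SHARING_VIOLATION) collapse
+ * onto the single errno EBUSY.
+ */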
diff --git a/libdb/os_win32/os_fid.c b/libdb/os_win32/os_fid.c
new file mode 100644
index 0000000..4d73710
--- /dev/null
+++ b/libdb/os_win32/os_fid.c
@@ -0,0 +1,143 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file.
+ */
+int
+__os_fileid(dbenv, fname, unique_okay, fidp)
+ DB_ENV *dbenv;
+ const char *fname;
+ int unique_okay;
+ u_int8_t *fidp;
+{
+ size_t i;
+ u_int32_t tmp;
+ u_int8_t *p;
+ int ret;
+
+ /*
+ * The documentation for GetFileInformationByHandle() states that the
+ * inode-type numbers are not constant between processes. Actually,
+ * they are, they're the NTFS MFT indexes. So, this works on NTFS,
+ * but perhaps not on other platforms, and perhaps not over a network.
+ * Can't think of a better solution right now.
+ */
+ DB_FH fh;
+ BY_HANDLE_FILE_INFORMATION fi;
+ BOOL retval = FALSE;
+
+ DB_ASSERT(fname != NULL);
+
+ /* Clear the buffer. */
+ memset(fidp, 0, DB_FILE_ID_LEN);
+
+ /*
+ * Initialize/increment the serial number we use to help avoid
+ * fileid collisions. Note that we don't bother with locking;
+ * it's unpleasant to do from down in here, and if we race on
+ * this no real harm will be done, since the finished fileid
+ * has so many other components.
+ *
+ * We increment by 100000 on each call as a simple way of
+ * randomizing; simply incrementing seems potentially less useful
+ * if pids are also simply incremented, since this is process-local
+ * and we may be one of a set of processes starting up. 100000
+ * pushes us out of pid space on most platforms, and has few
+ * interesting properties in base 2.
+ */
+ if (fid_serial == SERIAL_INIT)
+ __os_id(&fid_serial);
+ else
+ fid_serial += 100000;
+
+ /*
+ * First we open the file, because we're not given a handle to it.
+ * If we can't open it, we're in trouble.
+ */
+ if ((ret = __os_open(dbenv, fname, DB_OSO_RDONLY, _S_IREAD, &fh)) != 0)
+ return (ret);
+
+ /* File open, get its info */
+ if ((retval = GetFileInformationByHandle(fh.handle, &fi)) == FALSE)
+ ret = __os_win32_errno();
+ __os_closehandle(dbenv, &fh);
+
+ if (retval == FALSE)
+ return (ret);
+
+ /*
+ * We want the three 32-bit words which tell us the volume ID and
+ * the file ID. We make a crude attempt to copy the bytes over to
+ * the callers buffer.
+ *
+ * We don't worry about byte sexing or the actual variable sizes.
+ *
+ * When this routine is called from the DB access methods, it's only
+ * called once -- whatever ID is generated when a database is created
+ * is stored in the database file's metadata, and that is what is
+ * saved in the mpool region's information to uniquely identify the
+ * file.
+ *
+ * When called from the mpool layer this routine will be called each
+ * time a new thread of control wants to share the file, which makes
+ * things tougher. As far as byte sexing goes, since the mpool region
+ * lives on a single host, there's no issue of that -- the entire
+ * region is byte sex dependent. As far as variable sizes go, we make
+ * the simplifying assumption that 32-bit and 64-bit processes will
+ * get the same 32-bit values if we truncate any returned 64-bit value
+ * to a 32-bit value.
+ */
+ tmp = (u_int32_t)fi.nFileIndexLow;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ tmp = (u_int32_t)fi.nFileIndexHigh;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ if (unique_okay) {
+ /*
+ * Use the system time to try to get a unique value
+ * within this process. A millisecond counter
+ * overflows 32 bits in about 49 days. So we use 8
+ * bytes, and don't bother with the volume ID, which
+ * is not very useful for our purposes.
+ */
+ SYSTEMTIME st;
+
+ GetSystemTime(&st);
+ tmp = (st.wYear - 1900) * 12 + (st.wMonth - 1);
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ tmp = ((((st.wDay - 1) * 24 + st.wHour) * 60 +
+ st.wMinute) * 60 + st.wSecond) * 1000 +
+ st.wMilliseconds;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
+ i > 0; --i)
+ *fidp++ = *p++;
+ } else {
+ tmp = (u_int32_t)fi.dwVolumeSerialNumber;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+ }
+
+ return (0);
+}
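+
+/*
+ * Layout sketch of the DB_FILE_ID_LEN-byte buffer filled above. In the
+ * default case:
+ *
+ *	bytes  0- 3	nFileIndexLow
+ *	bytes  4- 7	nFileIndexHigh
+ *	bytes  8-11	dwVolumeSerialNumber
+ *	remainder	zero (from the initial memset)
+ *
+ * With unique_okay set, the bytes after the file index instead hold a
+ * month counter, a millisecond-of-month counter and fid_serial, trading
+ * volume information for per-process uniqueness.
+ */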
diff --git a/libdb/os_win32/os_fsync.c b/libdb/os_win32/os_fsync.c
new file mode 100644
index 0000000..b99fb61
--- /dev/null
+++ b/libdb/os_win32/os_fsync.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_fsync(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ BOOL success;
+ int ret;
+
+ /*
+ * Do nothing if the file descriptor has been marked as not requiring
+ * any sync to disk.
+ */
+ if (F_ISSET(fhp, DB_FH_NOSYNC))
+ return (0);
+
+ ret = 0;
+ do {
+ if (DB_GLOBAL(j_fsync) != NULL)
+ success = (DB_GLOBAL(j_fsync)(fhp->fd) == 0);
+ else {
+ success = FlushFileBuffers(fhp->handle);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+ } while (!success && (ret = __os_get_errno()) == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "fsync %s", strerror(ret));
+ return (ret);
+}
diff --git a/libdb/os_win32/os_handle.c b/libdb/os_win32/os_handle.c
new file mode 100644
index 0000000..3e36daa
--- /dev/null
+++ b/libdb/os_win32/os_handle.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->handle = INVALID_HANDLE_VALUE;
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+ fhp->fd = open(name, flags, mode);
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
+ } else {
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
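+
+/*
+ * Worked example of the retry policy above: on ENFILE, EMFILE or ENOSPC
+ * the loop sleeps nrepeat * 2 seconds after each failed attempt (2, then
+ * 4, then 6 seconds, roughly 12 seconds in total across three attempts),
+ * which is where the "waiting up to 12 seconds" in the comment comes
+ * from; EINTR, by contrast, retries immediately and does not count
+ * against the limit.
+ */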
+
+/*
+ * __os_closehandle --
+ * Close a file.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_closehandle(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ BOOL success;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+ /* Don't close file descriptors that were never opened. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) &&
+ ((fhp->fd != -1) || (fhp->handle != INVALID_HANDLE_VALUE)));
+
+ ret = 0;
+
+ do {
+ if (DB_GLOBAL(j_close) != NULL)
+ success = (DB_GLOBAL(j_close)(fhp->fd) == 0);
+ else if (fhp->handle != INVALID_HANDLE_VALUE) {
+ success = CloseHandle(fhp->handle);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+ else
+ success = (close(fhp->fd) == 0);
+ } while (!success && (ret = __os_get_errno()) == EINTR);
+
+ /*
+ * Smash the POSIX file descriptor -- it's never tested, but we want
+ * to catch any mistakes.
+ */
+ fhp->fd = -1;
+ fhp->handle = INVALID_HANDLE_VALUE;
+ F_CLR(fhp, DB_FH_VALID);
+
+ return (ret);
+}
diff --git a/libdb/os_win32/os_map.c b/libdb/os_win32/os_map.c
new file mode 100644
index 0000000..b81dd4e
--- /dev/null
+++ b/libdb/os_win32/os_map.c
@@ -0,0 +1,338 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+static int __os_map
+ __P((DB_ENV *, char *, REGINFO *, DB_FH *, size_t, int, int, int, void **));
+static int __os_unique_name __P((char *, HANDLE, char *, size_t));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ DB_FH fh;
+ int is_system, ret;
+
+ /*
+ * Try to open/create the file. We DO NOT need to ensure that multiple
+ * threads/processes attempting to simultaneously create the region are
+ * properly ordered, our caller has already taken care of that.
+ */
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_DIRECT |
+ (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
+ infop->mode, &fh)) != 0) {
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * On Windows/9X, files that are opened by multiple processes do not
+ * share data correctly. For this reason, the DB_SYSTEM_MEM flag is
+ * implied for any application that does not specify the DB_PRIVATE
+ * flag.
+ */
+ is_system = F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) ||
+ (!F_ISSET(dbenv, DB_ENV_PRIVATE) && __os_is_winnt() == 0);
+
+ /*
+ * Map the file in. If we're creating an in-system-memory region,
+ * specify a segment ID (which is never used again) so that the
+ * calling code writes out the REGENV_REF structure to the primary
+ * environment file.
+ */
+ ret = __os_map(dbenv, infop->name, infop, &fh, rp->size,
+ 1, is_system, 0, &infop->addr);
+ if (ret == 0 && is_system == 1)
+ rp->segid = 1;
+
+ (void)__os_closehandle(dbenv, &fh);
+
+ return (ret);
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region.
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ int ret, t_ret;
+
+ if (infop->wnt_handle != NULL) {
+ (void)CloseHandle(*((HANDLE*)(infop->wnt_handle)));
+ __os_free(dbenv, infop->wnt_handle);
+ }
+
+ ret = !UnmapViewOfFile(infop->addr) ? __os_win32_errno() : 0;
+ if (ret != 0)
+ __db_err(dbenv, "UnmapViewOfFile: %s", strerror(ret));
+
+ if (!F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, infop->name);
+ if ((t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addr)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addr;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(path, len, 0, is_rdonly, addr));
+
+ return (__os_map(dbenv, path, NULL, fhp, len, 0, 0, is_rdonly, addr));
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
+
+ return (!UnmapViewOfFile(addr) ? __os_win32_errno() : 0);
+}
+
+/*
+ * __os_unique_name --
+ * Create a unique identifying name from a pathname (may be absolute or
+ * relative) and/or a file descriptor.
+ *
+ * The name returned must be unique (different files map to different
+ * names), and repeatable (same files, map to same names). It's not
+ * so easy to do by name. Should handle not only:
+ *
+ * foo.bar == ./foo.bar == c:/whatever_path/foo.bar
+ *
+ * but also understand that:
+ *
+ * foo.bar == Foo.Bar (FAT file system)
+ * foo.bar != Foo.Bar (NTFS)
+ *
+ * The best solution is to use the file index, found in the file
+ * information structure (similar to UNIX inode #).
+ *
+ * When a file is deleted, its file index may be reused,
+ * but if the unique name has not gone from its namespace,
+ * we may get a conflict. So to ensure some tie in to the
+ * original pathname, we also use the creation time and the
+ * file basename. This is not a perfect system, but it
+ * should work for all but anomalous test cases.
+ *
+ */
+static int
+__os_unique_name(orig_path, hfile, result_path, result_path_len)
+ char *orig_path, *result_path;
+ HANDLE hfile;
+ size_t result_path_len;
+{
+ BY_HANDLE_FILE_INFORMATION fileinfo;
+ char *basename, *p;
+
+ /*
+ * In Windows, pathname components are delimited by '/' or '\', and
+ * if neither is present, we need to strip off leading drive letter
+ * (e.g. c:foo.txt).
+ */
+ basename = strrchr(orig_path, '/');
+ p = strrchr(orig_path, '\\');
+ if (basename == NULL || (p != NULL && p > basename))
+ basename = p;
+ if (basename == NULL)
+ basename = strrchr(orig_path, ':');
+
+ if (basename == NULL)
+ basename = orig_path;
+ else
+ basename++;
+
+ if (!GetFileInformationByHandle(hfile, &fileinfo))
+ return (__os_win32_errno());
+
+ (void)snprintf(result_path, result_path_len,
+ "__db_shmem.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%s",
+ fileinfo.dwVolumeSerialNumber,
+ fileinfo.nFileIndexHigh,
+ fileinfo.nFileIndexLow,
+ fileinfo.ftCreationTime.dwHighDateTime,
+ fileinfo.ftCreationTime.dwLowDateTime,
+ basename);
+
+ return (0);
+}
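+
+/*
+ * Example (all values illustrative): for a file such as
+ * c:\db_home\__db.001, the generated section name has the form
+ *
+ *	__db_shmem.1a2b3c4d.00000000.000004d2.01c879aa.7d3e2100.__db.001
+ *
+ * so two handles that refer to the same underlying file always map to
+ * the same paging-file section name, while files that merely share a
+ * basename do not.
+ */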
+
+/*
+ * __os_map --
+ * The mmap(2) function for Windows.
+ */
+static int
+__os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ char *path;
+ DB_FH *fhp;
+ int is_region, is_system, is_rdonly;
+ size_t len;
+ void **addr;
+{
+ HANDLE hMemory;
+ REGENV *renv;
+ int ret, use_pagefile;
+ char shmem_name[MAXPATHLEN];
+ void *pMemory;
+
+ ret = 0;
+ if (infop != NULL)
+ infop->wnt_handle = NULL;
+
+ use_pagefile = is_region && is_system;
+
+ /*
+ * If creating a region in system space, get a matching name in the
+ * paging file namespace.
+ */
+ if (use_pagefile && (ret = __os_unique_name(
+ path, fhp->handle, shmem_name, sizeof(shmem_name))) != 0)
+ return (ret);
+
+ /*
+ * XXX
+ * DB: We have not implemented copy-on-write here.
+ *
+ * XXX
+ * DB: This code will fail if the library is ever compiled on a 64-bit
+ * machine.
+ *
+ * XXX
+ * If this is an region in system memory, let's try opening using the
+ * OpenFileMapping() first. Why, oh why are we doing this?
+ *
+ * Well, we might be asking the OS for a handle to a pre-existing
+ * memory section, or we might be the first to get here and want the
+ * section created. CreateFileMapping() sounds like it will do both
+ * jobs. But, not so. It seems to mess up making the commit charge to
+ * the process. It incorrectly assumes that when we want to join a
+ * previously existing section, it should make a commit charge
+ * for the whole section. In fact, there is no new committed memory
+ * whatever. The call can fail if there is insufficient memory free
+ * to handle the erroneous commit charge. So, we find that the bogus
+ * commit is not made if we call OpenFileMapping(). So we do that
+ * first, and only call CreateFileMapping() if we're really creating
+ * the section.
+ */
+ hMemory = NULL;
+ if (use_pagefile)
+ hMemory = OpenFileMapping(
+ is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS,
+ 0,
+ shmem_name);
+
+ if (hMemory == NULL)
+ hMemory = CreateFileMapping(
+ use_pagefile ? (HANDLE)-1 : fhp->handle,
+ 0,
+ is_rdonly ? PAGE_READONLY : PAGE_READWRITE,
+ 0, (DWORD)len,
+ use_pagefile ? shmem_name : NULL);
+ if (hMemory == NULL) {
+ ret = __os_win32_errno();
+ __db_err(dbenv, "OpenFileMapping: %s", strerror(ret));
+ return (ret);
+ }
+
+ pMemory = MapViewOfFile(hMemory,
+ (is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS), 0, 0, len);
+ if (pMemory == NULL) {
+ ret = __os_win32_errno();
+ __db_err(dbenv, "MapViewOfFile: %s", strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * XXX
+ * It turns out that the kernel object underlying the named section
+ * is reference counted, but that the call to MapViewOfFile() above
+ * does NOT increment the reference count! So, if we close the handle
+ * here, the kernel deletes the object from the kernel namespace.
+ * When a second process comes along to join the region, the kernel
+ * happily creates a new object with the same name, but completely
+ * different identity. The two processes then have distinct isolated
+ * mapped sections, not at all what was wanted. Not closing the handle
+ * here fixes this problem. We carry the handle around in the region
+ * structure so we can close it when unmap is called. Ignore malloc
+ * errors, it just means we leak the memory.
+ */
+ if (use_pagefile && infop != NULL) {
+ if (__os_malloc(dbenv,
+ sizeof(HANDLE), &infop->wnt_handle) == 0)
+ memcpy(infop->wnt_handle, &hMemory, sizeof(HANDLE));
+ } else
+ CloseHandle(hMemory);
+
+ if (is_region) {
+ /*
+ * XXX
+ * Windows/95 zeroes anonymous memory regions at last close.
+ * This means that the backing file can exist and reference
+ * the region, but the region itself is no longer initialized.
+ * If the caller is capable of creating the region, update
+ * the REGINFO structure so that they do so.
+ */
+ renv = (REGENV *)pMemory;
+ if (renv->magic == 0) {
+ if (F_ISSET(infop, REGION_CREATE_OK))
+ F_SET(infop, REGION_CREATE);
+ else {
+ (void)UnmapViewOfFile(pMemory);
+ pMemory = NULL;
+ ret = EAGAIN;
+ }
+ }
+ }
+
+ *addr = pMemory;
+ return (ret);
+}
diff --git a/libdb/os_win32/os_open.c b/libdb/os_win32/os_open.c
new file mode 100644
index 0000000..ede9742
--- /dev/null
+++ b/libdb/os_win32/os_open.c
@@ -0,0 +1,221 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_open --
+ * Open a file descriptor.
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ DWORD bytesWritten;
+ u_int32_t log_size, pagesize, sectorsize;
+ int access, attr, oflags, share, createflag;
+ int ret, nrepeat;
+ char *drive, dbuf[4]; /* <letter><colon><slosh><nul> */
+
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
+
+ /*
+ * The "public" interface to the __os_open routine passes around POSIX
+ * 1003.1 flags, not DB flags. If the user has defined their own open
+ * interface, use the POSIX flags.
+ */
+ if (DB_GLOBAL(j_open) != NULL) {
+ oflags = O_BINARY | O_NOINHERIT;
+
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ oflags |= _O_SEQUENTIAL;
+ else
+ oflags |= _O_RANDOM;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ oflags |= _O_TEMPORARY;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+ ret = __os_openhandle(dbenv, name, oflags, mode, fhp);
+ if (ret == 0)
+ fhp->handle = _get_osfhandle (fhp->fd);
+
+ return ret;
+ }
+
+ ret = 0;
+
+ if (LF_ISSET(DB_OSO_LOG))
+ log_size = fhp->log_size; /* XXX: Gag. */
+
+ pagesize = fhp->pagesize;
+
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = -1;
+
+ /*
+ * Otherwise, use the Windows/32 CreateFile interface so that we can
+ * play magic games with log files to get data flush effects similar
+ * to the POSIX O_DSYNC flag.
+ *
+ * !!!
+ * We currently ignore the 'mode' argument. It would be possible
+ * to construct a set of security attributes that we could pass to
+ * CreateFile that would accurately represent the mode. In the worst
+ * case, this would require looking up user and all group names and
+ * creating an entry for each. Alternatively, we could call the
+ * _chmod (partial emulation) function after file creation, although
+ * this leaves us with an obvious race. However, these efforts are
+ * largely meaningless on FAT, the most common file system, which
+ * only has a "readable" and "writeable" flag, applying to all users.
+ */
+ access = GENERIC_READ;
+ if (!LF_ISSET(DB_OSO_RDONLY))
+ access |= GENERIC_WRITE;
+
+ share = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ attr = FILE_ATTRIBUTE_NORMAL;
+
+ /*
+ * Reproduce POSIX 1003.1 semantics: if O_CREATE and O_EXCL are both
+ * specified, fail, returning EEXIST, unless we create the file.
+ */
+ if (LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_EXCL))
+ createflag = CREATE_NEW; /* create only if !exist*/
+ else if (!LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_TRUNC))
+ createflag = TRUNCATE_EXISTING; /* truncate, fail if !exist */
+ else if (LF_ISSET(DB_OSO_TRUNC))
+ createflag = CREATE_ALWAYS; /* create and truncate */
+ else if (LF_ISSET(DB_OSO_CREATE))
+ createflag = OPEN_ALWAYS; /* open or create */
+ else
+ createflag = OPEN_EXISTING; /* open only if existing */
+
+ if (LF_ISSET(DB_OSO_LOG)) {
+ F_SET(fhp, DB_FH_NOSYNC);
+ attr |= FILE_FLAG_WRITE_THROUGH;
+ }
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ attr |= FILE_FLAG_SEQUENTIAL_SCAN;
+ else
+ attr |= FILE_FLAG_RANDOM_ACCESS;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ attr |= FILE_FLAG_DELETE_ON_CLOSE;
+
+ /*
+ * We can turn filesystem buffering off if the page size is a
+ * multiple of the disk's sector size. To find the sector size,
+ * we call GetDiskFreeSpace, which expects a drive name like "d:\\"
+ * or NULL for the current disk (i.e., a relative path)
+ */
+ if (LF_ISSET(DB_OSO_DIRECT) && pagesize != 0 && name[0] != '\0') {
+ if (name[1] == ':') {
+ drive = dbuf;
+ snprintf(dbuf, sizeof(dbuf), "%c:\\", name[0]);
+ } else
+ drive = NULL;
+
+ if (GetDiskFreeSpace(drive, NULL, &sectorsize, NULL, NULL) &&
+ pagesize % sectorsize == 0)
+ attr |= FILE_FLAG_NO_BUFFERING;
+ }
+
+ for (nrepeat = 1;; ++nrepeat) {
+ fhp->handle =
+ CreateFile(name, access, share, NULL, createflag, attr, 0);
+ if (fhp->handle == INVALID_HANDLE_VALUE) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_win32_errno();
+ if ((ret != ENFILE && ret != EMFILE && ret != ENOSPC) ||
+ nrepeat > 3)
+ goto err;
+
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ } else
+ break;
+ }
+
+ /*
+ * Special handling needed for log files. To get Windows to not update
+ * the MFT metadata on each write, extend the file to its maximum size.
+ * Windows will allocate all the data blocks and store them in the MFT
+ * (inode) area. In addition, flush the MFT area to disk.
+ * This strategy only works for Win/NT; Win/9X does not
+ * guarantee that the logs will be zero filled.
+ */
+ if (LF_ISSET(DB_OSO_LOG) && log_size != 0 && __os_is_winnt()) {
+ if (SetFilePointer(fhp->handle,
+ log_size - 1, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (WriteFile(fhp->handle, "\x00", 1, &bytesWritten, NULL) == 0)
+ goto err;
+ if (bytesWritten != 1)
+ goto err;
+ if (SetEndOfFile(fhp->handle) == 0)
+ goto err;
+ if (SetFilePointer(
+ fhp->handle, 0, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (FlushFileBuffers(fhp->handle) == 0)
+ goto err;
+ }
+
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+
+err: if (ret == 0)
+ ret = __os_win32_errno();
+ if (fhp->handle != INVALID_HANDLE_VALUE)
+ (void)CloseHandle(fhp->handle);
+ return (ret);
+}
diff --git a/libdb/os_win32/os_rename.c b/libdb/os_win32/os_rename.c
new file mode 100644
index 0000000..e0cb1f5
--- /dev/null
+++ b/libdb/os_win32/os_rename.c
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_rename --
+ * Rename a file.
+ */
+int
+__os_rename(dbenv, oldname, newname, flags)
+ DB_ENV *dbenv;
+ const char *oldname, *newname;
+ u_int32_t flags;
+{
+ int ret;
+
+ ret = 0;
+ if (DB_GLOBAL(j_rename) != NULL) {
+ if (DB_GLOBAL(j_rename)(oldname, newname) == -1)
+ ret = __os_get_errno();
+ goto done;
+ }
+
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+
+ if (ret == EEXIST) {
+ ret = 0;
+ if (__os_is_winnt()) {
+ if (!MoveFileEx(
+ oldname, newname, MOVEFILE_REPLACE_EXISTING))
+ ret = __os_win32_errno();
+ } else {
+ /*
+ * There is no MoveFileEx for Win9x/Me, so we have to
+ * do the best we can. Note that MoveFile returns 1
+ * if the names refer to the same file, so we don't
+ * need to check that here.
+ */
+ (void)DeleteFile(newname);
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+ }
+ }
+
+done: if (ret != 0 && flags == 0)
+ __db_err(dbenv,
+ "Rename %s %s: %s", oldname, newname, strerror(ret));
+
+ return (ret);
+}
diff --git a/libdb/os_win32/os_rw.c b/libdb/os_win32/os_rw.c
new file mode 100644
index 0000000..f562931
--- /dev/null
+++ b/libdb/os_win32/os_rw.c
@@ -0,0 +1,300 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+static int __os_zerofill __P((DB_ENV *, DB_FH *));
+#endif
+static int __os_physwrite __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+ if (__os_is_winnt()) {
+ ULONG64 off = (ULONG64)db_iop->pagesize * db_iop->pgno;
+ OVERLAPPED over;
+ DWORD nbytes;
+ over.Offset = (DWORD)(off & 0xffffffff);
+ over.OffsetHigh = (DWORD)(off >> 32);
+ over.hEvent = 0; /* we don't want asynchronous notifications */
+
+ switch (op) {
+ case DB_IO_READ:
+ if (DB_GLOBAL(j_read) != NULL)
+ goto slow;
+ if (!ReadFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ case DB_IO_WRITE:
+ if (DB_GLOBAL(j_write) != NULL)
+ goto slow;
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ if (__os_fs_notzero())
+ goto slow;
+#endif
+ if (!WriteFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ }
+ if (nbytes == db_iop->bytes) {
+ *niop = (size_t)nbytes;
+ return (0);
+ }
+ }
+
+slow: MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+ db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nrp;
+{
+ size_t offset;
+ DWORD nr;
+ int ret;
+ BOOL success;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nr, offset += nr) {
+retry: if (DB_GLOBAL(j_read) != NULL) {
+ nr = (DWORD)DB_GLOBAL(j_read)(fhp->fd,
+ taddr, len - offset);
+ success = (nr >= 0);
+ } else {
+ success = ReadFile(fhp->handle,
+ taddr, (DWORD)(len - offset), &nr, NULL);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+
+ if (!success) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "read: 0x%lx, %lu: %s",
+ P_TO_ULONG(taddr),
+ (u_long)len - offset, strerror(ret));
+ return (ret);
+ }
+ if (nr == 0)
+ break;
+ }
+ *nrp = taddr - (u_int8_t *)addr;
+ return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ int ret;
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ /* Zero-fill as necessary. */
+ if (__os_fs_notzero() && (ret = __os_zerofill(dbenv, fhp)) != 0)
+ return (ret);
+#endif
+ return (__os_physwrite(dbenv, fhp, addr, len, nwp));
+}
+
+/*
+ * __os_physwrite --
+ * Physical write to a file handle.
+ */
+static int
+__os_physwrite(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ size_t offset;
+ DWORD nw;
+ int ret;
+ BOOL success;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nw, offset += nw) {
+retry: if (DB_GLOBAL(j_write) != NULL) {
+ nw = (DWORD)DB_GLOBAL(j_write)(fhp->fd,
+ taddr, len - offset);
+ success = (nw >= 0);
+ } else {
+ success = WriteFile(fhp->handle,
+ taddr, (DWORD)(len - offset), &nw, NULL);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+
+ if (!success) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "write: 0x%x, %lu: %s", taddr,
+ (u_long)len-offset, strerror(ret));
+ return (ret);
+ }
+ }
+
+ *nwp = len;
+ return (0);
+}
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+/*
+ * __os_zerofill --
+ * Zero out bytes in the file.
+ *
+ * Pages allocated by writing pages past end-of-file are not zeroed
+ * on some systems. Recovery could theoretically be fooled by a page
+ * showing up that contained garbage. In order to avoid this, we
+ * have to write the pages out to disk, and flush them. The reason
+ * for the flush is that if we don't sync, the allocation of another
+ * page subsequent to this one might reach the disk first, and if we
+ * crashed at the right moment, leave us with this page as the one
+ * allocated by writing a page past it in the file.
+ */
+static int
+__os_zerofill(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ unsigned __int64 stat_offset, write_offset;
+ size_t blen, nw;
+ u_int32_t bytes, mbytes;
+ int group_sync, need_free, ret;
+ u_int8_t buf[8 * 1024], *bp;
+
+ /* Calculate the byte offset of the next write. */
+ write_offset = (unsigned __int64)fhp->pgno * fhp->pgsize + fhp->offset;
+
+ /* Stat the file. */
+ if ((ret = __os_ioinfo(dbenv, NULL, fhp, &mbytes, &bytes, NULL)) != 0)
+ return (ret);
+ stat_offset = (unsigned __int64)mbytes * MEGABYTE + bytes;
+
+ /* Check if the file is large enough. */
+ if (stat_offset >= write_offset)
+ return (0);
+
+ /* Get a large buffer if we're writing lots of data. */
+#undef ZF_LARGE_WRITE
+#define ZF_LARGE_WRITE (64 * 1024)
+ if (write_offset - stat_offset > ZF_LARGE_WRITE) {
+ if ((ret = __os_calloc(dbenv, 1, ZF_LARGE_WRITE, &bp)) != 0)
+ return (ret);
+ blen = ZF_LARGE_WRITE;
+ need_free = 1;
+ } else {
+ bp = buf;
+ blen = sizeof(buf);
+ need_free = 0;
+ memset(buf, 0, sizeof(buf));
+ }
+
+ /* Seek to the current end of the file. */
+ if ((ret = __os_seek(
+ dbenv, fhp, MEGABYTE, mbytes, bytes, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ /*
+ * Hash is the only access method that allocates groups of pages. Hash
+ * uses the existence of the last page in a group to signify the entire
+ * group is OK; so, write all the pages but the last one in the group,
+ * flush them to disk, then write the last one to disk and flush it.
+ */
+ for (group_sync = 0; stat_offset < write_offset; group_sync = 1) {
+ if (write_offset - stat_offset <= blen) {
+ blen = (size_t)(write_offset - stat_offset);
+ if (group_sync && (ret = __os_fsync(dbenv, fhp)) != 0)
+ goto err;
+ }
+ if ((ret = __os_physwrite(dbenv, fhp, bp, blen, &nw)) != 0)
+ goto err;
+ stat_offset += blen;
+ }
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+ goto err;
+
+ /* Seek back to where we started. */
+ mbytes = (u_int32_t)(write_offset / MEGABYTE);
+ bytes = (u_int32_t)(write_offset % MEGABYTE);
+ ret = __os_seek(dbenv, fhp, MEGABYTE, mbytes, bytes, 0, DB_OS_SEEK_SET);
+
+err: if (need_free)
+ __os_free(dbenv, bp);
+ return (ret);
+}
+#endif
diff --git a/libdb/os_win32/os_seek.c b/libdb/os_win32/os_seek.c
new file mode 100644
index 0000000..b26dec8
--- /dev/null
+++ b/libdb/os_win32/os_seek.c
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ /* Yes, this really is how Microsoft have designed their API */
+ union {
+ __int64 bigint;
+ struct {
+ unsigned long low;
+ long high;
+ };
+ } offset;
+ int ret, whence;
+ DWORD from;
+
+ if (DB_GLOBAL(j_seek) != NULL) {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ ret = DB_GLOBAL(j_seek)(fhp->fd, pgsize, pageno,
+ relative, isrewind, whence);
+ } else {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ from = FILE_CURRENT;
+ break;
+ case DB_OS_SEEK_END:
+ from = FILE_END;
+ break;
+ case DB_OS_SEEK_SET:
+ from = FILE_BEGIN;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ offset.bigint = (__int64)pgsize * pageno + relative;
+ if (isrewind)
+ offset.bigint = -offset.bigint;
+
+ ret = (SetFilePointer(fhp->handle,
+ offset.low, &offset.high, from) == (DWORD) - 1) ?
+ __os_win32_errno() : 0;
+ }
+
+ if (ret == 0) {
+ fhp->pgsize = pgsize;
+ fhp->pgno = pageno;
+ fhp->offset = relative;
+ } else {
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+ }
+
+ return (ret);
+}
diff --git a/libdb/os_win32/os_sleep.c b/libdb/os_win32/os_sleep.c
new file mode 100644
index 0000000..18c5a6a
--- /dev/null
+++ b/libdb/os_win32/os_sleep.c
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ COMPQUIET(dbenv, NULL);
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; ++secs, usecs -= 1000000)
+ ;
+
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
+
+ /*
+ * It's important that we yield the processor here so that other
+ * processes or threads are permitted to run.
+ */
+ Sleep(secs * 1000 + usecs / 1000);
+ return (0);
+}
diff --git a/libdb/os_win32/os_spin.c b/libdb/os_win32/os_spin.c
new file mode 100644
index 0000000..4387947
--- /dev/null
+++ b/libdb/os_win32/os_spin.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ */
+int
+__os_spin(dbenv)
+ DB_ENV *dbenv;
+{
+ SYSTEM_INFO SystemInfo;
+
+ /*
+ * If the application specified a value or we've already figured it
+ * out, return it.
+ */
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
+
+ /* Get the number of processors */
+ GetSystemInfo(&SystemInfo);
+
+ /*
+ * Spin 50 times per processor -- we have anecdotal evidence that this
+ * is a reasonable value.
+ */
+ if (SystemInfo.dwNumberOfProcessors > 1)
+ dbenv->tas_spins = 50 * SystemInfo.dwNumberOfProcessors;
+ else
+ dbenv->tas_spins = 1;
+ return (dbenv->tas_spins);
+}
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
+ return;
+ __os_sleep(dbenv, 0, usecs);
+}
diff --git a/libdb/os_win32/os_stat.c b/libdb/os_win32/os_stat.c
new file mode 100644
index 0000000..1aea275
--- /dev/null
+++ b/libdb/os_win32/os_stat.c
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ int ret;
+ DWORD attrs;
+
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+
+ ret = 0;
+ do {
+ attrs = GetFileAttributes(path);
+ if (attrs == (DWORD)-1)
+ ret = __os_win32_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
+
+ if (isdirp != NULL)
+ *isdirp = (attrs & FILE_ATTRIBUTE_DIRECTORY);
+
+ return (0);
+}
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ BY_HANDLE_FILE_INFORMATION bhfi;
+ unsigned __int64 filesize;
+
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+retry: if (!GetFileInformationByHandle(fhp->handle, &bhfi)) {
+ if ((ret = __os_win32_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv,
+ "GetFileInformationByHandle: %s", strerror(ret));
+ return (ret);
+ }
+
+ filesize = ((unsigned __int64)bhfi.nFileSizeHigh << 32) +
+ bhfi.nFileSizeLow;
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t)(filesize / MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t)(filesize % MEGABYTE);
+
+ /* The filesystem blocksize is not easily available. */
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+ return (0);
+}
diff --git a/libdb/os_win32/os_type.c b/libdb/os_win32/os_type.c
new file mode 100644
index 0000000..da51a5c
--- /dev/null
+++ b/libdb/os_win32/os_type.c
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+#include "db_int_def.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+/*
+ * __os_is_winnt --
+ * Return 1 if Windows/NT, otherwise 0.
+ *
+ * PUBLIC: int __os_is_winnt __P((void));
+ */
+int
+__os_is_winnt()
+{
+ static int __os_type = -1;
+
+ /*
+ * The value of __os_type is computed only once, and cached to
+ * avoid the overhead of repeated calls to GetVersion().
+ */
+ if (__os_type == -1) {
+ if ((GetVersion() & 0x80000000) == 0)
+ __os_type = 1;
+ else
+ __os_type = 0;
+ }
+ return (__os_type);
+}
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB.pm b/libdb/perl/BerkeleyDB/BerkeleyDB.pm
new file mode 100644
index 0000000..c56390b
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB.pm
@@ -0,0 +1,1506 @@
+
+package BerkeleyDB;
+
+
+# Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+
+# The documentation for this module is at the bottom of this file,
+# after the line __END__.
+
+BEGIN { require 5.004_04 }
+
+use strict;
+use Carp;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD
+ $use_XSLoader);
+
+$VERSION = '0.20';
+
+require Exporter;
+#require DynaLoader;
+require AutoLoader;
+
+BEGIN {
+ $use_XSLoader = 1 ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+@ISA = qw(Exporter DynaLoader);
+# Items to export into the caller's namespace by default.  Note: do not export
+# names by default without a very good reason. Use EXPORT_OK instead.
+# Do not simply export all your public functions/methods/constants.
+
+# NOTE -- Do not add to @EXPORT directly. It is written by mkconsts
+@EXPORT = qw(
+ DB_AFTER
+ DB_AGGRESSIVE
+ DB_ALREADY_ABORTED
+ DB_APPEND
+ DB_APPLY_LOGREG
+ DB_APP_INIT
+ DB_ARCH_ABS
+ DB_ARCH_DATA
+ DB_ARCH_LOG
+ DB_AUTO_COMMIT
+ DB_BEFORE
+ DB_BROADCAST_EID
+ DB_BTREE
+ DB_BTREEMAGIC
+ DB_BTREEOLDVER
+ DB_BTREEVERSION
+ DB_CACHED_COUNTS
+ DB_CDB_ALLDB
+ DB_CHECKPOINT
+ DB_CHKSUM_SHA1
+ DB_CLIENT
+ DB_CL_WRITER
+ DB_COMMIT
+ DB_CONSUME
+ DB_CONSUME_WAIT
+ DB_CREATE
+ DB_CURLSN
+ DB_CURRENT
+ DB_CXX_NO_EXCEPTIONS
+ DB_DELETED
+ DB_DELIMITER
+ DB_DIRECT
+ DB_DIRECT_DB
+ DB_DIRECT_LOG
+ DB_DIRTY_READ
+ DB_DONOTINDEX
+ DB_DUP
+ DB_DUPCURSOR
+ DB_DUPSORT
+ DB_EID_BROADCAST
+ DB_EID_INVALID
+ DB_ENCRYPT
+ DB_ENCRYPT_AES
+ DB_ENV_APPINIT
+ DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB
+ DB_ENV_CDB_ALLDB
+ DB_ENV_CREATE
+ DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB
+ DB_ENV_DIRECT_LOG
+ DB_ENV_FATAL
+ DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING
+ DB_ENV_LOGGING
+ DB_ENV_NOLOCKING
+ DB_ENV_NOMMAP
+ DB_ENV_NOPANIC
+ DB_ENV_OPEN_CALLED
+ DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK
+ DB_ENV_PRIVATE
+ DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT
+ DB_ENV_REP_LOGSONLY
+ DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT
+ DB_ENV_RPCCLIENT_GIVEN
+ DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM
+ DB_ENV_THREAD
+ DB_ENV_TXN
+ DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC
+ DB_ENV_USER_ALLOC
+ DB_ENV_YIELDCPU
+ DB_EXCL
+ DB_EXTENT
+ DB_FAST_STAT
+ DB_FCNTL_LOCKING
+ DB_FILE_ID_LEN
+ DB_FIRST
+ DB_FIXEDLEN
+ DB_FLUSH
+ DB_FORCE
+ DB_GETREC
+ DB_GET_BOTH
+ DB_GET_BOTHC
+ DB_GET_BOTH_RANGE
+ DB_GET_RECNO
+ DB_HANDLE_LOCK
+ DB_HASH
+ DB_HASHMAGIC
+ DB_HASHOLDVER
+ DB_HASHVERSION
+ DB_INCOMPLETE
+ DB_INIT_CDB
+ DB_INIT_LOCK
+ DB_INIT_LOG
+ DB_INIT_MPOOL
+ DB_INIT_TXN
+ DB_INVALID_EID
+ DB_JAVA_CALLBACK
+ DB_JOINENV
+ DB_JOIN_ITEM
+ DB_JOIN_NOSORT
+ DB_KEYEMPTY
+ DB_KEYEXIST
+ DB_KEYFIRST
+ DB_KEYLAST
+ DB_LAST
+ DB_LOCKDOWN
+ DB_LOCKMAGIC
+ DB_LOCKVERSION
+ DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK
+ DB_LOCK_DEFAULT
+ DB_LOCK_DUMP
+ DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER
+ DB_LOCK_GET
+ DB_LOCK_GET_TIMEOUT
+ DB_LOCK_INHERIT
+ DB_LOCK_MAXLOCKS
+ DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE
+ DB_LOCK_NORUN
+ DB_LOCK_NOTEXIST
+ DB_LOCK_NOTGRANTED
+ DB_LOCK_NOTHELD
+ DB_LOCK_NOWAIT
+ DB_LOCK_OLDEST
+ DB_LOCK_PUT
+ DB_LOCK_PUT_ALL
+ DB_LOCK_PUT_OBJ
+ DB_LOCK_PUT_READ
+ DB_LOCK_RANDOM
+ DB_LOCK_RECORD
+ DB_LOCK_REMOVE
+ DB_LOCK_RIW_N
+ DB_LOCK_RW_N
+ DB_LOCK_SET_TIMEOUT
+ DB_LOCK_SWITCH
+ DB_LOCK_TIMEOUT
+ DB_LOCK_TRADE
+ DB_LOCK_UPGRADE
+ DB_LOCK_UPGRADE_WRITE
+ DB_LOCK_YOUNGEST
+ DB_LOGC_BUF_SIZE
+ DB_LOGFILEID_INVALID
+ DB_LOGMAGIC
+ DB_LOGOLDVER
+ DB_LOGVERSION
+ DB_LOG_DISK
+ DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR
+ DB_MAX_PAGES
+ DB_MAX_RECORDS
+ DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE
+ DB_MPOOL_DIRTY
+ DB_MPOOL_DISCARD
+ DB_MPOOL_EXTENT
+ DB_MPOOL_LAST
+ DB_MPOOL_NEW
+ DB_MPOOL_NEW_GROUP
+ DB_MPOOL_PRIVATE
+ DB_MULTIPLE
+ DB_MULTIPLE_KEY
+ DB_MUTEXDEBUG
+ DB_MUTEXLOCKS
+ DB_NEEDSPLIT
+ DB_NEXT
+ DB_NEXT_DUP
+ DB_NEXT_NODUP
+ DB_NOCOPY
+ DB_NODUPDATA
+ DB_NOLOCKING
+ DB_NOMMAP
+ DB_NOORDERCHK
+ DB_NOOVERWRITE
+ DB_NOPANIC
+ DB_NORECURSE
+ DB_NOSERVER
+ DB_NOSERVER_HOME
+ DB_NOSERVER_ID
+ DB_NOSYNC
+ DB_NOTFOUND
+ DB_ODDFILESIZE
+ DB_OK_BTREE
+ DB_OK_HASH
+ DB_OK_QUEUE
+ DB_OK_RECNO
+ DB_OLD_VERSION
+ DB_OPEN_CALLED
+ DB_OPFLAGS_MASK
+ DB_ORDERCHKONLY
+ DB_OVERWRITE
+ DB_PAD
+ DB_PAGEYIELD
+ DB_PAGE_LOCK
+ DB_PAGE_NOTFOUND
+ DB_PANIC_ENVIRONMENT
+ DB_PERMANENT
+ DB_POSITION
+ DB_POSITIONI
+ DB_PREV
+ DB_PREV_NODUP
+ DB_PRINTABLE
+ DB_PRIORITY_DEFAULT
+ DB_PRIORITY_HIGH
+ DB_PRIORITY_LOW
+ DB_PRIORITY_VERY_HIGH
+ DB_PRIORITY_VERY_LOW
+ DB_PRIVATE
+ DB_PR_HEADERS
+ DB_PR_PAGE
+ DB_PR_RECOVERYTEST
+ DB_QAMMAGIC
+ DB_QAMOLDVER
+ DB_QAMVERSION
+ DB_QUEUE
+ DB_RDONLY
+ DB_RDWRMASTER
+ DB_RECNO
+ DB_RECNUM
+ DB_RECORDCOUNT
+ DB_RECORD_LOCK
+ DB_RECOVER
+ DB_RECOVER_FATAL
+ DB_REGION_ANON
+ DB_REGION_INIT
+ DB_REGION_MAGIC
+ DB_REGION_NAME
+ DB_REGISTERED
+ DB_RENAMEMAGIC
+ DB_RENUMBER
+ DB_REP_CLIENT
+ DB_REP_DUPMASTER
+ DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY
+ DB_REP_MASTER
+ DB_REP_NEWMASTER
+ DB_REP_NEWSITE
+ DB_REP_OUTDATED
+ DB_REP_PERMANENT
+ DB_REP_UNAVAIL
+ DB_REVSPLITOFF
+ DB_RMW
+ DB_RPC_SERVERPROG
+ DB_RPC_SERVERVERS
+ DB_RUNRECOVERY
+ DB_SALVAGE
+ DB_SECONDARY_BAD
+ DB_SEQUENTIAL
+ DB_SET
+ DB_SET_LOCK_TIMEOUT
+ DB_SET_RANGE
+ DB_SET_RECNO
+ DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT
+ DB_SNAPSHOT
+ DB_STAT_CLEAR
+ DB_SURPRISE_KID
+ DB_SWAPBYTES
+ DB_SYSTEM_MEM
+ DB_TEMPORARY
+ DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND
+ DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1
+ DB_TEST_ELECTWAIT2
+ DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG
+ DB_TEST_POSTLOGMETA
+ DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME
+ DB_TEST_POSTSYNC
+ DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE
+ DB_TEST_PREEXTOPEN
+ DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN
+ DB_TEST_PRERENAME
+ DB_TEST_SUBDB_LOCKS
+ DB_THREAD
+ DB_TIMEOUT
+ DB_TRUNCATE
+ DB_TXNMAGIC
+ DB_TXNVERSION
+ DB_TXN_ABORT
+ DB_TXN_APPLY
+ DB_TXN_BACKWARD_ALLOC
+ DB_TXN_BACKWARD_ROLL
+ DB_TXN_CKP
+ DB_TXN_FORWARD_ROLL
+ DB_TXN_GETPGNOS
+ DB_TXN_LOCK
+ DB_TXN_LOCK_2PL
+ DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST
+ DB_TXN_LOCK_OPTIMISTIC
+ DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO
+ DB_TXN_LOG_UNDO
+ DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC
+ DB_TXN_NOWAIT
+ DB_TXN_OPENFILES
+ DB_TXN_POPENFILES
+ DB_TXN_PRINT
+ DB_TXN_REDO
+ DB_TXN_SYNC
+ DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC
+ DB_UNKNOWN
+ DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY
+ DB_UPGRADE
+ DB_USE_ENVIRON
+ DB_USE_ENVIRON_ROOT
+ DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK
+ DB_VERB_RECOVERY
+ DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR
+ DB_VERIFY
+ DB_VERIFY_BAD
+ DB_VERIFY_FATAL
+ DB_VERSION_MAJOR
+ DB_VERSION_MINOR
+ DB_VERSION_PATCH
+ DB_VERSION_STRING
+ DB_VRFY_FLAGMASK
+ DB_WRITECURSOR
+ DB_WRITELOCK
+ DB_WRITEOPEN
+ DB_WRNOSYNC
+ DB_XA_CREATE
+ DB_XIDDATASIZE
+ DB_YIELDCPU
+ );
+
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
+
+#bootstrap BerkeleyDB $VERSION;
+if ($use_XSLoader)
+ { XSLoader::load("BerkeleyDB", $VERSION)}
+else
+ { bootstrap BerkeleyDB $VERSION }
+
+# Preloaded methods go here.
+
+
+sub ParseParameters($@)
+{
+ my ($default, @rest) = @_ ;
+ my (%got) = %$default ;
+ my (@Bad) ;
+ my ($key, $value) ;
+ my $sub = (caller(1))[3] ;
+ my %options = () ;
+ local ($Carp::CarpLevel) = 1 ;
+
+ # allow the options to be passed as a hash reference or
+ # as the complete hash.
+ if (@rest == 1) {
+
+ croak "$sub: parameter is not a reference to a hash"
+ if ref $rest[0] ne "HASH" ;
+
+ %options = %{ $rest[0] } ;
+ }
+ elsif (@rest >= 2) {
+ %options = @rest ;
+ }
+
+ while (($key, $value) = each %options)
+ {
+ $key =~ s/^-// ;
+
+ if (exists $default->{$key})
+ { $got{$key} = $value }
+ else
+ { push (@Bad, $key) }
+ }
+
+ if (@Bad) {
+ my ($bad) = join(", ", @Bad) ;
+ croak "unknown key value(s) @Bad" ;
+ }
+
+ return \%got ;
+}
+
+use UNIVERSAL qw( isa ) ;
+
+sub env_remove
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Config => { name => value, name => value }
+ # [ -Flags => DB_INIT_LOCK| ]
+ # ;
+
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Flags => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %{$got->{Config}}) {
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ return _env_remove($got) ;
+}
+
+sub db_remove
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ return _db_remove($got);
+}
+
+sub db_rename
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Newname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Must specify a Subname")
+ if ! defined $got->{Subname} ;
+
+ croak("Must specify a Newname")
+ if ! defined $got->{Newname} ;
+
+ return _db_rename($got);
+}
+
+sub db_verify
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Outfile => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ return _db_verify($got);
+}
+
+package BerkeleyDB::Env ;
+
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+use vars qw( %valid_config_keys ) ;
+
+sub isaFilehandle
+{
+ my $fh = shift ;
+
+ return ((isa($fh,'GLOB') or isa(\$fh,'GLOB')) and defined fileno($fh) )
+
+}
+
+%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR
+DB_TMP_DIR ) ;
+
+sub new
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Mode => mode, ]
+ # [ -Config => { name => value, name => value }
+ # [ -ErrFile => filename, ]
+ # [ -ErrPrefix => "string", ]
+ # [ -Flags => DB_INIT_LOCK| ]
+ # [ -Set_Flags => $flags,]
+ # [ -Cachesize => number ]
+ # [ -LockDetect => ]
+ # [ -Verbose => boolean ]
+ # ;
+
+ my $pkg = shift ;
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Server => undef,
+ Mode => 0666,
+ ErrFile => undef,
+ ErrPrefix => undef,
+ Flags => 0,
+ SetFlags => 0,
+ Cachesize => 0,
+ LockDetect => 0,
+ Verbose => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ croak("ErrFile parameter must be a file name")
+ if ref $got->{ErrFile} ;
+ #if (!isaFilehandle($got->{ErrFile})) {
+ # my $handle = new IO::File ">$got->{ErrFile}"
+# or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+# $got->{ErrFile} = $handle ;
+# }
+ }
+
+
+ my %config ;
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ %config = %{ $got->{Config} } ;
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %config) {
+ if ($BerkeleyDB::db_version >= 3.1 && ! $valid_config_keys{$k} ) {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error ;
+ }
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ my ($addr) = _db_appinit($pkg, $got) ;
+ my $obj ;
+ $obj = bless [$addr] , $pkg if $addr ;
+ if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) {
+ my ($k, $v);
+ while (($k, $v) = each %config) {
+ if ($k eq 'DB_DATA_DIR')
+ { $obj->set_data_dir($v) }
+ elsif ($k eq 'DB_LOG_DIR')
+ { $obj->set_lg_dir($v) }
+ elsif ($k eq 'DB_TEMP_DIR' || $k eq 'DB_TMP_DIR')
+ { $obj->set_tmp_dir($v) }
+ else {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error
+ }
+ }
+ }
+ return $obj ;
+}
+
+
+sub TxnMgr
+{
+ my $env = shift ;
+ my ($addr) = $env->_TxnMgr() ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::TxnMgr" if $addr ;
+ return $obj ;
+}
+
+sub txn_begin
+{
+ my $env = shift ;
+ my ($addr) = $env->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Hash ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Hash specific
+ Ffactor => 0,
+ Nelem => 0,
+ Hash => undef,
+ DupCompare => undef,
+
+ # BerkeleyDB specific
+ ReadKey => undef,
+ WriteKey => undef,
+ ReadValue => undef,
+ WriteValue => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_hash($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*TIEHASH = \&new ;
+
+
+package BerkeleyDB::Btree ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Btree specific
+ Minkey => 0,
+ Compare => undef,
+ DupCompare => undef,
+ Prefix => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_btree($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Btree::TIEHASH = \&BerkeleyDB::Btree::new ;
+
+
+package BerkeleyDB::Recno ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Recno specific
+ Delim => undef,
+ Len => undef,
+ Pad => undef,
+ Source => undef,
+ ArrayBase => 1, # lowest index in array
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_recno($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Recno::TIEARRAY = \&BerkeleyDB::Recno::new ;
+*BerkeleyDB::Recno::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Queue ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Queue specific
+ Len => undef,
+ Pad => undef,
+ ArrayBase => 1, # lowest index in array
+ ExtentSize => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_queue($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
+
+sub UNSHIFT
+{
+ my $self = shift;
+ croak "unshift is unsupported with Queue databases";
+}
+
+## package BerkeleyDB::Text ;
+##
+## use vars qw(@ISA) ;
+## @ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+## use UNIVERSAL qw( isa ) ;
+## use Carp ;
+##
+## sub new
+## {
+## my $self = shift ;
+## my $got = BerkeleyDB::ParseParameters(
+## {
+## # Generic Stuff
+## Filename => undef,
+## #Flags => BerkeleyDB::DB_CREATE(),
+## Flags => 0,
+## Property => 0,
+## Mode => 0666,
+## Cachesize => 0,
+## Lorder => 0,
+## Pagesize => 0,
+## Env => undef,
+## #Tie => undef,
+## Txn => undef,
+##
+## # Recno specific
+## Delim => undef,
+## Len => undef,
+## Pad => undef,
+## Btree => undef,
+## }, @_) ;
+##
+## croak("Env not of type BerkeleyDB::Env")
+## if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+##
+## croak("Txn not of type BerkeleyDB::Txn")
+## if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+##
+## croak("-Tie needs a reference to an array")
+## if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+##
+## # rearange for recno
+## $got->{Source} = $got->{Filename} if defined $got->{Filename} ;
+## delete $got->{Filename} ;
+## $got->{Fname} = $got->{Btree} if defined $got->{Btree} ;
+## return BerkeleyDB::Recno::_db_open_recno($self, $got);
+## }
+##
+## *BerkeleyDB::Text::TIEARRAY = \&BerkeleyDB::Text::new ;
+## *BerkeleyDB::Text::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Unknown ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr, $type) = _db_open_unknown($got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr], "BerkeleyDB::$type" ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+
+package BerkeleyDB::_tiedHash ;
+
+use Carp ;
+
+#sub TIEHASH
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiehash REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \%hash ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+ croak("usage \$x->Tie \\%hash\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to a hash")
+ if defined $ref and $ref !~ /HASH/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie %{ $ref }, "BerkeleyDB::_tiedHash", $self ;
+ return undef ;
+}
+
+
+sub TIEHASH
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) == 0 ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+ $self->db_del($key) ;
+}
+
+sub CLEAR
+{
+ my $self = shift ;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ while ($cursor->c_get($key, $value, BerkeleyDB::DB_PREV()) == 0)
+ { $cursor->c_del() }
+ #1 while $cursor->c_del() == 0 ;
+ # cursor will self-destruct
+}
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieHash::DESTROY\n" ;
+# $self->{Cursor}->c_close() if $self->{Cursor} ;
+#}
+
+package BerkeleyDB::_tiedArray ;
+
+use Carp ;
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \@array ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+    croak("usage \$x->Tie \\\@array\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to an array")
+ if defined $ref and $ref !~ /ARRAY/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie @{ $ref }, "BerkeleyDB::_tiedArray", $self ;
+ return undef ;
+}
+
+
+#sub TIEARRAY
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiearray REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub TIEARRAY
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+*CLEAR = \&BerkeleyDB::_tiedHash::CLEAR ;
+*FIRSTKEY = \&BerkeleyDB::_tiedHash::FIRSTKEY ;
+*NEXTKEY = \&BerkeleyDB::_tiedHash::NEXTKEY ;
+
+sub EXTEND {} # don't do anything with EXTEND
+
+
+sub SHIFT
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+
+sub UNSHIFT
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) ;
+ if ($status == 0)
+ {
+ foreach $value (reverse @_)
+ {
+ $key = 0 ;
+ $cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
+ }
+ }
+ elsif ($status == BerkeleyDB::DB_NOTFOUND())
+ {
+ $key = 0 ;
+ foreach $value (@_)
+ {
+ $self->db_put($key++, $value) ;
+ }
+ }
+ }
+}
+
+sub PUSH
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (-1, 0) ;
+ my $cursor = $self->db_cursor() ;
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) ;
+ if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND())
+ {
+ $key = -1 if $status != 0 and $self->type != BerkeleyDB::DB_RECNO() ;
+ foreach $value (@_)
+ {
+ ++ $key ;
+ $status = $self->db_put($key, $value) ;
+ }
+ }
+
+# can use this when DB_APPEND is fixed.
+# foreach $value (@_)
+# {
+# my $status = $cursor->c_put($key, $value, BerkeleyDB::DB_AFTER()) ;
+#print "[$status]\n" ;
+# }
+ }
+}
+
+sub POP
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+sub SPLICE
+{
+ my $self = shift;
+ croak "SPLICE is not implemented yet" ;
+}
+
+*shift = \&SHIFT ;
+*unshift = \&UNSHIFT ;
+*push = \&PUSH ;
+*pop = \&POP ;
+*clear = \&CLEAR ;
+*length = \&FETCHSIZE ;
+
+sub STORESIZE
+{
+ croak "STORESIZE is not implemented yet" ;
+#print "STORESIZE @_\n" ;
+# my $self = shift;
+# my $length = shift ;
+# my $current_length = $self->FETCHSIZE() ;
+#print "length is $current_length\n";
+#
+# if ($length < $current_length) {
+#print "Make smaller $length < $current_length\n" ;
+# my $key ;
+# for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+# { $self->db_del($key) }
+# }
+# elsif ($length > $current_length) {
+#print "Make larger $length > $current_length\n" ;
+# $self->db_put($length-1, "") ;
+# }
+# else { print "stay the same\n" }
+
+}
+
+
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieArray::DESTROY\n" ;
+#}
+
+
+package BerkeleyDB::Common ;
+
+
+use Carp ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub Txn
+{
+ my $self = shift ;
+ my $txn = shift ;
+ #print "BerkeleyDB::Common::Txn db [$self] txn [$txn]\n" ;
+ if ($txn) {
+ $self->_Txn($txn) ;
+ push @{ $txn }, $self ;
+ }
+ else {
+ $self->_Txn() ;
+ }
+ #print "end BerkeleyDB::Common::Txn \n";
+}
+
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+ my $cursor = $db->db_cursor() ;
+
+ # iterate through the database until either EOF ($status == 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $cursor->c_get($key, $value, BerkeleyDB::DB_SET()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $cursor->c_get($key, $value, BerkeleyDB::DB_NEXT()) ) {
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+sub db_cursor
+{
+ my $db = shift ;
+ my ($addr) = $db->_db_cursor(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub db_join
+{
+ croak 'Usage: $db->BerkeleyDB::Common::db_join([cursors], flags=0)'
+ if @_ < 2 || @_ > 3 ;
+ my $db = shift ;
+ my ($addr) = $db->_db_join(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db, $_[0]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Cursor ;
+
+sub c_close
+{
+ my $cursor = shift ;
+ $cursor->[1] = "" ;
+ return $cursor->_c_close() ;
+}
+
+sub c_dup
+{
+ my $cursor = shift ;
+ my ($addr) = $cursor->_c_dup(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $cursor->[1]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::TxnMgr ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub txn_begin
+{
+ my $txnmgr = shift ;
+ my ($addr) = $txnmgr->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $txnmgr] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Txn ;
+
+sub Txn
+{
+ my $self = shift ;
+ my $db ;
+ # keep a reference to each db in the txn object
+ foreach $db (@_) {
+ $db->_Txn($self) ;
+ push @{ $self}, $db ;
+ }
+}
+
+sub txn_commit
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_commit() ;
+ return $status ;
+}
+
+sub txn_abort
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_abort() ;
+ return $status ;
+}
+
+sub disassociate
+{
+ my $self = shift ;
+ my $db ;
+ while ( @{ $self } > 2) {
+ $db = pop @{ $self } ;
+ $db->Txn() ;
+ }
+ #print "end disassociate\n" ;
+}
+
+
+sub DESTROY
+{
+ my $self = shift ;
+
+ $self->disassociate() ;
+    # first close the transaction
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Term ;
+
+END
+{
+ close_everything() ;
+}
+
+
+package BerkeleyDB ;
+
+
+
+# Autoload methods go after =cut, and are processed by the autosplit program.
+
+1;
+__END__
+
+
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB.pod b/libdb/perl/BerkeleyDB/BerkeleyDB.pod
new file mode 100644
index 0000000..60f30e2
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB.pod
@@ -0,0 +1,1792 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $status = $db->db_close() ;
+ $status = $db->db_pget()
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+ $status = $env->set_flags()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases that make use of the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+         -Filename => "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+         -Filename => "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
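+As an illustration (a sketch only; the log filename and prefix shown here
+are arbitrary), the two options are typically used together:
+
+    $env = new BerkeleyDB::Env
+        -ErrFile   => "/tmp/berkeleydb.log",
+        -ErrPrefix => "myapp"
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+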
+=item -Flags
+
+The B<Flags> parameter specifies which sub-systems to initialise, as well
+as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods sub-system.
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
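+As a sketch (initialise only the sub-systems your application actually
+needs), a transactional environment is typically opened by OR'ing several
+of these flags together:
+
+    $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+                  DB_INIT_MPOOL | DB_INIT_TXN
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+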
+=item -SetFlags
+
+Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $env->set_flags(bitmask, 1|0);
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
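+In the meantime, here is an illustrative sketch (not one of the
+distribution's tested examples) that creates a transactional environment
+in the directory "/home/databases" and then opens a database within it:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+                  -Home  => "/home/databases",
+                  -Flags => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+                            DB_INIT_LOG | DB_INIT_TXN
+        or die "Cannot create environment: $BerkeleyDB::Error\n" ;
+
+    my $db = new BerkeleyDB::Hash
+                 -Filename => "fred.db",
+                 -Flags    => DB_CREATE,
+                 -Env      => $env
+        or die "Cannot open fred.db: $BerkeleyDB::Error\n" ;
+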
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
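+As an illustrative sketch only (see the Berkeley DB documentation for the
+full set of options these functions accept), B<db_remove> can be used to
+delete a database file that is not currently open:
+
+    my $status = BerkeleyDB::db_remove(-Filename => "fruit") ;
+    print "Cannot remove fruit: $BerkeleyDB::Error\n" if $status != 0 ;
+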
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB.
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database are returned in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ my $v = "" ;
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order in which they were created. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item Minkey
+
+TODO
+
+=item Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item Prefix
+
+ sub prefix
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+=item DupCompare
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
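+For illustration, assuming C<$db> is an open B<BerkeleyDB::Btree> handle
+(this fragment is a sketch rather than one of the distribution's tested
+examples), the proportions for the key "Smith" could be retrieved like this:
+
+    my ($less, $equal, $greater) ;
+    if ($db->db_key_range("Smith", $less, $equal, $greater) == 0)
+      { printf "less=%.2f equal=%.2f greater=%.2f\n",
+               $less, $equal, $greater }
+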
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # Check the last element and the one before it
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x or greater.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
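+The sketch below (an illustration rather than a tested example) assumes an
+existing database file "any.db" of unknown type. It opens the file
+read-only and uses the C<type> method to report what kind of database it is:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $filename = "any.db" ;
+    my $db = new BerkeleyDB::Unknown
+                 -Filename => $filename,
+                 -Flags    => DB_RDONLY
+        or die "Cannot open $filename: $! $BerkeleyDB::Error\n" ;
+
+    # Find out what kind of database has just been opened
+    my $type = $db->type() ;
+    if ($type == DB_HASH)
+      { print "$filename is a Hash database\n" }
+    elsif ($type == DB_BTREE)
+      { print "$filename is a Btree database\n" }
+    else
+      { print "$filename is a Recno database\n" }
+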
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x or greater.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+=item -Lorder
+
+=item -Pagesize
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter specifies
+the B<BerkeleyDB::Env> object that the database should be associated with.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database (see
+the sketch after this list).
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
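+As a sketch of B<DB_GET_BOTH> (assuming C<$db> is the "fruit" database
+built in the earlier examples), the fragment below checks whether the
+specific pair "apple"/"red" is present:
+
+    my $value = "red" ;
+    if ($db->db_get("apple", $value, DB_GET_BOTH) == 0)
+      { print "the pair apple/red is in the database\n" }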
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>. A short sketch follows
+this list.
+
+=back
+
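+Here is a sketch of B<DB_NOOVERWRITE> in use (again assuming C<$db> is an
+open database handle that already contains the key "apple"):
+
+    my $status = $db->db_put("apple", "green", DB_NOOVERWRITE) ;
+    print "apple is already in the database\n"
+        if $status == DB_KEYEXIST ;
+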
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are held in memory, flush them to disk.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return codes are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
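+For illustration (a sketch, assuming C<$db> is an open database handle):
+
+    my $count ;
+    my $status = $db->truncate($count) ;
+    print "removed $count records\n" if $status == 0 ;
+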
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
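+For example (a sketch, assuming C<$cursor> is an existing cursor and
+Berkeley DB 3.0.x or better is in use):
+
+    my $newcursor = $cursor->c_dup(DB_POSITION) ;
+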
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor has already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
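+For illustration (a sketch, assuming C<$cursor> currently points at a
+key/value pair in a database opened with B<DB_DUP>):
+
+    my $count ;
+    $cursor->c_count($count) ;
+    print "the current key has $count duplicate(s)\n" ;
+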
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
+
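+Until then, here is a sketch (not one of the distribution's tested
+examples) that iterates from the first key/value pair to the last and
+then back again, assuming C<$db> is an open database handle:
+
+    my ($k, $v) = ("", "") ;
+    my $cursor = $db->db_cursor() ;
+
+    # forwards, first to last
+    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+      { print "$k -> $v\n" }
+
+    # backwards, last to first
+    while ($cursor->c_get($k, $v, DB_PREV) == 0)
+      { print "$k -> $v\n" }
+
+    undef $cursor ;
+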
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
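+For example, a previously installed store-key filter could be removed
+like this:
+
+    $db->filter_store_key(undef) ;
+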
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open database '$filename: $!\n";
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
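+Once tied as above, nested Perl data structures can be stored and fetched
+transparently. The fragment below is an illustrative sketch rather than a
+tested example:
+
+    $o{"fruit"} = { apple => "red", banana => "yellow" } ;
+    print Dumper($o{"fruit"}) ;
+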
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or better.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB.pod.P b/libdb/perl/BerkeleyDB/BerkeleyDB.pod.P
new file mode 100644
index 0000000..4a848f5
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB.pod.P
@@ -0,0 +1,1559 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $status = $db->db_pget()
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+ $status = $env->set_flags()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases you open within the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+ -Filename => "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+ -Filename => "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods (Concurrent Data Store).
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the shared memory buffer pool (memory pool) sub-system.
+
+B<DB_INIT_TXN>
+
+Initialise the Transaction sub-system.
+
+B<DB_MPOOL_PRIVATE>
+
+Use a memory pool that is private to this process rather than one that is
+shared with other processes. Only meaningful when B<DB_INIT_MPOOL> is also
+specified.
+
+B<DB_NOMMAP>
+
+Copy read-only database files into the local cache instead of mapping
+them into process memory.
+
+B<DB_RECOVER>
+
+Run normal recovery on this environment before opening it for normal use.
+
+B<DB_RECOVER_FATAL>
+
+Run catastrophic recovery on this environment before opening it for
+normal use.
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
+=item -SetFlags
+
+Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $env->set_flags(bitmask, 1|0);
+
+Set (if the second parameter is 1) or clear (if it is 0) the flags given
+in C<bitmask> by calling B<DB_ENV-E<gt>set_flags>.
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB.
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+## simpleHash
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database are returned in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+## simpleHash2
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+##dupHash
+
+Here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order in which they were created. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+##dupSortHash
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item Minkey
+
+TODO
+
+=item Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item Prefix
+
+ sub prefix
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+=item DupCompare
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+## btreeSimple
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+## btreeSortOrder
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+## simpleRecno
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
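+As a brief sketch (the filename and record length are purely illustrative),
+a queue of fixed-length records might be created like this:
+
+    my $db = new BerkeleyDB::Queue
+        -Filename => "queue.db",
+        -Flags    => DB_CREATE,
+        -Len      => 40
+      or die "Cannot create queue: $! $BerkeleyDB::Error\n" ;
+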
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database whose type is not known in
+advance.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x or greater.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
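+The code below is a minimal sketch (the filename is purely illustrative).
+It opens an existing database without specifying its type and then uses the
+C<type> method, described later, to report what kind of database was found.
+
+    my $db = new BerkeleyDB::Unknown
+        -Filename => "mystery.db"
+      or die "Cannot open database: $! $BerkeleyDB::Error\n" ;
+
+    my $type = $db->type() ;
+    print "This is a Btree database\n" if $type == DB_BTREE ;
+    print "This is a Hash database\n"  if $type == DB_HASH ;
+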
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x or greater.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+=item -Lorder
+
+=item -Pagesize
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter is used to
+specify the B<BerkeleyDB::Env> object the database should be associated
+with.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
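+Putting a few of the options above together, a typical open call might look
+like the sketch below (the filename and mode are purely illustrative):
+
+    my $db = new BerkeleyDB::Hash
+        -Filename => "data.db",
+        -Flags    => DB_CREATE,
+        -Mode     => 0644
+      or die "Cannot open data.db: $! $BerkeleyDB::Error\n" ;
+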
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
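+A short sketch of C<db_get> in use (assuming C<$db> is an open database
+handle; the key is purely illustrative):
+
+    my $value ;
+    if ($db->db_get("orange", $value) == 0) {
+        print "orange -> $value\n" ;
+    }
+    else {
+        print "orange is not in the database\n" ;
+    }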
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
+
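+For example, a short sketch of C<db_put> with B<DB_NOOVERWRITE> (the key
+and value are purely illustrative):
+
+    my $status = $db->db_put("orange", "fruit", DB_NOOVERWRITE) ;
+    print "orange is already in the database\n"
+        if $status == DB_KEYEXIST ;
+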
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are in memory, write them to the database.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return codes are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
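+A one-line sketch of C<c_dup>, duplicating a cursor while keeping the
+original position:
+
+    my $copy = $cursor->c_dup(DB_POSITION) ;
+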
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the smallest key (in the database's sort order) which is greater
+than or equal to the key supplied, via B<$key>. This allows partial key
+searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
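+As an illustration, adding another value as the last duplicate for a key
+with C<c_put> (this assumes the database was opened with B<DB_DUP>; the key
+and value are purely illustrative):
+
+    my $status = $cursor->c_put("orange", "another value", DB_KEYLAST) ;
+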
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor has already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
+
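+In the meantime, here is a brief sketch of iterating over every key/value
+pair, first forwards and then backwards (assuming C<$db> is an open
+database handle):
+
+    my ($key, $value) = ("", "") ;
+
+    my $cursor = $db->db_cursor() ;
+    while ($cursor->c_get($key, $value, DB_NEXT) == 0) {
+        print "forward:  $key -> $value\n" ;
+    }
+    undef $cursor ;
+
+    # use a fresh cursor for the reverse pass
+    $cursor = $db->db_cursor() ;
+    while ($cursor->c_get($key, $value, DB_PREV) == 0) {
+        print "backward: $key -> $value\n" ;
+    }
+    undef $cursor ;
+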
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
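+For instance, a minimal sketch that installs a store-key filter which
+upper-cases every key written to the database, and later removes it again:
+
+    $db->filter_store_key( sub { $_ = uc $_ } ) ;
+
+    # ... write to the database ...
+
+    # remove the filter again
+    $db->filter_store_key( undef ) ;
+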
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL terminations issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+## nullFilter
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+## intFilter
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+                    or die "Cannot open database '$filename': $!\n";
+
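+Once tied, nested Perl data structures can be stored and fetched through
+the hash in the usual MLDBM fashion. A brief sketch (the key and the
+structure are purely illustrative):
+
+    $o{"fruit"} = { apple => 1, colours => [ "red", "green" ] } ;
+    print Dumper($o{"fruit"}) ;
+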
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or better.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB.xs b/libdb/perl/BerkeleyDB/BerkeleyDB.xs
new file mode 100644
index 0000000..531b38a
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB.xs
@@ -0,0 +1,3643 @@
+/*
+
+ BerkeleyDB.xs -- Perl 5 interface to Berkeley DB version 2 & 3
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Please refer to the COPYRIGHT section in
+
+ Changes:
+ 0.01 - First Alpha Release
+ 0.02 -
+
+*/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PERL_POLLUTE
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "ppport.h"
+
+
+/* XSUB.h defines a macro called abort */
+/* This clashes with the txn abort method in Berkeley DB 4.x */
+/* This is a problem with ActivePerl (at least) */
+
+#ifdef _WIN32
+# ifdef abort
+# undef abort
+# endif
+# ifdef fopen
+# undef fopen
+# endif
+# ifdef fclose
+# undef fclose
+# endif
+#endif
+
+/* When building against Berkeley DB we prefer the __attribute__ that comes
+ * from <sys/cdefs.h> (which will shortly be #included by <db.h>) to any
+ * __attribute__ that may already be defined, for example by GNUC or by Perl. */
+
+#undef __attribute__
+
+#ifdef USE_PERLIO
+# define GetFILEptr(sv) PerlIO_findFILE(IoOFP(sv_2io(sv)))
+#else
+# define GetFILEptr(sv) IoOFP(sv_2io(sv))
+#endif
+
+#include <db.h>
+
+/* Check the version of Berkeley DB */
+
+#ifndef DB_VERSION_MAJOR
+#ifdef HASHMAGIC
+#error db.h is from Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+#else
+#error db.h is not for Berkeley DB at all.
+#endif
+#endif
+
+#if (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6) ||\
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 6 && DB_VERSION_PATCH < 4)
+# error db.h is from Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
+#endif
+
+
+#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
+# define IS_DB_3_0_x
+#endif
+
+#if DB_VERSION_MAJOR >= 3
+# define AT_LEAST_DB_3
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_3_1
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 2) ||\
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 2 && DB_VERSION_PATCH >= 6)
+# define AT_LEAST_DB_3_2_6
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
+# define AT_LEAST_DB_3_3
+#endif
+
+#if DB_VERSION_MAJOR >= 4
+# define AT_LEAST_DB_4
+#endif
+
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#define DBM_FILTERING
+#define STRICT_CLOSE
+/* #define ALLOW_RECNO_OFFSET */
+/* #define TRACE */
+
+#if DB_VERSION_MAJOR == 2 && ! defined(DB_LOCK_DEADLOCK)
+# define DB_LOCK_DEADLOCK EAGAIN
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#if DB_VERSION_MAJOR == 2
+# define DB_QUEUE 4
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef AT_LEAST_DB_3_2
+# define DB_callback DB * db,
+#else
+# define DB_callback
+#endif
+
+#if DB_VERSION_MAJOR > 2
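+/* Berkeley DB 3.x and later dropped the DB_INFO structure that the 2.x
+ * db_open() call used for open-time configuration.  Recreate a compatible
+ * structure here so the option parsing can collect the settings in one
+ * place; my_db_open() below then applies each field via the corresponding
+ * DB->set_*() method. */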
+typedef struct {
+ int db_lorder;
+ size_t db_cachesize;
+ size_t db_pagesize;
+
+
+ void *(*db_malloc) __P((size_t));
+ int (*dup_compare)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t bt_maxkey;
+ u_int32_t bt_minkey;
+ int (*bt_compare)
+ __P((DB_callback const DBT *, const DBT *));
+ size_t (*bt_prefix)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+ u_int32_t (*h_hash)
+ __P((DB_callback const void *, u_int32_t));
+
+ int re_pad;
+ int re_delim;
+ u_int32_t re_len;
+ char *re_source;
+
+#define DB_DELIMITER 0x0001
+#define DB_FIXEDLEN 0x0008
+#define DB_PAD 0x0010
+ u_int32_t flags;
+ u_int32_t q_extentsize;
+} DB_INFO ;
+
+#endif /* DB_VERSION_MAJOR > 2 */
+
+typedef struct {
+ int Status ;
+ /* char ErrBuff[1000] ; */
+ SV * ErrPrefix ;
+ FILE * ErrHandle ;
+ DB_ENV * Env ;
+ int open_dbs ;
+ int TxnMgrStatus ;
+ int active ;
+ bool txn_enabled ;
+ } BerkeleyDB_ENV_type ;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ BerkeleyDB_ENV_type * parent_env ;
+ DB * dbp ;
+ SV * compare ;
+ bool in_compare ;
+ SV * dup_compare ;
+ bool in_dup_compare ;
+ SV * prefix ;
+ bool in_prefix ;
+ SV * hash ;
+ bool in_hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ int open_cursors ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_type;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ BerkeleyDB_type * parent_db ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_Cursor_type;
+
+typedef struct {
+ BerkeleyDB_ENV_type * env ;
+ } BerkeleyDB_TxnMgr_type ;
+
+#if 1
+typedef struct {
+ int Status ;
+ DB_TXN * txn ;
+ int active ;
+ } BerkeleyDB_Txn_type ;
+#else
+typedef DB_TXN BerkeleyDB_Txn_type ;
+#endif
+
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Raw ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB ;
+typedef void * BerkeleyDB__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue__Raw ;
+typedef BerkeleyDB_Cursor_type BerkeleyDB__Cursor_type ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Inner ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Raw ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Inner ;
+#if 0
+typedef DB_LOG * BerkeleyDB__Log ;
+typedef DB_LOCKTAB * BerkeleyDB__Lock ;
+#endif
+typedef DBT DBTKEY ;
+typedef DBT DBT_OPT ;
+typedef DBT DBT_B ;
+typedef DBT DBTKEY_B ;
+typedef DBT DBTVALUE ;
+typedef void * PV_or_NULL ;
+typedef PerlIO * IO_or_NULL ;
+typedef int DualType ;
+
+static void
+hash_delete(char * hash, char * key);
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+#ifdef ALLOW_RECNO_OFFSET
+# define RECNO_BASE db->array_base
+#else
+# define RECNO_BASE 1
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define flagSet_DB2(i, f) i |= f
+#else
+# define flagSet_DB2(i, f)
+#endif
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(bitmask) (flags & (bitmask))
+#else
+# define flagSet(bitmask) ((flags & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define BackRef internal
+#else
+# if DB_VERSION_MAJOR == 3 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0)
+# define BackRef cj_internal
+# else
+# define BackRef api_internal
+# endif
+#endif
+
+#define ERR_BUFF "BerkeleyDB::Error"
+
+#define ZMALLOC(to, typ) ((to = (typ *)safemalloc(sizeof(typ))), \
+ Zero(to,1,typ))
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#if 1
+#define getInnerObject(x) (*av_fetch((AV*)SvRV(x), 0, FALSE))
+#else
+#define getInnerObject(x) ((SV*)SvRV(sv))
+#endif
+
+#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
+
+#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = SvIV(sv)
+#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = GetFILEptr(sv)
+#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = sv
+#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPV(sv,PL_na)
+#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPVX(sv)
+#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(getInnerObject(sv)) ; \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ HV * hv = (HV *)GetInternalObject(sv); \
+ SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
+ IV tmp = SvIV(*svp); \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(GetInternalObject(sv));\
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define LastDBerror DB_RUNRECOVERY
+
+#define setDUALerrno(var, err) \
+ sv_setnv(var, (double)err) ; \
+ sv_setpv(var, ((err) ? db_strerror(err) : "")) ;\
+ SvNOK_on(var);
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputValue_B(arg, name) \
+ { if (RETVAL == 0) { \
+ if (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO)){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ } \
+ DBM_ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (!db->recno_or_queue) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE); \
+ DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define OutputKey_B(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->recno_or_queue || \
+ (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO))){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define SetPartial(data,db) \
+ data.flags = db->partial ; \
+ data.dlen = db->dlen ; \
+ data.doff = db->doff ;
+
+#define ckActive(active, type) \
+ { \
+ if (!active) \
+ softCrash("%s is already closed", type) ; \
+ }
+
+#define ckActive_Environment(a) ckActive(a, "Environment")
+#define ckActive_TxnMgr(a) ckActive(a, "Transaction Manager")
+#define ckActive_Transaction(a) ckActive(a, "Transaction")
+#define ckActive_Database(a) ckActive(a, "Database")
+#define ckActive_Cursor(a) ckActive(a, "Cursor")
+
+/* Internal Global Data */
+static db_recno_t Value ;
+static db_recno_t zero = 0 ;
+static BerkeleyDB CurrentDB ;
+
+static DBTKEY empty ;
+#if 0
+static char ErrBuff[1000] ;
+#endif
+
+#ifdef AT_LEAST_DB_3_3
+# if PERL_REVISION == 5 && PERL_VERSION <= 4
+
+/* saferealloc in perl5.004 will croak if it is given a NULL pointer*/
+void *
+MyRealloc(void * ptr, size_t size)
+{
+ if (ptr == NULL )
+ return safemalloc(size) ;
+ else
+ return saferealloc(ptr, size) ;
+}
+
+# else
+# define MyRealloc saferealloc
+# endif
+#endif
+
+static char *
+my_strdup(const char *s)
+{
+ if (s == NULL)
+ return NULL ;
+
+ {
+        MEM_SIZE l = strlen(s) + 1;    /* include the NUL terminator */
+ char *s1 = (char *)safemalloc(l);
+
+ Copy(s, s1, (MEM_SIZE)l, char);
+ return s1;
+ }
+}
+
+#if DB_VERSION_MAJOR == 2
+static char *
+db_strerror(int err)
+{
+ if (err == 0)
+ return "" ;
+
+ if (err > 0)
+ return Strerror(err) ;
+
+ switch (err) {
+ case DB_INCOMPLETE:
+ return ("DB_INCOMPLETE: Sync was unable to complete");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return (
+ "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_LOCK_NOTHELD:
+ return ("DB_LOCK_NOTHELD: Lock not held by locker");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ default:
+ return "Unknown Error" ;
+
+ }
+}
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef TRACE
+#if DB_VERSION_MAJOR > 2
+static char *
+my_db_strerror(int err)
+{
+ static char buffer[1000] ;
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ sprintf(buffer, "%d: %s", err, db_strerror(err)) ;
+ if (err && sv) {
+ strcat(buffer, ", ") ;
+ strcat(buffer, SvPVX(sv)) ;
+ }
+ return buffer;
+}
+#endif
+#endif
+
+static void
+close_everything(void)
+{
+ dTHR;
+ Trace(("close_everything\n")) ;
+ /* Abort All Transactions */
+ {
+ BerkeleyDB__Txn__Raw tid ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
+ while ( (he = hv_iternext(hv)) ) {
+ tid = * (BerkeleyDB__Txn__Raw *) hv_iterkey(he, &len) ;
+ Trace((" Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
+ if (tid->active) {
+#ifdef AT_LEAST_DB_4
+ tid->txn->abort(tid->txn) ;
+#else
+ txn_abort(tid->txn);
+#endif
+ ++ closed ;
+ }
+ tid->active = FALSE ;
+ ++ all ;
+ }
+    Trace(("End of BerkeleyDB::Term::close_all_txns aborted %d of %d transactions\n",closed, all)) ;
+ }
+
+ /* Close All Cursors */
+ {
+ BerkeleyDB__Cursor db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void) hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB__Cursor*) hv_iterkey(he, &len) ;
+ Trace((" Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
+ if (db->active) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_cursors closed %d of %d cursors\n",closed, all)) ;
+ }
+
+ /* Close All Databases */
+ {
+ BerkeleyDB db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB*) hv_iterkey(he, &len) ;
+ Trace((" Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
+ if (db->active) {
+ (db->dbp->close)(db->dbp, 0) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_dbs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ /* Close All Environments */
+ {
+ BerkeleyDB__Env env ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
+ while ( (he = hv_iternext(hv)) ) {
+ env = * (BerkeleyDB__Env*) hv_iterkey(he, &len) ;
+ Trace((" Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
+ if (env->active) {
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ ++ closed ;
+ }
+ env->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_envs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ Trace(("end close_everything\n")) ;
+
+}
+
+static void
+destroyDB(BerkeleyDB db)
+{
+ dTHR;
+ if (! PL_dirty && db->active) {
+ -- db->open_cursors ;
+ ((db->dbp)->close)(db->dbp, 0) ;
+ }
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->dup_compare)
+ SvREFCNT_dec(db->dup_compare) ;
+#ifdef AT_LEAST_DB_3_3
+ if (db->associated && !db->secondary_db)
+ SvREFCNT_dec(db->associated) ;
+#endif
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+#endif
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+ if (db->filename)
+ Safefree(db->filename) ;
+ Safefree(db) ;
+}
+
+static int
+softCrash(const char *pat, ...)
+{
+ char buffer1 [500] ;
+ char buffer2 [500] ;
+ va_list args;
+ va_start(args, pat);
+
+ Trace(("softCrash: %s\n", pat)) ;
+
+#define ABORT_PREFIX "BerkeleyDB Aborting: "
+
+ /* buffer = (char*) safemalloc(strlen(pat) + strlen(ABORT_PREFIX) + 1) ; */
+ strcpy(buffer1, ABORT_PREFIX) ;
+ strcat(buffer1, pat) ;
+
+ vsprintf(buffer2, buffer1, args) ;
+
+ croak(buffer2);
+
+ /* NOTREACHED */
+ va_end(args);
+ return 1 ;
+}
+
+
+static I32
+GetArrayLength(BerkeleyDB db)
+{
+ DBT key ;
+ DBT value ;
+ int RETVAL = 0 ;
+ DBC * cursor ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor) == 0 )
+#else
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor, 0) == 0 )
+#endif
+ {
+ RETVAL = cursor->c_get(cursor, &key, &value, DB_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+ cursor->c_close(cursor) ;
+ }
+
+ Trace(("GetArrayLength got %d\n", RETVAL)) ;
+ return ((I32)RETVAL) ;
+}
+
+#if 0
+
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ Trace(("GetRecnoKey start value = %d\n", value)) ;
+ if (db->recno_or_queue && value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + RECNO_BASE <= 0)
+ softCrash("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+ value = length + value + RECNO_BASE ;
+ }
+ else
+ ++ value ;
+
+ Trace(("GetRecnoKey end value = %d\n", value)) ;
+
+ return value ;
+}
+
+#else /* ! 0 */
+
+#if 0
+#ifdef ALLOW_RECNO_OFFSET
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ if (value + RECNO_BASE < 1)
+ softCrash("key value %d < base (%d)", (value), RECNO_BASE?0:1) ;
+ return value + RECNO_BASE ;
+}
+
+#else
+#endif /* ALLOW_RECNO_OFFSET */
+#endif /* 0 */
+
+#define GetRecnoKey(db, value) ((value) + RECNO_BASE )
+
+#endif /* 0 */
+
+#if 0
+static SV *
+GetInternalObject(SV * sv)
+{
+ SV * info = (SV*) NULL ;
+ SV * s ;
+ MAGIC * mg ;
+
+ Trace(("in GetInternalObject %d\n", sv)) ;
+ if (sv == NULL || !SvROK(sv))
+ return NULL ;
+
+ s = SvRV(sv) ;
+ if (SvMAGICAL(s))
+ {
+ if (SvTYPE(s) == SVt_PVHV || SvTYPE(s) == SVt_PVAV)
+ mg = mg_find(s, 'P') ;
+ else
+ mg = mg_find(s, 'q') ;
+
+ /* all this testing is probably overkill, but till I know more
+ about global destruction it stays.
+ */
+ /* if (mg && mg->mg_obj && SvRV(mg->mg_obj) && SvPVX(SvRV(mg->mg_obj))) */
+ if (mg && mg->mg_obj && SvRV(mg->mg_obj) )
+ info = SvRV(mg->mg_obj) ;
+ else
+ info = s ;
+ }
+
+ Trace(("end of GetInternalObject %d\n", info)) ;
+ return info ;
+}
+#endif
+
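+/* Callback glue: wrap the two DBT keys in Perl scalars and invoke the
+ * user-supplied compare subroutine held in CurrentDB->compare, handing its
+ * integer result back to Berkeley DB. */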
+static int
+btree_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+ BerkeleyDB keepDB = CurrentDB ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("in btree_compare - expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ CurrentDB = keepDB ;
+ return (retval) ;
+
+}
+
+static int
+dup_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+ BerkeleyDB keepDB = CurrentDB ;
+
+ Trace(("In dup_compare \n")) ;
+ if (!CurrentDB)
+ softCrash("Internal Error - No CurrentDB in dup_compare") ;
+ if (CurrentDB->dup_compare == NULL)
+ softCrash("in dup_compare: no callback specified for database '%s'", CurrentDB->filename) ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->dup_compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("dup_compare: expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ CurrentDB = keepDB ;
+ return (retval) ;
+
+}
+
+static size_t
+btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+ BerkeleyDB keepDB = CurrentDB ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("btree_prefix: expected 1 return value from prefix sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ CurrentDB = keepDB ;
+
+ return (retval) ;
+}
+
+static u_int32_t
+hash_cb(DB_callback const void * data, u_int32_t size)
+{
+ dSP ;
+ int retval ;
+ int count ;
+ BerkeleyDB keepDB = CurrentDB ;
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("hash_cb: expected 1 return value from hash sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ CurrentDB = keepDB ;
+
+ return (retval) ;
+}
+
+#ifdef AT_LEAST_DB_3_3
+
+static int
+associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey)
+{
+ dSP ;
+ char * pk_dat, * pd_dat, *sk_dat ;
+ int retval ;
+ int count ;
+ SV * skey_SV ;
+
+ Trace(("In associate_cb \n")) ;
+ if (((BerkeleyDB)db->BackRef)->associated == NULL){
+ Trace(("No Callback registered\n")) ;
+ return EINVAL ;
+ }
+
+ skey_SV = newSVpv("",0);
+
+
+ pk_dat = (char*) pkey->data ;
+ pd_dat = (char*) pdata->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (pkey->size == 0)
+ pk_dat = "" ;
+ if (pdata->size == 0)
+ pd_dat = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(pk_dat,pkey->size)));
+ PUSHs(sv_2mortal(newSVpvn(pd_dat,pdata->size)));
+ PUSHs(sv_2mortal(skey_SV));
+ PUTBACK ;
+
+ Trace(("calling associated cb\n"));
+ count = perl_call_sv(((BerkeleyDB)db->BackRef)->associated, G_SCALAR);
+ Trace(("called associated cb\n"));
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("associate: expected 1 return value from prefix sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+
+ /* retrieve the secondary key */
+ DBT_clear(*skey);
+ skey->flags = DB_DBT_APPMALLOC;
+ skey->size = SvCUR(skey_SV);
+ skey->data = (char*)safemalloc(skey->size);
+ memcpy(skey->data, SvPVX(skey_SV), skey->size);
+ Trace(("key is %d -- %.*s\n", skey->size, skey->size, skey->data));
+
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+#endif /* AT_LEAST_DB_3_3 */
+
+static void
+db_errcall_cb(const char * db_errpfx, char * buffer)
+{
+#if 0
+
+ if (db_errpfx == NULL)
+ db_errpfx = "" ;
+ if (buffer == NULL )
+ buffer = "" ;
+ ErrBuff[0] = '\0';
+ if (strlen(db_errpfx) + strlen(buffer) + 3 <= 1000) {
+ if (*db_errpfx != '\0') {
+ strcat(ErrBuff, db_errpfx) ;
+ strcat(ErrBuff, ": ") ;
+ }
+ strcat(ErrBuff, buffer) ;
+ }
+
+#endif
+
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
+
+static SV *
+readHash(HV * hash, char * key)
+{
+ SV ** svp;
+ svp = hv_fetch(hash, key, strlen(key), FALSE);
+ if (svp && SvOK(*svp))
+ return *svp ;
+ return NULL ;
+}
+
+static void
+hash_delete(char * hash, char * key)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ (void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
+}
+
+static void
+hash_store_iv(char * hash, char * key, IV value)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ (void)hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
+ /* printf("hv_store returned %d\n", ret) ; */
+}
+
+static void
+hv_store_iv(HV * hash, char * key, IV value)
+{
+ hv_store(hash, key, strlen(key), newSViv(value), 0);
+}
+
+static BerkeleyDB
+my_db_open(
+ BerkeleyDB db ,
+ SV * ref,
+ SV * ref_dbenv ,
+ BerkeleyDB__Env dbenv ,
+ BerkeleyDB__Txn txn,
+ const char * file,
+ const char * subname,
+ DBTYPE type,
+ int flags,
+ int mode,
+ DB_INFO * info
+ )
+{
+ DB_ENV * env = NULL ;
+ BerkeleyDB RETVAL = NULL ;
+ DB * dbp ;
+ int Status ;
+ DB_TXN* txnid = NULL ;
+
+ Trace(("_db_open(dbenv[%p] ref_dbenv [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
+
+ CurrentDB = db ;
+ if (dbenv)
+ env = dbenv->Env ;
+
+ if (txn)
+ txnid = txn->txn;
+
+ Trace(("_db_open(dbenv[%p] ref_dbenv [%p] txn [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ dbenv, ref_dbenv, txn, file, subname, type, flags, mode)) ;
+
+#if DB_VERSION_MAJOR == 2
+ if (subname)
+ softCrash("Subname needs Berkeley DB 3 or better") ;
+#endif
+
+#if DB_VERSION_MAJOR > 2
+ Status = db_create(&dbp, env, 0) ;
+ Trace(("db_create returned %s\n", my_db_strerror(Status))) ;
+ if (Status)
+ return RETVAL ;
+
+#ifdef AT_LEAST_DB_3_3
+ if (! env) {
+ dbp->set_alloc(dbp, safemalloc, MyRealloc, safefree) ;
+ dbp->set_errcall(dbp, db_errcall_cb) ;
+ }
+#endif
+
+ if (info->re_source) {
+ Status = dbp->set_re_source(dbp, info->re_source) ;
+ Trace(("set_re_source [%s] returned %s\n",
+ info->re_source, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_cachesize) {
+ Status = dbp->set_cachesize(dbp, 0, info->db_cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ info->db_cachesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_lorder) {
+ Status = dbp->set_lorder(dbp, info->db_lorder) ;
+ Trace(("set_lorder [%d] returned %s\n",
+ info->db_lorder, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_pagesize) {
+ Status = dbp->set_pagesize(dbp, info->db_pagesize) ;
+ Trace(("set_pagesize [%d] returned %s\n",
+ info->db_pagesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_ffactor) {
+ Status = dbp->set_h_ffactor(dbp, info->h_ffactor) ;
+ Trace(("set_h_ffactor [%d] returned %s\n",
+ info->h_ffactor, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_nelem) {
+ Status = dbp->set_h_nelem(dbp, info->h_nelem) ;
+ Trace(("set_h_nelem [%d] returned %s\n",
+ info->h_nelem, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_minkey) {
+ Status = dbp->set_bt_minkey(dbp, info->bt_minkey) ;
+ Trace(("set_bt_minkey [%d] returned %s\n",
+ info->bt_minkey, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_compare) {
+ Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
+ Trace(("set_bt_compare [%p] returned %s\n",
+ info->bt_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_hash) {
+ Status = dbp->set_h_hash(dbp, info->h_hash) ;
+ Trace(("set_h_hash [%d] returned %s\n",
+ info->h_hash, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->dup_compare) {
+ Status = dbp->set_dup_compare(dbp, info->dup_compare) ;
+ Trace(("set_dup_compare [%d] returned %s\n",
+ info->dup_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_prefix) {
+ Status = dbp->set_bt_prefix(dbp, info->bt_prefix) ;
+ Trace(("set_bt_prefix [%d] returned %s\n",
+ info->bt_prefix, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_len) {
+ Status = dbp->set_re_len(dbp, info->re_len) ;
+ Trace(("set_re_len [%d] returned %s\n",
+ info->re_len, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_delim) {
+ Status = dbp->set_re_delim(dbp, info->re_delim) ;
+ Trace(("set_re_delim [%d] returned %s\n",
+ info->re_delim, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_pad) {
+ Status = dbp->set_re_pad(dbp, info->re_pad) ;
+ Trace(("set_re_pad [%d] returned %s\n",
+ info->re_pad, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->flags) {
+ Status = dbp->set_flags(dbp, info->flags) ;
+ Trace(("set_flags [%d] returned %s\n",
+ info->flags, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->q_extentsize) {
+#ifdef AT_LEAST_DB_3_2
+ Status = dbp->set_q_extentsize(dbp, info->q_extentsize) ;
+        Trace(("set_q_extentsize [%d] returned %s\n",
+                info->q_extentsize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+#else
+ softCrash("-ExtentSize needs at least Berkeley DB 3.2.x") ;
+#endif
+ }
+
+#ifdef AT_LEAST_DB_4_1
+ if ((Status = (dbp->open)(dbp, txnid, file, subname, type, flags, mode)) == 0) {
+#else
+ if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
+#endif /* AT_LEAST_DB_4_1 */
+#else /* DB_VERSION_MAJOR == 2 */
+ if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
+#endif /* DB_VERSION_MAJOR == 2 */
+
+ Trace(("db_opened ok\n"));
+#ifdef AT_LEAST_DB_3_3
+ dbp->BackRef = db;
+#endif
+ RETVAL = db ;
+ RETVAL->dbp = dbp ;
+ RETVAL->txn = txnid ;
+#if DB_VERSION_MAJOR == 2
+ RETVAL->type = dbp->type ;
+#else /* DB_VERSION_MAJOR > 2 */
+#ifdef AT_LEAST_DB_3_3
+ dbp->get_type(dbp, &RETVAL->type) ;
+#else /* DB 3.0 -> 3.2 */
+ RETVAL->type = dbp->get_type(dbp) ;
+#endif
+#endif /* DB_VERSION_MAJOR > 2 */
+ RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
+ RETVAL->type == DB_QUEUE) ;
+ RETVAL->filename = my_strdup(file) ;
+ RETVAL->Status = Status ;
+ RETVAL->active = TRUE ;
+ hash_store_iv("BerkeleyDB::Term::Db", (char *)RETVAL, 1) ;
+ Trace((" storing %p %p in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
+ if (dbenv) {
+ RETVAL->parent_env = dbenv ;
+ dbenv->Status = Status ;
+ ++ dbenv->open_dbs ;
+ }
+ }
+ else {
+#if DB_VERSION_MAJOR > 2
+ (dbp->close)(dbp, 0) ;
+#endif
+ destroyDB(db) ;
+ Trace(("db open returned %s\n", my_db_strerror(Status))) ;
+ }
+
+ return RETVAL ;
+}
+
+
+#include "constants.h"
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB PREFIX = env_
+
+INCLUDE: constants.xs
+
+#define env_db_version(maj, min, patch) db_version(&maj, &min, &patch)
+char *
+env_db_version(maj, min, patch)
+ int maj
+ int min
+ int patch
+ OUTPUT:
+ RETVAL
+ maj
+ min
+ patch
+
+int
+db_value_set(value, which)
+ int value
+ int which
+ NOT_IMPLEMENTED_YET
+
+
+DualType
+_db_remove(ref)
+ SV * ref
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("BerkeleyDB::db_remove needs Berkeley DB 3.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->remove(dbp, db, subdb, flags) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_db_verify(ref)
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("BerkeleyDB::db_verify needs Berkeley DB 3.1.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ const char * outfile = NULL ;
+ FILE * ofh = NULL;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_pv(outfile, "Outfile", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ RETVAL = 0;
+ if (outfile){
+ ofh = fopen(outfile, "w");
+ if (! ofh)
+ RETVAL = errno;
+ }
+ if (! RETVAL) {
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->verify(dbp, db, subdb, ofh, flags) ;
+ }
+ if (outfile)
+ fclose(ofh);
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_db_rename(ref)
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("BerkeleyDB::db_rename needs Berkeley DB 3.1.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ const char * newname = NULL ;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_pv(newname, "Newname", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->rename(dbp, db, subdb, newname, flags) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Env PACKAGE = BerkeleyDB::Env PREFIX = env_
+
+
+BerkeleyDB::Env::Raw
+_db_appinit(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ char * home = NULL ;
+ char * errfile = NULL ;
+ char * server = NULL ;
+ char ** config = NULL ;
+ int flags = 0 ;
+ int setflags = 0 ;
+ int cachesize = 0 ;
+ int lk_detect = 0 ;
+ SV * errprefix = NULL;
+ DB_ENV * env ;
+ int status ;
+
+ Trace(("in _db_appinit [%s] %d\n", self, ref)) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(home, "Home", char *) ;
+ SetValue_pv(config, "Config", char **) ;
+ SetValue_sv(errprefix, "ErrPrefix") ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(setflags, "SetFlags") ;
+ SetValue_pv(server, "Server", char *) ;
+ SetValue_iv(cachesize, "Cachesize") ;
+ SetValue_iv(lk_detect, "LockDetect") ;
+#ifndef AT_LEAST_DB_3_2
+ if (setflags)
+ softCrash("-SetFlags needs Berkeley DB 3.x or better") ;
+#endif /* ! AT_LEAST_DB_3 */
+#ifndef AT_LEAST_DB_3_1
+ if (server)
+ softCrash("-Server needs Berkeley DB 3.1 or better") ;
+#endif /* ! AT_LEAST_DB_3_1 */
+ Trace(("_db_appinit(config=[%d], home=[%s],errprefix=[%s],flags=[%d]\n",
+ config, home, errprefix, flags)) ;
+#ifdef TRACE
+ if (config) {
+ int i ;
+ for (i = 0 ; i < 10 ; ++ i) {
+ if (config[i] == NULL) {
+ printf(" End\n") ;
+ break ;
+ }
+ printf(" config = [%s]\n", config[i]) ;
+ }
+ }
+#endif /* TRACE */
+ ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
+ if (flags & DB_INIT_TXN)
+ RETVAL->txn_enabled = TRUE ;
+#if DB_VERSION_MAJOR == 2
+ ZMALLOC(RETVAL->Env, DB_ENV) ;
+ env = RETVAL->Env ;
+ {
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
+
+ SetValue_pv(errfile, "ErrFile", char *) ;
+ if (errfile) {
+ RETVAL->ErrHandle = env->db_errfile = fopen(errfile, "w");
+ if (RETVAL->ErrHandle == NULL)
+ croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
+ }
+ SetValue_iv(env->db_verbose, "Verbose") ;
+ env->db_errcall = db_errcall_cb ;
+ RETVAL->active = TRUE ;
+ status = db_appinit(home, config, env, flags) ;
+ Trace((" status = %d env %d Env %d\n", status, RETVAL, env)) ;
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+ else {
+ if (RETVAL->ErrHandle)
+ fclose(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL->Env) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+ }
+#else /* DB_VERSION_MAJOR > 2 */
+#ifndef AT_LEAST_DB_3_1
+# define DB_CLIENT 0
+#endif
+ status = db_env_create(&RETVAL->Env, server ? DB_CLIENT : 0) ;
+ Trace(("db_env_create flags = %d returned %s\n", flags,
+ my_db_strerror(status))) ;
+ env = RETVAL->Env ;
+#ifdef AT_LEAST_DB_3_3
+ env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
+#endif
+ if (status == 0 && cachesize) {
+ status = env->set_cachesize(env, 0, cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ cachesize, my_db_strerror(status)));
+ }
+
+ if (status == 0 && lk_detect) {
+ status = env->set_lk_detect(env, lk_detect) ;
+ Trace(("set_lk_detect [%d] returned %s\n",
+ lk_detect, my_db_strerror(status)));
+ }
+#ifdef AT_LEAST_DB_4
+ /* set the server */
+ if (server && status == 0)
+ {
+ status = env->set_rpc_server(env, NULL, server, 0, 0, 0);
+ Trace(("ENV->set_rpc_server server = %s returned %s\n", server,
+ my_db_strerror(status))) ;
+ }
+#else
+# if defined(AT_LEAST_DB_3_1) && ! defined(AT_LEAST_DB_4)
+ /* set the server */
+ if (server && status == 0)
+ {
+ status = env->set_server(env, server, 0, 0, 0);
+ Trace(("ENV->set_server server = %s returned %s\n", server,
+ my_db_strerror(status))) ;
+ }
+# endif
+#endif
+#ifdef AT_LEAST_DB_3_2
+ if (setflags && status == 0)
+ {
+ status = env->set_flags(env, setflags, 1);
+ Trace(("ENV->set_flags value = %d returned %s\n", setflags,
+ my_db_strerror(status))) ;
+ }
+#endif
+ if (status == 0)
+ {
+ int mode = 0 ;
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
+
+ SetValue_pv(errfile, "ErrFile", char *) ;
+ if (errfile) {
+ RETVAL->ErrHandle = fopen(errfile, "w");
+ if (RETVAL->ErrHandle == NULL)
+ croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
+ env->set_errfile(env, RETVAL->ErrHandle) ;
+ }
+
+ SetValue_iv(mode, "Mode") ;
+ env->set_errcall(env, db_errcall_cb) ;
+ RETVAL->active = TRUE ;
+#ifdef IS_DB_3_0_x
+ status = (env->open)(env, home, config, flags, mode) ;
+#else /* > 3.0 */
+ status = (env->open)(env, home, flags, mode) ;
+#endif
+ Trace(("ENV->open returned %s\n", my_db_strerror(status))) ;
+ }
+
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+ else {
+ (env->close)(env, 0) ;
+ if (RETVAL->ErrHandle)
+ fclose(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+#endif /* DB_VERSION_MAJOR > 2 */
+ }
+ OUTPUT:
+ RETVAL
+
+void
+log_archive(env, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Env env
+ PPCODE:
+ {
+ char ** list;
+ char ** file;
+ AV * av;
+#ifndef AT_LEAST_DB_3
+ softCrash("log_archive needs at least Berkeley DB 3.x.x");
+#else
+# ifdef AT_LEAST_DB_4
+ env->Status = env->Env->log_archive(env->Env, &list, flags) ;
+# else
+# ifdef AT_LEAST_DB_3_3
+ env->Status = log_archive(env->Env, &list, flags) ;
+# else
+ env->Status = log_archive(env->Env, &list, flags, safemalloc) ;
+# endif
+# endif
+ if (env->Status == 0 && list != NULL)
+ {
+ for (file = list; *file != NULL; ++file)
+ {
+ XPUSHs(sv_2mortal(newSVpv(*file, 0))) ;
+ }
+ safefree(list);
+ }
+#endif
+ }
+
+BerkeleyDB::Txn::Raw
+_txn_begin(env, pid=NULL, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Env env
+ BerkeleyDB::Txn pid
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+ Trace(("txn_begin pid %d, flags %d\n", pid, flags)) ;
+#if DB_VERSION_MAJOR == 2
+ if (env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ if (pid)
+ p_id = pid->txn ;
+ env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(env->Env->tx_info, p_id, &txn) ;
+#else
+# ifdef AT_LEAST_DB_4
+ env->Env->txn_begin(env->Env, p_id, &txn, flags) ;
+# else
+ txn_begin(env->Env, p_id, &txn, flags) ;
+# endif
+#endif
+ if (env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%p] in [%p]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+#if DB_VERSION_MAJOR == 2
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env->tx_info, k, m)
+#else /* DB 3.0 or better */
+# ifdef AT_LEAST_DB_4
+# define env_txn_checkpoint(e,k,m,f) e->Env->txn_checkpoint(e->Env, k, m, f)
+# else
+# ifdef AT_LEAST_DB_3_1
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m, 0)
+# else
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m)
+# endif
+# endif
+#endif
+DualType
+env_txn_checkpoint(env, kbyte, min, flags=0)
+ BerkeleyDB::Env env
+ long kbyte
+ long min
+ u_int32_t flags
+
+HV *
+txn_stat(env)
+ BerkeleyDB::Env env
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_4
+ if(env->Env->txn_stat(env->Env, &stat, 0) == 0) {
+#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
+ if(txn_stat(env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+#define EnDis(x) ((x) ? "Enabled" : "Disabled")
+void
+printEnv(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#if 0
+ printf("env [0x%X]\n", env) ;
+ printf(" ErrPrefix [%s]\n", env->ErrPrefix
+ ? SvPVX(env->ErrPrefix) : 0) ;
+ printf(" DB_ENV\n") ;
+ printf(" db_lorder [%d]\n", env->Env.db_lorder) ;
+ printf(" db_home [%s]\n", env->Env.db_home) ;
+ printf(" db_data_dir [%s]\n", env->Env.db_data_dir) ;
+ printf(" db_log_dir [%s]\n", env->Env.db_log_dir) ;
+ printf(" db_tmp_dir [%s]\n", env->Env.db_tmp_dir) ;
+ printf(" lk_info [%s]\n", EnDis(env->Env.lk_info)) ;
+ printf(" lk_max [%d]\n", env->Env.lk_max) ;
+ printf(" lg_info [%s]\n", EnDis(env->Env.lg_info)) ;
+ printf(" lg_max [%d]\n", env->Env.lg_max) ;
+ printf(" mp_info [%s]\n", EnDis(env->Env.mp_info)) ;
+ printf(" mp_size [%d]\n", env->Env.mp_size) ;
+ printf(" tx_info [%s]\n", EnDis(env->Env.tx_info)) ;
+ printf(" tx_max [%d]\n", env->Env.tx_max) ;
+ printf(" flags [%d]\n", env->Env.flags) ;
+ printf("\n") ;
+#endif
+
+SV *
+errPrefix(env, prefix)
+ BerkeleyDB::Env env
+ SV * prefix
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+ if (env->ErrPrefix) {
+ RETVAL = newSVsv(env->ErrPrefix) ;
+ SvPOK_only(RETVAL) ;
+ sv_setsv(env->ErrPrefix, prefix) ;
+ }
+ else {
+ RETVAL = NULL ;
+ env->ErrPrefix = newSVsv(prefix) ;
+ }
+ SvPOK_only(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ env->Env->db_errpfx = SvPVX(env->ErrPrefix) ;
+#else
+ env->Env->set_errpfx(env->Env, SvPVX(env->ErrPrefix)) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(env)
+ BerkeleyDB::Env env
+ CODE:
+ RETVAL = env->Status ;
+ OUTPUT:
+ RETVAL
+
+DualType
+db_appexit(env)
+ BerkeleyDB::Env env
+ ALIAS: close =1
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#ifdef STRICT_CLOSE
+ if (env->open_dbs)
+ softCrash("attempted to close an environment with %d open database(s)",
+ env->open_dbs) ;
+#endif /* STRICT_CLOSE */
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db_appexit(env->Env) ;
+#else
+ RETVAL = (env->Env->close)(env->Env, 0) ;
+#endif
+ env->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(env)
+ BerkeleyDB::Env env
+ int RETVAL = 0 ;
+ CODE:
+ Trace(("In BerkeleyDB::Env::DESTROY\n"));
+ Trace((" env %ld Env %ld dirty %d\n", env, &env->Env, PL_dirty)) ;
+ if (env->active)
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ if (env->ErrHandle)
+ fclose(env->ErrHandle) ;
+ if (env->ErrPrefix)
+ SvREFCNT_dec(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ Safefree(env->Env) ;
+#endif
+ Safefree(env) ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
+
+BerkeleyDB::TxnMgr::Raw
+_TxnMgr(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ CODE:
+ ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
+ RETVAL->env = env ;
+ /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (char *)txn, 1) ; */
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_bsize(env, bsize)
+ BerkeleyDB::Env env
+ u_int32_t bsize
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_bsize needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_bsize(env->Env, bsize);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_max(env, lg_max)
+ BerkeleyDB::Env env
+ u_int32_t lg_max
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_max needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_max(env->Env, lg_max);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_data_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_tmp_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_tmp_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_tmp_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_mutexlocks(env, do_lock)
+ BerkeleyDB::Env env
+ int do_lock
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_setmutexlocks needs Berkeley DB 3.0 or better") ;
+#else
+# ifdef AT_LEAST_DB_4
+ RETVAL = env->Status = env->Env->set_flags(env->Env, DB_NOLOCKING, do_lock);
+# else
+# if defined(AT_LEAST_DB_3_2_6) || defined(IS_DB_3_0_x)
+ RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
+# else /* DB 3.1 or 3.2.3 */
+ RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
+# endif
+# endif
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_verbose(env, which, onoff)
+ BerkeleyDB::Env env
+ u_int32_t which
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_verbose needs Berkeley DB 3.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_verbose(env->Env, which, onoff);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_flags(env, flags, onoff)
+ BerkeleyDB::Env env
+ u_int32_t flags
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_2
+ softCrash("$env->set_flags needs Berkeley DB 3.2.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_flags(env->Env, flags, onoff);
+#endif
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Term PACKAGE = BerkeleyDB::Term
+
+void
+close_everything()
+
+#define safeCroak(string) softCrash(string)
+void
+safeCroak(string)
+ char * string
+
+MODULE = BerkeleyDB::Hash PACKAGE = BerkeleyDB::Hash PREFIX = hash_
+
+BerkeleyDB::Hash::Raw
+_db_open_hash(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ Trace(("_db_open_hash start\n")) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Hash")) && sv != &PL_sv_undef) {
+ info.h_hash = hash_cb ;
+ db->hash = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_HASH, flags, mode, &info) ;
+ Trace(("_db_open_hash end\n")) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
+#else
+ DB_HASH_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
+ hv_store_iv(RETVAL, "hash_version", stat->hash_version);
+ hv_store_iv(RETVAL, "hash_pagesize", stat->hash_pagesize);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nkeys", stat->hash_nkeys);
+ hv_store_iv(RETVAL, "hash_ndata", stat->hash_ndata);
+#else
+ hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
+#endif
+#ifndef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
+#endif
+ hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
+ hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
+ hv_store_iv(RETVAL, "hash_free", stat->hash_free);
+ hv_store_iv(RETVAL, "hash_bfree", stat->hash_bfree);
+ hv_store_iv(RETVAL, "hash_bigpages", stat->hash_bigpages);
+ hv_store_iv(RETVAL, "hash_big_bfree", stat->hash_big_bfree);
+ hv_store_iv(RETVAL, "hash_overflows", stat->hash_overflows);
+ hv_store_iv(RETVAL, "hash_ovfl_free", stat->hash_ovfl_free);
+ hv_store_iv(RETVAL, "hash_dup", stat->hash_dup);
+ hv_store_iv(RETVAL, "hash_dup_free", stat->hash_dup_free);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "hash_metaflags", stat->hash_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Unknown PACKAGE = BerkeleyDB::Unknown PREFIX = hash_
+
+void
+_db_open_unknown(ref)
+ SV * ref
+ PPCODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB RETVAL ;
+ BerkeleyDB__Txn txn = NULL ;
+ static char * Names[] = {"", "Btree", "Hash", "Recno"} ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_UNKNOWN, flags, mode, &info) ;
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(RETVAL))));
+ if (RETVAL)
+ XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
+ else
+ XPUSHs(sv_2mortal(newSViv((IV)NULL)));
+ }
+
+
+
+MODULE = BerkeleyDB::Btree PACKAGE = BerkeleyDB::Btree PREFIX = btree_
+
+BerkeleyDB::Btree::Raw
+_db_open_btree(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ Trace(("In _db_open_btree\n"));
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char*) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Compare callback\n"));
+ info.bt_compare = btree_compare ;
+ db->compare = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ Trace((" Parsed DupCompare callback\n"));
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ softCrash("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Prefix callback\n"));
+ info.bt_prefix = btree_prefix ;
+ db->prefix = newSVsv(sv) ;
+ }
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_BTREE, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DB_BTREE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
+ hv_store_iv(RETVAL, "bt_version", stat->bt_version);
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "bt_metaflags", stat->bt_metaflags) ;
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_metaflags) ;
+#else
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_flags) ;
+#endif
+ hv_store_iv(RETVAL, "bt_maxkey", stat->bt_maxkey) ;
+ hv_store_iv(RETVAL, "bt_minkey", stat->bt_minkey);
+ hv_store_iv(RETVAL, "bt_re_len", stat->bt_re_len);
+ hv_store_iv(RETVAL, "bt_re_pad", stat->bt_re_pad);
+ hv_store_iv(RETVAL, "bt_pagesize", stat->bt_pagesize);
+ hv_store_iv(RETVAL, "bt_levels", stat->bt_levels);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "bt_nkeys", stat->bt_nkeys);
+ hv_store_iv(RETVAL, "bt_ndata", stat->bt_ndata);
+#else
+ hv_store_iv(RETVAL, "bt_nrecs", stat->bt_nrecs);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pg", stat->bt_int_pg);
+ hv_store_iv(RETVAL, "bt_leaf_pg", stat->bt_leaf_pg);
+ hv_store_iv(RETVAL, "bt_dup_pg", stat->bt_dup_pg);
+ hv_store_iv(RETVAL, "bt_over_pg", stat->bt_over_pg);
+ hv_store_iv(RETVAL, "bt_free", stat->bt_free);
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ hv_store_iv(RETVAL, "bt_freed", stat->bt_freed);
+ hv_store_iv(RETVAL, "bt_pfxsaved", stat->bt_pfxsaved);
+ hv_store_iv(RETVAL, "bt_split", stat->bt_split);
+ hv_store_iv(RETVAL, "bt_rootsplit", stat->bt_rootsplit);
+ hv_store_iv(RETVAL, "bt_fastsplit", stat->bt_fastsplit);
+ hv_store_iv(RETVAL, "bt_added", stat->bt_added);
+ hv_store_iv(RETVAL, "bt_deleted", stat->bt_deleted);
+ hv_store_iv(RETVAL, "bt_get", stat->bt_get);
+ hv_store_iv(RETVAL, "bt_cache_hit", stat->bt_cache_hit);
+ hv_store_iv(RETVAL, "bt_cache_miss", stat->bt_cache_miss);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pgfree", stat->bt_int_pgfree);
+ hv_store_iv(RETVAL, "bt_leaf_pgfree", stat->bt_leaf_pgfree);
+ hv_store_iv(RETVAL, "bt_dup_pgfree", stat->bt_dup_pgfree);
+ hv_store_iv(RETVAL, "bt_over_pgfree", stat->bt_over_pgfree);
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Recno PACKAGE = BerkeleyDB::Recno PREFIX = recno_
+
+BerkeleyDB::Recno::Raw
+_db_open_recno(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+
+ SetValue_iv(info.flags, "Property") ;
+ SetValue_pv(info.re_source, "Source", char*) ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Delim")) && sv != &PL_sv_undef) {
+ info.re_delim = SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_DELIMITER) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_RECNO, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Queue PACKAGE = BerkeleyDB::Queue PREFIX = recno_
+
+BerkeleyDB::Queue::Raw
+_db_open_queue(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("BerkeleyDB::Queue needs Berkeley DB 3.0.x or better");
+#else
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.q_extentsize, "ExtentSize") ;
+
+
+ SetValue_iv(info.flags, "Property") ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_QUEUE, flags, mode, &info) ;
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
+#else /* Berkeley DB 3, or better */
+ DB_QUEUE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
+ hv_store_iv(RETVAL, "qs_version", stat->qs_version);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "qs_nkeys", stat->qs_nkeys);
+ hv_store_iv(RETVAL, "qs_ndata", stat->qs_ndata);
+#else
+ hv_store_iv(RETVAL, "qs_nrecs", stat->qs_nrecs);
+#endif
+ hv_store_iv(RETVAL, "qs_pages", stat->qs_pages);
+ hv_store_iv(RETVAL, "qs_pagesize", stat->qs_pagesize);
+ hv_store_iv(RETVAL, "qs_pgfree", stat->qs_pgfree);
+ hv_store_iv(RETVAL, "qs_re_len", stat->qs_re_len);
+ hv_store_iv(RETVAL, "qs_re_pad", stat->qs_re_pad);
+#ifdef AT_LEAST_DB_3_2
+#else
+ hv_store_iv(RETVAL, "qs_start", stat->qs_start);
+#endif
+ hv_store_iv(RETVAL, "qs_first_recno", stat->qs_first_recno);
+ hv_store_iv(RETVAL, "qs_cur_recno", stat->qs_cur_recno);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "qs_metaflags", stat->qs_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Common PACKAGE = BerkeleyDB::Common PREFIX = dab_
+
+
+DualType
+db_close(db,flags=0)
+ int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ CODE:
+ Trace(("BerkeleyDB::Common::db_close %d\n", db));
+#ifdef STRICT_CLOSE
+ if (db->txn)
+ softCrash("attempted to close a database while a transaction was still open") ;
+ if (db->open_cursors)
+ softCrash("attempted to close a database with %d open cursor(s)",
+ db->open_cursors) ;
+#endif /* STRICT_CLOSE */
+ RETVAL = db->Status = ((db->dbp)->close)(db->dbp, flags) ;
+ if (db->parent_env && db->parent_env->open_dbs)
+ -- db->parent_env->open_dbs ;
+ db->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+ -- db->open_cursors ;
+ Trace(("end of BerkeleyDB::Common::db_close\n"));
+ OUTPUT:
+ RETVAL
+
+void
+dab__DESTROY(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ Trace(("In BerkeleyDB::Common::_DESTROY db %d dirty=%d\n", db, PL_dirty)) ;
+ destroyDB(db) ;
+ Trace(("End of BerkeleyDB::Common::DESTROY \n")) ;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur)
+#else
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur,flags)
+#endif
+BerkeleyDB::Cursor::Raw
+_db_cursor(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DBC * cursor ;
+ CurrentDB = db ;
+ if ((db->Status = db_cursor(db, db->txn, &cursor, flags)) == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->txn = db->txn ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+BerkeleyDB::Cursor::Raw
+_db_join(db, cursors, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ AV * cursors
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2 && (DB_VERSION_MINOR < 5 || (DB_VERSION_MINOR == 5 && DB_VERSION_PATCH < 2))
+ softCrash("join needs Berkeley DB 2.5.2 or later") ;
+#else /* Berkeley DB >= 2.5.2 */
+ DBC * join_cursor ;
+ DBC ** cursor_list ;
+ I32 count = av_len(cursors) + 1 ;
+ int i ;
+ CurrentDB = db ;
+ if (count < 1 )
+ softCrash("db_join: No cursors in parameter list") ;
+ cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
+ for (i = 0 ; i < count ; ++i) {
+ SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
+ IV tmp = SvIV(getInnerObject(obj)) ;
+ BerkeleyDB__Cursor cur = INT2PTR(BerkeleyDB__Cursor, tmp);
+ cursor_list[i] = cur->cursor ;
+ }
+ cursor_list[i] = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, flags, &join_cursor)) == 0){
+#else
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, &join_cursor, flags)) == 0){
+#endif
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = join_cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ safefree(cursor_list) ;
+#endif /* Berkeley DB >= 2.5.2 */
+ }
+ OUTPUT:
+ RETVAL
+
+int
+ArrayOffset(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL = db->array_base ? 0 : 1 ;
+#else
+ RETVAL = 0 ;
+#endif /* ALLOW_RECNO_OFFSET */
+ OUTPUT:
+ RETVAL
+
+int
+type(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ RETVAL = db->type ;
+ OUTPUT:
+ RETVAL
+
+int
+byteswapped(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ softCrash("byteswapped needs Berkeley DB 2.5 or later") ;
+#else
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db->dbp->byteswapped ;
+#else
+#ifdef AT_LEAST_DB_3_3
+ db->dbp->get_byteswapped(db->dbp, &RETVAL) ;
+#else
+ RETVAL = db->dbp->get_byteswapped(db->dbp) ;
+#endif
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(db)
+ BerkeleyDB::Common db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+#ifdef DBM_FILTERING
+
+#define setFilter(ftype) \
+ { \
+ if (db->ftype) \
+ RETVAL = sv_mortalcopy(db->ftype) ; \
+ ST(0) = RETVAL ; \
+ if (db->ftype && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->ftype) ; \
+ db->ftype = NULL ; \
+ } \
+ else if (code) { \
+ if (db->ftype) \
+ sv_setsv(db->ftype, code) ; \
+ else \
+ db->ftype = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_key, code) ;
+
+SV *
+filter_store_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_key, code) ;
+
+SV *
+filter_fetch_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_value, code) ;
+
+SV *
+filter_store_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_value, code) ;
+
+#endif /* DBM_FILTERING */
+
+void
+partial_set(db, offset, length)
+ BerkeleyDB::Common db
+ u_int32_t offset
+ u_int32_t length
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial = DB_DBT_PARTIAL ;
+ db->doff = offset ;
+ db->dlen = length ;
+
+
+void
+partial_clear(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial =
+ db->doff =
+ db->dlen = 0 ;
+
+
+#define db_del(db, key, flags) \
+ (db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
+DualType
+db_del(db, key, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ INIT:
+ Trace(("db_del db[%p] in [%p] txn[%p] key[%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+
+#ifdef AT_LEAST_DB_3
+# ifdef AT_LEAST_DB_3_2
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_CONSUME_WAIT)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# else
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# endif
+#else
+#define writeToKey() (flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+#endif
+#define db_get(db, key, data, flags) \
+ (db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
+DualType
+db_get(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ DBT_OPT data
+ CODE:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ Trace(("db_get db[%p] in [%p] txn[%p] key [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
+ RETVAL = db_get(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+ OUTPUT:
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ data
+
+#define db_pget(db, key, pkey, data, flags) \
+ (db->Status = ((db->dbp)->pget)(db->dbp, db->txn, &key, &pkey, &data, flags))
+DualType
+db_pget(db, key, pkey, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
+ DBT_OPT data
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("db_pget db [%p] in [%p] txn [%p] flags [%d]\n", db->dbp, db, db->txn, flags)) ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = db_pget(db, key, pkey, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+#endif
+ OUTPUT:
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ pkey
+ data
+
+#define db_put(db,key,data,flag) \
+ (db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
+DualType
+db_put(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ DBT data
+ CODE:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ /* SetPartial(data,db) ; */
+ Trace(("db_put db[%p] in [%p] txn[%p] key[%.*s] data [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, data.size, data.data, flags)) ;
+ RETVAL = db_put(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+ OUTPUT:
+ RETVAL
+ key if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
+
+#define db_key_range(db, key, range, flags) \
+ (db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
+DualType
+db_key_range(db, key, less, equal, greater, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ double less = 0.0 ;
+ double equal = 0.0 ;
+ double greater = 0.0 ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("key_range needs Berkeley DB 3.1.x or later") ;
+#else
+ DB_KEY_RANGE range ;
+ range.less = range.equal = range.greater = 0.0 ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ RETVAL = db_key_range(db, key, range, flags);
+ if (RETVAL == 0) {
+ less = range.less ;
+ equal = range.equal;
+ greater = range.greater;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+ less
+ equal
+ greater
+
+
+#define db_fd(d, x) (db->Status = (db->dbp->fd)(db->dbp, &x))
+DualType
+db_fd(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ CurrentDB = db ;
+ db_fd(db, RETVAL) ;
+ OUTPUT:
+ RETVAL
+
+
+#define db_sync(db, fl) (db->Status = (db->dbp->sync)(db->dbp, fl))
+DualType
+db_sync(db, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+void
+_Txn(db, txn=NULL)
+ BerkeleyDB::Common db
+ BerkeleyDB::Txn txn
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ if (txn) {
+ Trace(("_Txn[%p] in[%p] active [%d]\n", txn->txn, txn, txn->active));
+ ckActive_Transaction(txn->active) ;
+ db->txn = txn->txn ;
+ }
+ else {
+ Trace(("_Txn[undef] \n"));
+ db->txn = NULL ;
+ }
+
+
+#define db_truncate(db, countp, flags) \
+ (db->Status = ((db->dbp)->truncate)(db->dbp, db->txn, &countp, flags))
+DualType
+truncate(db, countp, flags=0)
+ BerkeleyDB::Common db
+ u_int32_t countp
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("truncate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ RETVAL = db_truncate(db, countp, flags);
+#endif
+ OUTPUT:
+ RETVAL
+ countp
+
+#ifdef AT_LEAST_DB_4_1
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, NULL, sec->dbp, &cb, flags))
+#else
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, sec->dbp, &cb, flags))
+#endif
+DualType
+associate(db, secondary, callback, flags=0)
+ BerkeleyDB::Common db
+ BerkeleyDB::Common secondary
+ SV* callback
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("associate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ /* db->associated = newSVsv(callback) ; */
+ secondary->associated = newSVsv(callback) ;
+ /* secondary->dbp->app_private = secondary->associated ; */
+ secondary->secondary_db = TRUE;
+ RETVAL = db_associate(db, secondary, associate_cb, flags);
+#endif
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Cursor PACKAGE = BerkeleyDB::Cursor PREFIX = cu_
+
+BerkeleyDB::Cursor::Raw
+_c_dup(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Cursor db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("c_dup needs at least Berkeley DB 3.0.x");
+#else
+ DBC * newcursor ;
+ db->Status = ((db->cursor)->c_dup)(db->cursor, &newcursor, flags) ;
+ if (db->Status == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->parent_db->open_cursors ++ ;
+ RETVAL->parent_db = db->parent_db ;
+ RETVAL->cursor = newcursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif /* DBM_FILTERING */
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_c_close(db)
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ CODE:
+ RETVAL = db->Status =
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->active = FALSE ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ OUTPUT:
+ RETVAL
+
+void
+_DESTROY(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ CurrentDB = db->parent_db ;
+ Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ if (db->active)
+ ((db->cursor)->c_close)(db->cursor) ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ Safefree(db->filename) ;
+ Safefree(db) ;
+ Trace(("End of BerkeleyDB::Cursor::_DESTROY\n")) ;
+
+DualType
+status(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_del(c,f) (c->Status = ((c->cursor)->c_del)(c->cursor,f))
+DualType
+cu_c_del(db, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
+DualType
+cu_c_get(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBT_B data
+ INIT:
+ Trace(("c_get db [%p] in [%p] flags [%d]\n", db->dbp, db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ Trace(("c_get end\n")) ;
+ OUTPUT:
+ RETVAL
+ key
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+#define cu_c_pget(c,k,p,d,f) (c->Status = (c->secondary_db ? (c->cursor->c_pget)(c->cursor,&k,&p,&d,f) : EINVAL))
+DualType
+cu_c_pget(db, key, pkey, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
+ DBT_B data
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_c_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("c_pget db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = cu_c_pget(db, key, pkey, data, flags);
+ Trace(("c_pget end\n")) ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ pkey
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+
+
+#define cu_c_put(c,k,d,f) (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
+DualType
+cu_c_put(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY key
+ DBT data
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ RETVAL
+
+#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
+DualType
+cu_c_count(db, count, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ u_int32_t count = NO_INIT
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("c_count needs at least Berkeley DB 3.1.x");
+#else
+ Trace(("c_get count [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ RETVAL = cu_c_count(db, count, flags) ;
+ Trace((" c_count got %d duplicates\n", count)) ;
+#endif
+ OUTPUT:
+ RETVAL
+ count
+
+MODULE = BerkeleyDB::TxnMgr PACKAGE = BerkeleyDB::TxnMgr PREFIX = xx_
+
+BerkeleyDB::Txn::Raw
+_txn_begin(txnmgr, pid=NULL, flags=0)
+ u_int32_t flags
+ BerkeleyDB::TxnMgr txnmgr
+ BerkeleyDB::Txn pid
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if (txnmgr->env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (pid)
+ p_id = pid->txn ;
+ txnmgr->env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
+#else
+# ifdef AT_LEAST_DB_4
+ txnmgr->env->Env->txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# else
+ txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# endif
+#endif
+ if (txnmgr->env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+DualType
+status(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ RETVAL = mgr->env->TxnMgrStatus ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ Trace(("In BerkeleyDB::TxnMgr::DESTROY dirty=%d\n", PL_dirty)) ;
+ Safefree(mgr) ;
+ Trace(("End of BerkeleyDB::TxnMgr::DESTROY\n")) ;
+
+DualType
+txn_close(txnp)
+ BerkeleyDB::TxnMgr txnp
+ NOT_IMPLEMENTED_YET
+
+
+#if DB_VERSION_MAJOR == 2
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env->tx_info, k, m)
+#else
+# ifdef AT_LEAST_DB_4
+# define xx_txn_checkpoint(e,k,m,f) e->env->Env->txn_checkpoint(e->env->Env, k, m, f)
+# else
+# ifdef AT_LEAST_DB_3_1
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m, 0)
+# else
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m)
+# endif
+# endif
+#endif
+DualType
+xx_txn_checkpoint(txnp, kbyte, min, flags=0)
+ BerkeleyDB::TxnMgr txnp
+ long kbyte
+ long min
+ u_int32_t flags
+
+HV *
+txn_stat(txnp)
+ BerkeleyDB::TxnMgr txnp
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_4
+ if(txnp->env->Env->txn_stat(txnp->env->Env, &stat, 0) == 0) {
+#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(txnp->env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
+ if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+BerkeleyDB::TxnMgr
+txn_open(dir, flags, mode, dbenv)
+ int flags
+ const char * dir
+ int mode
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+
+MODULE = BerkeleyDB::Txn PACKAGE = BerkeleyDB::Txn PREFIX = xx_
+
+DualType
+status(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ RETVAL = tid->Status ;
+ OUTPUT:
+ RETVAL
+
+int
+_DESTROY(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
+ if (tid->active)
+#ifdef AT_LEAST_DB_4
+ tid->txn->abort(tid->txn) ;
+#else
+ txn_abort(tid->txn) ;
+#endif
+ RETVAL = (int)tid ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ Safefree(tid) ;
+ Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
+ OUTPUT:
+ RETVAL
+
+#define xx_txn_unlink(d,f,e) txn_unlink(d,f,&(e->Env))
+DualType
+xx_txn_unlink(dir, force, dbenv)
+ const char * dir
+ int force
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+#ifdef AT_LEAST_DB_4
+# define xx_txn_prepare(t) (t->Status = t->txn->prepare(t->txn, 0))
+#else
+# ifdef AT_LEAST_DB_3_3
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn, 0))
+# else
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+# endif
+#endif
+DualType
+xx_txn_prepare(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_commit(t,flags) (t->Status = t->txn->commit(t->txn, flags))
+#else
+# if DB_VERSION_MAJOR == 2
+# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+# else
+# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+# endif
+#endif
+DualType
+_txn_commit(tid, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_abort(t) (t->Status = t->txn->abort(t->txn))
+#else
+# define _txn_abort(t) (t->Status = txn_abort(t->txn))
+#endif
+DualType
+_txn_abort(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_discard(t,f) (t->Status = t->txn->discard(t->txn, f))
+#else
+# ifdef AT_LEAST_DB_3_3_4
+# define _txn_discard(t,f) (t->Status = txn_discard(t->txn, f))
+# else
+# define _txn_discard(t,f) (int)softCrash("txn_discard needs Berkeley DB 3.3.4 or better") ;
+# endif
+#endif
+DualType
+_txn_discard(tid, flags=0)
+ BerkeleyDB::Txn tid
+ u_int32_t flags
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define xx_txn_id(t) t->txn->id(t->txn)
+#else
+# define xx_txn_id(t) txn_id(t->txn)
+#endif
+u_int32_t
+xx_txn_id(tid)
+ BerkeleyDB::Txn tid
+
+MODULE = BerkeleyDB::_tiedHash PACKAGE = BerkeleyDB::_tiedHash
+
+int
+FIRSTKEY(db)
+ BerkeleyDB::Common db
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DBC * cursor ;
+
+ /*
+ TODO: set the partial length to 0 so the value itself is not
+ retrieved; any existing partial settings would need to be
+ saved first and restored at the end.
+
+ */
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ /* If necessary create a cursor for FIRSTKEY/NEXTKEY use */
+ if (!db->cursor &&
+ (db->Status = db_cursor(db, db->txn, &cursor, 0)) == 0 )
+ db->cursor = cursor ;
+
+ if (db->cursor)
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_FIRST);
+ else
+ RETVAL = db->Status ;
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+
+
+int
+NEXTKEY(db, key)
+ BerkeleyDB::Common db
+ DBTKEY key = NO_INIT
+ CODE:
+ {
+ DBT value ;
+
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ key.flags = 0 ;
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_NEXT);
+
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+MODULE = BerkeleyDB::_tiedArray PACKAGE = BerkeleyDB::_tiedArray
+
+I32
+FETCHSIZE(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(db) ;
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ SV * version_sv = perl_get_sv("BerkeleyDB::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("BerkeleyDB::db_ver", GV_ADD|GV_ADDMULTI) ;
+ int Major, Minor, Patch ;
+ (void)db_version(&Major, &Minor, &Patch) ;
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nBerkeleyDB needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ if (Major < 2 || (Major == 2 && Minor < 6))
+ {
+ croak("BerkeleyDB needs Berkeley DB 2.6 or greater. This is %d.%d.%d\n",
+ Major, Minor, Patch) ;
+ }
+ sv_setpvf(version_sv, "%d.%d", Major, Minor) ;
+ sv_setpvf(ver_sv, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(sv_err, "");
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(db_recno_t) ;
+ empty.flags = 0 ;
+
+ }
+
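The BOOT section above verifies that db.h and the linked libdb agree and then records the detected release in $BerkeleyDB::db_version and $BerkeleyDB::db_ver. A quick way to confirm which Berkeley DB the bundled bindings picked up (a usage sketch, not part of the patch itself):

    perl -MBerkeleyDB -le 'print "$BerkeleyDB::db_version ($BerkeleyDB::db_ver)"'

Given the "%d.%d" and "%d.%03d%03d" formats used in BOOT, a 4.1.25 build, for example, would print "4.1 (4.001025)"; the exact numbers depend on the library this tree is compiled against.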
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm b/libdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
new file mode 100644
index 0000000..ba9a9c0
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Btree ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
diff --git a/libdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm b/libdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
new file mode 100644
index 0000000..8e7bc7e
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Hash ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
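Both Btree.pm and Hash.pm above are one-line stubs whose only job is to let MLDBM load a BerkeleyDB-backed store by package name. A minimal usage sketch, assuming the MLDBM module (with its default Data::Dumper serializer) is installed; the filename is illustrative:

    use MLDBM qw(BerkeleyDB::Btree);    # back the tied hash with the Btree stub
    use BerkeleyDB;                     # exports DB_CREATE used below

    my %h;
    tie %h, 'MLDBM', -Filename => 'mldbm.db', -Flags => DB_CREATE
        or die "tie failed: $BerkeleyDB::Error";
    $h{config} = { retries => 3, hosts => [ 'alpha', 'beta' ] };  # nested data is serialized on store
    untie %h;

The -Filename and -Flags arguments are passed straight through to BerkeleyDB::Btree's tie interface, which is why the stubs only need to pull in BerkeleyDB itself.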
diff --git a/libdb/perl/BerkeleyDB/Changes b/libdb/perl/BerkeleyDB/Changes
new file mode 100644
index 0000000..cbeb1a3
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/Changes
@@ -0,0 +1,167 @@
+Revision history for Perl extension BerkeleyDB.
+
+0.20 2nd September 2002
+
+ * More support for building with Berkeley DB 4.1.x
+ * db->get & db->pget used the wrong output macro for DBM filters
+ bug spotted by Aaron Ross.
+ * db_join didn't keep a reference to the cursors it was joining.
+ Spotted by Winton Davies.
+
+0.19 5th June 2002
+ * Removed the targets that used mkconsts from Makefile.PL. They relied
+ on a module that is not available in all versions of Perl.
+ * added support for env->set_verbose
+ * added support for db->truncate
+ * added support for db->rename via BerkeleyDB::db_rename
+ * added support for db->verify via BerkeleyDB::db_verify
+ * added support for db->associate, db->pget & cursor->c_pget
+ * Builds with Berkeley DB 4.1.x
+
+
+0.18 6th January 2002
+ * Dropped support for ErrFile as a file handle. It was proving too
+ difficult to get at the underlying FILE * in XS.
+ Reported by Jonas Smedegaard (Debian powerpc) & Kenneth Olwing (Win32)
+ * Fixed problem with abort macro in XSUB.h clashing with txn abort
+ method in Berkeley DB 4.x -- patch supplied by Kenneth Olwing.
+ * DB->set_alloc was getting called too late in BerkeleyDB.xs.
+ This was causing problems with ActivePerl -- problem reported
+ by Kenneth Olwing.
+ * When opening a queue, the Len property set the DB_PAD flag.
+ Should have been DB_FIXEDLEN. Fix provided by Kenneth Olwing.
+ * Test harness fixes from Kenneth Olwing.
+
+0.17 23 September 2001
+ * Fixed a bug in BerkeleyDB::Recno - reported by Niklas Paulsson.
+ * Added log_archive - patch supplied by Benjamin Holzman
+ * Added txn_discard
+ * Builds with Berkeley DB 4.0.x
+
+0.16 1 August 2001
+ * added support for Berkeley DB 3.3.x (but no support for any of the
+ new features just yet)
+
+0.15 26 April 2001
+ * Fixed a bug in the processing of the flags options in
+ db_key_range.
+ * added support for set_lg_max & set_lg_bsize
+ * allow DB_TMP_DIR and DB_TEMP_DIR
+ * the -Filename parameter to BerkeleyDB::Queue didn't work.
+ * added symbol DB_CONSUME_WAIT
+
+0.14 21st January 2001
+    * Silenced the warnings when built with a 64-bit Perl.
+ * Can now build with DB 3.2.3h (part of MySQL). The test harness
+ takes an age to do the queue test, but it does eventually pass.
+ * Mentioned the problems that occur when perl is built with sfio.
+
+0.13 15th January 2001
+ * Added support to allow this module to build with Berkeley DB 3.2
+ * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
+ changes.
+ * Documented the Solaris 2.7 core dump problem in README.
+ * Tidied up the test harness to fix a problem on Solaris where the
+ "fred" directory wasn't being deleted when it should have been.
+ * two calls to "open" clashed with a win32 macro.
+ * size argument for hash_cb is different for Berkeley DB 3.x
+ * Documented the issue of building on Linux.
+ * Added -Server, -CacheSize & -LockDetect options
+ [original patch supplied by Graham Barr]
+ * Added support for set_mutexlocks, c_count, set_q_extentsize,
+ key_range, c_dup
+ * Dropped the "attempted to close a Cursor with an open transaction"
+ error in c_close. The correct behaviour is that the cursor
+ should be closed before committing/aborting the transaction.
+
+0.12 2nd August 2000
+ * Serious bug with get fixed. Spotted by Sleepycat.
+ * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+
+0.11 4th June 2000
+    * When built with Berkeley DB 3.x there can be a clash with the close
+      macro.
+ * Typo in the definition of DB_WRITECURSOR
+ * The flags parameter wasn't getting sent to db_cursor
+ * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
+ memory)
+ * Can be built with Berkeley DB 3.1
+
+0.10 8th December 1999
+ * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
+ a memory leak. Fixed.
+ * If opening an environment or database failed, there was a small
+ memory leak. This has been fixed.
+    * A thread-enabled Perl could core dump when a database was closed.
+ Problem traced to the strdup function.
+
+0.09 29th November 1999
+ * the queue.t & subdb.t test harnesses were outputting a few
+ spurious warnings. This has been fixed.
+
+0.08 28th November 1999
+ * More documentation updates
+ * Changed reference to files in /tmp in examples.t
+ * Fixed a typo in softCrash that caused problems when building
+ with a thread-enabled Perl.
+ * BerkeleyDB::Error wasn't initialised properly.
+ * ANSI-ified all the static C functions in BerkeleyDB.xs
+ * Added support for the following DB 3.x features:
+ + The Queue database type
+ + db_remove
+ + subdatabases
+ + db_stat for Hash & Queue
+
+0.07 21st September 1999
+ * Numerous small bug fixes.
+ * Added support for sorting duplicate values DB_DUPSORT.
+ * Added support for DB_GET_BOTH & DB_NEXT_DUP.
+ * Added get_dup (from DB_File).
+ * beefed up the documentation.
+ * Forgot to add the DB_INIT_CDB in BerkeleyDB.pm in previous release.
+ * Merged the DBM Filter code from DB_File into BerkeleyDB.
+    * Fixed a nasty bug where a closed transaction was still used with
+      db_put, db_get etc.
+ * Added logic to gracefully close everything whenever a fatal error
+ happens. Previously the plug was just pulled.
+ * It is now a fatal error to explicitly close an environment if there
+ is still an open database; a database when there are open cursors or
+ an open transaction; and a cursor if there is an open transaction.
+ Using object destruction doesn't have this issue, as object
+ references will ensure everything gets closed in the correct order.
+ * The BOOT code now checks that the version of db.h & libdb are the
+ same - this seems to be a common problem on Linux.
+ * MLDBM support added.
+ * Support for the new join cursor added.
+ * Builds with Berkeley DB 3.x
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+ * Deprecated the TxnMgr class. As with Berkeley DB version 3,
+ txn_begin etc are now accessed via the environment object.
+
+0.06 19 December 1998
+ * Minor modifications to get the module to build with DB 2.6.x
+ * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
+
+0.05 9 November 1998
+ * Added a note to README about how to build Berkeley DB 2.x
+ when using HP-UX.
+ * Minor modifications to get the module to build with DB 2.5.x
+
+0.04 19 May 1998
+ * Define DEFSV & SAVE_DEFSV if not already defined. This allows
+ the module to be built with Perl 5.004_04.
+
+0.03 5 May 1998
+ * fixed db_get with DB_SET_RECNO
+ * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
+ * implemented BerkeleyDB::Unknown
+ * implemented BerkeleyDB::Recno, including push, pop etc
+    * modified the txn support.
+
+0.02 30 October 1997
+ * renamed module to BerkeleyDB
+ * fixed a few bugs & added more tests
+
+0.01 23 October 1997
+ * first alpha release as BerkDB.
+
diff --git a/libdb/perl/BerkeleyDB/MANIFEST b/libdb/perl/BerkeleyDB/MANIFEST
new file mode 100644
index 0000000..7da51ef
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/MANIFEST
@@ -0,0 +1,56 @@
+BerkeleyDB.pm
+BerkeleyDB.pod
+BerkeleyDB.pod.P
+BerkeleyDB.xs
+BerkeleyDB/Btree.pm
+BerkeleyDB/Hash.pm
+Changes
+config.in
+constants.h
+constants.xs
+dbinfo
+hints/dec_osf.pl
+hints/solaris.pl
+hints/irix_6_5.pl
+Makefile.PL
+MANIFEST
+mkconsts
+mkpod
+ppport.h
+README
+t/btree.t
+t/db-3.0.t
+t/db-3.1.t
+t/db-3.2.t
+t/db-3.3.t
+t/destroy.t
+t/env.t
+t/examples.t
+t/examples.t.T
+t/examples3.t
+t/examples3.t.T
+t/filter.t
+t/hash.t
+t/join.t
+t/mldbm.t
+t/queue.t
+t/recno.t
+t/strict.t
+t/subdb.t
+t/txn.t
+t/unknown.t
+t/util.pm
+Todo
+typemap
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
+scan
diff --git a/libdb/perl/BerkeleyDB/Makefile.PL b/libdb/perl/BerkeleyDB/Makefile.PL
new file mode 100644
index 0000000..86da9a8
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/Makefile.PL
@@ -0,0 +1,123 @@
+#! perl -w
+
+# It should not be necessary to edit this file. The configuration for
+# BerkeleyDB is controlled from the file config.in
+
+
+BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
+
+use strict ;
+use ExtUtils::MakeMaker ;
+use Config ;
+
+# Check for the presence of sfio
+if ($Config{'d_sfio'}) {
+ print <<EOM;
+
+WARNING: Perl seems to have been built with SFIO support enabled.
+ Please read the SFIO Notes in the README file.
+
+EOM
+}
+
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $^O eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'BerkeleyDB',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ #MAN3PODS => {}, # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'BerkeleyDB.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2",
+ #'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
+ ($] >= 5.005
+ ? (ABSTRACT_FROM => 'BerkeleyDB.pod',
+ AUTHOR => 'Paul Marquess <Paul.Marquess@btinternet.com>')
+ : ()
+ ),
+ );
+
+
+sub MY::postamble {
+ '
+$(NAME).pod: $(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
+ perl ./mkpod
+
+$(NAME).xs: typemap
+ $(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+
+' ;
+}
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB DBNAME ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME is optional, so pretend it has been parsed.
+ delete $Parsed{'DBNAME'} ;
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'BERKELEYDB_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'BERKELEYDB_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ print "Looks Good.\n" ;
+
+}
+
+# end of file Makefile.PL
diff --git a/libdb/perl/BerkeleyDB/README b/libdb/perl/BerkeleyDB/README
new file mode 100644
index 0000000..a600e31
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/README
@@ -0,0 +1,484 @@
+ BerkeleyDB
+
+ Version 0.20
+
+ 2nd Sept 2002
+
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+DESCRIPTION
+-----------
+
+BerkeleyDB is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 2 or greater. (Note: if
+you want to use version 1 of Berkeley DB with Perl you need the DB_File
+module).
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. BerkeleyDB provides an interface to all
+four of the database types (hash, btree, queue and recno) currently
+supported by Berkeley DB.
+
+For further details see the documentation in the file BerkeleyDB.pod.
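+
+As a quick illustration of the interface (a minimal sketch only; the
+file name and keys below are invented), a btree database can be
+created and used like this:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $db = new BerkeleyDB::Btree
+                    -Filename => "fruit.db",
+                    -Flags    => DB_CREATE
+        or die "Cannot open fruit.db: $! $BerkeleyDB::Error\n" ;
+
+    $db->db_put("apple", "red") ;
+
+    my $colour ;
+    $db->db_get("apple", $colour) ;
+    print "apple is $colour\n" ;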
+
+PREREQUISITES
+-------------
+
+Before you can build BerkeleyDB you need to have the following
+installed on your system:
+
+ * Perl 5.004_04 or greater.
+
+ * Berkeley DB Version 2.6.4 or greater
+
+ The official web site for Berkeley DB is http://www.sleepycat.com
+
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use BerkeleyDB
+ to access database files created by a third-party application,
+ like Sendmail. In these cases you must build BerkeleyDB with a
+ compatible version of Berkeley DB.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running Solaris 2.5, 2.7 or HP-UX 10, read either
+         the Solaris Notes or HP-UX Notes sections below.
+         If you are running Linux, please read the Linux Notes section
+         before proceeding.
+
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+INSTALLATION
+------------
+
+ make install
+
+TROUBLESHOOTING
+===============
+
+Here are some of the problems that people encounter when building BerkeleyDB.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:52: db.h: No such file or directory
+
+or this:
+
+ cc -c -I./libraries/2.7.5 -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/BerkeleyDB/BerkeleyDB.so -shared
+ -L/usr/local/lib BerkeleyDB.o
+ -L/home/paul/perl/ext/BerkDB/BerkeleyDB/libraries -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
+
+#error db.h is not for Berkeley DB at all.
+------------------------------------------
+
+If you get the error above when building this module it means that there
+is a file called "db.h" on your system that isn't the one that comes
+with Berkeley DB.
+
+Options:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. Edit config.in and make sure the INCLUDE variable points to the
+ directory where the Berkeley DB file db.h is installed.
+
+ 3. If option 2 doesn't work, try temporarily renaming the db.h file
+    that is causing the error.
+
+#error db.h is for Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+--------------------------------------------------------------------
+
+The error above will occur if there is a copy of the Berkeley DB 1.x
+file db.h on your system.
+
+This error will happen when
+
+ 1. you only have Berkeley DB version 1 on your system.
+ Solution: get & install a newer version of Berkeley DB.
+
+ 2. you have both version 1 and a later version of Berkeley DB
+ installed on your system. When building BerkeleyDB it attempts to
+ use the db.h for Berkeley DB version 1.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+
+#error db.h is for Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
+------------------------------------------------------------------------
+
+The error above will occur if there is a copy of the file db.h for
+Berkeley DB 2.0 to 2.5 on your system.
+
+This symptom can imply:
+
+ 1. You don't have a new enough version of Berkeley DB.
+ Solution: get & install a newer version of Berkeley DB.
+
+ 2. You have the correct version of Berkeley DB installed, but it isn't
+ in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+Undefined Symbol: txn_stat
+--------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: txn_stat
+ at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+ ...
+
+This error usually happens when you have both version 1 and a newer version
+of Berkeley DB installed on your system. BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2/3/4 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. BerkeleyDB can only be built with Berkeley DB version 2, 3 or 4.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_appinit
+----------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_appinit
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2 and the version 3
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_create
+---------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_create
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 3 and the version 2
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............
+ BerkeleyDB needs compatible versions of libdb & db.h
+ you have db.h version 2.6.4 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/btree.t line 25.
+ dubious
+ Test returned status 255 (wstat 65280, 0xff00)
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/BerkeleyDB/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with Berkeley DB 2.x or better.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+    The file libdb.so is your shared library.
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+
+Solaris 2.5 Notes
+-----------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+BerkeleyDB test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+Solaris 2.7 Notes
+-----------------
+
+If you are running Solaris 2.7 and all the tests in the test harness
+generate a core dump, try applying Sun patch 106980-09 (or better).
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building this module with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (BerkeleyDB.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+
+
+FEEDBACK
+--------
+
+How to report a problem with BerkeleyDB.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The complete output from running "perl -V" will tell
+ me all I need to know.
+     If your perl does not understand the "-V" option, it is too old.
+ BerkeleyDB needs Perl version 5.004_04 or better.
+
+ 2. The version of BerkeleyDB you have. If you have successfully
+ installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print qq{BerkeleyDB ver $BerkeleyDB::VERSION\n}'
+
+ If you haven't installed BerkeleyDB then search BerkeleyDB.pm for a
+ line like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you have installed. If you have
+ successfully installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING.qq{\n}'
+
+ If you haven't installed BerkeleyDB then search db.h for a line
+ like this:
+
+ #define DB_VERSION_STRING
+
+ 4. If you are having problems building BerkeleyDB, send me a complete
+ log of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in
+ BerkeleyDB and you want me to fix it, you will *greatly* enhance
+ the chances of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
+
diff --git a/libdb/perl/BerkeleyDB/Todo b/libdb/perl/BerkeleyDB/Todo
new file mode 100644
index 0000000..12d53bc
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/Todo
@@ -0,0 +1,57 @@
+
+ * Proper documentation.
+
+ * address or document the "close all cursors if you encounter an error"
+   behaviour.
+
+ * Change the $BerkeleyDB::Error to store the info in the db object,
+ if possible.
+
+ * $BerkeleyDB::db_version is documented. &db_version isn't.
+
+ * migrate perl code into the .xs file where necessary
+
+ * convert as many of the DB example files as possible to BerkeleyDB format.
+
+ * add a method to the DB object to allow access to the environment (if there
+ actually is one).
+
+
+Possibles
+
+ * use '~' magic to store the inner data.
+
+ * for the get methods, zap the value to undef if the key isn't found.
+   This may be more intuitive for those folks who are used to
+   the $hash{key} interface.
+
+ * Text interface? This can be done via Recno.
+
+ * allow the base offset for recno arrays to be either 0 or 1.
+
+ * when duplicate keys are enabled, allow db_put($key, [$val1, $val2,...])
+
+
+2.x -> 3.x Upgrade
+==================
+
+Environment Verbose
+Env->open mode
+DB cache size extra parameter
+DB->open subdatabases Done
+An empty environment causes DB->open to fail
+where is __db.001 coming from? db_remove seems to create it. Bug in 3.0.55
+Change db_strerror for 0 to ""? Done
+Queue Done
+db_stat for Hash & Queue Done
+No TxnMgr
+DB->remove
+ENV->remove
+ENV->set_verbose
+upgrade
+
+ $env = BerkeleyDB::Env::Create
+ $env = create BerkeleyDB::Env
+ $status = $env->open()
+
+ $db = BerkeleyDB::Hash::Create
+ $status = $db->open()
diff --git a/libdb/perl/BerkeleyDB/config.in b/libdb/perl/BerkeleyDB/config.in
new file mode 100644
index 0000000..fd1bb1c
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/config.in
@@ -0,0 +1,43 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+INCLUDE = /usr/local/include
+#INCLUDE = /usr/local/BerkeleyDB/include
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+LIB = /usr/local/lib
+#LIB = /usr/local/BerkeleyDB/lib
+
+# 3. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have Berkeley DB 2.6.4 you could rename the
+# Berkeley DB library from libdb.a to libdb-2.6.4.a and change the
+# DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.6.4
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-3.0
+
+# end of file config.in
diff --git a/libdb/perl/BerkeleyDB/constants.h b/libdb/perl/BerkeleyDB/constants.h
new file mode 100644
index 0000000..d86cef1
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/constants.h
@@ -0,0 +1,4046 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_DUP DB_PAD DB_RMW DB_SET */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'D':
+ if (memEQ(name, "DB_DUP", 6)) {
+ /* ^ */
+#ifdef DB_DUP
+ *iv_return = DB_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAD", 6)) {
+ /* ^ */
+#ifdef DB_PAD
+ *iv_return = DB_PAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RMW", 6)) {
+ /* ^ */
+#ifdef DB_RMW
+ *iv_return = DB_RMW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET", 6)) {
+ /* ^ */
+#ifdef DB_SET
+ *iv_return = DB_SET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_EXCL DB_HASH DB_LAST DB_NEXT DB_PREV */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'E':
+ if (memEQ(name, "DB_EXCL", 7)) {
+ /* ^ */
+#ifdef DB_EXCL
+ *iv_return = DB_EXCL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASH", 7)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LAST", 7)) {
+ /* ^ */
+#ifdef DB_LAST
+ *iv_return = DB_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEXT", 7)) {
+ /* ^ */
+#ifdef DB_NEXT
+ *iv_return = DB_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PREV", 7)) {
+ /* ^ */
+#ifdef DB_PREV
+ *iv_return = DB_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO */
+ /* Offset 4 gives the best switch position. */
+ switch (name[4]) {
+ case 'E':
+ if (memEQ(name, "DB_RECNO", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_AFTER", 8)) {
+ /* ^ */
+#ifdef DB_AFTER
+ *iv_return = DB_AFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_FIRST", 8)) {
+ /* ^ */
+#ifdef DB_FIRST
+ *iv_return = DB_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_FLUSH", 8)) {
+ /* ^ */
+#ifdef DB_FLUSH
+ *iv_return = DB_FLUSH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_FORCE", 8)) {
+ /* ^ */
+#ifdef DB_FORCE
+ *iv_return = DB_FORCE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_BTREE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_QUEUE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 55)
+ *iv_return = DB_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPEND DB_BEFORE DB_CLIENT DB_COMMIT DB_CREATE DB_CURLSN DB_DIRECT
+ DB_EXTENT DB_GETREC DB_NOCOPY DB_NOMMAP DB_NOSYNC DB_RDONLY DB_RECNUM
+ DB_THREAD DB_VERIFY */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'A':
+ if (memEQ(name, "DB_NOMMAP", 9)) {
+ /* ^ */
+#ifdef DB_NOMMAP
+ *iv_return = DB_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_THREAD", 9)) {
+ /* ^ */
+#ifdef DB_THREAD
+ *iv_return = DB_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_DIRECT", 9)) {
+ /* ^ */
+#ifdef DB_DIRECT
+ *iv_return = DB_DIRECT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_GETREC", 9)) {
+ /* ^ */
+#ifdef DB_GETREC
+ *iv_return = DB_GETREC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_VERIFY", 9)) {
+ /* ^ */
+#ifdef DB_VERIFY
+ *iv_return = DB_VERIFY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_COMMIT", 9)) {
+ /* ^ */
+#ifdef DB_COMMIT
+ *iv_return = DB_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_RDONLY", 9)) {
+ /* ^ */
+#ifdef DB_RDONLY
+ *iv_return = DB_RDONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APPEND", 9)) {
+ /* ^ */
+#ifdef DB_APPEND
+ *iv_return = DB_APPEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CLIENT", 9)) {
+ /* ^ */
+#ifdef DB_CLIENT
+ *iv_return = DB_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_EXTENT", 9)) {
+ /* ^ */
+#ifdef DB_EXTENT
+ *iv_return = DB_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSYNC", 9)) {
+ /* ^ */
+#ifdef DB_NOSYNC
+ *iv_return = DB_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_NOCOPY", 9)) {
+ /* ^ */
+#ifdef DB_NOCOPY
+ *iv_return = DB_NOCOPY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BEFORE", 9)) {
+ /* ^ */
+#ifdef DB_BEFORE
+ *iv_return = DB_BEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_CURLSN", 9)) {
+ /* ^ */
+#ifdef DB_CURLSN
+ *iv_return = DB_CURLSN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_CREATE", 9)) {
+ /* ^ */
+#ifdef DB_CREATE
+ *iv_return = DB_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_RECNUM", 9)) {
+ /* ^ */
+#ifdef DB_RECNUM
+ *iv_return = DB_RECNUM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CONSUME DB_CURRENT DB_DELETED DB_DUPSORT DB_ENCRYPT DB_ENV_CDB
+ DB_ENV_TXN DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH DB_PRIVATE
+ DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN
+ DB_UPGRADE */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'D':
+ if (memEQ(name, "DB_ENV_CDB", 10)) {
+ /* ^ */
+#ifdef DB_ENV_CDB
+ *iv_return = DB_ENV_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UPGRADE", 10)) {
+ /* ^ */
+#ifdef DB_UPGRADE
+ *iv_return = DB_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_DELETED", 10)) {
+ /* ^ */
+#ifdef DB_DELETED
+ *iv_return = DB_DELETED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECOVER", 10)) {
+ /* ^ */
+#ifdef DB_RECOVER
+ *iv_return = DB_RECOVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_PR_PAGE", 10)) {
+ /* ^ */
+#ifdef DB_PR_PAGE
+ *iv_return = DB_PR_PAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SALVAGE", 10)) {
+ /* ^ */
+#ifdef DB_SALVAGE
+ *iv_return = DB_SALVAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_NOPANIC", 10)) {
+ /* ^ */
+#ifdef DB_NOPANIC
+ *iv_return = DB_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_TXN_CKP", 10)) {
+ /* ^ */
+#ifdef DB_TXN_CKP
+ *iv_return = DB_TXN_CKP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_CONSUME", 10)) {
+ /* ^ */
+#ifdef DB_CONSUME
+ *iv_return = DB_CONSUME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_CURRENT", 10)) {
+ /* ^ */
+#ifdef DB_CURRENT
+ *iv_return = DB_CURRENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOINENV", 10)) {
+ /* ^ */
+#ifdef DB_JOINENV
+ *iv_return = DB_JOINENV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENCRYPT", 10)) {
+ /* ^ */
+#ifdef DB_ENCRYPT
+ *iv_return = DB_ENCRYPT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_DUPSORT", 10)) {
+ /* ^ */
+#ifdef DB_DUPSORT
+ *iv_return = DB_DUPSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_KEYLAST", 10)) {
+ /* ^ */
+#ifdef DB_KEYLAST
+ *iv_return = DB_KEYLAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_HASH", 10)) {
+ /* ^ */
+#ifdef DB_OK_HASH
+ *iv_return = DB_OK_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIVATE", 10)) {
+ /* ^ */
+#ifdef DB_PRIVATE
+ *iv_return = DB_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TIMEOUT", 10)) {
+ /* ^ */
+#ifdef DB_TIMEOUT
+ *iv_return = DB_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_UNKNOWN", 10)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_UNKNOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_ENV_TXN", 10)) {
+ /* ^ */
+#ifdef DB_ENV_TXN
+ *iv_return = DB_ENV_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_FIXEDLEN DB_GET_BOTH DB_INIT_CDB
+ DB_INIT_LOG DB_INIT_TXN DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_LOCKDOWN
+ DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC DB_LOG_DISK DB_MULTIPLE DB_NEXT_DUP
+ DB_NOSERVER DB_NOTFOUND DB_OK_BTREE DB_OK_QUEUE DB_OK_RECNO DB_POSITION
+ DB_QAMMAGIC DB_RENUMBER DB_SNAPSHOT DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK
+ DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO DB_WRNOSYNC DB_YIELDCPU */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_ABS", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_ABS
+ *iv_return = DB_ARCH_ABS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TRUNCATE", 11)) {
+ /* ^ */
+#ifdef DB_TRUNCATE
+ *iv_return = DB_TRUNCATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_RENUMBER", 11)) {
+ /* ^ */
+#ifdef DB_RENUMBER
+ *iv_return = DB_RENUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INIT_CDB", 11)) {
+ /* ^ */
+#ifdef DB_INIT_CDB
+ *iv_return = DB_INIT_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_RECNO", 11)) {
+ /* ^ */
+#ifdef DB_OK_RECNO
+ *iv_return = DB_OK_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_YIELDCPU", 11)) {
+ /* ^ */
+#ifdef DB_YIELDCPU
+ *iv_return = DB_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_NEXT_DUP", 11)) {
+ /* ^ */
+#ifdef DB_NEXT_DUP
+ *iv_return = DB_NEXT_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_OK_QUEUE", 11)) {
+ /* ^ */
+#ifdef DB_OK_QUEUE
+ *iv_return = DB_OK_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_REDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_REDO
+ *iv_return = DB_TXN_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_GET", 11)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_GET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_LOGMAGIC
+ *iv_return = DB_LOGMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_QAMMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_QAMMAGIC
+ *iv_return = DB_QAMMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_TXNMAGIC
+ *iv_return = DB_TXNMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_SNAPSHOT", 11)) {
+ /* ^ */
+#ifdef DB_SNAPSHOT
+ *iv_return = DB_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_KEYEXIST", 11)) {
+ /* ^ */
+#ifdef DB_KEYEXIST
+ *iv_return = DB_KEYEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_DISK", 11)) {
+ /* ^ */
+#ifdef DB_LOG_DISK
+ *iv_return = DB_LOG_DISK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITION", 11)) {
+ /* ^ */
+#ifdef DB_POSITION
+ *iv_return = DB_POSITION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ARCH_LOG", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_LOG
+ *iv_return = DB_ARCH_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FIXEDLEN", 11)) {
+ /* ^ */
+#ifdef DB_FIXEDLEN
+ *iv_return = DB_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INIT_LOG", 11)) {
+ /* ^ */
+#ifdef DB_INIT_LOG
+ *iv_return = DB_INIT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APP_INIT", 11)) {
+ /* ^ */
+#ifdef DB_APP_INIT
+ *iv_return = DB_APP_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_UNDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_UNDO
+ *iv_return = DB_TXN_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_GET_BOTH", 11)) {
+ /* ^ */
+#ifdef DB_GET_BOTH
+ *iv_return = DB_GET_BOTH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKDOWN", 11)) {
+ /* ^ */
+#ifdef DB_LOCKDOWN
+ *iv_return = DB_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK", 11)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK
+ *iv_return = DB_TXN_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_KEYEMPTY", 11)) {
+ /* ^ */
+#ifdef DB_KEYEMPTY
+ *iv_return = DB_KEYEMPTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT", 11)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MULTIPLE", 11)) {
+ /* ^ */
+#ifdef DB_MULTIPLE
+ *iv_return = DB_MULTIPLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_KEYFIRST", 11)) {
+ /* ^ */
+#ifdef DB_KEYFIRST
+ *iv_return = DB_KEYFIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_BTREE", 11)) {
+ /* ^ */
+#ifdef DB_OK_BTREE
+ *iv_return = DB_OK_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_INIT_TXN", 11)) {
+ /* ^ */
+#ifdef DB_INIT_TXN
+ *iv_return = DB_INIT_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_NOTFOUND", 11)) {
+ /* ^ */
+#ifdef DB_NOTFOUND
+ *iv_return = DB_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_NOSERVER", 11)) {
+ /* ^ */
+#ifdef DB_NOSERVER
+ *iv_return = DB_NOSERVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_TXN_SYNC", 11)) {
+ /* ^ */
+#ifdef DB_TXN_SYNC
+ *iv_return = DB_TXN_SYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRNOSYNC", 11)) {
+ /* ^ */
+#ifdef DB_WRNOSYNC
+ *iv_return = DB_WRNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_12 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ARCH_DATA DB_CDB_ALLDB DB_CL_WRITER DB_DELIMITER DB_DIRECT_DB
+ DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC DB_GET_RECNO
+ DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC DB_LOCK_DUMP
+ DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW DB_NEEDSPLIT
+ DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE DB_QAMOLDVER
+ DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES DB_TEMPORARY DB_TXN_ABORT
+ DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK DB_WRITEOPEN DB_XA_CREATE */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_DATA", 12)) {
+ /* ^ */
+#ifdef DB_ARCH_DATA
+ *iv_return = DB_ARCH_DATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_CDB_ALLDB", 12)) {
+ /* ^ */
+#ifdef DB_CDB_ALLDB
+ *iv_return = DB_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CL_WRITER", 12)) {
+ /* ^ */
+#ifdef DB_CL_WRITER
+ *iv_return = DB_CL_WRITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_DELIMITER", 12)) {
+ /* ^ */
+#ifdef DB_DELIMITER
+ *iv_return = DB_DELIMITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_DB", 12)) {
+ /* ^ */
+#ifdef DB_DIRECT_DB
+ *iv_return = DB_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DUPCURSOR", 12)) {
+ /* ^ */
+#ifdef DB_DUPCURSOR
+ *iv_return = DB_DUPCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_FATAL", 12)) {
+ /* ^ */
+#ifdef DB_ENV_FATAL
+ *iv_return = DB_ENV_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_FAST_STAT", 12)) {
+ /* ^ */
+#ifdef DB_FAST_STAT
+ *iv_return = DB_FAST_STAT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_GET_BOTHC", 12)) {
+ /* ^ */
+#ifdef DB_GET_BOTHC
+ *iv_return = DB_GET_BOTHC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_GET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_GET_RECNO
+ *iv_return = DB_GET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASHMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_HASHMAGIC
+ *iv_return = DB_HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_INIT_LOCK
+ *iv_return = DB_INIT_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'J':
+ if (memEQ(name, "DB_JOIN_ITEM", 12)) {
+ /* ^ */
+#ifdef DB_JOIN_ITEM
+ *iv_return = DB_JOIN_ITEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCKMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_LOCKMAGIC
+ *iv_return = DB_LOCKMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DUMP", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_DUMP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RW_N", 12)) {
+ /* ^ */
+#ifdef DB_LOCK_RW_N
+ *iv_return = DB_LOCK_RW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_LOGOLDVER
+ *iv_return = DB_LOGOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_MAX_PAGES", 12)) {
+ /* ^ */
+#ifdef DB_MAX_PAGES
+ *iv_return = DB_MAX_PAGES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_NEW", 12)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW
+ *iv_return = DB_MPOOL_NEW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEEDSPLIT", 12)) {
+ /* ^ */
+#ifdef DB_NEEDSPLIT
+ *iv_return = DB_NEEDSPLIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NODUPDATA", 12)) {
+ /* ^ */
+#ifdef DB_NODUPDATA
+ *iv_return = DB_NODUPDATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOLOCKING", 12)) {
+ /* ^ */
+#ifdef DB_NOLOCKING
+ *iv_return = DB_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NORECURSE", 12)) {
+ /* ^ */
+#ifdef DB_NORECURSE
+ *iv_return = DB_NORECURSE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_OVERWRITE", 12)) {
+ /* ^ */
+#ifdef DB_OVERWRITE
+ *iv_return = DB_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAGEYIELD", 12)) {
+ /* ^ */
+#ifdef DB_PAGEYIELD
+ *iv_return = DB_PAGEYIELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_PAGE_LOCK
+ *iv_return = DB_PAGE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PERMANENT", 12)) {
+ /* ^ */
+#ifdef DB_PERMANENT
+ *iv_return = DB_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITIONI", 12)) {
+ /* ^ */
+#ifdef DB_POSITIONI
+ *iv_return = DB_POSITIONI;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRINTABLE", 12)) {
+ /* ^ */
+#ifdef DB_PRINTABLE
+ *iv_return = DB_PRINTABLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_QAMOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_QAMOLDVER
+ *iv_return = DB_QAMOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET_RANGE", 12)) {
+ /* ^ */
+#ifdef DB_SET_RANGE
+ *iv_return = DB_SET_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_SET_RECNO
+ *iv_return = DB_SET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SWAPBYTES", 12)) {
+ /* ^ */
+#ifdef DB_SWAPBYTES
+ *iv_return = DB_SWAPBYTES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEMPORARY", 12)) {
+ /* ^ */
+#ifdef DB_TEMPORARY
+ *iv_return = DB_TEMPORARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_ABORT", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_ABORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_APPLY", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_TXN_APPLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_PRINT", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_PRINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_WRITELOCK", 12)) {
+ /* ^ */
+#ifdef DB_WRITELOCK
+ *iv_return = DB_WRITELOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRITEOPEN", 12)) {
+ /* ^ */
+#ifdef DB_WRITEOPEN
+ *iv_return = DB_WRITEOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_XA_CREATE", 12)) {
+ /* ^ */
+#ifdef DB_XA_CREATE
+ *iv_return = DB_XA_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_13 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AGGRESSIVE DB_BTREEMAGIC DB_CHECKPOINT DB_DIRECT_LOG DB_DIRTY_READ
+ DB_DONOTINDEX DB_ENV_CREATE DB_ENV_NOMMAP DB_ENV_THREAD DB_HASHOLDVER
+ DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_NORUN DB_LOCK_RIW_N DB_LOCK_TRADE
+ DB_LOGVERSION DB_LOG_LOCKED DB_MPOOL_LAST DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEXT_NODUP DB_NOORDERCHK DB_PREV_NODUP DB_PR_HEADERS DB_QAMVERSION
+ DB_RDWRMASTER DB_REGISTERED DB_REP_CLIENT DB_REP_MASTER DB_SEQUENTIAL
+ DB_STAT_CLEAR DB_SYSTEM_MEM DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT
+ DB_VERIFY_BAD */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'A':
+ if (memEQ(name, "DB_STAT_CLEAR", 13)) {
+ /* ^ */
+#ifdef DB_STAT_CLEAR
+ *iv_return = DB_STAT_CLEAR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INCOMPLETE", 13)) {
+ /* ^ */
+#ifdef DB_INCOMPLETE
+ *iv_return = DB_INCOMPLETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NORUN", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_NORUN
+ *iv_return = DB_LOCK_NORUN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RIW_N", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_RIW_N
+ *iv_return = DB_LOCK_RIW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_TRADE", 13)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_LOCK_TRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_CHECKPOINT", 13)) {
+ /* ^ */
+#ifdef DB_CHECKPOINT
+ *iv_return = DB_CHECKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PREV_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_PREV_NODUP
+ *iv_return = DB_PREV_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_AGGRESSIVE", 13)) {
+ /* ^ */
+#ifdef DB_AGGRESSIVE
+ *iv_return = DB_AGGRESSIVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGVERSION", 13)) {
+ /* ^ */
+#ifdef DB_LOGVERSION
+ *iv_return = DB_LOGVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_LOCKED", 13)) {
+ /* ^ */
+#ifdef DB_LOG_LOCKED
+ *iv_return = DB_LOG_LOCKED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGISTERED", 13)) {
+ /* ^ */
+#ifdef DB_REGISTERED
+ *iv_return = DB_REGISTERED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_MPOOL", 13)) {
+ /* ^ */
+#ifdef DB_INIT_MPOOL
+ *iv_return = DB_INIT_MPOOL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_QAMVERSION", 13)) {
+ /* ^ */
+#ifdef DB_QAMVERSION
+ *iv_return = DB_QAMVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_DONOTINDEX", 13)) {
+ /* ^ */
+#ifdef DB_DONOTINDEX
+ *iv_return = DB_DONOTINDEX;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNVERSION", 13)) {
+ /* ^ */
+#ifdef DB_TXNVERSION
+ *iv_return = DB_TXNVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOSYNC", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOSYNC
+ *iv_return = DB_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOWAIT", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOWAIT
+ *iv_return = DB_TXN_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_MPOOL_LAST", 13)) {
+ /* ^ */
+#ifdef DB_MPOOL_LAST
+ *iv_return = DB_MPOOL_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOORDERCHK", 13)) {
+ /* ^ */
+#ifdef DB_NOORDERCHK
+ *iv_return = DB_NOORDERCHK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_REP_CLIENT", 13)) {
+ /* ^ */
+#ifdef DB_REP_CLIENT
+ *iv_return = DB_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_MASTER", 13)) {
+ /* ^ */
+#ifdef DB_REP_MASTER
+ *iv_return = DB_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_SEQUENTIAL", 13)) {
+ /* ^ */
+#ifdef DB_SEQUENTIAL
+ *iv_return = DB_SEQUENTIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEMAGIC", 13)) {
+ /* ^ */
+#ifdef DB_BTREEMAGIC
+ *iv_return = DB_BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_LOG", 13)) {
+ /* ^ */
+#ifdef DB_DIRECT_LOG
+ *iv_return = DB_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRTY_READ", 13)) {
+ /* ^ */
+#ifdef DB_DIRTY_READ
+ *iv_return = DB_DIRTY_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_BAD", 13)) {
+ /* ^ */
+#ifdef DB_VERIFY_BAD
+ *iv_return = DB_VERIFY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_HASHOLDVER", 13)) {
+ /* ^ */
+#ifdef DB_HASHOLDVER
+ *iv_return = DB_HASHOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SYSTEM_MEM", 13)) {
+ /* ^ */
+#ifdef DB_SYSTEM_MEM
+ *iv_return = DB_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_MUTEXDEBUG", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXDEBUG
+ *iv_return = DB_MUTEXDEBUG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MUTEXLOCKS", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXLOCKS
+ *iv_return = DB_MUTEXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_ENV_CREATE", 13)) {
+ /* ^ */
+#ifdef DB_ENV_CREATE
+ *iv_return = DB_ENV_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOMMAP", 13)) {
+ /* ^ */
+#ifdef DB_ENV_NOMMAP
+ *iv_return = DB_ENV_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_THREAD", 13)) {
+ /* ^ */
+#ifdef DB_ENV_THREAD
+ *iv_return = DB_ENV_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_RDWRMASTER", 13)) {
+ /* ^ */
+#ifdef DB_RDWRMASTER
+ *iv_return = DB_RDWRMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_NEXT_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_NEXT_NODUP
+ *iv_return = DB_NEXT_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PR_HEADERS", 13)) {
+ /* ^ */
+#ifdef DB_PR_HEADERS
+ *iv_return = DB_PR_HEADERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_14 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AUTO_COMMIT DB_BTREEOLDVER DB_CHKSUM_SHA1 DB_EID_INVALID DB_ENCRYPT_AES
+ DB_ENV_APPINIT DB_ENV_DBLOCAL DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOPANIC
+ DB_ENV_PRIVATE DB_FILE_ID_LEN DB_HANDLE_LOCK DB_HASHVERSION DB_INVALID_EID
+ DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH DB_MAX_RECORDS
+ DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID DB_ODDFILESIZE
+ DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK DB_REGION_ANON
+ DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_NEWSITE DB_REP_UNAVAIL
+ DB_REVSPLITOFF DB_RUNRECOVERY DB_SET_TXN_NOW DB_USE_ENVIRON DB_WRITECURSOR
+ DB_XIDDATASIZE */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'A':
+ if (memEQ(name, "DB_LOCK_RANDOM", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RANDOM
+ *iv_return = DB_LOCK_RANDOM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPEN_CALLED", 14)) {
+ /* ^ */
+#ifdef DB_OPEN_CALLED
+ *iv_return = DB_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_UNAVAIL", 14)) {
+ /* ^ */
+#ifdef DB_REP_UNAVAIL
+ *iv_return = DB_REP_UNAVAIL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_XIDDATASIZE", 14)) {
+ /* ^ */
+#ifdef DB_XIDDATASIZE
+ *iv_return = DB_XIDDATASIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_ENV_LOCKING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKING
+ *iv_return = DB_ENV_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MAX_RECORDS", 14)) {
+ /* ^ */
+#ifdef DB_MAX_RECORDS
+ *iv_return = DB_MAX_RECORDS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CLEAN", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_CLEAN
+ *iv_return = DB_MPOOL_CLEAN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORDCOUNT", 14)) {
+ /* ^ */
+#ifdef DB_RECORDCOUNT
+ *iv_return = DB_RECORDCOUNT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_FILE_ID_LEN", 14)) {
+ /* ^ */
+#ifdef DB_FILE_ID_LEN
+ *iv_return = DB_FILE_ID_LEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INVALID_EID", 14)) {
+ /* ^ */
+#ifdef DB_INVALID_EID
+ *iv_return = DB_INVALID_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DIRTY", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_DIRTY
+ *iv_return = DB_MPOOL_DIRTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_RECORD", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RECORD
+ *iv_return = DB_LOCK_RECORD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_REMOVE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_REMOVE
+ *iv_return = DB_LOCK_REMOVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSERVER_ID", 14)) {
+ /* ^ */
+#ifdef DB_NOSERVER_ID
+ *iv_return = DB_NOSERVER_ID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ODDFILESIZE", 14)) {
+ /* ^ */
+#ifdef DB_ODDFILESIZE
+ *iv_return = DB_ODDFILESIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_ENV_LOGGING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOGGING
+ *iv_return = DB_ENV_LOGGING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PRIVATE", 14)) {
+ /* ^ */
+#ifdef DB_ENV_PRIVATE
+ *iv_return = DB_ENV_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REVSPLITOFF", 14)) {
+ /* ^ */
+#ifdef DB_REVSPLITOFF
+ *iv_return = DB_REVSPLITOFF;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_BTREEOLDVER", 14)) {
+ /* ^ */
+#ifdef DB_BTREEOLDVER
+ *iv_return = DB_BTREEOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_DBLOCAL", 14)) {
+ /* ^ */
+#ifdef DB_ENV_DBLOCAL
+ *iv_return = DB_ENV_DBLOCAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_OLDEST", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_OLDEST
+ *iv_return = DB_LOCK_OLDEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_RENAMEMAGIC", 14)) {
+ /* ^ */
+#ifdef DB_RENAMEMAGIC
+ *iv_return = DB_RENAMEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_SET_TXN_NOW", 14)) {
+ /* ^ */
+#ifdef DB_SET_TXN_NOW
+ *iv_return = DB_SET_TXN_NOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_AUTO_COMMIT", 14)) {
+ /* ^ */
+#ifdef DB_AUTO_COMMIT
+ *iv_return = DB_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOIN_NOSORT", 14)) {
+ /* ^ */
+#ifdef DB_JOIN_NOSORT
+ *iv_return = DB_JOIN_NOSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOWAIT", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_NOWAIT
+ *iv_return = DB_LOCK_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RUNRECOVERY", 14)) {
+ /* ^ */
+#ifdef DB_RUNRECOVERY
+ *iv_return = DB_RUNRECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_APPINIT", 14)) {
+ /* ^ */
+#ifdef DB_ENV_APPINIT
+ *iv_return = DB_ENV_APPINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOPANIC", 14)) {
+ /* ^ */
+#ifdef DB_ENV_NOPANIC
+ *iv_return = DB_ENV_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_HASHVERSION", 14)) {
+ /* ^ */
+#ifdef DB_HASHVERSION
+ *iv_return = DB_HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKVERSION", 14)) {
+ /* ^ */
+#ifdef DB_LOCKVERSION
+ *iv_return = DB_LOCKVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OLD_VERSION", 14)) {
+ /* ^ */
+#ifdef DB_OLD_VERSION
+ *iv_return = DB_OLD_VERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENCRYPT_AES", 14)) {
+ /* ^ */
+#ifdef DB_ENCRYPT_AES
+ *iv_return = DB_ENCRYPT_AES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_WRITECURSOR", 14)) {
+ /* ^ */
+#ifdef DB_WRITECURSOR
+ *iv_return = DB_WRITECURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_EID_INVALID", 14)) {
+ /* ^ */
+#ifdef DB_EID_INVALID
+ *iv_return = DB_EID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_USE_ENVIRON", 14)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON
+ *iv_return = DB_USE_ENVIRON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_SWITCH", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_SWITCH
+ *iv_return = DB_LOCK_SWITCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOOVERWRITE", 14)) {
+ /* ^ */
+#ifdef DB_NOOVERWRITE
+ *iv_return = DB_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWSITE", 14)) {
+ /* ^ */
+#ifdef DB_REP_NEWSITE
+ *iv_return = DB_REP_NEWSITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_EXPIRE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_EXPIRE
+ *iv_return = DB_LOCK_EXPIRE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CHKSUM_SHA1", 14)) {
+ /* ^ */
+#ifdef DB_CHKSUM_SHA1
+ *iv_return = DB_CHKSUM_SHA1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_HANDLE_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_HANDLE_LOCK
+ *iv_return = DB_HANDLE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORD_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_RECORD_LOCK
+ *iv_return = DB_RECORD_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_ANON", 14)) {
+ /* ^ */
+#ifdef DB_REGION_ANON
+ *iv_return = DB_REGION_ANON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_INIT", 14)) {
+ /* ^ */
+#ifdef DB_REGION_INIT
+ *iv_return = DB_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_NAME", 14)) {
+ /* ^ */
+#ifdef DB_REGION_NAME
+ *iv_return = DB_REGION_NAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_15 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPLY_LOGREG DB_BTREEVERSION DB_CONSUME_WAIT DB_ENV_LOCKDOWN
+ DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT DB_LOCK_INHERIT
+ DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ DB_LOCK_TIMEOUT
+ DB_LOCK_UPGRADE DB_MPOOL_CREATE DB_MPOOL_EXTENT DB_MULTIPLE_KEY
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_PRIORITY_LOW DB_REGION_MAGIC
+ DB_REP_LOGSONLY DB_REP_OUTDATED DB_SURPRISE_KID DB_TEST_POSTLOG
+ DB_TEST_PREOPEN DB_TXN_GETPGNOS DB_TXN_LOCK_2PL DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_VERIFY_FATAL */
+ /* Offset 10 gives the best switch position. */
+ switch (name[10]) {
+ case 'D':
+ if (memEQ(name, "DB_REP_OUTDATED", 15)) {
+ /* ^ */
+#ifdef DB_REP_OUTDATED
+ *iv_return = DB_REP_OUTDATED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_MULTIPLE_KEY", 15)) {
+ /* ^ */
+#ifdef DB_MULTIPLE_KEY
+ *iv_return = DB_MULTIPLE_KEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SURPRISE_KID", 15)) {
+ /* ^ */
+#ifdef DB_SURPRISE_KID
+ *iv_return = DB_SURPRISE_KID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_PREOPEN", 15)) {
+ /* ^ */
+#ifdef DB_TEST_PREOPEN
+ *iv_return = DB_TEST_PREOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_LOCK_DEFAULT", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_DEFAULT
+ *iv_return = DB_LOCK_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_FATAL", 15)) {
+ /* ^ */
+#ifdef DB_VERIFY_FATAL
+ *iv_return = DB_VERIFY_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_UPGRADE", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_UPGRADE
+ *iv_return = DB_LOCK_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_LOCK_INHERIT", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \
+ DB_VERSION_PATCH >= 1)
+ *iv_return = DB_LOCK_INHERIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PANIC_OK", 15)) {
+ /* ^ */
+#ifdef DB_ENV_PANIC_OK
+ *iv_return = DB_ENV_PANIC_OK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_LOCKDOWN", 15)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKDOWN
+ *iv_return = DB_ENV_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ORDERCHKONLY", 15)) {
+ /* ^ */
+#ifdef DB_ORDERCHKONLY
+ *iv_return = DB_ORDERCHKONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK_2PL", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_2PL
+ *iv_return = DB_TXN_LOCK_2PL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_YIELDCPU", 15)) {
+ /* ^ */
+#ifdef DB_ENV_YIELDCPU
+ *iv_return = DB_ENV_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_LOCK_TIMEOUT", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_MAGIC", 15)) {
+ /* ^ */
+#ifdef DB_REGION_MAGIC
+ *iv_return = DB_REGION_MAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_APPLY_LOGREG", 15)) {
+ /* ^ */
+#ifdef DB_APPLY_LOGREG
+ *iv_return = DB_APPLY_LOGREG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_TXN_GETPGNOS", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_GETPGNOS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEVERSION", 15)) {
+ /* ^ */
+#ifdef DB_BTREEVERSION
+ *iv_return = DB_BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CREATE", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_CREATE
+ *iv_return = DB_MPOOL_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_REP_LOGSONLY", 15)) {
+ /* ^ */
+#ifdef DB_REP_LOGSONLY
+ *iv_return = DB_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOG", 15)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOG
+ *iv_return = DB_TEST_POSTLOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_LOCK_NOTHELD", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTHELD
+ *iv_return = DB_LOCK_NOTHELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_ALL", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_ALL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_OBJ", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_OBJ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_MPOOL_EXTENT", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_EXTENT
+ *iv_return = DB_MPOOL_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PRIORITY_LOW", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CONSUME_WAIT", 15)) {
+ /* ^ */
+#ifdef DB_CONSUME_WAIT
+ *iv_return = DB_CONSUME_WAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPFLAGS_MASK", 15)) {
+ /* ^ */
+#ifdef DB_OPFLAGS_MASK
+ *iv_return = DB_OPFLAGS_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_MASK", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_MASK
+ *iv_return = DB_TXN_LOG_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_REDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_REDO
+ *iv_return = DB_TXN_LOG_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_UNDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDO
+ *iv_return = DB_TXN_LOG_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_16 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_BROADCAST_EID DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB
+ DB_ENV_DIRECT_DB DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT
+ DB_FCNTL_LOCKING DB_JAVA_CALLBACK DB_LOCK_CONFLICT DB_LOCK_DEADLOCK
+ DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NOTEXIST
+ DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_MPOOL_DISCARD
+ DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND DB_PRIORITY_HIGH
+ DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER DB_REP_PERMANENT
+ DB_SECONDARY_BAD DB_TEST_POSTOPEN DB_TEST_POSTSYNC DB_TXN_LOCK_MASK
+ DB_TXN_OPENFILES DB_VERB_CHKPOINT DB_VERB_DEADLOCK DB_VERB_RECOVERY
+ DB_VERB_WAITSFOR DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK */
+ /* Offset 12 gives the best switch position. */
+ switch (name[12]) {
+ case 'A':
+ if (memEQ(name, "DB_RECOVER_FATAL", 16)) {
+ /* ^ */
+#ifdef DB_RECOVER_FATAL
+ *iv_return = DB_RECOVER_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MAJOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MAJOR
+ *iv_return = DB_VERSION_MAJOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_PATCH", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_PATCH
+ *iv_return = DB_VERSION_PATCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_JAVA_CALLBACK", 16)) {
+ /* ^ */
+#ifdef DB_JAVA_CALLBACK
+ *iv_return = DB_JAVA_CALLBACK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_EID_BROADCAST", 16)) {
+ /* ^ */
+#ifdef DB_EID_BROADCAST
+ *iv_return = DB_EID_BROADCAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DISCARD", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_DISCARD
+ *iv_return = DB_MPOOL_DISCARD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_YOUNGEST
+ *iv_return = DB_LOCK_YOUNGEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_NOSERVER_HOME", 16)) {
+ /* ^ */
+#ifdef DB_NOSERVER_HOME
+ *iv_return = DB_NOSERVER_HOME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRIORITY_HIGH", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT
+ *iv_return = DB_ENV_RPCCLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_OPENFILES", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_OPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MINOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MINOR
+ *iv_return = DB_VERSION_MINOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_NOLOCKING", 16)) {
+ /* ^ */
+#ifdef DB_ENV_NOLOCKING
+ *iv_return = DB_ENV_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FCNTL_LOCKING", 16)) {
+ /* ^ */
+#ifdef DB_FCNTL_LOCKING
+ *iv_return = DB_FCNTL_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_CDB_ALLDB
+ *iv_return = DB_ENV_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_CONFLICT", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_CONFLICT
+ *iv_return = DB_LOCK_CONFLICT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_DEADLOCK
+ *iv_return = DB_LOCK_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_VERB_DEADLOCK
+ *iv_return = DB_VERB_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_MASK
+ *iv_return = DB_TXN_LOCK_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) {
+ /* ^ */
+#ifdef DB_VRFY_FLAGMASK
+ *iv_return = DB_VRFY_FLAGMASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_REP_PERMANENT", 16)) {
+ /* ^ */
+#ifdef DB_REP_PERMANENT
+ *iv_return = DB_REP_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MAXLOCKS
+ *iv_return = DB_LOCK_MAXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINLOCKS
+ *iv_return = DB_LOCK_MINLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) {
+ /* ^ */
+#ifdef DB_PAGE_NOTFOUND
+ *iv_return = DB_PAGE_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTOPEN", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTOPEN
+ *iv_return = DB_TEST_POSTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_CHKPOINT", 16)) {
+ /* ^ */
+#ifdef DB_VERB_CHKPOINT
+ *iv_return = DB_VERB_CHKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_ENV_OVERWRITE", 16)) {
+ /* ^ */
+#ifdef DB_ENV_OVERWRITE
+ *iv_return = DB_ENV_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINWRITE", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINWRITE
+ *iv_return = DB_LOCK_MINWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_READ", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_PUT_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) {
+ /* ^ */
+#ifdef DB_LOGC_BUF_SIZE
+ *iv_return = DB_LOGC_BUF_SIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_DUPMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_DUPMASTER
+ *iv_return = DB_REP_DUPMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_NEWMASTER
+ *iv_return = DB_REP_NEWMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTSYNC", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTSYNC
+ *iv_return = DB_TEST_POSTSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_WAITSFOR", 16)) {
+ /* ^ */
+#ifdef DB_VERB_WAITSFOR
+ *iv_return = DB_VERB_WAITSFOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_DB
+ *iv_return = DB_ENV_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_CACHED_COUNTS", 16)) {
+ /* ^ */
+#ifdef DB_CACHED_COUNTS
+ *iv_return = DB_CACHED_COUNTS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_MPOOL_PRIVATE", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_PRIVATE
+ *iv_return = DB_MPOOL_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_RECOVERY", 16)) {
+ /* ^ */
+#ifdef DB_VERB_RECOVERY
+ *iv_return = DB_VERB_RECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTEXIST
+ *iv_return = DB_LOCK_NOTEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_BROADCAST_EID", 16)) {
+ /* ^ */
+#ifdef DB_BROADCAST_EID
+ *iv_return = DB_BROADCAST_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SECONDARY_BAD", 16)) {
+ /* ^ */
+#ifdef DB_SECONDARY_BAD
+ *iv_return = DB_SECONDARY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_DIRECT_LOG DB_ENV_REP_CLIENT DB_ENV_REP_MASTER DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_TXN_NOSYNC DB_ENV_USER_ALLOC DB_GET_BOTH_RANGE
+ DB_LOG_SILENT_ERR DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_PRERENAME DB_TXN_POPENFILES DB_VERSION_STRING */
+ /* Offset 14 gives the best switch position. */
+ switch (name[14]) {
+ case 'A':
+ if (memEQ(name, "DB_TEST_PRERENAME", 17)) {
+ /* ^ */
+#ifdef DB_TEST_PRERENAME
+ *iv_return = DB_TEST_PRERENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_REP_CLIENT", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_CLIENT
+ *iv_return = DB_ENV_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_SILENT_ERR", 17)) {
+ /* ^ */
+#ifdef DB_LOG_SILENT_ERR
+ *iv_return = DB_LOG_SILENT_ERR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RPC_SERVERVERS", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERVERS
+ *iv_return = DB_RPC_SERVERVERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTSEND", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTSEND
+ *iv_return = DB_TEST_ELECTSEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_VERSION_STRING", 17)) {
+ /* ^ */
+#ifdef DB_VERSION_STRING
+ *pv_return = DB_VERSION_STRING;
+ return PERL_constant_ISPV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_DIRECT_LOG", 17)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_LOG
+ *iv_return = DB_ENV_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_USER_ALLOC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_USER_ALLOC
+ *iv_return = DB_ENV_USER_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_POPENFILES", 17)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_TXN_POPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_ENV_SYSTEM_MEM", 17)) {
+ /* ^ */
+#ifdef DB_ENV_SYSTEM_MEM
+ *iv_return = DB_ENV_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_GET_BOTH_RANGE", 17)) {
+ /* ^ */
+#ifdef DB_GET_BOTH_RANGE
+ *iv_return = DB_GET_BOTH_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTINIT", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTINIT
+ *iv_return = DB_TEST_ELECTINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ENV_STANDALONE", 17)) {
+ /* ^ */
+#ifdef DB_ENV_STANDALONE
+ *iv_return = DB_ENV_STANDALONE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RPC_SERVERPROG", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERPROG
+ *iv_return = DB_RPC_SERVERPROG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_REP_MASTER", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_MASTER
+ *iv_return = DB_ENV_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_ENV_TXN_NOSYNC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_TXN_NOSYNC
+ *iv_return = DB_ENV_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_18 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ALREADY_ABORTED DB_ENV_AUTO_COMMIT DB_ENV_OPEN_CALLED
+ DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED DB_MPOOL_NEW_GROUP
+ DB_PR_RECOVERYTEST DB_SET_TXN_TIMEOUT DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2
+ DB_TEST_POSTRENAME DB_TEST_PREDESTROY DB_TEST_PREEXTOPEN */
+ /* Offset 13 gives the best switch position. */
+ switch (name[13]) {
+ case 'A':
+ if (memEQ(name, "DB_ENV_OPEN_CALLED", 18)) {
+ /* ^ */
+#ifdef DB_ENV_OPEN_CALLED
+ *iv_return = DB_ENV_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOTGRANTED", 18)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTGRANTED
+ *iv_return = DB_LOCK_NOTGRANTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTRENAME", 18)) {
+ /* ^ */
+#ifdef DB_TEST_POSTRENAME
+ *iv_return = DB_TEST_POSTRENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_MPOOL_NEW_GROUP", 18)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW_GROUP
+ *iv_return = DB_MPOOL_NEW_GROUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_SET_TXN_TIMEOUT", 18)) {
+ /* ^ */
+#ifdef DB_SET_TXN_TIMEOUT
+ *iv_return = DB_SET_TXN_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ALREADY_ABORTED", 18)) {
+ /* ^ */
+#ifdef DB_ALREADY_ABORTED
+ *iv_return = DB_ALREADY_ABORTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_AUTO_COMMIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_AUTO_COMMIT
+ *iv_return = DB_ENV_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_TEST_PREDESTROY", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREDESTROY
+ *iv_return = DB_TEST_PREDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEST_PREEXTOPEN", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTOPEN
+ *iv_return = DB_TEST_PREEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_TEST_ELECTVOTE1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE1
+ *iv_return = DB_TEST_ELECTVOTE1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTVOTE2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE2
+ *iv_return = DB_TEST_ELECTVOTE2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_TEST_ELECTWAIT1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT1
+ *iv_return = DB_TEST_ELECTWAIT1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTWAIT2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT2
+ *iv_return = DB_TEST_ELECTWAIT2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PR_RECOVERYTEST", 18)) {
+ /* ^ */
+#ifdef DB_PR_RECOVERYTEST
+ *iv_return = DB_PR_RECOVERYTEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_ENV_REGION_INIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_REGION_INIT
+ *iv_return = DB_ENV_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_19 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER DB_LOCK_GET_TIMEOUT
+ DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT DB_REP_HOLDELECTION
+ DB_SET_LOCK_TIMEOUT DB_TEST_POSTDESTROY DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTLOGMETA DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL
+ DB_TXN_LOG_UNDOREDO DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT DB_VERB_REPLICATION */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'C':
+ if (memEQ(name, "DB_SET_LOCK_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_SET_LOCK_TIMEOUT
+ *iv_return = DB_SET_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_GET_TIMEOUT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_GET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_SET_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_SET_TIMEOUT
+ *iv_return = DB_LOCK_SET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_REPLICATION", 19)) {
+ /* ^ */
+#ifdef DB_VERB_REPLICATION
+ *iv_return = DB_VERB_REPLICATION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_TXN_LOG_UNDOREDO", 19)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDOREDO
+ *iv_return = DB_TXN_LOG_UNDOREDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_TXN_WRITE_NOSYNC", 19)) {
+ /* ^ */
+#ifdef DB_TXN_WRITE_NOSYNC
+ *iv_return = DB_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_REP_HOLDELECTION", 19)) {
+ /* ^ */
+#ifdef DB_REP_HOLDELECTION
+ *iv_return = DB_REP_HOLDELECTION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UNRESOLVED_CHILD", 19)) {
+ /* ^ */
+#ifdef DB_UNRESOLVED_CHILD
+ *iv_return = DB_UNRESOLVED_CHILD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTDESTROY
+ *iv_return = DB_TEST_POSTDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTEXTOPEN", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTOPEN
+ *iv_return = DB_TEST_POSTEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOGMETA", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOGMETA
+ *iv_return = DB_TEST_POSTLOGMETA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_REP_LOGSONLY", 19)) {
+ /* ^ */
+#ifdef DB_ENV_REP_LOGSONLY
+ *iv_return = DB_ENV_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_LOCK_FREE_LOCKER", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_FREE_LOCKER
+ *iv_return = DB_LOCK_FREE_LOCKER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_FORWARD_ROLL", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_FORWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIORITY_DEFAULT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_SUBDB_LOCKS", 19)) {
+ /* ^ */
+#ifdef DB_TEST_SUBDB_LOCKS
+ *iv_return = DB_TEST_SUBDB_LOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_USE_ENVIRON_ROOT", 19)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON_ROOT
+ *iv_return = DB_USE_ENVIRON_ROOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_UPDATE_SECONDARY", 19)) {
+ /* ^ */
+#ifdef DB_UPDATE_SECONDARY
+ *iv_return = DB_UPDATE_SECONDARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_20 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CXX_NO_EXCEPTIONS DB_LOGFILEID_INVALID DB_PANIC_ENVIRONMENT
+ DB_PRIORITY_VERY_LOW DB_TEST_PREEXTDELETE DB_TEST_PREEXTUNLINK
+ DB_TXN_BACKWARD_ROLL DB_TXN_LOCK_OPTIMIST */
+ /* Offset 14 gives the best switch position. */
+ switch (name[14]) {
+ case 'D':
+ if (memEQ(name, "DB_TEST_PREEXTDELETE", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTDELETE
+ *iv_return = DB_TEST_PREEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_BACKWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) {
+ /* ^ */
+#ifdef DB_LOGFILEID_INVALID
+ *iv_return = DB_LOGFILEID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_PANIC_ENVIRONMENT", 20)) {
+ /* ^ */
+#ifdef DB_PANIC_ENVIRONMENT
+ *iv_return = DB_PANIC_ENVIRONMENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_CXX_NO_EXCEPTIONS", 20)) {
+ /* ^ */
+#ifdef DB_CXX_NO_EXCEPTIONS
+ *iv_return = DB_CXX_NO_EXCEPTIONS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMIST
+ *iv_return = DB_TXN_LOCK_OPTIMIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_PREEXTUNLINK", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTUNLINK
+ *iv_return = DB_TEST_PREEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_21 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK_UPGRADE_WRITE DB_PRIORITY_VERY_HIGH DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTUNLINK DB_TXN_BACKWARD_ALLOC */
+ /* Offset 16 gives the best switch position. */
+ switch (name[16]) {
+ case 'A':
+ if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_BACKWARD_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTEXTDELETE", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTDELETE
+ *iv_return = DB_TEST_POSTEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_TEST_POSTEXTUNLINK", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTUNLINK
+ *iv_return = DB_TEST_POSTEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_UPGRADE_WRITE", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_LOCK_UPGRADE_WRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return, const char **pv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!/home/paul/perl/install/redhat6.1/5.8.0/bin/perl5.8.0 -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV PV)};
+my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND
+ DB_APPLY_LOGREG DB_APP_INIT DB_ARCH_ABS DB_ARCH_DATA DB_ARCH_LOG
+ DB_AUTO_COMMIT DB_BEFORE DB_BROADCAST_EID DB_BTREEMAGIC
+ DB_BTREEOLDVER DB_BTREEVERSION DB_CACHED_COUNTS DB_CDB_ALLDB
+ DB_CHECKPOINT DB_CHKSUM_SHA1 DB_CLIENT DB_CL_WRITER DB_COMMIT
+ DB_CONSUME DB_CONSUME_WAIT DB_CREATE DB_CURLSN DB_CURRENT
+ DB_CXX_NO_EXCEPTIONS DB_DELETED DB_DELIMITER DB_DIRECT
+ DB_DIRECT_DB DB_DIRECT_LOG DB_DIRTY_READ DB_DONOTINDEX DB_DUP
+ DB_DUPCURSOR DB_DUPSORT DB_EID_BROADCAST DB_EID_INVALID
+ DB_ENCRYPT DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB DB_ENV_CDB_ALLDB DB_ENV_CREATE DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB DB_ENV_DIRECT_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOLOCKING DB_ENV_NOMMAP
+ DB_ENV_NOPANIC DB_ENV_OPEN_CALLED DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK DB_ENV_PRIVATE DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT DB_ENV_REP_LOGSONLY DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT DB_ENV_RPCCLIENT_GIVEN DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_THREAD DB_ENV_TXN DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC DB_ENV_USER_ALLOC DB_ENV_YIELDCPU
+ DB_EXCL DB_EXTENT DB_FAST_STAT DB_FCNTL_LOCKING DB_FILE_ID_LEN
+ DB_FIRST DB_FIXEDLEN DB_FLUSH DB_FORCE DB_GETREC DB_GET_BOTH
+ DB_GET_BOTHC DB_GET_BOTH_RANGE DB_GET_RECNO DB_HANDLE_LOCK
+ DB_HASHMAGIC DB_HASHOLDVER DB_HASHVERSION DB_INCOMPLETE
+ DB_INIT_CDB DB_INIT_LOCK DB_INIT_LOG DB_INIT_MPOOL DB_INIT_TXN
+ DB_INVALID_EID DB_JAVA_CALLBACK DB_JOINENV DB_JOIN_ITEM
+ DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_KEYLAST
+ DB_LAST DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK DB_LOCK_DEFAULT DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE DB_LOCK_NORUN DB_LOCK_NOTEXIST
+ DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_RIW_N
+ DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT DB_LOCK_SWITCH DB_LOCK_UPGRADE
+ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_LOGFILEID_INVALID
+ DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION DB_LOG_DISK DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR DB_MAX_PAGES DB_MAX_RECORDS DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT
+ DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP DB_MPOOL_PRIVATE
+ DB_MULTIPLE DB_MULTIPLE_KEY DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEEDSPLIT DB_NEXT DB_NEXT_DUP DB_NEXT_NODUP DB_NOCOPY
+ DB_NODUPDATA DB_NOLOCKING DB_NOMMAP DB_NOORDERCHK DB_NOOVERWRITE
+ DB_NOPANIC DB_NORECURSE DB_NOSERVER DB_NOSERVER_HOME
+ DB_NOSERVER_ID DB_NOSYNC DB_NOTFOUND DB_ODDFILESIZE DB_OK_BTREE
+ DB_OK_HASH DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_OVERWRITE DB_PAD DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PAGE_NOTFOUND DB_PANIC_ENVIRONMENT DB_PERMANENT
+ DB_POSITION DB_POSITIONI DB_PREV DB_PREV_NODUP DB_PRINTABLE
+ DB_PRIVATE DB_PR_HEADERS DB_PR_PAGE DB_PR_RECOVERYTEST
+ DB_QAMMAGIC DB_QAMOLDVER DB_QAMVERSION DB_RDONLY DB_RDWRMASTER
+ DB_RECNUM DB_RECORDCOUNT DB_RECORD_LOCK DB_RECOVER
+ DB_RECOVER_FATAL DB_REGION_ANON DB_REGION_INIT DB_REGION_MAGIC
+ DB_REGION_NAME DB_REGISTERED DB_RENAMEMAGIC DB_RENUMBER
+ DB_REP_CLIENT DB_REP_DUPMASTER DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER DB_REP_NEWSITE
+ DB_REP_OUTDATED DB_REP_PERMANENT DB_REP_UNAVAIL DB_REVSPLITOFF
+ DB_RMW DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_RUNRECOVERY
+ DB_SALVAGE DB_SECONDARY_BAD DB_SEQUENTIAL DB_SET
+ DB_SET_LOCK_TIMEOUT DB_SET_RANGE DB_SET_RECNO DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_CLEAR DB_SURPRISE_KID
+ DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE DB_TEST_POSTEXTOPEN DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG DB_TEST_POSTLOGMETA DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME DB_TEST_POSTSYNC DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE DB_TEST_PREEXTOPEN DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN DB_TEST_PRERENAME DB_TEST_SUBDB_LOCKS DB_THREAD
+ DB_TIMEOUT DB_TRUNCATE DB_TXNMAGIC DB_TXNVERSION DB_TXN_CKP
+ DB_TXN_LOCK DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST DB_TXN_LOCK_OPTIMISTIC DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD DB_UPDATE_SECONDARY
+ DB_UPGRADE DB_USE_ENVIRON DB_USE_ENVIRON_ROOT DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR DB_VERIFY DB_VERIFY_BAD DB_VERIFY_FATAL
+ DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK DB_WRITEOPEN
+ DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU),
+ {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_INHERIT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \\\n DB_VERSION_PATCH >= 1)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_QUEUE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 55)\n", "#endif\n"]},
+ {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_GETPGNOS", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_VERSION_STRING", type=>"PV"});
+
+print constant_types(); # macro defs
+foreach (C_constant ("BerkeleyDB", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("BerkeleyDB", $types);
+__END__
+ */
+
+ switch (len) {
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ return constant_12 (aTHX_ name, iv_return);
+ break;
+ case 13:
+ return constant_13 (aTHX_ name, iv_return);
+ break;
+ case 14:
+ return constant_14 (aTHX_ name, iv_return);
+ break;
+ case 15:
+ return constant_15 (aTHX_ name, iv_return);
+ break;
+ case 16:
+ return constant_16 (aTHX_ name, iv_return);
+ break;
+ case 17:
+ return constant_17 (aTHX_ name, iv_return, pv_return);
+ break;
+ case 18:
+ return constant_18 (aTHX_ name, iv_return);
+ break;
+ case 19:
+ return constant_19 (aTHX_ name, iv_return);
+ break;
+ case 20:
+ return constant_20 (aTHX_ name, iv_return);
+ break;
+ case 21:
+ return constant_21 (aTHX_ name, iv_return);
+ break;
+ case 22:
+ /* Names all of length 22. */
+ /* DB_ENV_RPCCLIENT_GIVEN DB_TXN_LOCK_OPTIMISTIC */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'O':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMISTIC", 22)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMISTIC
+ *iv_return = DB_TXN_LOCK_OPTIMISTIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_RPCCLIENT_GIVEN", 22)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT_GIVEN
+ *iv_return = DB_ENV_RPCCLIENT_GIVEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ case 23:
+ if (memEQ(name, "DB_ENV_TXN_WRITE_NOSYNC", 23)) {
+#ifdef DB_ENV_TXN_WRITE_NOSYNC
+ *iv_return = DB_ENV_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
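The generated lookup functions above end here. Per the embedded comment, they are rebuilt by feeding this whole file to perl -x, which skips ahead to the #! line inside the comment and runs the ExtUtils::Constant script that follows it. A hypothetical invocation (the redirection target is illustrative, not part of this diff):

    perl -x constants.h > constants.h.new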
diff --git a/libdb/perl/BerkeleyDB/constants.xs b/libdb/perl/BerkeleyDB/constants.xs
new file mode 100644
index 0000000..1b2c8b2
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/constants.xs
@@ -0,0 +1,87 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ const char *pv;
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv, &pv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid BerkeleyDB macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined BerkeleyDB macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break;
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing BerkeleyDB macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
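The XS body above follows the standard ExtUtils::Constant convention: it pushes either a one-item error message or a two-item (undef, value) list. A minimal sketch of the usual Perl-side consumer of that convention, written as an AUTOLOAD handler; the actual wiring into BerkeleyDB.pm is not shown in this diff, so the names here are illustrative:

    our $AUTOLOAD;
    sub AUTOLOAD {
        my $name = $AUTOLOAD;
        $name =~ s/.*:://;                       # strip the package prefix
        my ($error, $value) = constant($name);   # undef error means the lookup succeeded
        die $error if defined $error;            # NOTFOUND / NOTDEF messages from the XS layer
        no strict 'refs';
        *$AUTOLOAD = sub { $value };             # memoise the constant as a plain sub
        goto &$AUTOLOAD;
    }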
diff --git a/libdb/perl/BerkeleyDB/dbinfo b/libdb/perl/BerkeleyDB/dbinfo
new file mode 100755
index 0000000..af2c45f
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/dbinfo
@@ -0,0 +1,112 @@
+#!/usr/local/bin/perl
+
+# Name: dbinfo -- identify berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
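dbinfo takes a single database file argument and reports what it can deduce from the magic number in the first 20 bytes. A hypothetical run against a Btree file created with DB 4.1 or later would look roughly like this (file name and values are illustrative; the layout is the heredoc above):

    $ ./dbinfo addressbook.db
    testing file addressbook.db...

    File Type: Berkeley DB Btree file.
    File Version ID: 9
    Built with Berkeley DB: 4.1.x or greater
    Byte Order: Little Endian
    Magic: 053162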
diff --git a/libdb/perl/BerkeleyDB/hints/dec_osf.pl b/libdb/perl/BerkeleyDB/hints/dec_osf.pl
new file mode 100644
index 0000000..6d7faee
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/hints/dec_osf.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lpthreads" ];
diff --git a/libdb/perl/BerkeleyDB/hints/irix_6_5.pl b/libdb/perl/BerkeleyDB/hints/irix_6_5.pl
new file mode 100644
index 0000000..b531673
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/hints/irix_6_5.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lthread" ];
diff --git a/libdb/perl/BerkeleyDB/hints/solaris.pl b/libdb/perl/BerkeleyDB/hints/solaris.pl
new file mode 100644
index 0000000..ddd941d
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/hints/solaris.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lmt" ];
diff --git a/libdb/perl/BerkeleyDB/mkconsts b/libdb/perl/BerkeleyDB/mkconsts
new file mode 100644
index 0000000..7e09643
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/mkconsts
@@ -0,0 +1,770 @@
+#!/usr/bin/perl
+
+use ExtUtils::Constant qw(WriteConstants);
+
+use constant DEFINE => 'define' ;
+use constant STRING => 'string' ;
+use constant IGNORE => 'ignore' ;
+
+%constants = (
+
+ #########
+ # 2.0.0
+ #########
+
+ DBM_INSERT => IGNORE,
+ DBM_REPLACE => IGNORE,
+ DBM_SUFFIX => IGNORE,
+ DB_AFTER => DEFINE,
+ DB_AM_DUP => IGNORE,
+ DB_AM_INMEM => IGNORE,
+ DB_AM_LOCKING => IGNORE,
+ DB_AM_LOGGING => IGNORE,
+ DB_AM_MLOCAL => IGNORE,
+ DB_AM_PGDEF => IGNORE,
+ DB_AM_RDONLY => IGNORE,
+ DB_AM_RECOVER => IGNORE,
+ DB_AM_SWAP => IGNORE,
+ DB_AM_TXN => IGNORE,
+ DB_APP_INIT => DEFINE,
+ DB_BEFORE => DEFINE,
+ DB_BTREEMAGIC => DEFINE,
+ DB_BTREEVERSION => DEFINE,
+ DB_BT_DELIMITER => IGNORE,
+ DB_BT_EOF => IGNORE,
+ DB_BT_FIXEDLEN => IGNORE,
+ DB_BT_PAD => IGNORE,
+ DB_BT_SNAPSHOT => IGNORE,
+ DB_CHECKPOINT => DEFINE,
+ DB_CREATE => DEFINE,
+ DB_CURRENT => DEFINE,
+ DB_DBT_INTERNAL => IGNORE,
+ DB_DBT_MALLOC => IGNORE,
+ DB_DBT_PARTIAL => IGNORE,
+ DB_DBT_USERMEM => IGNORE,
+ DB_DELETED => DEFINE,
+ DB_DELIMITER => DEFINE,
+ DB_DUP => DEFINE,
+ DB_EXCL => DEFINE,
+ DB_FIRST => DEFINE,
+ DB_FIXEDLEN => DEFINE,
+ DB_FLUSH => DEFINE,
+ DB_HASHMAGIC => DEFINE,
+ DB_HASHVERSION => DEFINE,
+ DB_HS_DIRTYMETA => IGNORE,
+ DB_INCOMPLETE => DEFINE,
+ DB_INIT_LOCK => DEFINE,
+ DB_INIT_LOG => DEFINE,
+ DB_INIT_MPOOL => DEFINE,
+ DB_INIT_TXN => DEFINE,
+ DB_KEYEXIST => DEFINE,
+ DB_KEYFIRST => DEFINE,
+ DB_KEYLAST => DEFINE,
+ DB_LAST => DEFINE,
+ DB_LOCKMAGIC => DEFINE,
+ DB_LOCKVERSION => DEFINE,
+ DB_LOCK_DEADLOCK => DEFINE,
+ DB_LOCK_NOTGRANTED => DEFINE,
+ DB_LOCK_NOTHELD => DEFINE,
+ DB_LOCK_NOWAIT => DEFINE,
+ DB_LOCK_RIW_N => DEFINE,
+ DB_LOCK_RW_N => DEFINE,
+ DB_LOGMAGIC => DEFINE,
+ DB_LOGVERSION => DEFINE,
+ DB_MAX_PAGES => DEFINE,
+ DB_MAX_RECORDS => DEFINE,
+ DB_MPOOL_CLEAN => DEFINE,
+ DB_MPOOL_CREATE => DEFINE,
+ DB_MPOOL_DIRTY => DEFINE,
+ DB_MPOOL_DISCARD => DEFINE,
+ DB_MPOOL_LAST => DEFINE,
+ DB_MPOOL_NEW => DEFINE,
+ DB_MPOOL_PRIVATE => DEFINE,
+ DB_MUTEXDEBUG => DEFINE,
+ DB_NEEDSPLIT => DEFINE,
+ DB_NEXT => DEFINE,
+ DB_NOOVERWRITE => DEFINE,
+ DB_NORECURSE => DEFINE,
+ DB_NOSYNC => DEFINE,
+ DB_NOTFOUND => DEFINE,
+ DB_PAD => DEFINE,
+ DB_PREV => DEFINE,
+ DB_RDONLY => DEFINE,
+ DB_REGISTERED => DEFINE,
+ DB_RE_MODIFIED => IGNORE,
+ DB_SET => DEFINE,
+ DB_SET_RANGE => DEFINE,
+ DB_SNAPSHOT => DEFINE,
+ DB_SWAPBYTES => DEFINE,
+ DB_TRUNCATE => DEFINE,
+ DB_TXNMAGIC => DEFINE,
+ DB_TXNVERSION => DEFINE,
+ DB_TXN_BACKWARD_ROLL => DEFINE,
+ DB_TXN_FORWARD_ROLL => DEFINE,
+ DB_TXN_LOCK_2PL => DEFINE,
+ DB_TXN_LOCK_MASK => DEFINE,
+ DB_TXN_LOCK_OPTIMISTIC => DEFINE,
+ DB_TXN_LOG_MASK => DEFINE,
+ DB_TXN_LOG_REDO => DEFINE,
+ DB_TXN_LOG_UNDO => DEFINE,
+ DB_TXN_LOG_UNDOREDO => DEFINE,
+ DB_TXN_OPENFILES => DEFINE,
+ DB_TXN_REDO => DEFINE,
+ DB_TXN_UNDO => DEFINE,
+ DB_USE_ENVIRON => DEFINE,
+ DB_USE_ENVIRON_ROOT => DEFINE,
+ DB_VERSION_MAJOR => DEFINE,
+ DB_VERSION_MINOR => DEFINE,
+ DB_VERSION_PATCH => DEFINE,
+ DB_VERSION_STRING => STRING,
+ _DB_H_ => IGNORE,
+ __BIT_TYPES_DEFINED__ => IGNORE,
+ const => IGNORE,
+
+ # enum DBTYPE
+ DB_BTREE => '2.0.0',
+ DB_HASH => '2.0.0',
+ DB_RECNO => '2.0.0',
+ DB_UNKNOWN => '2.0.0',
+
+ # enum db_lockop_t
+ DB_LOCK_DUMP => '2.0.0',
+ DB_LOCK_GET => '2.0.0',
+ DB_LOCK_PUT => '2.0.0',
+ DB_LOCK_PUT_ALL => '2.0.0',
+ DB_LOCK_PUT_OBJ => '2.0.0',
+
+ # enum db_lockmode_t
+ DB_LOCK_NG => IGNORE, # 2.0.0
+ DB_LOCK_READ => IGNORE, # 2.0.0
+ DB_LOCK_WRITE => IGNORE, # 2.0.0
+ DB_LOCK_IREAD => IGNORE, # 2.0.0
+ DB_LOCK_IWRITE => IGNORE, # 2.0.0
+ DB_LOCK_IWR => IGNORE, # 2.0.0
+
+ # enum ACTION
+ FIND => IGNORE, # 2.0.0
+ ENTER => IGNORE, # 2.0.0
+
+ #########
+ # 2.0.3
+ #########
+
+ DB_SEQUENTIAL => DEFINE,
+ DB_TEMPORARY => DEFINE,
+
+ #########
+ # 2.1.0
+ #########
+
+ DB_NOMMAP => DEFINE,
+
+ #########
+ # 2.2.6
+ #########
+
+ DB_AM_THREAD => IGNORE,
+ DB_ARCH_ABS => DEFINE,
+ DB_ARCH_DATA => DEFINE,
+ DB_ARCH_LOG => DEFINE,
+ DB_LOCK_CONFLICT => DEFINE,
+ DB_LOCK_DEFAULT => DEFINE,
+ DB_LOCK_NORUN => DEFINE,
+ DB_LOCK_OLDEST => DEFINE,
+ DB_LOCK_RANDOM => DEFINE,
+ DB_LOCK_YOUNGEST => DEFINE,
+ DB_RECOVER => DEFINE,
+ DB_RECOVER_FATAL => DEFINE,
+ DB_THREAD => DEFINE,
+ DB_TXN_NOSYNC => DEFINE,
+
+ #########
+ # 2.3.0
+ #########
+
+ DB_BTREEOLDVER => DEFINE,
+ DB_BT_RECNUM => IGNORE,
+ DB_FILE_ID_LEN => DEFINE,
+ DB_GETREC => DEFINE,
+ DB_HASHOLDVER => DEFINE,
+ DB_KEYEMPTY => DEFINE,
+ DB_LOGOLDVER => DEFINE,
+ DB_RECNUM => DEFINE,
+ DB_RECORDCOUNT => DEFINE,
+ DB_RENUMBER => DEFINE,
+ DB_RE_DELIMITER => IGNORE,
+ DB_RE_FIXEDLEN => IGNORE,
+ DB_RE_PAD => IGNORE,
+ DB_RE_RENUMBER => IGNORE,
+ DB_RE_SNAPSHOT => IGNORE,
+
+ #########
+ # 2.3.1
+ #########
+
+ DB_GET_RECNO => DEFINE,
+ DB_SET_RECNO => DEFINE,
+
+ #########
+ # 2.3.3
+ #########
+
+ DB_APPEND => DEFINE,
+
+ #########
+ # 2.3.6
+ #########
+
+ DB_TXN_CKP => DEFINE,
+
+ #########
+ # 2.3.11
+ #########
+
+ DB_ENV_APPINIT => DEFINE,
+ DB_ENV_STANDALONE => DEFINE,
+ DB_ENV_THREAD => DEFINE,
+
+ #########
+ # 2.3.12
+ #########
+
+ DB_FUNC_CALLOC => IGNORE,
+ DB_FUNC_CLOSE => IGNORE,
+ DB_FUNC_DIRFREE => IGNORE,
+ DB_FUNC_DIRLIST => IGNORE,
+ DB_FUNC_EXISTS => IGNORE,
+ DB_FUNC_FREE => IGNORE,
+ DB_FUNC_FSYNC => IGNORE,
+ DB_FUNC_IOINFO => IGNORE,
+ DB_FUNC_MALLOC => IGNORE,
+ DB_FUNC_MAP => IGNORE,
+ DB_FUNC_OPEN => IGNORE,
+ DB_FUNC_READ => IGNORE,
+ DB_FUNC_REALLOC => IGNORE,
+ DB_FUNC_SEEK => IGNORE,
+ DB_FUNC_SLEEP => IGNORE,
+ DB_FUNC_STRDUP => IGNORE,
+ DB_FUNC_UNLINK => IGNORE,
+ DB_FUNC_UNMAP => IGNORE,
+ DB_FUNC_WRITE => IGNORE,
+ DB_FUNC_YIELD => IGNORE,
+
+ #########
+ # 2.3.14
+ #########
+
+ DB_TSL_SPINS => IGNORE,
+
+ #########
+ # 2.3.16
+ #########
+
+ DB_DBM_HSEARCH => IGNORE,
+ firstkey => IGNORE,
+ hdestroy => IGNORE,
+
+ #########
+ # 2.4.10
+ #########
+
+ DB_CURLSN => DEFINE,
+ DB_FUNC_RUNLINK => IGNORE,
+ DB_REGION_ANON => DEFINE,
+ DB_REGION_INIT => DEFINE,
+ DB_REGION_NAME => DEFINE,
+ DB_TXN_LOCK_OPTIMIST => DEFINE,
+ __CURRENTLY_UNUSED => IGNORE,
+
+ # enum db_status_t
+ DB_LSTAT_ABORTED => IGNORE, # 2.4.10
+ DB_LSTAT_ERR => IGNORE, # 2.4.10
+ DB_LSTAT_FREE => IGNORE, # 2.4.10
+ DB_LSTAT_HELD => IGNORE, # 2.4.10
+ DB_LSTAT_NOGRANT => IGNORE, # 2.4.10
+ DB_LSTAT_PENDING => IGNORE, # 2.4.10
+ DB_LSTAT_WAITING => IGNORE, # 2.4.10
+
+ #########
+ # 2.4.14
+ #########
+
+ DB_MUTEXLOCKS => DEFINE,
+ DB_PAGEYIELD => DEFINE,
+ __UNUSED_100 => IGNORE,
+ __UNUSED_4000 => IGNORE,
+
+ #########
+ # 2.5.2
+ #########
+
+ DBC_CONTINUE => IGNORE,
+ DBC_KEYSET => IGNORE,
+ DBC_RECOVER => IGNORE,
+ DBC_RMW => IGNORE,
+ DB_DBM_ERROR => IGNORE,
+ DB_GET_BOTH => DEFINE,
+ DB_NEXT_DUP => DEFINE,
+ DB_OPFLAGS_MASK => DEFINE,
+ DB_RMW => DEFINE,
+ DB_RUNRECOVERY => DEFINE,
+ dbmclose => IGNORE,
+
+ #########
+ # 2.5.9
+ #########
+
+ DB_DUPSORT => DEFINE,
+ DB_JOIN_ITEM => DEFINE,
+
+ #########
+ # 2.6.4
+ #########
+
+ DBC_WRITER => IGNORE,
+ DB_AM_CDB => IGNORE,
+ DB_ENV_CDB => DEFINE,
+ DB_INIT_CDB => DEFINE,
+ DB_LOCK_UPGRADE => DEFINE,
+ DB_WRITELOCK => DEFINE,
+
+ #########
+ # 2.7.1
+ #########
+
+
+ # enum db_lockop_t
+ DB_LOCK_INHERIT => '2.7.1',
+
+ #########
+ # 2.7.7
+ #########
+
+ DB_FCNTL_LOCKING => DEFINE,
+
+ #########
+ # 3.0.55
+ #########
+
+ DBC_WRITECURSOR => IGNORE,
+ DB_AM_DISCARD => IGNORE,
+ DB_AM_SUBDB => IGNORE,
+ DB_BT_REVSPLIT => IGNORE,
+ DB_CONSUME => DEFINE,
+ DB_CXX_NO_EXCEPTIONS => DEFINE,
+ DB_DBT_REALLOC => IGNORE,
+ DB_DUPCURSOR => DEFINE,
+ DB_ENV_CREATE => DEFINE,
+ DB_ENV_DBLOCAL => DEFINE,
+ DB_ENV_LOCKDOWN => DEFINE,
+ DB_ENV_LOCKING => DEFINE,
+ DB_ENV_LOGGING => DEFINE,
+ DB_ENV_NOMMAP => DEFINE,
+ DB_ENV_OPEN_CALLED => DEFINE,
+ DB_ENV_PRIVATE => DEFINE,
+ DB_ENV_SYSTEM_MEM => DEFINE,
+ DB_ENV_TXN => DEFINE,
+ DB_ENV_TXN_NOSYNC => DEFINE,
+ DB_ENV_USER_ALLOC => DEFINE,
+ DB_FORCE => DEFINE,
+ DB_LOCKDOWN => DEFINE,
+ DB_LOCK_RECORD => DEFINE,
+ DB_LOGFILEID_INVALID => DEFINE,
+ DB_MPOOL_NEW_GROUP => DEFINE,
+ DB_NEXT_NODUP => DEFINE,
+ DB_OK_BTREE => DEFINE,
+ DB_OK_HASH => DEFINE,
+ DB_OK_QUEUE => DEFINE,
+ DB_OK_RECNO => DEFINE,
+ DB_OLD_VERSION => DEFINE,
+ DB_OPEN_CALLED => DEFINE,
+ DB_PAGE_LOCK => DEFINE,
+ DB_POSITION => DEFINE,
+ DB_POSITIONI => DEFINE,
+ DB_PRIVATE => DEFINE,
+ DB_QAMMAGIC => DEFINE,
+ DB_QAMOLDVER => DEFINE,
+ DB_QAMVERSION => DEFINE,
+ DB_RECORD_LOCK => DEFINE,
+ DB_REVSPLITOFF => DEFINE,
+ DB_SYSTEM_MEM => DEFINE,
+ DB_TEST_POSTLOG => DEFINE,
+ DB_TEST_POSTLOGMETA => DEFINE,
+ DB_TEST_POSTOPEN => DEFINE,
+ DB_TEST_POSTRENAME => DEFINE,
+ DB_TEST_POSTSYNC => DEFINE,
+ DB_TEST_PREOPEN => DEFINE,
+ DB_TEST_PRERENAME => DEFINE,
+ DB_TXN_NOWAIT => DEFINE,
+ DB_TXN_SYNC => DEFINE,
+ DB_UPGRADE => DEFINE,
+ DB_VERB_CHKPOINT => DEFINE,
+ DB_VERB_DEADLOCK => DEFINE,
+ DB_VERB_RECOVERY => DEFINE,
+ DB_VERB_WAITSFOR => DEFINE,
+ DB_WRITECURSOR => DEFINE,
+ DB_XA_CREATE => DEFINE,
+
+ # enum DBTYPE
+ DB_QUEUE => '3.0.55',
+
+ #########
+ # 3.1.12
+ #########
+
+ DBC_ACTIVE => IGNORE,
+ DBC_OPD => IGNORE,
+ DBC_TRANSIENT => IGNORE,
+ DBC_WRITEDUP => IGNORE,
+ DB_AGGRESSIVE => DEFINE,
+ DB_AM_DUPSORT => IGNORE,
+ DB_CACHED_COUNTS => DEFINE,
+ DB_CLIENT => DEFINE,
+ DB_DBT_DUPOK => IGNORE,
+ DB_DBT_ISSET => IGNORE,
+ DB_ENV_RPCCLIENT => DEFINE,
+ DB_GET_BOTHC => DEFINE,
+ DB_JOIN_NOSORT => DEFINE,
+ DB_NODUPDATA => DEFINE,
+ DB_NOORDERCHK => DEFINE,
+ DB_NOSERVER => DEFINE,
+ DB_NOSERVER_HOME => DEFINE,
+ DB_NOSERVER_ID => DEFINE,
+ DB_ODDFILESIZE => DEFINE,
+ DB_ORDERCHKONLY => DEFINE,
+ DB_PREV_NODUP => DEFINE,
+ DB_PR_HEADERS => DEFINE,
+ DB_PR_PAGE => DEFINE,
+ DB_PR_RECOVERYTEST => DEFINE,
+ DB_RDWRMASTER => DEFINE,
+ DB_SALVAGE => DEFINE,
+ DB_VERIFY_BAD => DEFINE,
+ DB_VERIFY_FATAL => DEFINE,
+ DB_VRFY_FLAGMASK => DEFINE,
+
+ # enum db_recops
+ DB_TXN_ABORT => '3.1.12',
+ DB_TXN_BACKWARD_ROLL => '3.1.12',
+ DB_TXN_FORWARD_ROLL => '3.1.12',
+ DB_TXN_OPENFILES => '3.1.12',
+
+ #########
+ # 3.2.3
+ #########
+
+ DBC_COMPENSATE => IGNORE,
+ DB_AM_VERIFYING => IGNORE,
+ DB_CDB_ALLDB => DEFINE,
+ DB_ENV_CDB_ALLDB => DEFINE,
+ DB_EXTENT => DEFINE,
+ DB_JOINENV => DEFINE,
+ DB_LOCK_SWITCH => DEFINE,
+ DB_MPOOL_EXTENT => DEFINE,
+ DB_REGION_MAGIC => DEFINE,
+ DB_UNRESOLVED_CHILD => DEFINE,
+ DB_VERIFY => DEFINE,
+
+ # enum db_notices
+ DB_NOTICE_LOGFILE_CHANGED => IGNORE, # 3.2.3
+
+ #########
+ # 3.2.6
+ #########
+
+ DB_ALREADY_ABORTED => DEFINE,
+ DB_CONSUME_WAIT => DEFINE,
+ DB_JAVA_CALLBACK => DEFINE,
+ DB_TEST_POSTEXTDELETE => DEFINE,
+ DB_TEST_POSTEXTOPEN => DEFINE,
+ DB_TEST_POSTEXTUNLINK => DEFINE,
+ DB_TEST_PREEXTDELETE => DEFINE,
+ DB_TEST_PREEXTOPEN => DEFINE,
+ DB_TEST_PREEXTUNLINK => DEFINE,
+
+ # enum db_lockmode_t
+ DB_LOCK_WAIT => IGNORE, # 3.2.6
+
+ #########
+ # 3.3.4
+ #########
+
+ DBC_DIRTY_READ => IGNORE,
+ DBC_MULTIPLE => IGNORE,
+ DBC_MULTIPLE_KEY => IGNORE,
+ DB_AM_DIRTY => IGNORE,
+ DB_AM_SECONDARY => IGNORE,
+ DB_COMMIT => DEFINE,
+ DB_DBT_APPMALLOC => IGNORE,
+ DB_DIRTY_READ => DEFINE,
+ DB_DONOTINDEX => DEFINE,
+ DB_ENV_PANIC_OK => DEFINE,
+ DB_ENV_RPCCLIENT_GIVEN => DEFINE,
+ DB_FAST_STAT => DEFINE,
+ DB_LOCK_MAXLOCKS => DEFINE,
+ DB_LOCK_MINLOCKS => DEFINE,
+ DB_LOCK_MINWRITE => DEFINE,
+ DB_MULTIPLE => DEFINE,
+ DB_MULTIPLE_KEY => DEFINE,
+ DB_PAGE_NOTFOUND => DEFINE,
+ DB_RPC_SERVERPROG => DEFINE,
+ DB_RPC_SERVERVERS => DEFINE,
+ DB_UPDATE_SECONDARY => DEFINE,
+ DB_XIDDATASIZE => DEFINE,
+
+ # enum db_recops
+ DB_TXN_POPENFILES => '3.3.4',
+
+ # enum db_lockop_t
+ DB_LOCK_UPGRADE_WRITE => '3.3.4',
+
+ # enum db_lockmode_t
+ DB_LOCK_DIRTY => IGNORE, # 3.3.4
+ DB_LOCK_WWRITE => IGNORE, # 3.3.4
+
+ #########
+ # 3.3.11
+ #########
+
+ DB_SECONDARY_BAD => DEFINE,
+ DB_SURPRISE_KID => DEFINE,
+ DB_TEST_POSTDESTROY => DEFINE,
+ DB_TEST_PREDESTROY => DEFINE,
+
+ #########
+ # 4.0.7
+ #########
+
+ DB_APPLY_LOGREG => DEFINE,
+ DB_BROADCAST_EID => DEFINE,
+ DB_CL_WRITER => DEFINE,
+ DB_ENV_NOLOCKING => DEFINE,
+ DB_ENV_NOPANIC => DEFINE,
+ DB_ENV_REGION_INIT => DEFINE,
+ DB_ENV_REP_CLIENT => DEFINE,
+ DB_ENV_REP_LOGSONLY => DEFINE,
+ DB_ENV_REP_MASTER => DEFINE,
+ DB_ENV_YIELDCPU => DEFINE,
+ DB_GET_BOTH_RANGE => DEFINE,
+ DB_INVALID_EID => DEFINE,
+ DB_LOCK_EXPIRE => DEFINE,
+ DB_LOCK_FREE_LOCKER => DEFINE,
+ DB_LOCK_SET_TIMEOUT => DEFINE,
+ DB_LOGC_BUF_SIZE => DEFINE,
+ DB_LOG_DISK => DEFINE,
+ DB_LOG_LOCKED => DEFINE,
+ DB_LOG_SILENT_ERR => DEFINE,
+ DB_NOLOCKING => DEFINE,
+ DB_NOPANIC => DEFINE,
+ DB_PANIC_ENVIRONMENT => DEFINE,
+ DB_REP_CLIENT => DEFINE,
+ DB_REP_DUPMASTER => DEFINE,
+ DB_REP_HOLDELECTION => DEFINE,
+ DB_REP_LOGSONLY => DEFINE,
+ DB_REP_MASTER => DEFINE,
+ DB_REP_NEWMASTER => DEFINE,
+ DB_REP_NEWSITE => DEFINE,
+ DB_REP_OUTDATED => DEFINE,
+ DB_REP_PERMANENT => DEFINE,
+ DB_REP_UNAVAIL => DEFINE,
+ DB_SET_LOCK_TIMEOUT => DEFINE,
+ DB_SET_TXN_NOW => DEFINE,
+ DB_SET_TXN_TIMEOUT => DEFINE,
+ DB_STAT_CLEAR => DEFINE,
+ DB_TIMEOUT => DEFINE,
+ DB_YIELDCPU => DEFINE,
+ MP_FLUSH => IGNORE,
+ MP_OPEN_CALLED => IGNORE,
+ MP_READONLY => IGNORE,
+ MP_UPGRADE => IGNORE,
+ MP_UPGRADE_FAIL => IGNORE,
+ TXN_CHILDCOMMIT => IGNORE,
+ TXN_COMPENSATE => IGNORE,
+ TXN_DIRTY_READ => IGNORE,
+ TXN_LOCKTIMEOUT => IGNORE,
+ TXN_MALLOC => IGNORE,
+ TXN_NOSYNC => IGNORE,
+ TXN_NOWAIT => IGNORE,
+ TXN_SYNC => IGNORE,
+
+ # enum db_recops
+ DB_TXN_APPLY => '4.0.7',
+
+ # enum db_lockop_t
+ DB_LOCK_GET_TIMEOUT => '4.0.7',
+ DB_LOCK_PUT_READ => '4.0.7',
+ DB_LOCK_TIMEOUT => '4.0.7',
+
+ # enum db_status_t
+ DB_LSTAT_EXPIRED => IGNORE, # 4.0.7
+
+ #########
+ # 4.0.14
+ #########
+
+ DB_EID_BROADCAST => DEFINE,
+ DB_EID_INVALID => DEFINE,
+ DB_VERB_REPLICATION => DEFINE,
+
+ #########
+ # 4.1.17
+ #########
+
+ DBC_OWN_LID => IGNORE,
+ DB_AM_CHKSUM => IGNORE,
+ DB_AM_CL_WRITER => IGNORE,
+ DB_AM_COMPENSATE => IGNORE,
+ DB_AM_CREATED => IGNORE,
+ DB_AM_CREATED_MSTR => IGNORE,
+ DB_AM_DBM_ERROR => IGNORE,
+ DB_AM_DELIMITER => IGNORE,
+ DB_AM_ENCRYPT => IGNORE,
+ DB_AM_FIXEDLEN => IGNORE,
+ DB_AM_IN_RENAME => IGNORE,
+ DB_AM_OPEN_CALLED => IGNORE,
+ DB_AM_PAD => IGNORE,
+ DB_AM_RECNUM => IGNORE,
+ DB_AM_RENUMBER => IGNORE,
+ DB_AM_REVSPLITOFF => IGNORE,
+ DB_AM_SNAPSHOT => IGNORE,
+ DB_AUTO_COMMIT => DEFINE,
+ DB_CHKSUM_SHA1 => DEFINE,
+ DB_DIRECT => DEFINE,
+ DB_DIRECT_DB => DEFINE,
+ DB_DIRECT_LOG => DEFINE,
+ DB_ENCRYPT => DEFINE,
+ DB_ENCRYPT_AES => DEFINE,
+ DB_ENV_AUTO_COMMIT => DEFINE,
+ DB_ENV_DIRECT_DB => DEFINE,
+ DB_ENV_DIRECT_LOG => DEFINE,
+ DB_ENV_FATAL => DEFINE,
+ DB_ENV_OVERWRITE => DEFINE,
+ DB_ENV_TXN_WRITE_NOSYNC => DEFINE,
+ DB_HANDLE_LOCK => DEFINE,
+ DB_LOCK_NOTEXIST => DEFINE,
+ DB_LOCK_REMOVE => DEFINE,
+ DB_NOCOPY => DEFINE,
+ DB_OVERWRITE => DEFINE,
+ DB_PERMANENT => DEFINE,
+ DB_PRINTABLE => DEFINE,
+ DB_RENAMEMAGIC => DEFINE,
+ DB_TEST_ELECTINIT => DEFINE,
+ DB_TEST_ELECTSEND => DEFINE,
+ DB_TEST_ELECTVOTE1 => DEFINE,
+ DB_TEST_ELECTVOTE2 => DEFINE,
+ DB_TEST_ELECTWAIT1 => DEFINE,
+ DB_TEST_ELECTWAIT2 => DEFINE,
+ DB_TEST_SUBDB_LOCKS => DEFINE,
+ DB_TXN_LOCK => DEFINE,
+ DB_TXN_WRITE_NOSYNC => DEFINE,
+ DB_WRITEOPEN => DEFINE,
+ DB_WRNOSYNC => DEFINE,
+ _DB_EXT_PROT_IN_ => IGNORE,
+
+ # enum db_lockop_t
+ DB_LOCK_TRADE => '4.1.17',
+
+ # enum db_status_t
+ DB_LSTAT_NOTEXIST => IGNORE, # 4.1.17
+
+ # enum DB_CACHE_PRIORITY
+ DB_PRIORITY_VERY_LOW => '4.1.17',
+ DB_PRIORITY_LOW => '4.1.17',
+ DB_PRIORITY_DEFAULT => '4.1.17',
+ DB_PRIORITY_HIGH => '4.1.17',
+ DB_PRIORITY_VERY_HIGH => '4.1.17',
+
+ # enum db_recops
+ DB_TXN_BACKWARD_ALLOC => '4.1.17',
+ DB_TXN_GETPGNOS => '4.1.17',
+ DB_TXN_PRINT => '4.1.17',
+
+ ) ;
+
+sub enum_Macro
+{
+ my $str = shift ;
+ my ($major, $minor, $patch) = split /\./, $str ;
+
+ my $macro =
+ "#if (DB_VERSION_MAJOR > $major) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR > $minor) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR == $minor && \\\n" .
+ " DB_VERSION_PATCH >= $patch)\n" ;
+
+ return $macro;
+
+}
+
+sub OutputXS
+{
+
+ my @names = () ;
+
+ foreach my $key (sort keys %constants)
+ {
+ my $val = $constants{$key} ;
+ next if $val eq IGNORE;
+
+ if ($val eq STRING)
+ { push @names, { name => $key, type => "PV" } }
+ elsif ($val eq DEFINE)
+ { push @names, $key }
+ else
+ { push @names, { name => $key, macro => [enum_Macro($val), "#endif\n"] } }
+ }
+
+ warn "Updating constants.xs & constants.h...\n";
+ WriteConstants(
+ NAME => BerkeleyDB,
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+ ) ;
+}
+
+sub OutputPM
+{
+ my $filename = 'BerkeleyDB.pm';
+ warn "Updating $filename...\n";
+ open IN, "<$filename" or die "Cannot open $filename: $!\n";
+ open OUT, ">$filename.tmp" or die "Cannot open $filename.tmp: $!\n";
+
+ my $START = '@EXPORT = qw(' ;
+ my $START_re = quotemeta $START ;
+ my $END = ');';
+ my $END_re = quotemeta $END ;
+
+ # skip to the @EXPORT declaration
+ OUTER: while (<IN>)
+ {
+ if ( /^\s*$START_re/ )
+ {
+ # skip to the end marker.
+ while (<IN>)
+ { last OUTER if /^\s*$END_re/ }
+ }
+ print OUT ;
+ }
+
+ print OUT "$START\n";
+ foreach my $key (sort keys %constants)
+ {
+ next if $constants{$key} eq IGNORE;
+ print OUT "\t$key\n";
+ }
+ print OUT "\t$END\n";
+
+ while (<IN>)
+ {
+ print OUT ;
+ }
+
+ close IN;
+ close OUT;
+
+ rename $filename, "$filename.bak" or die "Cannot rename $filename: $!\n" ;
+ rename "$filename.tmp", $filename or die "Cannot rename $filename.tmp: $!\n" ;
+}
+
+OutputXS() ;
+OutputPM() ;
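For a %constants entry whose value is a version string rather than DEFINE, enum_Macro turns that string into the Berkeley DB version guard that OutputXS passes to WriteConstants (the matching "#endif\n" is appended separately there). A quick sketch of what the helper returns, matching the guards visible in the generated constant functions earlier in this diff:

    # enum_Macro('4.1.17') returns a four-line #if guard:
    print enum_Macro('4.1.17');
    # #if (DB_VERSION_MAJOR > 4) || \
    #     (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
    #     (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
    #     DB_VERSION_PATCH >= 17)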
diff --git a/libdb/perl/BerkeleyDB/mkpod b/libdb/perl/BerkeleyDB/mkpod
new file mode 100755
index 0000000..44bbf3f
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/mkpod
@@ -0,0 +1,146 @@
+#!/usr/local/bin/perl5
+
+# Filename: mkpod
+#
+# Author: Paul Marquess
+
+# File types
+#
+# Macro files end with .M
+# Tagged source files end with .T
+# Output from the code ends with .O
+# Pre-Pod file ends with .P
+#
+# Tags
+#
+# ## BEGIN tagname
+# ...
+# ## END tagname
+#
+# ## 0
+# ## 1
+#
+
+# Constants
+
+$TOKEN = '##' ;
+$Verbose = 1 if $ARGV[0] =~ /^-v/i ;
+
+# Macros files first
+foreach $file (glob("*.M"))
+{
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ print " Processing Macro file $file\n" ;
+ while (<F>)
+ {
+ # Skip blank & comment lines
+ next if /^\s*$/ || /^\s*#/ ;
+
+ #
+ ($name, $expand) = split (/\t+/, $_, 2) ;
+
+ $expand =~ s/^\s*// ;
+ $expand =~ s/\s*$// ;
+
+ if ($expand =~ /\[#/ )
+ {
+ }
+
+ $Macros{$name} = $expand ;
+ }
+ close F ;
+}
+
+# Suck up all the code files
+foreach $file (glob("t/*.T"))
+{
+ ($newfile = $file) =~ s/\.T$// ;
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ open (N, ">$newfile") or die "Cannot open '$newfile':$!\n" ;
+
+ print " Processing $file -> $newfile\n" ;
+
+ while ($line = <F>)
+ {
+ if ($line =~ /^$TOKEN\s*BEGIN\s+(\w+)\s*$/ or
+ $line =~ m[\s*/\*$TOKEN\s*BEGIN\s+(\w+)\s*$] )
+ {
+ print " Section $1 begins\n" if $Verbose ;
+ $InSection{$1} ++ ;
+ $Section{$1} = '' unless $Section{$1} ;
+ }
+ elsif ($line =~ /^$TOKEN\s*END\s+(\w+)\s*$/ or
+ $line =~ m[^\s*/\*$TOKEN\s*END\s+(\w+)\s*$] )
+ {
+ warn "Encountered END without a begin [$line]\n"
+ unless $InSection{$1} ;
+
+ delete $InSection{$1} ;
+ print " Section $1 ends\n" if $Verbose ;
+ }
+ else
+ {
+ print N $line ;
+ chop $line ;
+ $line =~ s/\s*$// ;
+
+ # Save the current line in each of the sections
+ foreach( keys %InSection)
+ {
+ if ($line !~ /^\s*$/ )
+ #{ $Section{$_} .= " $line" }
+ { $Section{$_} .= $line }
+ $Section{$_} .= "\n" ;
+ }
+ }
+
+ }
+
+ if (%InSection)
+ {
+ # Check for unclosed sections
+ print "The following Sections are not terminated\n" ;
+ foreach (sort keys %InSection)
+ { print "\t$_\n" }
+ exit 1 ;
+ }
+
+ close F ;
+ close N ;
+}
+
+print "\n\nCreating pod file(s)\n\n" if $Verbose ;
+
+@ppods = glob('*.P') ;
+#$ppod = $ARGV[0] ;
+#$pod = $ARGV[1] ;
+
+# Now process the pre-pod file
+foreach $ppod (@ppods)
+{
+ ($pod = $ppod) =~ s/\.P$// ;
+ open (PPOD, "<$ppod") or die "Cannot open file '$ppod': $!\n" ;
+ open (POD, ">$pod") or die "Cannot open file '$pod': $!\n" ;
+
+ print " $ppod -> $pod\n" ;
+
+ while ($line = <PPOD>)
+ {
+ if ( $line =~ /^\s*$TOKEN\s*(\w+)\s*$/)
+ {
+ warn "No code insert '$1' available\n"
+ unless $Section{$1} ;
+
+ print "Expanding section $1\n" if $Verbose ;
+ print POD $Section{$1} ;
+ }
+ else
+ {
+# $line =~ s/\[#([^\]])]/$Macros{$1}/ge ;
+ print POD $line ;
+ }
+ }
+
+ close PPOD ;
+ close POD ;
+}
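The tag scheme documented at the top of mkpod can be illustrated with a hypothetical tagged test file and its matching pre-pod insertion point (the section name and file names are illustrative, not taken from this diff):

    # In a tagged source file such as t/example.t.T:
    ## BEGIN hash_example
    my %h ;
    tie %h, 'BerkeleyDB::Hash', -Filename => $filename, -Flags => DB_CREATE ;
    ## END hash_example

    # In the corresponding .P pre-pod file, a line containing only the tag
    # name is replaced by the code captured between BEGIN and END:
    ## hash_example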
diff --git a/libdb/perl/BerkeleyDB/patches/5.004 b/libdb/perl/BerkeleyDB/patches/5.004
new file mode 100644
index 0000000..143ec95
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/libdb/perl/BerkeleyDB/patches/5.004_01 b/libdb/perl/BerkeleyDB/patches/5.004_01
new file mode 100644
index 0000000..1b05eb4
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.004_02 b/libdb/perl/BerkeleyDB/patches/5.004_02
new file mode 100644
index 0000000..238f873
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.004_03 b/libdb/perl/BerkeleyDB/patches/5.004_03
new file mode 100644
index 0000000..06331ea
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/libdb/perl/BerkeleyDB/patches/5.004_04 b/libdb/perl/BerkeleyDB/patches/5.004_04
new file mode 100644
index 0000000..a227dc7
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.004_05 b/libdb/perl/BerkeleyDB/patches/5.004_05
new file mode 100644
index 0000000..51c8bf3
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.005 b/libdb/perl/BerkeleyDB/patches/5.005
new file mode 100644
index 0000000..effee3e
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.005_01 b/libdb/perl/BerkeleyDB/patches/5.005_01
new file mode 100644
index 0000000..2a05dd5
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.005_02 b/libdb/perl/BerkeleyDB/patches/5.005_02
new file mode 100644
index 0000000..5dd57dd
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/patches/5.005_03 b/libdb/perl/BerkeleyDB/patches/5.005_03
new file mode 100644
index 0000000..115f9f5
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/libdb/perl/BerkeleyDB/patches/5.6.0 b/libdb/perl/BerkeleyDB/patches/5.6.0
new file mode 100644
index 0000000..1f9b3b6
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/BerkeleyDB/ppport.h b/libdb/perl/BerkeleyDB/ppport.h
new file mode 100644
index 0000000..c343835
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/ppport.h
@@ -0,0 +1,329 @@
+/* This file is Based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+# define newRV_noinc(sv) \
+	((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv))
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
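+
+/* Illustrative sketch only (not part of the upstream header): the six
+ * steps above condensed into one hypothetical extension.  The key string
+ * and the struct members are made-up names for the example.
+ *
+ *     #define MY_CXT_KEY "Some::Extension::_guts" XS_VERSION
+ *
+ *     typedef struct {
+ *         int  call_count;          interpreter-local data lives here
+ *         SV  *cached_sv;
+ *     } my_cxt_t;
+ *
+ *     START_MY_CXT
+ *
+ *     BOOT:
+ *         MY_CXT_INIT;              run exactly once per interpreter
+ *         MY_CXT.call_count = 0;
+ *
+ *     void
+ *     some_xsub()
+ *         CODE:
+ *             dMY_CXT;              declare access to the per-interpreter data
+ *             MY_CXT.call_count++;
+ */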
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MY_CXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
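+
+/* Illustrative sketch only (not part of the upstream header): roughly how
+ * the *DB*_File XS modules use these two macros.  The type and field names
+ * (DB_File, db->filter_store_key, db->filtering) are assumptions for the
+ * example, not a definition of this module's API.
+ *
+ *     SV *
+ *     filter_store_key(db, code)
+ *             DB_File  db
+ *             SV *     code
+ *             SV *     RETVAL = &PL_sv_undef ;
+ *         CODE:
+ *             DBM_setFilter(db->filter_store_key, code) ;
+ *
+ * and, just before each key is written, the filter is applied with
+ *
+ *             DBM_ckFilter(key, filter_store_key, "filter_store_key") ;
+ */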
+
+#endif /* _P_P_PORTABILITY_H_ */
diff --git a/libdb/perl/BerkeleyDB/scan b/libdb/perl/BerkeleyDB/scan
new file mode 100644
index 0000000..eb06495
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/scan
@@ -0,0 +1,229 @@
+#!/usr/local/bin/perl
+
+my $ignore_re = '^(' . join("|",
+ qw(
+ _
+ [a-z]
+ DBM
+ DBC
+ DB_AM_
+ DB_BT_
+ DB_RE_
+ DB_HS_
+ DB_FUNC_
+ DB_DBT_
+ DB_DBM
+ DB_TSL
+ MP
+ TXN
+ )) . ')' ;
+
+my %ignore_def = map {$_, 1} qw() ;
+
+%ignore_enums = map {$_, 1} qw( ACTION db_status_t db_notices db_lockmode_t ) ;
+
+my $filler = ' ' x 26 ;
+
+chdir "libraries" || die "Cannot chdir into './libraries': $!\n";
+
+foreach my $name (sort tuple glob "[2-9]*")
+{
+ my $inc = "$name/include/db.h" ;
+ next unless -f $inc ;
+
+ my $file = readFile($inc) ;
+ StripCommentsAndStrings($file) ;
+ my $result = scan($name, $file) ;
+ print "\n\t#########\n\t# $name\n\t#########\n\n$result"
+ if $result;
+}
+exit ;
+
+
+sub scan
+{
+ my $version = shift ;
+ my $file = shift ;
+
+ my %seen_define = () ;
+ my $result = "" ;
+
+ if (1) {
+ # Preprocess all tri-graphs
+ # including things stuck in quoted string constants.
+ $file =~ s/\?\?=/#/g; # | ??=| #|
+ $file =~ s/\?\?\!/|/g; # | ??!| ||
+ $file =~ s/\?\?'/^/g; # | ??'| ^|
+ $file =~ s/\?\?\(/[/g; # | ??(| [|
+ $file =~ s/\?\?\)/]/g; # | ??)| ]|
+ $file =~ s/\?\?\-/~/g; # | ??-| ~|
+ $file =~ s/\?\?\//\\/g; # | ??/| \|
+ $file =~ s/\?\?</{/g; # | ??<| {|
+ $file =~ s/\?\?>/}/g; # | ??>| }|
+ }
+
+ while ( $file =~ /^\s*#\s*define\s+([\$\w]+)\b(?!\()\s*(.*)/gm )
+ {
+ my $def = $1;
+ my $rest = $2;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_def{$def} || $def =~ /$ignore_re/o ;
+
+ # Cannot do: (-1) and ((LHANDLE)3) are OK:
+ #print("Skip non-wordy $def => $rest\n"),
+
+ $rest =~ s/\s*$//;
+ #next if $rest =~ /[^\w\$]/;
+
+ #print "Matched $_ ($def)\n" ;
+
+ next if $before{$def} ++ ;
+
+ if ($ignore)
+ { $seen_define{$def} = 'IGNORE' }
+ elsif ($rest =~ /"/)
+ { $seen_define{$def} = 'STRING' }
+ else
+ { $seen_define{$def} = 'DEFINE' }
+ }
+
+ foreach $define (sort keys %seen_define)
+ {
+ my $out = $filler ;
+ substr($out,0, length $define) = $define;
+ $result .= "\t$out => $seen_define{$define},\n" ;
+ }
+
+ while ($file =~ /\btypedef\s+enum\s*{(.*?)}\s*(\w+)/gs )
+ {
+ my $enum = $1 ;
+ my $name = $2 ;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_enums{$name} ;
+
+ #$enum =~ s/\s*=\s*\S+\s*(,?)\s*\n/$1/g;
+ $enum =~ s/^\s*//;
+ $enum =~ s/\s*$//;
+
+ my @tokens = map { s/\s*=.*// ; $_} split /\s*,\s*/, $enum ;
+ my @new = grep { ! $Enums{$_}++ } @tokens ;
+ if (@new)
+ {
+ my $value ;
+ if ($ignore)
+ { $value = "IGNORE, # $version" }
+ else
+ { $value = "'$version'," }
+
+ $result .= "\n\t# enum $name\n";
+ my $out = $filler ;
+ foreach $name (@new)
+ {
+ $out = $filler ;
+ substr($out,0, length $name) = $name;
+ $result .= "\t$out => $value\n" ;
+ }
+ }
+ }
+
+ return $result ;
+}
+
+
+sub StripCommentsAndStrings
+{
+
+	# Strip C & C++ comments
+ # From the perlfaq
+ $_[0] =~
+
+ s{
+ /\* ## Start of /* ... */ comment
+ [^*]*\*+ ## Non-* followed by 1-or-more *'s
+ (
+ [^/*][^*]*\*+
+ )* ## 0-or-more things which don't start with /
+ ## but do end with '*'
+ / ## End of /* ... */ comment
+
+ | ## OR C++ Comment
+ // ## Start of C++ comment //
+ [^\n]* ## followed by 0-or-more non end of line characters
+
+ | ## OR various things which aren't comments:
+
+ (
+ " ## Start of " ... " string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^"\\] ## Non "\
+ )*
+ " ## End of " ... " string
+
+ | ## OR
+
+ ' ## Start of ' ... ' string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^'\\] ## Non '\
+ )*
+ ' ## End of ' ... ' string
+
+ | ## OR
+
+           .            ## Any other char
+           [^/"'\\]*    ## Chars which don't start a comment, string or escape
+ )
+ }{$2}gxs;
+
+
+
+ # Remove double-quoted strings.
+ #$_[0] =~ s#"(\\.|[^"\\])*"##g;
+
+ # Remove single-quoted strings.
+ #$_[0] =~ s#'(\\.|[^'\\])*'##g;
+
+ # Remove leading whitespace.
+ $_[0] =~ s/\A\s+//m ;
+
+ # Remove trailing whitespace.
+ $_[0] =~ s/\s+\Z//m ;
+
+ # Replace all multiple whitespace by a single space.
+ #$_[0] =~ s/\s+/ /g ;
+}
+
+
+sub readFile
+{
+ my $filename = shift ;
+    open F, "<$filename" or die "Cannot open $filename: $!\n" ;
+ local $/ ;
+ my $x = <F> ;
+ close F ;
+ return $x ;
+}
+
+sub tuple
+{
+ my (@a) = split(/\./, $a) ;
+ my (@b) = split(/\./, $b) ;
+ if (@a != @b) {
+ my $diff = @a - @b ;
+	push @b, (0) x $diff if $diff > 0 ;
+	push @a, (0) x -$diff if $diff < 0 ;
+ }
+ foreach $A (@a) {
+ $B = shift @b ;
+ $A == $B or return $A <=> $B ;
+ }
+ return 0;
+}
+
+__END__
+
diff --git a/libdb/perl/BerkeleyDB/t/btree.t b/libdb/perl/BerkeleyDB/t/btree.t
new file mode 100644
index 0000000..fd6ed8f
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/btree.t
@@ -0,0 +1,931 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..244\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Btree -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Btree -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Btree
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $db->status() == DB_NOTFOUND ;
+ ok 17, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 18, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 19, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 20, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 21, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 22, $db->db_get("key", $value) == 0 ;
+ ok 23, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 24, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put("some key", "some value") == 0 ;
+ ok 31, $db->db_get("some key", $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'};
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'};
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 46, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 47, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 48, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 49, tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 50, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 51, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 52, (tied %hash)->status() == 0 ;
+ ok 53, $hash{"some key"} eq "some value";
+ ok 54, defined $hash{"some key"} ;
+ ok 55, (tied %hash)->status() == 0 ;
+ ok 56, exists $hash{"some key"} ;
+ ok 57, !defined $hash{"jimmy"} ;
+ ok 58, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 59, !exists $hash{"jimmy"} ;
+ ok 60, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, ! defined $hash{"some key"} ;
+ ok 63, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 64, ! exists $hash{"some key"} ;
+ ok 65, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 66, $count == 3 ;
+ ok 67, $keys == 1011 ;
+ ok 68, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 69, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # override default compare
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+ ok 70, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 71, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 72, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+ }
+
+ sub ArrayCompare
+ {
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+ }
+
+ ok 73, ArrayCompare (\@srt_1, [keys %h]);
+ ok 74, ArrayCompare (\@srt_2, [keys %g]);
+ ok 75, ArrayCompare (\@srt_3, [keys %k]);
+
+}
+
+{
+ # override default compare, with duplicates, don't sort values
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 def ) ;
+ my @Values = qw( 1 0 3 dd x abc 0 ) ;
+ ok 76, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 77, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 78, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ $k{$_} = $value ;
+ }
+
+ sub getValues
+ {
+ my $hash = shift ;
+ my $db = tied %$hash ;
+ my $cursor = $db->db_cursor() ;
+ my @values = () ;
+ my ($k, $v) = (0,0) ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ push @values, $v ;
+ }
+ return @values ;
+ }
+
+ ok 79, ArrayCompare (\@srt_1, [keys %h]);
+ ok 80, ArrayCompare (\@srt_2, [keys %g]);
+ ok 81, ArrayCompare (\@srt_3, [keys %k]);
+ ok 82, ArrayCompare ([qw(dd 0 0 x 3 1 abc)], [getValues \%h]);
+ ok 83, ArrayCompare ([qw(dd 1 0 3 x abc 0)], [getValues \%g]);
+ ok 84, ArrayCompare ([qw(0 x 3 0 1 dd abc)], [getValues \%k]);
+
+    # test DB_NEXT_DUP
+ ok 85, my $cur = (tied %g)->db_cursor() ;
+ my ($k, $v) = (9, "") ;
+ ok 86, $cur->c_get($k, $v, DB_SET) == 0 ;
+ ok 87, $k == 9 && $v == 0 ;
+ ok 88, $cur->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 89, $k == 9 && $v eq "x" ;
+ ok 90, $cur->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+}
+
+{
+ # override default compare, with duplicates, sort values
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my $value ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+ ok 91, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 92, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 93, ArrayCompare (\@srt_1, [keys %h]);
+ ok 94, ArrayCompare (\@srt_2, [keys %g]);
+ ok 95, ArrayCompare ([qw(dd 1 3 x 2 11 abc 0)], [getValues \%g]);
+ ok 96, ArrayCompare ([qw(dd 0 11 2 x 3 1 abc)], [getValues \%h]);
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 97, my $YY = tie %hh, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 98, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 99, scalar $YY->get_dup('Smith') == 1 ;
+ ok 100, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 101, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 102, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 103, (@wall == 3 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 104, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 105, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 106, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 107, my $db = tie %hash, 'BerkeleyDB::Btree' ;
+
+ ok 108, $db->db_put("some key", "some value") == 0 ;
+ ok 109, $db->db_get("some key", $value) == 0 ;
+ ok 110, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 111, my $db = new BerkeleyDB::Btree, -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 112, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 113, ! $pon && $off == 0 && $len == 0 ;
+ ok 114, $db->db_get("red", $value) == 0 && $value eq "bo" ;
+ ok 115, $db->db_get("green", $value) == 0 && $value eq "ho" ;
+ ok 116, $db->db_get("blue", $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 117, $pon ;
+ ok 118, $off == 0 ;
+ ok 119, $len == 2 ;
+ ok 120, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 121, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 122, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 3 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 127, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 128, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 129, $db->db_put("red", "") == 0 ;
+ ok 130, $db->db_put("green", "AB") == 0 ;
+ ok 131, $db->db_put("blue", "XYZ") == 0 ;
+ ok 132, $db->db_put("new", "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 133, $pon ;
+ ok 134, $off == 0 ;
+ ok 135, $len == 2 ;
+ ok 136, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 137, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 138, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 139, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 140, ! $pon ;
+ ok 141, $off == 0 ;
+ ok 142, $len == 0 ;
+ ok 143, $db->db_put("red", "PPP") == 0 ;
+ ok 144, $db->db_put("green", "Q") == 0 ;
+ ok 145, $db->db_put("blue", "XYZ") == 0 ;
+ ok 146, $db->db_put("new", "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 147, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 148, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 149, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 150, $db->db_get("new", $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 151, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 152, $hash{"red"} eq "bo" ;
+ ok 153, $hash{"green"} eq "ho" ;
+ ok 154, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 155, $hash{"red"} eq "t" ;
+ ok 156, $hash{"green"} eq "se" ;
+ ok 157, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 158, $hash{"red"} eq "boat" ;
+ ok 159, $hash{"green"} eq "house" ;
+ ok 160, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 161, $hash{"red"} = "" ;
+ ok 162, $hash{"green"} = "AB" ;
+ ok 163, $hash{"blue"} = "XYZ" ;
+ ok 164, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 165, $hash{"red"} eq "at" ;
+ ok 166, $hash{"green"} eq "ABuse" ;
+ ok 167, $hash{"blue"} eq "XYZa" ;
+ ok 168, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 169, $hash{"red"} = "PPP" ;
+ ok 170, $hash{"green"} = "Q" ;
+ ok 171, $hash{"blue"} = "XYZ" ;
+ ok 172, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 173, $hash{"red"} eq "at\0PPP" ;
+ ok 174, $hash{"green"} eq "ABuQ" ;
+ ok 175, $hash{"blue"} eq "XYZXYZ" ;
+ ok 176, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 177, my $lexD = new LexDir($home) ;
+ ok 178, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 179, my $txn = $env->txn_begin() ;
+ ok 180, my $db1 = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 181, (my $Z = $txn->txn_commit()) == 0 ;
+ ok 182, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 183, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 184, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 185, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ #ok 151, $txn->txn_abort() == 0 ;
+ ok 186, ($Z = $txn->txn_abort()) == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 187, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 188, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 189, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 190, keys %hash == 6 ;
+
+ # create a cursor
+ ok 191, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 192, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 193, $key eq "Wall" && $value eq "Larry" ;
+ ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 195, $key eq "Wall" && $value eq "Stone" ;
+ ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 197, $key eq "Wall" && $value eq "Brick" ;
+ ok 198, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 199, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 200, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+#print "bt_flags " . $ref->{bt_flags} . " DB_DUP " . DB_DUP ."\n";
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 200, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Minkey =>3 ,
+ -Pagesize => 2 **12
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 201, $ref->{$recs} == 0;
+ ok 202, $ref->{'bt_minkey'} == 3;
+ ok 203, $ref->{'bt_pagesize'} == 2 ** 12;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 204, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 205, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Btree);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 206, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 207, $@ eq "" && $X ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 208, $@ eq "" ;
+ main::ok 209, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 210, $@ eq "" ;
+ main::ok 211, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 212, $@ eq "" ;
+ main::ok 213, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 214, $@ eq "" ;
+ main::ok 215, $ret eq "[[10]]" ;
+
+ undef $X;
+ untie %h;
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_RECNUM, DB_SET_RECNO & DB_GET_RECNO
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) = ("", "");
+ ok 216, my $db = new BerkeleyDB::Btree
+ -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Property => DB_RECNUM ;
+
+
+ # create some data
+ my @data = (
+ "A zero",
+ "B one",
+ "C two",
+ "D three",
+ "E four"
+ ) ;
+
+ my $ix = 0 ;
+ my $ret = 0 ;
+ foreach (@data) {
+ $ret += $db->db_put($_, $ix) ;
+ ++ $ix ;
+ }
+ ok 217, $ret == 0 ;
+
+ # db_get & DB_SET_RECNO
+ $k = 1 ;
+ ok 218, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 219, $k eq "B one" && $v == 1 ;
+
+ $k = 3 ;
+ ok 220, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 221, $k eq "D three" && $v == 3 ;
+
+ $k = 4 ;
+ ok 222, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 223, $k eq "E four" && $v == 4 ;
+
+ $k = 0 ;
+ ok 224, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 225, $k eq "A zero" && $v == 0 ;
+
+ # cursor & DB_SET_RECNO
+
+ # create the cursor
+ ok 226, my $cursor = $db->db_cursor() ;
+
+ $k = 2 ;
+ ok 227, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 228, $k eq "C two" && $v == 2 ;
+
+ $k = 0 ;
+ ok 229, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
+ ok 230, $k eq "A zero" && $v == 0 ;
+
+ $k = 3 ;
+ ok 231, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 232, $k eq "D three" && $v == 3 ;
+
+ # cursor & DB_GET_RECNO
+ ok 233, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 234, $k eq "A zero" && $v == 0 ;
+ ok 235, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 236, $v == 0 ;
+
+ ok 237, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 238, $k eq "B one" && $v == 1 ;
+ ok 239, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 240, $v == 1 ;
+
+ ok 241, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 242, $k eq "E four" && $v == 4 ;
+ ok 243, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 244, $v == 4 ;
+
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/db-3.0.t b/libdb/perl/BerkeleyDB/t/db-3.0.t
new file mode 100644
index 0000000..0a213e5
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/db-3.0.t
@@ -0,0 +1,90 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipped - this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..14\n";
+
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # set_mutexlocks
+
+ my $home = "./fred" ;
+ ok 1, my $lexD = new LexDir($home) ;
+ chdir "./fred" ;
+ ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ ok 3, $env->set_mutexlocks(0) == 0 ;
+ chdir ".." ;
+ undef $env ;
+}
+
+{
+ # c_dup
+
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 4, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 5, $ret == 0 ;
+
+ # create a cursor
+ ok 6, my $cursor = $db->db_cursor() ;
+
+ # point to a specific k/v pair
+ $k = "green" ;
+ ok 7, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 8, $v eq "house" ;
+
+ # duplicate the cursor
+ my $dup_cursor = $cursor->c_dup(DB_POSITION);
+ ok 9, $dup_cursor ;
+
+ # move original cursor off green/house
+ $cursor->c_get($k, $v, DB_NEXT) ;
+ ok 10, $k ne "green" ;
+ ok 11, $v ne "house" ;
+
+ # duplicate cursor should still be on green/house
+ ok 12, $dup_cursor->c_get($k, $v, DB_CURRENT) == 0;
+ ok 13, $k eq "green" ;
+ ok 14, $v eq "house" ;
+
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/db-3.1.t b/libdb/perl/BerkeleyDB/t/db-3.1.t
new file mode 100644
index 0000000..bc0516f
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/db-3.1.t
@@ -0,0 +1,199 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.1) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.1.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..35\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # c_count
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 1, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 2, keys %hash == 6 ;
+
+ # create a cursor
+ ok 3, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 4, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 5, $key eq "Wall" && $value eq "Larry" ;
+
+ my $count ;
+ ok 6, $cursor->c_count($count) == 0 ;
+ ok 7, $count == 4 ;
+
+ $key = "Smith" ;
+ ok 8, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 9, $key eq "Smith" && $value eq "John" ;
+
+ ok 10, $cursor->c_count($count) == 0 ;
+ ok 11, $count == 1 ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_key_range
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 12, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 13, keys %hash == 6 ;
+
+ my $key = "Wall" ;
+ my ($less, $equal, $greater) ;
+ ok 14, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 15, $less != 0 ;
+ ok 16, $equal != 0 ;
+ ok 17, $greater != 0 ;
+
+ $key = "Smith" ;
+ ok 18, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 19, $less == 0 ;
+ ok 20, $equal != 0 ;
+ ok 21, $greater != 0 ;
+
+ $key = "NotThere" ;
+ ok 22, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 23, $less == 0 ;
+ ok 24, $equal == 0 ;
+ ok 25, $greater == 1 ;
+
+ undef $db ;
+ untie %hash ;
+
+}
+
+{
+ # rename
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 26, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ ok 27, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Subname => "joe" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 28, addData($db1, %data) ;
+ ok 29, addData($db2, %data) ;
+
+ undef $db1 ;
+ undef $db2 ;
+
+ # now rename
+ ok 30, BerkeleyDB::db_rename(-Filename => $Dfile,
+ -Subname => "fred",
+ -Newname => "harry") == 0;
+
+ ok 31, my $db3 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "harry" ;
+
+}
+
+{
+ # verify
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+
+ ok 32, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 33, addData($db1, %data) ;
+
+ undef $db1 ;
+
+ # now verify
+ ok 34, BerkeleyDB::db_verify(-Filename => $Dfile,
+ -Subname => "fred",
+ ) == 0;
+
+ # now verify & dump
+ ok 35, BerkeleyDB::db_verify(-Filename => $Dfile,
+ -Subname => "fred",
+ -Outfile => $Dfile2,
+ ) == 0;
+
+}
+
+# db_remove with env
+
diff --git a/libdb/perl/BerkeleyDB/t/db-3.2.t b/libdb/perl/BerkeleyDB/t/db-3.2.t
new file mode 100644
index 0000000..38c68f4
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/db-3.2.t
@@ -0,0 +1,65 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.2) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.2.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..6\n";
+
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # set_q_extentsize
+
+ ok 1, 1 ;
+}
+
+{
+ # env->set_flags
+
+ my $home = "./fred" ;
+ ok 2, my $lexD = new LexDir($home) ;
+ ok 3, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ,
+ -SetFlags => DB_NOMMAP ;
+
+ undef $env ;
+}
+
+{
+ # env->set_flags
+
+ my $home = "./fred" ;
+ ok 4, my $lexD = new LexDir($home) ;
+ ok 5, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ;
+ ok 6, ! $env->set_flags(DB_NOMMAP, 1);
+
+ undef $env ;
+}
diff --git a/libdb/perl/BerkeleyDB/t/db-3.3.t b/libdb/perl/BerkeleyDB/t/db-3.3.t
new file mode 100644
index 0000000..e186863
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/db-3.3.t
@@ -0,0 +1,174 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+umask(0);
+
+print "1..37\n";
+
+{
+ # db->truncate
+
+ my $Dfile;
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 1, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 2, $ret == 0 ;
+
+ # check there are three records
+ ok 3, countRecords($db) == 3 ;
+
+ # now truncate the database
+ my $count = 0;
+ ok 4, $db->truncate($count) == 0 ;
+
+ ok 5, $count == 3 ;
+ ok 6, countRecords($db) == 0 ;
+
+}
+
+{
+ # db->associate -- secondary keys
+
+ sub sec_key
+ {
+ #print "in sec_key\n";
+ my $pkey = shift ;
+ my $pdata = shift ;
+
+ $_[0] = $pdata ;
+ return 0;
+ }
+
+ my ($Dfile1, $Dfile2);
+ my $lex = new LexFile $Dfile1, $Dfile2 ;
+ my %hash ;
+ my $status;
+ my ($k, $v, $pk) = ('','','');
+
+ # create primary database
+ ok 7, my $primary = new BerkeleyDB::Hash -Filename => $Dfile1,
+ -Flags => DB_CREATE ;
+
+ # create secondary database
+ ok 8, my $secondary = new BerkeleyDB::Hash -Filename => $Dfile2,
+ -Flags => DB_CREATE ;
+
+ # associate primary with secondary
+ ok 9, $primary->associate($secondary, \&sec_key) == 0;
+
+ # add data to the primary
+ my %data = (
+ "red" => "flag",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ my $r = $primary->db_put($k, $v) ;
+ #print "put $r $BerkeleyDB::Error\n";
+ $ret += $r;
+ }
+ ok 10, $ret == 0 ;
+
+ # check the records in the secondary
+ ok 11, countRecords($secondary) == 3 ;
+
+ ok 12, $secondary->db_get("house", $v) == 0;
+ ok 13, $v eq "house";
+
+ ok 14, $secondary->db_get("sea", $v) == 0;
+ ok 15, $v eq "sea";
+
+ ok 16, $secondary->db_get("flag", $v) == 0;
+ ok 17, $v eq "flag";
+
+ # pget to primary database is illegal
+ ok 18, $primary->db_pget('red', $pk, $v) != 0 ;
+
+ # pget to secondary database is ok
+ ok 19, $secondary->db_pget('house', $pk, $v) == 0 ;
+ ok 20, $pk eq 'green';
+ ok 21, $v eq 'house';
+
+ ok 22, my $p_cursor = $primary->db_cursor();
+ ok 23, my $s_cursor = $secondary->db_cursor();
+
+ # c_get from primary
+ $k = 1;
+ ok 24, $p_cursor->c_get($k, $v, DB_FIRST) == 0;
+
+ # c_get from secondary
+ ok 25, $s_cursor->c_get($k, $v, DB_FIRST) == 0;
+
+ # c_pget from primary database should fail
+ $k = 1;
+ ok 26, $p_cursor->c_pget($k, $pk, $v, DB_FIRST) != 0;
+
+ # c_pget from secondary database
+ ok 27, $s_cursor->c_pget($k, $pk, $v, DB_FIRST) == 0;
+
+ # check put to secondary is illegal
+ ok 28, $secondary->db_put("tom", "dick") != 0;
+ ok 29, countRecords($secondary) == 3 ;
+
+ # delete from primary
+ ok 30, $primary->db_del("green") == 0 ;
+ ok 31, countRecords($primary) == 2 ;
+
+ # check has been deleted in secondary
+ ok 32, $secondary->db_get("house", $v) != 0;
+ ok 33, countRecords($secondary) == 2 ;
+
+ # delete from secondary
+ ok 34, $secondary->db_del('flag') == 0 ;
+ ok 35, countRecords($secondary) == 1 ;
+
+
+ # check deleted from primary
+ ok 36, $primary->db_get("red", $v) != 0;
+ ok 37, countRecords($primary) == 1 ;
+
+}
+
+
+ # db->associate -- multiple secondary keys
+
+
+ # db->associate -- same again but when DB_DUP is specified.
+
diff --git a/libdb/perl/BerkeleyDB/t/destroy.t b/libdb/perl/BerkeleyDB/t/destroy.t
new file mode 100644
index 0000000..7457d36
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/destroy.t
@@ -0,0 +1,105 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..15\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # let object destruction kill everything
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ ok 1, my $lexD = new LexDir($home) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 3, my $txn = $env->txn_begin() ;
+ ok 4, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 5, $txn->txn_commit() == 0 ;
+ ok 6, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 7, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 8, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 9, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 10, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 11, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 12, $count == 0 ;
+
+ #undef $txn ;
+ #undef $cursor ;
+ #undef $db1 ;
+ #undef $env ;
+ #untie %hash ;
+
+}
+
+{
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $cursor ;
+ my ($k, $v) = ("", "") ;
+ ok 13, my $db1 = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ my $count = 0 ;
+ # sequence forwards
+ ok 14, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 15, $count == 0 ;
+}
+
+
diff --git a/libdb/perl/BerkeleyDB/t/env.t b/libdb/perl/BerkeleyDB/t/env.t
new file mode 100644
index 0000000..3905abf
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/env.t
@@ -0,0 +1,217 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..47\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # db version stuff
+ my ($major, $minor, $patch) = (0, 0, 0) ;
+
+ ok 1, my $VER = BerkeleyDB::DB_VERSION_STRING ;
+ ok 2, my $ver = BerkeleyDB::db_version($major, $minor, $patch) ;
+ ok 3, $VER eq $ver ;
+ ok 4, $major > 1 ;
+ ok 5, defined $minor ;
+ ok 6, defined $patch ;
+}
+
+{
+ # Check for invalid parameters
+ my $env ;
+ eval ' $env = new BerkeleyDB::Env( -Stupid => 3) ; ' ;
+ ok 7, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $env = new BerkeleyDB::Env( -Bad => 2, -Home => "/tmp", -Stupid => 3) ; ' ;
+ ok 8, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $env = new BerkeleyDB::Env (-Config => {"fred" => " "} ) ; ' ;
+ ok 9, !$env ;
+ ok 10, $BerkeleyDB::Error =~ /^illegal name-value pair/ ;
+}
+
+{
+ # create a very simple environment
+ my $home = "./fred" ;
+ ok 11, my $lexD = new LexDir($home) ;
+ chdir "./fred" ;
+ ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ chdir ".." ;
+ undef $env ;
+}
+
+{
+ # create an environment with a Home
+ my $home = "./fred" ;
+ ok 13, my $lexD = new LexDir($home) ;
+ ok 14, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ;
+
+ undef $env ;
+}
+
+{
+ # make new fail.
+ my $home = "./not_there" ;
+ rmtree $home ;
+ ok 15, ! -d $home ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_INIT_LOCK ;
+ ok 16, ! $env ;
+ ok 17, $! != 0 || $^E != 0 ;
+
+ rmtree $home ;
+}
+
+{
+ # Config
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 18, my $lexD = new LexDir($home) ;
+ ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 21, $env ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+
+ my %hash ;
+ ok 23, tie %hash, 'BerkeleyDB::Hash', -Filename => $data_file,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ $hash{"abc"} = 123 ;
+ $hash{"def"} = 456 ;
+
+ $txn->txn_commit() ;
+
+ untie %hash ;
+
+ undef $txn ;
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filename
+ my $errfile = "./errfile" ;
+ my $home = "./fred" ;
+ ok 24, my $lexD = new LexDir($home) ;
+ my $lex = new LexFile $errfile ;
+ ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 26, !$db ;
+
+ ok 27, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ ok 28, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 29, $BerkeleyDB::Error eq $contents ;
+
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filehandle/reference -- should fail
+ my $home = "./fred" ;
+ ok 30, my $lexD = new LexDir($home) ;
+ eval { my $env = new BerkeleyDB::Env( -ErrFile => [],
+ -Flags => DB_CREATE,
+ -Home => $home) ; };
+ ok 31, $@ =~ /ErrFile parameter must be a file name/;
+}
+
+{
+ # -ErrPrefix
+ use IO ;
+ my $home = "./fred" ;
+ ok 32, my $lexD = new LexDir($home) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 33, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -ErrPrefix => "PREFIX",
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 34, !$db ;
+
+ ok 35, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
+ ok 36, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 37, $BerkeleyDB::Error eq $contents ;
+
+ # change the prefix on the fly
+ my $old = $env->errPrefix("NEW ONE") ;
+ ok 38, $old eq "PREFIX" ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 39, !$db ;
+ ok 40, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 41, $contents =~ /$BerkeleyDB::Error$/ ;
+ undef $env ;
+}
+
+{
+ # test db_appexit
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 42, my $lexD = new LexDir($home);
+ ok 43, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 44, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 45, $env ;
+
+ ok 46, my $txn_mgr = $env->TxnMgr() ;
+
+ ok 47, $env->db_appexit() == 0 ;
+
+}
+
+# test -Verbose
+# test -Flags
+# db_value_set
diff --git a/libdb/perl/BerkeleyDB/t/examples.t b/libdb/perl/BerkeleyDB/t/examples.t
new file mode 100644
index 0000000..69b7f8f
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/examples.t
@@ -0,0 +1,401 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+print "1..7\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+	 or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/examples.t.T b/libdb/perl/BerkeleyDB/t/examples.t.T
new file mode 100644
index 0000000..fe9bdf7
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/examples.t.T
@@ -0,0 +1,415 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+print "1..7\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+## END simpleHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash2
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END simpleHash2
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSimple
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSimple
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSortOrder
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSortOrder
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN nullFilter
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END nullFilter
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN intFilter
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END intFilter
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+	 or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+## BEGIN simpleRecno
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+## END simpleRecno
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/examples3.t b/libdb/perl/BerkeleyDB/t/examples3.t
new file mode 100644
index 0000000..22e94b7
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/examples3.t
@@ -0,0 +1,132 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/libdb/perl/BerkeleyDB/t/examples3.t.T b/libdb/perl/BerkeleyDB/t/examples3.t.T
new file mode 100644
index 0000000..5eeaa14
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/examples3.t.T
@@ -0,0 +1,136 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupSortHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupSortHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/libdb/perl/BerkeleyDB/t/filter.t b/libdb/perl/BerkeleyDB/t/filter.t
new file mode 100644
index 0000000..47a7c10
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/filter.t
@@ -0,0 +1,217 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..46\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+{
+ # DBM Filter tests
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok 1, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok 2, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 3, $h{"fred"} eq "joe";
+ # fk sk fv sv
+ ok 4, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 5, $db->FIRSTKEY() eq "fred" ;
+ # fk sk fv sv
+ ok 6, checkOutput( "fred", "", "", "") ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok 7, checkOutput( "", "fred", "", "Jxe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 8, $h{"Fred"} eq "[Jxe]";
+ # fk sk fv sv
+ ok 9, checkOutput( "", "fred", "[Jxe]", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 10, $db->FIRSTKEY() eq "FRED" ;
+ # fk sk fv sv
+ ok 11, checkOutput( "FRED", "", "", "") ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 12, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 13, $h{"fred"} eq "joe";
+ ok 14, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 15, $db->FIRSTKEY() eq "fred" ;
+ ok 16, checkOutput( "fred", "", "", "") ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 17, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 18, $h{"fred"} eq "joe";
+ ok 19, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 20, $db->FIRSTKEY() eq "fred" ;
+ ok 21, checkOutput( "", "", "", "") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok 22, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok 23, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 24, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 25, ! defined $result{"fetch key"} ;
+ ok 26, ! defined $result{"fetch value"} ;
+ ok 27, $_ eq "original" ;
+
+ ok 28, $db->FIRSTKEY() eq "fred" ;
+ ok 29, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 30, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 31, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 32, ! defined $result{"fetch value"} ;
+ ok 33, $_ eq "original" ;
+
+ $h{"jim"} = "john" ;
+ ok 34, $result{"store key"} eq "store key - 2: [fred jim]" ;
+ ok 35, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 36, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 37, ! defined $result{"fetch value"} ;
+ ok 38, $_ eq "original" ;
+
+ ok 39, $h{"fred"} eq "joe" ;
+ ok 40, $result{"store key"} eq "store key - 3: [fred jim fred]" ;
+ ok 41, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 42, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 43, $result{"fetch value"} eq "fetch value - 1: [joe]" ;
+ ok 44, $_ eq "original" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok 45, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok 46, $@ =~ /^BerkeleyDB Aborting: recursion detected in filter_store_key at/ ;
+ #print "[$@]\n" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/hash.t b/libdb/perl/BerkeleyDB/t/hash.t
new file mode 100644
index 0000000..0e68385
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/hash.t
@@ -0,0 +1,728 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..212\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Hash -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Hash -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to HASH
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 17, $db->status() == DB_NOTFOUND ;
+ ok 18, $db->status() eq $DB_errors{'DB_NOTFOUND'};
+
+ ok 19, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 20, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 21, $db->status() eq $DB_errors{'DB_KEYEXIST'};
+ ok 22, $db->status() == DB_KEYEXIST ;
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 23, $db->db_get("key", $value) == 0 ;
+ ok 24, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 27, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 28, my $lexD = new LexDir($home);
+
+ ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 31, $db->db_put("some key", "some value") == 0 ;
+ ok 32, $db->db_get("some key", $value) == 0 ;
+ ok 33, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+{
+ # override default hash
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ $::count = 0 ;
+ ok 34, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Hash => sub { ++$::count ; length $_[0] },
+ -Flags => DB_CREATE ;
+
+ ok 35, $db->db_put("some key", "some value") == 0 ;
+ ok 36, $db->db_get("some key", $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $::count > 0 ;
+
+}
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 39, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 40, $ret == 0 ;
+
+ # create the cursor
+ ok 41, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 42, $cursor->status() == DB_NOTFOUND ;
+ ok 43, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 46, $status == DB_NOTFOUND ;
+ ok 47, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 48, $cursor->status() == $status ;
+ ok 49, $cursor->status() eq $status ;
+ ok 50, keys %copy == 0 ;
+ ok 51, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 52, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 53, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 54, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 55, tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 56, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 57, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 58, (tied %hash)->status() == 0 ;
+ ok 59, $hash{"some key"} eq "some value";
+ ok 60, defined $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, exists $hash{"some key"} ;
+ ok 63, !defined $hash{"jimmy"} ;
+ ok 64, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 65, !exists $hash{"jimmy"} ;
+ ok 66, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 67, (tied %hash)->status() == 0 ;
+ ok 68, ! defined $hash{"some key"} ;
+ ok 69, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 70, ! exists $hash{"some key"} ;
+ ok 71, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 72, $count == 3 ;
+ ok 73, $keys == 1011 ;
+ ok 74, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 75, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 76, my $db = tie %hash, 'BerkeleyDB::Hash' ;
+
+ ok 77, $db->db_put("some key", "some value") == 0 ;
+ ok 78, $db->db_get("some key", $value) == 0 ;
+ ok 79, $value eq "some value" ;
+
+ undef $db ;
+ untie %hash ;
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 80, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 81, $ret == 0 ;
+
+
+ # do a partial get
+ my($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 82, $pon == 0 && $off == 0 && $len == 0 ;
+ ok 83, ( $db->db_get("red", $value) == 0) && $value eq "bo" ;
+ ok 84, ( $db->db_get("green", $value) == 0) && $value eq "ho" ;
+ ok 85, ( $db->db_get("blue", $value) == 0) && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 86, $pon ;
+ ok 87, $off == 0 ;
+ ok 88, $len == 2 ;
+ ok 89, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 90, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 91, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 92, $pon ;
+ ok 93, $off == 3 ;
+ ok 94, $len == 2 ;
+ ok 95, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 96, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 97, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 98, ! $pon ;
+ ok 99, $off == 0 ;
+ ok 100, $len == 0 ;
+ ok 101, $db->db_put("red", "") == 0 ;
+ ok 102, $db->db_put("green", "AB") == 0 ;
+ ok 103, $db->db_put("blue", "XYZ") == 0 ;
+ ok 104, $db->db_put("new", "KLM") == 0 ;
+
+ $db->partial_clear() ;
+ ok 105, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 106, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 107, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 108, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 109, $db->db_put("red", "PPP") == 0 ;
+ ok 110, $db->db_put("green", "Q") == 0 ;
+ ok 111, $db->db_put("blue", "XYZ") == 0 ;
+ ok 112, $db->db_put("new", "--") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 117, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 118, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 119, $db->db_get("new", $value) == 0 && $value eq "KLM--" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 120, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 121, $hash{"red"} eq "bo" ;
+ ok 122, $hash{"green"} eq "ho" ;
+ ok 123, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 124, $hash{"red"} eq "t" ;
+ ok 125, $hash{"green"} eq "se" ;
+ ok 126, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 127, $hash{"red"} eq "boat" ;
+ ok 128, $hash{"green"} eq "house" ;
+ ok 129, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 130, $hash{"red"} = "" ;
+ ok 131, $hash{"green"} = "AB" ;
+ ok 132, $hash{"blue"} = "XYZ" ;
+ ok 133, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 134, $hash{"red"} eq "at" ;
+ ok 135, $hash{"green"} eq "ABuse" ;
+ ok 136, $hash{"blue"} eq "XYZa" ;
+ ok 137, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 138, $hash{"red"} = "PPP" ;
+ ok 139, $hash{"green"} = "Q" ;
+ ok 140, $hash{"blue"} = "XYZ" ;
+ ok 141, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 142, $hash{"red"} eq "at\0PPP" ;
+ ok 143, $hash{"green"} eq "ABuQ" ;
+ ok 144, $hash{"blue"} eq "XYZXYZ" ;
+ ok 145, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 146, my $lexD = new LexDir($home);
+ ok 147, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 148, my $txn = $env->txn_begin() ;
+ ok 149, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 150, $txn->txn_commit() == 0 ;
+ ok 151, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 152, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 153, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 154, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 155, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 156, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 157, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 158, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 159, keys %hash == 6 ;
+
+ # create a cursor
+ ok 160, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 161, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 162, $key eq "Wall" && $value eq "Larry" ;
+ ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 164, $key eq "Wall" && $value eq "Stone" ;
+ ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 166, $key eq "Wall" && $value eq "Brick" ;
+ ok 167, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 168, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 143, $ref->{bt_flags} | DB_DUP ;
+
+ # test DB_DUP_NEXT
+ my ($k, $v) = ("Wall", "") ;
+ ok 169, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 170, $k eq "Wall" && $v eq "Larry" ;
+ ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 172, $k eq "Wall" && $v eq "Stone" ;
+ ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 174, $k eq "Wall" && $v eq "Brick" ;
+ ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 176, $k eq "Wall" && $v eq "Brick" ;
+ ok 177, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # DB_DUP & DupCompare
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my ($key, $value) ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+
+ ok 178, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ ok 179, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 180, my $cursor = (tied %h)->db_cursor() ;
+ $key = 9 ; $value = "";
+ ok 181, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 182, $key == 9 && $value eq 11 ;
+ ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 184, $key == 9 && $value == 2 ;
+ ok 185, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 186, $key == 9 && $value eq "x" ;
+
+ $cursor = (tied %g)->db_cursor() ;
+ $key = 9 ;
+ ok 187, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 188, $key == 9 && $value eq "x" ;
+ ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 190, $key == 9 && $value == 2 ;
+ ok 191, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 192, $key == 9 && $value == 11 ;
+
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 193, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 194, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 195, scalar $YY->get_dup('Smith') == 1 ;
+ ok 196, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 197, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 198, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 199, (@wall == 3 && $wall{'Larry'}
+ && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 200, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 201, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 202, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Hash);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 203, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbhash.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 204, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 205, $@ eq "" ;
+ main::ok 206, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 211, $@ eq "" ;
+ main::ok 212, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
diff --git a/libdb/perl/BerkeleyDB/t/join.t b/libdb/perl/BerkeleyDB/t/join.t
new file mode 100644
index 0000000..ed9b6a2
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/join.t
@@ -0,0 +1,225 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+if ($BerkeleyDB::db_ver < 2.005002)
+{
+ print "1..0 # Skip: join needs Berkeley DB 2.5.2 or later\n" ;
+ exit 0 ;
+}
+
+
+print "1..37\n";
+
+my $Dfile1 = "dbhash1.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+umask(0) ;
+
+{
+ # error cases
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my $value ;
+ my $status ;
+ my $cursor ;
+
+ ok 1, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] lt $_[1] },
+ -Property => DB_DUP|DB_DUPSORT ;
+
+ # no cursors supplied
+ eval '$cursor = $db1->db_join() ;' ;
+ ok 2, $@ =~ /Usage: \$db->BerkeleyDB::Common::db_join\Q([cursors], flags=0)/;
+
+ # empty list
+ eval '$cursor = $db1->db_join([]) ;' ;
+ ok 3, $@ =~ /db_join: No cursors in parameter list/;
+
+ # cursor list, isn't a []
+ eval '$cursor = $db1->db_join({}) ;' ;
+ ok 4, $@ =~ /cursors is not an array reference at/ ;
+
+ eval '$cursor = $db1->db_join(\1) ;' ;
+ ok 5, $@ =~ /cursors is not an array reference at/ ;
+
+}
+
+{
+ # test a 2-way & 3-way join
+
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my %hash2 ;
+ my %hash3 ;
+ my $value ;
+ my $status ;
+
+ my $home = "./fred" ;
+ ok 6, my $lexD = new LexDir($home);
+ ok 7, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN
+ |DB_INIT_MPOOL;
+ #|DB_INIT_MPOOL| DB_INIT_LOCK;
+ ok 8, my $txn = $env->txn_begin() ;
+ ok 9, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+ ;
+
+ ok 10, my $db2 = tie %hash2, 'BerkeleyDB::Hash',
+ -Filename => $Dfile2,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 11, my $db3 = tie %hash3, 'BerkeleyDB::Btree',
+ -Filename => $Dfile3,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 12, addData($db1, qw( apple Convenience
+ peach Shopway
+ pear Farmer
+ raspberry Shopway
+ strawberry Shopway
+ gooseberry Farmer
+ blueberry Farmer
+ ));
+
+ ok 13, addData($db2, qw( red apple
+ red raspberry
+ red strawberry
+ yellow peach
+ yellow pear
+ green gooseberry
+ blue blueberry)) ;
+
+ ok 14, addData($db3, qw( expensive apple
+ reasonable raspberry
+ expensive strawberry
+ reasonable peach
+ reasonable pear
+ expensive gooseberry
+ reasonable blueberry)) ;
+
+ ok 15, my $cursor2 = $db2->db_cursor() ;
+ my $k = "red" ;
+ my $v = "" ;
+ ok 16, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ # Two way Join
+ ok 17, my $cursor1 = $db1->db_join([$cursor2]) ;
+
+ my %expected = qw( apple Convenience
+ raspberry Shopway
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 18, keys %expected == 0 ;
+ ok 19, $cursor1->status() == DB_NOTFOUND ;
+
+ # Three way Join
+ ok 20, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 21, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 22, my $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 23, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 24, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple Convenience
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 25, keys %expected == 0 ;
+ ok 26, $cursor1->status() == DB_NOTFOUND ;
+
+ # test DB_JOIN_ITEM
+ # #################
+ ok 27, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 28, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 29, $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 30, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 31, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple 1
+ strawberry 1
+ ) ;
+
+ # sequence forwards
+ $k = "" ;
+ $v = "" ;
+ while ($cursor1->c_get($k, $v, DB_JOIN_ITEM) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} ;
+ #print "[$k]\n" ;
+ }
+ ok 32, keys %expected == 0 ;
+ ok 33, $cursor1->status() == DB_NOTFOUND ;
+
+ ok 34, $cursor1->c_close() == 0 ;
+ ok 35, $cursor2->c_close() == 0 ;
+ ok 36, $cursor3->c_close() == 0 ;
+
+ ok 37, ($status = $txn->txn_commit) == 0;
+
+ undef $txn ;
+ #undef $cursor1;
+ #undef $cursor2;
+ #undef $cursor3;
+ undef $db1 ;
+ undef $db2 ;
+ undef $db3 ;
+ undef $env ;
+ untie %hash1 ;
+ untie %hash2 ;
+ untie %hash3 ;
+}
+print "# at the end\n";
diff --git a/libdb/perl/BerkeleyDB/t/mldbm.t b/libdb/perl/BerkeleyDB/t/mldbm.t
new file mode 100644
index 0000000..d35f7e1
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/mldbm.t
@@ -0,0 +1,161 @@
+#!/usr/bin/perl -w
+
+use strict ;
+
+BEGIN
+{
+ if ($] < 5.005) {
+ print "1..0 # This is Perl $], skipping test\n" ;
+ exit 0 ;
+ }
+
+ eval { require Data::Dumper ; };
+ if ($@) {
+ print "1..0 # Data::Dumper is not installed on this system.\n";
+ exit 0 ;
+ }
+ if ($Data::Dumper::VERSION < 2.08) {
+ print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
+ exit 0 ;
+ }
+ eval { require MLDBM ; };
+ if ($@) {
+ print "1..0 # MLDBM is not installed on this system.\n";
+ exit 0 ;
+ }
+}
+
+use t::util ;
+
+print "1..12\n";
+
+{
+ package BTREE ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ $MLDBM::UseDB = "BerkeleyDB::Btree" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 1, $db ;
+ ::ok 2, $db->type() == DB_BTREE ;
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 3, $first eq $second ;
+ ::ok 4, $o{d} eq "{once upon a time}" ;
+ ::ok 5, $o{e} == 1024 ;
+ ::ok 6, $o{f} eq 1024.1024 ;
+
+}
+
+{
+
+ package HASH ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Hash) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ unlink $filename ;
+ $MLDBM::UseDB = "BerkeleyDB::Hash" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 7, $db ;
+ ::ok 8, $db->type() == DB_HASH ;
+
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 9, $first eq $second ;
+ ::ok 10, $o{d} eq "{once upon a time}" ;
+ ::ok 11, $o{e} == 1024 ;
+ ::ok 12, $o{f} eq 1024.1024 ;
+
+}
diff --git a/libdb/perl/BerkeleyDB/t/queue.t b/libdb/perl/BerkeleyDB/t/queue.t
new file mode 100644
index 0000000..86add12
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/queue.t
@@ -0,0 +1,763 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.3) {
+ print "1..0 # Skipping test, Queue needs Berkeley DB 3.3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..201\n";
+
+sub fillout
+{
+ my $var = shift ;
+ my $length = shift ;
+ my $pad = shift || " " ;
+ my $template = $pad x $length ;
+ substr($template, 0, length($var)) = $var ;
+ return $template ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Queue -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Queue -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Queue -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Queue
+
+{
+ my $lex = new LexFile $Dfile ;
+ my $rec_len = 10 ;
+ my $pad = "x" ;
+
+ ok 6, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Len => $rec_len,
+ -Pad => $pad;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq fillout("some value", $rec_len, $pad) ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq fillout("value", $rec_len, $pad) ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
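+ # (status() is compared both numerically, against DB_KEYEMPTY and
+ # DB_NOTFOUND, and as a string against %DB_errors here -- the same return
+ # value behaves as an error number and as an error-message string)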
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq fillout("value", $rec_len, $pad) ;
+
+
+}
+
+
+{
+ # Check simple env works with an array.
+ # and pad defaults to space
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ my $rec_len = 11 ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE,
+ -Len => $rec_len;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq fillout("some value", $rec_len) ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 5 ;
+ ok 33, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 10 ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq fillout("some value", $rec_len);
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_KEYEMPTY ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift isn't allowed
+# eval {
+# $FA ? unshift @array, "red", "green", "blue"
+# : $db->unshift("red", "green", "blue" ) ;
+# } ;
+# ok 64, $@ =~ /^unshift is unsupported with Queue databases/ ;
+ $array[0] = "red" ;
+ $array[1] = "green" ;
+ $array[2] = "blue" ;
+ $array[4] = 2 ;
+ ok 64, $array[0] eq fillout("red", $rec_len) ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 0 ;
+ ok 67, $v eq fillout("red", $rec_len) ;
+ ok 68, $array[1] eq fillout("green", $rec_len) ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 1 ;
+ ok 71, $v eq fillout("green", $rec_len) ;
+ ok 72, $array[2] eq fillout("blue", $rec_len) ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 2 ;
+ ok 75, $v eq fillout("blue", $rec_len) ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq fillout("red", $rec_len) ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq fillout("green", $rec_len) ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq fillout("blue", $rec_len) ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1002 ;
+ ok 86, $v eq fillout("end", $rec_len) ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1001 ;
+ ok 89, $v eq fillout("the", $rec_len) ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 1000 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq fillout("end", $rec_len) ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq fillout("the", $rec_len) ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ my $rec_len = 15 ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Len => $rec_len;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq fillout("some value", $rec_len) ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 101, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ my $r = $db->db_put($i, $data[$i]) ;
+ $ret += $r ;
+ }
+ ok 102, $ret == 0 ;
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq fillout("t", 2) ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq " " ;
+
+ # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq fillout("house", $rec_len) ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") != 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XY") == 0 ;
+ ok 122, $db->db_put(4, "KLM") != 0 ;
+ ok 123, $db->db_put(4, "KL") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 124, $pon ;
+ ok 125, $off == 0 ;
+ ok 126, $len == 2 ;
+ ok 127, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 128, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse", $rec_len) ;
+ ok 129, $db->db_get(3, $value) == 0 && $value eq fillout("XYa", $rec_len) ;
+ ok 130, $db->db_get(4, $value) == 0 && $value eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 131, ! $pon ;
+ ok 132, $off == 0 ;
+ ok 133, $len == 0 ;
+ ok 134, $db->db_put(1, "PP") == 0 ;
+ ok 135, $db->db_put(2, "Q") != 0 ;
+ ok 136, $db->db_put(3, "XY") == 0 ;
+ ok 137, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 138, $db->db_get(1, $value) == 0 && $value eq fillout("boaPP", $rec_len) ;
+ ok 139, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse",$rec_len) ;
+ ok 140, $db->db_get(3, $value) == 0 && $value eq fillout("XYaXY", $rec_len) ;
+ ok 141, $db->db_get(4, $value) == 0 && $value eq fillout("KL TU", $rec_len) ;
+}
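+
+# A rough summary of the partial-record calls exercised above, as used here:
+# after $db->partial_set($off, $len) every db_get returns only the $len bytes
+# starting at $off of a record, and every db_put overwrites just that window
+# (so putting "AB" at offset 0, length 2 into "house   " gives "ABuse   ").
+# partial_set() and partial_clear() both return the previous (flag, offset,
+# length) settings, which is what the $pon/$off/$len checks look at.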
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 142, my $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ my $status = 0 ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ $status += $db->status() ;
+ }
+
+ ok 143, $status == 0 ;
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 144, $array[1] eq fillout("bo", 2) ;
+ ok 145, $array[2] eq fillout("ho", 2) ;
+ ok 146, $array[3] eq fillout("se", 2) ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 147, $array[1] eq fillout("t", 2) ;
+ ok 148, $array[2] eq fillout("se", 2) ;
+ ok 149, $array[3] eq fillout("", 2) ;
+
+ # switch off partial mode
+ $db->partial_clear() ;
+ ok 150, $array[1] eq fillout("boat", $rec_len) ;
+ ok 151, $array[2] eq fillout("house", $rec_len) ;
+ ok 152, $array[3] eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ $array[1] = "" ;
+ ok 153, $db->status() != 0 ;
+ $array[2] = "AB" ;
+ ok 154, $db->status() == 0 ;
+ $array[3] = "XY" ;
+ ok 155, $db->status() == 0 ;
+ $array[4] = "KL" ;
+ ok 156, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 157, $array[1] eq fillout("boat", $rec_len) ;
+ ok 158, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 159, $array[3] eq fillout("XYa", $rec_len) ;
+ ok 160, $array[4] eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ $array[1] = "PP" ;
+ ok 161, $db->status() == 0 ;
+ $array[2] = "Q" ;
+ ok 162, $db->status() != 0 ;
+ $array[3] = "XY" ;
+ ok 163, $db->status() == 0 ;
+ $array[4] = "TU" ;
+ ok 164, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 165, $array[1] eq fillout("boaPP", $rec_len) ;
+ ok 166, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 167, $array[3] eq fillout("XYaXY", $rec_len) ;
+ ok 168, $array[4] eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 169, my $lexD = new LexDir($home);
+ my $rec_len = 9 ;
+ ok 170, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 171, my $txn = $env->txn_begin() ;
+ ok 172, my $db1 = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+
+ ok 173, $txn->txn_commit() == 0 ;
+ ok 174, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 175, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 176, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 177, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 178, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 179, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 180, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "qs_ndata" : "qs_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 7 ;
+ ok 181, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ -Len => $rec_len,
+ -Pad => " "
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 182, $ref->{$recs} == 0;
+ ok 183, $ref->{'qs_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 184, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 185, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Queue);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
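+
+ # The SubDB wrapper written out above triples values on db_put and
+ # subtracts 2 on db_get, so storing 3 should read back as 3*3-2 = 7 and
+ # storing 4 as 4*3-2 = 10 -- which is what the checks below expect.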
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 186, $@ eq "" ;
+ my @h ;
+ my $X ;
+ my $rec_len = 34 ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbqueue.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 ,
+ -Len => $rec_len,
+ -Pad => " "
+ );
+ ' ;
+
+ main::ok 187, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 194, $@ eq "" ;
+ main::ok 195, $ret eq "[[10]]" ;
+
+ undef $X ;
+ untie @h ;
+ unlink "SubDB.pm", "dbqueue.tmp" ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ my $rec_len = 21 ;
+ ok 196, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 197, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 198, $k == 4 ;
+ ok 199, $array[4] eq fillout("fred", $rec_len) ;
+
+ undef $db ;
+ untie @array ;
+}
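+
+# With DB_APPEND the key passed to db_put is ignored on input and set to the
+# newly allocated record number on return, which is why $k comes back as 4
+# above -- the next slot after the existing records 1 and 3.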
+
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 21 ;
+ ok 200, $db = tie @array, 'BerkeleyDB::Queue',
+ -Flags => DB_CREATE ,
+ -ArrayBase => 0,
+ -Len => $rec_len,
+ -Pad => " " ,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 201, ($FA ? pop @array : $db->pop()) eq fillout("first", $rec_len) ;
+
+ undef $db;
+ untie @array ;
+
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/libdb/perl/BerkeleyDB/t/recno.t b/libdb/perl/BerkeleyDB/t/recno.t
new file mode 100644
index 0000000..64b1803
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/recno.t
@@ -0,0 +1,913 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..226\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Recno -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Recno -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Recno -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Recno
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq "value" ;
+
+
+}
+
+
+{
+ # Check simple env works with an array.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+
+ ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq "some value";
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_NOTFOUND ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift
+ $FA ? unshift @array, "red", "green", "blue"
+ : $db->unshift("red", "green", "blue" ) ;
+ ok 64, $array[1] eq "red" ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 1 ;
+ ok 67, $v eq "red" ;
+ ok 68, $array[2] eq "green" ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 2 ;
+ ok 71, $v eq "green" ;
+ ok 72, $array[3] eq "blue" ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 3 ;
+ ok 75, $v eq "blue" ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq "red" ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq "green" ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq "blue" ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1001 ;
+ ok 86, $v eq "end" ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1000 ;
+ ok 89, $v eq "the" ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 999 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq "end" ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq "the" ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Recno' ;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 101, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ }
+ ok 102, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq "t" ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq "" ;
+
+ # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq "boat" ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq "house" ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") == 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XYZ") == 0 ;
+ ok 122, $db->db_put(4, "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 0 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get(1, $value) == 0 && $value eq "at" ;
+ ok 127, $db->db_get(2, $value) == 0 && $value eq "ABuse" ;
+ ok 128, $db->db_get(3, $value) == 0 && $value eq "XYZa" ;
+ ok 129, $db->db_get(4, $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 130, ! $pon ;
+ ok 131, $off == 0 ;
+ ok 132, $len == 0 ;
+ ok 133, $db->db_put(1, "PPP") == 0 ;
+ ok 134, $db->db_put(2, "Q") == 0 ;
+ ok 135, $db->db_put(3, "XYZ") == 0 ;
+ ok 136, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 137, $db->db_get(1, $value) == 0 && $value eq "at\0PPP" ;
+ ok 138, $db->db_get(2, $value) == 0 && $value eq "ABuQ" ;
+ ok 139, $db->db_get(3, $value) == 0 && $value eq "XYZXYZ" ;
+ ok 140, $db->db_get(4, $value) == 0 && $value eq "KLMTU" ;
+}
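+
+# Note the "at\0PPP" result above: with Recno's variable-length records a
+# partial put past the current end of a record (offset 3 into the two-byte
+# record "at") fills the gap with NUL bytes before writing the new data.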
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ ok 141, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 142, $array[1] eq "bo" ;
+ ok 143, $array[2] eq "ho" ;
+ ok 144, $array[3] eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 145, $array[1] eq "t" ;
+ ok 146, $array[2] eq "se" ;
+ ok 147, $array[3] eq "" ;
+
+ # switch off partial mode
+ $db->partial_clear() ;
+ ok 148, $array[1] eq "boat" ;
+ ok 149, $array[2] eq "house" ;
+ ok 150, $array[3] eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 151, $array[1] = "" ;
+ ok 152, $array[2] = "AB" ;
+ ok 153, $array[3] = "XYZ" ;
+ ok 154, $array[4] = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 155, $array[1] eq "at" ;
+ ok 156, $array[2] eq "ABuse" ;
+ ok 157, $array[3] eq "XYZa" ;
+ ok 158, $array[4] eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 159, $array[1] = "PPP" ;
+ ok 160, $array[2] = "Q" ;
+ ok 161, $array[3] = "XYZ" ;
+ ok 162, $array[4] = "TU" ;
+
+ $db->partial_clear() ;
+ ok 163, $array[1] eq "at\0PPP" ;
+ ok 164, $array[2] eq "ABuQ" ;
+ ok 165, $array[3] eq "XYZXYZ" ;
+ ok 166, $array[4] eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 167, my $lexD = new LexDir($home);
+ ok 168, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 169, my $txn = $env->txn_begin() ;
+ ok 170, my $db1 = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 171, $txn->txn_commit() == 0 ;
+ ok 172, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 173, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 174, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 175, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 176, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 177, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 178, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ ok 179, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 180, $ref->{$recs} == 0;
+ ok 181, $ref->{'bt_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 182, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 183, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Recno);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 184, $@ eq "" ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbrecno.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 185, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret eq "[[10]]" ;
+
+ undef $X;
+ untie @h;
+ unlink "SubDB.pm", "dbrecno.tmp" ;
+
+}
+
+{
+ # variable length records, DB_DELIMITER -- defaults to \n
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 195, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 197, $x eq "abc-def--ghi-";
+}
+
+{
+ # fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 199, $x eq "abc def ghi " ;
+}
+
+{
+ # fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 200, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 201, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # DB_RENUMBER
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 202, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+ # create a few records
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ ok 203, my ($length, $joined) = joiner($db, "|") ;
+ ok 204, $length == 3 ;
+ ok 205, $joined eq "abc|def|ghi";
+
+ ok 206, $db->db_del(1) == 0 ;
+ ok 207, ($length, $joined) = joiner($db, "|") ;
+ ok 208, $length == 2 ;
+ ok 209, $joined eq "abc|ghi";
+
+ undef $db ;
+ untie @array ;
+
+}
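+
+# The DB_RENUMBER property used above makes record numbers mutable: deleting
+# record 1 ("def") renumbers the records after it, so the remaining values
+# join up as "abc|ghi" instead of leaving an empty slot at 1.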
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 210, my $db = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 211, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 212, $k == 4 ;
+
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory Btree with an associated text file
+
+ my $lex = new LexFile $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 213, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
+ -ArrayBase => 0,
+ -Property => DB_RENUMBER,
+ -Flags => DB_CREATE ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 214, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # in-memory, variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 215, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Property => DB_RENUMBER,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 216, $x eq "abc-def--ghi-";
+}
+
+{
+ # in-memory, fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 217, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 218, $x eq "abc def ghi " ;
+}
+
+{
+ # in-memory, fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 219, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 220, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 221, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 222, $array[0] eq "first" ;
+ ok 223, ($FA ? pop @array : $db->pop()) eq "first" ;
+
+ undef $db;
+ untie @array ;
+
+}
+
+{
+ # 23 Sept 2001 -- unshift into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 224, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? unshift @array, "first"
+ : $db->unshift("first") ;
+
+ ok 225, $array[0] eq "first" ;
+ ok 226, ($FA ? shift @array : $db->shift()) eq "first" ;
+
+ undef $db;
+ untie @array ;
+
+}
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/libdb/perl/BerkeleyDB/t/strict.t b/libdb/perl/BerkeleyDB/t/strict.t
new file mode 100644
index 0000000..ab41d44
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/strict.t
@@ -0,0 +1,174 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..44\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # closing a database & an environment in the correct order.
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 3, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ ok 4, $db1->db_close() == 0 ;
+
+ eval { $status = $env->db_appexit() ; } ;
+ ok 5, $status == 0 ;
+ ok 6, $@ eq "" ;
+ #print "[$@]\n" ;
+
+}
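+
+# The remaining blocks exercise the module's "strict" ordering checks:
+# cursors and transactions have to be closed before their database, and
+# databases before their environment, otherwise the wrapper aborts with the
+# messages matched in the eval checks below.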
+
+{
+ # closing an environment with an open database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 7, my $lexD = new LexDir($home);
+ ok 8, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 9, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ eval { $env->db_appexit() ; } ;
+ ok 10, $@ =~ /BerkeleyDB Aborting: attempted to close an environment with 1 open database/ ;
+ #print "[$@]\n" ;
+
+ undef $db1 ;
+ untie %hash ;
+ undef $env ;
+}
+
+{
+ # closing a transaction & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 11, my $lexD = new LexDir($home);
+ ok 12, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 13, my $txn = $env->txn_begin() ;
+ ok 14, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 15, $txn->txn_commit() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 16, $status == 0 ;
+ ok 17, $@ eq "" ;
+ #print "[$@]\n" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 18, $status == 0 ;
+ ok 19, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open transaction
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 20, my $lexD = new LexDir($home);
+ ok 21, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+ ok 23, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ eval { $db->db_close() ; } ;
+ ok 24, $@ =~ /BerkeleyDB Aborting: attempted to close a database while a transaction was still open at/ ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a cursor & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+ ok 25, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 26, my $cursor = $db->db_cursor() ;
+ ok 27, $cursor->c_close() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 28, $status == 0 ;
+ ok 29, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 30, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 31, my $cursor = $db->db_cursor() ;
+ eval { $db->db_close() ; } ;
+ ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a transaction & a cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 33, my $lexD = new LexDir($home);
+ ok 34, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 35, my $txn = $env->txn_begin() ;
+ ok 36, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+ ok 37, my $cursor = $db->db_cursor() ;
+ eval { $status = $cursor->c_close() ; } ;
+ ok 38, $status == 0 ;
+ ok 39, ($status = $txn->txn_commit()) == 0 ;
+ ok 40, $@ eq "" ;
+ eval { $status = $db->db_close() ; } ;
+ ok 41, $status == 0 ;
+ ok 42, $@ eq "" ;
+ #print "[$@]\n" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 43, $status == 0 ;
+ ok 44, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/subdb.t b/libdb/perl/BerkeleyDB/t/subdb.t
new file mode 100644
index 0000000..23016d6
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/subdb.t
@@ -0,0 +1,243 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..43\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Berkeley DB 3.x specific functionality
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' BerkeleyDB::db_remove -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' BerkeleyDB::db_remove -Bad => 2, -Filename => "fred", -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' BerkeleyDB::db_remove -Filename => "a", -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' BerkeleyDB::db_remove -Subname => "a"' ;
+ ok 4, $@ =~ /^Must specify a filename/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' BerkeleyDB::db_remove -Filename => "x", -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabase in an existing database that doesn't contain
+ # any subdatabases should fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 7, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+ ok 8, ! $db ;
+
+ ok 9, -e $Dfile ;
+ ok 10, ! BerkeleyDB::db_remove(-Filename => $Dfile) ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabase in an existing database that does contain
+ # subdatabases, but not the one requested
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 11, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 12, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "joe" ;
+
+ ok 13, !$db ;
+
+}
+
+{
+ # subdatabases
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 14, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 15, addData($db, %data) ;
+
+ undef $db ;
+
+ ok 16, $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+
+ ok 17, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ if ($data{$k} eq $v) {
+ delete $data{$k} ;
+ }
+ }
+ ok 18, $status == DB_NOTFOUND ;
+ ok 19, keys %data == 0 ;
+}
+
+{
+ # subdatabases
+
+ # opening a database with multiple subdatabases - iterating the handle
+ # should yield the subdatabase names
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 20, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ ok 21, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Subname => "joe" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 22, addData($db1, %data) ;
+ ok 23, addData($db2, %data) ;
+
+ undef $db1 ;
+ undef $db2 ;
+
+ ok 24, my $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ #my $type = $db->type() ; print "type $type\n" ;
+ ok 25, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ my @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 26, $status == DB_NOTFOUND ;
+ ok 27, join(",", sort @dbnames) eq "fred,joe" ;
+ undef $db ;
+
+ ok 28, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "harry") != 0;
+ ok 29, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") == 0 ;
+
+ # should only be one subdatabase
+ ok 30, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 31, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 32, $status == DB_NOTFOUND ;
+ ok 33, join(",", sort @dbnames) eq "joe" ;
+ undef $db ;
+
+ # can't delete an already deleted subdatabase
+ ok 34, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") != 0;
+
+ ok 35, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "joe") == 0 ;
+
+ # now there should be no subdatabases left
+ ok 36, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 37, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 38, $status == DB_NOTFOUND ;
+ ok 39, @dbnames == 0 ;
+ undef $db ;
+ undef $cursor ;
+
+ ok 40, -e $Dfile ;
+ ok 41, BerkeleyDB::db_remove(-Filename => $Dfile) == 0 ;
+ ok 42, ! -e $Dfile ;
+ ok 43, BerkeleyDB::db_remove(-Filename => $Dfile) != 0 ;
+}
+
+# db_remove with env
diff --git a/libdb/perl/BerkeleyDB/t/txn.t b/libdb/perl/BerkeleyDB/t/txn.t
new file mode 100644
index 0000000..ba6b636
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/txn.t
@@ -0,0 +1,320 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..58\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # error cases
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE| DB_INIT_MPOOL;
+ eval { $env->txn_begin() ; } ;
+ ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+
+ eval { my $txn_mgr = $env->TxnMgr() ; } ;
+ ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+ undef $env ;
+
+}
+
+{
+ # transaction - abort works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 5, my $lexD = new LexDir($home);
+ ok 6, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 7, my $txn = $env->txn_begin() ;
+ ok 8, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 9, $txn->txn_commit() == 0 ;
+ ok 10, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
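+ # (the database was opened inside the first transaction, which has now
+ # been committed; Txn() re-associates the handle with the new transaction
+ # so the puts below run inside it)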
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 11, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 12, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 13, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 14, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 15, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 16, $count == 0 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 17, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - abort works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 18, my $lexD = new LexDir($home);
+ ok 19, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 20, my $txn_mgr = $env->TxnMgr() ;
+ ok 21, my $txn = $txn_mgr->txn_begin() ;
+ ok 22, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 23, $txn->txn_commit() == 0 ;
+ ok 24, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 25, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 26, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 27, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 28, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 29, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 30, $count == 0 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 31, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 32, my $lexD = new LexDir($home);
+ ok 33, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 34, my $txn = $env->txn_begin() ;
+ ok 35, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 36, $txn->txn_commit() == 0 ;
+ ok 37, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 38, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 39, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 40, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 41, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 42, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 43, $count == 3 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 44, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 45, my $lexD = new LexDir($home);
+ ok 46, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 47, my $txn_mgr = $env->TxnMgr() ;
+ ok 48, my $txn = $txn_mgr->txn_begin() ;
+ ok 49, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 50, $txn->txn_commit() == 0 ;
+ ok 51, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 52, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 53, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 54, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 55, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 56, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 57, $count == 3 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 58, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
diff --git a/libdb/perl/BerkeleyDB/t/unknown.t b/libdb/perl/BerkeleyDB/t/unknown.t
new file mode 100644
index 0000000..f2630b5
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/unknown.t
@@ -0,0 +1,176 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..41\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Unknown -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Unknown -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# check the interface to a rubbish database
+{
+ # first an empty file
+ my $lex = new LexFile $Dfile ;
+ ok 6, writeFile($Dfile, "") ;
+
+ ok 7, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+
+ # now a non-database file
+ writeFile($Dfile, "\x2af6") ;
+ ok 8, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+}
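+
+# BerkeleyDB::Unknown opens an existing file and lets Berkeley DB work out
+# the access method itself, which is why it refuses the empty and garbage
+# files above, and why type() in the blocks below reports DB_HASH, DB_BTREE
+# or DB_RECNO depending on what created the file.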
+
+# check the interface to a Hash database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a hash database
+ ok 9, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 10, $db->db_put("some key", "some value") == 0 ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 12, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 13, $db->type() == DB_HASH ;
+ ok 14, $db->db_get("some key", $value) == 0 ;
+ ok 15, $value eq "some value" ;
+ ok 16, $db->db_get("key", $value) == 0 ;
+ ok 17, $value eq "value" ;
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 18, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 19, $hash{"some key"} eq "some value" ;
+
+}
+
+# check the interface to a Btree database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a btree database
+ ok 20, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 21, $db->db_put("some key", "some value") == 0 ;
+ ok 22, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 23, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 24, $db->type() == DB_BTREE ;
+ ok 25, $db->db_get("some key", $value) == 0 ;
+ ok 26, $value eq "some value" ;
+ ok 27, $db->db_get("key", $value) == 0 ;
+ ok 28, $value eq "value" ;
+
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 29, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 30, $hash{"some key"} eq "some value" ;
+
+
+}
+
+# check the interface to a Recno database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a recno database
+ ok 31, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 32, $db->db_put(0, "some value") == 0 ;
+ ok 33, $db->db_put(1, "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 34, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 35, $db->type() == DB_RECNO ;
+ ok 36, $db->db_get(0, $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $db->db_get(1, $value) == 0 ;
+ ok 39, $value eq "value" ;
+
+
+ my %hash ;
+ eval { $db->Tie(\%hash)} ;
+ ok 40, $@ =~ /^Tie needs a reference to an array/ ;
+
+ my @array ;
+ $db->Tie(\@array) ;
+ ok 41, $array[1] eq "value" ;
+
+
+}
+
+# check i/f to text
diff --git a/libdb/perl/BerkeleyDB/t/util.pm b/libdb/perl/BerkeleyDB/t/util.pm
new file mode 100644
index 0000000..1a14497
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/t/util.pm
@@ -0,0 +1,220 @@
+package util ;
+
+package main ;
+
+use strict ;
+use BerkeleyDB ;
+use File::Path qw(rmtree);
+use vars qw(%DB_errors $FA) ;
+
+$| = 1;
+
+%DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+# full tied array support started in Perl 5.004_57
+# just double check.
+$FA = 0 ;
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
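+# LexFile - hands out fresh scratch database file names for the tests;
+# every name it has issued is unlinked in the END block at exit.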
+{
+ package LexFile ;
+
+ use vars qw( $basename @files ) ;
+ $basename = "db0000" ;
+
+ sub new
+ {
+ my $self = shift ;
+ #my @files = () ;
+ foreach (@_)
+ {
+ $_ = $basename ;
+ unlink $basename ;
+ push @files, $basename ;
+ ++ $basename ;
+ }
+ bless [ @files ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ #unlink @{ $self } ;
+ }
+
+ END
+ {
+ foreach (@files) { unlink $_ }
+ }
+}
+
+
+{
+ package LexDir ;
+
+ use File::Path qw(rmtree);
+
+ use vars qw( $basename %dirs ) ;
+
+ sub new
+ {
+ my $self = shift ;
+ my $dir = shift ;
+
+ rmtree $dir if -e $dir ;
+
+ mkdir $dir, 0777 or return undef ;
+
+ return bless [ $dir ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ my $dir = $self->[0];
+ #rmtree $dir;
+ $dirs{$dir} ++ ;
+ }
+
+ END
+ {
+ foreach (keys %dirs) {
+ rmtree $_ if -d $_ ;
+ }
+ }
+
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub writeFile
+{
+ my $name = shift ;
+ open(FH, ">$name") or return 0 ;
+ print FH @_ ;
+ close FH ;
+ return 1 ;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
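+# joiner - walk the database with a cursor and return the record count
+# together with all of the values joined by $sep.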
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
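+# countRecords - count the key/value pairs in a database by stepping a
+# cursor over it (DB_NEXT on a fresh cursor starts at the first record).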
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
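+# addData - db_put() a flat list of key/value pairs and return true only
+# if every put succeeded.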
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+ die "addData odd data\n" if @data % 2 != 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
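+# ok - minimal test output in the classic harness style: "ok N" or "not ok N".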
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+
+1;
diff --git a/libdb/perl/BerkeleyDB/typemap b/libdb/perl/BerkeleyDB/typemap
new file mode 100644
index 0000000..81ead2c
--- /dev/null
+++ b/libdb/perl/BerkeleyDB/typemap
@@ -0,0 +1,275 @@
+# typemap for Perl 5 interface to Berkeley DB version 2 & 3
+#
+# SCCS: %I%, %G%
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+#
+#################################### DB SECTION
+#
+#
+
+void * T_PV
+u_int T_U_INT
+u_int32_t T_U_INT
+const char * T_PV_NULL
+PV_or_NULL T_PV_NULL
+IO_or_NULL T_IO_NULL
+
+AV * T_AV
+
+BerkeleyDB T_PTROBJ
+BerkeleyDB::Common T_PTROBJ_AV
+BerkeleyDB::Hash T_PTROBJ_AV
+BerkeleyDB::Btree T_PTROBJ_AV
+BerkeleyDB::Recno T_PTROBJ_AV
+BerkeleyDB::Queue T_PTROBJ_AV
+BerkeleyDB::Cursor T_PTROBJ_AV
+BerkeleyDB::TxnMgr T_PTROBJ_AV
+BerkeleyDB::Txn T_PTROBJ_AV
+BerkeleyDB::Log T_PTROBJ_AV
+BerkeleyDB::Lock T_PTROBJ_AV
+BerkeleyDB::Env T_PTROBJ_AV
+
+BerkeleyDB::Raw T_RAW
+BerkeleyDB::Common::Raw T_RAW
+BerkeleyDB::Hash::Raw T_RAW
+BerkeleyDB::Btree::Raw T_RAW
+BerkeleyDB::Recno::Raw T_RAW
+BerkeleyDB::Queue::Raw T_RAW
+BerkeleyDB::Cursor::Raw T_RAW
+BerkeleyDB::TxnMgr::Raw T_RAW
+BerkeleyDB::Txn::Raw T_RAW
+BerkeleyDB::Log::Raw T_RAW
+BerkeleyDB::Lock::Raw T_RAW
+BerkeleyDB::Env::Raw T_RAW
+
+BerkeleyDB::Env::Inner T_INNER
+BerkeleyDB::Common::Inner T_INNER
+BerkeleyDB::Txn::Inner T_INNER
+BerkeleyDB::TxnMgr::Inner T_INNER
+# BerkeleyDB__Env T_PTR
+DBT T_dbtdatum
+DBT_OPT T_dbtdatum_opt
+DBT_B T_dbtdatum_btree
+DBTKEY T_dbtkeydatum
+DBTKEY_B T_dbtkeydatum_btree
+DBTYPE T_U_INT
+DualType T_DUAL
+BerkeleyDB_type * T_IV
+BerkeleyDB_ENV_type * T_IV
+BerkeleyDB_TxnMgr_type * T_IV
+BerkeleyDB_Txn_type * T_IV
+BerkeleyDB__Cursor_type * T_IV
+DB * T_IV
+
+INPUT
+
+T_AV
+ if (SvROK($arg) && SvTYPE(SvRV($arg)) == SVt_PVAV)
+ /* if (sv_isa($arg, \"${ntype}\")) */
+ $var = (AV*)SvRV($arg);
+ else
+ croak(\"$var is not an array reference\")
+
+T_RAW
+ $var = INT2PTR($type,SvIV($arg))
+
+T_U_INT
+ $var = SvUV($arg)
+
+T_SV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV *)GetInternalObject($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_P_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_INNER
+ {
+ HV * hv = (HV *)SvRV($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+
+T_PV_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else {
+ $var = ($type)SvPV($arg,PL_na) ;
+ if (PL_na == 0)
+ $var = NULL ;
+ }
+
+T_IO_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else
+ $var = IoOFP(sv_2io($arg))
+
+T_PTROBJ_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_SELF
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_AV
+ if ($arg == &PL_sv_undef || $arg == NULL)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV(getInnerObject($arg)) ;
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_dbtkeydatum
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtkeydatum_btree
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue ||
+ (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtdatum
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+
+T_dbtdatum_opt
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+T_dbtdatum_btree
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+
+OUTPUT
+
+T_RAW
+ sv_setiv($arg, PTR2IV($var));
+
+T_SV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_P_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_DUAL
+ setDUALerrno($arg, $var) ;
+
+T_U_INT
+ sv_setuv($arg, (UV)$var);
+
+T_PV_NULL
+ sv_setpv((SV*)$arg, $var);
+
+T_dbtkeydatum_btree
+ OutputKey_B($arg, $var)
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_dbtdatum_opt
+ OutputValue($arg, $var)
+T_dbtdatum_btree
+ OutputValue_B($arg, $var)
+
+T_PTROBJ_NULL
+ sv_setref_pv($arg, \"${ntype}\", (void*)$var);
+
+T_PTROBJ_SELF
+ sv_setref_pv($arg, self, (void*)$var);
diff --git a/libdb/perl/DB_File/Changes b/libdb/perl/DB_File/Changes
new file mode 100644
index 0000000..7883cbd
--- /dev/null
+++ b/libdb/perl/DB_File/Changes
@@ -0,0 +1,434 @@
+
+1.805 1st September 2002
+
+ * Added support to allow DB_File to build with Berkeley DB 4.1.X
+
+ * Tightened up the test harness to test that calls to untie don't generate
+ the "untie attempted while %d inner references still exist" warning.
+
+ * added code to guard against calling the callbacks (compare,hash & prefix)
+ recursively.
+
+ * passing undef for the flags and/or mode when opening a database could cause
+ a "Use of uninitialized value in subroutine entry" warning. Now silenced.
+
+ * DBM filter code beefed up to cope with read-only $_.
+
+1.804 2nd June 2002
+
+ * Perl core patch 14939 added a new warning to "splice". This broke the
+ db-recno test harness. Fixed.
+
+ * merged core patches 16502 & 16540.
+
+1.803 1st March 2002
+
+ * Fixed a problem with db-btree.t where it complained about an "our"
+ variable redeclaration.
+
+ * FETCH, STORE & DELETE don't map the flags parameter into the
+ equivalent Berkeley DB function anymore.
+
+1.802 6th January 2002
+
+ * The message about some test failing in db-recno.t had the wrong test
+ numbers. Fixed.
+
+ * merged core patch 13942.
+
+1.801 26th November 2001
+
+ * Fixed typo in Makefile.PL
+
+ * Added "clean" attribute to Makefile.PL
+
+1.800 23rd November 2001
+
+ * use ppport.h for perl backward compatibility code.
+
+ * use new ExtUtils::Constant module to generate XS constants.
+
+ * upgrade Makefile.PL upgrade/downgrade code to toggle "our" with
+ "use vars"
+
+1.79 22nd October 2001
+
+ * Added a "local $SIG{__DIE__}" inside the eval that checks for
+ the presence of XSLoader, as suggested by Andrew Hryckowin.
+
+ * merged core patch 12277.
+
+ * Changed NEXTKEY to not initialise the input key. It isn't used anyway.
+
+1.79 22nd October 2001
+
+ * Fixed test harness for cygwin
+
+1.78 30th July 2001
+
+ * the test in Makefile.PL for AIX used -plthreads. Should have been
+ -lpthreads
+
+ * merged Core patches
+ 10372, 10335, 10372, 10534, 10549, 10643, 11051, 11194, 11432
+
+ * added documentation patch regarding duplicate keys from Andrew Johnson
+
+
+1.77 26th April 2001
+
+ * AIX is reported to need -lpthreads, so Makefile.PL now checks for
+ AIX and adds it to the link options.
+
+ * Minor documentation updates.
+
+ * Merged Core patch 9176
+
+ * Added a patch from Edward Avis that adds support for splice with
+ recno databases.
+
+ * Modified Makefile.PL to only enable the warnings pragma if using perl
+ 5.6.1 or better.
+
+1.76 15th January 2001
+
+ * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
+ with DB_File on Linux. Thanks to Norbert Bollow for sending details of
+ this approach.
+
+
+1.75 17th December 2000
+
+ * Fixed perl core patch 7703
+
+ * Added support to allow DB_File to be built with Berkeley DB 3.2 --
+ btree_compare, btree_prefix and hash_cb needed to be changed.
+
+ * Updated dbinfo to support Berkeley DB 3.2 file format changes.
+
+
+1.74 10th December 2000
+
+ * A "close" call in DB_File.xs needed parenthesised to stop win32 from
+ thinking it was one of its macros.
+
+ * Updated dbinfo to support Berkeley DB 3.1 file format changes.
+
+ * DB_File.pm & the test hasness now use the warnings pragma (when
+ available).
+
+ * Included Perl core patch 7703 -- size argument for hash_cb is different
+ for Berkeley DB 3.x
+
+ * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
+ treatment.
+
+ * @a = () produced the warning 'Argument "" isn't numeric in entersub'
+ This has been fixed. Thanks to Edward Avis for spotting this bug.
+
+ * Added note about building under Linux. Included patches.
+
+ * Included Perl core patch 8068 -- fix for bug 20001013.009
+ When run with warnings enabled "$hash{XX} = undef " produced an
+ "Uninitialized value" warning. This has been fixed.
+
+1.73 31st May 2000
+
+ * Added support in version.c for building with threaded Perl.
+
+ * Berkeley DB 3.1 has reenabled support for null keys. The test
+ harness has been updated to reflect this.
+
+1.72 16th January 2000
+
+ * Added hints/sco.pl
+
+ * The module will now use XSLoader when it is available. When it
+ isn't it will use DynaLoader.
+
+ * The locking section in DB_File.pm has been discredited. Many thanks
+ to David Harris for spotting the underlying problem, contributing
+ the updates to the documentation and writing DB_File::Lock (available
+ on CPAN).
+
+1.71 7th September 1999
+
+ * Fixed a bug that prevented 1.70 from compiling under win32
+
+ * Updated to support Berkeley DB 3.x
+
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+
+1.70 4th August 1999
+
+ * Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+
+ * Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+
+1.69 3rd August 1999
+
+ * fixed a bug in push -- DB_APPEND wasn't working properly.
+
+ * Fixed the R_SETCURSOR bug introduced in 1.68
+
+ * Added a new Perl variable $DB_File::db_ver
+
+1.68 22nd July 1999
+
+ * Merged changes from 5.005_58
+
+ * Fixed a bug in R_IBEFORE & R_IAFTER processing in Berkeley DB
+ 2 databases.
+
+ * Added some of the examples in the POD into the test harness.
+
+1.67 6th June 1999
+
+ * Added DBM Filter documentation to DB_File.pm
+
+ * Fixed DBM Filter code to work with 5.004
+
+ * A few instances of newSVpvn were used in 1.66. This isn't available in
+ Perl 5.004_04 or earlier. Replaced with newSVpv.
+
+1.66 15th March 1999
+
+ * Added DBM Filter code
+
+1.65 6th March 1999
+
+ * Fixed a bug in the recno PUSH logic.
+ * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+
+1.64 21st February 1999
+
+ * Tidied the 1.x to 2.x flag mapping code.
+ * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
+ mapping problem with O_RDONLY on the Hurd
+ * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+
+1.63 19th December 1998
+
+ * Fix to allow DB 2.6.x to build with DB_File
+ * Documentation updated to use push,pop etc in the RECNO example &
+ to include the find_dup & del_dup methods.
+
+1.62 30th November 1998
+
+ Added hints/dynixptx.pl.
+ Fixed typemap -- 1.61 used PL_na instead of na
+
+1.61 19th November 1998
+
+ Added a note to README about how to build Berkeley DB 2.x when
+ using HP-UX.
+ Minor modifications to get the module to build with DB 2.5.x
+ Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+
+1.60
+ Changed the test to check for full tied array support
+
+1.59
+ Updated the license section.
+
+ Berkeley DB 2.4.10 disallows zero length keys. Tests 32 & 42 in
+ db-btree.t and test 27 in db-hash.t failed because of this change.
+ Those tests have been zapped.
+
+ Added dbinfo to the distribution.
+
+1.58
+ Tied Array support was enhanced in Perl 5.004_57. DB_File now
+ supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
+
+ Fixed a problem with the use of sv_setpvn. When the size is
+ specified as 0, it does a strlen on the data. This was ok for DB
+ 1.x, but isn't for DB 2.x.
+
+1.57
+ If Perl has been compiled with Threads support, the symbol op will be
+ defined. This clashes with a field name in db.h, so it needs to be
+ #undef'ed before db.h is included.
+
+1.56
+ Documented the Solaris 2.5 mutex bug
+
+1.55
+ Merged 1.16 changes.
+
+1.54
+
+ Fixed a small bug in the test harness when run under win32
+ The emulation of fd when using DB 2.x was busted.
+
+1.53
+
+ Added DB_RENUMBER to flags for recno.
+
+1.52
+
+ Patch from Nick Ing-Simmons now allows DB_File to build on NT.
+ Merged 1.15 patch.
+
+1.51
+
+ Fixed the test harness so that it doesn't expect DB_File to have
+ been installed by the main Perl build.
+
+
+ Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+
+1.50
+
+ DB_File can now build with either DB 1.x or 2.x, but not both at
+ the same time.
+
+1.16
+
+ A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
+
+ Small fix for the AIX strict C compiler XLC which doesn't like
+ __attribute__ being defined via proto.h and redefined via db.h. Fix
+ courtesy of Jarkko Hietaniemi.
+
+1.15
+
+ Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
+ value" warning with db_get and db_seq.
+
+ Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
+ O_* constants from Fcntl.
+
+ Removed the DESTROY method from the DB_File::HASHINFO module.
+
+ Previously DB_File hard-wired the class name of any object that it
+ created to "DB_File". This makes sub-classing difficult. Now
+ DB_File creates objects in the namespace of the package it has been
+ inherited into.
+
+
+1.14
+
+ Made it illegal to tie an associative array to a RECNO database and
+ an ordinary array to a HASH or BTREE database.
+
+1.13
+
+ Minor changes to DB_File.xs and DB_File.pm
+
+1.12
+
+ Documented the incompatibility with version 2 of Berkeley DB.
+
+1.11
+
+ Documented the untie gotcha.
+
+1.10
+
+ Fixed fd method so that it still returns -1 for in-memory files
+ when db 1.86 is used.
+
+1.09
+
+ Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
+ DB_File::BTREEINFO.
+
+ Changed default mode to 0666.
+
+1.08
+
+ Documented operation of bval.
+
+1.07
+
+ Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+
+1.06
+
+ Minor namespace cleanup: Localized PrintBtree.
+
+1.05
+
+ Made all scripts in the documentation strict and -w clean.
+
+ Added logic to DB_File.xs to allow the module to be built after
+ Perl is installed.
+
+1.04
+
+ Minor documentation changes.
+
+ Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
+ <hammen@gothamcity.jsc.nasa.govt>.
+
+ Fixed a bug with the constructors for DB_File::HASHINFO,
+ DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
+ constructors to make them -w clean.
+
+ Reworked part of the test harness to be more locale friendly.
+
+1.03
+
+ Documentation update.
+
+ DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
+ automatically.
+
+ The standard hash function exists() is now supported.
+
+ Modified the behavior of get_dup. When it returns an associative
+ array, the value is the count of the number of matching BTREE
+ values.
+
+1.02
+
+ Merged OS/2 specific code into DB_File.xs
+
+ Removed some redundant code in DB_File.xs.
+
+ Documentation update.
+
+ Allow negative subscripts with RECNO interface.
+
+ Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+
+ The example code which showed how to lock a database needed a call
+ to sync added. Without it the resultant database file was empty.
+
+ Added get_dup method.
+
+1.01
+
+ Fixed a core dump problem with SunOS.
+
+ The return value from TIEHASH wasn't set to NULL when dbopen
+ returned an error.
+
+1.0
+
+ DB_File has been in use for over a year. To reflect that, the
+ version number has been incremented to 1.0.
+
+ Added complete support for multiple concurrent callbacks.
+
+ Using the push method on an empty list didn't work properly. This
+ has been fixed.
+
+0.3
+
+ Added prototype support for multiple btree compare callbacks.
+
+0.2
+
+ When DB_File is opening a database file it no longer terminates the
+ process if dbopen returned an error. This allows file protection
+ errors to be caught at run time. Thanks to Judith Grass
+ <grass@cybercash.com> for spotting the bug.
+
+0.1
+
+ First Release.
+
diff --git a/libdb/perl/DB_File/DB_File.pm b/libdb/perl/DB_File/DB_File.pm
new file mode 100644
index 0000000..49004ff
--- /dev/null
+++ b/libdb/perl/DB_File/DB_File.pm
@@ -0,0 +1,2291 @@
+# DB_File.pm -- Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess (Paul.Marquess@btinternet.com)
+# last modified 1st September 2002
+# version 1.805
+#
+# Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+
+package DB_File::HASHINFO ;
+
+require 5.00404;
+
+use warnings;
+use strict;
+use Carp;
+require Tie::Hash;
+@DB_File::HASHINFO::ISA = qw(Tie::Hash);
+
+sub new
+{
+ my $pkg = shift ;
+ my %x ;
+ tie %x, $pkg ;
+ bless \%x, $pkg ;
+}
+
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => {
+ bsize => 1,
+ ffactor => 1,
+ nelem => 1,
+ cachesize => 1,
+ hash => 2,
+ lorder => 1,
+ },
+ GOT => {}
+ }, $pkg ;
+}
+
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ return $self->{GOT}{$key} if exists $self->{VALID}{$key} ;
+
+ my $pkg = ref $self ;
+ croak "${pkg}::FETCH - Unknown element '$key'" ;
+}
+
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ my $type = $self->{VALID}{$key};
+
+ if ( $type )
+ {
+ croak "Key '$key' not associated with a code reference"
+ if $type == 2 && !ref $value && ref $value ne 'CODE';
+ $self->{GOT}{$key} = $value ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "${pkg}::STORE - Unknown element '$key'" ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ delete $self->{GOT}{$key} ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "DB_File::HASHINFO::DELETE - Unknown element '$key'" ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ exists $self->{VALID}{$key} ;
+}
+
+sub NotHere
+{
+ my $self = shift ;
+ my $method = shift ;
+
+ croak ref($self) . " does not define the method ${method}" ;
+}
+
+sub FIRSTKEY { my $self = shift ; $self->NotHere("FIRSTKEY") }
+sub NEXTKEY { my $self = shift ; $self->NotHere("NEXTKEY") }
+sub CLEAR { my $self = shift ; $self->NotHere("CLEAR") }
+
+package DB_File::RECNOINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::RECNOINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bval cachesize psize flags lorder reclen bfname )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+package DB_File::BTREEINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::BTREEINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => {
+ flags => 1,
+ cachesize => 1,
+ maxkeypage => 1,
+ minkeypage => 1,
+ psize => 1,
+ compare => 2,
+ prefix => 2,
+ lorder => 1,
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+
+package DB_File ;
+
+use warnings;
+use strict;
+our ($VERSION, @ISA, @EXPORT, $AUTOLOAD, $DB_BTREE, $DB_HASH, $DB_RECNO);
+our ($db_version, $use_XSLoader, $splice_end_array);
+use Carp;
+
+
+$VERSION = "1.805" ;
+
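+# Probe whether this perl warns about a splice() offset past the end of an
+# array; SPLICE below only re-issues that warning when the host perl would.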
+{
+ local $SIG{__WARN__} = sub {$splice_end_array = "@_";};
+ my @a =(1); splice(@a, 3);
+ $splice_end_array =
+ ($splice_end_array =~ /^splice\(\) offset past end of array at /);
+}
+
+#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+$DB_BTREE = new DB_File::BTREEINFO ;
+$DB_HASH = new DB_File::HASHINFO ;
+$DB_RECNO = new DB_File::RECNOINFO ;
+
+require Tie::Hash;
+require Exporter;
+use AutoLoader;
+BEGIN {
+ $use_XSLoader = 1 ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+push @ISA, qw(Tie::Hash Exporter);
+@EXPORT = qw(
+ $DB_BTREE $DB_HASH $DB_RECNO
+
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+
+);
+
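+# Resolve constant names lazily via the XS constant() lookup and install
+# each one as a sub so later calls bypass AUTOLOAD.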
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
+
+
+eval {
+ # Make all Fcntl O_XXX constants available for importing
+ require Fcntl;
+ my @O = grep /^O_/, @Fcntl::EXPORT;
+ Fcntl->import(@O); # first we import what we want to export
+ push(@EXPORT, @O);
+};
+
+if ($use_XSLoader)
+ { XSLoader::load("DB_File", $VERSION)}
+else
+ { bootstrap DB_File $VERSION }
+
+# Preloaded methods go here. Autoload methods go after __END__, and are
+# processed by the autosplit program.
+
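+# Shared implementation behind TIEHASH and TIEARRAY: fill in the default
+# flags/mode, cope with a RECNO file that does not yet exist, then hand
+# the argument list to the XS DoTie_() helper.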
+sub tie_hash_or_array
+{
+ my (@arg) = @_ ;
+ my $tieHASH = ( (caller(1))[3] =~ /TIEHASH/ ) ;
+
+ $arg[4] = tied %{ $arg[4] }
+ if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
+
+ $arg[2] = O_CREAT()|O_RDWR() if @arg >=3 && ! defined $arg[2];
+ $arg[3] = 0666 if @arg >=4 && ! defined $arg[3];
+
+ # make recno in Berkeley DB version 2 work like recno in version 1.
+ if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and
+ $arg[1] and ! -e $arg[1]) {
+ open(FH, ">$arg[1]") or return undef ;
+ close FH ;
+ chmod $arg[3] ? $arg[3] : 0666 , $arg[1] ;
+ }
+
+ DoTie_($tieHASH, @arg) ;
+}
+
+sub TIEHASH
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub TIEARRAY
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub CLEAR
+{
+ my $self = shift;
+ my $key = 0 ;
+ my $value = "" ;
+ my $status = $self->seq($key, $value, R_FIRST());
+ my @keys;
+
+ while ($status == 0) {
+ push @keys, $key;
+ $status = $self->seq($key, $value, R_NEXT());
+ }
+ foreach $key (reverse @keys) {
+ my $s = $self->del($key);
+ }
+}
+
+sub EXTEND { }
+
+sub STORESIZE
+{
+ my $self = shift;
+ my $length = shift ;
+ my $current_length = $self->length() ;
+
+ if ($length < $current_length) {
+ my $key ;
+ for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+ { $self->del($key) }
+ }
+ elsif ($length > $current_length) {
+ $self->put($length-1, "") ;
+ }
+}
+
+
+sub SPLICE
+{
+ my $self = shift;
+ my $offset = shift;
+ if (not defined $offset) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $offset = 0;
+ }
+
+ my $length = @_ ? shift : 0;
+ # Carping about definedness comes _after_ the OFFSET sanity check.
+ # This is so we get the same error messages as Perl's splice().
+ #
+
+ my @list = @_;
+
+ my $size = $self->FETCHSIZE();
+
+ # 'If OFFSET is negative then it start that far from the end of
+ # the array.'
+ #
+ if ($offset < 0) {
+ my $new_offset = $size + $offset;
+ if ($new_offset < 0) {
+ die "Modification of non-creatable array value attempted, "
+ . "subscript $offset";
+ }
+ $offset = $new_offset;
+ }
+
+ if (not defined $length) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $length = 0;
+ }
+
+ if ($offset > $size) {
+ $offset = $size;
+ warnings::warnif('misc', 'splice() offset past end of array')
+ if $splice_end_array;
+ }
+
+ # 'If LENGTH is omitted, removes everything from OFFSET onward.'
+ if (not defined $length) {
+ $length = $size - $offset;
+ }
+
+ # 'If LENGTH is negative, leave that many elements off the end of
+ # the array.'
+ #
+ if ($length < 0) {
+ $length = $size - $offset + $length;
+
+ if ($length < 0) {
+ # The user must have specified a length bigger than the
+ # length of the array passed in. But perl's splice()
+ # doesn't catch this, it just behaves as for length=0.
+ #
+ $length = 0;
+ }
+ }
+
+ if ($length > $size - $offset) {
+ $length = $size - $offset;
+ }
+
+ # $num_elems holds the current number of elements in the database.
+ my $num_elems = $size;
+
+ # 'Removes the elements designated by OFFSET and LENGTH from an
+ # array,'...
+ #
+ my @removed = ();
+ foreach (0 .. $length - 1) {
+ my $old;
+ my $status = $self->get($offset, $old);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on get($offset, \$old)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+ push @removed, $old;
+
+ $status = $self->del($offset);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on del($offset)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ -- $num_elems;
+ }
+
+ # ...'and replaces them with the elements of LIST, if any.'
+ my $pos = $offset;
+ while (defined (my $elem = shift @list)) {
+ my $old_pos = $pos;
+ my $status;
+ if ($pos >= $num_elems) {
+ $status = $self->put($pos, $elem);
+ }
+ else {
+ $status = $self->put($pos, $elem, $self->R_IBEFORE);
+ }
+
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on put($pos, $elem, ...)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ", error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ die "pos unexpectedly changed from $old_pos to $pos with R_IBEFORE"
+ if $old_pos != $pos;
+
+ ++ $pos;
+ ++ $num_elems;
+ }
+
+ if (wantarray) {
+ # 'In list context, returns the elements removed from the
+ # array.'
+ #
+ return @removed;
+ }
+ elsif (defined wantarray and not wantarray) {
+ # 'In scalar context, returns the last element removed, or
+ # undef if no elements are removed.'
+ #
+ if (@removed) {
+ my $last = pop @removed;
+ return "$last";
+ }
+ else {
+ return undef;
+ }
+ }
+ elsif (not defined wantarray) {
+ # Void context
+ }
+ else { die }
+}
+sub ::DB_File::splice { &SPLICE }
+
+sub find_dup
+{
+ croak "Usage: \$db->find_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($origkey, $value_wanted) = @_ ;
+ my ($key, $value) = ($origkey, 0);
+ my ($status) = 0 ;
+
+ for ($status = $db->seq($key, $value, R_CURSOR() ) ;
+ $status == 0 ;
+ $status = $db->seq($key, $value, R_NEXT() ) ) {
+
+ return 0 if $key eq $origkey and $value eq $value_wanted ;
+ }
+
+ return $status ;
+}
+
+sub del_dup
+{
+ croak "Usage: \$db->del_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($key, $value) = @_ ;
+ my ($status) = $db->find_dup($key, $value) ;
+ return $status if $status != 0 ;
+
+ $status = $db->del($key, R_CURSOR() ) ;
+ return $status ;
+}
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+
+ # iterate through the database until either EOF ($status == 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $db->seq($key, $value, R_CURSOR()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $db->seq($key, $value, R_NEXT()) ) {
+
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DB_File - Perl5 access to Berkeley DB version 1.x
+
+=head1 SYNOPSIS
+
+ use DB_File;
+
+ [$X =] tie %hash, 'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
+ [$X =] tie %hash, 'DB_File', $filename, $flags, $mode, $DB_BTREE ;
+ [$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
+
+ $status = $X->del($key [, $flags]) ;
+ $status = $X->put($key, $value [, $flags]) ;
+ $status = $X->get($key, $value [, $flags]) ;
+ $status = $X->seq($key, $value, $flags) ;
+ $status = $X->sync([$flags]) ;
+ $status = $X->fd ;
+
+ # BTREE only
+ $count = $X->get_dup($key) ;
+ @list = $X->get_dup($key) ;
+ %list = $X->get_dup($key, 1) ;
+ $status = $X->find_dup($key, $value) ;
+ $status = $X->del_dup($key, $value) ;
+
+ # RECNO only
+ $a = $X->length;
+ $a = $X->pop ;
+ $X->push(list);
+ $a = $X->shift;
+ $X->unshift(list);
+ @r = $X->splice(offset, length, elements);
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ untie %hash ;
+ untie @array ;
+
+=head1 DESCRIPTION
+
+B<DB_File> is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1.x (if you have a newer
+version of DB, see L<Using DB_File with Berkeley DB version 2 or greater>).
+It is assumed that you have a copy of the Berkeley DB manual pages at
+hand when reading this documentation. The interface defined here
+mirrors the Berkeley DB interface closely.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. B<DB_File> provides an interface to all
+three of the database types currently supported by Berkeley DB.
+
+The file types are:
+
+=over 5
+
+=item B<DB_HASH>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using DB_HASH are not compatible with any of the
+other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most
+applications, is built into Berkeley DB. If you do need to use your own
+hashing algorithm it is possible to write your own in Perl and have
+B<DB_File> use it instead.
+
+=item B<DB_BTREE>
+
+The btree format allows arbitrary key/value pairs to be stored in a
+sorted, balanced binary tree.
+
+As with the DB_HASH format, it is possible to provide a user defined
+Perl routine to perform the comparison of keys. By default, though, the
+keys are stored in lexical order.
+
+=item B<DB_RECNO>
+
+DB_RECNO allows both fixed-length and variable-length flat text files
+to be manipulated using the same key/value pair interface as in DB_HASH
+and DB_BTREE. In this case the key will consist of a record (line)
+number.
+
+=back
+
+=head2 Using DB_File with Berkeley DB version 2 or greater
+
+Although B<DB_File> is intended to be used with Berkeley DB version 1,
+it can also be used with version 2, 3 or 4. In this case the interface is
+limited to the functionality provided by Berkeley DB 1.x. Anywhere the
+version 2 or greater interface differs, B<DB_File> arranges for it to work
+like version 1. This feature allows B<DB_File> scripts that were built
+with version 1 to be migrated to version 2 or greater without any changes.
+
+If you want to make use of the new features available in Berkeley DB
+2.x or greater, use the Perl module B<BerkeleyDB> instead.
+
+B<Note:> The database file format has changed multiple times in Berkeley
+DB version 2, 3 and 4. If you cannot recreate your databases, you
+must dump any existing databases with either the C<db_dump> or the
+C<db_dump185> utility that comes with Berkeley DB.
+Once you have rebuilt DB_File to use Berkeley DB version 2 or greater,
+your databases can be recreated using C<db_load>. Refer to the Berkeley DB
+documentation for further details.
+
+Please read L<"COPYRIGHT"> before using version 2.x or greater of Berkeley
+DB with DB_File.
+
+=head2 Interface to Berkeley DB
+
+B<DB_File> allows access to Berkeley DB files using the tie() mechanism
+in Perl 5 (for full details, see L<perlfunc/tie()>). This facility
+allows B<DB_File> to access Berkeley DB files using either an
+associative array (for DB_HASH & DB_BTREE file types) or an ordinary
+array (for the DB_RECNO file type).
+
+In addition to the tie() interface, it is also possible to access most
+of the functions provided in the Berkeley DB API directly.
+See L<THE API INTERFACE>.
+
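+As a short illustration (the filename C<example.db> is invented for this
+sketch), the two styles can be combined by capturing the object returned
+by tie() and calling API methods on it:
+
+    use DB_File ;
+
+    my %h ;
+    my $db = tie %h, "DB_File", "example.db", O_RDWR|O_CREAT, 0666, $DB_HASH
+        or die "Cannot open file 'example.db': $!\n";
+
+    $h{"fruit"} = "apple" ;    # the tied hash interface
+    $db->sync ;                # a direct API call on the same database
+
+    undef $db ;
+    untie %h ;
+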
+=head2 Opening a Berkeley DB Database File
+
+Berkeley DB uses the function dbopen() to open or create a database.
+Here is the C prototype for dbopen():
+
+ DB*
+ dbopen (const char * file, int flags, int mode,
+ DBTYPE type, const void * openinfo)
+
+The parameter C<type> is an enumeration which specifies which of the 3
+interface methods (DB_HASH, DB_BTREE or DB_RECNO) is to be used.
+Depending on which of these is actually chosen, the final parameter,
+I<openinfo> points to a data structure which allows tailoring of the
+specific interface method.
+
+This interface is handled slightly differently in B<DB_File>. Here is
+an equivalent call using B<DB_File>:
+
+ tie %array, 'DB_File', $filename, $flags, $mode, $DB_HASH ;
+
+The C<filename>, C<flags> and C<mode> parameters are the direct
+equivalent of their dbopen() counterparts. The final parameter $DB_HASH
+performs the function of both the C<type> and C<openinfo> parameters in
+dbopen().
+
+In the example above $DB_HASH is actually a pre-defined reference to a
+hash object. B<DB_File> has three of these pre-defined references.
+Apart from $DB_HASH, there is also $DB_BTREE and $DB_RECNO.
+
+The keys allowed in each of these pre-defined references is limited to
+the names used in the equivalent C structure. So, for example, the
+$DB_HASH reference will only allow keys called C<bsize>, C<cachesize>,
+C<ffactor>, C<hash>, C<lorder> and C<nelem>.
+
+To change one of these elements, just assign to it like this:
+
+ $DB_HASH->{'cachesize'} = 10000 ;
+
+The three predefined variables $DB_HASH, $DB_BTREE and $DB_RECNO are
+usually adequate for most applications. If you do need to create extra
+instances of these objects, constructors are available for each file
+type.
+
+Here are examples of the constructors and the valid options available
+for DB_HASH, DB_BTREE and DB_RECNO respectively.
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'bsize'} ;
+ $a->{'cachesize'} ;
+ $a->{'ffactor'};
+ $a->{'hash'} ;
+ $a->{'lorder'} ;
+ $a->{'nelem'} ;
+
+ $b = new DB_File::BTREEINFO ;
+ $b->{'flags'} ;
+ $b->{'cachesize'} ;
+ $b->{'maxkeypage'} ;
+ $b->{'minkeypage'} ;
+ $b->{'psize'} ;
+ $b->{'compare'} ;
+ $b->{'prefix'} ;
+ $b->{'lorder'} ;
+
+ $c = new DB_File::RECNOINFO ;
+ $c->{'bval'} ;
+ $c->{'cachesize'} ;
+ $c->{'psize'} ;
+ $c->{'flags'} ;
+ $c->{'lorder'} ;
+ $c->{'reclen'} ;
+ $c->{'bfname'} ;
+
+The values stored in the hashes above are mostly the direct equivalent
+of their C counterpart. Like their C counterparts, all are set to a
+default values - that means you don't have to set I<all> of the
+values when you only want to change one. Here is an example:
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'cachesize'} = 12345 ;
+ tie %y, 'DB_File', "filename", $flags, 0777, $a ;
+
+A few of the options need extra discussion here. When used, the C
+equivalent of the keys C<hash>, C<compare> and C<prefix> store pointers
+to C functions. In B<DB_File> these keys are used to store references
+to Perl subs. Below are templates for each of the subs:
+
+ sub hash
+ {
+ my ($data) = @_ ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ sub compare
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ sub prefix
+ {
+ my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+See L<Changing the BTREE sort order> for an example of using the
+C<compare> template.
+
+If you are using the DB_RECNO interface and you intend making use of
+C<bval>, you should check out L<The 'bval' Option>.
+
+=head2 Default Parameters
+
+It is possible to omit some or all of the final 4 parameters in the
+call to C<tie> and let them take default values. As DB_HASH is the most
+common file format used, the call:
+
+ tie %A, "DB_File", "filename" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", "filename", O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+It is also possible to omit the filename parameter as well, so the
+call:
+
+ tie %A, "DB_File" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", undef, O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+See L<In Memory Databases> for a discussion on the use of C<undef>
+in place of a filename.
+
+=head2 In Memory Databases
+
+Berkeley DB allows the creation of in-memory databases by using NULL
+(that is, a C<(char *)0> in C) in place of the filename. B<DB_File>
+uses C<undef> instead of NULL to provide this functionality.
+
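+For instance, a minimal sketch of an in-memory hash database (the data
+only lives for the lifetime of the tie):
+
+    use DB_File ;
+
+    my %mem ;
+    tie %mem, "DB_File", undef, O_RDWR|O_CREAT, 0666, $DB_HASH
+        or die "Cannot create in-memory database: $!\n";
+
+    $mem{"colour"} = "red" ;
+    print "stored\n" if $mem{"colour"} ;
+
+    untie %mem ;
+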
+=head1 DB_HASH
+
+The DB_HASH file format is probably the most commonly used of the three
+file formats that B<DB_File> supports. It is also very straightforward
+to use.
+
+=head2 A Simple Example
+
+This example shows how to create a database, add key/value pairs to the
+database, delete keys/value pairs and finally how to enumerate the
+contents of the database.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ our (%h, $k, $v) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0666, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys are retrieved in
+an apparently random order.
+
+=head1 DB_BTREE
+
+The DB_BTREE format is useful when you want to store data in a given
+order. By default the keys will be stored in lexical order, but as you
+will see from the example shown in the next section, it is very easy to
+define your own sorting function.
+
+=head2 Changing the BTREE sort order
+
+This script shows how to override the default sorting algorithm that
+BTREE uses. Instead of using the normal lexical ordering, a case
+insensitive compare function will be used.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=item 3.
+
+Duplicate keys are entirely defined by the comparison function.
+In the case-insensitive example above, the keys: 'KEY' and 'key'
+would be considered duplicates, and assigning to the second one
+would overwrite the first. If duplicates are allowed for (with the
+R_DUPS flag discussed below), only a single copy of duplicate keys
+is stored in the database --- so (again with example above) assigning
+three values to the keys: 'KEY', 'Key', and 'key' would leave just
+the first key: 'KEY' in the database with three values. For some
+situations this results in information loss, so care should be taken
+to provide fully qualified comparison functions when necessary.
+For example, the above comparison routine could be modified to
+additionally compare case-sensitively if two keys are equal in the
+case insensitive comparison:
+
+ sub compare {
+ my($key1, $key2) = @_;
+ lc $key1 cmp lc $key2 ||
+ $key1 cmp $key2;
+ }
+
+And now you will only have duplicates when the keys themselves
+are truly the same. (note: in versions of the db library prior to
+about November 1996, such duplicate keys were retained so it was
+possible to recover the original keys in sets of keys that
+compared as equal).
+
+
+=back
+
+=head2 Handling Duplicate Keys
+
+The BTREE file type optionally allows a single key to be associated
+with an arbitrary number of values. This option is enabled by setting
+the flags element of C<$DB_BTREE> to R_DUP when creating the database.
+
+There are some difficulties in using the tied hash interface if you
+want to manipulate a BTREE database with duplicate keys. Consider this
+code:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, %h) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (sort keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Smith -> John
+ Wall -> Larry
+ Wall -> Larry
+ Wall -> Larry
+ mouse -> mickey
+
+As you can see, 3 records have been successfully created with key C<Wall>
+- the only thing is, when they are retrieved from the database they
+I<seem> to have the same value, namely C<Larry>. The problem is caused
+by the way that the associative array interface works. Basically, when
+the associative array interface is used to fetch the value associated
+with a given key, it will only ever retrieve the first value.
+
+Although it may not be immediately obvious from the code above, the
+associative array interface can be used to write values with duplicate
+keys, but it cannot be used to read them back from the database.
+
+The way to get around this problem is to use the Berkeley DB API method
+called C<seq>. This method allows sequential access to key/value
+pairs. See L<THE API INTERFACE> for details of both the C<seq> method
+and the API in general.
+
+Here is the script above rewritten using the C<seq> API method.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $status, $key, $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+ undef $x ;
+ untie %h ;
+
+that prints:
+
+ Smith -> John
+ Wall -> Brick
+ Wall -> Brick
+ Wall -> Larry
+ mouse -> mickey
+
+This time we have got all the key/value pairs, including the multiple
+values associated with the key C<Wall>.
+
+To make life easier when dealing with duplicate keys, B<DB_File> comes with
+a few utility methods.
+
+=head2 The get_dup() Method
+
+The C<get_dup> method assists in
+reading duplicate values from BTREE databases. The method can take the
+following forms:
+
+ $count = $x->get_dup($key) ;
+ @list = $x->get_dup($key) ;
+ %list = $x->get_dup($key, 1) ;
+
+In a scalar context the method returns the number of values associated
+with the key, C<$key>.
+
+In list context, it returns all the values which match C<$key>. Note
+that the values will be returned in an apparently random order.
+
+In list context, if the second parameter is present and evaluates
+TRUE, the method returns an associative array. The keys of the
+associative array correspond to the values that matched in the BTREE
+and the values of the array are a count of the number of times that
+particular value occurred in the BTREE.
+
+So assuming the database created above, we can use C<get_dup> like
+this:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+
+and it will print:
+
+ Wall occurred 3 times
+ Larry is there
+ There are 2 Brick Walls
+ Wall => [Brick Brick Larry]
+ Smith => [John]
+ Dog => []
+
+=head2 The find_dup() Method
+
+ $status = $X->find_dup($key, $value) ;
+
+This method checks for the existence of a specific key/value pair. If the
+pair exists, the cursor is left pointing to the pair and the method
+returns 0. Otherwise the method returns a non-zero value.
+
+Assuming the database from the previous example:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is there
+ Harry Wall is not there
+
+
+=head2 The del_dup() Method
+
+ $status = $X->del_dup($key, $value) ;
+
+This method deletes a specific key/value pair. It returns
+0 if they exist and have been deleted successfully.
+Otherwise the method returns a non-zero value.
+
+Again assuming the existence of the C<tree> database
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is not there
+
+=head2 Matching Partial Keys
+
+The BTREE interface has a feature which allows partial keys to be
+matched. This functionality is I<only> available when the C<seq> method
+is used along with the R_CURSOR flag.
+
+ $x->seq($key, $value, R_CURSOR) ;
+
+Here is the relevant quote from the dbopen man page where it defines
+the use of the R_CURSOR flag with seq:
+
+ Note, for the DB_BTREE access method, the returned key is not
+ necessarily an exact match for the specified key. The returned key
+ is the smallest key greater than or equal to the specified key,
+ permitting partial key matches and range searches.
+
+In the example script below, the C<match> sub uses this feature to find
+and print the first matching key/value pair given a partial key.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($filename, $x, %h, $st, $key, $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+Here is the output:
+
+ IN ORDER
+ Smith -> John
+ Wall -> Larry
+ Walls -> Brick
+ mouse -> mickey
+
+ PARTIAL MATCH
+ Wa -> Wall -> Larry
+ A -> Smith -> John
+ a -> mouse -> mickey
+
+=head1 DB_RECNO
+
+DB_RECNO provides an interface to flat text files. Both variable and
+fixed length records are supported.
+
+In order to make RECNO more compatible with Perl, the array offset for
+all RECNO arrays begins at 0 rather than 1 as in Berkeley DB.
+
+As with normal Perl arrays, a RECNO array can be accessed using
+negative indexes. The index -1 refers to the last element of the array,
+-2 the second last, and so on. Attempting to access an element before
+the start of the array will raise a fatal run-time error.
+
+=head2 The 'bval' Option
+
+The operation of the bval option warrants some discussion. Here is the
+definition of bval from the Berkeley DB 1.85 recno manual page:
+
+ The delimiting byte to be used to mark the end of a
+ record for variable-length records, and the pad charac-
+ ter for fixed-length records. If no value is speci-
+ fied, newlines (``\n'') are used to mark the end of
+ variable-length records and fixed-length records are
+ padded with spaces.
+
+The second sentence is wrong. In actual fact bval will only default to
+C<"\n"> when the openinfo parameter in dbopen is NULL. If a non-NULL
+openinfo parameter is used at all, the value that happens to be in bval
+will be used. That means you always have to specify bval when making
+use of any of the options in the openinfo parameter. This documentation
+error will be fixed in the next release of Berkeley DB.
+
+That clarifies the situation with regard to Berkeley DB itself. What
+about B<DB_File>? Well, the behavior defined in the quote above is
+quite useful, so B<DB_File> conforms to it.
+
+That means that you can specify other options (e.g. cachesize) and
+still have bval default to C<"\n"> for variable length records, and
+space for fixed length records.
+
+Also note that the bval option only allows you to specify a single byte
+as a delimiter.
+
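+For example (a sketch only -- the filename C<records> is invented), a
+variable-length RECNO file could use a NUL byte as its delimiter while
+still setting other openinfo options:
+
+    use DB_File ;
+
+    my @recs ;
+
+    # override the default "\n" record delimiter with a NUL byte
+    $DB_RECNO->{'bval'} = "\0" ;
+    $DB_RECNO->{'cachesize'} = 8192 ;
+
+    tie @recs, "DB_File", "records", O_RDWR|O_CREAT, 0666, $DB_RECNO
+        or die "Cannot open file 'records': $!\n";
+
+    untie @recs ;
+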
+=head2 A Simple Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head2 Extra RECNO Methods
+
+If you are using a version of Perl earlier than 5.004_57, the tied
+array interface is quite limited. In the example script above,
+C<push>, C<pop>, C<shift>, C<unshift> and determining the array length
+will not work with a tied array.
+
+To make the interface more useful for older versions of Perl, a number
+of methods are supplied with B<DB_File> to simulate the missing array
+operations. All these methods are accessed via the object returned from
+the tie call.
+
+Here are the methods:
+
+=over 5
+
+=item B<$X-E<gt>push(list) ;>
+
+Pushes the elements of C<list> to the end of the array.
+
+=item B<$value = $X-E<gt>pop ;>
+
+Removes and returns the last element of the array.
+
+=item B<$X-E<gt>shift>
+
+Removes and returns the first element of the array.
+
+=item B<$X-E<gt>unshift(list) ;>
+
+Pushes the elements of C<list> to the start of the array.
+
+=item B<$X-E<gt>length>
+
+Returns the number of elements in the array.
+
+=item B<$X-E<gt>splice(offset, length, elements);>
+
+Returns a splice of the array (see the short sketch after this list).
+
+=back
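+
+Here is a minimal sketch showing the C<splice> and C<length> methods in
+use (the filename is hypothetical):
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my @list ;
+ my $X = tie @list, "DB_File", "list", O_RDWR|O_CREAT, 0666, $DB_RECNO
+     or die "Cannot open list: $!\n" ;
+
+ $X->push("a", "b", "c", "d") ;
+
+ # remove the two middle elements and insert a replacement
+ my @removed = $X->splice(1, 2, "middle") ;
+
+ print "removed ", scalar @removed, " elements\n" ;
+ print "the array now holds ", $X->length, " elements\n" ;
+
+ undef $X ;
+ untie @list ;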
+
+=head2 Another Example
+
+Here is a more complete example that makes use of some of the methods
+described above. It also makes use of the API interface directly (see
+L<THE API INTERFACE>).
+
+ use warnings ;
+ use strict ;
+ my (@h, $H, $file, $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+and this is what it outputs:
+
+ ORIGINAL
+ 0: zero
+ 1: one
+ 2: two
+ 3: three
+ 4: four
+
+ The last record was [four]
+ The first record was [zero]
+
+ REVERSE
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+ REVERSE again
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+Notes:
+
+=over 5
+
+=item 1.
+
+Rather than iterating through the array, C<@h>, like this:
+
+ foreach $i (@h)
+
+it is necessary to use either this:
+
+ foreach $i (0 .. $H->length - 1)
+
+or this:
+
+ for ($a = $H->seq($k, $v, R_FIRST) ;
+      $a == 0 ;
+      $a = $H->seq($k, $v, R_NEXT) )
+
+=item 2.
+
+Notice that both times the C<put> method was used the record index was
+specified using a variable, C<$i>, rather than the literal value
+itself. This is because C<put> will return the record number of the
+inserted line via that parameter.
+
+=back
+
+=head1 THE API INTERFACE
+
+As well as accessing Berkeley DB using a tied hash or array, it is also
+possible to make direct use of most of the API functions defined in the
+Berkeley DB documentation.
+
+To do this you need to store a copy of the object returned from the tie.
+
+ $db = tie %hash, "DB_File", "filename" ;
+
+Once you have done that, you can access the Berkeley DB API functions
+as B<DB_File> methods directly like this:
+
+ $db->put($key, $value, R_NOOVERWRITE) ;
+
+B<Important:> If you have saved a copy of the object returned from
+C<tie>, the underlying database file will I<not> be closed until both
+the tied variable is untied and all copies of the saved object are
+destroyed.
+
+ use DB_File ;
+ $db = tie %hash, "DB_File", "filename"
+ or die "Cannot tie filename: $!" ;
+ ...
+ undef $db ;
+ untie %hash ;
+
+See L<The untie() Gotcha> for more details.
+
+All the functions defined in L<dbopen> are available except for
+close() and dbopen() itself. The B<DB_File> method interface to the
+supported functions has been implemented to mirror the way Berkeley DB
+works whenever possible. In particular note that:
+
+=over 5
+
+=item *
+
+The methods return a status value. All return 0 on success.
+All return -1 to signify an error and set C<$!> to the exact
+error code. The return code 1 generally (but not always) means that the
+key specified did not exist in the database.
+
+Other return codes are defined. See below and in the Berkeley DB
+documentation for details. The Berkeley DB documentation should be used
+as the definitive source. A short sketch showing how these status
+values are typically checked follows this list.
+
+=item *
+
+Whenever a Berkeley DB function returns data via one of its parameters,
+the equivalent B<DB_File> method does exactly the same.
+
+=item *
+
+If you are careful, it is possible to mix API calls with the tied
+hash/array interface in the same piece of code. Although only a few of
+the methods used to implement the tied interface currently make use of
+the cursor, you should always assume that the cursor has been changed
+any time the tied hash/array interface is used. As an example, this
+code will probably not do what you expect:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the second key/value pair.
+ # oops, it didn't, it got the last key/value pair!
+ $X->seq($key, $value, R_NEXT) ;
+
+The code above can be rearranged to get around the problem, like this:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # Get the second key/value pair.
+ # worked this time.
+ $X->seq($key, $value, R_NEXT) ;
+
+=back
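+
+Here is a minimal sketch showing how these status values are typically
+checked (the key is hypothetical and C<$X> is the object returned from
+the C<tie> above):
+
+ my ($value, $status) ;
+
+ $status = $X->get("some key", $value) ;
+
+ if ($status == 0)
+     { print "found: $value\n" }
+ elsif ($status == 1)
+     { print "key is not in the database\n" }
+ else
+     { die "get failed: $!\n" }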
+
+All the constants defined in L<dbopen> for use in the flags parameters
+in the methods defined below are also available. Refer to the Berkeley
+DB documentation for the precise meaning of the flags values.
+
+Below is a list of the methods available.
+
+=over 5
+
+=item B<$status = $X-E<gt>get($key, $value [, $flags]) ;>
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. The value read from the database is returned in the
+C<$value> parameter.
+
+If the key does not exist the method returns 1.
+
+No flags are currently defined for this method.
+
+=item B<$status = $X-E<gt>put($key, $value [, $flags]) ;>
+
+Stores the key/value pair in the database.
+
+If you use either the R_IAFTER or R_IBEFORE flags, the C<$key> parameter
+will have the record number of the inserted key/value pair set.
+
+Valid flags are R_CURSOR, R_IAFTER, R_IBEFORE, R_NOOVERWRITE and
+R_SETCURSOR.
+
+=item B<$status = $X-E<gt>del($key [, $flags]) ;>
+
+Removes all key/value pairs with key C<$key> from the database.
+
+A return code of 1 means that the requested key was not in the
+database.
+
+R_CURSOR is the only valid flag at present.
+
+=item B<$status = $X-E<gt>fd ;>
+
+Returns the file descriptor for the underlying database.
+
+See L<Locking: The Trouble with fd> for an explanation of why you should
+not use C<fd> to lock your database.
+
+=item B<$status = $X-E<gt>seq($key, $value, $flags) ;>
+
+This interface allows sequential retrieval from the database. See
+L<dbopen> for full details.
+
+Both the C<$key> and C<$value> parameters will be set to the key/value
+pair read from the database.
+
+The flags parameter is mandatory. The valid flag values are R_CURSOR,
+R_FIRST, R_LAST, R_NEXT and R_PREV.
+
+=item B<$status = $X-E<gt>sync([$flags]) ;>
+
+Flushes any cached buffers to disk.
+
+R_RECNOSYNC is the only valid flag at present.
+
+=back
+
+=head1 DBM FILTERS
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a
+DBM database.
+
+There are four methods associated with DBM Filters. All work identically,
+and each is used to install (or uninstall) a single DBM Filter. Each
+expects a single parameter, namely a reference to a sub. The only
+difference between them is the place that the filter is installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter, pass C<undef> to it.
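+
+For example, assuming C<$db> holds the object returned from C<tie>, this
+minimal sketch installs a store-key filter, later replaces it (keeping a
+reference to the old one), and finally removes it altogether:
+
+ # install a filter
+ $db->filter_store_key( sub { $_ = lc $_ } ) ;
+
+ # replace it -- the previously installed filter is returned
+ my $old_filter = $db->filter_store_key( sub { $_ = uc $_ } ) ;
+
+ # remove the filter completely
+ $db->filter_store_key(undef) ;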
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database
+that you need to share with a third-party C application. The C application
+assumes that I<all> keys and values are NULL terminated. Unfortunately
+when Perl writes to DBM databases it doesn't use NULL termination, so
+your Perl application will have to manage NULL termination itself. When
+you write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+adds the terminating NULL to all keys and values whenever you write to
+the database, and removes it when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 HINTS AND TIPS
+
+
+=head2 Locking: The Trouble with fd
+
+Until version 1.72 of this module, the recommended technique for locking
+B<DB_File> databases was to flock the filehandle returned from the "fd"
+function. Unfortunately this technique has been shown to be fundamentally
+flawed (Kudos to David Harris for tracking this down). Use it at your own
+peril!
+
+The locking technique went like this.
+
+ $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666)
+ || die "dbcreat /tmp/foo.db $!";
+ $fd = $db->fd;
+ open(DB_FH, "+<&=$fd") || die "dup $!";
+ flock (DB_FH, LOCK_EX) || die "flock: $!";
+ ...
+ $db{"Tom"} = "Jerry" ;
+ ...
+ flock(DB_FH, LOCK_UN);
+ undef $db;
+ untie %db;
+ close(DB_FH);
+
+In simple terms, this is what happens:
+
+=over 5
+
+=item 1.
+
+Use "tie" to open the database.
+
+=item 2.
+
+Lock the database with fd & flock.
+
+=item 3.
+
+Read & Write to the database.
+
+=item 4.
+
+Unlock and close the database.
+
+=back
+
+Here is the crux of the problem. A side-effect of opening the B<DB_File>
+database in step 1 is that an initial block from the database will get
+read from disk and cached in memory.
+
+To see why this is a problem, consider what can happen when two processes,
+say "A" and "B", both want to update the same B<DB_File> database
+using the locking steps outlined above. Assume process "A" has already
+opened the database and has a write lock, but it hasn't actually updated
+the database yet (it has finished step 2, but not started step 3 yet). Now
+process "B" tries to open the same database - step 1 will succeed,
+but it will block on step 2 until process "A" releases the lock. The
+important thing to notice here is that at this point in time both
+processes will have cached identical initial blocks from the database.
+
+Now process "A" updates the database and happens to change some of the
+data held in the initial buffer. Process "A" terminates, flushing
+all cached data to disk and releasing the database lock. At this point
+the database on disk will correctly reflect the changes made by process
+"A".
+
+With the lock released, process "B" can now continue. It also updates the
+database and unfortunately it too modifies the data that was in its
+initial buffer. Once that data gets flushed to disk it will overwrite
+some/all of the changes process "A" made to the database.
+
+The result of this scenario is at best a database that doesn't contain
+what you expect. At worst the database will be corrupted.
+
+The above won't happen every time competing processes update the same
+B<DB_File> database, but it does illustrate why the technique should
+not be used.
+
+=head2 Safe ways to lock a database
+
+Starting with version 2.x, Berkeley DB has internal support for locking.
+The companion module to this one, B<BerkeleyDB>, provides an interface
+to this locking functionality. If you are serious about locking
+Berkeley DB databases, I strongly recommend using B<BerkeleyDB>.
+
+If using B<BerkeleyDB> isn't an option, there are a number of modules
+available on CPAN that can be used to implement locking. Each one
+implements locking differently and has different goals in mind. It is
+therefore worth knowing the difference, so that you can pick the right
+one for your application. Here are the three locking wrappers:
+
+=over 5
+
+=item B<Tie::DB_Lock>
+
+A B<DB_File> wrapper which creates copies of the database file for
+read access, so that you have a kind of a multiversioning concurrent read
+system. However, updates are still serial. Use for databases where reads
+may be lengthy and consistency problems may occur.
+
+=item B<Tie::DB_LockFile>
+
+A B<DB_File> wrapper that has the ability to lock and unlock the database
+while it is being used. Avoids the tie-before-flock problem by simply
+re-tie-ing the database when you get or drop a lock. Because of the
+flexibility in dropping and re-acquiring the lock in the middle of a
+session, this can be massaged into a system that will work with long
+updates and/or reads if the application follows the hints in the POD
+documentation.
+
+=item B<DB_File::Lock>
+
+An extremely lightweight B<DB_File> wrapper that simply flocks a lockfile
+before tie-ing the database and drops the lock after the untie. Allows
+one to use the same lockfile for multiple databases to avoid deadlock
+problems, if desired. Use for databases where updates and reads are
+quick and simple flock locking semantics are enough (a sketch of this
+general lockfile-before-tie approach follows this list).
+
+=back
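+
+As mentioned above, B<DB_File::Lock> works by flocking a separate
+lockfile before the tie. Here is a minimal sketch of that general
+lockfile-before-tie approach if you want to do it by hand (the file
+names are hypothetical and error handling is kept to a minimum):
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl qw(:DEFAULT :flock) ;
+
+ my %hash ;
+
+ # take the lock *before* the database is opened (and its first
+ # block cached), and drop it only after the untie
+ open (LOCKFH, ">mydb.lock") || die "Cannot open lockfile: $!\n" ;
+ flock (LOCKFH, LOCK_EX)     || die "Cannot lock lockfile: $!\n" ;
+
+ tie %hash, 'DB_File', "mydb", O_RDWR|O_CREAT, 0666, $DB_HASH
+     or die "Cannot open mydb: $!\n" ;
+
+ $hash{"Tom"} = "Jerry" ;
+
+ untie %hash ;
+ flock (LOCKFH, LOCK_UN) ;
+ close (LOCKFH) ;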
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings are
+not. See L<DBM FILTERS> for a generic way to work around this problem.
+
+Here is a real example. Netscape 2.0 keeps a record of the locations you
+visit along with the time you last visited them in a DB_HASH database.
+This is usually stored in the file F<~/.netscape/history.db>. The key
+field in the database is the location string and the value field is the
+time the location was last visited stored as a 4 byte binary value.
+
+If you haven't already guessed, the location string is stored with a
+terminating NULL. This means you need to be careful when accessing the
+database.
+
+Here is a snippet of code that is loosely based on Tom Christiansen's
+I<ggh> script (available from your nearest CPAN archive in
+F<authors/id/TOMC/scripts/nshist.gz>).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($dotdir, $HISTORY, %hist_db, $href, $binary_time, $date) ;
+ $dotdir = $ENV{HOME} || $ENV{LOGNAME};
+
+ $HISTORY = "$dotdir/.netscape/history.db";
+
+ tie %hist_db, 'DB_File', $HISTORY
+ or die "Cannot open $HISTORY: $!\n" ;
+
+ # Dump the complete database
+ while ( ($href, $binary_time) = each %hist_db ) {
+
+ # remove the terminating NULL
+ $href =~ s/\x00$// ;
+
+ # convert the binary time into a user friendly string
+ $date = localtime unpack("V", $binary_time);
+ print "$date $href\n" ;
+ }
+
+ # check for the existence of a specific key
+ # remember to add the NULL
+ if ( $binary_time = $hist_db{"http://mox.perl.com/\x00"} ) {
+ $date = localtime unpack("V", $binary_time) ;
+ print "Last visited mox.perl.com on $date\n" ;
+ }
+ else {
+ print "Never visited mox.perl.com\n"
+ }
+
+ untie %hist_db ;
+
+=head2 The untie() Gotcha
+
+If you make use of the Berkeley DB API, it is I<very> strongly
+recommended that you read L<perltie/The untie Gotcha>.
+
+Even if you don't currently make use of the API interface, it is still
+worth reading it.
+
+Here is an example which illustrates the problem from a B<DB_File>
+perspective:
+
+ use DB_File ;
+ use Fcntl ;
+
+ my %x ;
+ my $X ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_TRUNC
+ or die "Cannot tie first time: $!" ;
+
+ $x{123} = 456 ;
+
+ untie %x ;
+
+ tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ or die "Cannot tie second time: $!" ;
+
+ untie %x ;
+
+When run, the script will produce this error message:
+
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+Although the error message above refers to the second tie() statement
+in the script, the source of the problem is really with the untie()
+statement that precedes it.
+
+Having read L<perltie> you will probably have already guessed that the
+error is caused by the extra copy of the tied object stored in C<$X>.
+If you haven't, then the problem boils down to the fact that the
+B<DB_File> destructor, DESTROY, will not be called until I<all>
+references to the tied object are destroyed. Both the tied variable,
+C<%x>, and C<$X> above hold a reference to the object. The call to
+untie() will destroy the first, but C<$X> still holds a valid
+reference, so the destructor will not get called and the database file
+F<tst.fil> will remain open. The fact that Berkeley DB then reports the
+attempt to open a database that is already open via the catch-all
+"Invalid argument" doesn't help.
+
+If you run the script with the C<-w> flag the error message becomes:
+
+ untie attempted while 1 inner references still exist at bad.file line 12.
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+which pinpoints the real problem. Finally the script can now be
+modified to fix the original problem by destroying the API object
+before the untie:
+
+ ...
+ $x{123} = 456 ;
+
+ undef $X ;
+ untie %x ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ ...
+
+
+=head1 COMMON QUESTIONS
+
+=head2 Why is there Perl source in my database?
+
+If you look at the contents of a database file created by DB_File,
+there can sometimes be part of a Perl script included in it.
+
+This happens because Berkeley DB uses dynamic memory to allocate
+buffers which will subsequently be written to the database file. Being
+dynamic, the memory could have been used for anything before DB
+malloced it. As Berkeley DB doesn't clear the memory once it has been
+allocated, the unused portions will contain random junk. In the case
+where a Perl script gets written to the database, the random junk will
+correspond to an area of dynamic memory that happened to be used during
+the compilation of the script.
+
+Unless you object to the possibility of part of your Perl script being
+embedded in a database file, this is nothing to worry about.
+
+=head2 How do I store complex data structures with DB_File?
+
+Although B<DB_File> cannot do this directly, there is a module which
+can layer transparently over B<DB_File> to accomplish this feat.
+
+Check out the MLDBM module, available on CPAN in the directory
+F<modules/by-module/MLDBM>.
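+
+Here is a minimal sketch of MLDBM in use. It assumes MLDBM's defaults
+(B<Data::Dumper> serialisation) with B<DB_File> as the underlying DBM,
+and the filename is hypothetical:
+
+ use warnings ;
+ use strict ;
+ use MLDBM qw(DB_File) ;
+ use Fcntl ;
+
+ my %hash ;
+ tie %hash, 'MLDBM', "complex", O_RDWR|O_CREAT, 0666
+     or die "Cannot open complex: $!\n" ;
+
+ # the nested structure is serialised on the way in ...
+ $hash{fred} = { age => 42, pets => [ "cat", "dog" ] } ;
+
+ # ... and rebuilt on the way out
+ my $entry = $hash{fred} ;
+ print "fred is $entry->{age}\n" ;
+
+ untie %hash ;
+
+Remember that each fetch returns a copy of the stored structure, so to
+change nested data you must assign a complete new value back to
+C<$hash{fred}>.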
+
+=head2 What does "Invalid Argument" mean?
+
+You will get this error message when one of the parameters in the
+C<tie> call is wrong. Unfortunately there are quite a few parameters to
+get wrong, so it can be difficult to figure out which one it is.
+
+Here are a couple of possibilities:
+
+=over 5
+
+=item 1.
+
+Attempting to reopen a database without closing it.
+
+=item 2.
+
+Using the O_WRONLY flag.
+
+=back
+
+=head2 What does "Bareword 'DB_File' not allowed" mean?
+
+You will encounter this particular error message when you have the
+C<strict 'subs'> pragma (or the full strict pragma) in your script.
+Consider this script:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my %x ;
+ tie %x, DB_File, "filename" ;
+
+Running it produces the error in question:
+
+ Bareword "DB_File" not allowed while "strict subs" in use
+
+To get around the error, place the word C<DB_File> in either single or
+double quotes, like this:
+
+ tie %x, "DB_File", "filename" ;
+
+Although it might seem like a real pain, it is really worth the effort
+of having a C<use strict> in all your scripts.
+
+=head1 REFERENCES
+
+Articles that are either about B<DB_File> or make use of it.
+
+=over 5
+
+=item 1.
+
+I<Full-Text Searching in Perl>, Tim Kientzle (tkientzle@ddj.com),
+Dr. Dobb's Journal, Issue 295, January 1999, pp 34-41
+
+=back
+
+=head1 HISTORY
+
+Moved to the Changes file.
+
+=head1 BUGS
+
+Some older versions of Berkeley DB had problems with fixed length
+records using the RECNO file format. This problem has been fixed since
+version 1.85 of Berkeley DB.
+
+I am sure there are bugs in the code. If you do find any, or can
+suggest any enhancements, I would welcome your comments.
+
+=head1 AVAILABILITY
+
+B<DB_File> comes with the standard Perl source distribution. Look in
+the directory F<ext/DB_File>. Given the amount of time between releases
+of Perl the version that ships with Perl is quite likely to be out of
+date, so the most recent version can always be found on CPAN (see
+L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/DB_File>.
+
+This version of B<DB_File> will work with version 1.x, 2.x or 3.x of
+Berkeley DB, but is limited to the functionality provided by
+version 1.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+All versions of Berkeley DB are available there.
+
+Alternatively, Berkeley DB version 1 is available at your nearest CPAN
+archive in F<src/misc/db.1.85.tar.gz>.
+
+If you are running IRIX, then get Berkeley DB version 1 from
+F<http://reality.sgi.com/ariel>. It has the patches necessary to
+compile properly on IRIX 5.3.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<DB_File> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of
+ Berkeley DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details.
+
+
+=head1 SEE ALSO
+
+L<perl(1)>, L<dbopen(3)>, L<hash(3)>, L<recno(3)>, L<btree(3)>,
+L<dbmfilter>
+
+=head1 AUTHOR
+
+The DB_File interface was written by Paul Marquess
+E<lt>Paul.Marquess@btinternet.comE<gt>.
+Questions about the DB system itself may be addressed to
+E<lt>db@sleepycat.comE<gt>.
+
+=cut
diff --git a/libdb/perl/DB_File/DB_File.xs b/libdb/perl/DB_File/DB_File.xs
new file mode 100644
index 0000000..fba8ded
--- /dev/null
+++ b/libdb/perl/DB_File/DB_File.xs
@@ -0,0 +1,1951 @@
+/*
+
+ DB_File.xs -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 1st September 2002
+ version 1.805
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 0.1 - Initial Release
+ 0.2 - No longer bombs out if dbopen returns an error.
+ 0.3 - Added some support for multiple btree compares
+ 1.0 - Complete support for multiple callbacks added.
+ Fixed a problem with pushing a value onto an empty list.
+ 1.01 - Fixed a SunOS core dump problem.
+ The return value from TIEHASH wasn't set to NULL when
+ dbopen returned an error.
+ 1.02 - Use ALIAS to define TIEARRAY.
+ Removed some redundant commented code.
+ Merged OS2 code into the main distribution.
+ Allow negative subscripts with RECNO interface.
+ Changed the default flags to O_CREAT|O_RDWR
+ 1.03 - Added EXISTS
+ 1.04 - fixed a couple of bugs in hash_cb. Patches supplied by
+ Dave Hammen, hammen@gothamcity.jsc.nasa.gov
+ 1.05 - Added logic to allow prefix & hash types to be specified via
+ Makefile.PL
+ 1.06 - Minor namespace cleanup: Localized PrintBtree.
+ 1.07 - Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+ 1.08 - No change to DB_File.xs
+ 1.09 - Default mode for dbopen changed to 0666
+ 1.10 - Fixed fd method so that it still returns -1 for
+ in-memory files when db 1.86 is used.
+ 1.11 - No change to DB_File.xs
+ 1.12 - No change to DB_File.xs
+ 1.13 - Tidied up a few casts.
+ 1.14 - Made it illegal to tie an associative array to a RECNO
+ database and an ordinary array to a HASH or BTREE database.
+ 1.50 - Make work with both DB 1.x or DB 2.x
+ 1.51 - Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+ 1.52 - Patch from Gisle Aas <gisle@aas.no> to suppress "use of
+ undefined value" warning with db_get and db_seq.
+ 1.53 - Added DB_RENUMBER to flags for recno.
+ 1.54 - Fixed bug in the fd method
+ 1.55 - Fix for AIX from Jarkko Hietaniemi
+ 1.56 - No change to DB_File.xs
+ 1.57 - added the #undef op to allow building with Threads support.
+ 1.58 - Fixed a problem with the use of sv_setpvn. When the
+ size is specified as 0, it does a strlen on the data.
+ This was ok for DB 1.x, but isn't for DB 2.x.
+ 1.59 - No change to DB_File.xs
+ 1.60 - Some code tidy up
+ 1.61 - added flagSet macro for DB 2.5.x
+ fixed typo in O_RDONLY test.
+ 1.62 - No change to DB_File.xs
+ 1.63 - Fix to alllow DB 2.6.x to build.
+ 1.64 - Tidied up the 1.x to 2.x flags mapping code.
+ Added a patch from Mark Kettenis <kettenis@wins.uva.nl>
+ to fix a flag mapping problem with O_RDONLY on the Hurd
+ 1.65 - Fixed a bug in the PUSH logic.
+ Added BOOT check that using 2.3.4 or greater
+ 1.66 - Added DBM filter code
+ 1.67 - Backed off the use of newSVpvn.
+ Fixed DBM Filter code for Perl 5.004.
+ Fixed a small memory leak in the filter code.
+ 1.68 - fixed backward compatability bug with R_IAFTER & R_IBEFORE
+ merged in the 5.005_58 changes
+ 1.69 - fixed a bug in push -- DB_APPEND wasn't working properly.
+ Fixed the R_SETCURSOR bug introduced in 1.68
+ Added a new Perl variable $DB_File::db_ver
+ 1.70 - Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+ Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+ 1.71 - Support for Berkeley DB version 3.
+ Support for Berkeley DB 2/3's backward compatability mode.
+ Rewrote push
+ 1.72 - No change to DB_File.xs
+ 1.73 - No change to DB_File.xs
+ 1.74 - A call to open needed parenthesised to stop it clashing
+ with a win32 macro.
+ Added Perl core patches 7703 & 7801.
+ 1.75 - Fixed Perl core patch 7703.
+ Added suppport to allow DB_File to be built with
+ Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
+ needed to be changed.
+ 1.76 - No change to DB_File.xs
+ 1.77 - Tidied up a few types used in calling newSVpvn.
+ 1.78 - Core patch 10335, 10372, 10534, 10549, 11051 included.
+ 1.79 - NEXTKEY ignores the input key.
+ Added lots of casts
+ 1.800 - Moved backward compatability code into ppport.h.
+ Use the new constants code.
+ 1.801 - No change to DB_File.xs
+ 1.802 - No change to DB_File.xs
+ 1.803 - FETCH, STORE & DELETE don't map the flags parameter
+ into the equivalent Berkeley DB function anymore.
+ 1.804 - no change.
+ 1.805 - recursion detection added to the callbacks
+ Support for 4.1.X added.
+ Filter code can now cope with read-only $_
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#ifdef _NOT_CORE
+# include "ppport.h"
+#endif
+
+/* Mention DB_VERSION_MAJOR_CFG, DB_VERSION_MINOR_CFG, and
+ DB_VERSION_PATCH_CFG here so that Configure pulls them all in. */
+
+/* Being the Berkeley DB we prefer the <sys/cdefs.h> (which will be
+ * shortly #included by the <db.h>) __attribute__ to the possibly
+ * already defined __attribute__, for example by GNUC or by Perl. */
+
+/* #if DB_VERSION_MAJOR_CFG < 2 */
+#ifndef DB_VERSION_MAJOR
+# undef __attribute__
+#endif
+
+#ifdef COMPAT185
+# include <db_185.h>
+#else
+# include <db.h>
+#endif
+
+/* Wall starts with 5.7.x */
+
+#if PERL_REVISION > 5 || (PERL_REVISION == 5 && PERL_VERSION >= 7)
+
+/* Since we dropped the gccish definition of __attribute__ we will want
+ * to redefine dNOOP, however (so that dTHX continues to work). Yes,
+ * all this means that we can't do attribute checking on the DB_File,
+ * boo, hiss. */
+# ifndef DB_VERSION_MAJOR
+
+# undef dNOOP
+# define dNOOP extern int Perl___notused
+
+ /* Ditto for dXSARGS. */
+# undef dXSARGS
+# define dXSARGS \
+ dSP; dMARK; \
+ I32 ax = mark - PL_stack_base + 1; \
+ I32 items = sp - mark
+
+# endif
+
+/* avoid -Wall; DB_File xsubs never make use of `ix' setup for ALIASes */
+# undef dXSI32
+# define dXSI32 dNOOP
+
+#endif /* Perl >= 5.7 */
+
+#include <fcntl.h>
+
+/* #define TRACE */
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#ifdef DB_VERSION_MAJOR
+
+#if DB_VERSION_MAJOR == 2
+# define BERKELEY_DB_1_OR_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
+#endif
+
+/* map version 2 features & constants onto their version 1 equivalent */
+
+#ifdef DB_Prefix_t
+# undef DB_Prefix_t
+#endif
+#define DB_Prefix_t size_t
+
+#ifdef DB_Hash_t
+# undef DB_Hash_t
+#endif
+#define DB_Hash_t u_int32_t
+
+/* DBTYPE stays the same */
+/* HASHINFO, RECNOINFO and BTREEINFO map to DB_INFO */
+#if DB_VERSION_MAJOR == 2
+ typedef DB_INFO INFO ;
+#else /* DB_VERSION_MAJOR > 2 */
+# define DB_FIXEDLEN (0x8000)
+#endif /* DB_VERSION_MAJOR == 2 */
+
+/* version 2 has db_recno_t in place of recno_t */
+typedef db_recno_t recno_t;
+
+
+#define R_CURSOR DB_SET_RANGE
+#define R_FIRST DB_FIRST
+#define R_IAFTER DB_AFTER
+#define R_IBEFORE DB_BEFORE
+#define R_LAST DB_LAST
+#define R_NEXT DB_NEXT
+#define R_NOOVERWRITE DB_NOOVERWRITE
+#define R_PREV DB_PREV
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define R_SETCURSOR 0x800000
+#else
+# define R_SETCURSOR (-100)
+#endif
+
+#define R_RECNOSYNC 0
+#define R_FIXEDLEN DB_FIXEDLEN
+#define R_DUP DB_DUP
+
+
+#define db_HA_hash h_hash
+#define db_HA_ffactor h_ffactor
+#define db_HA_nelem h_nelem
+#define db_HA_bsize db_pagesize
+#define db_HA_cachesize db_cachesize
+#define db_HA_lorder db_lorder
+
+#define db_BT_compare bt_compare
+#define db_BT_prefix bt_prefix
+#define db_BT_flags flags
+#define db_BT_psize db_pagesize
+#define db_BT_cachesize db_cachesize
+#define db_BT_lorder db_lorder
+#define db_BT_maxkeypage
+#define db_BT_minkeypage
+
+
+#define db_RE_reclen re_len
+#define db_RE_flags flags
+#define db_RE_bval re_pad
+#define db_RE_bfname re_source
+#define db_RE_psize db_pagesize
+#define db_RE_cachesize db_cachesize
+#define db_RE_lorder db_lorder
+
+#define TXN NULL,
+
+#define do_SEQ(db, key, value, flag) (db->cursor->c_get)(db->cursor, &key, &value, flag)
+
+
+#define DBT_flags(x) x.flags = 0
+#define DB_flags(x, v) x |= v
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(flags, bitmask) ((flags) & (bitmask))
+#else
+# define flagSet(flags, bitmask) (((flags) & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#else /* db version 1.x */
+
+#define BERKELEY_DB_1
+#define BERKELEY_DB_1_OR_2
+
+typedef union INFO {
+ HASHINFO hash ;
+ RECNOINFO recno ;
+ BTREEINFO btree ;
+ } INFO ;
+
+
+#ifdef mDB_Prefix_t
+# ifdef DB_Prefix_t
+# undef DB_Prefix_t
+# endif
+# define DB_Prefix_t mDB_Prefix_t
+#endif
+
+#ifdef mDB_Hash_t
+# ifdef DB_Hash_t
+# undef DB_Hash_t
+# endif
+# define DB_Hash_t mDB_Hash_t
+#endif
+
+#define db_HA_hash hash.hash
+#define db_HA_ffactor hash.ffactor
+#define db_HA_nelem hash.nelem
+#define db_HA_bsize hash.bsize
+#define db_HA_cachesize hash.cachesize
+#define db_HA_lorder hash.lorder
+
+#define db_BT_compare btree.compare
+#define db_BT_prefix btree.prefix
+#define db_BT_flags btree.flags
+#define db_BT_psize btree.psize
+#define db_BT_cachesize btree.cachesize
+#define db_BT_lorder btree.lorder
+#define db_BT_maxkeypage btree.maxkeypage
+#define db_BT_minkeypage btree.minkeypage
+
+#define db_RE_reclen recno.reclen
+#define db_RE_flags recno.flags
+#define db_RE_bval recno.bval
+#define db_RE_bfname recno.bfname
+#define db_RE_psize recno.psize
+#define db_RE_cachesize recno.cachesize
+#define db_RE_lorder recno.lorder
+
+#define TXN
+
+#define do_SEQ(db, key, value, flag) (db->dbp->seq)(db->dbp, &key, &value, flag)
+#define DBT_flags(x)
+#define DB_flags(x, v)
+#define flagSet(flags, bitmask) ((flags) & (bitmask))
+
+#endif /* db version 1 */
+
+
+
+#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, 0)
+#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, 0)
+#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, 0)
+
+#define db_sync(db, flags) ((db->dbp)->sync)(db->dbp, flags)
+#define db_get(db, key, value, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#ifdef DB_VERSION_MAJOR
+#define db_DESTROY(db) (!db->aborted && ( db->cursor->c_close(db->cursor),\
+ (db->dbp->close)(db->dbp, 0) ))
+#define db_close(db) ((db->dbp)->close)(db->dbp, 0)
+#define db_del(db, key, flags) (flagSet(flags, R_CURSOR) \
+ ? ((db->cursor)->c_del)(db->cursor, 0) \
+ : ((db->dbp)->del)(db->dbp, NULL, &key, flags) )
+
+#else /* ! DB_VERSION_MAJOR */
+
+#define db_DESTROY(db) (!db->aborted && ((db->dbp)->close)(db->dbp))
+#define db_close(db) ((db->dbp)->close)(db->dbp)
+#define db_del(db, key, flags) ((db->dbp)->del)(db->dbp, &key, flags)
+#define db_put(db, key, value, flags) ((db->dbp)->put)(db->dbp, &key, &value, flags)
+
+#endif /* ! DB_VERSION_MAJOR */
+
+
+#define db_seq(db, key, value, flags) do_SEQ(db, key, value, flags)
+
+typedef struct {
+ DBTYPE type ;
+ DB * dbp ;
+ SV * compare ;
+ bool in_compare ;
+ SV * prefix ;
+ bool in_prefix ;
+ SV * hash ;
+ bool in_hash ;
+ bool aborted ;
+ int in_memory ;
+#ifdef BERKELEY_DB_1_OR_2
+ INFO info ;
+#endif
+#ifdef DB_VERSION_MAJOR
+ DBC * cursor ;
+#endif
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+
+ } DB_File_type;
+
+typedef DB_File_type * DB_File ;
+typedef DBT DBTKEY ;
+
+#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->type != DB_RECNO) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - 1); \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
+ } \
+ }
+
+#define my_SvUV32(sv) ((u_int32_t)SvUV(sv))
+
+#ifdef CAN_PROTOTYPE
+extern void __getBerkeleyDBInfo(void);
+#endif
+
+/* Internal Global Data */
+
+#define MY_CXT_KEY "DB_File::_guts" XS_VERSION
+
+typedef struct {
+ recno_t x_Value;
+ recno_t x_zero;
+ DB_File x_CurrentDB;
+ DBTKEY x_empty;
+} my_cxt_t;
+
+START_MY_CXT
+
+#define Value (MY_CXT.x_Value)
+#define zero (MY_CXT.x_zero)
+#define CurrentDB (MY_CXT.x_CurrentDB)
+#define empty (MY_CXT.x_empty)
+
+#define ERR_BUFF "DB_File::Error"
+
+#ifdef DB_VERSION_MAJOR
+
+static int
+#ifdef CAN_PROTOTYPE
+db_put(DB_File db, DBTKEY key, DBT value, u_int flags)
+#else
+db_put(db, key, value, flags)
+DB_File db ;
+DBTKEY key ;
+DBT value ;
+u_int flags ;
+#endif
+{
+ int status ;
+
+ if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) {
+ DBC * temp_cursor ;
+ DBT l_key, l_value;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor) != 0)
+#else
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor, 0) != 0)
+#endif
+ return (-1) ;
+
+ memset(&l_key, 0, sizeof(l_key));
+ l_key.data = key.data;
+ l_key.size = key.size;
+ memset(&l_value, 0, sizeof(l_value));
+ l_value.data = value.data;
+ l_value.size = value.size;
+
+ if ( temp_cursor->c_get(temp_cursor, &l_key, &l_value, DB_SET) != 0) {
+ (void)temp_cursor->c_close(temp_cursor);
+ return (-1);
+ }
+
+ status = temp_cursor->c_put(temp_cursor, &key, &value, flags);
+ (void)temp_cursor->c_close(temp_cursor);
+
+ return (status) ;
+ }
+
+
+ if (flagSet(flags, R_CURSOR)) {
+ return ((db->cursor)->c_put)(db->cursor, &key, &value, DB_CURRENT);
+ }
+
+ if (flagSet(flags, R_SETCURSOR)) {
+ if ((db->dbp)->put(db->dbp, NULL, &key, &value, 0) != 0)
+ return -1 ;
+ return ((db->cursor)->c_get)(db->cursor, &key, &value, DB_SET_RANGE);
+
+ }
+
+ return ((db->dbp)->put)(db->dbp, NULL, &key, &value, flags) ;
+
+}
+
+#endif /* DB_VERSION_MAJOR */
+
+static void
+tidyUp(DB_File db)
+{
+ /* db_DESTROY(db); */
+ db->aborted = TRUE ;
+}
+
+
+static int
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_compare(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_compare(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif /* CAN_PROTOTYPE */
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_compare(const DBT *key1, const DBT *key2)
+#else
+btree_compare(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+ DB_File keep_CurrentDB = CurrentDB;
+
+
+ if (CurrentDB->in_compare) {
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_compare: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ CurrentDB->in_compare = TRUE;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_compare = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+
+}
+
+static DB_Prefix_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_prefix(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(const DBT *key1, const DBT *key2)
+#else
+btree_prefix(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+ DB_File keep_CurrentDB = CurrentDB;
+
+ if (CurrentDB->in_prefix){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_prefix: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ CurrentDB->in_prefix = TRUE;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_prefix = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#ifdef BERKELEY_DB_1
+# define HASH_CB_SIZE_TYPE size_t
+#else
+# define HASH_CB_SIZE_TYPE u_int32_t
+#endif
+
+static DB_Hash_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+hash_cb(DB * db, const void *data, u_int32_t size)
+#else
+hash_cb(db, data, size)
+DB * db ;
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+hash_cb(const void *data, HASH_CB_SIZE_TYPE size)
+#else
+hash_cb(data, size)
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT;
+ int retval ;
+ int count ;
+ DB_File keep_CurrentDB = CurrentDB;
+
+ if (CurrentDB->in_hash){
+ tidyUp(CurrentDB);
+ croak ("DB_File hash callback: recursion detected\n") ;
+ }
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ /* DGH - Next two lines added to fix corrupted stack problem */
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ keep_CurrentDB->in_hash = TRUE;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_hash = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+db_errcall_cb(const char * db_errpfx, char * buffer)
+#else
+db_errcall_cb(db_errpfx, buffer)
+const char * db_errpfx;
+char * buffer;
+#endif
+{
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
+
+#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintHash(INFO *hash)
+#else
+PrintHash(hash)
+INFO * hash ;
+#endif
+{
+ printf ("HASH Info\n") ;
+ printf (" hash = %s\n",
+ (hash->db_HA_hash != NULL ? "redefined" : "default")) ;
+ printf (" bsize = %d\n", hash->db_HA_bsize) ;
+ printf (" ffactor = %d\n", hash->db_HA_ffactor) ;
+ printf (" nelem = %d\n", hash->db_HA_nelem) ;
+ printf (" cachesize = %d\n", hash->db_HA_cachesize) ;
+ printf (" lorder = %d\n", hash->db_HA_lorder) ;
+
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintRecno(INFO *recno)
+#else
+PrintRecno(recno)
+INFO * recno ;
+#endif
+{
+ printf ("RECNO Info\n") ;
+ printf (" flags = %d\n", recno->db_RE_flags) ;
+ printf (" cachesize = %d\n", recno->db_RE_cachesize) ;
+ printf (" psize = %d\n", recno->db_RE_psize) ;
+ printf (" lorder = %d\n", recno->db_RE_lorder) ;
+ printf (" reclen = %ul\n", (unsigned long)recno->db_RE_reclen) ;
+ printf (" bval = %d 0x%x\n", recno->db_RE_bval, recno->db_RE_bval) ;
+ printf (" bfname = %d [%s]\n", recno->db_RE_bfname, recno->db_RE_bfname) ;
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintBtree(INFO *btree)
+#else
+PrintBtree(btree)
+INFO * btree ;
+#endif
+{
+ printf ("BTREE Info\n") ;
+ printf (" compare = %s\n",
+ (btree->db_BT_compare ? "redefined" : "default")) ;
+ printf (" prefix = %s\n",
+ (btree->db_BT_prefix ? "redefined" : "default")) ;
+ printf (" flags = %d\n", btree->db_BT_flags) ;
+ printf (" cachesize = %d\n", btree->db_BT_cachesize) ;
+ printf (" psize = %d\n", btree->db_BT_psize) ;
+#ifndef DB_VERSION_MAJOR
+ printf (" maxkeypage = %d\n", btree->db_BT_maxkeypage) ;
+ printf (" minkeypage = %d\n", btree->db_BT_minkeypage) ;
+#endif
+ printf (" lorder = %d\n", btree->db_BT_lorder) ;
+}
+
+#else
+
+#define PrintRecno(recno)
+#define PrintHash(hash)
+#define PrintBtree(btree)
+
+#endif /* TRACE */
+
+
+static I32
+#ifdef CAN_PROTOTYPE
+GetArrayLength(pTHX_ DB_File db)
+#else
+GetArrayLength(db)
+DB_File db ;
+#endif
+{
+ DBT key ;
+ DBT value ;
+ int RETVAL ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+
+ return ((I32)RETVAL) ;
+}
+
+static recno_t
+#ifdef CAN_PROTOTYPE
+GetRecnoKey(pTHX_ DB_File db, I32 value)
+#else
+GetRecnoKey(db, value)
+DB_File db ;
+I32 value ;
+#endif
+{
+ if (value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(aTHX_ db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + 1 <= 0) {
+ tidyUp(db);
+ croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+ }
+
+ value = length + value + 1 ;
+ }
+ else
+ ++ value ;
+
+ return value ;
+}
+
+
+static DB_File
+#ifdef CAN_PROTOTYPE
+ParseOpenInfo(pTHX_ int isHASH, char *name, int flags, int mode, SV *sv)
+#else
+ParseOpenInfo(isHASH, name, flags, mode, sv)
+int isHASH ;
+char * name ;
+int flags ;
+int mode ;
+SV * sv ;
+#endif
+{
+
+#ifdef BERKELEY_DB_1_OR_2 /* Berkeley DB Version 1 or 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ void * openinfo = NULL ;
+ INFO * info = &RETVAL->info ;
+ STRLEN n_a;
+ dMY_CXT;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ info->db_HA_hash = hash_cb ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+ else
+ info->db_HA_hash = NULL ;
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ info->db_HA_ffactor = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ info->db_HA_nelem = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ info->db_HA_bsize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_HA_cachesize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_HA_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_compare = btree_compare ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_compare = NULL ;
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_prefix = btree_prefix ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_prefix = NULL ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_BT_flags = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_BT_cachesize = svp ? SvIV(*svp) : 0;
+
+#ifndef DB_VERSION_MAJOR
+ svp = hv_fetch(action, "minkeypage", 10, FALSE);
+ info->btree.minkeypage = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "maxkeypage", 10, FALSE);
+ info->btree.maxkeypage = svp ? SvIV(*svp) : 0;
+#endif
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_BT_psize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_BT_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+ openinfo = (void *)info ;
+
+ info->db_RE_flags = 0 ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_RE_flags = (u_long) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ info->db_RE_reclen = (size_t) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_RE_cachesize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_RE_psize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_RE_lorder = (int) (svp ? SvIV(*svp) : 0);
+
+#ifdef DB_VERSION_MAJOR
+ info->re_source = name ;
+ name = NULL ;
+#endif
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+#ifdef DB_VERSION_MAJOR
+ name = (char*) n_a ? ptr : NULL ;
+#else
+ info->db_RE_bfname = (char*) (n_a ? ptr : NULL) ;
+#endif
+ }
+ else
+#ifdef DB_VERSION_MAJOR
+ name = NULL ;
+#else
+ info->db_RE_bfname = NULL ;
+#endif
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+#ifdef DB_VERSION_MAJOR
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (info->flags & DB_FIXEDLEN) {
+ info->re_pad = value ;
+ info->flags |= DB_PAD ;
+ }
+ else {
+ info->re_delim = value ;
+ info->flags |= DB_DELIMITER ;
+ }
+
+ }
+#else
+ if (svp && SvOK(*svp))
+ {
+ if (SvPOK(*svp))
+ info->db_RE_bval = (u_char)*SvPV(*svp, n_a) ;
+ else
+ info->db_RE_bval = (u_char)(unsigned long) SvIV(*svp) ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+
+ }
+ else
+ {
+ if (info->db_RE_flags & R_FIXEDLEN)
+ info->db_RE_bval = (u_char) ' ' ;
+ else
+ info->db_RE_bval = (u_char) '\n' ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+ }
+#endif
+
+#ifdef DB_RENUMBER
+ info->flags |= DB_RENUMBER ;
+#endif
+
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+
+ /* OS2 Specific Code */
+#ifdef OS2
+#ifdef __EMX__
+ flags |= O_BINARY;
+#endif /* __EMX__ */
+#endif /* OS2 */
+
+#ifdef DB_VERSION_MAJOR
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 2.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = db_open(name, RETVAL->type, Flags, mode, NULL, openinfo, &RETVAL->dbp) ;
+ if (status == 0)
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor) ;
+#else
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+#endif
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+#else
+
+#if defined(DB_LIBRARY_COMPATIBILITY_API) && DB_VERSION_MAJOR > 2
+ RETVAL->dbp = __db185_open(name, flags, mode, RETVAL->type, openinfo) ;
+#else
+ RETVAL->dbp = dbopen(name, flags, mode, RETVAL->type, openinfo) ;
+#endif /* DB_LIBRARY_COMPATIBILITY_API */
+
+#endif
+
+ return (RETVAL) ;
+
+#else /* Berkeley DB Version > 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ DB * dbp ;
+ STRLEN n_a;
+ int status ;
+ dMY_CXT;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ status = db_create(&RETVAL->dbp, NULL,0) ;
+ /* printf("db_create returned %d %s\n", status, db_strerror(status)) ; */
+ if (status) {
+ RETVAL->dbp = NULL ;
+ return (RETVAL) ;
+ }
+ dbp = RETVAL->dbp ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_h_hash(dbp, hash_cb) ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ if (svp)
+ (void)dbp->set_h_ffactor(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ if (svp)
+ (void)dbp->set_h_nelem(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp));
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_compare(dbp, btree_compare) ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_prefix(dbp, btree_prefix) ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp)
+ (void)dbp->set_flags(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ int fixed = FALSE ;
+
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp) {
+ int flags = SvIV(*svp) ;
+ /* remove FIXDLEN, if present */
+ if (flags & DB_FIXEDLEN) {
+ fixed = TRUE ;
+ flags &= ~DB_FIXEDLEN ;
+ }
+ }
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp) {
+ status = dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+ }
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp) {
+ status = dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp) {
+ status = dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = (int)SvIV(*svp) ;
+
+ if (fixed) {
+ status = dbp->set_re_pad(dbp, value) ;
+ }
+ else {
+ status = dbp->set_re_delim(dbp, value) ;
+ }
+
+ }
+
+ if (fixed) {
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ if (svp) {
+ u_int32_t len = my_SvUV32(*svp) ;
+ status = dbp->set_re_len(dbp, len) ;
+ }
+ }
+
+ if (name != NULL) {
+ status = dbp->set_re_source(dbp, name) ;
+ name = NULL ;
+ }
+
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+ name = (char*) n_a ? ptr : NULL ;
+ }
+ else
+ name = NULL ;
+
+
+ status = dbp->set_flags(dbp, (u_int32_t)DB_RENUMBER) ;
+
+ if (flags){
+ (void)dbp->set_flags(dbp, (u_int32_t)flags) ;
+ }
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+ {
+ u_int32_t Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 3.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+#ifdef AT_LEAST_DB_4_1
+ status = (RETVAL->dbp->open)(RETVAL->dbp, NULL, name, NULL, RETVAL->type,
+ Flags, mode) ;
+#else
+ status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type,
+ Flags, mode) ;
+#endif
+ /* printf("open returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status == 0) {
+ RETVAL->dbp->set_errcall(RETVAL->dbp, db_errcall_cb) ;
+
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+ /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+ }
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+
+ return (RETVAL) ;
+
+#endif /* Berkeley DB Version > 2 */
+
+} /* ParseOpenInfo */
+
+
+#include "constants.h"
+
+MODULE = DB_File PACKAGE = DB_File PREFIX = db_
+
+INCLUDE: constants.xs
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ MY_CXT_INIT;
+ __getBerkeleyDBInfo() ;
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(recno_t) ;
+ }
+
+
+
+DB_File
+db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_HASH)
+ int isHASH
+ char * dbtype
+ int flags
+ int mode
+ CODE:
+ {
+ char * name = (char *) NULL ;
+ SV * sv = (SV *) NULL ;
+ STRLEN n_a;
+
+ if (items >= 3 && SvOK(ST(2)))
+ name = (char*) SvPV(ST(2), n_a) ;
+
+ if (items == 6)
+ sv = ST(5) ;
+
+ RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ;
+ if (RETVAL->dbp == NULL)
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+int
+db_DESTROY(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+ Trace(("DESTROY %p\n", db));
+ CLEANUP:
+ Trace(("DESTROY %p done\n", db));
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+ safefree(db) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+
+
+int
+db_DELETE(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+
+
+int
+db_EXISTS(db, key)
+ DB_File db
+ DBTKEY key
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = (((db->dbp)->get)(db->dbp, TXN &key, &value, 0) == 0) ;
+ }
+ OUTPUT:
+ RETVAL
+
+void
+db_FETCH(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = db_get(db, key, value, flags) ;
+ ST(0) = sv_newmortal();
+ OutputValue(ST(0), value)
+ }
+
+int
+db_STORE(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+
+
+void
+db_FIRSTKEY(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+void
+db_NEXTKEY(db, key)
+ DB_File db
+ DBTKEY key = NO_INIT
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_NEXT) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+#
+# These would be nice for RECNO
+#
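+# A minimal sketch of the array interface these implement, as seen from
+# Perl (the database file name below is illustrative only):
+#
+#     use DB_File ;
+#     use Fcntl ;
+#
+#     my @lines ;
+#     tie @lines, 'DB_File', 'recno.db', O_CREAT|O_RDWR, 0666, $DB_RECNO ;
+#     push @lines, "first", "second" ;   # push/PUSH below
+#     my $last  = pop   @lines ;         # pop/POP
+#     my $first = shift @lines ;         # shift/SHIFT
+#     unshift @lines, "new first" ;      # unshift/UNSHIFT
+#     my $count = scalar @lines ;        # length/FETCHSIZE
+#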
+
+int
+unshift(db, ...)
+ DB_File db
+ ALIAS: UNSHIFT = 1
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ int i ;
+ int One ;
+ STRLEN n_a;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, DB_FIRST) ;
+ RETVAL = 0 ;
+#else
+ RETVAL = -1 ;
+#endif
+ for (i = items-1 ; i > 0 ; --i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ One = 1 ;
+ key.data = &One ;
+ key.size = sizeof(int) ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
+#else
+ RETVAL = (db->dbp->put)(db->dbp, &key, &value, R_IBEFORE) ;
+#endif
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+void
+pop(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: POP = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+
+ /* First get the final value */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv(ST(0), &PL_sv_undef);
+ }
+ }
+
+void
+shift(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: SHIFT = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBT value ;
+ DBTKEY key ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv (ST(0), &PL_sv_undef) ;
+ }
+ }
+
+
+I32
+push(db, ...)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: PUSH = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DB * Db = db->dbp ;
+ int i ;
+ STRLEN n_a;
+ int keyval ;
+
+ DBT_flags(key) ;
+ DBT_flags(value) ;
+ CurrentDB = db ;
+ /* Set the Cursor to the Last element */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+#ifndef DB_VERSION_MAJOR
+ if (RETVAL >= 0)
+#endif
+ {
+ if (RETVAL == 0)
+ keyval = *(int*)key.data ;
+ else
+ keyval = 0 ;
+ for (i = 1 ; i < items ; ++i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ ++ keyval ;
+ key.data = &keyval ;
+ key.size = sizeof(int) ;
+ RETVAL = (Db->put)(Db, TXN &key, &value, 0) ;
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+I32
+length(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: FETCHSIZE = 1
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(aTHX_ db) ;
+ OUTPUT:
+ RETVAL
+
+
+#
+# Now provide an interface to the rest of the DB functionality
+#
+
+int
+db_del(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_del(db, key, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_get(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_get(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ value
+
+int
+db_put(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_put(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_KEYEXIST)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) OutputKey(ST(1), key);
+
+int
+db_fd(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT ;
+ CODE:
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = -1 ;
+ {
+ int status = 0 ;
+ status = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
+ if (status != 0)
+ RETVAL = -1 ;
+ }
+#else
+ RETVAL = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp) ) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+db_sync(db, flags=0)
+ DB_File db
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_sync(db, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_seq(db, key, value, flags)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_seq(db, key, value, flags);
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ value
+
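+# The four filter_* XSUBs below install the DBM Filter hooks.  A minimal
+# sketch of how they are called from Perl (the file name and the
+# NUL-termination convention are illustrative only):
+#
+#     use DB_File ;
+#     use Fcntl ;
+#
+#     my %h ;
+#     my $db = tie %h, 'DB_File', 'filt.db', O_CREAT|O_RDWR, 0666, $DB_HASH ;
+#     $db->filter_store_value(sub { $_ .= "\0" }) ;  # append a NUL on write
+#     $db->filter_fetch_value(sub { s/\0$// }) ;     # strip it on read
+#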
+SV *
+filter_fetch_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_key, code) ;
+
+SV *
+filter_store_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_key, code) ;
+
+SV *
+filter_fetch_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_value, code) ;
+
+SV *
+filter_store_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_value, code) ;
+
diff --git a/libdb/perl/DB_File/DB_File_BS b/libdb/perl/DB_File/DB_File_BS
new file mode 100644
index 0000000..9282c49
--- /dev/null
+++ b/libdb/perl/DB_File/DB_File_BS
@@ -0,0 +1,6 @@
+# NeXT needs /usr/lib/libposix.a to load along with DB_File.so
+if ( $dlsrc eq "dl_next.xs" ) {
+ @DynaLoader::dl_resolve_using = ( '/usr/lib/libposix.a' );
+}
+
+1;
diff --git a/libdb/perl/DB_File/MANIFEST b/libdb/perl/DB_File/MANIFEST
new file mode 100644
index 0000000..b3e1a7b
--- /dev/null
+++ b/libdb/perl/DB_File/MANIFEST
@@ -0,0 +1,30 @@
+Changes
+DB_File.pm
+DB_File.xs
+DB_File_BS
+MANIFEST
+Makefile.PL
+README
+config.in
+dbinfo
+fallback.h
+fallback.xs
+hints/dynixptx.pl
+hints/sco.pl
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
+ppport.h
+t/db-btree.t
+t/db-hash.t
+t/db-recno.t
+typemap
+version.c
diff --git a/libdb/perl/DB_File/Makefile.PL b/libdb/perl/DB_File/Makefile.PL
new file mode 100644
index 0000000..4c1565d
--- /dev/null
+++ b/libdb/perl/DB_File/Makefile.PL
@@ -0,0 +1,330 @@
+#! perl -w
+
+use strict ;
+use ExtUtils::MakeMaker 5.16 ;
+use Config ;
+
+die "DB_File needs Perl 5.004_05 or better. This is $]\n"
+ if $] <= 5.00404;
+
+my $VER_INFO ;
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+my $COMPAT185 = "" ;
+
+ParseCONFIG() ;
+
+my @files = ('DB_File.pm', glob "t/*.t") ;
+UpDowngrade(@files);
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# Solaris is special.
+#$LIBS .= " -lthread" if $^O eq 'solaris' ;
+
+# AIX is special.
+$LIBS .= " -lpthread" if $^O eq 'aix' ;
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'DB_File',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ #MAN3PODS => {}, # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'DB_File.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185",
+ OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
+ #OPTIMIZE => '-g',
+ 'depend' => { 'Makefile' => 'config.in',
+ 'version$(OBJ_EXT)' => 'version.c'},
+ 'clean' => { FILES => 'constants.h constants.xs' },
+ 'macro' => { INSTALLDIRS => 'perl', my_files => "@files" },
+ 'dist' => { COMPRESS => 'gzip', SUFFIX => 'gz',
+ DIST_DEFAULT => 'MyDoubleCheck tardist'},
+ );
+
+
+my @names = qw(
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+ );
+
+if (eval {require ExtUtils::Constant; 1}) {
+ # Check the constants above all appear in @EXPORT in DB_File.pm
+ my %names = map { $_, 1} @names;
+ open F, "<DB_File.pm" or die "Cannot open DB_File.pm: $!\n";
+ while (<F>)
+ {
+ last if /^\s*\@EXPORT\s+=\s+qw\(/ ;
+ }
+
+ while (<F>)
+ {
+ last if /^\s*\)/ ;
+ /(\S+)/ ;
+ delete $names{$1} if defined $1 ;
+ }
+ close F ;
+
+ if ( keys %names )
+ {
+ my $missing = join ("\n\t", sort keys %names) ;
+ die "The following names are missing from \@EXPORT in DB_File.pm\n" .
+ "\t$missing\n" ;
+ }
+
+
+ ExtUtils::Constant::WriteConstants(
+ NAME => 'DB_File',
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+
+ );
+}
+else {
+ use File::Copy;
+ copy ('fallback.h', 'constants.h')
+ or die "Can't copy fallback.h to constants.h: $!";
+ copy ('fallback.xs', 'constants.xs')
+ or die "Can't copy fallback.xs to constants.xs: $!";
+}
+
+exit;
+
+
+sub MY::postamble { <<'EOM' } ;
+
+MyDoubleCheck:
+ @echo Checking config.in is setup for a release
+ @(grep "^LIB.*/usr/local/BerkeleyDB" config.in && \
+ grep "^INCLUDE.*/usr/local/BerkeleyDB" config.in && \
+ grep "^#DBNAME.*" config.in) >/dev/null || \
+ (echo config.in needs fixing ; exit 1)
+ @echo config.in is ok
+ @echo
+ @echo Checking DB_File.xs is ok for a release.
+ @(perl -ne ' exit 1 if /^\s*#\s*define\s+TRACE/ ; ' DB_File.xs || \
+ (echo DB_File.xs needs fixing ; exit 1))
+ @echo DB_File.xs is ok
+ @echo
+ @echo Checking for $$^W in files: $(my_files)
+ @perl -ne ' \
+ exit 1 if /^\s*local\s*\(\s*\$$\^W\s*\)/;' $(my_files) || \
+ (echo found unexpected $$^W ; exit 1)
+ @echo No $$^W found.
+ @echo
+ @echo Checking for 'use vars' in files: $(my_files)
+ @perl -ne ' \
+ exit 0 if /^__(DATA|END)__/; \
+ exit 1 if /^\s*use\s+vars/;' $(my_files) || \
+ (echo found unexpected "use vars"; exit 1)
+ @echo No 'use vars' found.
+ @echo
+ @echo All files are OK for a release.
+ @echo
+
+EOM
+
+
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME & COMPAT185 are optional, so pretend they have
+ # been parsed.
+ delete $Parsed{'DBNAME'} ;
+ delete $Parsed{'COMPAT185'} ;
+ $Info{COMPAT185} = "No" ;
+
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
+ if (defined $ENV{'DB_FILE_COMPAT185'} &&
+ $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
+ $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
+ my $PREFIX = $Info{'PREFIX'} ;
+ my $HASH = $Info{'HASH'} ;
+
+ $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
+
+ print <<EOM if 0 ;
+ INCLUDE [$INC_DIR]
+ LIB [$LIB_DIR]
+ HASH [$HASH]
+ PREFIX [$PREFIX]
+ DBNAME [$DB_NAME]
+
+EOM
+
+ print "Looks Good.\n" ;
+
+}
+
+sub UpDowngrade
+{
+ my @files = @_ ;
+
+ # our is stable from 5.6.0 onward
+ # warnings is stable from 5.6.1 onward
+
+ # Note: this code assumes that each statement it modifies is not
+ # split across multiple lines.
+
+
+ my $warn_sub ;
+ my $our_sub ;
+
+ if ($] < 5.006001) {
+ # From: use|no warnings "blah"
+ # To: local ($^W) = 1; # use|no warnings "blah"
+ #
+ # and
+ #
+ # From: warnings::warnif(x,y);
+ # To: $^W && carp(y); # warnif -- x
+ $warn_sub = sub {
+ s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
+ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
+
+ s/^(\s*)warnings::warnif\s*\((.*?)\s*,\s*(.*?)\)\s*;/${1}\$^W && carp($3); # warnif - $2/ ;
+ };
+ }
+ else {
+ # From: local ($^W) = 1; # use|no warnings "blah"
+ # To: use|no warnings "blah"
+ #
+ # and
+ #
+ # From: $^W && carp(y); # warnif -- x
+ # To: warnings::warnif(x,y);
+ $warn_sub = sub {
+ s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
+ s/^(\s*)\$\^W\s+\&\&\s*carp\s*\((.*?)\)\s*;\s*#\s*warnif\s*-\s*(.*)/${1}warnings::warnif($3, $2);/ ;
+ };
+ }
+
+ if ($] < 5.006000) {
+ $our_sub = sub {
+ if ( /^(\s*)our\s+\(\s*([^)]+\s*)\)/ ) {
+ my $indent = $1;
+ my $vars = join ' ', split /\s*,\s*/, $2;
+ $_ = "${indent}use vars qw($vars);\n";
+ }
+ };
+ }
+ else {
+ $our_sub = sub {
+ if ( /^(\s*)use\s+vars\s+qw\((.*?)\)/ ) {
+ my $indent = $1;
+ my $vars = join ', ', split ' ', $2;
+ $_ = "${indent}our ($vars);\n";
+ }
+ };
+ }
+
+ foreach (@files)
+ { doUpDown($our_sub, $warn_sub, $_) }
+}
+
+
+sub doUpDown
+{
+ my $our_sub = shift;
+ my $warn_sub = shift;
+
+ local ($^I) = ".bak" ;
+ local (@ARGV) = shift;
+
+ while (<>)
+ {
+ print, last if /^__(END|DATA)__/ ;
+
+ &{ $our_sub }();
+ &{ $warn_sub }();
+ print ;
+ }
+
+ return if eof ;
+
+ while (<>)
+ { print }
+}
+
+# end of file Makefile.PL
diff --git a/libdb/perl/DB_File/README b/libdb/perl/DB_File/README
new file mode 100644
index 0000000..b09aa9d
--- /dev/null
+++ b/libdb/perl/DB_File/README
@@ -0,0 +1,458 @@
+ DB_File
+
+ Version 1.805
+
+ 1st Sep 2002
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+IMPORTANT NOTICE
+================
+
+If you are using the locking technique described in older versions of
+DB_File, please read the section called "Locking: The Trouble with fd"
+in DB_File.pm immediately. The locking method has been found to be
+unsafe. You risk corrupting your data if you continue to use it.
+
+DESCRIPTION
+-----------
+
+DB_File is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1. (DB_File can be built with
+version 2, 3 or 4 of Berkeley DB, but it will only support the 1.x
+features.)
+
+If you want to make use of the new features available in Berkeley DB
+2.x, 3.x or 4.x, use the Perl module BerkeleyDB instead.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. DB_File provides an interface to all three
+of the database types (hash, btree and recno) currently supported by
+Berkeley DB.
+
+For further details see the documentation included at the end of the
+file DB_File.pm.
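+
+As a quick illustration, here is a minimal sketch (the database file
+name is illustrative) that ties a hash to a DB_HASH database, stores a
+key and reads it back:
+
+    use DB_File ;
+    use Fcntl ;
+
+    my %h ;
+    tie %h, 'DB_File', 'example.db', O_CREAT|O_RDWR, 0666, $DB_HASH
+        or die "Cannot open example.db: $!\n" ;
+
+    $h{apple} = 'red' ;
+    print "apple is $h{apple}\n" ;
+
+    untie %h ;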
+
+PREREQUISITES
+-------------
+
+Before you can build DB_File you must have the following installed on
+your system:
+
+ * Perl 5.004_05 or greater.
+
+ * Berkeley DB.
+
+ The official web site for Berkeley DB is http://www.sleepycat.com.
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use DB_File
+ to access database files created by a third-party application, like
+ Sendmail or Netscape. In these cases you must build DB_File with a
+ compatible version of Berkeley DB.
+
+ If you want to use Berkeley DB 2.x, you must have version 2.3.4
+ or greater. If you want to use Berkeley DB 3.x or 4.x, any version
+ will do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
+ to use Berkeley DB version 2, 3 or 4, read either the Solaris Notes
+ or HP-UX Notes sections below. If you are running Linux please
+ read the Linux Notes section before proceeding.
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+
+ NOTE:
+ If you have a very old version of Berkeley DB (i.e. pre 1.85),
+ three of the tests in the recno test harness may fail (tests 51,
+ 53 and 55). You can safely ignore the errors if you're never
+ going to use the broken functionality (recno databases with a
+ modified bval). Otherwise you'll have to upgrade your DB
+ library.
+
+
+INSTALLATION
+------------
+
+ make install
+
+
+TROUBLESHOOTING
+===============
+
+Here are some of the common problems people encounter when building
+DB_File.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I/usr/local/include -Dbool=char -DHAS_BOOL
+ -O2 -DVERSION=\"1.64\" -DXS_VERSION=\"1.64\" -fpic
+ -I/usr/local/lib/perl5/i586-linux/5.00404/CORE -DmDB_Prefix_t=size_t
+ -DmDB_Hash_t=u_int32_t DB_File.c
+ DB_File.xs:101: db.h: No such file or directory
+
+or this:
+
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/DB_File/DB_File.so -shared
+ -L/usr/local/lib DB_File.o -L/usr/local/lib -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
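+
+       For example, if Berkeley DB lives under /usr/local/BerkeleyDB (the
+       default used in this module's sample config.in), the two lines would
+       read:
+
+           INCLUDE = /usr/local/BerkeleyDB/include
+           LIB = /usr/local/BerkeleyDB/lib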
+
+
+Undefined symbol db_version
+---------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /usr/bin/perl5.00404 -I./blib/arch -I./blib/lib
+ -I/usr/local/lib/perl5/i586-linux/5.00404 -I/usr/local/lib/perl5 -e 'use
+ Test::Harness qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........Can't load './blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: ./blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ db_version at /usr/local/lib/perl5/i586-linux/5.00404/DynaLoader.pm
+ line 166.
+
+ at t/db-btree.t line 21
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ dubious Test returned status 2 (wstat 512, 0x200)
+
+This error usually happens when you have both version 1 and version
+2 of Berkeley DB installed on your system and DB_File attempts to
+build using the db.h for Berkeley DB version 2 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error occurs because Berkeley DB version 1
+doesn't have the symbol db_version.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+
+Undefined symbol dbopen
+-----------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ ...
+ t/db-btree..........Can't load 'blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ dbopen at /usr/local/lib/perl5/5.6.1/i586-linux/DynaLoader.pm line 206.
+ at t/db-btree.t line 23
+ Compilation failed in require at t/db-btree.t line 23.
+ ...
+
+This error usually happens when you have both version 1 and a more recent
+version of Berkeley DB installed on your system and DB_File attempts
+to build using the db.h for Berkeley DB version 1 and the newer version
+of the library. Unfortunately the two versions aren't compatible with
+each other. The undefined symbol error occurs because versions of
+Berkeley DB newer than version 1 don't have the symbol dbopen.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00560 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_60/lib/5.00560/i586-linux
+ -I/home/paul/perl/install/5.005_60/lib/5.00560 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........
+ DB_File needs compatible versions of libdb & db.h
+ you have db.h version 2.3.7 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+          of db.h and libdb.a that you don't want DB_File to use.
+ If you are running Linux, please read the Linux Notes section
+ below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/DB_File/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+Solaris Notes
+-------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+DB_File test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX 10 Notes
+--------------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building DB_File with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (DB_File.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+HP-UX 11 Notes
+--------------
+
+Some people running the combination of HP-UX 11 and Berkeley DB 2.7.7 have
+reported getting this error when they run the test harness for DB_File:
+
+ ...
+ lib/db-btree.........Can't call method "DELETE" on an undefined value at lib/db-btree.t line 216.
+ FAILED at test 26
+ lib/db-hash..........Can't call method "DELETE" on an undefined value at lib/db-hash.t line 183.
+ FAILED at test 22
+ ...
+
+The fix for this is to rebuild and install Berkeley DB with the bigfile
+option disabled.
+
+
+IRIX NOTES
+----------
+
+If you are running IRIX, and want to use Berkeley DB version 1, you can
+get it from http://reality.sgi.com/ariel. It has the patches necessary
+to compile properly on IRIX 5.3.
+
+
+FEEDBACK
+========
+
+How to report a problem with DB_File.
+
+When reporting any problem, I need the information requested below.
+
+ 1. The *complete* output from running this
+
+ perl -V
+
+ Do not edit the output in any way.
+ Note, I want you to run "perl -V" and NOT "perl -v".
+
+ If your perl does not understand the "-V" option it is too
+ old. DB_File needs Perl version 5.00405 or better.
+
+ 2. The version of DB_File you have.
+ If you have successfully installed DB_File, this one-liner will
+ tell you:
+
+ perl -e 'use DB_File; print qq{DB_File ver $DB_File::VERSION\n}'
+
+ If you haven't installed DB_File then search DB_File.pm for a line
+ like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you are using.
+ If you are using a version older than 1.85, think about upgrading. One
+ point to note if you are considering upgrading Berkeley DB - the
+ file formats for 1.85, 1.86, 2.0, 3.0 & 3.1 are all different.
+
+ If you have successfully installed DB_File, this command will display
+ the version of Berkeley DB it was built with:
+
+ perl -e 'use DB_File; print qq{Berkeley DB ver $DB_File::db_ver\n}'
+
+    4. A copy of the file config.in from the DB_File main source directory.
+
+ 5. A listing of directories where Berkeley DB is installed.
+       For example, if Berkeley DB is installed in /usr/BerkeleyDB/lib and
+ /usr/BerkeleyDB/include, I need the output from running this
+
+ ls -l /usr/BerkeleyDB/lib
+ ls -l /usr/BerkeleyDB/include
+
+ 6. If you are having problems building DB_File, send me a complete log
+ of what happened. Start by unpacking the DB_File module into a fresh
+ directory and keep a log of all the steps
+
+ [edit config.in, if necessary]
+ perl Makefile.PL
+ make
+ make test TEST_VERBOSE=1
+
+ 7. Now the difficult one. If you think you have found a bug in DB_File
+ and you want me to fix it, you will *greatly* enhance the chances
+ of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
diff --git a/libdb/perl/DB_File/config.in b/libdb/perl/DB_File/config.in
new file mode 100644
index 0000000..292b09a
--- /dev/null
+++ b/libdb/perl/DB_File/config.in
@@ -0,0 +1,97 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 9th Sept 1997
+# version 1.55
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = /usr/local/include
+#INCLUDE = /usr/include
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+LIB = /usr/local/BerkeleyDB/lib
+#LIB = /usr/local/lib
+#LIB = /usr/lib
+
+# 3. What version of Berkeley DB have you got?
+#
+# If you have version 2.0 or greater, you can skip this question.
+#
+# If you have Berkeley DB 1.78 or greater you shouldn't have to
+# change the definitions for PREFIX and HASH below.
+#
+# For older versions of Berkeley DB change both PREFIX and HASH to int.
+# Version 1.71, 1.72 and 1.73 are known to need this change.
+#
+# If you don't know what version you have, have a look in the file db.h.
+#
+# Search for the string "DB_VERSION_MAJOR". If it is present, you
+# have Berkeley DB version 2 (or greater).
+#
+# If that didn't work, find the definition of the BTREEINFO typedef.
+# Check the return type from the prefix element. It should look like
+# this in an older copy of db.h:
+#
+# int (*prefix) __P((const DBT *, const DBT *));
+#
+# and like this in a more recent copy:
+#
+# size_t (*prefix) /* prefix function */
+# __P((const DBT *, const DBT *));
+#
+# Change the definition of PREFIX, below, to reflect the return type
+# of the prefix function in your db.h.
+#
+# Now find the definition of the HASHINFO typedef. Check the return
+# type of the hash element. Older versions look like this:
+#
+# int (*hash) __P((const void *, size_t));
+#
+# newer like this:
+#
+# u_int32_t /* hash function */
+# (*hash) __P((const void *, size_t));
+#
+# Change the definition of HASH, below, to reflect the return type of
+# the hash function in your db.h.
+#
+
+PREFIX = size_t
+HASH = u_int32_t
+
+# 4. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have both Berkeley DB 2.3.12 and 1.85 on your
+# system and you want to use the Berkeley DB version 2 library you
+# could rename the version 2 library from libdb.a to libdb-2.3.12.a and
+# change the DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.3.12
+#
+# That will ensure you are linking the correct version of the DB
+# library.
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-2.4.10
+
+# end of file config.in
diff --git a/libdb/perl/DB_File/dbinfo b/libdb/perl/DB_File/dbinfo
new file mode 100644
index 0000000..af2c45f
--- /dev/null
+++ b/libdb/perl/DB_File/dbinfo
@@ -0,0 +1,112 @@
+#!/usr/local/bin/perl
+
+# Name:		dbinfo -- identify the Berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
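+#
+# Typical use (the file name is illustrative); the script reports the
+# file type (Btree/Hash/Queue), the on-disk version ID, the range of
+# Berkeley DB releases that write that version, the byte order and the
+# magic number:
+#
+#   perl dbinfo /path/to/some.db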
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/libdb/perl/DB_File/fallback.h b/libdb/perl/DB_File/fallback.h
new file mode 100644
index 0000000..0213308
--- /dev/null
+++ b/libdb/perl/DB_File/fallback.h
@@ -0,0 +1,455 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_TXN R_LAST R_NEXT R_PREV */
+ /* Offset 2 gives the best switch position. */
+ switch (name[2]) {
+ case 'L':
+ if (memEQ(name, "R_LAST", 6)) {
+ /* ^ */
+#ifdef R_LAST
+ *iv_return = R_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "R_NEXT", 6)) {
+ /* ^ */
+#ifdef R_NEXT
+ *iv_return = R_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_PREV", 6)) {
+ /* ^ */
+#ifdef R_PREV
+ *iv_return = R_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_TXN", 6)) {
+ /* ^ */
+#ifdef DB_TXN
+ *iv_return = DB_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK R_FIRST R_NOKEY */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'I':
+ if (memEQ(name, "R_FIRST", 7)) {
+ /* ^ */
+#ifdef R_FIRST
+ *iv_return = R_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCK", 7)) {
+ /* ^ */
+#ifdef DB_LOCK
+ *iv_return = DB_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "R_NOKEY", 7)) {
+ /* ^ */
+#ifdef R_NOKEY
+ *iv_return = R_NOKEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_SHMEM R_CURSOR R_IAFTER */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'M':
+ if (memEQ(name, "DB_SHMEM", 8)) {
+ /* ^ */
+#ifdef DB_SHMEM
+ *iv_return = DB_SHMEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "R_CURSOR", 8)) {
+ /* ^ */
+#ifdef R_CURSOR
+ *iv_return = R_CURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "R_IAFTER", 8)) {
+ /* ^ */
+#ifdef R_IAFTER
+ *iv_return = R_IAFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHMAGIC RET_ERROR R_IBEFORE */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'I':
+ if (memEQ(name, "HASHMAGIC", 9)) {
+ /* ^ */
+#ifdef HASHMAGIC
+ *iv_return = HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "RET_ERROR", 9)) {
+ /* ^ */
+#ifdef RET_ERROR
+ *iv_return = RET_ERROR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_IBEFORE", 9)) {
+ /* ^ */
+#ifdef R_IBEFORE
+ *iv_return = R_IBEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ BTREEMAGIC R_FIXEDLEN R_SNAPSHOT __R_UNUSED */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'E':
+ if (memEQ(name, "R_FIXEDLEN", 10)) {
+ /* ^ */
+#ifdef R_FIXEDLEN
+ *iv_return = R_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "BTREEMAGIC", 10)) {
+ /* ^ */
+#ifdef BTREEMAGIC
+ *iv_return = BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "__R_UNUSED", 10)) {
+ /* ^ */
+#ifdef __R_UNUSED
+ *iv_return = __R_UNUSED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_SNAPSHOT", 10)) {
+ /* ^ */
+#ifdef R_SNAPSHOT
+ *iv_return = R_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHVERSION RET_SPECIAL RET_SUCCESS R_RECNOSYNC R_SETCURSOR */
+ /* Offset 10 gives the best switch position. */
+ switch (name[10]) {
+ case 'C':
+ if (memEQ(name, "R_RECNOSYNC", 11)) {
+ /* ^ */
+#ifdef R_RECNOSYNC
+ *iv_return = R_RECNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "RET_SPECIAL", 11)) {
+ /* ^ */
+#ifdef RET_SPECIAL
+ *iv_return = RET_SPECIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "HASHVERSION", 11)) {
+ /* ^ */
+#ifdef HASHVERSION
+ *iv_return = HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_SETCURSOR", 11)) {
+ /* ^ */
+#ifdef R_SETCURSOR
+ *iv_return = R_SETCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "RET_SUCCESS", 11)) {
+ /* ^ */
+#ifdef RET_SUCCESS
+ *iv_return = RET_SUCCESS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!bleedperl -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV)};
+my @names = (qw(BTREEMAGIC BTREEVERSION DB_LOCK DB_SHMEM DB_TXN HASHMAGIC
+ HASHVERSION MAX_PAGE_NUMBER MAX_PAGE_OFFSET MAX_REC_NUMBER
+ RET_ERROR RET_SPECIAL RET_SUCCESS R_CURSOR R_DUP R_FIRST
+ R_FIXEDLEN R_IAFTER R_IBEFORE R_LAST R_NEXT R_NOKEY
+ R_NOOVERWRITE R_PREV R_RECNOSYNC R_SETCURSOR R_SNAPSHOT
+ __R_UNUSED));
+
+print constant_types(); # macro defs
+foreach (C_constant ("DB_File", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("DB_File", $types);
+__END__
+ */
+
+ switch (len) {
+ case 5:
+ if (memEQ(name, "R_DUP", 5)) {
+#ifdef R_DUP
+ *iv_return = R_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ if (memEQ(name, "BTREEVERSION", 12)) {
+#ifdef BTREEVERSION
+ *iv_return = BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 13:
+ if (memEQ(name, "R_NOOVERWRITE", 13)) {
+#ifdef R_NOOVERWRITE
+ *iv_return = R_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 14:
+ if (memEQ(name, "MAX_REC_NUMBER", 14)) {
+#ifdef MAX_REC_NUMBER
+ *iv_return = MAX_REC_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 15:
+ /* Names all of length 15. */
+ /* MAX_PAGE_NUMBER MAX_PAGE_OFFSET */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'N':
+ if (memEQ(name, "MAX_PAGE_NUMBER", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_NUMBER
+ *iv_return = MAX_PAGE_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "MAX_PAGE_OFFSET", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_OFFSET
+ *iv_return = MAX_PAGE_OFFSET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
diff --git a/libdb/perl/DB_File/fallback.xs b/libdb/perl/DB_File/fallback.xs
new file mode 100644
index 0000000..8650cdf
--- /dev/null
+++ b/libdb/perl/DB_File/fallback.xs
@@ -0,0 +1,88 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ /* const char *pv; Uncomment this if you need to return PVs */
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid DB_File macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined DB_File macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ /* Uncomment this if you need to return PVs
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break; */
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing DB_File macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
diff --git a/libdb/perl/DB_File/hints/dynixptx.pl b/libdb/perl/DB_File/hints/dynixptx.pl
new file mode 100644
index 0000000..bb5ffa5
--- /dev/null
+++ b/libdb/perl/DB_File/hints/dynixptx.pl
@@ -0,0 +1,3 @@
+# Need to add an extra '-lc' to the end to work around a DYNIX/ptx bug
+
+$self->{LIBS} = ['-lm -lc'];
diff --git a/libdb/perl/DB_File/hints/sco.pl b/libdb/perl/DB_File/hints/sco.pl
new file mode 100644
index 0000000..ff60440
--- /dev/null
+++ b/libdb/perl/DB_File/hints/sco.pl
@@ -0,0 +1,2 @@
+# osr5 needs to explicitly link against libc to pull in some static symbols
+$self->{LIBS} = ['-ldb -lc'] if $Config{'osvers'} =~ '3\.2v5\.0\..' ;
diff --git a/libdb/perl/DB_File/patches/5.004 b/libdb/perl/DB_File/patches/5.004
new file mode 100644
index 0000000..143ec95
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
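Each of the patches in this directory makes the same change for a different Perl release: Configure gains a perllibs variable, which is $libs with the dbm-style libraries stripped, and the core link line uses it instead of $libs, so -ldb is no longer linked into the perl binary itself. As the patch comments note, an extension that needs one of the stripped libraries is expected to add it back in its own Makefile.PL; a purely illustrative fragment (not part of this tree) would look like:

    use ExtUtils::MakeMaker;
    WriteMakefile(
        NAME => 'DB_File',
        LIBS => ['-ldb'],   # re-add the library Configure removed from $perllibs
    );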
diff --git a/libdb/perl/DB_File/patches/5.004_01 b/libdb/perl/DB_File/patches/5.004_01
new file mode 100644
index 0000000..1b05eb4
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.004_02 b/libdb/perl/DB_File/patches/5.004_02
new file mode 100644
index 0000000..238f873
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.004_03 b/libdb/perl/DB_File/patches/5.004_03
new file mode 100644
index 0000000..06331ea
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/libdb/perl/DB_File/patches/5.004_04 b/libdb/perl/DB_File/patches/5.004_04
new file mode 100644
index 0000000..a227dc7
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.004_05 b/libdb/perl/DB_File/patches/5.004_05
new file mode 100644
index 0000000..51c8bf3
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.005 b/libdb/perl/DB_File/patches/5.005
new file mode 100644
index 0000000..effee3e
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.005_01 b/libdb/perl/DB_File/patches/5.005_01
new file mode 100644
index 0000000..2a05dd5
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.005_02 b/libdb/perl/DB_File/patches/5.005_02
new file mode 100644
index 0000000..5dd57dd
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/patches/5.005_03 b/libdb/perl/DB_File/patches/5.005_03
new file mode 100644
index 0000000..115f9f5
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/libdb/perl/DB_File/patches/5.6.0 b/libdb/perl/DB_File/patches/5.6.0
new file mode 100644
index 0000000..1f9b3b6
--- /dev/null
+++ b/libdb/perl/DB_File/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/libdb/perl/DB_File/ppport.h b/libdb/perl/DB_File/ppport.h
new file mode 100644
index 0000000..c343835
--- /dev/null
+++ b/libdb/perl/DB_File/ppport.h
@@ -0,0 +1,329 @@
+/* This file is Based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
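+
+/* Worked example: perl 5.6.0 (PERL_REVISION 5, PERL_VERSION 6,
+ * PERL_SUBVERSION 0) encodes as (5 * 0x1000000L) + (6 * 0x1000L) + 0
+ * == 0x5006000L, so version comparisons reduce to comparing one number. */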
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+#  define newRV_noinc(sv) \
+       ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv))
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
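+
+/* A minimal usage sketch of the protocol above (illustrative only; the
+ * extension name and the `count' member are hypothetical, not defined here):
+ *
+ *     #define MY_CXT_KEY "MyExt::_guts"
+ *     typedef struct { int count; } my_cxt_t;
+ *     START_MY_CXT
+ *
+ *     // once, e.g. in the extension's BOOT: section
+ *     { MY_CXT_INIT; }
+ *
+ *     // in any function that uses the interpreter-local data
+ *     dMY_CXT;
+ *     MY_CXT.count++;
+ */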
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MY_CXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
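+
+/* Illustrative use from a DB_File-style XS module (a sketch only, assuming
+ * a `db' structure with the `filtering' flag and filter members referenced
+ * by the macro bodies above):
+ *
+ *     // install or replace a filter and return the old one to Perl
+ *     DBM_setFilter(db->filter_store_key, code) ;
+ *
+ *     // apply the store-key filter to `key' before it is written,
+ *     // with recursion protection via db->filtering
+ *     DBM_ckFilter(key, filter_store_key, "filter_store_key") ;
+ */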
+
+#endif /* _P_P_PORTABILITY_H_ */
diff --git a/libdb/perl/DB_File/t/db-btree.t b/libdb/perl/DB_File/t/db-btree.t
new file mode 100644
index 0000000..a990a5c
--- /dev/null
+++ b/libdb/perl/DB_File/t/db-btree.t
@@ -0,0 +1,1489 @@
+#!./perl -w
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ warn <<EOM;
+#
+# This test is known to crash in Mac OS X versions 10.1.4 (or earlier)
+# because of the buggy Berkeley DB version included with the OS.
+#
+EOM
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..177\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub lexical
+{
+ my(@a) = unpack ("C*", $a) ;
+ my(@b) = unpack ("C*", $b) ;
+
+ my $len = (@a > @b ? @b : @a) ;
+ my $i = 0 ;
+
+ foreach $i ( 0 .. $len -1) {
+ return $a[$i] - $b[$i] if $a[$i] != $b[$i] ;
+ }
+
+ return @a - @b ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ $result = normalise($result) ;
+ return $result ;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ my $result = docat($file);
+ unlink $file ;
+ return $result ;
+}
+
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+
+ return $data ;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
+
+my $db185mode = ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+my $Dfile = "dbbtree.tmp";
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to BTREEINFO
+
+my $dbh = new DB_File::BTREEINFO ;
+ok(1, ! defined $dbh->{flags}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{lorder}) ;
+ok(5, ! defined $dbh->{minkeypage}) ;
+ok(6, ! defined $dbh->{maxkeypage}) ;
+ok(7, ! defined $dbh->{compare}) ;
+ok(8, ! defined $dbh->{prefix}) ;
+
+$dbh->{flags} = 3000 ;
+ok(9, $dbh->{flags} == 3000) ;
+
+$dbh->{cachesize} = 9000 ;
+ok(10, $dbh->{cachesize} == 9000);
+
+$dbh->{psize} = 400 ;
+ok(11, $dbh->{psize} == 400) ;
+
+$dbh->{lorder} = 65 ;
+ok(12, $dbh->{lorder} == 65) ;
+
+$dbh->{minkeypage} = 123 ;
+ok(13, $dbh->{minkeypage} == 123) ;
+
+$dbh->{maxkeypage} = 1234 ;
+ok(14, $dbh->{maxkeypage} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
+
+# Now check the interface to BTREE
+
+my ($X, %h) ;
+ok(17, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+die "Could not tie: $!" unless $X;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(19, !$i ) ;
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(20, $h{'abc'} eq 'ABC' );
+ok(21, ! defined $h{'jimmy'} ) ;
+ok(22, ! exists $h{'jimmy'} ) ;
+ok(23, defined $h{'abc'} ) ;
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+# tie to the same file again
+ok(24, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(25, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(26, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(27, $#keys == 31) ;
+
+#Check that the keys can be retrieved in order
+my @b = keys %h ;
+my @c = sort lexical @b ;
+ok(28, ArrayCompare(\@b, \@c)) ;
+
+$h{'foo'} = '';
+ok(29, $h{'foo'} eq '' ) ;
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(30, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(31, $ok);
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(32, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(33, join(':',200..400) eq join(':',@foo) );
+
+# Now check all the non-tie specific stuff
+
+
+# Check R_NOOVERWRITE flag will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(34, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(35, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(36, $status == 0 );
+
+#check that previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(37, $status == 0 );
+ok(38, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(39, $status == 0 );
+if ($null_keys_allowed) {
+ $status = $X->del('') ;
+} else {
+ $status = 0 ;
+}
+ok(40, $status == 0 );
+
+# Make sure that the deleted key cannot be retrieved
+ok(41, ! defined $h{'q'}) ;
+ok(42, ! defined $h{''}) ;
+
+undef $X ;
+untie %h ;
+
+ok(43, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(44, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(45, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(46, $status == 0 );
+ok(47, $value eq 'A' );
+
+# seq
+# ###
+
+# use seq to find an approximate match
+$key = 'ke' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(48, $status == 0 );
+ok(49, $key eq 'key' );
+ok(50, $value eq 'value' );
+
+# seq when the key does not match
+$key = 'zzz' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(51, $status == 1 );
+
+
+# use seq to set the cursor, then delete the record @ the cursor.
+
+$key = 'x' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(52, $status == 0 );
+ok(53, $key eq 'x' );
+ok(54, $value eq 'X' );
+$status = $X->del(0, R_CURSOR) ;
+ok(55, $status == 0 );
+$status = $X->get('x', $value) ;
+ok(56, $status == 1 );
+
+# ditto, but use put to replace the key/value pair.
+$key = 'y' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(57, $status == 0 );
+ok(58, $key eq 'y' );
+ok(59, $value eq 'Y' );
+
+$key = "replace key" ;
+$value = "replace value" ;
+$status = $X->put($key, $value, R_CURSOR) ;
+ok(60, $status == 0 );
+ok(61, $key eq 'replace key' );
+ok(62, $value eq 'replace value' );
+$status = $X->get('y', $value) ;
+ok(63, 1) ; # hard-wire to always pass. the previous test ($status == 1)
+ # only worked because of a bug in 1.85/6
+
+# use seq to walk forwards through a file
+
+$status = $X->seq($key, $value, R_FIRST) ;
+ok(64, $status == 0 );
+my $previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_NEXT)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == 1 ;
+}
+
+ok(65, $status == 1 );
+ok(66, $ok == 1 );
+
+# use seq to walk backwards through a file
+$status = $X->seq($key, $value, R_LAST) ;
+ok(67, $status == 0 );
+$previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_PREV)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == -1 ;
+ #print "key = [$key] value = [$value]\n" ;
+}
+
+ok(68, $status == 1 );
+ok(69, $ok == 1 );
+
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(70, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(71, $status != 0 );
+
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# Now try an in memory file
+my $Y;
+ok(72, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
+
+# fd with an in memory file should return failure
+$status = $Y->fd ;
+ok(73, $status == -1 );
+
+
+undef $Y ;
+untie %h ;
+
+# Duplicate keys
+my $bt = new DB_File::BTREEINFO ;
+$bt->{flags} = R_DUP ;
+my ($YY, %hh);
+ok(74, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
+
+$hh{'Wall'} = 'Larry' ;
+$hh{'Wall'} = 'Stone' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key and value
+$hh{'Smith'} = 'John' ;
+$hh{'mouse'} = 'mickey' ;
+
+# first work in scalar context
+ok(75, scalar $YY->get_dup('Unknown') == 0 );
+ok(76, scalar $YY->get_dup('Smith') == 1 );
+ok(77, scalar $YY->get_dup('Wall') == 4 );
+
+# now in list context
+my @unknown = $YY->get_dup('Unknown') ;
+ok(78, "@unknown" eq "" );
+
+my @smith = $YY->get_dup('Smith') ;
+ok(79, "@smith" eq "John" );
+
+{
+my @wall = $YY->get_dup('Wall') ;
+my %wall ;
+@wall{@wall} = @wall ;
+ok(80, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
+}
+
+# hash
+my %unknown = $YY->get_dup('Unknown', 1) ;
+ok(81, keys %unknown == 0 );
+
+my %smith = $YY->get_dup('Smith', 1) ;
+ok(82, keys %smith == 1 && $smith{'John'}) ;
+
+my %wall = $YY->get_dup('Wall', 1) ;
+ok(83, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 2);
+
+undef $YY ;
+untie %hh ;
+unlink $Dfile;
+
+
+# test multiple callbacks
+my $Dfile1 = "btree1" ;
+my $Dfile2 = "btree2" ;
+my $Dfile3 = "btree3" ;
+
+my $dbh1 = new DB_File::BTREEINFO ;
+$dbh1->{compare} = sub {
+ no warnings 'numeric' ;
+ $_[0] <=> $_[1] } ;
+
+my $dbh2 = new DB_File::BTREEINFO ;
+$dbh2->{compare} = sub { $_[0] cmp $_[1] } ;
+
+my $dbh3 = new DB_File::BTREEINFO ;
+$dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
+
+
+my (%g, %k);
+tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) or die $!;
+tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) or die $!;
+tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) or die $!;
+
+my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+my (@srt_1, @srt_2, @srt_3);
+{
+ no warnings 'numeric' ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+}
+@srt_2 = sort { $a cmp $b } @Keys ;
+@srt_3 = sort { length $a <=> length $b } @Keys ;
+
+foreach (@Keys) {
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+}
+
+sub ArrayCompare
+{
+    my($a, $b) = @_ ;
+
+    return 0 if @$a != @$b ;
+
+    # compare every element pair, indexing over the full array length
+    foreach (0 .. @$a - 1)
+    {
+        return 0 unless $$a[$_] eq $$b[$_] ;
+    }
+
+    1 ;
+}
+
+ok(84, ArrayCompare (\@srt_1, [keys %h]) );
+ok(85, ArrayCompare (\@srt_2, [keys %g]) );
+ok(86, ArrayCompare (\@srt_3, [keys %k]) );
+
+untie %h ;
+untie %g ;
+untie %k ;
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+# clear
+# #####
+
+ok(87, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(88, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(89, $i == 0);
+
+untie %h ;
+unlink $Dfile1 ;
+
+{
+ # check that attempting to tie an array to a DB_BTREE will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
+ ok(90, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(91, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
+ ' ;
+
+ main::ok(92, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(93, $@ eq "") ;
+ main::ok(94, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(95, $@ eq "") ;
+ main::ok(96, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(97, $@ eq "" ) ;
+ main::ok(98, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(99, $@ eq "") ;
+ main::ok(100, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(101, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(102, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(104, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(105, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(106, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(107, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(109, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(110, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(111, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(112, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(113, $h{"fred"} eq "joe");
+ ok(114, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(115, $db->FIRSTKEY() eq "fred") ;
+ ok(116, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(117, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(118, $h{"fred"} eq "joe");
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(120, $db->FIRSTKEY() eq "fred") ;
+ ok(121, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(122, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(123, $result{"store key"} eq "store key - 1: [fred]");
+ ok(124, $result{"store value"} eq "store value - 1: [joe]");
+ ok(125, ! defined $result{"fetch key"} );
+ ok(126, ! defined $result{"fetch value"} );
+ ok(127, $_ eq "original") ;
+
+ ok(128, $db->FIRSTKEY() eq "fred") ;
+ ok(129, $result{"store key"} eq "store key - 1: [fred]");
+ ok(130, $result{"store value"} eq "store value - 1: [joe]");
+ ok(131, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(132, ! defined $result{"fetch value"} );
+ ok(133, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(134, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(135, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(136, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(137, ! defined $result{"fetch value"} );
+ ok(138, $_ eq "original") ;
+
+ ok(139, $h{"fred"} eq "joe");
+ ok(140, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(141, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(142, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(143, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(144, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(145, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 1
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+ unlink "tree" ;
+ }
+
+ delete $DB_BTREE->{'compare'} ;
+
+ ok(147, docat_del($file) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 2
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, %h);
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(148, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Larry
+Wall -> Larry
+mouse -> mickey
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 3
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $status, $key, $value);
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(149, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Larry
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 4
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(150, docat_del($file) eq <<'EOM') ;
+Wall occurred 3 times
+Larry is there
+There are 2 Brick Walls
+Wall => [Brick Brick Larry]
+Smith => [John]
+Dog => []
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 5
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(151, docat_del($file) eq <<'EOM') ;
+Larry Wall is there
+Harry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 6
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(152, docat_del($file) eq <<'EOM') ;
+Larry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 7
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($filename, $x, %h, $st, $key, $value);
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+
+ }
+
+ ok(153, docat_del($file) eq <<'EOM') ;
+IN ORDER
+Smith -> John
+Wall -> Larry
+Walls -> Brick
+mouse -> mickey
+
+PARTIAL MATCH
+Wa -> Wall -> Larry
+A -> Smith -> John
+a -> mouse -> mickey
+EOM
+
+}
+
+#{
+# # R_SETCURSOR
+# use strict ;
+# my (%h, $db) ;
+# unlink $Dfile;
+#
+# ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+#
+# $h{abc} = 33 ;
+# my $k = "newest" ;
+# my $v = 44 ;
+# my $status = $db->put($k, $v, R_SETCURSOR) ;
+# print "status = [$status]\n" ;
+# ok(157, $status == 0) ;
+# $status = $db->del($k, R_CURSOR) ;
+# print "status = [$status]\n" ;
+# ok(158, $status == 0) ;
+# $k = "newest" ;
+# ok(159, $db->get($k, $v, R_CURSOR)) ;
+#
+# ok(160, keys %h == 1) ;
+#
+# undef $db ;
+# untie %h;
+# unlink $Dfile;
+#}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(154, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(155, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+    # will be recycled and passed to NEXTKEY. If a DBM Filter modifies the
+ # key in FETCH via a filter_fetch_key method we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also Test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(157, $h{'Alpha_ABC'} == 2);
+ ok(158, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(159, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(160, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(161, $bad_key == 0);
+
+ undef $db ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+    # It is now an error to pass 'compare' a non-code reference
+ my $dbh = new DB_File::BTREEINFO ;
+
+ eval { $dbh->{compare} = 2 };
+ ok(162, $@ =~ /^Key 'compare' not associated with a code reference at/);
+
+ eval { $dbh->{prefix} = 2 };
+ ok(163, $@ =~ /^Key 'prefix' not associated with a code reference at/);
+
+}
+
+
+{
+ # recursion detection in btree
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::BTREEINFO ;
+ $dbh->{compare} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(164, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(165, $@ =~ /^DB_File btree_compare: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+ # Check that two callbacks don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::BTREEINFO ;
+ $dbh1->{compare} = sub { ++ $h1_count ; $_[0] cmp $_[1] } ;
+
+ my $dbh2 = new DB_File::BTREEINFO ;
+    $dbh2->{compare} = sub { ++ $h2_count ; $_[0] cmp $_[1] } ;
+
+
+
+ my (%h);
+ ok(166, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(167, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(168, $h1_count > 0);
+ ok(169, $h1_count == $h2_count);
+
+ ok(170, safeUntie \%hash1);
+ ok(171, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(172, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(173, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (174, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(175, $h{"fred"} eq "joe");
+
+ ok(176, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (177, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/libdb/perl/DB_File/t/db-hash.t b/libdb/perl/DB_File/t/db-hash.t
new file mode 100644
index 0000000..10623cc
--- /dev/null
+++ b/libdb/perl/DB_File/t/db-hash.t
@@ -0,0 +1,981 @@
+#!./perl
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..143\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ $result = normalise($result) ;
+ unlink $file ;
+ return $result;
+}
+
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+ return $data ;
+}
+
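+# Calling untie() while other references to the tied object still exist
+# emits an "untie attempted while ... inner references still exist" warning;
+# safeUntie() returns false if that warning fired.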
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to HASHINFO
+
+my $dbh = new DB_File::HASHINFO ;
+
+ok(1, ! defined $dbh->{bsize}) ;
+ok(2, ! defined $dbh->{ffactor}) ;
+ok(3, ! defined $dbh->{nelem}) ;
+ok(4, ! defined $dbh->{cachesize}) ;
+ok(5, ! defined $dbh->{hash}) ;
+ok(6, ! defined $dbh->{lorder}) ;
+
+$dbh->{bsize} = 3000 ;
+ok(7, $dbh->{bsize} == 3000 );
+
+$dbh->{ffactor} = 9000 ;
+ok(8, $dbh->{ffactor} == 9000 );
+
+$dbh->{nelem} = 400 ;
+ok(9, $dbh->{nelem} == 400 );
+
+$dbh->{cachesize} = 65 ;
+ok(10, $dbh->{cachesize} == 65 );
+
+my $some_sub = sub {} ;
+$dbh->{hash} = $some_sub;
+ok(11, $dbh->{hash} eq $some_sub );
+
+$dbh->{lorder} = 1234 ;
+ok(12, $dbh->{lorder} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(13, $@ =~ /^DB_File::HASHINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
+
+
+# Now check the interface to HASH
+my ($X, %h);
+ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+die "Could not tie: $!" unless $X;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
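+# These platforms do not honour the mode argument passed to tie(), so the
+# permission check below is skipped for them.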
+
+ok(16, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640) ||
+ $noMode{$^O} );
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(17, !$i );
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(18, $h{'abc'} eq 'ABC' );
+ok(19, !defined $h{'jimmy'} );
+ok(20, !exists $h{'jimmy'} );
+ok(21, exists $h{'abc'} );
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+
+# tie to the same file again, do not supply a type - should default to HASH
+ok(22, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640) );
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(23, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(24, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(25, $#keys == 31) ;
+
+$h{'foo'} = '';
+ok(26, $h{'foo'} eq '' );
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(27, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(28, $ok );
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(29, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(30, join(':',200..400) eq join(':',@foo) );
+
+
+# Now check all the non-tie specific stuff
+
+# Check NOOVERWRITE will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(31, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(32, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(33, $status == 0 );
+
+#check that previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(34, $status == 0 );
+ok(35, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(36, $status == 0 );
+
+# Make sure that the deleted key cannot be retrieved
+{
+ no warnings 'uninitialized' ;
+ ok(37, $h{'q'} eq undef );
+}
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(38, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(39, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(40, $status == 0 );
+ok(41, $value eq 'A' );
+
+# seq
+# ###
+
+# ditto, but use put to replace the key/value pair.
+
+# use seq to walk backwards through a file - check that the order is reversed
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(42, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(43, $status != 0 );
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# clear
+# #####
+
+ok(44, tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(45, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(46, $i == 0);
+
+untie %h ;
+unlink $Dfile ;
+
+
+# Now try an in memory file
+ok(47, $X = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+# fd with an in-memory file should fail
+$status = $X->fd ;
+ok(48, $status == -1 );
+
+undef $X ;
+untie %h ;
+
+{
+ # check ability to override the default hashing
+ my %x ;
+ my $filename = "xyz" ;
+ my $hi = new DB_File::HASHINFO ;
+ $::count = 0 ;
+ $hi->{hash} = sub { ++$::count ; length $_[0] } ;
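+    # The 'hash' callback is passed the key and must return a numeric hash
+    # value; length() is a deliberately simplistic hash function, and counting
+    # the calls proves the override is actually used.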
+ ok(49, tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $hi ) ;
+    $x{"abc"} = 123 ;
+    ok(50, $x{"abc"} == 123) ;
+ untie %x ;
+ unlink $filename ;
+ ok(51, $::count >0) ;
+}
+
+{
+ # check that attempting to tie an array to a DB_HASH will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_HASH ; } ;
+ ok(52, $@ =~ /^DB_File can only tie an associative array to a DB_HASH database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(53, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbhash.tmp", O_RDWR|O_CREAT, 0640, $DB_HASH );
+ ' ;
+
+ main::ok(54, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(55, $@ eq "") ;
+ main::ok(56, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(57, $@ eq "") ;
+ main::ok(58, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(59, $@ eq "" ) ;
+ main::ok(60, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(61, $@ eq "") ;
+ main::ok(62, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ no warnings 'uninitialized';
+ my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(63, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(64, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(65, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(66, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ my ($k, $v) ;
+ $k = 'fred';
+ ok(67, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(68, $k eq "fred") ;
+ ok(69, $v eq "joe") ;
+ # fk sk fv sv
+ ok(70, checkOutput( "fred", "fred", "joe", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
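+    # A DBM filter operates on $_ ; whatever the callback leaves in $_ is
+    # what actually gets written to, or returned from, the database.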
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(71, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(72, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(73, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $k = 'Fred'; $v ='';
+ ok(74, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(75, $k eq "FRED") ;
+ ok(76, $v eq "[Jxe]") ;
+ # fk sk fv sv
+ ok(77, checkOutput( "FRED", "fred", "[Jxe]", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(78, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(79, $h{"fred"} eq "joe");
+ ok(80, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ #ok(77, $db->FIRSTKEY() eq "fred") ;
+ $k = 'fred';
+ ok(81, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(82, $k eq "fred") ;
+ ok(83, $v eq "joe") ;
+ # fk sk fv sv
+ ok(84, checkOutput( "fred", "fred", "joe", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(85, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(86, $h{"fred"} eq "joe");
+ ok(87, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $k = 'fred';
+ ok(88, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(89, $k eq "fred") ;
+ ok(90, $v eq "joe") ;
+ ok(91, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(92, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(93, $result{"store key"} eq "store key - 1: [fred]");
+ ok(94, $result{"store value"} eq "store value - 1: [joe]");
+ ok(95, ! defined $result{"fetch key"} );
+ ok(96, ! defined $result{"fetch value"} );
+ ok(97, $_ eq "original") ;
+
+ ok(98, $db->FIRSTKEY() eq "fred") ;
+ ok(99, $result{"store key"} eq "store key - 1: [fred]");
+ ok(100, $result{"store value"} eq "store value - 1: [joe]");
+ ok(101, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(102, ! defined $result{"fetch value"} );
+ ok(103, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(104, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(105, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(106, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(107, ! defined $result{"fetch value"} );
+ ok(108, $_ eq "original") ;
+
+ ok(109, $h{"fred"} eq "joe");
+ ok(110, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(111, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(112, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(113, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(114, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(115, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
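+    # The store-key filter fetches from the same tied hash, which would
+    # re-enter the filter code; DB_File detects this and dies.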
+
+ eval '$h{1} = 1234' ;
+ ok(116, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+ our (%h, $k, $v);
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+ unlink "fruit" ;
+ }
+
+ ok(117, docat_del($file) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(118, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(119, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+    # will be recycled and passed to NEXTKEY. If a DBM Filter modifies the
+ # key in FETCH via a filter_fetch_key method we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also Test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(120, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(121, $h{'Alpha_ABC'} == 2);
+ ok(122, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(123, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(124, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(125, $bad_key == 0);
+
+ undef $db ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+    # It is now an error to pass 'hash' a non-code reference
+ my $dbh = new DB_File::HASHINFO ;
+
+ eval { $dbh->{hash} = 2 };
+ ok(126, $@ =~ /^Key 'hash' not associated with a code reference at/);
+
+}
+
+{
+ # recursion detection in hash
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::HASHINFO ;
+ $dbh->{hash} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(127, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(128, $@ =~ /^DB_File hash callback: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+    # Check that two hash callbacks don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::HASHINFO ;
+ $dbh1->{hash} = sub { ++ $h1_count ; length $_[0] } ;
+
+ my $dbh2 = new DB_File::HASHINFO ;
+ $dbh2->{hash} = sub { ++ $h2_count ; length $_[0] } ;
+
+
+
+ my (%h);
+ ok(129, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(130, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(131, $h1_count > 0);
+ ok(132, $h1_count == $h2_count);
+
+ ok(133, safeUntie \%hash1);
+ ok(134, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
+
+{
+ # Passing undef for flags and/or mode when calling tie could cause
+ # Use of uninitialized value in subroutine entry
+
+
+ my $warn_count = 0 ;
+ #local $SIG{__WARN__} = sub { ++ $warn_count };
+ my %hash1;
+ unlink $Dfile;
+
+ tie %hash1, 'DB_File',$Dfile, undef;
+ ok(135, $warn_count == 0);
+ $warn_count = 0;
+ tie %hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, undef;
+ ok(136, $warn_count == 0);
+ tie %hash1, 'DB_File',$Dfile, undef, undef;
+ ok(137, $warn_count == 0);
+ $warn_count = 0;
+
+ unlink $Dfile;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(138, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(139, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (140, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(141, $h{"fred"} eq "joe");
+
+ ok(142, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (143, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/libdb/perl/DB_File/t/db-recno.t b/libdb/perl/DB_File/t/db-recno.t
new file mode 100644
index 0000000..5390b54
--- /dev/null
+++ b/libdb/perl/DB_File/t/db-recno.t
@@ -0,0 +1,1428 @@
+#!./perl -w
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+our ($dbh, $Dfile, $bad_ones, $FA);
+
+# full tied array support started in Perl 5.004_57
+# Double check to see if it is available.
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+
+ return $result ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ normalise($result) ;
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ my $result = docat($file);
+ unlink $file ;
+ return $result;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie @$hashref;
+ return $no_inner;
+}
+
+sub bad_one
+{
+ unless ($bad_ones++) {
+ print STDERR <<EOM ;
+#
+# Some older versions of Berkeley DB version 1 will fail db-recno
+# tests 64, 67 and 70.
+EOM
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ print STDERR <<EOM ;
+#
+# For example Mac OS X 10.1.4 (or earlier) has such an old
+# version of Berkeley DB.
+EOM
+ }
+
+ print STDERR <<EOM ;
+#
+# You can safely ignore the errors if you're never going to use the
+# broken functionality (recno databases with a modified bval).
+# Otherwise you'll have to upgrade your DB library.
+#
+# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
+# last versions that were released. Berkeley DB version 2 is continually
+# being updated -- Check out http://www.sleepycat.com/ for more details.
+#
+EOM
+ }
+}
+
+sub normalise
+{
+ return unless $^O eq 'cygwin' ;
+ foreach (@_)
+ { s#\r\n#\n#g }
+}
+
+BEGIN
+{
+ {
+ local $SIG{__DIE__} ;
+ eval { require Data::Dumper ; import Data::Dumper } ;
+ }
+
+ if ($@) {
+ *Dumper = sub { my $a = shift; return "[ @{ $a } ]" } ;
+ }
+}
+
+my $splice_tests = 10 + 12 + 1; # ten regressions, twelve warning checks, plus the randoms
+my $total_tests = 158 ;
+$total_tests += $splice_tests if $FA ;
+print "1..$total_tests\n";
+
+$Dfile = "recno.tmp";
+unlink $Dfile ;
+
+umask(0);
+
+# Check the interface to RECNOINFO
+
+$dbh = new DB_File::RECNOINFO ;
+ok(1, ! defined $dbh->{bval}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{flags}) ;
+ok(5, ! defined $dbh->{lorder}) ;
+ok(6, ! defined $dbh->{reclen}) ;
+ok(7, ! defined $dbh->{bfname}) ;
+
+$dbh->{bval} = 3000 ;
+ok(8, $dbh->{bval} == 3000 );
+
+$dbh->{cachesize} = 9000 ;
+ok(9, $dbh->{cachesize} == 9000 );
+
+$dbh->{psize} = 400 ;
+ok(10, $dbh->{psize} == 400 );
+
+$dbh->{flags} = 65 ;
+ok(11, $dbh->{flags} == 65 );
+
+$dbh->{lorder} = 123 ;
+ok(12, $dbh->{lorder} == 123 );
+
+$dbh->{reclen} = 1234 ;
+ok(13, $dbh->{reclen} == 1234 );
+
+$dbh->{bfname} = 1234 ;
+ok(14, $dbh->{bfname} == 1234 );
+
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
+
+# Now check the interface to RECNOINFO
+
+my $X ;
+my @h ;
+ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ((stat($Dfile))[2] & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
+
+#my $l = @h ;
+my $l = $X->length ;
+ok(19, ($FA ? @h == 0 : !$l) );
+
+my @data = qw( a b c d ever f g h i j k longername m n o p) ;
+
+$h[0] = shift @data ;
+ok(20, $h[0] eq 'a' );
+
+my $i;
+foreach (@data)
+ { $h[++$i] = $_ }
+
+unshift (@data, 'a') ;
+
+ok(21, defined $h[1] );
+ok(22, ! defined $h[16] );
+ok(23, $FA ? @h == @data : $X->length == @data );
+
+
+# Overwrite an entry & check we can fetch it
+$h[3] = 'replaced' ;
+$data[3] = 'replaced' ;
+ok(24, $h[3] eq 'replaced' );
+
+#PUSH
+my @push_data = qw(added to the end) ;
+($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
+push (@data, @push_data) ;
+ok(25, $h[++$i] eq 'added' );
+ok(26, $h[++$i] eq 'to' );
+ok(27, $h[++$i] eq 'the' );
+ok(28, $h[++$i] eq 'end' );
+
+# POP
+my $popped = pop (@data) ;
+my $value = ($FA ? pop @h : $X->pop) ;
+ok(29, $value eq $popped) ;
+
+# SHIFT
+$value = ($FA ? shift @h : $X->shift) ;
+my $shifted = shift @data ;
+ok(30, $value eq $shifted );
+
+# UNSHIFT
+
+# empty list
+($FA ? unshift @h,() : $X->unshift) ;
+ok(31, ($FA ? @h == @data : $X->length == @data ));
+
+my @new_data = qw(add this to the start of the array) ;
+$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
+unshift (@data, @new_data) ;
+ok(32, $FA ? @h == @data : $X->length == @data );
+ok(33, $h[0] eq "add") ;
+ok(34, $h[1] eq "this") ;
+ok(35, $h[2] eq "to") ;
+ok(36, $h[3] eq "the") ;
+ok(37, $h[4] eq "start") ;
+ok(38, $h[5] eq "of") ;
+ok(39, $h[6] eq "the") ;
+ok(40, $h[7] eq "array") ;
+ok(41, $h[8] eq $data[8]) ;
+
+# Brief test for SPLICE - more thorough 'soak test' is later.
+my @old;
+if ($FA) {
+ @old = splice(@h, 1, 2, qw(bananas just before));
+}
+else {
+ @old = $X->splice(1, 2, qw(bananas just before));
+}
+ok(42, $h[0] eq "add") ;
+ok(43, $h[1] eq "bananas") ;
+ok(44, $h[2] eq "just") ;
+ok(45, $h[3] eq "before") ;
+ok(46, $h[4] eq "the") ;
+ok(47, $h[5] eq "start") ;
+ok(48, $h[6] eq "of") ;
+ok(49, $h[7] eq "the") ;
+ok(50, $h[8] eq "array") ;
+ok(51, $h[9] eq $data[8]) ;
+$FA ? splice(@h, 1, 3, @old) : $X->splice(1, 3, @old);
+
+# Now both arrays should be identical
+
+my $ok = 1 ;
+my $j = 0 ;
+foreach (@data)
+{
+ $ok = 0, last if $_ ne $h[$j ++] ;
+}
+ok(52, $ok );
+
+# Negative subscripts
+
+# get the last element of the array
+ok(53, $h[-1] eq $data[-1] );
+ok(54, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
+
+# get the first element using a negative subscript
+eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
+ok(55, $@ eq "" );
+ok(56, $h[0] eq "abcd" );
+
+# now try to read before the start of the array
+eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
+ok(57, $@ =~ '^Modification of non-creatable array value attempted' );
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+ok(58, safeUntie \@h);
+
+unlink $Dfile;
+
+
+{
+ # Check bval defaults to \n
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ ok(59, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(60, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ ok(61, $x eq "abc\ndef\n\nghi\n") ;
+}
+
+{
+ # Change bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{bval} = "-" ;
+ ok(62, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(63, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc-def--ghi-") ;
+ bad_one() unless $ok ;
+ ok(64, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with default bval (space)
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{reclen} = 5 ;
+ ok(65, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(66, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc def ghi ") ;
+ bad_one() unless $ok ;
+ ok(67, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with user-defined bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{bval} = "-" ;
+ $dbh->{reclen} = 5 ;
+ ok(68, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(69, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc--def-------ghi--") ;
+ bad_one() unless $ok ;
+ ok(70, $ok) ;
+}
+
+{
+ # check that attempting to tie an associative array to a DB_RECNO will fail
+
+ my $filename = "xyz" ;
+ my %x ;
+ eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
+ ok(71, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE or die "Could not close: $!";
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(72, $@ eq "") ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
+ ' ;
+ die "Could not tie: $!" unless $X;
+
+ main::ok(73, $@ eq "") ;
+
+ my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
+ main::ok(74, $@ eq "") ;
+ main::ok(75, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
+ main::ok(76, $@ eq "") ;
+ main::ok(77, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(78, $@ eq "" ) ;
+ main::ok(79, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok(80, $@ eq "") ;
+ main::ok(81, $ret eq "[[11]]") ;
+
+ undef $X;
+ main::ok(82, main::safeUntie \@h);
+ unlink "SubDB.pm", "recno.tmp" ;
+
+}
+
+{
+
+ # test $#
+ my $self ;
+ unlink $Dfile;
+ ok(83, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[2] = "ghi" ;
+ $h[3] = "jkl" ;
+ ok(84, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(85, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ ok(86, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to same length
+ ok(87, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 3 }
+ else
+ { $self->STORESIZE(4) }
+ ok(88, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(89, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(90, $x eq "abc\ndef\nghi\njkl\n") ;
+
+    # $# sets the array to a bigger size
+ ok(91, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 6 }
+ else
+ { $self->STORESIZE(7) }
+ ok(92, $FA ? $#h == 6 : $self->length() == 7) ;
+ undef $self ;
+ ok(93, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(94, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
+
+ # $# sets array smaller
+ ok(95, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 2 }
+ else
+ { $self->STORESIZE(3) }
+ ok(96, $FA ? $#h == 2 : $self->length() == 3) ;
+ undef $self ;
+ ok(97, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(98, $x eq "abc\ndef\nghi\n") ;
+
+ unlink $Dfile;
+
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(99, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ # fk sk fv sv
+ ok(100, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(101, $h[0] eq "joe");
+ # fk sk fv sv
+ ok(102, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $db->FIRSTKEY() == 0) ;
+ # fk sk fv sv
+ ok(104, checkOutput( 0, "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { ++ $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ *= 2 ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[1] = "Joe" ;
+ # fk sk fv sv
+ ok(105, checkOutput( "", 2, "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(106, $h[1] eq "[Jxe]");
+ # fk sk fv sv
+ ok(107, checkOutput( "", 2, "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $db->FIRSTKEY() == 1) ;
+ # fk sk fv sv
+ ok(109, checkOutput( 1, "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(110, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(111, $h[0] eq "joe");
+ ok(112, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(113, $db->FIRSTKEY() == 0) ;
+ ok(114, checkOutput( 0, "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(115, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(116, $h[0] eq "joe");
+ ok(117, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(118, $db->FIRSTKEY() == 0) ;
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ ok(120, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+
+ unlink $Dfile;
+ ok(121, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(122, $result{"store key"} eq "store key - 1: [0]");
+ ok(123, $result{"store value"} eq "store value - 1: [joe]");
+ ok(124, ! defined $result{"fetch key"} );
+ ok(125, ! defined $result{"fetch value"} );
+ ok(126, $_ eq "original") ;
+
+ ok(127, $db->FIRSTKEY() == 0 ) ;
+ ok(128, $result{"store key"} eq "store key - 1: [0]");
+ ok(129, $result{"store value"} eq "store value - 1: [joe]");
+ ok(130, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(131, ! defined $result{"fetch value"} );
+ ok(132, $_ eq "original") ;
+
+ $h[7] = "john" ;
+ ok(133, $result{"store key"} eq "store key - 2: [0 7]");
+ ok(134, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(135, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(136, ! defined $result{"fetch value"} );
+ ok(137, $_ eq "original") ;
+
+ ok(138, $h[0] eq "joe");
+ ok(139, $result{"store key"} eq "store key - 3: [0 7 0]");
+ ok(140, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(141, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(142, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(143, $_ eq "original") ;
+
+ undef $db ;
+ ok(144, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(145, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_store_key (sub { $_ = $h[0] }) ;
+
+ eval '$h[1] = 1234' ;
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ ok(147, safeUntie \@h);
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $FA ? push @h, "green", "black"
+ : $x->push("green", "black") ;
+
+ my $elements = $FA ? scalar @h : $x->length ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $FA ? pop @h : $x->pop ;
+ print "popped $last\n" ;
+
+ $FA ? unshift @h, "white"
+ : $x->unshift("white") ;
+ my $first = $FA ? shift @h : $x->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ undef $x ;
+ untie @h ;
+
+ unlink $filename ;
+ }
+
+ ok(148, docat_del($file) eq <<'EOM') ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+The last element is green
+The 2nd last element is yellow
+EOM
+
+ my $save_output = "xyzt" ;
+ {
+ my $redirect = new Redirect $save_output ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ our (@h, $H, $file, $i);
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
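+    # With R_IAFTER and R_IBEFORE the record number of the newly inserted
+    # record is passed back in $i, and later records are renumbered to make
+    # room for it.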
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+ unlink $file ;
+ }
+
+ ok(149, docat_del($save_output) eq <<'EOM') ;
+
+ORIGINAL
+0: zero
+1: one
+2: two
+3: three
+4: four
+
+The last record was [four]
+The first record was [zero]
+
+REVERSE
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+
+REVERSE again
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my @h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ $h[0] = undef;
+ ok(150, $a eq "") ;
+ ok(151, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @h ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+    @h = () ;
+ ok(152, $a eq "") ;
+ ok(153, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(154, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(155, $h[0] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (156, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h[1] = "joe" ;
+
+ ok(157, $h[1] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (158, ! $@);
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+# Only test splice if this is a newish version of Perl
+exit unless $FA ;
+
+# Test SPLICE
+
+{
+ # check that the splice warnings are under the same lexical control
+ # as their non-tied counterparts.
+
+ use warnings;
+ use strict;
+
+ my $a = '';
+ my @a = (1);
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @tied ;
+
+ tie @tied, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+
+ # uninitialized offset
+ use warnings;
+ my $offset ;
+ $a = '';
+ splice(@a, $offset);
+ ok(159, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, $offset);
+ ok(160, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, $offset);
+ ok(161, $a eq '');
+ $a = '';
+ splice(@tied, $offset);
+ ok(162, $a eq '');
+
+ # uninitialized length
+ use warnings;
+ my $length ;
+ $a = '';
+ splice(@a, 0, $length);
+ ok(163, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(164, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, 0, $length);
+ ok(165, $a eq '');
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(166, $a eq '');
+
+ # offset past end of array
+ use warnings;
+ $a = '';
+ splice(@a, 3);
+ my $splice_end_array = ($a =~ /^splice\(\) offset past end of array/);
+ $a = '';
+ splice(@tied, 3);
+ ok(167, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/);
+
+ no warnings 'misc';
+ $a = '';
+ splice(@a, 3);
+ ok(168, $a eq '');
+ $a = '';
+ splice(@tied, 3);
+ ok(169, $a eq '');
+
+ ok(170, safeUntie \@tied);
+ unlink $Dfile;
+}
+
+#
+# These are a few regression tests: bundles of five arguments to pass
+# to test_splice(). The first four arguments correspond to those
+# given to splice(), and the last says which context to call it in
+# (scalar, list or void).
+#
+# The expected result is not needed because we get that by running
+# Perl's built-in splice().
+#
+my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion',
+ 'rarely', 'paleness' ],
+ -4, -2,
+ [ 'redoubled', 'Taylorize', 'Zoe', 'halogen' ],
+ 'void' ],
+
+ [ [ 'a' ], -2, 1, [ 'B' ], 'void' ],
+
+ [ [ 'Hartley', 'Islandia', 'assents', 'wishful' ],
+ 0, -4,
+ [ 'maids' ],
+ 'void' ],
+
+ [ [ 'visibility', 'pocketful', 'rectangles' ],
+ -10, 0,
+ [ 'garbages' ],
+ 'void' ],
+
+ [ [ 'sleeplessly' ],
+ 8, -4,
+ [ 'Margery', 'clearing', 'repercussion', 'clubs',
+ 'arise' ],
+ 'void' ],
+
+ [ [ 'chastises', 'recalculates' ],
+ 0, 0,
+ [ 'momentariness', 'mediates', 'accents', 'toils',
+ 'regaled' ],
+ 'void' ],
+
+ [ [ 'b', '' ],
+ 9, 8,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'b', '' ],
+ undef, undef,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'riheb' ], -8, undef, [], 'void' ],
+
+ [ [ 'uft', 'qnxs', '' ],
+ 6, -2,
+ [ 'znp', 'mhnkh', 'bn' ],
+ 'void' ],
+ );
+
+my $testnum = 171;
+my $failed = 0;
+require POSIX; my $tmp = POSIX::tmpnam();
+foreach my $test (@tests) {
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ ok($testnum++, 0);
+ }
+ else { ok($testnum++, 1) }
+}
+
+if ($failed) {
+ # Not worth running the random ones
+ print STDERR '# skipping ', $testnum++, "\n";
+}
+else {
+ # A thousand randomly-generated tests
+ $failed = 0;
+ srand(0);
+ foreach (0 .. 1000 - 1) {
+ my $test = rand_test();
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ print STDERR "# skipping any remaining random tests\n";
+ last;
+ }
+ }
+
+ ok($testnum++, not $failed);
+}
+
+die "testnum ($testnum) != total_tests ($total_tests) + 1"
+ if $testnum != $total_tests + 1;
+
+exit ;
+
+# Subroutines for SPLICE testing
+
+# test_splice()
+#
+# Test the new splice() against Perl's built-in one. The first four
+# parameters are those passed to splice(), except that the lists must
+# be (explicitly) passed by reference, and are not actually modified.
+# (It's just a test!) The last argument specifies the context in
+# which to call the functions: 'list', 'scalar', or 'void'.
+#
+# Returns:
+# undef, if the two splices give the same results for the given
+# arguments and context;
+#
+# an error message showing the difference, otherwise.
+#
+# Reads global variable $tmp.
+#
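+# For example (illustrative only, not part of the test plan):
+#   my $err = test_splice([qw(a b c)], 1, 1, ['x'], 'list');
+#   defined $err and print STDERR "# $err\n";
+#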
+sub test_splice {
+ die 'usage: test_splice(array, offset, length, list, context)' if @_ != 5;
+ my ($array, $offset, $length, $list, $context) = @_;
+ my @array = @$array;
+ my @list = @$list;
+
+ unlink $tmp;
+
+ my @h;
+ my $H = tie @h, 'DB_File', $tmp, O_CREAT|O_RDWR, 0644, $DB_RECNO
+ or die "cannot open $tmp: $!";
+
+ my $i = 0;
+ foreach ( @array ) { $h[$i++] = $_ }
+
+ return "basic DB_File sanity check failed"
+ if list_diff(\@array, \@h);
+
+ # Output from splice():
+ # Returned value (munged a bit), error msg, warnings
+ #
+ my ($s_r, $s_error, @s_warnings);
+
+ my $gather_warning = sub { push @s_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($s_error, @s_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.$//;
+ }
+
+ # Now do the same for DB_File's version of splice
+ my ($ms_r, $ms_error, @ms_warnings);
+ $gather_warning = sub { push @ms_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($ms_error, @ms_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.?.*//s;
+ }
+
+ return "different errors: '$s_error' vs '$ms_error'"
+ if $s_error ne $ms_error;
+ return('different return values: ' . Dumper($s_r) . ' vs ' . Dumper($ms_r))
+ if list_diff($s_r, $ms_r);
+ return('different changed list: ' . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ if ((scalar @s_warnings) != (scalar @ms_warnings)) {
+ return 'different number of warnings';
+ }
+
+ while (@s_warnings) {
+ my $sw = shift @s_warnings;
+ my $msw = shift @ms_warnings;
+
+ if (defined $sw and defined $msw) {
+ $msw =~ s/ \(.+\)$//;
+ $msw =~ s/ in splice$// if $] < 5.006;
+ if ($sw ne $msw) {
+ return "different warning: '$sw' vs '$msw'";
+ }
+ }
+ elsif (not defined $sw and not defined $msw) {
+ # Okay.
+ }
+ else {
+ return "one warning defined, another undef";
+ }
+ }
+
+ undef $H;
+ untie @h;
+
+ open(TEXT, $tmp) or die "cannot open $tmp: $!";
+ @h = <TEXT>; normalise @h; chomp @h;
+ close TEXT or die "cannot close $tmp: $!";
+ return('list is different when re-read from disk: '
+ . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ return undef; # success
+}
+
+
+# list_diff()
+#
+# Do two lists differ?
+#
+# Parameters:
+# reference to first list
+# reference to second list
+#
+# Returns true iff they differ. Only works for lists of (string or
+# undef).
+#
+# Surely there is a better way to do this?
+#
+sub list_diff {
+ die 'usage: list_diff(ref to first list, ref to second list)'
+ if @_ != 2;
+ my ($a, $b) = @_;
+ my @a = @$a; my @b = @$b;
+ return 1 if (scalar @a) != (scalar @b);
+ for (my $i = 0; $i < @a; $i++) {
+ my ($ae, $be) = ($a[$i], $b[$i]);
+ if (defined $ae and defined $be) {
+ return 1 if $ae ne $be;
+ }
+ elsif (not defined $ae and not defined $be) {
+ # Two undefined values are 'equal'
+ }
+ else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+# rand_test()
+#
+# Think up a random ARRAY, OFFSET, LENGTH, LIST, and context.
+# ARRAY or LIST might be empty, and OFFSET or LENGTH might be
+# undefined. Return a 'test' - a listref of these five things.
+#
+sub rand_test {
+ die 'usage: rand_test()' if @_;
+ my @contexts = qw<list scalar void>;
+ my $context = $contexts[int(rand @contexts)];
+ return [ rand_list(),
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ rand_list(),
+ $context ];
+}
+
+
+sub rand_list {
+ die 'usage: rand_list()' if @_;
+ my @r;
+
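+    # Each extra element is progressively less likely (the continue
+    # probability shrinks as the list grows), so generated lists stay short.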
+ while (rand() > 0.1 * (scalar @r + 1)) {
+ push @r, rand_word();
+ }
+ return \@r;
+}
+
+
+sub rand_word {
+ die 'usage: rand_word()' if @_;
+ my $r = '';
+ my @chars = qw<a b c d e f g h i j k l m n o p q r s t u v w x y z>;
+ while (rand() > 0.1 * (length($r) + 1)) {
+ $r .= $chars[int(rand(scalar @chars))];
+ }
+ return $r;
+}
+
+
diff --git a/libdb/perl/DB_File/typemap b/libdb/perl/DB_File/typemap
new file mode 100644
index 0000000..8ad7b12
--- /dev/null
+++ b/libdb/perl/DB_File/typemap
@@ -0,0 +1,46 @@
+# typemap for Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 10th December 2000
+# version 1.74
+#
+#################################### DB SECTION
+#
+#
+
+u_int T_U_INT
+DB_File T_PTROBJ
+DBT T_dbtdatum
+DBTKEY T_dbtkeydatum
+
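+# The INPUT maps below convert a Perl scalar into a DBT: for DB_RECNO
+# databases the key is first converted to a record number; otherwise the
+# scalar's string value and length are used directly.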
+INPUT
+T_dbtkeydatum
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (SvOK($arg)){
+ if (db->type != DB_RECNO) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+ else {
+ Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(recno_t);
+ }
+ }
+T_dbtdatum
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ if (SvOK($arg)) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+OUTPUT
+
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_PTROBJ
+ sv_setref_pv($arg, dbtype, (void*)$var);
diff --git a/libdb/perl/DB_File/version.c b/libdb/perl/DB_File/version.c
new file mode 100644
index 0000000..09c9a38
--- /dev/null
+++ b/libdb/perl/DB_File/version.c
@@ -0,0 +1,82 @@
+/*
+
+ version.c -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 2nd Jan 2002
+ version 1.802
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 1.71 - Support for Berkeley DB version 3.
+		Support for Berkeley DB 2/3's backward compatibility mode.
+ 1.72 - No change.
+ 1.73 - Added support for threading
+ 1.74 - Added Perl core patch 7801.
+
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#include <db.h>
+
+void
+#ifdef CAN_PROTOTYPE
+__getBerkeleyDBInfo(void)
+#else
+__getBerkeleyDBInfo()
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ SV * version_sv = perl_get_sv("DB_File::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("DB_File::db_ver", GV_ADD|GV_ADDMULTI) ;
+ SV * compat_sv = perl_get_sv("DB_File::db_185_compat", GV_ADD|GV_ADDMULTI) ;
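+    /*
+     * These package variables ($DB_File::db_version, $DB_File::db_ver and
+     * $DB_File::db_185_compat) are created on demand so the Perl side can
+     * see which Berkeley DB library the module was built against.
+     */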
+
+#ifdef DB_VERSION_MAJOR
+ int Major, Minor, Patch ;
+
+ (void)db_version(&Major, &Minor, &Patch) ;
+
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nDB_File needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ /* check that libdb is recent enough -- we need 2.3.4 or greater */
+ if (Major == 2 && (Minor < 3 || (Minor == 3 && Patch < 4)))
+ croak("DB_File needs Berkeley DB 2.3.4 or greater, you have %d.%d.%d\n",
+ Major, Minor, Patch) ;
+
+ {
+ char buffer[40] ;
+ sprintf(buffer, "%d.%d", Major, Minor) ;
+ sv_setpv(version_sv, buffer) ;
+ sprintf(buffer, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(ver_sv, buffer) ;
+ }
+
+#else /* ! DB_VERSION_MAJOR */
+ sv_setiv(version_sv, 1) ;
+ sv_setiv(ver_sv, 1) ;
+#endif /* ! DB_VERSION_MAJOR */
+
+#ifdef COMPAT185
+ sv_setiv(compat_sv, 1) ;
+#else /* ! COMPAT185 */
+ sv_setiv(compat_sv, 0) ;
+#endif /* ! COMPAT185 */
+
+}
diff --git a/libdb/qam/qam.c b/libdb/qam/qam.c
new file mode 100644
index 0000000..32b3711
--- /dev/null
+++ b/libdb/qam/qam.c
@@ -0,0 +1,1615 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+static int __qam_bulk __P((DBC *, DBT *, u_int32_t));
+static int __qam_c_close __P((DBC *, db_pgno_t, int *));
+static int __qam_c_del __P((DBC *));
+static int __qam_c_destroy __P((DBC *));
+static int __qam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_consume __P((DBC *, QMETA *, db_recno_t));
+static int __qam_getno __P((DB *, const DBT *, db_recno_t *));
+
+/*
+ * __qam_position --
+ * Position a queued access method cursor at a record. This returns
+ * the page locked. *exactp will be set if the record is valid.
+ * PUBLIC: int __qam_position
+ * PUBLIC: __P((DBC *, db_recno_t *, qam_position_mode, int *));
+ */
+int
+__qam_position(dbc, recnop, mode, exactp)
+ DBC *dbc; /* open cursor */
+ db_recno_t *recnop; /* pointer to recno to find */
+ qam_position_mode mode;/* locking: read or write */
+ int *exactp; /* indicate if it was found */
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ QAMDATA *qp;
+ db_pgno_t pg;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Fetch the page for this recno. */
+ pg = QAM_RECNO_PAGE(dbp, *recnop);
+
+ if ((ret = __db_lget(dbc, 0, pg, mode == QAM_READ ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ return (ret);
+ cp->page = NULL;
+ *exactp = 0;
+ if ((ret = __qam_fget(dbp, &pg,
+ mode == QAM_WRITE ? DB_MPOOL_CREATE : 0, &cp->page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, cp->lock);
+ if (mode != QAM_WRITE &&
+ (ret == DB_PAGE_NOTFOUND || ret == ENOENT))
+ return (0);
+ return (ret);
+ }
+ cp->pgno = pg;
+ cp->indx = QAM_RECNO_INDEX(dbp, pg, *recnop);
+
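+	/*
+	 * A page number of zero means mpool just created (zero-filled) this
+	 * page; initialize it as a queue data page unless the database is
+	 * read-only, in which case the record simply does not exist.
+	 */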
+ if (PGNO(cp->page) == 0) {
+ if (F_ISSET(dbp, DB_AM_RDONLY)) {
+ *exactp = 0;
+ return (0);
+ }
+ PGNO(cp->page) = pg;
+ TYPE(cp->page) = P_QAMDATA;
+ }
+
+ qp = QAM_GET_RECORD(dbp, cp->page, cp->indx);
+ *exactp = F_ISSET(qp, QAM_VALID) ? 1 : 0;
+
+ return (ret);
+}
+
+/*
+ * __qam_pitem --
+ * Put an item on a queue page. Copy the data to the page and set the
+ * VALID and SET bits. If logging and the record was previously set,
+ *	log that data; otherwise just log the new data.
+ *
+ * pagep must be write locked
+ *
+ * PUBLIC: int __qam_pitem
+ * PUBLIC: __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+ */
+int
+__qam_pitem(dbc, pagep, indx, recno, data)
+ DBC *dbc;
+ QPAGE *pagep;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT *data;
+{
+ DB *dbp;
+ DBT olddata, pdata, *datap;
+ QAMDATA *qp;
+ QUEUE *t;
+ u_int32_t alloced;
+ u_int8_t *dest, *p;
+ int ret;
+
+ alloced = ret = 0;
+
+ dbp = dbc->dbp;
+ t = (QUEUE *)dbp->q_internal;
+
+ if (data->size > t->re_len)
+ goto len_err;
+
+ qp = QAM_GET_RECORD(dbp, pagep, indx);
+
+ p = qp->data;
+ datap = data;
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (data->doff + data->dlen > t->re_len) {
+ alloced = data->dlen;
+ goto len_err;
+ }
+ if (data->size != data->dlen) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)(alloced ? alloced : data->size));
+ return (EINVAL);
+ }
+ if (data->size == t->re_len)
+ goto no_partial;
+
+ /*
+ * If we are logging, then we have to build the record
+ * first, otherwise, we can simply drop the change
+ * directly on the page. After this clause, make
+ * sure that datap and p are set up correctly so that
+ * copying datap into p does the right thing.
+ *
+	 * Note: I am changing this so that if the existing
+	 * record is not valid, we create a complete record
+	 * to log, so that both this and the recovery code are simpler.
+ */
+
+ if (DBC_LOGGING(dbc) || !F_ISSET(qp, QAM_VALID)) {
+ datap = &pdata;
+ memset(datap, 0, sizeof(*datap));
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ t->re_len, &datap->data)) != 0)
+ return (ret);
+ alloced = 1;
+ datap->size = t->re_len;
+
+ /*
+ * Construct the record if it's valid, otherwise set it
+ * all to the pad character.
+ */
+ dest = datap->data;
+ if (F_ISSET(qp, QAM_VALID))
+ memcpy(dest, p, t->re_len);
+ else
+ memset(dest, t->re_pad, t->re_len);
+
+ dest += data->doff;
+ memcpy(dest, data->data, data->size);
+ } else {
+ datap = data;
+ p += data->doff;
+ }
+ }
+
+no_partial:
+ if (DBC_LOGGING(dbc)) {
+ olddata.size = 0;
+ if (F_ISSET(qp, QAM_SET)) {
+ olddata.data = qp->data;
+ olddata.size = t->re_len;
+ }
+ if ((ret = __qam_add_log(dbp, dbc->txn, &LSN(pagep),
+ 0, &LSN(pagep), pagep->pgno,
+ indx, recno, datap, qp->flags,
+ olddata.size == 0 ? NULL : &olddata)) != 0)
+ goto err;
+ }
+
+ F_SET(qp, QAM_VALID | QAM_SET);
+ memcpy(p, datap->data, datap->size);
+ if (!F_ISSET(data, DB_DBT_PARTIAL))
+ memset(p + datap->size, t->re_pad, t->re_len - datap->size);
+
+err: if (alloced)
+ __os_free(dbp->dbenv, datap->data);
+
+ return (ret);
+}
+/*
+ * __qam_c_put
+ * Cursor put for queued access method.
+ * BEFORE and AFTER cannot be specified.
+ */
+static int
+__qam_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t new_cur, new_first;
+ u_int32_t opcode;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ if (pgnop != NULL)
+ *pgnop = PGNO_INVALID;
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ switch (flags) {
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case DB_CURRENT:
+ break;
+ default:
+ /* The interface shouldn't let anything else through. */
+ DB_ASSERT(0);
+ return (__db_ferr(dbp->dbenv, "__qam_c_put", flags));
+ }
+
+ /* Write lock the record. */
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ return (ret);
+
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ /* We could not get the page, we can release the record lock. */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /* Put the item on the page. */
+ ret = __qam_pitem(dbc, (QPAGE *)cp->page, cp->indx, cp->recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __qam_fput(
+ dbp, cp->pgno, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+ if (ret != 0)
+ return (ret);
+
+ /* We may need to reset the head or tail of the queue. */
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ opcode = 0;
+ new_cur = new_first = 0;
+
+ /*
+ * If the put address is outside the queue, adjust the head and
+	 * tail of the queue.  If the order is inverted, we move
+	 * whichever pointer is closer.  The first case is when the
+	 * queue is empty: move first and current to where the new
+	 * insert is.
+ */
+
+ if (meta->first_recno == meta->cur_recno) {
+ new_first = cp->recno;
+ new_cur = cp->recno + 1;
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETFIRST;
+ opcode |= QAM_SETCUR;
+ } else {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno - cp->recno <
+ cp->recno - meta->cur_recno)) {
+ new_first = cp->recno;
+ opcode |= QAM_SETFIRST;
+ }
+
+ if (meta->cur_recno == cp->recno ||
+ (QAM_AFTER_CURRENT(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ cp->recno - meta->cur_recno <=
+ meta->first_recno - cp->recno))) {
+ new_cur = cp->recno + 1;
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETCUR;
+ }
+ }
+
+ if (opcode != 0 && DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn,
+ 0, opcode, meta->first_recno, new_first,
+ meta->cur_recno, new_cur, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ if (ret != 0)
+ opcode = 0;
+ }
+
+ if (opcode & QAM_SETCUR)
+ meta->cur_recno = new_cur;
+ if (opcode & QAM_SETFIRST)
+ meta->first_recno = new_first;
+
+ if ((t_ret = mpf->put(
+ mpf, meta, opcode != 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __qam_append --
+ * Perform a put(DB_APPEND) in queue.
+ *
+ * PUBLIC: int __qam_append __P((DBC *, DBT *, DBT *));
+ */
+int
+__qam_append(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QPAGE *page;
+ QUEUE *qp;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ /* Write lock the meta page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ /* Get the next record number. */
+ recno = meta->cur_recno;
+ meta->cur_recno++;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno++;
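+	/* If cur_recno wraps around to first_recno, the queue is full. */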
+ if (meta->cur_recno == meta->first_recno) {
+ meta->cur_recno--;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno--;
+ (void)__LPUT(dbc, lock);
+ ret = EFBIG;
+ goto err;
+ }
+
+ if (QAM_BEFORE_FIRST(meta, recno))
+ meta->first_recno = recno;
+
+ /* Lock the record and release meta page lock. */
+ if ((ret = __db_lget(dbc, LCK_COUPLE_ALWAYS,
+ recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, recno)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+
+ pg = QAM_RECNO_PAGE(dbp, recno);
+
+ /* Fetch and write lock the data page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = __qam_fget(dbp, &pg, DB_MPOOL_CREATE, &page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /* See if this is a new page. */
+ if (page->pgno == 0) {
+ page->pgno = pg;
+ page->type = P_QAMDATA;
+ }
+
+ /* Put the item on the page and log it. */
+ ret = __qam_pitem(dbc, page,
+ QAM_RECNO_INDEX(dbp, pg, recno), recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret
+ = __qam_fput(dbp, pg, page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Return the record number to the user. */
+ if (ret == 0)
+ ret = __db_retcopy(dbp->dbenv, key,
+ &recno, sizeof(recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ /* Position the cursor on this record. */
+ cp->recno = recno;
+
+ /* See if we are leaving the extent. */
+ qp = (QUEUE *) dbp->q_internal;
+ if (qp->page_ext != 0 &&
+ (recno % (qp->page_ext * qp->rec_page) == 0 ||
+ recno == UINT32_T_MAX)) {
+ if ((ret = __db_lget(dbc,
+ 0, ((QUEUE *)dbp->q_internal)->q_meta,
+ DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if (!QAM_AFTER_CURRENT(meta, recno))
+ ret = __qam_fclose(dbp, pg);
+ (void)__LPUT(dbc, lock);
+ }
+
+err:
+ /* Release the meta page. */
+ if ((t_ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_c_del --
+ * Qam cursor->am_del function
+ */
+static int
+__qam_c_del(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBT data;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ QAMDATA *qp;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t first;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ /* Write lock the meta page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_READ, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ if (QAM_NOT_VALID(meta, cp->recno))
+ ret = DB_NOTFOUND;
+
+ first = meta->first_recno;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err1;
+
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ goto err1;
+
+ cp->lock_mode = DB_LOCK_WRITE;
+	/* Find the record; delete only deletes exact matches. */
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ cp->lock = lock;
+ goto err1;
+ }
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+
+ pagep = cp->page;
+ qp = QAM_GET_RECORD(dbp, pagep, cp->indx);
+
+ if (DBC_LOGGING(dbc)) {
+ if (((QUEUE *)dbp->q_internal)->page_ext == 0 ||
+ ((QUEUE *)dbp->q_internal)->re_len == 0) {
+ if ((ret = __qam_del_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ data.size = ((QUEUE *)dbp->q_internal)->re_len;
+ data.data = qp->data;
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno, &data)) != 0)
+ goto err1;
+ }
+ }
+
+ F_CLR(qp, QAM_VALID);
+
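+	/*
+	 * If we deleted the head of the queue, let __qam_consume try to
+	 * advance the meta page's first_recno past any deleted records.
+	 */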
+ if (cp->recno == first) {
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err1;
+ ret = __qam_consume(dbc, meta, first);
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+err1:
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cp->page != NULL && (t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->lock = lock;
+
+ return (ret);
+}
+
+#ifdef DEBUG_WOP
+#define QDEBUG
+#endif
+
+/*
+ * __qam_c_get --
+ * Queue cursor->c_get function.
+ */
+static int
+__qam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBC *dbcdup;
+ DBT tmp;
+ DB_ENV *dbenv;
+ DB_LOCK lock, pglock, metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pg;
+ QAMDATA *qp;
+ QMETA *meta;
+ QUEUE *t;
+ QUEUE_CURSOR *cp;
+ db_lockmode_t lock_mode;
+ db_pgno_t metapno;
+ db_recno_t first;
+ qam_position_mode mode;
+ int exact, is_first, locked, ret, t_ret, wait, with_delete;
+ int put_mode, meta_dirty, retrying;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ PANIC_CHECK(dbenv);
+
+ wait = 0;
+ with_delete = 0;
+ retrying = 0;
+ lock_mode = DB_LOCK_READ;
+ put_mode = 0;
+ t_ret = 0;
+ *pgnop = 0;
+ pg = NULL;
+
+ mode = QAM_READ;
+ if (F_ISSET(dbc, DBC_RMW)) {
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_WRITE;
+ }
+
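+	/*
+	 * DB_CONSUME is handled as a DB_FIRST get plus a delete of the
+	 * returned record (with_delete); DB_CONSUME_WAIT additionally
+	 * blocks until a record becomes available.
+	 */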
+ if (flags == DB_CONSUME_WAIT) {
+ wait = 1;
+ flags = DB_CONSUME;
+ }
+ if (flags == DB_CONSUME) {
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
+ return (ret);
+
+ with_delete = 1;
+ flags = DB_FIRST;
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_CONSUME;
+ }
+
+ DEBUG_LREAD(dbc, dbc->txn, "qam_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ /* Make lint and friends happy. */
+ meta_dirty = 0;
+ locked = 0;
+
+ is_first = 0;
+
+ t = (QUEUE *)dbp->q_internal;
+ metapno = t->q_meta;
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+	 * trying to pin it.  This is because someone may have it pinned
+ * but not locked.
+ */
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0)
+ return (ret);
+ if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+
+ first = 0;
+
+ /* Release any previous lock if not in a transaction. */
+ (void)__TLPUT(dbc, cp->lock);
+
+retry: /* Update the record number. */
+ switch (flags) {
+ case DB_CURRENT:
+ break;
+ case DB_NEXT_DUP:
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ is_first = 1;
+
+ /* get the first record number */
+ cp->recno = first = meta->first_recno;
+
+ break;
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) ||
+ cp->recno == meta->first_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ if (meta->first_recno == meta->cur_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ cp->recno = meta->cur_recno - 1;
+ if (cp->recno == RECNO_OOB)
+ cp->recno--;
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbenv, "__qam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * Check to see if we are out of data. Current points to
+ * the first free slot.
+ */
+ if (cp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, cp->recno)) {
+ ret = DB_NOTFOUND;
+ pg = NULL;
+ if (wait) {
+ flags = DB_FIRST;
+ /*
+			 * If first is not set, then we skipped a
+			 * locked record; go back and find it.
+			 * If we find a locked record again,
+			 * wait for it.
+ */
+ if (first == 0) {
+ retrying = 1;
+ goto retry;
+ }
+ if (CDB_LOCKING(dbenv)) {
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
+ DB_LOCK_SWITCH, &dbc->lock_dbt,
+ DB_LOCK_WAIT, &dbc->mylock)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt,
+ DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ goto retry;
+ }
+ /*
+ * Wait for someone to update the meta page.
+ * This will probably mean there is something
+ * in the queue. We then go back up and
+ * try again.
+ */
+ if (locked == 0) {
+ if ((ret = __db_lget( dbc,
+ 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ if (cp->recno != RECNO_OOB &&
+ !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto retry;
+ }
+ if ((ret = __db_lget(dbc, 0, metapno,
+ DB_LOCK_WAIT, DB_LOCK_SWITCH, &metalock)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
+ &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto retry;
+ }
+
+ goto err;
+ }
+
+ /* Don't hold the meta page long term. */
+ if (locked) {
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ locked = 0;
+ }
+
+ /* Lock the record. */
+ if ((ret = __db_lget(dbc, 0, cp->recno, lock_mode,
+ (with_delete && !retrying) ?
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD : DB_LOCK_RECORD,
+ &lock)) == DB_LOCK_NOTGRANTED && with_delete) {
+#ifdef QDEBUG
+ __db_logmsg(dbenv,
+ dbc->txn, "Queue S", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ first = 0;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto retry;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * In the DB_FIRST or DB_LAST cases we must wait and then start over
+ * since the first/last may have moved while we slept.
+ * We release our locks and try again.
+ */
+ if ((!with_delete && is_first) || flags == DB_LAST) {
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ if (cp->recno !=
+ (is_first ? meta->first_recno : (meta->cur_recno - 1))) {
+ __LPUT(dbc, lock);
+ if (is_first)
+ flags = DB_FIRST;
+ locked = 1;
+ goto retry;
+ }
+ /* Don't hold the meta page long term. */
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ }
+
+ /* Position the cursor on the record. */
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0) {
+ /* We cannot get the page, release the record lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ pg = cp->page;
+ pglock = cp->lock;
+ cp->lock = lock;
+ cp->lock_mode = lock_mode;
+
+ if (!exact) {
+ if (flags == DB_NEXT || flags == DB_NEXT_NODUP ||
+ flags == DB_PREV || flags == DB_PREV_NODUP ||
+ flags == DB_LAST) {
+ /* Release locks and try again. */
+ if (pg != NULL)
+ (void)__qam_fput(dbp, cp->pgno, pg, 0);
+ cp->page = pg = NULL;
+ (void)__LPUT(dbc, pglock);
+ (void)__LPUT(dbc, cp->lock);
+ if (flags == DB_LAST)
+ flags = DB_PREV;
+ if (!with_delete)
+ is_first = 0;
+ retrying = 0;
+ goto retry;
+ }
+ /* this is for the SET and SET_RANGE cases */
+ ret = DB_KEYEMPTY;
+ goto err1;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (key != NULL) {
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE &&
+ (ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err1;
+ F_SET(key, DB_DBT_ISSET);
+ }
+
+ qp = QAM_GET_RECORD(dbp, pg, cp->indx);
+
+ /* Return the data item. */
+ if (flags == DB_GET_BOTH || flags == DB_GET_BOTH_RANGE) {
+ /*
+ * Need to compare
+ */
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret = __bam_defcmp(dbp, data, &tmp)) != 0) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+ }
+ if (data != NULL &&
+ !F_ISSET(dbc, DBC_MULTIPLE|DBC_MULTIPLE_KEY) &&
+ (ret = __db_retcopy(dbp->dbenv, data,
+ qp->data, t->re_len, &dbc->rdata->data, &dbc->rdata->ulen)) != 0)
+ goto err1;
+
+ if (data != NULL)
+ F_SET(data, DB_DBT_ISSET);
+
+ /* Finally, if we are doing DB_CONSUME mark the record. */
+ if (with_delete) {
+ /*
+ * Assert that we're not a secondary index. Doing a DB_CONSUME
+ * on a secondary makes very little sense, since one can't
+ * DB_APPEND there; attempting one should be forbidden by
+ * the interface.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ /*
+ * Check and see if we *have* any secondary indices.
+ * If we do, we're a primary, so call __db_c_del_primary
+ * to delete the references to the item we're about to
+ * delete.
+ *
+ * Note that we work on a duplicated cursor, since the
+ * __db_ret work has already been done, so it's not safe
+ * to perform any additional ops on this cursor.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL) {
+ if ((ret = __db_c_idup(dbc,
+ &dbcdup, DB_POSITIONI)) != 0)
+ goto err1;
+
+ if ((ret = __db_c_del_primary(dbcdup)) != 0) {
+ /*
+ * The __db_c_del_primary return is more
+ * interesting.
+ */
+ (void)dbcdup->c_close(dbcdup);
+ goto err1;
+ }
+
+ if ((ret = dbcdup->c_close(dbcdup)) != 0)
+ goto err1;
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ if (t->page_ext == 0 || t->re_len == 0) {
+ if ((ret = __qam_del_log(dbp, dbc->txn,
+ &LSN(pg), 0, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pg), 0, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno, &tmp)) != 0)
+ goto err1;
+ }
+ }
+
+ F_CLR(qp, QAM_VALID);
+ put_mode = DB_MPOOL_DIRTY;
+
+ if ((ret = __LPUT(dbc, pglock)) != 0)
+ goto err1;
+
+ /*
+ * Now we need to update the metapage
+ * first pointer. If we have deleted
+ * the record that is pointed to by
+ * first_recno then we move it as far
+ * forward as we can without blocking.
+ * The metapage lock must be held for
+ * the whole scan otherwise someone could
+ * do a random insert behind where we are
+ * looking.
+ */
+
+ if (locked == 0 && (ret = __db_lget(
+ dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err1;
+ locked = 1;
+
+#ifdef QDEBUG
+ __db_logmsg(dbenv,
+ dbc->txn, "Queue D", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ /*
+ * See if we deleted the "first" record. If
+		 * first is zero then we skipped something;
+		 * see if first_recno has been moved past
+		 * that to the record that we deleted.
+ */
+ if (first == 0)
+ first = cp->recno;
+ if (first != meta->first_recno)
+ goto done;
+
+ if ((ret = __qam_consume(dbc, meta, first)) != 0)
+ goto err1;
+ }
+
+done:
+err1: if (cp->page != NULL) {
+ t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode);
+
+ if (!ret)
+ ret = t_ret;
+ /* Doing record locking, release the page lock */
+ t_ret = __LPUT(dbc, pglock);
+ cp->page = NULL;
+ }
+
+err: if (!ret)
+ ret = t_ret;
+ if (meta) {
+
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, meta_dirty ? DB_MPOOL_DIRTY : 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if (locked)
+ t_ret = __LPUT(dbc, metalock);
+ }
+ DB_ASSERT(!LOCK_ISSET(metalock));
+
+ /*
+ * There is no need to keep the record locked if we are
+ * not in a transaction.
+ */
+ if (t_ret == 0)
+ t_ret = __TLPUT(dbc, cp->lock);
+
+ return (ret ? ret : t_ret);
+}
+
+/*
+ * __qam_consume -- try to reset the head of the queue.
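+ *	Advance meta->first_recno past records that are no longer valid,
+ *	stopping at the first live or locked record (or at cur_recno), and
+ *	remove any queue extents that become empty along the way.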
+ *
+ */
+
+static int
+__qam_consume(dbc, meta, first)
+ DBC *dbc;
+ QMETA *meta;
+ db_recno_t first;
+{
+ DB *dbp;
+ DB_LOCK lock, save_lock;
+ DB_MPOOLFILE *mpf;
+ QUEUE_CURSOR *cp;
+ db_indx_t save_indx;
+ db_pgno_t save_page;
+ db_recno_t current, save_recno;
+ u_int32_t rec_extent;
+ int exact, put_mode, ret, t_ret, wrapped;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ put_mode = DB_MPOOL_DIRTY;
+ ret = t_ret = 0;
+
+ save_page = cp->pgno;
+ save_indx = cp->indx;
+ save_recno = cp->recno;
+ save_lock = cp->lock;
+
+ /*
+ * If we skipped some deleted records, we need to
+ * reposition on the first one. Get a lock
+ * in case someone is trying to put it back.
+ */
+ if (first != cp->recno) {
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ goto done;
+ }
+ if (ret != 0)
+ goto done;
+ if ((ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ goto done;
+ cp->page = NULL;
+ put_mode = 0;
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0 || exact != 0) {
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
+ if ((ret =__LPUT(dbc, lock)) != 0)
+ goto done;
+ if ((ret = __LPUT(dbc, cp->lock)) != 0)
+ goto done;
+ }
+
+ current = meta->cur_recno;
+ wrapped = 0;
+ if (first > current)
+ wrapped = 1;
+ rec_extent = meta->page_ext * meta->rec_page;
+
+ /* Loop until we find a record or hit current */
+ for (;;) {
+ /*
+		 * Check to see if we are moving off the extent,
+		 * and if so remove the extent.
+		 * If we are moving off a page, we need to
+		 * get rid of the buffer.
+		 * Wait for the lagging readers to move off the
+		 * page.
+ */
+ if (cp->page != NULL && rec_extent != 0 &&
+ ((exact = (first % rec_extent == 0)) ||
+ first % meta->rec_page == 0 ||
+ first == UINT32_T_MAX)) {
+ if (exact == 1 && (ret = __db_lget(dbc,
+ 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ break;
+
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue R", 0, "%x %d %d %d",
+ dbc->locker, cp->pgno, first, meta->first_recno);
+#endif
+ put_mode |= DB_MPOOL_DISCARD;
+ if ((ret = __qam_fput(dbp,
+ cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+
+ if (exact == 1) {
+ ret = __qam_fremove(dbp, cp->pgno);
+ t_ret = __LPUT(dbc, cp->lock);
+ }
+ if (ret != 0)
+ break;
+ if (t_ret != 0) {
+ ret = t_ret;
+ break;
+ }
+ } else if (cp->page != NULL && (ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+ first++;
+ if (first == RECNO_OOB) {
+ wrapped = 0;
+ first++;
+ }
+
+ /*
+		 * LOOP EXIT: stop when we catch up to the current
+		 * pointer.
+ */
+ if (!wrapped && first >= current)
+ break;
+
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ break;
+ }
+ if (ret != 0)
+ break;
+
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0) {
+ (void)__LPUT(dbc, lock);
+ break;
+ }
+ put_mode = 0;
+ if ((ret =__LPUT(dbc, lock)) != 0 ||
+ (ret = __LPUT(dbc, cp->lock)) != 0 || exact) {
+ if ((t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, put_mode)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ break;
+ }
+ }
+
+ cp->pgno = save_page;
+ cp->indx = save_indx;
+ cp->recno = save_recno;
+ cp->lock = save_lock;
+
+ /*
+ * We have advanced as far as we can.
+ * Advance first_recno to this point.
+ */
+ if (ret == 0 && meta->first_recno != first) {
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv, dbc->txn, "Queue M",
+ 0, "%x %d %d %d", dbc->locker, cp->recno,
+ first, meta->first_recno);
+#endif
+ if (DBC_LOGGING(dbc))
+ if ((ret = __qam_incfirst_log(dbp,
+ dbc->txn, &meta->dbmeta.lsn, 0,
+ cp->recno, PGNO_BASE_MD)) != 0)
+ goto done;
+ meta->first_recno = first;
+ (void)mpf->set(mpf, meta, DB_MPOOL_DIRTY);
+ }
+
+done:
+ return (ret);
+}
+
+static int
+__qam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pg;
+ QMETA *meta;
+ QAMDATA *qp;
+ QUEUE_CURSOR *cp;
+ db_indx_t indx;
+ db_pgno_t metapno;
+ qam_position_mode mode;
+ int32_t *endp, *offp;
+ u_int8_t *dbuf, *dp, *np;
+ int exact, recs, re_len, ret, t_ret, valid;
+ int is_key, need_pg, pagesize, size, space;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ mode = QAM_READ;
+ if (F_ISSET(dbc, DBC_RMW))
+ mode = QAM_WRITE;
+
+ pagesize = dbp->pgsize;
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ recs = ((QUEUE *)dbp->q_internal)->rec_page;
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ size = 0;
+
+ if ((ret = __db_lget(dbc, 0, metapno, DB_LOCK_READ, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+
+ dbuf = data->data;
+ np = dp = dbuf;
+
+	/* Keep track of space that is left.  There is a termination entry. */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+	/* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
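+	/*
+	 * The data portion of each queue page is copied into the buffer
+	 * from the front (dp/np), while the offset/length entries -- plus
+	 * the record number when DB_MULTIPLE_KEY is set -- are filled in
+	 * from the back (offp), in the usual bulk-buffer layout.
+	 */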
+
+next_pg:
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0)
+ goto done;
+
+ pg = cp->page;
+ indx = cp->indx;
+ need_pg = 1;
+
+ do {
+ /*
+ * If this page is a nonexistent page at the end of an
+ * extent, pg may be NULL. A NULL page has no valid records,
+ * so just keep looping as though qp exists and isn't QAM_VALID;
+ * calling QAM_GET_RECORD is unsafe.
+ */
+ valid = 0;
+
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ if (pg != NULL) {
+ qp = QAM_GET_RECORD(dbp, pg, indx);
+ if (F_ISSET(qp, QAM_VALID)) {
+ valid = 1;
+ space -= (is_key ? 3 : 2) * sizeof(*offp);
+ if (space < 0)
+ goto get_space;
+ if (need_pg) {
+ dp = np;
+ size = pagesize - QPAGE_SZ(dbp);
+ if (space < size) {
+get_space:
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ ret = ENOMEM;
+ break;
+ }
+ if (indx != 0)
+ indx--;
+ cp->recno--;
+ break;
+ }
+ memcpy(dp,
+ (char *)pg + QPAGE_SZ(dbp), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (is_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)((u_int8_t*)qp -
+ (u_int8_t*)pg - QPAGE_SZ(dbp) +
+ dp - dbuf + SSZA(QAMDATA, data));
+ *offp-- = re_len;
+ }
+ }
+ if (!valid && is_key == 0) {
+ *offp-- = 0;
+ *offp-- = 0;
+ }
+ cp->recno++;
+ } while (++indx < recs && indx != RECNO_OOB
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno));
+
+ if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (cp->page != NULL) {
+ if ((t_ret =
+ __qam_fput(dbp, cp->pgno, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ }
+
+ if (ret == 0
+ && (indx >= recs || indx == RECNO_OOB)
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto next_pg;
+
+ if (is_key == 1)
+ *offp = RECNO_OOB;
+ else
+ *offp = -1;
+
+done:
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ t_ret = __LPUT(dbc, metalock);
+
+ return (ret);
+}
+
+/*
+ * __qam_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__qam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ QUEUE_CURSOR *cp;
+
+ COMPQUIET(root_pgno, 0);
+ COMPQUIET(rmroot, NULL);
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Discard any locks not acquired inside of a transaction. */
+ (void)__TLPUT(dbc, cp->lock);
+ LOCK_INIT(cp->lock);
+
+ cp->page = NULL;
+ cp->pgno = PGNO_INVALID;
+ cp->indx = 0;
+ cp->lock_mode = DB_LOCK_NG;
+ cp->recno = RECNO_OOB;
+ cp->flags = 0;
+
+ return (0);
+}
+
+/*
+ * __qam_c_dup --
+ * Duplicate a queue cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __qam_c_dup __P((DBC *, DBC *));
+ */
+int
+__qam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ QUEUE_CURSOR *orig, *new;
+
+ orig = (QUEUE_CURSOR *)orig_dbc->internal;
+ new = (QUEUE_CURSOR *)new_dbc->internal;
+
+ new->recno = orig->recno;
+
+ /* reget the long term lock if we are not in a xact */
+ if (orig_dbc->txn != NULL ||
+ !STD_LOCKING(orig_dbc) || !LOCK_ISSET(orig->lock))
+ return (0);
+
+ return (__db_lget(new_dbc,
+ 0, new->recno, new->lock_mode, DB_LOCK_RECORD, &new->lock));
+}
+
+/*
+ * __qam_c_init
+ *
+ * PUBLIC: int __qam_c_init __P((DBC *));
+ */
+int
+__qam_c_init(dbc)
+ DBC *dbc;
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ /* Allocate the internal structure. */
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (cp == NULL) {
+ if ((ret =
+ __os_calloc(dbp->dbenv, 1, sizeof(QUEUE_CURSOR), &cp)) != 0)
+ return (ret);
+ dbc->internal = (DBC_INTERNAL *)cp;
+ }
+
+ /* Initialize methods. */
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
+ dbc->c_put = __db_c_put;
+ dbc->c_am_bulk = __qam_bulk;
+ dbc->c_am_close = __qam_c_close;
+ dbc->c_am_del = __qam_c_del;
+ dbc->c_am_destroy = __qam_c_destroy;
+ dbc->c_am_get = __qam_c_get;
+ dbc->c_am_put = __qam_c_put;
+ dbc->c_am_writelock = NULL;
+
+ return (0);
+}
+
+/*
+ * __qam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__qam_c_destroy(dbc)
+ DBC *dbc;
+{
+ /* Discard the structures. */
+ __os_free(dbc->dbp->dbenv, dbc->internal);
+
+ return (0);
+}
+
+/*
+ * __qam_getno --
+ * Check the user's record number.
+ */
+static int
+__qam_getno(dbp, key, rep)
+ DB *dbp;
+ const DBT *key;
+ db_recno_t *rep;
+{
+ if ((*rep = *(db_recno_t *)key->data) == 0) {
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __qam_truncate --
+ * Truncate a queue database
+ *
+ * PUBLIC: int __qam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__qam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapno;
+ int count, ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Walk the queue, counting rows. */
+ count = 0;
+ while ((ret = __qam_c_get(dbc, NULL, NULL, DB_CONSUME, &metapno)) == 0)
+ count++;
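+	/* DB_CONSUME deletes each record it returns, emptying the queue. */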
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ return (ret);
+
+	/* Update the meta page: lock and fetch it, then reset the pointers. */
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+ if (DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn, 0,
+ QAM_SETCUR | QAM_SETFIRST | QAM_TRUNCATE, meta->first_recno,
+ 1, meta->cur_recno, 1, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ }
+ if (ret == 0)
+ meta->first_recno = meta->cur_recno = 1;
+
+ if ((t_ret =
+ mpf->put(mpf, meta, ret == 0 ? DB_MPOOL_DIRTY: 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ *countp = count;
+
+ return (ret);
+}
diff --git a/libdb/qam/qam.src b/libdb/qam/qam.src
new file mode 100644
index 0000000..99a8428
--- /dev/null
+++ b/libdb/qam/qam.src
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __qam
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/qam.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
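+/*
+ * Each BEGIN/END block below describes one queue log record type; the
+ * gen_rec.awk script expands these descriptions into the _log, _read,
+ * _print and _getpgnos routines found in qam_auto.c.
+ */
+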
+/*
+ * incfirst
+ * Used when we increment first_recno.
+ */
+BEGIN incfirst 84
+DB fileid int32_t ld
+ARG recno db_recno_t lu
+WRLOCK meta_pgno db_pgno_t lu
+END
+
+/*
+ * mvptr
+ * Used when we change one or both of cur_recno and first_recno.
+ */
+BEGIN mvptr 85
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+ARG old_first db_recno_t lu
+ARG new_first db_recno_t lu
+ARG old_cur db_recno_t lu
+ARG new_cur db_recno_t lu
+POINTER metalsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+END
+
+
+/*
+ * del
+ * Used when we delete a record.
+ * recno is the record that is being deleted.
+ */
+BEGIN del 79
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+END
+
+/*
+ * add
+ * Used when we put a record on a page.
+ * recno is the record being added.
+ * data is the record itself.
+ */
+BEGIN add 80
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+ARG vflag u_int32_t lu
+DBT olddata DBT s
+END
+
+/*
+ * delext
+ * Used when we delete a record in an extent-based queue.
+ * recno is the record that is being deleted.
+ */
+BEGIN delext 83
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+END
diff --git a/libdb/qam/qam_auto.c b/libdb/qam/qam_auto.c
new file mode 100644
index 0000000..784598a
--- /dev/null
+++ b/libdb/qam/qam_auto.c
@@ -0,0 +1,1449 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __qam_incfirst_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, db_recno_t, db_pgno_t));
+ */
+int
+__qam_incfirst_log(dbp, txnid, ret_lsnp, flags, recno, meta_pgno)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ db_recno_t recno;
+ db_pgno_t meta_pgno;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___qam_incfirst;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)recno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__qam_incfirst_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_incfirst_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __qam_incfirst_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __qam_incfirst_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->meta_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_incfirst_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_incfirst_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_incfirst_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__qam_incfirst: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\trecno: %lu\n", (u_long)argp->recno);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_incfirst_read __P((DB_ENV *, void *,
+ * PUBLIC: __qam_incfirst_args **));
+ */
+int
+__qam_incfirst_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_incfirst_args **argpp;
+{
+ __qam_incfirst_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__qam_incfirst_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->recno = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_mvptr_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, db_recno_t, db_recno_t, db_recno_t,
+ * PUBLIC: db_recno_t, DB_LSN *, db_pgno_t));
+ */
+int
+__qam_mvptr_log(dbp, txnid, ret_lsnp, flags,
+ opcode, old_first, new_first, old_cur, new_cur,
+ metalsn, meta_pgno)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ db_recno_t old_first;
+ db_recno_t new_first;
+ db_recno_t old_cur;
+ db_recno_t new_cur;
+ DB_LSN * metalsn;
+ db_pgno_t meta_pgno;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___qam_mvptr;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*metalsn)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)old_first;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)new_first;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)old_cur;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)new_cur;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (metalsn != NULL)
+ memcpy(bp, metalsn, sizeof(*metalsn));
+ else
+ memset(bp, 0, sizeof(*metalsn));
+ bp += sizeof(*metalsn);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__qam_mvptr_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_mvptr_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_mvptr_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __qam_mvptr_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __qam_mvptr_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->meta_pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_mvptr_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_mvptr_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_mvptr_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__qam_mvptr: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\told_first: %lu\n", (u_long)argp->old_first);
+ (void)printf("\tnew_first: %lu\n", (u_long)argp->new_first);
+ (void)printf("\told_cur: %lu\n", (u_long)argp->old_cur);
+ (void)printf("\tnew_cur: %lu\n", (u_long)argp->new_cur);
+ (void)printf("\tmetalsn: [%lu][%lu]\n",
+ (u_long)argp->metalsn.file, (u_long)argp->metalsn.offset);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **));
+ */
+int
+__qam_mvptr_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_mvptr_args **argpp;
+{
+ __qam_mvptr_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__qam_mvptr_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->old_first = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->new_first = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->old_cur = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->new_cur = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->metalsn, bp, sizeof(argp->metalsn));
+ bp += sizeof(argp->metalsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_del_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t));
+ */
+int
+__qam_del_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___qam_del;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)recno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__qam_del_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_del_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_del_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __qam_del_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __qam_del_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_del_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_del_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_del_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__qam_del: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\trecno: %lu\n", (u_long)argp->recno);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **));
+ */
+int
+__qam_del_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_del_args **argpp;
+{
+ __qam_del_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__qam_del_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->recno = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_add_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t,
+ * PUBLIC: const DBT *, u_int32_t, const DBT *));
+ */
+int
+__qam_add_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data,
+ vflag, olddata)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ const DBT *data;
+ u_int32_t vflag;
+ const DBT *olddata;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___qam_add;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
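+ /*
+ * The record is a fixed header (record type, transaction id, previous
+ * LSN) followed by each argument in order; the DBT arguments are
+ * written as a 4-byte length followed by that many data bytes.
+ */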
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (olddata == NULL ? 0 : olddata->size);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)recno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+
+ uinttmp = (u_int32_t)vflag;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (olddata == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &olddata->size, sizeof(olddata->size));
+ bp += sizeof(olddata->size);
+ memcpy(bp, olddata->data, olddata->size);
+ bp += olddata->size;
+ }
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__qam_add_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_add_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_add_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ DB *dbp;
+ TXN_RECS *t;
+ __qam_add_args *argp;
+ u_int32_t ret;
+
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ argp = NULL;
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __qam_add_read(dbenv, rec->data, &argp)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_id_to_db(dbenv,
+ argp->txnid, &dbp, argp->fileid, 0)) != 0)
+ goto err;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ goto err;
+
+ t->array[t->npages].flags = 0;
+ t->array[t->npages].fid = argp->fileid;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].pgdesc.pgno = argp->pgno;
+ t->array[t->npages].pgdesc.type = DB_PAGE_LOCK;
+ memcpy(t->array[t->npages].pgdesc.fileid, dbp->fileid,
+ DB_FILE_ID_LEN);
+ t->npages++;
+
+err: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_add_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_add_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_add_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__qam_add: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\trecno: %lu\n", (u_long)argp->recno);
+ (void)printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tvflag: %lu\n", (u_long)argp->vflag);
+ (void)printf("\tolddata: ");
+ for (i = 0; i < argp->olddata.size; i++) {
+ ch = ((u_int8_t *)argp->olddata.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **));
+ */
+int
+__qam_add_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_add_args **argpp;
+{
+ __qam_add_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__qam_add_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->recno = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->vflag = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->olddata, 0, sizeof(argp->olddata));
+ memcpy(&argp->olddata.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->olddata.data = bp;
+ bp += argp->olddata.size;
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_delext_log __P((DB *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t,
+ * PUBLIC: const DBT *));
+ */
+int
+__qam_delext_log(dbp, txnid, ret_lsnp, flags, lsn, pgno, indx, recno, data)
+ DB *dbp;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ const DBT *data;
+{
+ DBT logrec;
+ DB_ENV *dbenv;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ dbenv = dbp->dbenv;
+ rectype = DB___qam_delext;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*lsn)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (data == NULL ? 0 : data->size);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ DB_ASSERT(dbp->log_filename != NULL);
+ if (dbp->log_filename->id == DB_LOGFILEID_INVALID &&
+ (ret = __dbreg_lazy_id(dbp)) != 0)
+ return (ret);
+
+ uinttmp = (u_int32_t)dbp->log_filename->id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (lsn != NULL)
+ memcpy(bp, lsn, sizeof(*lsn));
+ else
+ memset(bp, 0, sizeof(*lsn));
+ bp += sizeof(*lsn);
+
+ uinttmp = (u_int32_t)pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)indx;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)recno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (data == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &data->size, sizeof(data->size));
+ bp += sizeof(data->size);
+ memcpy(bp, data->data, data->size);
+ bp += data->size;
+ }
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__qam_delext_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __qam_delext_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_delext_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__qam_delext_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __qam_delext_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __qam_delext_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__qam_delext: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tlsn: [%lu][%lu]\n",
+ (u_long)argp->lsn.file, (u_long)argp->lsn.offset);
+ (void)printf("\tpgno: %lu\n", (u_long)argp->pgno);
+ (void)printf("\tindx: %lu\n", (u_long)argp->indx);
+ (void)printf("\trecno: %lu\n", (u_long)argp->recno);
+ (void)printf("\tdata: ");
+ for (i = 0; i < argp->data.size; i++) {
+ ch = ((u_int8_t *)argp->data.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **));
+ */
+int
+__qam_delext_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __qam_delext_args **argpp;
+{
+ __qam_delext_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__qam_delext_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->lsn, bp, sizeof(argp->lsn));
+ bp += sizeof(argp->lsn);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->indx = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->recno = (db_recno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->data, 0, sizeof(argp->data));
+ memcpy(&argp->data.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->data.data = bp;
+ bp += argp->data.size;
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__qam_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_incfirst_print, DB___qam_incfirst)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_mvptr_print, DB___qam_mvptr)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_del_print, DB___qam_del)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_add_print, DB___qam_add)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_delext_print, DB___qam_delext)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__qam_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_incfirst_getpgnos, DB___qam_incfirst)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_mvptr_getpgnos, DB___qam_mvptr)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_del_getpgnos, DB___qam_del)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_add_getpgnos, DB___qam_add)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_delext_getpgnos, DB___qam_delext)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __qam_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__qam_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_incfirst_recover, DB___qam_incfirst)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_mvptr_recover, DB___qam_mvptr)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_del_recover, DB___qam_del)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_add_recover, DB___qam_add)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __qam_delext_recover, DB___qam_delext)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/qam/qam_conv.c b/libdb/qam/qam_conv.c
new file mode 100644
index 0000000..02dbc32
--- /dev/null
+++ b/libdb/qam/qam_conv.c
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_mswap --
+ * Swap the bytes on the queue metadata page.
+ *
+ * PUBLIC: int __qam_mswap __P((PAGE *));
+ */
+int
+__qam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* first_recno */
+ SWAP32(p); /* cur_recno */
+ SWAP32(p); /* re_len */
+ SWAP32(p); /* re_pad */
+ SWAP32(p); /* rec_page */
+ SWAP32(p); /* page_ext */
+ p += 91 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
+
+ return (0);
+}
+
+/*
+ * __qam_pgin_out --
+ * Convert host-specific page layout to/from the host-independent format
+ * stored on disk.
+ * We only need to fix up a few fields in the header.
+ *
+ * PUBLIC: int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__qam_pgin_out(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ QPAGE *h;
+
+ COMPQUIET(pg, 0);
+ COMPQUIET(dbenv, NULL);
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ if (h->type == P_QAMMETA)
+ return (__qam_mswap(pp));
+
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+
+ return (0);
+}
diff --git a/libdb/qam/qam_files.c b/libdb/qam/qam_files.c
new file mode 100644
index 0000000..e7b77f3
--- /dev/null
+++ b/libdb/qam/qam_files.c
@@ -0,0 +1,642 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <stdlib.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_fprobe -- calculate and open extent
+ *
+ * Calculate which extent the page is in, and open it, creating it if necessary.
+ *
+ * PUBLIC: int __qam_fprobe
+ * PUBLIC: __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t));
+ */
+int
+__qam_fprobe(dbp, pgno, addrp, mode, flags)
+ DB *dbp;
+ db_pgno_t pgno;
+ void *addrp;
+ qam_probe_mode mode;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int8_t fid[DB_FILE_ID_LEN];
+ u_int32_t extid, maxext, openflags;
+ char buf[MAXPATHLEN];
+ int numext, offset, oldext, ret;
+
+ dbenv = dbp->dbenv;
+ qp = (QUEUE *)dbp->q_internal;
+ ret = 0;
+
+ if (qp->page_ext == 0) {
+ mpf = dbp->mpf;
+ return (mode == QAM_PROBE_GET ?
+ mpf->get(mpf, &pgno, flags, addrp) :
+ mpf->put(mpf, addrp, flags));
+ }
+
+ mpf = NULL;
+
+ /*
+ * Need to lock long enough to find the mpf or create the file.
+ * The file cannot go away because we must have a record locked
+ * in that file.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
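+ /*
+ * Pages are numbered from 1, so with page_ext pages per extent,
+ * pages 1..page_ext map to extent 0, the next page_ext pages to
+ * extent 1, and so on.
+ */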
+ extid = (pgno - 1) / qp->page_ext;
+
+ /* Array1 will always be in use if array2 is in use. */
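+ /*
+ * A second array is needed because the queue's record space can wrap,
+ * leaving the active extents in two widely separated ranges.
+ */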
+ array = &qp->array1;
+ if (array->n_extent == 0) {
+ /* Start with 4 extents */
+ oldext = 0;
+ array->n_extent = 4;
+ array->low_extent = extid;
+ offset = 0;
+ numext = 0;
+ goto alloc;
+ }
+
+ offset = extid - qp->array1.low_extent;
+ if (qp->array2.n_extent != 0 &&
+ abs(offset) > abs(extid - qp->array2.low_extent)) {
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+ }
+
+ /*
+ * Check to see if the requested extent is outside the range of
+ * extents in the array. This is true by default if there are
+ * no extents here yet.
+ */
+ if (offset < 0 || (unsigned) offset >= array->n_extent) {
+ oldext = array->n_extent;
+ numext = array->hi_extent - array->low_extent + 1;
+ if (offset < 0 &&
+ (unsigned) -offset + numext <= array->n_extent) {
+ /*
+ * If we can fit this one into the existing array by
+ * shifting the existing entries then we do not have
+ * to allocate.
+ */
+ memmove(&array->mpfarray[-offset],
+ array->mpfarray, numext
+ * sizeof(array->mpfarray[0]));
+ memset(array->mpfarray, 0, -offset
+ * sizeof(array->mpfarray[0]));
+ offset = 0;
+ } else if ((u_int32_t)offset == array->n_extent &&
+ mode != QAM_PROBE_MPF && array->mpfarray[0].pinref == 0) {
+ /*
+ * If this is at the end of the array and the file at
+ * the beginning has a zero pin count, we can close
+ * the bottom extent and put this one at the end.
+ */
+ mpf = array->mpfarray[0].mpf;
+ if (mpf != NULL && (ret = mpf->close(mpf, 0)) != 0)
+ goto err;
+ memmove(&array->mpfarray[0], &array->mpfarray[1],
+ (array->n_extent - 1) * sizeof(array->mpfarray[0]));
+ array->low_extent++;
+ array->hi_extent++;
+ offset--;
+ array->mpfarray[offset].mpf = NULL;
+ array->mpfarray[offset].pinref = 0;
+ } else {
+ /*
+ * See if we have wrapped around the queue.
+ * If it has then allocate the second array.
+ * Otherwise just expand the one we are using.
+ */
+ maxext = (u_int32_t) UINT32_T_MAX
+ / (qp->page_ext * qp->rec_page);
+ if ((u_int32_t) abs(offset) >= maxext/2) {
+ array = &qp->array2;
+ DB_ASSERT(array->n_extent == 0);
+ oldext = 0;
+ array->n_extent = 4;
+ array->low_extent = extid;
+ offset = 0;
+ numext = 0;
+ } else {
+ /*
+ * Increase the size to at least include
+ * the new one and double it.
+ */
+ array->n_extent += abs(offset);
+ array->n_extent <<= 2;
+ }
+ alloc:
+ if ((ret = __os_realloc(dbenv,
+ array->n_extent * sizeof(struct __qmpf),
+ &array->mpfarray)) != 0)
+ goto err;
+
+ if (offset < 0) {
+ /*
+ * Move the array up and put the new one
+ * in the first slot.
+ */
+ offset = -offset;
+ memmove(&array->mpfarray[offset],
+ array->mpfarray,
+ numext * sizeof(array->mpfarray[0]));
+ memset(array->mpfarray, 0,
+ offset * sizeof(array->mpfarray[0]));
+ memset(&array->mpfarray[numext + offset], 0,
+ (array->n_extent - (numext + offset))
+ * sizeof(array->mpfarray[0]));
+ offset = 0;
+ }
+ else
+ /* Clear the new part of the array. */
+ memset(&array->mpfarray[oldext], 0,
+ (array->n_extent - oldext) *
+ sizeof(array->mpfarray[0]));
+ }
+ }
+
+ /* Update the low and hi range of saved extents. */
+ if (extid < array->low_extent)
+ array->low_extent = extid;
+ if (extid > array->hi_extent)
+ array->hi_extent = extid;
+
+ /* If the extent file is not yet open, open it. */
+ if (array->mpfarray[offset].mpf == NULL) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
+ if ((ret = dbenv->memp_fcreate(
+ dbenv, &array->mpfarray[offset].mpf, 0)) != 0)
+ goto err;
+ mpf = array->mpfarray[offset].mpf;
+ (void)mpf->set_lsn_offset(mpf, 0);
+ (void)mpf->set_pgcookie(mpf, &qp->pgcookie);
+
+ /* Set up the fileid for this extent. */
+ __qam_exid(dbp, fid, extid);
+ (void)mpf->set_fileid(mpf, fid);
+ openflags = DB_EXTENT;
+ if (LF_ISSET(DB_MPOOL_CREATE))
+ openflags |= DB_CREATE;
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ openflags |= DB_RDONLY;
+ if (F_ISSET(dbenv, DB_ENV_DIRECT_DB))
+ openflags |= DB_DIRECT;
+ if ((ret = mpf->open(
+ mpf, buf, openflags, qp->mode, dbp->pgsize)) != 0) {
+ array->mpfarray[offset].mpf = NULL;
+ (void)mpf->close(mpf, 0);
+ goto err;
+ }
+ }
+
+ mpf = array->mpfarray[offset].mpf;
+ if (mode == QAM_PROBE_GET)
+ array->mpfarray[offset].pinref++;
+ if (LF_ISSET(DB_MPOOL_CREATE))
+ mpf->set_unlink(mpf, 0);
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (ret == 0) {
+ if (mode == QAM_PROBE_MPF) {
+ *(DB_MPOOLFILE **)addrp = mpf;
+ return (0);
+ }
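+ /*
+ * Convert the database-wide page number into an offset within
+ * the extent before calling into the memory pool.
+ */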
+ pgno--;
+ pgno %= qp->page_ext;
+ if (mode == QAM_PROBE_GET)
+ return (mpf->get(mpf, &pgno, flags, addrp));
+ ret = mpf->put(mpf, addrp, flags);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ array->mpfarray[offset].pinref--;
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ return (ret);
+}
+
+/*
+ * __qam_fclose -- close an extent.
+ *
+ * Calculate which extent the page is in and close it.
+ * We assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fclose __P((DB *, db_pgno_t));
+ */
+int
+__qam_fclose(dbp, pgnoaddr)
+ DB *dbp;
+ db_pgno_t pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid;
+ int offset, ret;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ qp = (QUEUE *)dbp->q_internal;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ extid = (pgnoaddr - 1) / qp->page_ext;
+ array = &qp->array1;
+ if (array->low_extent > extid || array->hi_extent < extid)
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+
+ DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);
+
+ /* If other threads are still using this file, leave it. */
+ if (array->mpfarray[offset].pinref != 0)
+ goto done;
+
+ mpf = array->mpfarray[offset].mpf;
+ array->mpfarray[offset].mpf = NULL;
+ ret = mpf->close(mpf, 0);
+
+done:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ return (ret);
+}
+
+/*
+ * __qam_fremove -- remove an extent.
+ *
+ * Calculate which extent the page is in and remove it. There is no way
+ * to remove an extent without probing it first and seeing that it is empty,
+ * so we assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fremove __P((DB *, db_pgno_t));
+ */
+int
+__qam_fremove(dbp, pgnoaddr)
+ DB *dbp;
+ db_pgno_t pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid;
+#if CONFIG_TEST
+ char buf[MAXPATHLEN], *real_name;
+#endif
+ int offset, ret;
+
+ qp = (QUEUE *)dbp->q_internal;
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ extid = (pgnoaddr - 1) / qp->page_ext;
+ array = &qp->array1;
+ if (array->low_extent > extid || array->hi_extent < extid)
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+
+ DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);
+
+#if CONFIG_TEST
+ real_name = NULL;
+ /* Find the real name of the file. */
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, buf, 0, NULL, &real_name)) != 0)
+ goto err;
+#endif
+ /*
+ * The log must be flushed before the file is deleted. We depend on
+ * the log record of the last delete to recreate the file if we crash.
+ */
+ if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
+ goto err;
+
+ mpf = array->mpfarray[offset].mpf;
+ array->mpfarray[offset].mpf = NULL;
+ mpf->set_unlink(mpf, 1);
+ if ((ret = mpf->close(mpf, 0)) != 0)
+ goto err;
+
+ /*
+ * If the file is at the bottom of the array
+ * shift things down and adjust the end points.
+ */
+ if (offset == 0) {
+ memmove(array->mpfarray, &array->mpfarray[1],
+ (array->hi_extent - array->low_extent)
+ * sizeof(array->mpfarray[0]));
+ array->mpfarray[
+ array->hi_extent - array->low_extent].mpf = NULL;
+ if (array->low_extent != array->hi_extent)
+ array->low_extent++;
+ } else {
+ if (extid == array->hi_extent)
+ array->hi_extent--;
+ }
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+#if CONFIG_TEST
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+#endif
+ return (ret);
+}
+
+/*
+ * __qam_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __qam_sync __P((DB *, u_int32_t));
+ */
+int
+__qam_sync(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist;
+ struct __qmpf *mpfp;
+ u_int32_t i;
+ int done, ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
+
+ if ((ret = __db_syncchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Read-only trees never need to be sync'd. */
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ return (0);
+
+ /* If the tree was never backed by a database file, we're done. */
+ if (F_ISSET(dbp, DB_AM_INMEM))
+ return (0);
+
+ /* Flush any dirty pages from the cache to the backing file. */
+ if ((ret = mpf->sync(dbp->mpf)) != 0)
+ return (ret);
+
+ qp = (QUEUE *)dbp->q_internal;
+ if (qp->page_ext == 0)
+ return (0);
+
+ /* We do this for the side effect of opening all active extents. */
+ if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ return (ret);
+
+ if (filelist == NULL)
+ return (0);
+
+ __os_free(dbp->dbenv, filelist);
+
+ done = 0;
+ qp = (QUEUE *)dbp->q_internal;
+ array = &qp->array1;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+again:
+ mpfp = array->mpfarray;
+ for (i = array->low_extent; i <= array->hi_extent; i++, mpfp++)
+ if ((mpf = mpfp->mpf) != NULL) {
+ if ((ret = mpf->sync(mpf)) != 0)
+ goto err;
+ /*
+ * If we are the only ones with this file open
+ * then close it so it might be removed.
+ */
+ if (mpfp->pinref == 0) {
+ mpfp->mpf = NULL;
+ if ((ret = mpf->close(mpf, 0)) != 0)
+ goto err;
+ }
+ }
+
+ if (done == 0 && qp->array2.n_extent != 0) {
+ array = &qp->array2;
+ done = 1;
+ goto again;
+ }
+
+err:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ return (ret);
+}
+
+/*
+ * __qam_gen_filelist -- generate a list of extent files.
+ * Another thread may close the handle, so this should only
+ * be used single-threaded or with care.
+ *
+ * PUBLIC: int __qam_gen_filelist __P((DB *, QUEUE_FILELIST **));
+ */
+int
+__qam_gen_filelist(dbp, filelistp)
+ DB *dbp;
+ QUEUE_FILELIST **filelistp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ QUEUE *qp;
+ QMETA *meta;
+ db_pgno_t i, last, start;
+ db_recno_t current, first;
+ QUEUE_FILELIST *fp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ qp = (QUEUE *)dbp->q_internal;
+ *filelistp = NULL;
+
+ if (qp->page_ext == 0)
+ return (0);
+
+ /* This may happen during metapage recovery. */
+ if (qp->name == NULL)
+ return (0);
+
+ /* Find out the page number of the last page in the database. */
+ i = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
+ return (ret);
+
+ current = meta->cur_recno;
+ first = meta->first_recno;
+
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ return (ret);
+
+ last = QAM_RECNO_PAGE(dbp, current);
+ start = QAM_RECNO_PAGE(dbp, first);
+
+ /* Allocate the worst case plus 1 for null termination. */
+ if (last >= start)
+ ret = last - start + 2;
+ else
+ ret = last + (QAM_RECNO_PAGE(dbp, UINT32_T_MAX) - start) + 1;
+ if ((ret = __os_calloc(dbenv,
+ ret, sizeof(QUEUE_FILELIST), filelistp)) != 0)
+ return (ret);
+ fp = *filelistp;
+ i = start;
+
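+ /*
+ * Probe one page per extent between the pages of the first and
+ * current records.  If the record numbers have wrapped (last < start),
+ * a second pass starting at page 1 picks up the extents at the front
+ * of the file.
+ */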
+again: for (; i <= last; i += qp->page_ext) {
+ if ((ret =
+ __qam_fprobe(dbp, i, &fp->mpf, QAM_PROBE_MPF, 0)) != 0) {
+ if (ret == ENOENT)
+ continue;
+ return (ret);
+ }
+ fp->id = (i - 1) / qp->page_ext;
+ fp++;
+ }
+
+ if (last < start) {
+ i = 1;
+ start = 0;
+ goto again;
+ }
+
+ return (0);
+}
+
+/*
+ * __qam_extent_names -- generate a list of extent file names.
+ *
+ * PUBLIC: int __qam_extent_names __P((DB_ENV *, char *, char ***));
+ */
+int
+__qam_extent_names(dbenv, name, namelistp)
+ DB_ENV *dbenv;
+ char *name;
+ char ***namelistp;
+{
+ DB *dbp;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[MAXPATHLEN], *dir, **cp, *freep;
+ int cnt, len, ret;
+
+ *namelistp = NULL;
+ filelist = NULL;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret =
+ __db_open(dbp, NULL, name, NULL, DB_QUEUE, DB_RDONLY, 0)) != 0)
+ return (ret);
+ qp = dbp->q_internal;
+ if (qp->page_ext == 0)
+ goto done;
+
+ if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ goto done;
+
+ if (filelist == NULL)
+ goto done;
+
+ cnt = 0;
+ for (fp = filelist; fp->mpf != NULL; fp++)
+ cnt++;
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ name = ((QUEUE *)dbp->q_internal)->name;
+
+ /* QUEUE_EXTENT contains extra chars, but add 6 anyway for the int. */
+ len = (u_int32_t)(cnt * (sizeof(**namelistp)
+ + strlen(QUEUE_EXTENT) + strlen(dir) + strlen(name) + 6));
+
+ if ((ret =
+ __os_malloc(dbp->dbenv, len, namelistp)) != 0)
+ goto done;
+ cp = *namelistp;
+ freep = (char *)(cp + cnt + 1);
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ len = (u_int32_t)strlen(buf);
+ *cp++ = freep;
+ strcpy(freep, buf);
+ freep += len + 1;
+ }
+ *cp = NULL;
+
+done:
+ if (filelist != NULL)
+ __os_free(dbp->dbenv, filelist);
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __qam_exid --
+ * Generate a fileid for an extent based on the fileid of the main
+ * file. Since we do not log schema creates/deletes explicitly, the log
+ * never captures the fileid of an extent file. In order that masters and
+ * replicas have the same fileids (so they can explicitly delete them), we
+ * use computed fileids for the extent files of Queue files.
+ *
+ * An extent file id retains the low order 12 bytes of the file id and
+ * overwrites the dev/inode fields, placing a 0 in the inode field, and
+ * the extent number in the dev field.
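+ *
+ * For example, on a little-endian host, extent 3 gets an id whose first
+ * four bytes are zero, whose next four bytes are 03 00 00 00, and whose
+ * remaining twelve bytes are copied unchanged from the master file's id.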
+ *
+ * PUBLIC: void __qam_exid __P((DB *, u_int8_t *, u_int32_t));
+ */
+void
+__qam_exid(dbp, fidp, exnum)
+ DB *dbp;
+ u_int8_t *fidp;
+ u_int32_t exnum;
+{
+ int i;
+ u_int8_t *p;
+
+ /* Copy the fileid from the master. */
+ memcpy(fidp, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* The first four bytes are the inode or the FileIndexLow; zero it. */
+ for (i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = 0;
+
+ /* The next four bytes are the dev/FileIndexHigh; insert the exnum. */
+ for (p = (u_int8_t *)&exnum, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+}
diff --git a/libdb/qam/qam_method.c b/libdb/qam/qam_method.c
new file mode 100644
index 0000000..f51bad4
--- /dev/null
+++ b/libdb/qam/qam_method.c
@@ -0,0 +1,413 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __qam_set_extentsize __P((DB *, u_int32_t));
+
+struct __qam_cookie {
+ DB_LSN lsn;
+ QUEUE_FILELIST *filelist;
+};
+
+/*
+ * __qam_db_create --
+ * Queue specific initialization of the DB structure.
+ *
+ * PUBLIC: int __qam_db_create __P((DB *));
+ */
+int
+__qam_db_create(dbp)
+ DB *dbp;
+{
+ QUEUE *t;
+ int ret;
+
+ /* Allocate and initialize the private queue structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(QUEUE), &t)) != 0)
+ return (ret);
+ dbp->q_internal = t;
+ dbp->set_q_extentsize = __qam_set_extentsize;
+
+ t->re_pad = ' ';
+
+ return (0);
+}
+
+/*
+ * __qam_db_close --
+ * Queue specific discard of the DB structure.
+ *
+ * PUBLIC: int __qam_db_close __P((DB *));
+ */
+int
+__qam_db_close(dbp)
+ DB *dbp;
+{
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *t;
+ struct __qmpf *mpfp;
+ u_int32_t i;
+ int ret, t_ret;
+
+ ret = 0;
+ if ((t = dbp->q_internal) == NULL)
+ return (0);
+
+ array = &t->array1;
+again:
+ mpfp = array->mpfarray;
+ if (mpfp != NULL) {
+ for (i = array->low_extent;
+ i <= array->hi_extent; i++, mpfp++) {
+ mpf = mpfp->mpf;
+ mpfp->mpf = NULL;
+ if (mpf != NULL &&
+ (t_ret = mpf->close(mpf, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+ __os_free(dbp->dbenv, array->mpfarray);
+ }
+ if (t->array2.n_extent != 0) {
+ array = &t->array2;
+ array->n_extent = 0;
+ goto again;
+ }
+
+ if (t->path != NULL)
+ __os_free(dbp->dbenv, t->path);
+ __os_free(dbp->dbenv, t);
+ dbp->q_internal = NULL;
+
+ return (ret);
+}
+
+static int
+__qam_set_extentsize(dbp, extentsize)
+ DB *dbp;
+ u_int32_t extentsize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_extentsize");
+
+ if (extentsize < 1) {
+ __db_err(dbp->dbenv, "Extent size must be at least 1");
+ return (EINVAL);
+ }
+
+ ((QUEUE*)dbp->q_internal)->page_ext = extentsize;
+
+ return (0);
+}
+
+/*
+ * __db_prqueue --
+ * Print out a queue
+ *
+ * PUBLIC: int __db_prqueue __P((DB *, FILE *, u_int32_t));
+ */
+int
+__db_prqueue(dbp, fp, flags)
+ DB *dbp;
+ FILE *fp;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ QMETA *meta;
+ db_pgno_t first, i, last, pg_ext, stop;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ /* Find out the page number of the last page in the database. */
+ i = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
+ return (ret);
+
+ first = QAM_RECNO_PAGE(dbp, meta->first_recno);
+ last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
+
+ ret = __db_prpage(dbp, (PAGE *)meta, fp, flags);
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ return (ret);
+
+ i = first;
+ if (first > last)
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+ else
+ stop = last;
+
+ /* Dump each page. */
+begin:
+ for (; i <= stop; ++i) {
+ if ((ret = __qam_fget(dbp, &i, 0, &h)) != 0) {
+ pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
+ if (pg_ext == 0) {
+ if (ret == DB_PAGE_NOTFOUND && first == last)
+ return (0);
+ return (ret);
+ }
+ if (ret == ENOENT || ret == DB_PAGE_NOTFOUND) {
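+ /*
+ * Skip to the last page of the missing extent; the
+ * loop increment then moves to the first page of the
+ * next one.
+ */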
+ i += pg_ext - ((i - 1) % pg_ext) - 1;
+ continue;
+ }
+ return (ret);
+ }
+ (void)__db_prpage(dbp, h, fp, flags);
+ if ((ret = __qam_fput(dbp, i, h, 0)) != 0)
+ return (ret);
+ }
+
+ if (first > last) {
+ i = 1;
+ stop = last;
+ first = last;
+ goto begin;
+ }
+ return (0);
+}
+
+/*
+ * __qam_remove
+ * Remove method for a Queue.
+ *
+ * PUBLIC: int __qam_remove __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, DB_LSN *));
+ */
+int
+__qam_remove(dbp, txn, name, subdb, lsnp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DB_LSN *lsnp;
+{
+ DB_ENV *dbenv;
+ DB *tmpdbp;
+ MPFARRAY *ap;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist, *fp;
+ int ret, needclose, t_ret;
+ char buf[MAXPATHLEN];
+ u_int8_t fid[DB_FILE_ID_LEN];
+
+ COMPQUIET(lsnp, NULL);
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ filelist = NULL;
+ needclose = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Subdatabases.
+ */
+ if (subdb != NULL) {
+ __db_err(dbenv,
+ "Queue does not support multiple databases per file");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Since regular remove no longer opens the database, we may have
+ * to do it here.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ tmpdbp = dbp;
+ else {
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ return (ret);
+ /*
+ * We need to make sure we don't self-deadlock, so give
+ * this dbp the same locker as the incoming one.
+ */
+ tmpdbp->lid = dbp->lid;
+
+ /*
+ * If this is a transactional dbp and the open fails, then
+ * the transactional abort will close the dbp. If it's not
+ * a transactional open, then we always have to close it
+ * even if the open fails. Once the open has succeeded,
+ * then we will always want to close it.
+ */
+ if (txn == NULL)
+ needclose = 1;
+ if ((ret = tmpdbp->open(tmpdbp,
+ txn, name, NULL, DB_QUEUE, 0, 0)) != 0)
+ goto err;
+ needclose = 1;
+ }
+
+ qp = (QUEUE *)tmpdbp->q_internal;
+
+ if (qp->page_ext != 0 &&
+ (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
+ goto err;
+
+ if (filelist == NULL)
+ goto err;
+
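+ /*
+ * Close the cached mpool handle for each extent, clear its array
+ * slot, and remove the extent file itself.
+ */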
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
+ if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
+ goto err;
+ if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
+ ap = &qp->array1;
+ else
+ ap = &qp->array2;
+ ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
+
+ /* Take care of object reclamation. */
+ __qam_exid(tmpdbp, fid, fp->id);
+ if ((ret = __fop_remove(dbenv,
+ txn, fid, buf, DB_APP_DATA)) != 0)
+ goto err;
+ }
+
+err: if (filelist != NULL)
+ __os_free(dbenv, filelist);
+ if (needclose) {
+ /*
+ * Since we copied the lid from the dbp, we'd better not
+ * free it here.
+ */
+ tmpdbp->lid = DB_LOCK_INVALIDID;
+
+ /* We need to remove the lockevent we associated with this. */
+ if (txn != NULL)
+ __txn_remlock(dbenv,
+ txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ if ((t_ret =
+ __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __qam_rename
+ * Rename method for Queue.
+ *
+ * PUBLIC: int __qam_rename __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, const char *));
+ */
+int
+__qam_rename(dbp, txn, filename, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *filename, *subdb, *newname;
+{
+ DB_ENV *dbenv;
+ DB *tmpdbp;
+ MPFARRAY *ap;
+ QUEUE *qp;
+ QUEUE_FILELIST *fp, *filelist;
+ char buf[MAXPATHLEN], nbuf[MAXPATHLEN];
+ char *namep;
+ int ret, needclose, t_ret;
+ u_int8_t fid[DB_FILE_ID_LEN], *fidp;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+ filelist = NULL;
+ needclose = 0;
+
+ if (subdb != NULL) {
+ __db_err(dbenv,
+ "Queue does not support multiple databases per file");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Since regular rename no longer opens the database, we may have
+ * to do it here.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ tmpdbp = dbp;
+ else {
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ return (ret);
+ /* Copy the incoming locker so we don't self-deadlock. */
+ tmpdbp->lid = dbp->lid;
+ needclose = 1;
+ if ((ret = tmpdbp->open(tmpdbp, txn, filename, NULL,
+ DB_QUEUE, 0, 0)) != 0)
+ goto err;
+ }
+
+ qp = (QUEUE *)tmpdbp->q_internal;
+
+ if (qp->page_ext != 0 &&
+ (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
+ goto err;
+ if ((namep = __db_rpath(newname)) != NULL)
+ newname = namep + 1;
+
+ fidp = fid;
+ for (fp = filelist; fp != NULL && fp->mpf != NULL; fp++) {
+ fp->mpf->get_fileid(fp->mpf, fidp);
+ if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
+ goto err;
+ if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
+ ap = &qp->array1;
+ else
+ ap = &qp->array2;
+ ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
+ snprintf(nbuf, sizeof(nbuf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], newname, fp->id);
+ if ((ret = __fop_rename(dbenv,
+ txn, buf, nbuf, fidp, DB_APP_DATA)) != 0)
+ goto err;
+ }
+
+err: if (filelist != NULL)
+ __os_free(dbenv, filelist);
+ if (needclose) {
+ /* We copied this, so we mustn't free it. */
+ tmpdbp->lid = DB_LOCK_INVALIDID;
+
+ /* We need to remove the lockevent we associated with this. */
+ if (txn != NULL)
+ __txn_remlock(dbenv,
+ txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ if ((t_ret =
+ __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+ return (ret);
+}
diff --git a/libdb/qam/qam_open.c b/libdb/qam/qam_open.c
new file mode 100644
index 0000000..c31b091
--- /dev/null
+++ b/libdb/qam/qam_open.c
@@ -0,0 +1,331 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/fop.h"
+
+static int __qam_init_meta __P((DB *, QMETA *));
+
+/*
+ * __qam_open
+ *
+ * PUBLIC: int __qam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, int, u_int32_t));
+ */
+int
+__qam_open(dbp, txn, name, base_pgno, mode, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ int mode;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ QMETA *qmeta;
+ QUEUE *t;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ t = dbp->q_internal;
+ ret = 0;
+ qmeta = NULL;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __qam_stat;
+ dbp->sync = __qam_sync;
+ dbp->db_am_remove = __qam_remove;
+ dbp->db_am_rename = __qam_rename;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp, txn, &dbc,
+ LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0))
+ != 0)
+ return (ret);
+
+ /*
+ * Get the meta data page. It must exist, because creates of
+ * files/databases come in through the __qam_new_file interface
+ * and queue doesn't support subdatabases.
+ */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &base_pgno, 0, (PAGE **)&qmeta)) != 0)
+ goto err;
+
+ /* If the magic number is incorrect, that's a fatal error. */
+ if (qmeta->dbmeta.magic != DB_QAMMAGIC) {
+ __db_err(dbenv, "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Setup information needed to open extents. */
+ t->page_ext = qmeta->page_ext;
+
+ if (t->page_ext != 0) {
+ t->pginfo.db_pagesize = dbp->pgsize;
+ t->pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ t->pginfo.type = dbp->type;
+ t->pgcookie.data = &t->pginfo;
+ t->pgcookie.size = sizeof(DB_PGINFO);
+
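+ /*
+ * Split the database name into a directory and a base name; the
+ * extent files are named from these using the QUEUE_EXTENT pattern.
+ */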
+ if ((ret = __os_strdup(dbp->dbenv, name, &t->path)) != 0)
+ return (ret);
+ t->dir = t->path;
+ if ((t->name = __db_rpath(t->path)) == NULL) {
+ t->name = t->path;
+ t->dir = PATH_DOT;
+ } else
+ *t->name++ = '\0';
+
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+ t->mode = mode;
+ }
+
+ if (name == NULL && t->page_ext != 0) {
+ __db_err(dbenv,
+ "Extent size may not be specified for in-memory queue database");
+ return (EINVAL);
+ }
+
+ t->re_pad = qmeta->re_pad;
+ t->re_len = qmeta->re_len;
+ t->rec_page = qmeta->rec_page;
+
+ t->q_meta = base_pgno;
+ t->q_root = base_pgno + 1;
+
+err: if (qmeta != NULL && (t_ret = mpf->put(mpf, qmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ (void)__LPUT(dbc, metalock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_metachk --
+ *
+ * PUBLIC: int __qam_metachk __P((DB *, const char *, QMETA *));
+ */
+int
+__qam_metachk(dbp, name, qmeta)
+ DB *dbp;
+ const char *name;
+ QMETA *qmeta;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ /*
+ * At this point, all we know is that the magic number is for a Queue.
+ * Check the version; the database may be out of date.
+ */
+ vers = qmeta->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 1:
+ case 2:
+ __db_err(dbenv,
+ "%s: queue version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 3:
+ case 4:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported qam version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __qam_mswap((PAGE *)qmeta)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_QUEUE && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_QUEUE;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE);
+
+ /* Set the page size. */
+ dbp->pgsize = qmeta->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, qmeta->dbmeta.uid, DB_FILE_ID_LEN);
+
+ /* Set up AM-specific methods that do not require an open. */
+ dbp->db_am_rename = __qam_rename;
+ dbp->db_am_remove = __qam_remove;
+
+ return (ret);
+}
+
+/*
+ * __qam_init_meta --
+ * Initialize the meta-data for a Queue database.
+ */
+static int
+__qam_init_meta(dbp, meta)
+ DB *dbp;
+ QMETA *meta;
+{
+ QUEUE *t;
+
+ t = dbp->q_internal;
+
+ memset(meta, 0, sizeof(QMETA));
+ LSN_NOT_LOGGED(meta->dbmeta.lsn);
+ meta->dbmeta.pgno = PGNO_BASE_MD;
+ meta->dbmeta.last_pgno = 0;
+ meta->dbmeta.magic = DB_QAMMAGIC;
+ meta->dbmeta.version = DB_QAMVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_QAMMETA;
+ meta->re_pad = t->re_pad;
+ meta->re_len = t->re_len;
+ meta->rec_page = CALC_QAM_RECNO_PER_PAGE(dbp);
+ meta->cur_recno = 1;
+ meta->first_recno = 1;
+ meta->page_ext = t->page_ext;
+ t->rec_page = meta->rec_page;
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* Verify that we can fit at least one record per page. */
+ if (QAM_RECNO_PER_PAGE(dbp) < 1) {
+ __db_err(dbp->dbenv,
+ "Record size of %lu too large for page size of %lu",
+ (u_long)t->re_len, (u_long)dbp->pgsize);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __qam_new_file --
+ * Create the necessary pages to begin a new queue database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __qam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__qam_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ QMETA *meta;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ db_pgno_t pgno;
+ int ret;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ buf = NULL;
+ meta = NULL;
+
+ /* Build meta-data page. */
+
+ if (name == NULL) {
+ pgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (QMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ if ((ret = __qam_init_meta(dbp, meta)) != 0)
+ goto err;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = DB_QUEUE;
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv,
+ txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+}
diff --git a/libdb/qam/qam_rec.c b/libdb/qam/qam_rec.c
new file mode 100644
index 0000000..439ecf0
--- /dev/null
+++ b/libdb/qam/qam_rec.c
@@ -0,0 +1,568 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __qam_incfirst_recover --
+ * Recovery function for incfirst.
+ *
+ * PUBLIC: int __qam_incfirst_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_incfirst_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t metapg;
+ int exact, modified, ret, rec_ext;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_incfirst_print);
+ REC_INTRO(__qam_incfirst_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+
+ /*
+ * Only move first_recno backwards so we pick up the aborted delete.
+ * When going forward we need to be careful since
+ * we may have bumped over a locked record.
+ */
+ if (DB_UNDO(op)) {
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ modified = 1;
+ }
+ } else {
+ if (log_compare(&LSN(meta), lsnp) < 0) {
+ LSN(meta) = *lsnp;
+ modified = 1;
+ }
+ rec_ext = 0;
+ if (meta->page_ext != 0)
+ rec_ext = meta->page_ext * meta->rec_page;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ while (meta->first_recno != meta->cur_recno &&
+ !QAM_BEFORE_FIRST(meta, argp->recno + 1)) {
+ if ((ret = __qam_position(dbc,
+ &meta->first_recno, QAM_READ, &exact)) != 0)
+ goto err;
+ if (cp->page != NULL)
+ __qam_fput(file_dbp, cp->pgno, cp->page, 0);
+
+ if (exact == 1)
+ break;
+ if (cp->page != NULL &&
+ rec_ext != 0 && meta->first_recno % rec_ext == 0)
+ if ((ret =
+ __qam_fremove(file_dbp, cp->pgno)) != 0)
+ goto err;
+ meta->first_recno++;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ modified = 1;
+ }
+ }
+
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err1;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)mpf->put(mpf, meta, 0);
+err1: (void)__LPUT(dbc, lock);
+ }
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_mvptr_recover --
+ * Recovery function for mvptr.
+ *
+ * PUBLIC: int __qam_mvptr_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_mvptr_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapg;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_mvptr_print);
+ REC_INTRO(__qam_mvptr_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->metalsn);
+
+ /*
+ * Under normal circumstances, we never undo a movement of one of
+ * the pointers. Just move them along regardless of abort/commit.
+ *
+ * If we're undoing a truncate, we need to reset the pointers to
+ * their state before the truncate.
+ */
+ if (DB_UNDO(op) && (argp->opcode & QAM_TRUNCATE)) {
+ if (cmp_n == 0) {
+ meta->first_recno = argp->old_first;
+ meta->cur_recno = argp->old_cur;
+ modified = 1;
+ meta->dbmeta.lsn = argp->metalsn;
+ }
+ } else if (cmp_p == 0) {
+ if (argp->opcode & QAM_SETFIRST)
+ meta->first_recno = argp->new_first;
+
+ if (argp->opcode & QAM_SETCUR)
+ meta->cur_recno = argp->new_cur;
+
+ modified = 1;
+ meta->dbmeta.lsn = *lsnp;
+ }
+
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_del_recover --
+ *	Recovery function for del: used for non-extent databases, or when
+ *	the record has no data (zero length).
+ *
+ * PUBLIC: int __qam_del_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_del_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_del_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_del_print);
+ REC_INTRO(__qam_del_read, 1);
+
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+ /* make sure first is behind us */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)mpf->put(mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ /* Need to undo delete - mark the record as present */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_SET(qp, QAM_VALID);
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_delext_recover --
+ * Recovery function for del in an extent based queue.
+ *
+ * PUBLIC: int __qam_delext_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delext_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delext_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_delext_print);
+ REC_INTRO(__qam_delext_read, 1);
+
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are redoing a delete and the page is not there
+ * we are done.
+ */
+ if (DB_REDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+ /* make sure first is behind us */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)mpf->put(mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->data)) != 0)
+ goto err;
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_add_recover --
+ * Recovery function for add.
+ *
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_add_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_add_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, meta_dirty, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_add_print);
+ REC_INTRO(__qam_add_read, 1);
+
+ modified = 0;
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are undoing an append and the page is not there
+ * we are done.
+ */
+ if (DB_UNDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_REDO(op)) {
+ /* Fix meta-data page. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0)
+ goto err;
+ meta_dirty = 0;
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ meta_dirty = 1;
+ }
+ if (argp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, argp->recno)) {
+ meta->cur_recno = argp->recno + 1;
+ meta_dirty = 1;
+ }
+ if ((ret =
+ mpf->put(mpf, meta, meta_dirty? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err;
+
+ /* Now update the actual page if necessary. */
+ if (cmp_n > 0) {
+ /* Need to redo add - put the record on page */
+ if ((ret = __qam_pitem(dbc,
+ pagep, argp->indx, argp->recno, &argp->data)) != 0)
+ goto err;
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ /* Make sure pointers include this record. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ }
+ } else if (DB_UNDO(op)) {
+ /*
+		 * Need to undo the add.  If this was an overwrite, put the
+		 * old record back; otherwise just clear the valid bit.
+ */
+ if (argp->olddata.size != 0) {
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->olddata)) != 0)
+ goto err;
+
+ if (!(argp->vflag & QAM_VALID)) {
+ qp = QAM_GET_RECORD(
+ file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ }
+ modified = 1;
+ } else {
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ qp->flags = 0;
+ modified = 1;
+ }
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ }
+
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+
+out: REC_CLOSE;
+}
diff --git a/libdb/qam/qam_stat.c b/libdb/qam/qam_stat.c
new file mode 100644
index 0000000..ad7b1fb
--- /dev/null
+++ b/libdb/qam/qam_stat.c
@@ -0,0 +1,203 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __qam_stat --
+ *	Gather the queue (qam) statistics.
+ *
+ * PUBLIC: int __qam_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__qam_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ DB_QUEUE_STAT *sp;
+ PAGE *h;
+ QAMDATA *qp, *ep;
+ QMETA *meta;
+ QUEUE *t;
+ db_indx_t indx;
+ db_pgno_t first, last, pgno, pg_ext, stop;
+ u_int32_t re_len;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
+ sp = NULL;
+ t = dbp->q_internal;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if (spp == NULL)
+ return (0);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "qam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+
+ /* Determine the last page of the database. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS) {
+ sp->qs_nkeys = meta->dbmeta.key_count;
+ sp->qs_ndata = meta->dbmeta.record_count;
+ goto meta_only;
+ }
+
+ first = QAM_RECNO_PAGE(dbp, meta->first_recno);
+ last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
+
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ pgno = first;
+ if (first > last)
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+ else
+ stop = last;
+
+	/* Determine the extent size. */
+ pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
+begin:
+ /* Walk through the pages and count. */
+ for (; pgno <= stop; ++pgno) {
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ ret = __qam_fget(dbp, &pgno, 0, &h);
+ if (ret == ENOENT) {
+ pgno += pg_ext - 1;
+ continue;
+ }
+ if (ret == DB_PAGE_NOTFOUND) {
+ if (pg_ext == 0) {
+ if (pgno != stop && first != last)
+ goto err;
+ ret = 0;
+ break;
+ }
+ pgno += pg_ext - ((pgno - 1) % pg_ext) - 1;
+ continue;
+ }
+ if (ret != 0)
+ goto err;
+
+ ++sp->qs_pages;
+
+ ep = (QAMDATA *)((u_int8_t *)h + dbp->pgsize - re_len);
+ for (indx = 0, qp = QAM_GET_RECORD(dbp, h, indx);
+ qp <= ep;
+ ++indx, qp = QAM_GET_RECORD(dbp, h, indx)) {
+ if (F_ISSET(qp, QAM_VALID))
+ sp->qs_ndata++;
+ else
+ sp->qs_pgfree += re_len;
+ }
+
+ if ((ret = __qam_fput(dbp, pgno, h, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+ }
+
+ (void)__LPUT(dbc, lock);
+ if (first > last) {
+ pgno = 1;
+ stop = last;
+ first = last;
+ goto begin;
+ }
+
+ /* Get the meta-data page. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (!F_ISSET(dbp, DB_AM_RDONLY))
+ meta->dbmeta.key_count =
+ meta->dbmeta.record_count = sp->qs_ndata;
+ sp->qs_nkeys = sp->qs_ndata;
+
+meta_only:
+ /* Get the metadata fields. */
+ sp->qs_magic = meta->dbmeta.magic;
+ sp->qs_version = meta->dbmeta.version;
+ sp->qs_metaflags = meta->dbmeta.flags;
+ sp->qs_pagesize = meta->dbmeta.pagesize;
+ sp->qs_extentsize = meta->page_ext;
+ sp->qs_re_len = meta->re_len;
+ sp->qs_re_pad = meta->re_pad;
+ sp->qs_first_recno = meta->first_recno;
+ sp->qs_cur_recno = meta->cur_recno;
+
+ /* Discard the meta-data page. */
+ if ((ret = mpf->put(mpf,
+ meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ *(DB_QUEUE_STAT **)spp = sp;
+ ret = 0;
+
+ if (0) {
+err: if (sp != NULL)
+ __os_ufree(dbp->dbenv, sp);
+ }
+
+ (void)__LPUT(dbc, lock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
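+
+/*
+ * For reference, a sketch of the application-side call that ends up here
+ * (error handling and the fields printed are illustrative).  The stat
+ * structure is allocated on the caller's behalf and should be released
+ * with free() when no longer needed:
+ *
+ *	DB_QUEUE_STAT *sp;
+ *	int ret;
+ *
+ *	if ((ret = dbp->stat(dbp, &sp, 0)) == 0) {
+ *		printf("records %lu, first recno %lu\n",
+ *		    (u_long)sp->qs_ndata, (u_long)sp->qs_first_recno);
+ *		free(sp);
+ *	}
+ *
+ * Passing DB_FAST_STAT instead of 0 takes the meta_only path above and
+ * returns only the cached key and record counts.
+ */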
diff --git a/libdb/qam/qam_upgrade.c b/libdb/qam/qam_upgrade.c
new file mode 100644
index 0000000..a30fa67
--- /dev/null
+++ b/libdb/qam/qam_upgrade.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __qam_31_qammeta --
+ * Upgrade the database from version 1 to version 2.
+ *
+ * PUBLIC: int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_31_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA31 *newmeta;
+ QMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA31 *)buf;
+ oldmeta = (QMETA30 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+ * They may overlap so start at the bottom and use memmove().
+ */
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->start = oldmeta->start;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 2;
+
+ return (0);
+}
+
+/*
+ * __qam_32_qammeta --
+ * Upgrade the database from version 2 to version 3.
+ *
+ * PUBLIC: int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_32_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA32 *newmeta;
+ QMETA31 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA32 *)buf;
+ oldmeta = (QMETA31 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+ * We are dropping the first field so move
+ * from the top.
+ */
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->page_ext = 0;
+ /* cur_recno now points to the first free slot. */
+ newmeta->cur_recno++;
+ if (newmeta->first_recno == 0)
+ newmeta->first_recno = 1;
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 3;
+
+ return (0);
+}
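+
+/*
+ * Applications do not call these conversion routines directly; they are
+ * reached through DB->upgrade when an old-format queue file is upgraded
+ * in place.  A sketch (the file name is a placeholder):
+ *
+ *	DB *dbp;
+ *	int ret;
+ *
+ *	if ((ret = db_create(&dbp, dbenv, 0)) == 0) {
+ *		ret = dbp->upgrade(dbp, "queue.db", 0);
+ *		(void)dbp->close(dbp, 0);
+ *	}
+ */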
diff --git a/libdb/qam/qam_verify.c b/libdb/qam/qam_verify.c
new file mode 100644
index 0000000..49b36d0
--- /dev/null
+++ b/libdb/qam/qam_verify.c
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_vrfy_meta --
+ * Verify the queue-specific part of a metadata page.
+ *
+ * PUBLIC: int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+	 * Queue databases can't be used in subdatabases, so if this flag
+	 * isn't set, something very odd is going on.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE))
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue databases must be one-per-file",
+ (u_long)pgno));
+
+ /*
+ * cur_recno/rec_page
+ * Cur_recno may be one beyond the end of the page and
+ * we start numbering from 1.
+ */
+ if (vdp->last_pgno > 0 && meta->cur_recno > 0 &&
+ meta->cur_recno - 1 > meta->rec_page * vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: current recno %lu references record past last page number %lu",
+ (u_long)pgno,
+ (u_long)meta->cur_recno, (u_long)vdp->last_pgno));
+ isbad = 1;
+ }
+
+ /*
+ * re_len: If this is bad, we can't safely verify queue data pages, so
+ * return DB_VERIFY_FATAL
+ */
+ if (ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) *
+ meta->rec_page + QPAGE_SZ(dbp) > dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record length %lu too high for page size and recs/page",
+ (u_long)pgno, (u_long)meta->re_len));
+ ret = DB_VERIFY_FATAL;
+ goto err;
+ } else {
+ vdp->re_len = meta->re_len;
+ vdp->rec_page = meta->rec_page;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __qam_vrfy_data --
+ * Verify a queue data page.
+ *
+ * PUBLIC: int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_data(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QPAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB fakedb;
+ struct __queue fakeq;
+ QAMDATA *qp;
+ db_recno_t i;
+ u_int8_t qflags;
+
+ /*
+ * Not much to do here, except make sure that flags are reasonable.
+ *
+ * QAM_GET_RECORD assumes a properly initialized q_internal
+ * structure, however, and we don't have one, so we play
+ * some gross games to fake it out.
+ */
+ fakedb.q_internal = &fakeq;
+ fakedb.flags = dbp->flags;
+ fakeq.re_len = vdp->re_len;
+
+ for (i = 0; i < vdp->rec_page; i++) {
+ qp = QAM_GET_RECORD(&fakedb, h, i);
+ if ((u_int8_t *)qp >= (u_int8_t *)h + dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record %lu extends past end of page",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+
+		/* Any bits other than QAM_VALID and QAM_SET are bogus. */
+		qflags = qp->flags;
+		qflags &= ~(QAM_VALID | QAM_SET);
+ if (qflags != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record %lu has bad flags",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __qam_vrfy_structure --
+ * Verify a queue database structure, such as it is.
+ *
+ * PUBLIC: int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+ */
+int
+__qam_vrfy_structure(dbp, vdp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad;
+
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_QAMMETA) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue database has no meta page",
+ (u_long)PGNO_BASE_MD));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_pgset_inc(vdp->pgset, 0)) != 0)
+ goto err;
+
+ for (i = 1; i <= vdp->last_pgno; i++) {
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ return (ret);
+ if (!F_ISSET(pip, VRFY_IS_ALLZEROES) &&
+ pip->type != P_QAMDATA) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue database page of incorrect type %lu",
+ (u_long)i, (u_long)pip->type));
+ isbad = 1;
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, i)) != 0)
+ goto err;
+ }
+
+err: if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
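+
+/*
+ * These checks run on behalf of DB->verify.  A sketch of the
+ * application-side call (the file name is a placeholder); note that
+ * verify discards the DB handle whether or not it succeeds:
+ *
+ *	DB *dbp;
+ *	int ret;
+ *
+ *	if ((ret = db_create(&dbp, dbenv, 0)) == 0)
+ *		ret = dbp->verify(dbp, "queue.db", NULL, NULL, 0);
+ */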
diff --git a/libdb/rep/rep_method.c b/libdb/rep/rep_method.c
new file mode 100644
index 0000000..6857c89
--- /dev/null
+++ b/libdb/rep/rep_method.c
@@ -0,0 +1,1144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __rep_abort_prepared __P((DB_ENV *));
+static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
+static int __rep_client_dbinit __P((DB_ENV *, int));
+static int __rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+static int __rep_elect_init __P((DB_ENV *, DB_LSN *, int, int, int, int *));
+static int __rep_flush __P((DB_ENV *));
+static int __rep_restore_prepared __P((DB_ENV *));
+static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_rep_transport __P((DB_ENV *, int,
+ int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
+static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t));
+
+/*
+ * __rep_dbenv_create --
+ * Replication-specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_create __P((DB_ENV *));
+ */
+int
+__rep_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ COMPQUIET(db_rep, NULL);
+ COMPQUIET(ret, 0);
+ dbenv->rep_elect = __dbcl_rep_elect;
+ dbenv->rep_flush = __dbcl_rep_flush;
+ dbenv->rep_process_message = __dbcl_rep_process_message;
+ dbenv->rep_start = __dbcl_rep_start;
+ dbenv->rep_stat = __dbcl_rep_stat;
+ dbenv->set_rep_limit = __dbcl_rep_set_limit;
+ dbenv->set_rep_request = __dbcl_rep_set_request;
+ dbenv->set_rep_transport = __dbcl_rep_set_rep_transport;
+
+ } else
+#endif
+ {
+ dbenv->rep_elect = __rep_elect;
+ dbenv->rep_flush = __rep_flush;
+ dbenv->rep_process_message = __rep_process_message;
+ dbenv->rep_start = __rep_start;
+ dbenv->rep_stat = __rep_stat;
+ dbenv->set_rep_limit = __rep_set_limit;
+ dbenv->set_rep_request = __rep_set_request;
+ dbenv->set_rep_transport = __rep_set_rep_transport;
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+		 * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_REP), &db_rep)) != 0)
+ return (ret);
+ dbenv->rep_handle = db_rep;
+
+ /* Initialize the per-process replication structure. */
+ db_rep->rep_send = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_start --
+ * Become a master or client, and start sending messages to participate
+ * in the replication environment. Must be called after the environment
+ * is open.
+ */
+static int
+__rep_start(dbenv, dbt, flags)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int announce, init_db, redo_prepared, ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "rep_start");
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_start", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->rep_start", flags,
+ DB_REP_CLIENT | DB_REP_LOGSONLY | DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* Exactly one of CLIENT and MASTER must be specified. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_CLIENT, DB_REP_MASTER)) != 0)
+ return (ret);
+ if (!LF_ISSET(DB_REP_CLIENT | DB_REP_MASTER | DB_REP_LOGSONLY)) {
+ __db_err(dbenv,
+ "DB_ENV->rep_start: replication mode must be specified");
+ return (EINVAL);
+ }
+
+ /* Masters can't be logs-only. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_LOGSONLY, DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* We need a transport function. */
+ if (db_rep->rep_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport must be called before DB_ENV->rep_start");
+ return (EINVAL);
+ }
+
+ /* We'd better not have any logged files open if we are a client. */
+ if (LF_ISSET(DB_REP_CLIENT) && (ret = __dbreg_nofiles(dbenv)) != 0) {
+ __db_err(dbenv, "DB_ENV->rep_start called with open files");
+ return (ret);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ if (rep->eid == DB_EID_INVALID)
+ rep->eid = dbenv->rep_eid;
+
+ if (LF_ISSET(DB_REP_MASTER)) {
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT)) {
+ /*
+ * If we're upgrading from having been a client,
+ * preclose, so that we close our temporary database.
+ *
+ * Do not close files that we may have opened while
+ * doing a rep_apply; they'll get closed when we
+ * finally close the environment, but for now, leave
+ * them open, as we don't want to recycle their
+ * fileids, and we may need the handles again if
+ * we become a client and the original master
+ * that opened them becomes a master again.
+ */
+ if ((ret = __rep_preclose(dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Now write a __txn_recycle record so that
+ * clients don't get confused with our txnids
+ * and txnids of previous masters.
+ */
+ F_CLR(dbenv, DB_ENV_REP_CLIENT);
+ if ((ret = __txn_reset(dbenv)) != 0)
+ return (ret);
+ }
+
+ redo_prepared = 0;
+ if (!F_ISSET(rep, REP_F_MASTER)) {
+ /* Master is not yet set. */
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_ISCLIENT);
+ rep->gen = ++rep->w_gen;
+ redo_prepared = 1;
+ } else if (rep->gen == 0)
+ rep->gen = 1;
+ }
+
+ F_SET(rep, REP_F_MASTER);
+ F_SET(dbenv, DB_ENV_REP_MASTER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Send the NEWMASTER message, then restore prepared txns
+ * if and only if we just upgraded from being a client.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0)) == 0 &&
+ redo_prepared)
+ ret = __rep_restore_prepared(dbenv);
+ } else {
+ F_CLR(dbenv, DB_ENV_REP_MASTER);
+ F_SET(dbenv, DB_ENV_REP_CLIENT);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(dbenv, DB_ENV_REP_LOGSONLY);
+
+ announce = !F_ISSET(rep, REP_ISCLIENT) ||
+ rep->master_id == DB_EID_INVALID;
+ init_db = 0;
+ if (!F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_F_MASTER);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(rep, REP_F_LOGSONLY);
+ else
+ F_SET(rep, REP_F_UPGRADE);
+
+ /*
+ * We initialize the client's generation number to 0.
+ * Upon startup, it looks for a master and updates the
+ * generation number as necessary, exactly as it does
+ * during normal operation and a master failure.
+ */
+ rep->gen = 0;
+ rep->master_id = DB_EID_INVALID;
+ init_db = 1;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Abort any prepared transactions that were restored
+ * by recovery. We won't be able to create any txns of
+ * our own until they're resolved, but we can't resolve
+ * them ourselves; the master has to. If any get
+ * resolved as commits, we'll redo them when commit
+ * records come in. Aborts will simply be ignored.
+ */
+ if ((ret = __rep_abort_prepared(dbenv)) != 0)
+ return (ret);
+
+ if ((ret = __rep_client_dbinit(dbenv, init_db)) != 0)
+ return (ret);
+
+ /*
+ * If this client created a newly replicated environment,
+ * then announce the existence of this client. The master
+ * should respond with a message that will tell this client
+ * the current generation number and the current LSN. This
+ * will allow the client to either perform recovery or
+ * simply join in.
+ */
+ if (announce)
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0);
+ }
+ return (ret);
+}
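+
+/*
+ * For reference, a sketch of the application-side call (the contents of
+ * the identification DBT are application-defined; the value below is a
+ * placeholder).  The cdata DBT is what a new client broadcasts in its
+ * REP_NEWCLIENT announcement above; the master path here does not use it:
+ *
+ *	DBT cdata;
+ *	int ret;
+ *
+ *	memset(&cdata, 0, sizeof(cdata));
+ *	cdata.data = "site 1";
+ *	cdata.size = sizeof("site 1");
+ *	ret = dbenv->rep_start(dbenv, &cdata, DB_REP_CLIENT);
+ */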
+
+/*
+ * __rep_client_dbinit --
+ *
+ * Initialize the LSN database on the client side. This is called from the
+ * client initialization code. The startup flag value indicates if
+ * this is the first thread/process starting up and therefore should create
+ * the LSN database. This routine must be called once by each process acting
+ * as a client.
+ */
+static int
+__rep_client_dbinit(dbenv, startup)
+ DB_ENV *dbenv;
+ int startup;
+{
+ DB_REP *db_rep;
+ DB *dbp;
+ int ret, t_ret;
+ u_int32_t flags;
+
+ PANIC_CHECK(dbenv);
+ db_rep = dbenv->rep_handle;
+ dbp = NULL;
+
+#define REPDBNAME "__db.rep.db"
+
+ /* Check if this has already been called on this environment. */
+ if (db_rep->rep_db != NULL)
+ return (0);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+
+ if (startup) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ /*
+ * Ignore errors, because if the file doesn't exist, this
+ * is perfectly OK.
+ */
+ (void)dbp->remove(dbp, REPDBNAME, NULL, 0);
+ }
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ if ((ret = dbp->set_bt_compare(dbp, __rep_bt_cmp)) != 0)
+ goto err;
+
+ /* Allow writes to this database on a client. */
+ F_SET(dbp, DB_AM_CL_WRITER);
+
+ flags = (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0) |
+ (startup ? DB_CREATE : 0);
+ if ((ret = dbp->open(dbp, NULL,
+	    REPDBNAME, NULL, DB_BTREE, flags, 0)) != 0)
+ goto err;
+
+ db_rep->rep_db = dbp;
+
+ if (0) {
+err: if (dbp != NULL &&
+ (t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ db_rep->rep_db = NULL;
+ }
+
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ return (ret);
+}
+
+/*
+ * __rep_bt_cmp --
+ *
+ * Comparison function for the LSN table. We use the entire control
+ * structure as a key (for simplicity, so we don't have to merge the
+ * other fields in the control with the data field), but really only
+ * care about the LSNs.
+ */
+static int
+__rep_bt_cmp(dbp, dbt1, dbt2)
+ DB *dbp;
+ const DBT *dbt1, *dbt2;
+{
+ DB_LSN lsn1, lsn2;
+ REP_CONTROL *rp1, *rp2;
+
+ COMPQUIET(dbp, NULL);
+
+ rp1 = dbt1->data;
+ rp2 = dbt2->data;
+
+ __ua_memcpy(&lsn1, &rp1->lsn, sizeof(DB_LSN));
+ __ua_memcpy(&lsn2, &rp2->lsn, sizeof(DB_LSN));
+
+ if (lsn1.file > lsn2.file)
+ return (1);
+
+ if (lsn1.file < lsn2.file)
+ return (-1);
+
+ if (lsn1.offset > lsn2.offset)
+ return (1);
+
+ if (lsn1.offset < lsn2.offset)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * __rep_abort_prepared --
+ * Abort any prepared transactions that recovery restored.
+ *
+ * This is used by clients that have just run recovery, since
+ * they cannot/should not call txn_recover and handle prepared transactions
+ * themselves.
+ */
+static int
+__rep_abort_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+#define PREPLISTSIZE 50
+ DB_PREPLIST prep[PREPLISTSIZE], *p;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_aborts, ret;
+ long count, i;
+ u_int32_t op;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ do_aborts = 0;
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region->stat.st_nrestores != 0)
+ do_aborts = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_aborts) {
+ op = DB_FIRST;
+ do {
+ if ((ret = dbenv->txn_recover(dbenv,
+ prep, PREPLISTSIZE, &count, op)) != 0)
+ return (ret);
+ for (i = 0; i < count; i++) {
+ p = &prep[i];
+ if ((ret = p->txn->abort(p->txn)) != 0)
+ return (ret);
+ }
+ op = DB_NEXT;
+ } while (count == PREPLISTSIZE);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_restore_prepared --
+ * Restore to a prepared state any prepared but not yet committed
+ * transactions.
+ *
+ * This performs, in effect, a "mini-recovery"; it is called from
+ * __rep_start by newly upgraded masters. There may be transactions that an
+ * old master prepared but did not resolve, which we need to restore to an
+ * active state.
+ */
+static int
+__rep_restore_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, lsn;
+ DBT rec;
+ __txn_ckp_args *ckp_args;
+ __txn_regop_args *regop_args;
+ __txn_xa_regop_args *prep_args;
+ int ret, t_ret;
+ u_int32_t hi_txn, low_txn, rectype;
+ void *txninfo;
+
+ txninfo = NULL;
+ ckp_args = NULL;
+ prep_args = NULL;
+ regop_args = NULL;
+ ZERO_LSN(ckp_lsn);
+ ZERO_LSN(lsn);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /*
+ * We need to consider the set of records between the most recent
+ * checkpoint LSN and the end of the log; any txn in that
+ * range, and only txns in that range, could still have been
+ * active, and thus prepared but not yet committed (PBNYC),
+ * when the old master died.
+ *
+ * Find the most recent checkpoint LSN, and get the record there.
+ * If there is no checkpoint in the log, start off by getting
+ * the very first record in the log instead.
+ */
+ memset(&rec, 0, sizeof(DBT));
+ if ((ret = __txn_getckp(dbenv, &lsn)) == 0) {
+ if ((ret = logc->get(logc, &lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint record at LSN [%lu][%lu] not found",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ if ((ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ ckp_lsn = ckp_args->ckp_lsn;
+ __os_free(dbenv, ckp_args);
+
+ if ((ret = logc->get(logc, &ckp_lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint LSN record [%lu][%lu] not found",
+ (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ } else if ((ret = logc->get(logc, &lsn, &rec, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ /* An empty log means no PBNYC txns. */
+ ret = 0;
+ goto done;
+ }
+ __db_err(dbenv, "Attempt to get first log record failed");
+ goto err;
+ }
+
+ /*
+ * We use the same txnlist infrastructure that recovery does;
+ * it demands an estimate of the high and low txnids for
+ * initialization.
+ *
+ * First, the low txnid.
+ */
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&low_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(low_txn));
+ if (low_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_NEXT)) == 0);
+
+ /* If there are no txns, there are no PBNYC txns. */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* Now, the high txnid. */
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) != 0) {
+ /*
+ * Note that DB_NOTFOUND is unacceptable here because we
+ * had to have looked at some log record to get this far.
+ */
+ __db_err(dbenv, "Final log record not found");
+ goto err;
+ }
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&hi_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(hi_txn));
+ if (hi_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0);
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* We have a high and low txnid. Initialise the txn list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, low_txn, hi_txn, NULL, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Now, walk backward from the end of the log to ckp_lsn. Any
+ * prepares that we hit without first hitting a commit or
+ * abort belong to PBNYC txns, and we need to apply them and
+ * restore them to a prepared state.
+ *
+ * Note that we wind up applying transactions out of order.
+ * Since all PBNYC txns still held locks on the old master and
+ * were isolated, this should be safe.
+ */
+ for (ret = logc->get(logc, &lsn, &rec, DB_LAST);
+ ret == 0 && log_compare(&lsn, &ckp_lsn) > 0;
+ ret = logc->get(logc, &lsn, &rec, DB_PREV)) {
+ memcpy(&rectype, rec.data, sizeof(rectype));
+ switch (rectype) {
+ case DB___txn_regop:
+ /*
+ * It's a commit or abort--but we don't care
+ * which! Just add it to the list of txns
+ * that are resolved.
+ */
+ if ((ret = __txn_regop_read(dbenv, rec.data,
+ &regop_args)) != 0)
+ goto err;
+
+ ret = __db_txnlist_find(dbenv,
+ txninfo, regop_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv, txninfo,
+ regop_args->txnid->txnid,
+ regop_args->opcode, &lsn);
+ __os_free(dbenv, regop_args);
+ break;
+ case DB___txn_xa_regop:
+ /*
+ * It's a prepare. If we haven't put the
+ * txn on our list yet, it hasn't been
+ * resolved, so apply and restore it.
+ */
+ if ((ret = __txn_xa_regop_read(dbenv, rec.data,
+ &prep_args)) != 0)
+ goto err;
+ ret = __db_txnlist_find(dbenv, txninfo,
+ prep_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ if ((ret = __rep_process_txn(dbenv, &rec)) == 0)
+ ret = __txn_restore_txn(dbenv,
+ &lsn, prep_args);
+ __os_free(dbenv, prep_args);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* It's not an error to have hit the beginning of the log. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+done:
+err: t_ret = logc->close(logc, 0);
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_set_limit --
+ * Set a limit on the amount of data that will be sent during a single
+ * invocation of __rep_process_message.
+ */
+static int
+__rep_set_limit(dbenv, gbytes, bytes)
+ DB_ENV *dbenv;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+{
+ DB_REP *db_rep;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_limit: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ if (bytes > GIGABYTE) {
+ gbytes += bytes / GIGABYTE;
+ bytes = bytes % GIGABYTE;
+ }
+ rep->gbytes = gbytes;
+ rep->bytes = bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ return (0);
+}
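+
+/*
+ * Usage sketch (the 10MB figure is arbitrary): cap the data sent on
+ * behalf of a single incoming message at roughly ten megabytes.
+ *
+ *	ret = dbenv->set_rep_limit(dbenv, 0, 10 * 1024 * 1024);
+ */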
+
+/*
+ * __rep_set_request --
+ * Set the minimum and maximum number of log records that we wait
+ *	Set the minimum and maximum number of log records we wait for
+ *	before requesting retransmission of missing records.
+ */
+static int
+__rep_set_request(dbenv, min, max)
+ DB_ENV *dbenv;
+ u_int32_t min;
+ u_int32_t max;
+{
+ LOG *lp;
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_request: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->request_gap = min;
+ rep->max_gap = max;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ dblp = dbenv->lg_handle;
+ if (dblp != NULL && (lp = dblp->reginfo.primary) != NULL) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_set_rep_transport --
+ * Set the transport function for replication.
+ */
+static int
+__rep_set_rep_transport(dbenv, eid, f_send)
+ DB_ENV *dbenv;
+ int eid;
+ int (*f_send) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+{
+ DB_REP *db_rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+
+ if (f_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: no send function specified");
+ return (EINVAL);
+ }
+
+ if (eid < 0) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: eid must be greater than or equal to 0");
+ return (EINVAL);
+ }
+
+ db_rep->rep_send = f_send;
+
+ dbenv->rep_eid = eid;
+ return (0);
+}
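+
+/*
+ * A sketch of wiring in an application callback (the callback name and
+ * environment id are placeholders).  The callback is expected to marshal
+ * the control and rec DBTs onto the application's own transport,
+ * addressed to environment id eid (DB_EID_BROADCAST means every site),
+ * and to return 0 on success:
+ *
+ *	extern int my_send __P((DB_ENV *,
+ *	    const DBT *, const DBT *, int, u_int32_t));
+ *	int ret;
+ *
+ *	ret = dbenv->set_rep_transport(dbenv, 1, my_send);
+ */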
+
+/*
+ * __rep_elect --
+ * Called after master failure to hold/participate in an election for
+ * a new master.
+ */
+static int
+__rep_elect(dbenv, nsites, priority, timeout, eidp)
+ DB_ENV *dbenv;
+ int nsites, priority;
+ u_int32_t timeout;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int in_progress, ret, send_vote, tiebreaker;
+ u_int32_t pid, sec, usec;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_elect", DB_INIT_TXN);
+
+ /* Error checking. */
+ if (nsites <= 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: nsites must be greater than 0");
+ return (EINVAL);
+ }
+ if (priority < 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: priority may not be negative");
+ return (EINVAL);
+ }
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Generate a randomized tiebreaker value. */
+ __os_id(&pid);
+ if ((ret = __os_clock(dbenv, &sec, &usec)) != 0)
+ return (ret);
+ tiebreaker = pid ^ sec ^ usec ^ (u_int)rand() ^ P_TO_UINT32(&pid);
+
+ if ((ret = __rep_elect_init(dbenv,
+ &lsn, nsites, priority, tiebreaker, &in_progress)) != 0) {
+ if (ret == DB_REP_NEWMASTER) {
+ ret = 0;
+ *eidp = dbenv->rep_eid;
+ }
+ return (ret);
+ }
+
+ if (!in_progress) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Beginning an election");
+#endif
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_ELECT, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTSEND, ret, NULL);
+ }
+
+ /* Now send vote */
+ if ((ret =
+ __rep_send_vote(dbenv, &lsn, nsites, priority, tiebreaker)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL);
+
+ ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE1);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT1, ret, NULL);
+ switch (ret) {
+ case 0:
+ /* Check if election complete or phase complete. */
+ if (*eidp != DB_EID_INVALID)
+ return (0);
+ goto phase2;
+ case DB_TIMEOUT:
+ break;
+ default:
+ goto err;
+ }
+ /*
+ * If we got here, we haven't heard from everyone, but we've
+ * run out of time, so it's time to decide if we have enough
+ * votes to pick a winner and if so, to send out a vote to
+ * the winner.
+ */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ send_vote = DB_EID_INVALID;
+ if (rep->sites > rep->nsites / 2) {
+ /* We think we've seen enough to cast a vote. */
+ send_vote = rep->winner;
+ if (rep->winner == rep->eid)
+ rep->votes++;
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (send_vote == DB_EID_INVALID) {
+ /* We do not have enough votes to elect. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not enough votes to elect: received %d of %d",
+ rep->sites, rep->nsites);
+#endif
+ ret = DB_REP_UNAVAIL;
+ goto err;
+
+ }
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) &&
+ send_vote != rep->eid)
+ __db_err(dbenv, "Sending vote");
+#endif
+
+ if (send_vote != rep->eid && (ret = __rep_send_message(dbenv,
+ send_vote, REP_VOTE2, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE2, ret, NULL);
+
+phase2: ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE2);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT2, ret, NULL);
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_TIMEOUT:
+ ret = DB_REP_UNAVAIL;
+ break;
+ default:
+ goto err;
+ }
+
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Ended election with %d", ret);
+#endif
+ return (ret);
+}
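+
+/*
+ * An application-side sketch (site count, priority and timeout are
+ * placeholders; the timeout is in microseconds, as the sleep arithmetic
+ * in __rep_wait below assumes).  If the election chooses this site, the
+ * application promotes itself with rep_start:
+ *
+ *	int eid, ret;
+ *
+ *	ret = dbenv->rep_elect(dbenv, 3, 100, 5000000, &eid);
+ *	if (ret == 0 && eid == whoami)
+ *		ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+ *
+ * where "whoami" is the environment id this site passed to
+ * DB_ENV->set_rep_transport.
+ */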
+
+/*
+ * __rep_elect_init --
+ * Initialize an election. Sets beginp non-zero if the election is
+ * already in progress; makes it 0 otherwise.
+ */
+static int
+__rep_elect_init(dbenv, lsnp, nsites, priority, tiebreaker, beginp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, priority, tiebreaker, *beginp;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret, *tally;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ ret = 0;
+
+ /* We may miscount, as we don't hold the replication mutex here. */
+ rep->stat.st_elections++;
+
+	/* If we are already a master, simply broadcast that fact and return. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ (void)__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, lsnp, NULL, 0);
+ rep->stat.st_elections_won++;
+ return (DB_REP_NEWMASTER);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ *beginp = IN_ELECTION(rep);
+ if (!*beginp) {
+ /*
+ * Make sure that we always initialize all the election fields
+ * before putting ourselves in an election state. That means
+ * issuing calls that can fail (allocation) before setting all
+ * the variables.
+ */
+ if (nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, nsites)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL);
+ rep->nsites = nsites;
+ rep->priority = priority;
+ rep->votes = 0;
+ rep->master_id = DB_EID_INVALID;
+ F_SET(rep, REP_F_EPHASE1);
+
+ /* We have always heard from ourselves. */
+ rep->sites = 1;
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ tally[0] = rep->eid;
+
+ if (priority != 0) {
+ /* Make ourselves the winner to start. */
+ rep->winner = rep->eid;
+ rep->w_priority = priority;
+ rep->w_gen = rep->gen;
+ rep->w_lsn = *lsnp;
+ rep->w_tiebreaker = tiebreaker;
+ } else {
+ rep->winner = DB_EID_INVALID;
+ rep->w_priority = 0;
+ rep->w_gen = 0;
+ ZERO_LSN(rep->w_lsn);
+ rep->w_tiebreaker = 0;
+ }
+ }
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+static int
+__rep_wait(dbenv, timeout, eidp, flags)
+ DB_ENV *dbenv;
+ u_int32_t timeout;
+ int *eidp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int done, ret;
+ u_int32_t sleeptime;
+
+ done = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /*
+	 * The user specifies an overall timeout, but checking is cheap
+	 * and the timeout may be a generous upper bound.
+ * Sleep repeatedly for the smaller of .5s and timeout/10.
+ */
+ sleeptime = (timeout > 5000000) ? 500000 : timeout / 10;
+ if (sleeptime == 0)
+ sleeptime++;
+ while (timeout > 0) {
+ if ((ret = __os_sleep(dbenv, 0, sleeptime)) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ done = !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID;
+
+ *eidp = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (done)
+ return (0);
+
+ if (timeout > sleeptime)
+ timeout -= sleeptime;
+ else
+ timeout = 0;
+ }
+ return (DB_TIMEOUT);
+}
+
+/*
+ * __rep_flush --
+ * Re-push the last log record to all clients, in case they've lost
+ * messages and don't know it.
+ */
+static int
+__rep_flush(dbenv)
+ DB_ENV *dbenv;
+{
+ DBT rec;
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_flush", DB_INIT_TXN);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ memset(&rec, 0, sizeof(rec));
+ memset(&lsn, 0, sizeof(lsn));
+
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) != 0)
+ goto err;
+
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_LOG, &lsn, &rec, 0);
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __rep_stat --
+ * Fetch replication statistics.
+ */
+static int
+__rep_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_REP_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ DB_REP_STAT *stats;
+ LOG *lp;
+ REP *rep;
+ u_int32_t queued;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ /* Allocate a stat struct to return to the user. */
+ if ((ret = __os_umalloc(dbenv, sizeof(DB_REP_STAT), &stats)) != 0)
+ return (ret);
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ memcpy(stats, &rep->stat, sizeof(*stats));
+
+ /* Copy out election stats. */
+ if (IN_ELECTION(rep)) {
+ if (F_ISSET(rep, REP_F_EPHASE1))
+ stats->st_election_status = 1;
+ else if (F_ISSET(rep, REP_F_EPHASE2))
+ stats->st_election_status = 2;
+
+ stats->st_election_nsites = rep->sites;
+ stats->st_election_cur_winner = rep->winner;
+ stats->st_election_priority = rep->w_priority;
+ stats->st_election_gen = rep->w_gen;
+ stats->st_election_lsn = rep->w_lsn;
+ stats->st_election_votes = rep->votes;
+ stats->st_election_tiebreaker = rep->w_tiebreaker;
+ }
+
+ /* Copy out other info that's protected by the rep mutex. */
+ stats->st_env_id = rep->eid;
+ stats->st_env_priority = rep->priority;
+ stats->st_nsites = rep->nsites;
+ stats->st_master = rep->master_id;
+ stats->st_gen = rep->gen;
+
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_status = DB_REP_MASTER;
+ else if (F_ISSET(rep, REP_F_LOGSONLY))
+ stats->st_status = DB_REP_LOGSONLY;
+ else if (F_ISSET(rep, REP_F_UPGRADE))
+ stats->st_status = DB_REP_CLIENT;
+ else
+ stats->st_status = 0;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ queued = rep->stat.st_log_queued;
+ memset(&rep->stat, 0, sizeof(rep->stat));
+ rep->stat.st_log_queued = rep->stat.st_log_queued_total =
+ rep->stat.st_log_queued_max = queued;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Log-related replication info is stored in the log system and
+ * protected by the log region lock.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ stats->st_next_lsn = lp->ready_lsn;
+ stats->st_waiting_lsn = lp->waiting_lsn;
+ } else {
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_next_lsn = lp->lsn;
+ else
+ ZERO_LSN(stats->st_next_lsn);
+ ZERO_LSN(stats->st_waiting_lsn);
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ *statp = stats;
+ return (0);
+}
diff --git a/libdb/rep/rep_record.c b/libdb/rep/rep_record.c
new file mode 100644
index 0000000..4025fa6
--- /dev/null
+++ b/libdb/rep/rep_record.c
@@ -0,0 +1,1513 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *));
+static int __rep_collect_txn __P((DB_ENV *, DB_LSN *, LSN_COLLECTION *));
+static int __rep_lsn_cmp __P((const void *, const void *));
+static int __rep_newfile __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *));
+
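+/*
+ * "Simple" log records are ones a client can write to its log and process
+ * later; commit (__txn_regop), checkpoint (__txn_ckp) and __dbreg_register
+ * records need immediate handling in __rep_apply.
+ */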
+#define IS_SIMPLE(R) ((R) != DB___txn_regop && \
+ (R) != DB___txn_ckp && (R) != DB___dbreg_register)
+
+/*
+ * __rep_process_message --
+ *
+ * This routine takes an incoming message and processes it.
+ *
+ * control: contains the control fields from the record
+ * rec: contains the actual record
+ * eidp: contains the machine id of the sender of the message;
+ * in the case of a DB_NEWMASTER message, returns the eid
+ * of the new master.
+ *
+ * PUBLIC: int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+ */
+int
+__rep_process_message(dbenv, control, rec, eidp)
+ DB_ENV *dbenv;
+ DBT *control, *rec;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN init_lsn, lsn, newfilelsn, oldfilelsn;
+ DB_REP *db_rep;
+ DBT *d, data_dbt, lsndbt, mylog;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL *rp;
+ REP_VOTE_INFO *vi;
+ u_int32_t bytes, gen, gbytes, type, unused;
+ int check_limit, cmp, done, do_req, i;
+ int master, old, recovering, ret, t_ret, *tally;
+
+ PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->tx_handle, "rep_process_message", DB_INIT_TXN);
+
+	/* The control argument must be non-NULL. */
+ if (control == NULL || control->size == 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: control argument must be specified");
+ return (EINVAL);
+ }
+
+ ret = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gen = rep->gen;
+ recovering = F_ISSET(rep, REP_F_RECOVER);
+
+ rep->stat.st_msgs_processed++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ rp = (REP_CONTROL *)control->data;
+
+#if 0
+ __rep_print_message(dbenv, *eidp, rp, "rep_process_message");
+#endif
+
+ /* Complain if we see an improper version number. */
+ if (rp->rep_version != DB_REPVERSION) {
+ __db_err(dbenv,
+ "unexpected replication message version %d, expected %d",
+ rp->rep_version, DB_REPVERSION);
+ return (EINVAL);
+ }
+ if (rp->log_version != DB_LOGVERSION) {
+ __db_err(dbenv,
+ "unexpected log record version %d, expected %d",
+ rp->log_version, DB_LOGVERSION);
+ return (EINVAL);
+ }
+
+ /*
+ * Check for generation number matching. Ignore any old messages
+ * except requests that are indicative of a new client that needs
+ * to get in sync.
+ */
+ if (rp->gen < gen && rp->rectype != REP_ALIVE_REQ &&
+ rp->rectype != REP_NEWCLIENT && rp->rectype != REP_MASTER_REQ) {
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_msgs_badgen++;
+ return (0);
+ }
+ if (rp->gen > gen && rp->rectype != REP_ALIVE &&
+ rp->rectype != REP_NEWMASTER)
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0));
+
+ /*
+ * We need to check if we're in recovery and if we are
+ * then we need to ignore any messages except VERIFY, VOTE,
+ * ELECT (the master might fail while we are recovering), and
+ * ALIVE_REQ.
+ */
+ if (recovering)
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ case REP_ALIVE_REQ:
+ case REP_ELECT:
+ case REP_NEWCLIENT:
+ case REP_NEWMASTER:
+ case REP_NEWSITE:
+ case REP_VERIFY:
+ R_LOCK(dbenv, &dblp->reginfo);
+ cmp = log_compare(&lp->verify_lsn, &rp->lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (cmp != 0)
+ goto skip;
+ /* FALLTHROUGH */
+ case REP_VOTE1:
+ case REP_VOTE2:
+ break;
+ default:
+skip: /*
+ * We don't hold the rep mutex, and could
+ * miscount if we race.
+ */
+ rep->stat.st_msgs_recover++;
+
+ /* Check for need to retransmit. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ do_req = *eidp == rep->master_id &&
+ ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs *= 2;
+				if (lp->wait_recs > rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ lp->rcvd_recs = 0;
+ lsn = lp->verify_lsn;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (do_req)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_VERIFY_REQ, &lsn, NULL, 0);
+
+ return (ret);
+ }
+
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ ANYSITE(dbenv);
+ if (rp->gen > gen && rp->flags)
+ return (__rep_new_master(dbenv, rp, *eidp));
+ break;
+ case REP_ALIVE_REQ:
+ ANYSITE(dbenv);
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_ALIVE, &lsn, NULL,
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ? 1 : 0));
+ case REP_ALL_REQ:
+ MASTER_ONLY(dbenv);
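+		/*
+		 * A client has asked for every log record from rp->lsn to the
+		 * end of the log.  Walk the log with a cursor and stream the
+		 * records back; if a transfer limit (gbytes/bytes) is
+		 * configured, throttle by switching to REP_LOG_MORE so the
+		 * client knows to ask again for the remainder.
+		 */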
+ gbytes = bytes = 0;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gbytes = rep->gbytes;
+ bytes = rep->bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ check_limit = gbytes != 0 || bytes != 0;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ oldfilelsn = lsn = rp->lsn;
+ type = REP_LOG;
+ for (ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ ret == 0 && type == REP_LOG;
+ ret = logc->get(logc, &lsn, &data_dbt, DB_NEXT)) {
+ /*
+ * lsn.offset will only be 0 if this is the
+ * beginning of the log; DB_SET, but not DB_NEXT,
+ * can set the log cursor to [n][0].
+ */
+ if (lsn.offset == 0)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+ else {
+ /*
+ * DB_NEXT will never run into offsets
+ * of 0; thus, when a log file changes,
+ * we'll have a real log record with
+ * some lsn [n][m], and we'll also want to send
+ * a NEWFILE message with lsn [n][0].
+ * So that the client can detect gaps,
+ * send in the rec parameter the
+ * last LSN in the old file.
+ */
+ if (lsn.file != oldfilelsn.file) {
+ newfilelsn.file = lsn.file;
+ newfilelsn.offset = 0;
+
+ memset(&lsndbt, 0, sizeof(DBT));
+ lsndbt.size = sizeof(DB_LSN);
+ lsndbt.data = &oldfilelsn;
+
+ if ((ret = __rep_send_message(dbenv,
+ *eidp, REP_NEWFILE, &newfilelsn,
+ &lsndbt, 0)) != 0)
+ break;
+ }
+ if (check_limit) {
+ /*
+ * data_dbt.size is only the size of
+ * the log record; it doesn't count
+ * the size of the control structure.
+ * Factor that in as well so we're
+ * not off by a lot if our log
+ * records are small.
+ */
+ while (bytes < data_dbt.size +
+ sizeof(REP_CONTROL)) {
+ if (gbytes > 0) {
+ bytes += GIGABYTE;
+ --gbytes;
+ continue;
+ }
+ /*
+ * We don't hold the rep mutex,
+ * and may miscount.
+ */
+ rep->stat.st_nthrottles++;
+ type = REP_LOG_MORE;
+ goto send;
+ }
+ bytes -= (data_dbt.size +
+ sizeof(REP_CONTROL));
+ }
+send: ret = __rep_send_message(dbenv, *eidp,
+ type, &lsn, &data_dbt, 0);
+ }
+
+ /*
+ * In case we're about to change files and need it
+ * for a NEWFILE message, save the current LSN.
+ */
+ oldfilelsn = lsn;
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_ELECT:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->gen++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = IN_ELECTION(rep) ? 0 : DB_REP_HOLDELECTION;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+#ifdef NOTYET
+ case REP_FILE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_FILE_REQ:
+ MASTER_ONLY(dbenv);
+ return (__rep_send_file(dbenv, rec, *eidp));
+ break;
+#endif
+ case REP_LOG:
+ case REP_LOG_MORE:
+ CLIENT_ONLY(dbenv);
+ if ((ret = __rep_apply(dbenv, rp, rec)) != 0)
+ return (ret);
+ if (rp->rectype == REP_LOG_MORE) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ master = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &lsn, NULL, 0);
+ }
+ return (ret);
+ case REP_LOG_REQ:
+ MASTER_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ lsn = rp->lsn;
+
+ /*
+ * There are three different cases here.
+ * 1. We asked for a particular LSN and got it.
+ * 2. We asked for an LSN of X,0 which is invalid and got the
+ * first log record in a particular file.
+ * 3. We asked for an LSN and it's not found because it is
+ * beyond the end of a log file and we need a NEWFILE msg.
+ */
+ ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ cmp = log_compare(&lsn, &rp->lsn);
+
+ if (ret == 0 && cmp == 0) /* Case 1 */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_LOG, &rp->lsn, &data_dbt, 0);
+ else if (ret == DB_NOTFOUND ||
+ (ret == 0 && cmp < 0 && rp->lsn.offset == 0))
+ /* Cases 2 and 3: Send a NEWFILE message. */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_NEWSITE:
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_newsites++;
+
+ /* This is a rebroadcast; simply tell the application. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ (void)__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0);
+ }
+ return (DB_REP_NEWSITE);
+ case REP_NEWCLIENT:
+ /*
+ * This message was received and should have resulted in the
+ * application entering the machine ID in its machine table.
+ * We respond to this with an ALIVE to send relevant information
+ * to the new client. But first, broadcast the new client's
+ * record to all the clients.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWSITE, &rp->lsn, rec, 0)) != 0)
+ return (ret);
+
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ /* FALLTHROUGH */
+ case REP_MASTER_REQ:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ case REP_NEWFILE:
+ CLIENT_ONLY(dbenv);
+ return (__rep_apply(dbenv, rp, rec));
+ case REP_NEWMASTER:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER) &&
+ *eidp != dbenv->rep_eid) {
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_dupmasters++;
+ return (DB_REP_DUPMASTER);
+ }
+ return (__rep_new_master(dbenv, rp, *eidp));
+ case REP_PAGE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PAGE_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_PLIST: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PLIST_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_VERIFY:
+ CLIENT_ONLY(dbenv);
+ DB_ASSERT((F_ISSET(rep, REP_F_RECOVER) &&
+ !IS_ZERO_LSN(lp->verify_lsn)) ||
+ (!F_ISSET(rep, REP_F_RECOVER) &&
+ IS_ZERO_LSN(lp->verify_lsn)));
+ if (IS_ZERO_LSN(lp->verify_lsn))
+ return (0);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
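+		/*
+		 * Compare our copy of the record at the verify LSN with the
+		 * copy the master sent.  If they match, this is the point at
+		 * which the two logs last agreed: truncate or recover back to
+		 * it and re-request everything after it.  If they differ,
+		 * step back one record and ask the master to verify an
+		 * earlier LSN.
+		 */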
+ memset(&mylog, 0, sizeof(mylog));
+ if ((ret = logc->get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
+ goto rep_verify_err;
+ if (mylog.size == rec->size &&
+ memcmp(mylog.data, rec->data, rec->size) == 0) {
+ /*
+ * If we're a logs-only client, we can simply truncate
+ * the log to the point where it last agreed with the
+ * master's; otherwise, recover to that point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ ZERO_LSN(lp->verify_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ INIT_LSN(init_lsn);
+ if ((ret = dbenv->log_flush(dbenv,
+ &rp->lsn)) != 0 ||
+ (ret = __log_vtruncate(dbenv,
+ &rp->lsn, &init_lsn)) != 0)
+ goto rep_verify_err;
+ } else if ((ret = __db_apprec(dbenv, &rp->lsn, 0)) != 0)
+ goto rep_verify_err;
+
+ /*
+ * The log has been truncated (either by __db_apprec or
+ * directly). We want to make sure we're waiting for
+ * the LSN at the new end-of-log, not some later point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->ready_lsn = lp->lsn;
+ ZERO_LSN(lp->waiting_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Discard any log records we have queued; we're
+ * about to re-request them, and can't trust the
+ * ones in the queue.
+ */
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ if ((ret = db_rep->rep_db->truncate(db_rep->rep_db,
+ NULL, &unused, 0)) != 0) {
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ goto rep_verify_err;
+ }
+ rep->stat.st_log_queued = 0;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+ * try to bring ourselves up to date again anyway.
+ */
+ if ((master = rep->master_id) == DB_EID_INVALID) {
+ DB_ASSERT(IN_ELECTION(rep));
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = 0;
+ } else {
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &rp->lsn, NULL, 0);
+ }
+ } else if ((ret =
+ logc->get(logc, &lsn, &mylog, DB_PREV)) == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = lsn;
+ lp->rcvd_recs = 0;
+ lp->wait_recs = rep->request_gap;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv,
+ *eidp, REP_VERIFY_REQ, &lsn, NULL, 0);
+ }
+
+rep_verify_err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VERIFY_FAIL:
+ rep->stat.st_outdated++;
+ return (DB_REP_OUTDATED);
+ case REP_VERIFY_REQ:
+ MASTER_ONLY(dbenv);
+ type = REP_VERIFY;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ d = &data_dbt;
+ memset(d, 0, sizeof(data_dbt));
+ F_SET(logc, DB_LOG_SILENT_ERR);
+ ret = logc->get(logc, &rp->lsn, d, DB_SET);
+		/*
+		 * If the LSN was invalid, we might get DB_NOTFOUND, EIO, or
+		 * almost anything else.  On DB_NOTFOUND there is a chance
+		 * that the LSN precedes the first log file we still have; in
+		 * that case send a REP_VERIFY_FAIL so that the client can
+		 * return DB_REP_OUTDATED.
+		 */
+ if (ret == DB_NOTFOUND &&
+ __log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
+ old != 0)
+ type = REP_VERIFY_FAIL;
+
+ if (ret != 0)
+ d = NULL;
+
+ ret = __rep_send_message(dbenv, *eidp, type, &rp->lsn, d, 0);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VOTE1:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Master received vote");
+#endif
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
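+		/*
+		 * Election phase 1: tally incoming VOTE1 messages, remembering
+		 * the best candidate seen so far (by priority, then LSN, then
+		 * tiebreaker).  Once every known site has voted and the winner
+		 * has a nonzero priority, switch to phase 2 and vote for the
+		 * winner.
+		 */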
+ vi = (REP_VOTE_INFO *)rec->data;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * If you get a vote and you're not in an election, simply
+ * return an indicator to hold an election which will trigger
+ * this site to send its vote again.
+ */
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not in election, but received vote1");
+#endif
+ ret = DB_REP_HOLDELECTION;
+ goto unlock;
+ }
+
+ if (F_ISSET(rep, REP_F_EPHASE2))
+ goto unlock;
+
+ /* Check if this site knows about more sites than we do. */
+ if (vi->nsites > rep->nsites)
+ rep->nsites = vi->nsites;
+
+ /* Check if we've heard from this site already. */
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ for (i = 0; i < rep->sites; i++) {
+ if (tally[i] == *eidp)
+ /* Duplicate vote. */
+ goto unlock;
+ }
+
+		/*
+		 * We are keeping this vote; see whether that changes our
+		 * count of the number of sites.
+		 */
+ if (rep->sites + 1 > rep->nsites)
+ rep->nsites = rep->sites + 1;
+ if (rep->nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0)
+ goto unlock;
+
+ tally[rep->sites] = *eidp;
+ rep->sites++;
+
+ /*
+ * Change winners if the incoming record has a higher
+ * priority, or an equal priority but a larger LSN, or
+ * an equal priority and LSN but higher "tiebreaker" value.
+ */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv,
+ "%s(eid)%d (pri)%d (gen)%d (sites)%d [%d,%d]",
+ "Existing vote: ",
+ rep->winner, rep->w_priority, rep->w_gen,
+ rep->sites, rep->w_lsn.file, rep->w_lsn.offset);
+ __db_err(dbenv,
+ "Incoming vote: (eid)%d (pri)%d (gen)%d [%d,%d]",
+ *eidp, vi->priority, rp->gen, rp->lsn.file,
+ rp->lsn.offset);
+ }
+#endif
+ cmp = log_compare(&rp->lsn, &rep->w_lsn);
+ if (vi->priority > rep->w_priority ||
+ (vi->priority != 0 && vi->priority == rep->w_priority &&
+ (cmp > 0 ||
+ (cmp == 0 && vi->tiebreaker > rep->w_tiebreaker)))) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Accepting new vote");
+#endif
+ rep->winner = *eidp;
+ rep->w_priority = vi->priority;
+ rep->w_lsn = rp->lsn;
+ rep->w_gen = rp->gen;
+ }
+ master = rep->winner;
+ lsn = rep->w_lsn;
+ done = rep->sites == rep->nsites && rep->w_priority != 0;
+ if (done) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv, "Phase1 election done");
+ __db_err(dbenv, "Voting for %d%s",
+ master, master == rep->eid ? "(self)" : "");
+ }
+#endif
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+
+ if (done && master == rep->eid) {
+ rep->votes++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (0);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /* Vote for someone else. */
+ if (done)
+ return (__rep_send_message(dbenv,
+ master, REP_VOTE2, NULL, NULL, 0));
+
+ /* Election is still going on. */
+ break;
+ case REP_VOTE2:
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "We received a vote%s",
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ?
+ " (master)" : "");
+#endif
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ rep->stat.st_elections_won++;
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
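+		/*
+		 * Election phase 2: count VOTE2 messages cast for this site.
+		 * Once we hold votes from a majority of the sites we know
+		 * about, declare ourselves master and broadcast NEWMASTER.
+		 */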
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* If we have priority 0, we should never get a vote. */
+ DB_ASSERT(rep->priority != 0);
+
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Not in election, got vote");
+#endif
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (DB_REP_HOLDELECTION);
+ }
+ /* avoid counting duplicates. */
+ rep->votes++;
+ done = rep->votes > rep->nsites / 2;
+ if (done) {
+ rep->master_id = rep->eid;
+ rep->gen = rep->w_gen + 1;
+ ELECTION_DONE(rep);
+ F_CLR(rep, REP_F_UPGRADE);
+ F_SET(rep, REP_F_MASTER);
+ *eidp = rep->master_id;
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Got enough votes to win; election done; winner is %d",
+ rep->master_id);
+#endif
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (done) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Declare me the winner. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "I won, sending NEWMASTER");
+#endif
+ rep->stat.st_elections_won++;
+ if ((ret = __rep_send_message(dbenv, DB_EID_BROADCAST,
+ REP_NEWMASTER, &lsn, NULL, 0)) != 0)
+ break;
+ return (DB_REP_NEWMASTER);
+ }
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: unknown replication message: type %lu",
+ (u_long)rp->rectype);
+ return (EINVAL);
+ }
+
+ return (0);
+
+unlock: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_apply --
+ *
+ * Handle incoming log records on a client, applying when possible and
+ * entering into the bookkeeping table otherwise. This is the guts of
+ * the routine that handles the state machine that describes how we
+ * process and manage incoming log records.
+ */
+static int
+__rep_apply(dbenv, rp, rec)
+ DB_ENV *dbenv;
+ REP_CONTROL *rp;
+ DBT *rec;
+{
+ __dbreg_register_args dbreg_args;
+ __txn_ckp_args ckp_args;
+ DB_REP *db_rep;
+ DBT control_dbt, key_dbt, lsn_dbt, nextrec_dbt, rec_dbt;
+ DB *dbp;
+ DBC *dbc;
+ DB_LOG *dblp;
+ DB_LSN ckp_lsn, lsn, newfile_lsn, next_lsn, waiting_lsn;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL lsn_rc;
+ u_int32_t rectype, txnid;
+ int cmp, do_req, eid, have_mutex, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dbp = db_rep->rep_db;
+ dbc = NULL;
+ have_mutex = ret = 0;
+ memset(&control_dbt, 0, sizeof(control_dbt));
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+
+ /*
+ * If this is a log record and it's the next one in line, simply
+ * write it to the log. If it's a "normal" log record, i.e., not
+ * a COMMIT or CHECKPOINT or something that needs immediate processing,
+ * just return. If it's a COMMIT, CHECKPOINT or LOG_REGISTER (i.e.,
+ * not SIMPLE), handle it now. If it's a NEWFILE record, then we
+ * have to be prepared to deal with a logfile change.
+ */
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = dblp->reginfo.primary;
+ cmp = log_compare(&rp->lsn, &lp->ready_lsn);
+
+ /*
+ * This is written to assume that you don't end up with a lot of
+ * records after a hole. That is, it optimizes for the case where
+ * there is only a record or two after a hole. If you have a lot
+ * of records after a hole, what you'd really want to do is write
+ * all of them and then process all the commits, checkpoints, etc.
+ * together. That is more complicated processing that we can add
+ * later if necessary.
+ *
+ * That said, I really don't want to do db operations holding the
+ * log mutex, so the synchronization here is tricky.
+ */
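+	/*
+	 * cmp == 0: this is the record we were waiting for; write it and
+	 * drain any queued records that now follow in sequence.
+	 * cmp > 0: the record is ahead of what we expect; queue it in the
+	 * bookkeeping database and possibly re-request the missing records.
+	 * cmp < 0: we have already applied this record; count it as a
+	 * duplicate.
+	 */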
+ if (cmp == 0) {
+ /* We got the log record that we are expecting. */
+ if (rp->rectype == REP_NEWFILE) {
+newfile: ret = __rep_newfile(dbenv, rp, rec, &lp->ready_lsn);
+
+ /* Make this evaluate to a simple rectype. */
+ rectype = 0;
+ } else {
+ DB_ASSERT(log_compare(&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv, &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ if (ret == 0)
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_records++;
+ }
+ while (ret == 0 && IS_SIMPLE(rectype) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) {
+ /*
+ * We just filled in a gap in the log record stream.
+ * Write subsequent records to the log.
+ */
+gap_check: lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (have_mutex == 0) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 1;
+ }
+ if (dbc == NULL &&
+ (ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+
+ /* The DBTs need to persist through another call. */
+ F_SET(&control_dbt, DB_DBT_REALLOC);
+ F_SET(&rec_dbt, DB_DBT_REALLOC);
+ if ((ret = dbc->c_get(dbc,
+ &control_dbt, &rec_dbt, DB_RMW | DB_FIRST)) != 0)
+ goto err;
+
+ rp = (REP_CONTROL *)control_dbt.data;
+ rec = &rec_dbt;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ R_LOCK(dbenv, &dblp->reginfo);
+ /*
+ * We need to check again, because it's possible that
+ * some other thread of control changed the waiting_lsn
+ * or removed that record from the database.
+ */
+ if (log_compare(&lp->ready_lsn, &rp->lsn) == 0) {
+ if (rp->rectype != REP_NEWFILE) {
+ DB_ASSERT(log_compare
+ (&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv,
+ &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ if (ret == 0)
+ rep->stat.st_log_records++;
+ } else {
+ ret = __rep_newfile(dbenv,
+ rp, rec, &lp->ready_lsn);
+ rectype = 0;
+ }
+ waiting_lsn = lp->waiting_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We may miscount, as we don't hold the rep
+ * mutex.
+ */
+ --rep->stat.st_log_queued;
+
+ /*
+ * Update waiting_lsn. We need to move it
+ * forward to the LSN of the next record
+ * in the queue.
+ */
+ memset(&lsn_dbt, 0, sizeof(lsn_dbt));
+ F_SET(&lsn_dbt, DB_DBT_USERMEM);
+ lsn_dbt.data = &lsn_rc;
+ lsn_dbt.ulen = sizeof(lsn_rc);
+ memset(&lsn_rc, 0, sizeof(lsn_rc));
+
+ /*
+ * If the next item in the database is a log
+ * record--the common case--we're not
+ * interested in its contents, just in its LSN.
+ * If it's a newfile message, though, the
+ * data field may be the LSN of the last
+ * record in the old file, and we need to use
+ * that to determine whether or not there's
+ * a gap.
+ *
+ * Optimize both these cases by doing a partial
+ * get of the data item. If it's a newfile
+ * record, we'll get the whole LSN, and if
+ * it's not, we won't waste time allocating.
+ */
+ memset(&nextrec_dbt, 0, sizeof(nextrec_dbt));
+ F_SET(&nextrec_dbt,
+ DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ nextrec_dbt.ulen =
+ nextrec_dbt.dlen = sizeof(newfile_lsn);
+ ZERO_LSN(newfile_lsn);
+ nextrec_dbt.data = &newfile_lsn;
+
+ ret = dbc->c_get(dbc,
+ &lsn_dbt, &nextrec_dbt, DB_NEXT);
+ if (ret != DB_NOTFOUND && ret != 0)
+ goto err;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (ret == DB_NOTFOUND) {
+ /*
+ * Do a quick double-check to make
+ * sure waiting_lsn hasn't changed.
+ * It's possible that between the
+ * DB_NOTFOUND return and the R_LOCK,
+ * some record was added to the
+ * database, and we don't want to lose
+ * sight of the fact that it's there.
+ */
+ if (log_compare(&waiting_lsn,
+ &lp->waiting_lsn) == 0)
+ ZERO_LSN(
+ lp->waiting_lsn);
+
+ /*
+ * Whether or not the current record is
+ * simple, there's no next one, and
+ * therefore we haven't got anything
+ * else to do right now. Break out.
+ */
+ break;
+ }
+
+ DB_ASSERT(lsn_dbt.size == sizeof(lsn_rc));
+
+ /*
+ * NEWFILE records have somewhat convoluted
+ * semantics, so there are five cases
+ * pertaining to what the newly-gotten record
+ * is and what we want to do about it.
+ *
+ * 1) This isn't a NEWFILE record. Advance
+ * waiting_lsn and proceed.
+ *
+ * 2) NEWFILE, no LSN stored as the datum,
+ * lsn_rc.lsn == ready_lsn. The NEWFILE
+ * record is next, so set waiting_lsn =
+ * ready_lsn.
+ *
+ * 3) NEWFILE, no LSN stored as the datum, but
+ * lsn_rc.lsn > ready_lsn. There's still a
+ * gap; set waiting_lsn = lsn_rc.lsn.
+ *
+ * 4) NEWFILE, newfile_lsn in datum, and it's <
+ * ready_lsn. (If the datum is non-empty,
+ * it's the LSN of the last record in a log
+ * file, not the end of the log, and
+ * lsn_rc.lsn is the LSN of the start of
+ * the new file--we didn't have the end of
+ * the old log handy when we sent the
+ * record.) No gap--we're ready to
+ * proceed. Set both waiting and ready_lsn
+ * to lsn_rc.lsn.
+ *
+ * 5) NEWFILE, newfile_lsn in datum, and it's >=
+ * ready_lsn. We're still missing at
+ * least one record; set waiting_lsn,
+ * but not ready_lsn, to lsn_rc.lsn.
+ */
+ if (lsn_rc.rectype == REP_NEWFILE &&
+ nextrec_dbt.size > 0 && log_compare(
+ &newfile_lsn, &lp->ready_lsn) < 0)
+ /* Case 4. */
+ lp->ready_lsn =
+ lp->waiting_lsn = lsn_rc.lsn;
+ else {
+ /* Cases 1, 2, 3, and 5. */
+ DB_ASSERT(log_compare(&lsn_rc.lsn,
+ &lp->ready_lsn) >= 0);
+ lp->waiting_lsn = lsn_rc.lsn;
+ }
+
+ /*
+ * If the current rectype is simple, we're
+ * done with it, and we should check and see
+ * whether the next record queued is the next
+ * one we're ready for. This is just the loop
+ * condition, so we continue.
+ *
+ * Otherwise, we need to break out of this loop
+ * and process this record first.
+ */
+ if (!IS_SIMPLE(rectype))
+ break;
+ }
+ }
+
+ /*
+ * Check if we're at a gap in the table and if so, whether we
+ * need to ask for any records.
+ */
+ do_req = 0;
+ if (!IS_ZERO_LSN(lp->waiting_lsn) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) != 0) {
+ next_lsn = lp->ready_lsn;
+ do_req = ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (dbc != NULL) {
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 0;
+ }
+ dbc = NULL;
+
+ if (do_req) {
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ eid = db_rep->region->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0)) != 0)
+ goto err;
+ }
+ }
+ } else if (cmp > 0) {
+ /*
+ * The LSN is higher than the one we were waiting for.
+ * If it is a NEWFILE message, this may not mean that
+ * there's a gap; in some cases, NEWFILE messages contain
+ * the LSN of the beginning of the new file instead
+ * of the end of the old.
+ *
+ * In these cases, the rec DBT will contain the last LSN
+ * of the old file, so we can tell whether there's a gap.
+ */
+ if (rp->rectype == REP_NEWFILE &&
+ rp->lsn.file == lp->ready_lsn.file + 1 &&
+ rp->lsn.offset == 0) {
+ DB_ASSERT(rec != NULL && rec->data != NULL &&
+ rec->size == sizeof(DB_LSN));
+ memcpy(&lsn, rec->data, sizeof(DB_LSN));
+ if (log_compare(&lp->ready_lsn, &lsn) > 0)
+ /*
+ * The last LSN in the old file is smaller
+ * than the one we're expecting, so there's
+ * no gap--the one we're expecting just
+ * doesn't exist.
+ */
+ goto newfile;
+ }
+
+ /*
+ * This record isn't in sequence; add it to the table and
+ * update waiting_lsn if necessary.
+ */
+ memset(&key_dbt, 0, sizeof(key_dbt));
+ key_dbt.data = rp;
+ key_dbt.size = sizeof(*rp);
+ next_lsn = lp->lsn;
+ do_req = 0;
+ if (lp->wait_recs == 0) {
+ /*
+ * This is a new gap. Initialize the number of
+ * records that we should wait before requesting
+ * that it be resent. We grab the limits out of
+ * the rep without the mutex.
+ */
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+
+ if (++lp->rcvd_recs >= lp->wait_recs) {
+ /*
+ * If we've waited long enough, request the record
+ * and double the wait interval.
+ */
+ do_req = 1;
+ lp->wait_recs <<= 1;
+ lp->rcvd_recs = 0;
+ if (lp->wait_recs > rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->put(dbp, NULL, &key_dbt, rec, 0);
+ rep->stat.st_log_queued++;
+ rep->stat.st_log_queued_total++;
+ if (rep->stat.st_log_queued_max < rep->stat.st_log_queued)
+ rep->stat.st_log_queued_max = rep->stat.st_log_queued;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (ret != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (IS_ZERO_LSN(lp->waiting_lsn) ||
+ log_compare(&rp->lsn, &lp->waiting_lsn) < 0)
+ lp->waiting_lsn = rp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (do_req) {
+ /* Request the LSN we are still waiting for. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* May as well do this after we grab the mutex. */
+ eid = db_rep->region->master_id;
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+			 * try to bring ourselves up to date again anyway.
+ */
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0);
+ } else
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ }
+ return (ret);
+ } else {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_duplicated++;
+ }
+ if (ret != 0 || cmp < 0 || (cmp == 0 && IS_SIMPLE(rectype)))
+ goto done;
+
+ /*
+ * If we got here, then we've got a log record in rp and rec that
+ * we need to process.
+ */
+ switch(rectype) {
+ case DB___dbreg_register:
+ /*
+ * DB opens occur in the context of a transaction, so we can
+ * simply handle them when we process the transaction. Closes,
+ * however, are not transaction-protected, so we have to
+ * handle them here.
+ *
+ * Note that it should be unsafe for the master to do a close
+ * of a file that was opened in an active transaction, so we
+ * should be guaranteed to get the ordering right.
+ */
+ memcpy(&txnid, (u_int8_t *)rec->data +
+ ((u_int8_t *)&dbreg_args.txnid - (u_int8_t *)&dbreg_args),
+ sizeof(u_int32_t));
+ if (txnid == TXN_INVALID &&
+ !F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, rec, &rp->lsn,
+ DB_TXN_APPLY, NULL);
+ break;
+ case DB___txn_ckp:
+ /* Sync the memory pool. */
+ memcpy(&ckp_lsn, (u_int8_t *)rec->data +
+ ((u_int8_t *)&ckp_args.ckp_lsn - (u_int8_t *)&ckp_args),
+ sizeof(DB_LSN));
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = dbenv->memp_sync(dbenv, &ckp_lsn);
+ else
+ /*
+ * We ought to make sure the logs on a logs-only
+ * replica get flushed now and again.
+ */
+ ret = dbenv->log_flush(dbenv, &ckp_lsn);
+ /* Update the last_ckp in the txn region. */
+ if (ret == 0)
+ __txn_updateckp(dbenv, &rp->lsn);
+ break;
+ case DB___txn_regop:
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ do {
+ /*
+ * If an application is doing app-specific
+ * recovery and acquires locks while applying
+ * a transaction, it can deadlock. Any other
+ * locks held by this thread should have been
+ * discarded in the __rep_process_txn error
+ * path, so if we simply retry, we should
+ * eventually succeed.
+ */
+ ret = __rep_process_txn(dbenv, rec);
+ } while (ret == DB_LOCK_DEADLOCK);
+ break;
+ default:
+ goto err;
+ }
+
+ /* Check if we need to go back into the table. */
+ if (ret == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0)
+ goto gap_check;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+done:
+err: if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (have_mutex)
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (control_dbt.data != NULL)
+ __os_ufree(dbenv, control_dbt.data);
+ if (rec_dbt.data != NULL)
+ __os_ufree(dbenv, rec_dbt.data);
+
+ return (ret);
+}
+
+/*
+ * __rep_process_txn --
+ *
+ * This is the routine that actually gets a transaction ready for
+ * processing.
+ *
+ * PUBLIC: int __rep_process_txn __P((DB_ENV *, DBT *));
+ */
+int
+__rep_process_txn(dbenv, rec)
+ DB_ENV *dbenv;
+ DBT *rec;
+{
+ DBT data_dbt;
+ DB_LOCKREQ req, *lvp;
+ DB_LOGC *logc;
+ DB_LSN prev_lsn, *lsnp;
+ DB_REP *db_rep;
+ LSN_COLLECTION lc;
+ REP *rep;
+ __txn_regop_args *txn_args;
+ __txn_xa_regop_args *prep_args;
+ u_int32_t lockid, op, rectype;
+ int i, ret, t_ret;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ void *txninfo;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ logc = NULL;
+ txninfo = NULL;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /*
+ * There are two phases: First, we have to traverse
+ * backwards through the log records gathering the list
+ * of all LSNs in the transaction. Once we have this information,
+ * we can loop through, acquire the locks we need for each record,
+ * and then apply it.
+ */
+ dtab = NULL;
+
+ /*
+ * We may be passed a prepare (if we're restoring a prepare
+ * on upgrade) instead of a commit (the common case).
+ * Check which and behave appropriately.
+ */
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ memset(&lc, 0, sizeof(lc));
+ if (rectype == DB___txn_regop) {
+ /*
+ * We're the end of a transaction. Make sure this is
+		 * This is the end of a transaction.  Make sure it is
+ */
+ if ((ret = __txn_regop_read(dbenv, rec->data, &txn_args)) != 0)
+ return (ret);
+ op = txn_args->opcode;
+ prev_lsn = txn_args->prev_lsn;
+ __os_free(dbenv, txn_args);
+ if (op != TXN_COMMIT)
+ return (0);
+ } else {
+ /* We're a prepare. */
+ DB_ASSERT(rectype == DB___txn_xa_regop);
+
+ if ((ret =
+ __txn_xa_regop_read(dbenv, rec->data, &prep_args)) != 0)
+ return (ret);
+ prev_lsn = prep_args->prev_lsn;
+ __os_free(dbenv, prep_args);
+ }
+
+ /* Phase 1. Get a list of the LSNs in this transaction, and sort it. */
+ if ((ret = __rep_collect_txn(dbenv, &prev_lsn, &lc)) != 0)
+ return (ret);
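+	/*
+	 * __rep_collect_txn follows prev_lsn pointers, so the LSNs come back
+	 * in roughly reverse order; sort them so the updates are re-applied
+	 * in the order in which they were originally logged.
+	 */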
+ qsort(lc.array, lc.nlsns, sizeof(DB_LSN), __rep_lsn_cmp);
+
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+
+ /* Initialize the getpgno dispatch table. */
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+
+ /*
+ * The set of records for a transaction may include dbreg_register
+ * records. Create a txnlist so that they can keep track of file
+ * state between records.
+ */
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+
+ /* Phase 2: Apply updates. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ for (lsnp = &lc.array[0], i = 0; i < lc.nlsns; i++, lsnp++) {
+ if ((ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, lsnp, NULL, NULL, lockid)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, lsnp, &data_dbt, DB_SET)) != 0)
+ goto err;
+ if ((ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data_dbt, lsnp,
+ DB_TXN_APPLY, txninfo)) != 0)
+ goto err;
+ }
+
+err: memset(&req, 0, sizeof(req));
+ req.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = dbenv->lock_vec(dbenv, lockid,
+ DB_LOCK_FREE_LOCKER, &req, 1, &lvp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (lc.nalloc != 0)
+ __os_free(dbenv, lc.array);
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
+ if (ret == 0)
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_txns_applied++;
+
+ return (ret);
+}
+
+/*
+ * __rep_collect_txn
+ * Recursive function that will let us visit every entry in a transaction
+ * chain including all child transactions so that we can then apply
+ * the entire transaction family at once.
+ */
+static int
+__rep_collect_txn(dbenv, lsnp, lc)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ LSN_COLLECTION *lc;
+{
+ __txn_child_args *argp;
+ DB_LOGC *logc;
+ DB_LSN c_lsn;
+ DBT data;
+ u_int32_t rectype;
+ int nalloc, ret, t_ret;
+
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
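+	/*
+	 * Walk backwards from the commit record by following each record's
+	 * prev_lsn field.  A __txn_child record links in a child transaction,
+	 * so recurse over the child's chain; every other record's LSN is
+	 * added to the collection for later application.
+	 */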
+ while (!IS_ZERO_LSN(*lsnp) &&
+ (ret = logc->get(logc, lsnp, &data, DB_SET)) == 0) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+ if (rectype == DB___txn_child) {
+ if ((ret = __txn_child_read(dbenv,
+ data.data, &argp)) != 0)
+ goto err;
+ c_lsn = argp->c_lsn;
+ *lsnp = argp->prev_lsn;
+ __os_free(dbenv, argp);
+ ret = __rep_collect_txn(dbenv, &c_lsn, lc);
+ } else {
+ if (lc->nalloc < lc->nlsns + 1) {
+ nalloc = lc->nalloc == 0 ? 20 : lc->nalloc * 2;
+ if ((ret = __os_realloc(dbenv,
+ nalloc * sizeof(DB_LSN), &lc->array)) != 0)
+ goto err;
+ lc->nalloc = nalloc;
+ }
+ lc->array[lc->nlsns++] = *lsnp;
+
+ /*
+ * Explicitly copy the previous lsn. The record
+ * starts with a u_int32_t record type, a u_int32_t
+ * txn id, and then the DB_LSN (prev_lsn) that we
+ * want. We copy explicitly because we have no idea
+ * what kind of record this is.
+ */
+ memcpy(lsnp, (u_int8_t *)data.data +
+ sizeof(u_int32_t) + sizeof(u_int32_t),
+ sizeof(DB_LSN));
+ }
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ return (ret);
+}
+
+/*
+ * __rep_lsn_cmp --
+ * qsort-type-compatible wrapper for log_compare.
+ */
+static int
+__rep_lsn_cmp(lsn1, lsn2)
+ const void *lsn1, *lsn2;
+{
+
+ return (log_compare((DB_LSN *)lsn1, (DB_LSN *)lsn2));
+}
+
+/*
+ * __rep_newfile --
+ * NEWFILE messages can contain either the last LSN of the old file
+ * or the first LSN of the new one, depending on which we have available
+ * when the message is sent. When applying a NEWFILE message, make sure
+ * we haven't already swapped files, as it's possible (given the right sequence
+ * of out-of-order messages) to wind up with a NEWFILE message of each
+ * variety, and __rep_apply won't detect the two as duplicates of each other.
+ */
+static int
+__rep_newfile(dbenv, rc, msgdbt, lsnp)
+ DB_ENV *dbenv;
+ REP_CONTROL *rc;
+ DBT *msgdbt;
+ DB_LSN *lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t newfile;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * A NEWFILE message containing the old file's LSN will be
+ * accompanied by a NULL rec DBT; one containing the new one's LSN
+ * will need to supply the last record in the old file by
+ * sending it in the rec DBT.
+ */
+ if (msgdbt == NULL || msgdbt->size == 0)
+ newfile = rc->lsn.file + 1;
+ else
+ newfile = rc->lsn.file;
+
+ if (newfile > lp->lsn.file)
+ return (__log_newfile(dblp, lsnp));
+ else {
+ /* We've already applied this NEWFILE. Just ignore it. */
+ *lsnp = lp->lsn;
+ return (0);
+ }
+}
diff --git a/libdb/rep/rep_region.c b/libdb/rep/rep_region.c
new file mode 100644
index 0000000..f766fd9
--- /dev/null
+++ b/libdb/rep/rep_region.c
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/rep.h"
+#include "dbinc/log.h"
+
+/*
+ * __rep_region_init --
+ * Initialize the shared memory state for the replication system.
+ *
+ * PUBLIC: int __rep_region_init __P((DB_ENV *));
+ */
+int
+__rep_region_init(dbenv)
+ DB_ENV *dbenv;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ DB_MUTEX *db_mutexp;
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if (renv->rep_off == INVALID_ROFF) {
+ /* Must create the region. */
+ if ((ret = __db_shalloc(infop->addr,
+ sizeof(REP), MUTEX_ALIGN, &rep)) != 0)
+ goto err;
+ memset(rep, 0, sizeof(*rep));
+ rep->tally_off = INVALID_ROFF;
+ renv->rep_off = R_OFFSET(infop, rep);
+
+ if ((ret = __db_mutex_setup(dbenv, infop, &rep->mutex,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /*
+ * We must create a place for the db_mutex separately;
+ * mutexes have to be aligned to MUTEX_ALIGN, and the only way
+ * to guarantee that is to make sure they're at the beginning
+ * of a shalloc'ed chunk.
+ */
+ if ((ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX),
+ MUTEX_ALIGN, &db_mutexp)) != 0)
+ goto err;
+ rep->db_mutex_off = R_OFFSET(infop, db_mutexp);
+
+ /*
+ * Because we have no way to prevent deadlocks and cannot log
+ * changes made to it, we single-thread access to the client
+ * bookkeeping database. This is suboptimal, but it only gets
+		 * accessed when messages arrive out of order, so it should
+		 * stay small and should rarely be exercised in a
+		 * high-performance application.
+ */
+ if ((ret = __db_mutex_setup(dbenv, infop, db_mutexp,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /* We have the region; fill in the values. */
+ rep->eid = DB_EID_INVALID;
+ rep->master_id = DB_EID_INVALID;
+ rep->gen = 0;
+
+ /*
+ * Set default values for the min and max log records that we
+ * wait before requesting a missing log record.
+ */
+ rep->request_gap = DB_REP_REQUEST_GAP;
+ rep->max_gap = DB_REP_MAX_GAP;
+ } else
+ rep = R_ADDR(infop, renv->rep_off);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ db_rep->mutexp = &rep->mutex;
+ db_rep->db_mutexp = R_ADDR(infop, rep->db_mutex_off);
+ db_rep->region = rep;
+
+ return (0);
+
+err: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+/*
+ * __rep_region_destroy --
+ * Destroy any system resources allocated in the replication region.
+ *
+ * PUBLIC: int __rep_region_destroy __P((DB_ENV *));
+ */
+int
+__rep_region_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ if (db_rep->mutexp != NULL)
+ ret = __db_mutex_destroy(db_rep->mutexp);
+ if (db_rep->db_mutexp != NULL)
+ t_ret = __db_mutex_destroy(db_rep->db_mutexp);
+ }
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_dbenv_close --
+ * Replication-specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_close __P((DB_ENV *));
+ */
+int
+__rep_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ __os_free(dbenv, db_rep);
+ dbenv->rep_handle = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_preclose --
+ * If we are a client, shut down our client database and, if we're
+ * actually closing the environment, close all databases we've opened
+ * while applying messages.
+ *
+ * PUBLIC: int __rep_preclose __P((DB_ENV *, int));
+ */
+int
+__rep_preclose(dbenv, do_closefiles)
+ DB_ENV *dbenv;
+ int do_closefiles;
+{
+ DB *dbp;
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+
+ /* If replication is not initialized, we have nothing to do. */
+ if ((db_rep = (DB_REP *)dbenv->rep_handle) == NULL)
+ return (0);
+
+ if ((dbp = db_rep->rep_db) != NULL) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->close(dbp, 0);
+ db_rep->rep_db = NULL;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ }
+
+ if (do_closefiles)
+ t_ret = __dbreg_close_files(dbenv);
+
+ return (ret == 0 ? t_ret : ret);
+}
diff --git a/libdb/rep/rep_util.c b/libdb/rep/rep_util.c
new file mode 100644
index 0000000..0f16100
--- /dev/null
+++ b/libdb/rep/rep_util.c
@@ -0,0 +1,867 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * rep_util.c:
+ * Miscellaneous replication-related utility functions, including
+ * those called by other subsystems.
+ */
+static int __rep_cmp_bylsn __P((const void *, const void *));
+static int __rep_cmp_bypage __P((const void *, const void *));
+
+#ifdef REP_DIAGNOSTIC
+static void __rep_print_logmsg __P((DB_ENV *, const DBT *, DB_LSN *));
+#endif
+
+/*
+ * __rep_check_alloc --
+ * Make sure the array of TXN_REC entries is of at least size n.
+ * (This function is called by the __*_getpgnos() functions in
+ * *.src.)
+ *
+ * PUBLIC: int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+ */
+int
+__rep_check_alloc(dbenv, r, n)
+ DB_ENV *dbenv;
+ TXN_RECS *r;
+ int n;
+{
+ int nalloc, ret;
+
+ while (r->nalloc < r->npages + n) {
+ nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2;
+
+ if ((ret = __os_realloc(dbenv, nalloc * sizeof(LSN_PAGE),
+ &r->array)) != 0)
+ return (ret);
+
+ r->nalloc = nalloc;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_send_message --
+ * This is a wrapper for sending a message. It takes care of constructing
+ * the REP_CONTROL structure and calling the user's specified send function.
+ *
+ * PUBLIC: int __rep_send_message __P((DB_ENV *, int,
+ * PUBLIC: u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+ DB_ENV *dbenv;
+ int eid;
+ u_int32_t rtype;
+ DB_LSN *lsnp;
+ const DBT *dbtp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ DBT cdbt, scrap_dbt;
+ REP_CONTROL cntrl;
+ u_int32_t send_flags;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /* Set up control structure. */
+ memset(&cntrl, 0, sizeof(cntrl));
+ if (lsnp == NULL)
+ ZERO_LSN(cntrl.lsn);
+ else
+ cntrl.lsn = *lsnp;
+ cntrl.rectype = rtype;
+ cntrl.flags = flags;
+ cntrl.rep_version = DB_REPVERSION;
+ cntrl.log_version = DB_LOGVERSION;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ cntrl.gen = rep->gen;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ memset(&cdbt, 0, sizeof(cdbt));
+ cdbt.data = &cntrl;
+ cdbt.size = sizeof(cntrl);
+
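+	/*
+	 * The application's send callback gets two DBTs: this control DBT
+	 * (a REP_CONTROL) and the record DBT.  Both are opaque to the
+	 * application and are expected to be delivered, unmodified, to
+	 * rep_process_message at the receiving site.
+	 */
+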
+ /* Don't assume the send function will be tolerant of NULL records. */
+ if (dbtp == NULL) {
+ memset(&scrap_dbt, 0, sizeof(DBT));
+ dbtp = &scrap_dbt;
+ }
+
+ send_flags = (LF_ISSET(DB_PERMANENT) ? DB_REP_PERMANENT : 0);
+
+#if 0
+ __rep_print_message(dbenv, eid, &cntrl, "rep_send_message");
+#endif
+#ifdef REP_DIAGNOSTIC
+ if (rtype == REP_LOG)
+ __rep_print_logmsg(dbenv, dbtp, lsnp);
+#endif
+ ret = db_rep->rep_send(dbenv, &cdbt, dbtp, eid, send_flags);
+
+ /*
+ * We don't hold the rep lock, so this could miscount if we race.
+ * I don't think it's worth grabbing the mutex for that bit of
+ * extra accuracy.
+ */
+ if (ret == 0)
+ rep->stat.st_msgs_sent++;
+ else
+ rep->stat.st_msgs_send_failures++;
+
+ return (ret);
+}
+
+#ifdef REP_DIAGNOSTIC
+
+/*
+ * __rep_print_logmsg --
+ * This is a debugging routine for printing out log records that
+ * we are about to transmit to a client.
+ */
+
+static void
+__rep_print_logmsg(dbenv, logdbt, lsnp)
+ DB_ENV *dbenv;
+ const DBT *logdbt;
+ DB_LSN *lsnp;
+{
+ /* Static structures to hold the printing functions. */
+ static int (**ptab)__P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *)) = NULL;
+	static size_t ptabsize = 0;
+
+ if (ptabsize == 0) {
+ /* Initialize the table. */
+ (void)__bam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__crdel_init_print(dbenv, &ptab, &ptabsize);
+ (void)__db_init_print(dbenv, &ptab, &ptabsize);
+ (void)__dbreg_init_print(dbenv, &ptab, &ptabsize);
+ (void)__fop_init_print(dbenv, &ptab, &ptabsize);
+ (void)__qam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__ham_init_print(dbenv, &ptab, &ptabsize);
+ (void)__txn_init_print(dbenv, &ptab, &ptabsize);
+ }
+
+ (void)__db_dispatch(dbenv,
+ ptab, ptabsize, (DBT *)logdbt, lsnp, DB_TXN_PRINT, NULL);
+}
+
+#endif
+/*
+ * __rep_new_master --
+ * Called after a master election to sync back up with a new master.
+ * It's possible that we already know of this new master in which case
+ * we don't need to do anything.
+ *
+ * This is written assuming that this message came from the master; we
+ * need to enforce that in __rep_process_record, but right now, we have
+ * no way to identify the master.
+ *
+ * PUBLIC: int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_new_master(dbenv, cntrl, eid)
+ DB_ENV *dbenv;
+ REP_CONTROL *cntrl;
+ int eid;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN last_lsn, lsn;
+ DB_REP *db_rep;
+ DBT dbt;
+ LOG *lp;
+ REP *rep;
+ int change, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ change = rep->gen != cntrl->gen || rep->master_id != eid;
+ if (change) {
+ rep->gen = cntrl->gen;
+ rep->master_id = eid;
+ F_SET(rep, REP_F_RECOVER);
+ rep->stat.st_master_changes++;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (!change)
+ return (0);
+
+ /*
+ * If the master changed, we need to start the process of
+ * figuring out what our last valid log record is. However,
+ * if both the master and we agree that the max LSN is 0,0,
+ * then there is no recovery to be done. If we are at 0 and
+ * the master is not, then we just need to request all the log
+ * records from the master.
+ */
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ last_lsn = lsn = lp->lsn;
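+	/*
+	 * lp->lsn is the next LSN to be written; backing up by the length of
+	 * the most recently written record (lp->len) gives the LSN of the
+	 * last record we actually have, which is what we ask the master to
+	 * verify against.
+	 */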
+ if (last_lsn.offset > sizeof(LOGP))
+ last_lsn.offset -= lp->len;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn)) {
+empty: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (IS_INIT_LSN(cntrl->lsn))
+ ret = 0;
+ else
+ ret = __rep_send_message(dbenv, rep->master_id,
+ REP_ALL_REQ, &lsn, NULL, 0);
+
+ if (ret == 0)
+ ret = DB_REP_NEWMASTER;
+ return (ret);
+ } else if (last_lsn.offset <= sizeof(LOGP)) {
+ /*
+ * We have just changed log files and need to set lastlsn
+ * to the last record in the previous log files.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(dbt));
+ ret = logc->get(logc, &last_lsn, &dbt, DB_LAST);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret == DB_NOTFOUND)
+ goto empty;
+ if (ret != 0)
+ return (ret);
+ }
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = last_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_VERIFY_REQ, &last_lsn, NULL, 0)) != 0)
+ return (ret);
+
+ return (DB_REP_NEWMASTER);
+}
+
+/*
+ * __rep_lockpgno_init
+ * Create a dispatch table for acquiring locks on each log record.
+ *
+ * PUBLIC: int __rep_lockpgno_init __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__rep_lockpgno_init(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ /* Initialize dispatch table. */
+ *dtabsizep = 0;
+ *dtabp = NULL;
+ if ((ret = __bam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __crdel_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __db_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __fop_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __qam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __ham_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __txn_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __rep_unlockpages --
+ * Unlock the pages locked in __rep_lockpages.
+ *
+ * PUBLIC: int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+ */
+int
+__rep_unlockpages(dbenv, lid)
+ DB_ENV *dbenv;
+ u_int32_t lid;
+{
+ DB_LOCKREQ req, *lvp;
+
+ req.op = DB_LOCK_PUT_ALL;
+ return (dbenv->lock_vec(dbenv, lid, 0, &req, 1, &lvp));
+}
+
+/*
+ * __rep_lockpages --
+ * Called to gather and lock pages in preparation for both
+ * single transaction apply as well as client synchronization
+ * with a new master. A non-NULL key_lsn means that we're locking
+ * in order to apply a single log record during client recovery
+ * to the joint LSN. A non-NULL max_lsn means that we are applying
+ * a transaction whose commit is at max_lsn.
+ *
+ * PUBLIC: int __rep_lockpages __P((DB_ENV *,
+ * PUBLIC: int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t, DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+ */
+int
+__rep_lockpages(dbenv, dtab, dtabsize, key_lsn, max_lsn, recs, lid)
+ DB_ENV *dbenv;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DB_LSN *key_lsn, *max_lsn;
+ TXN_RECS *recs;
+ u_int32_t lid;
+{
+ DBT data_dbt, lo;
+ DB_LOCK l;
+ DB_LOCKREQ *lvp;
+ DB_LOGC *logc;
+ DB_LSN tmp_lsn;
+ TXN_RECS tmp, *t;
+ db_pgno_t cur_pgno;
+ linfo_t locks;
+ int i, ret, t_ret, unique;
+ u_int32_t cur_fid;
+
+ /*
+ * There are two phases: First, we have to traverse backwards through
+ * the log records gathering the list of all the pages accessed. Once
+ * we have this information we can acquire all the locks we need.
+ */
+
+ /* Initialization */
+ memset(&locks, 0, sizeof(locks));
+ ret = 0;
+
+ t = recs != NULL ? recs : &tmp;
+ t->npages = t->nalloc = 0;
+ t->array = NULL;
+
+ /*
+ * We've got to be in one mode or the other; else life will either
+ * be excessively boring or overly exciting.
+ */
+ DB_ASSERT(key_lsn != NULL || max_lsn != NULL);
+ DB_ASSERT(key_lsn == NULL || max_lsn == NULL);
+
+ /*
+ * Phase 1: Fill in the pgno array.
+ */
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /* Single transaction apply. */
+ if (max_lsn != NULL) {
+ DB_ASSERT(0); /* XXX */
+ /*
+ tmp_lsn = *max_lsn;
+ if ((ret = __rep_apply_thread(dbenv, dtab, dtabsize,
+ &data_dbt, &tmp_lsn, t)) != 0)
+ goto err;
+ */
+ }
+
+ /* In recovery. */
+ if (key_lsn != NULL) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+		if ((ret = logc->get(logc, key_lsn, &data_dbt, DB_SET)) == 0) {
+			/*
+			 * Save lsn values, since dispatch functions can
+			 * change them.
+			 */
+			tmp_lsn = *key_lsn;
+			ret = __db_dispatch(dbenv, dtab,
+			    dtabsize, &data_dbt, &tmp_lsn, DB_TXN_GETPGNOS, t);
+		}
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * If ret == DB_DELETED, this record refers to a temporary
+ * file and there's nothing to apply.
+ */
+ if (ret == DB_DELETED) {
+ ret = 0;
+ goto out;
+ } else if (ret != 0)
+ goto err;
+ }
+
+ if (t->npages == 0)
+ goto out;
+
+ /* Phase 2: Write lock all the pages. */
+
+ /* Sort the entries in the array by page number. */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bypage);
+
+ /* Count the number of unique pages. */
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ unique = 0;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ unique++;
+ }
+ }
+
+ if (unique == 0)
+ goto out;
+
+ /* Handle single lock case specially, else allocate space for locks. */
+ if (unique == 1) {
+ memset(&lo, 0, sizeof(lo));
+ lo.data = &t->array[0].pgdesc;
+ lo.size = sizeof(t->array[0].pgdesc);
+ ret = dbenv->lock_get(dbenv, lid, 0, &lo, DB_LOCK_WRITE, &l);
+ goto out2;
+ }
+
+ /* Multi-lock case. */
+ locks.n = unique;
+ if ((ret = __os_calloc(dbenv,
+ unique, sizeof(DB_LOCKREQ), &locks.reqs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, unique, sizeof(DBT), &locks.objs)) != 0)
+ goto err;
+
+ unique = 0;
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ locks.reqs[unique].op = DB_LOCK_GET;
+ locks.reqs[unique].mode = DB_LOCK_WRITE;
+ locks.reqs[unique].obj = &locks.objs[unique];
+ locks.objs[unique].data = &t->array[i].pgdesc;
+ locks.objs[unique].size = sizeof(t->array[i].pgdesc);
+ unique++;
+ }
+ }
+
+ /* Finally, get the locks. */
+ if ((ret =
+ dbenv->lock_vec(dbenv, lid, 0, locks.reqs, unique, &lvp)) != 0) {
+ /*
+ * If we were unsuccessful, unlock any locks we acquired before
+ * the error and return the original error value.
+ */
+ (void)__rep_unlockpages(dbenv, lid);
+ }
+
+err:
+out: if (locks.objs != NULL)
+ __os_free(dbenv, locks.objs);
+ if (locks.reqs != NULL)
+ __os_free(dbenv, locks.reqs);
+
+ /*
+ * Before we return, sort by LSN so that we apply records in the
+ * right order.
+ */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bylsn);
+
+out2: if ((ret != 0 || recs == NULL) && t->nalloc != 0) {
+ __os_free(dbenv, t->array);
+ t->array = NULL;
+ t->npages = t->nalloc = 0;
+ }
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ return (ret);
+}
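
Taken together, the three routines above are meant to be used as a sequence: __rep_lockpgno_init builds the getpgnos dispatch table once, __rep_lockpages gathers and write-locks every page the log record touches, and __rep_unlockpages drops the locks afterwards. A minimal sketch of the single-record (key_lsn) case, assuming key_lsn has already been filled in and that the locker id comes from the public DB_ENV->lock_id and lock_id_free methods; the internal signatures follow the PUBLIC comments above:

	int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
	size_t dtabsize;
	TXN_RECS recs;
	DB_LSN key_lsn;			/* Assumed: set to the record to apply. */
	u_int32_t lid;
	int ret;

	if ((ret = dbenv->lock_id(dbenv, &lid)) != 0)
		return (ret);
	if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) == 0) {
		if ((ret = __rep_lockpages(dbenv,
		    dtab, dtabsize, &key_lsn, NULL, &recs, lid)) == 0) {
			/* ... apply the log record at key_lsn here ... */
			ret = __rep_unlockpages(dbenv, lid);
			if (recs.nalloc != 0)
				__os_free(dbenv, recs.array);
		}
		__os_free(dbenv, dtab);
	}
	(void)dbenv->lock_id_free(dbenv, lid);
	return (ret);

Because recs is passed in non-NULL, the page array survives a successful call and must be freed by the caller, matching the out2 cleanup in __rep_lockpages above.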
+
+/*
+ * __rep_cmp_bypage and __rep_cmp_bylsn --
+ *	Sort functions for qsort.  "bypage" sorts first by file id and page
+ * number, then by the LSN.  "bylsn" sorts first by the LSN, then by file
+ * id and page number.
+ */
+static int
+__rep_cmp_bypage(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ return (0);
+}
+
+static int
+__rep_cmp_bylsn(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * __rep_is_client
+ * Used by other subsystems to figure out if this is a replication
+ * client site.
+ *
+ * PUBLIC: int __rep_is_client __P((DB_ENV *));
+ */
+int
+__rep_is_client(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ if ((db_rep = dbenv->rep_handle) == NULL)
+ return (0);
+ rep = db_rep->region;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = F_ISSET(rep, REP_F_UPGRADE | REP_F_LOGSONLY);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_send_vote
+ * Send this site's vote for the election.
+ *
+ * PUBLIC: int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int));
+ */
+int
+__rep_send_vote(dbenv, lsnp, nsites, pri, tiebreaker)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, pri, tiebreaker;
+{
+ DBT vote_dbt;
+ REP_VOTE_INFO vi;
+
+ memset(&vi, 0, sizeof(vi));
+
+ vi.priority = pri;
+ vi.nsites = nsites;
+ vi.tiebreaker = tiebreaker;
+
+ memset(&vote_dbt, 0, sizeof(vote_dbt));
+ vote_dbt.data = &vi;
+ vote_dbt.size = sizeof(vi);
+
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_VOTE1, lsnp, &vote_dbt, 0));
+}
+
+/*
+ * __rep_grow_sites --
+ * Called to allocate more space in the election tally information.
+ * Called with the rep mutex held.  We need to acquire the region mutex, so
+ * we need to make sure that we *never* acquire those mutexes in the
+ * opposite order.
+ *
+ * PUBLIC: int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+ */
+int
+__rep_grow_sites(dbenv, nsites)
+ DB_ENV *dbenv;
+ int nsites;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REP *rep;
+ int nalloc, ret, *tally;
+
+ rep = ((DB_REP *)dbenv->rep_handle)->region;
+
+ /*
+ * Allocate either twice the current allocation or nsites,
+ * whichever is more.
+ */
+
+ nalloc = 2 * rep->asites;
+ if (nalloc < nsites)
+ nalloc = nsites;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if ((ret = __db_shalloc(infop->addr,
+	    nalloc * sizeof(int), sizeof(int), &tally)) == 0) {
+ if (rep->tally_off != INVALID_ROFF)
+ __db_shalloc_free(infop->addr,
+ R_ADDR(infop, rep->tally_off));
+ rep->asites = nalloc;
+ rep->nsites = nsites;
+ rep->tally_off = R_OFFSET(infop, tally);
+ }
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+#ifdef NOTYET
+static int __rep_send_file __P((DB_ENV *, DBT *, u_int32_t));
+/*
+ * __rep_send_file --
+ * Send an entire file, one block at a time.
+ */
+static int
+__rep_send_file(dbenv, rec, eid)
+ DB_ENV *dbenv;
+ DBT *rec;
+ u_int32_t eid;
+{
+ DB *dbp;
+ DB_LOCK lk;
+ DB_MPOOLFILE *mpf;
+ DBC *dbc;
+ DBT rec_dbt;
+ PAGE *pagep;
+ db_pgno_t last_pgno, pgno;
+ int ret, t_ret;
+
+ dbp = NULL;
+ dbc = NULL;
+ pagep = NULL;
+ mpf = NULL;
+ LOCK_INIT(lk);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+	if ((ret = dbp->open(dbp, rec->data, NULL, DB_UNKNOWN, 0, 0)) != 0)
+		goto err;
+	mpf = dbp->mpf;		/* Read pages through the database's mpool handle. */
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+ /*
+	 * Force last_pgno to some value that will let us read the meta-data
+ * page in the following loop.
+ */
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+ last_pgno = 1;
+ for (pgno = 0; pgno <= last_pgno; pgno++) {
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lk)) != 0)
+ goto err;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ goto err;
+
+ if (pgno == 0)
+ last_pgno = ((DBMETA *)pagep)->last_pgno;
+
+ rec_dbt.data = pagep;
+ rec_dbt.size = dbp->pgsize;
+ if ((ret = __rep_send_message(dbenv, eid,
+ REP_FILE, NULL, &rec_dbt, pgno == last_pgno)) != 0)
+ goto err;
+ ret = mpf->put(mpf, pagep, 0);
+ pagep = NULL;
+ if (ret != 0)
+ goto err;
+ ret = __LPUT(dbc, lk);
+ LOCK_INIT(lk);
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (LOCK_ISSET(lk) && (t_ret = __LPUT(dbc, lk)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagep != NULL && (t_ret = mpf->put(mpf, pagep, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+#endif
+
+#if 0
+/*
+ * PUBLIC: void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *));
+ */
+void
+__rep_print_message(dbenv, eid, rp, str)
+ DB_ENV *dbenv;
+ int eid;
+ REP_CONTROL *rp;
+ char *str;
+{
+ char *type;
+ switch (rp->rectype) {
+ case REP_ALIVE:
+ type = "alive";
+ break;
+ case REP_ALIVE_REQ:
+ type = "alive_req";
+ break;
+ case REP_ALL_REQ:
+ type = "all_req";
+ break;
+ case REP_ELECT:
+ type = "elect";
+ break;
+ case REP_FILE:
+ type = "file";
+ break;
+ case REP_FILE_REQ:
+ type = "file_req";
+ break;
+ case REP_LOG:
+ type = "log";
+ break;
+ case REP_LOG_MORE:
+ type = "log_more";
+ break;
+ case REP_LOG_REQ:
+ type = "log_req";
+ break;
+ case REP_MASTER_REQ:
+ type = "master_req";
+ break;
+ case REP_NEWCLIENT:
+ type = "newclient";
+ break;
+ case REP_NEWFILE:
+ type = "newfile";
+ break;
+ case REP_NEWMASTER:
+ type = "newmaster";
+ break;
+ case REP_NEWSITE:
+ type = "newsite";
+ break;
+ case REP_PAGE:
+ type = "page";
+ break;
+ case REP_PAGE_REQ:
+ type = "page_req";
+ break;
+ case REP_PLIST:
+ type = "plist";
+ break;
+ case REP_PLIST_REQ:
+ type = "plist_req";
+ break;
+ case REP_VERIFY:
+ type = "verify";
+ break;
+ case REP_VERIFY_FAIL:
+ type = "verify_fail";
+ break;
+ case REP_VERIFY_REQ:
+ type = "verify_req";
+ break;
+ case REP_VOTE1:
+ type = "vote1";
+ break;
+ case REP_VOTE2:
+ type = "vote2";
+ break;
+ default:
+ type = "NOTYPE";
+ break;
+ }
+ printf("%s %s: gen = %d eid %d, type %s, LSN [%u][%u]\n",
+ dbenv->db_home, str, rp->gen, eid, type, rp->lsn.file,
+ rp->lsn.offset);
+}
+#endif
diff --git a/libdb/rpc_client/client.c b/libdb/rpc_client/client.c
new file mode 100644
index 0000000..bc6b9f6
--- /dev/null
+++ b/libdb/rpc_client/client.c
@@ -0,0 +1,464 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_VXWORKS
+#include <rpcLib.h>
+#endif
+#include <rpc/rpc.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+static int __dbcl_c_destroy __P((DBC *));
+static int __dbcl_txn_close __P((DB_ENV *));
+
+/*
+ * __dbcl_envrpcserver --
+ * Initialize an environment's server.
+ *
+ * PUBLIC: int __dbcl_envrpcserver
+ * PUBLIC: __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+ */
+int
+__dbcl_envrpcserver(dbenv, clnt, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *clnt;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ struct timeval tp;
+
+ COMPQUIET(flags, 0);
+
+#ifdef HAVE_VXWORKS
+ if (rpcTaskInit() != 0) {
+ __db_err(dbenv, "Could not initialize VxWorks RPC");
+ return (ERROR);
+ }
+#endif
+ if (RPC_ON(dbenv)) {
+ __db_err(dbenv, "Already set an RPC handle");
+ return (EINVAL);
+ }
+ /*
+ * Only create the client and set its timeout if the user
+ * did not pass us a client structure to begin with.
+ */
+ if (clnt == NULL) {
+ if ((cl = clnt_create((char *)host, DB_RPC_SERVERPROG,
+ DB_RPC_SERVERVERS, "tcp")) == NULL) {
+ __db_err(dbenv, clnt_spcreateerror((char *)host));
+ return (DB_NOSERVER);
+ }
+ if (tsec != 0) {
+ tp.tv_sec = tsec;
+ tp.tv_usec = 0;
+ (void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
+ }
+ } else {
+ cl = (CLIENT *)clnt;
+ F_SET(dbenv, DB_ENV_RPCCLIENT_GIVEN);
+ }
+ dbenv->cl_handle = cl;
+
+ return (__dbcl_env_create(dbenv, ssec));
+}
+
+/*
+ * __dbcl_env_open_wrap --
+ * Wrapper function for DB_ENV->open function for clients.
+ * We need a wrapper function to deal with DB_USE_ENVIRON* flags
+ * and we don't want to complicate the generated code for env_open.
+ *
+ * PUBLIC: int __dbcl_env_open_wrap
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbcl_env_open_wrap(dbenv, home, flags, mode)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+{
+ int ret;
+
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ if ((ret = __db_home(dbenv, home, flags)) != 0)
+ return (ret);
+ return (__dbcl_env_open(dbenv, dbenv->db_home, flags, mode));
+}
+
+/*
+ * __dbcl_db_open_wrap --
+ * Wrapper function for DB->open function for clients.
+ *	We need a wrapper function to error on the DB_THREAD flag,
+ *	and we don't want to complicate the generated code.
+ *
+ * PUBLIC: int __dbcl_db_open_wrap
+ * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *,
+ * PUBLIC: DBTYPE, u_int32_t, int));
+ */
+int
+__dbcl_db_open_wrap(dbp, txnp, name, subdb, type, flags, mode)
+ DB * dbp;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbp->dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ return (__dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode));
+}
+
+/*
+ * __dbcl_refresh --
+ * Clean up an environment.
+ *
+ * PUBLIC: int __dbcl_refresh __P((DB_ENV *));
+ */
+int
+__dbcl_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ CLIENT *cl;
+ int ret;
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ ret = 0;
+ if (dbenv->tx_handle != NULL) {
+ /*
+		 * We only need to free up our stuff; the caller
+		 * of this function will call the server, which will
+		 * do all the real work.
+ */
+ ret = __dbcl_txn_close(dbenv);
+ dbenv->tx_handle = NULL;
+ }
+ if (!F_ISSET(dbenv, DB_ENV_RPCCLIENT_GIVEN) && cl != NULL)
+ clnt_destroy(cl);
+ dbenv->cl_handle = NULL;
+ if (dbenv->db_home != NULL) {
+ __os_free(dbenv, dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __dbcl_retcopy --
+ * Copy the returned data into the user's DBT, handling allocation flags,
+ * but not DB_DBT_PARTIAL.
+ *
+ * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__dbcl_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ int ret;
+ u_int32_t orig_flags;
+
+ /*
+ * The RPC server handles DB_DBT_PARTIAL, so we mask it out here to
+ * avoid the handling of partials in __db_retcopy.
+ */
+ orig_flags = dbt->flags;
+ F_CLR(dbt, DB_DBT_PARTIAL);
+ ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize);
+ dbt->flags = orig_flags;
+ return (ret);
+}
+
+/*
+ * __dbcl_txn_close --
+ * Clean up an environment's transactions.
+ */
+int
+__dbcl_txn_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+ * This function can only be called once per process (i.e., not
+ * once per thread), so no synchronization is required.
+ * Also this function is called *after* the server has been called,
+ * so the server has already closed/aborted any transactions that
+ * were open on its side. We only need to do local cleanup.
+ */
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL)
+ __dbcl_txn_end(txnp);
+
+ __os_free(dbenv, tmgrp);
+ return (ret);
+
+}
+
+/*
+ * __dbcl_txn_end --
+ *	Clean up a transaction.
+ * RECURSIVE FUNCTION: Clean up nested transactions.
+ *
+ * PUBLIC: void __dbcl_txn_end __P((DB_TXN *));
+ */
+void
+__dbcl_txn_end(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *kids;
+ DB_TXNMGR *mgr;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+
+ /*
+ * First take care of any kids we have
+ */
+ for (kids = TAILQ_FIRST(&txnp->kids);
+ kids != NULL;
+ kids = TAILQ_FIRST(&txnp->kids))
+ __dbcl_txn_end(kids);
+
+ /*
+ * We are ending this transaction no matter what the parent
+ * may eventually do, if we have a parent. All those details
+ * are taken care of by the server. We only need to make sure
+ * that we properly release resources.
+ */
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ __os_free(dbenv, txnp);
+}
+
+/*
+ * __dbcl_txn_setup --
+ * Setup a client transaction structure.
+ *
+ * PUBLIC: void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
+ */
+void
+__dbcl_txn_setup(dbenv, txn, parent, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXN *parent;
+ u_int32_t id;
+{
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ txn->txnid = id;
+
+ /*
+ * XXX
+	 * In the DB library the txn_chain is protected by the mgrp->mutexp.
+	 * However, that mutex is implemented in the environment's shared
+	 * memory region.  The client library does not support any of the
+	 * region code - that all just gets forwarded to the server.  Therefore,
+ * the chain is unprotected here, but properly protected on the
+ * server.
+ */
+ TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
+
+ TAILQ_INIT(&txn->kids);
+
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ txn->abort = __dbcl_txn_abort;
+ txn->commit = __dbcl_txn_commit;
+ txn->discard = __dbcl_txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __dbcl_txn_prepare;
+ txn->set_timeout = __dbcl_txn_timeout;
+
+ txn->flags = TXN_MALLOC;
+}
+
+/*
+ * __dbcl_c_destroy --
+ * Destroy a cursor.
+ */
+static int
+__dbcl_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ dbp = dbc->dbp;
+
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ /* Discard any memory used to store returned data. */
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rdata.data);
+ __os_free(NULL, dbc);
+
+ return (0);
+}
+
+/*
+ * __dbcl_c_refresh --
+ * Refresh a cursor. Move it from the active queue to the free queue.
+ *
+ * PUBLIC: void __dbcl_c_refresh __P((DBC *));
+ */
+void
+__dbcl_c_refresh(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ dbp = dbc->dbp;
+ dbc->flags = 0;
+ dbc->cl_id = 0;
+
+ /*
+ * If dbp->cursor fails locally, we use a local dbc so that
+ * we can close it. In that case, dbp will be NULL.
+ */
+ if (dbp != NULL) {
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
+ }
+}
+
+/*
+ * __dbcl_c_setup --
+ * Allocate a cursor.
+ *
+ * PUBLIC: int __dbcl_c_setup __P((long, DB *, DBC **));
+ */
+int
+__dbcl_c_setup(cl_id, dbp, dbcp)
+ long cl_id;
+ DB *dbp;
+ DBC **dbcp;
+{
+ DBC *dbc, tmpdbc;
+ int ret;
+
+ if ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ else {
+ if ((ret =
+ __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0) {
+ /*
+ * If we die here, set up a tmp dbc to call the
+ * server to shut down that cursor.
+ */
+ tmpdbc.dbp = NULL;
+ tmpdbc.cl_id = cl_id;
+ (void)__dbcl_dbc_close(&tmpdbc);
+ return (ret);
+ }
+ dbc->c_close = __dbcl_dbc_close;
+ dbc->c_count = __dbcl_dbc_count;
+ dbc->c_del = __dbcl_dbc_del;
+ dbc->c_dup = __dbcl_dbc_dup;
+ dbc->c_get = __dbcl_dbc_get;
+ dbc->c_pget = __dbcl_dbc_pget;
+ dbc->c_put = __dbcl_dbc_put;
+ dbc->c_am_destroy = __dbcl_c_destroy;
+ }
+ dbc->cl_id = cl_id;
+ dbc->dbp = dbp;
+ TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
+ *dbcp = dbc;
+ return (0);
+}
+
+/*
+ * __dbcl_dbclose_common --
+ * Common code for closing/cleaning a dbp.
+ *
+ * PUBLIC: int __dbcl_dbclose_common __P((DB *));
+ */
+int
+__dbcl_dbclose_common(dbp)
+ DB *dbp;
+{
+ int ret, t_ret;
+ DBC *dbc;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine.
+ *
+ * NOTE: We do not need to use the join_queue for join cursors.
+ * See comment in __dbcl_dbjoin_ret.
+ */
+ ret = 0;
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ __dbcl_c_refresh(dbc);
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __dbcl_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
+
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(NULL, dbp);
+ return (ret);
+}
+#endif /* HAVE_RPC */
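
For orientation, nothing in this file is called directly by applications: the path into __dbcl_envrpcserver starts from the public API when an environment handle is created as an RPC client and pointed at a server. A hedged sketch of that application-side setup, assuming the DB_CLIENT create flag and the DB_ENV->set_rpc_server method of this Berkeley DB release, with a placeholder host name and home directory:

	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0)
		return (ret);
	/* Point the handle at the server; 0/0 keep the default timeouts. */
	if ((ret = dbenv->set_rpc_server(dbenv,
	    NULL, "db.example.com", 0, 0, 0)) == 0)
		ret = dbenv->open(dbenv,
		    "/var/dbhome", DB_CREATE | DB_INIT_MPOOL | DB_INIT_TXN, 0);
	if (ret != 0)
		(void)dbenv->close(dbenv, 0);
	return (ret);

From then on, environment and database calls are marshalled through the generated client stubs rather than touching a local region, which is why the __dbcl_* methods above only do local bookkeeping.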
diff --git a/libdb/rpc_client/db_server_clnt.c b/libdb/rpc_client/db_server_clnt.c
new file mode 100644
index 0000000..9b57a23
--- /dev/null
+++ b/libdb/rpc_client/db_server_clnt.c
@@ -0,0 +1,870 @@
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <rpc/rpc.h>
+
+#include <strings.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+
+/* Default timeout can be changed using clnt_control() */
+static struct timeval TIMEOUT = { 25, 0 };
+
+__env_cachesize_reply *
+__db_env_cachesize_4001(argp, clnt)
+ __env_cachesize_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_cachesize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_cachesize,
+ (xdrproc_t) xdr___env_cachesize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_cachesize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_close_reply *
+__db_env_close_4001(argp, clnt)
+ __env_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_close,
+ (xdrproc_t) xdr___env_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_create_reply *
+__db_env_create_4001(argp, clnt)
+ __env_create_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_create_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_create,
+ (xdrproc_t) xdr___env_create_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_create_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_dbremove_reply *
+__db_env_dbremove_4001(argp, clnt)
+ __env_dbremove_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_dbremove_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_dbremove,
+ (xdrproc_t) xdr___env_dbremove_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_dbremove_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_dbrename_reply *
+__db_env_dbrename_4001(argp, clnt)
+ __env_dbrename_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_dbrename_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_dbrename,
+ (xdrproc_t) xdr___env_dbrename_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_dbrename_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_encrypt_reply *
+__db_env_encrypt_4001(argp, clnt)
+ __env_encrypt_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_encrypt_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_encrypt,
+ (xdrproc_t) xdr___env_encrypt_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_encrypt_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_flags_reply *
+__db_env_flags_4001(argp, clnt)
+ __env_flags_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_flags_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_flags,
+ (xdrproc_t) xdr___env_flags_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_flags_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_open_reply *
+__db_env_open_4001(argp, clnt)
+ __env_open_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_open_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_open,
+ (xdrproc_t) xdr___env_open_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_open_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__env_remove_reply *
+__db_env_remove_4001(argp, clnt)
+ __env_remove_msg *argp;
+ CLIENT *clnt;
+{
+ static __env_remove_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_env_remove,
+ (xdrproc_t) xdr___env_remove_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___env_remove_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_abort_reply *
+__db_txn_abort_4001(argp, clnt)
+ __txn_abort_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_abort_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_abort,
+ (xdrproc_t) xdr___txn_abort_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_abort_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_begin_reply *
+__db_txn_begin_4001(argp, clnt)
+ __txn_begin_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_begin_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_begin,
+ (xdrproc_t) xdr___txn_begin_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_begin_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_commit_reply *
+__db_txn_commit_4001(argp, clnt)
+ __txn_commit_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_commit_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_commit,
+ (xdrproc_t) xdr___txn_commit_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_commit_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_discard_reply *
+__db_txn_discard_4001(argp, clnt)
+ __txn_discard_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_discard_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_discard,
+ (xdrproc_t) xdr___txn_discard_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_discard_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_prepare_reply *
+__db_txn_prepare_4001(argp, clnt)
+ __txn_prepare_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_prepare_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_prepare,
+ (xdrproc_t) xdr___txn_prepare_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_prepare_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__txn_recover_reply *
+__db_txn_recover_4001(argp, clnt)
+ __txn_recover_msg *argp;
+ CLIENT *clnt;
+{
+ static __txn_recover_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_txn_recover,
+ (xdrproc_t) xdr___txn_recover_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___txn_recover_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_associate_reply *
+__db_db_associate_4001(argp, clnt)
+ __db_associate_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_associate_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_associate,
+ (xdrproc_t) xdr___db_associate_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_associate_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_bt_maxkey_reply *
+__db_db_bt_maxkey_4001(argp, clnt)
+ __db_bt_maxkey_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_bt_maxkey_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_bt_maxkey,
+ (xdrproc_t) xdr___db_bt_maxkey_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_bt_maxkey_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_bt_minkey_reply *
+__db_db_bt_minkey_4001(argp, clnt)
+ __db_bt_minkey_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_bt_minkey_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_bt_minkey,
+ (xdrproc_t) xdr___db_bt_minkey_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_bt_minkey_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_close_reply *
+__db_db_close_4001(argp, clnt)
+ __db_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_close,
+ (xdrproc_t) xdr___db_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_create_reply *
+__db_db_create_4001(argp, clnt)
+ __db_create_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_create_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_create,
+ (xdrproc_t) xdr___db_create_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_create_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_del_reply *
+__db_db_del_4001(argp, clnt)
+ __db_del_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_del_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_del,
+ (xdrproc_t) xdr___db_del_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_del_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_encrypt_reply *
+__db_db_encrypt_4001(argp, clnt)
+ __db_encrypt_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_encrypt_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_encrypt,
+ (xdrproc_t) xdr___db_encrypt_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_encrypt_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_extentsize_reply *
+__db_db_extentsize_4001(argp, clnt)
+ __db_extentsize_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_extentsize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_extentsize,
+ (xdrproc_t) xdr___db_extentsize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_extentsize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_flags_reply *
+__db_db_flags_4001(argp, clnt)
+ __db_flags_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_flags_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_flags,
+ (xdrproc_t) xdr___db_flags_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_flags_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_get_reply *
+__db_db_get_4001(argp, clnt)
+ __db_get_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_get_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_get,
+ (xdrproc_t) xdr___db_get_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_get_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_h_ffactor_reply *
+__db_db_h_ffactor_4001(argp, clnt)
+ __db_h_ffactor_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_h_ffactor_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_h_ffactor,
+ (xdrproc_t) xdr___db_h_ffactor_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_h_ffactor_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_h_nelem_reply *
+__db_db_h_nelem_4001(argp, clnt)
+ __db_h_nelem_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_h_nelem_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_h_nelem,
+ (xdrproc_t) xdr___db_h_nelem_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_h_nelem_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_key_range_reply *
+__db_db_key_range_4001(argp, clnt)
+ __db_key_range_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_key_range_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_key_range,
+ (xdrproc_t) xdr___db_key_range_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_key_range_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_lorder_reply *
+__db_db_lorder_4001(argp, clnt)
+ __db_lorder_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_lorder_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_lorder,
+ (xdrproc_t) xdr___db_lorder_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_lorder_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_open_reply *
+__db_db_open_4001(argp, clnt)
+ __db_open_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_open_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_open,
+ (xdrproc_t) xdr___db_open_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_open_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_pagesize_reply *
+__db_db_pagesize_4001(argp, clnt)
+ __db_pagesize_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_pagesize_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_pagesize,
+ (xdrproc_t) xdr___db_pagesize_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_pagesize_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_pget_reply *
+__db_db_pget_4001(argp, clnt)
+ __db_pget_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_pget_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_pget,
+ (xdrproc_t) xdr___db_pget_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_pget_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_put_reply *
+__db_db_put_4001(argp, clnt)
+ __db_put_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_put_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_put,
+ (xdrproc_t) xdr___db_put_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_put_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_delim_reply *
+__db_db_re_delim_4001(argp, clnt)
+ __db_re_delim_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_delim_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_delim,
+ (xdrproc_t) xdr___db_re_delim_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_delim_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_len_reply *
+__db_db_re_len_4001(argp, clnt)
+ __db_re_len_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_len_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_len,
+ (xdrproc_t) xdr___db_re_len_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_len_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_re_pad_reply *
+__db_db_re_pad_4001(argp, clnt)
+ __db_re_pad_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_re_pad_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_re_pad,
+ (xdrproc_t) xdr___db_re_pad_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_re_pad_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_remove_reply *
+__db_db_remove_4001(argp, clnt)
+ __db_remove_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_remove_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_remove,
+ (xdrproc_t) xdr___db_remove_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_remove_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_rename_reply *
+__db_db_rename_4001(argp, clnt)
+ __db_rename_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_rename_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_rename,
+ (xdrproc_t) xdr___db_rename_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_rename_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_stat_reply *
+__db_db_stat_4001(argp, clnt)
+ __db_stat_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_stat_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_stat,
+ (xdrproc_t) xdr___db_stat_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_stat_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_sync_reply *
+__db_db_sync_4001(argp, clnt)
+ __db_sync_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_sync_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_sync,
+ (xdrproc_t) xdr___db_sync_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_sync_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_truncate_reply *
+__db_db_truncate_4001(argp, clnt)
+ __db_truncate_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_truncate_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_truncate,
+ (xdrproc_t) xdr___db_truncate_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_truncate_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_cursor_reply *
+__db_db_cursor_4001(argp, clnt)
+ __db_cursor_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_cursor_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_cursor,
+ (xdrproc_t) xdr___db_cursor_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_cursor_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__db_join_reply *
+__db_db_join_4001(argp, clnt)
+ __db_join_msg *argp;
+ CLIENT *clnt;
+{
+ static __db_join_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_db_join,
+ (xdrproc_t) xdr___db_join_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___db_join_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_close_reply *
+__db_dbc_close_4001(argp, clnt)
+ __dbc_close_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_close_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_close,
+ (xdrproc_t) xdr___dbc_close_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_close_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_count_reply *
+__db_dbc_count_4001(argp, clnt)
+ __dbc_count_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_count_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_count,
+ (xdrproc_t) xdr___dbc_count_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_count_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_del_reply *
+__db_dbc_del_4001(argp, clnt)
+ __dbc_del_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_del_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_del,
+ (xdrproc_t) xdr___dbc_del_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_del_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_dup_reply *
+__db_dbc_dup_4001(argp, clnt)
+ __dbc_dup_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_dup_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_dup,
+ (xdrproc_t) xdr___dbc_dup_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_dup_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_get_reply *
+__db_dbc_get_4001(argp, clnt)
+ __dbc_get_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_get_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_get,
+ (xdrproc_t) xdr___dbc_get_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_get_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_pget_reply *
+__db_dbc_pget_4001(argp, clnt)
+ __dbc_pget_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_pget_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_pget,
+ (xdrproc_t) xdr___dbc_pget_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_pget_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+
+__dbc_put_reply *
+__db_dbc_put_4001(argp, clnt)
+ __dbc_put_msg *argp;
+ CLIENT *clnt;
+{
+ static __dbc_put_reply clnt_res;
+
+ memset((char *)&clnt_res, 0, sizeof (clnt_res));
+ if (clnt_call(clnt, __DB_dbc_put,
+ (xdrproc_t) xdr___dbc_put_msg, (caddr_t) argp,
+ (xdrproc_t) xdr___dbc_put_reply, (caddr_t) &clnt_res,
+ TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return (&clnt_res);
+}
+#endif /* HAVE_RPC */
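
One practical point about these rpcgen-generated stubs: each returns a pointer to a static reply structure that is reused by the next call, so the caller must copy out what it needs and release any XDR-allocated memory before calling again; the generated wrappers in gen_client.c below do exactly that with xdr_free. A minimal sketch, assuming an already-connected CLIENT handle cl and an environment handle with a valid cl_id:

	__env_close_msg msg;
	__env_close_reply *replyp;
	int ret;

	msg.dbenvcl_id = dbenv->cl_id;
	msg.flags = 0;
	if ((replyp = __db_env_close_4001(&msg, cl)) == NULL)
		return (DB_NOSERVER);	/* RPC call failed or timed out. */
	ret = replyp->status;
	xdr_free((xdrproc_t)xdr___env_close_reply, (void *)replyp);
	return (ret);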
diff --git a/libdb/rpc_client/gen_client.c b/libdb/rpc_client/gen_client.c
new file mode 100644
index 0000000..aed4946
--- /dev/null
+++ b/libdb/rpc_client/gen_client.c
@@ -0,0 +1,3274 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+#include <rpc/xdr.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+static int __dbcl_noserver __P((DB_ENV *));
+
+static int
+__dbcl_noserver(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "No server environment");
+ return (DB_NOSERVER);
+}
+
+static int __dbcl_rpc_illegal __P((DB_ENV *, char *));
+
+static int
+__dbcl_rpc_illegal(dbenv, name)
+ DB_ENV *dbenv;
+ char *name;
+{
+ __db_err(dbenv, "%s method meaningless in an RPC environment", name);
+ return (__db_eopnotsup(dbenv));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_alloc __P((DB_ENV *, void *(*)(size_t),
+ * PUBLIC: void *(*)(void *, size_t), void (*)(void *)));
+ */
+int
+__dbcl_env_alloc(dbenv, func0, func1, func2)
+ DB_ENV * dbenv;
+ void *(*func0) __P((size_t));
+ void *(*func1) __P((void *, size_t));
+ void (*func2) __P((void *));
+{
+ COMPQUIET(func0, 0);
+ COMPQUIET(func1, 0);
+ COMPQUIET(func2, 0);
+ return (__dbcl_rpc_illegal(dbenv, "env_alloc"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_app_dispatch __P((DB_ENV *, int (*)(DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops)));
+ */
+int
+__dbcl_set_app_dispatch(dbenv, func0)
+ DB_ENV * dbenv;
+ int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_app_dispatch"));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ */
+int
+__dbcl_env_cachesize(dbenv, gbytes, bytes, ncache)
+ DB_ENV * dbenv;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ int ncache;
+{
+ CLIENT *cl;
+ __env_cachesize_msg msg;
+ __env_cachesize_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ msg.gbytes = gbytes;
+ msg.bytes = bytes;
+ msg.ncache = ncache;
+
+ replyp = __db_env_cachesize_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_cachesize_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_close __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_env_close(dbenv, flags)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_close_msg msg;
+ __env_close_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_env_close_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_env_close_ret(dbenv, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_close_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_create __P((DB_ENV *, long));
+ */
+int
+__dbcl_env_create(dbenv, timeout)
+ DB_ENV * dbenv;
+ long timeout;
+{
+ CLIENT *cl;
+ __env_create_msg msg;
+ __env_create_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ msg.timeout = timeout;
+
+ replyp = __db_env_create_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_env_create_ret(dbenv, timeout, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_create_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_set_data_dir __P((DB_ENV *, const char *));
+ */
+int
+__dbcl_set_data_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_data_dir"));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_dbremove __P((DB_ENV *, DB_TXN *, const char *,
+ * PUBLIC: const char *, u_int32_t));
+ */
+int
+__dbcl_env_dbremove(dbenv, txnp, name, subdb, flags)
+ DB_ENV * dbenv;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_dbremove_msg msg;
+ __env_dbremove_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ if (name == NULL)
+ msg.name = "";
+ else
+ msg.name = (char *)name;
+ if (subdb == NULL)
+ msg.subdb = "";
+ else
+ msg.subdb = (char *)subdb;
+ msg.flags = flags;
+
+ replyp = __db_env_dbremove_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_dbremove_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_dbrename __P((DB_ENV *, DB_TXN *, const char *,
+ * PUBLIC: const char *, const char *, u_int32_t));
+ */
+int
+__dbcl_env_dbrename(dbenv, txnp, name, subdb, newname, flags)
+ DB_ENV * dbenv;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ const char * newname;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_dbrename_msg msg;
+ __env_dbrename_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ if (name == NULL)
+ msg.name = "";
+ else
+ msg.name = (char *)name;
+ if (subdb == NULL)
+ msg.subdb = "";
+ else
+ msg.subdb = (char *)subdb;
+ if (newname == NULL)
+ msg.newname = "";
+ else
+ msg.newname = (char *)newname;
+ msg.flags = flags;
+
+ replyp = __db_env_dbrename_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_dbrename_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_encrypt __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__dbcl_env_encrypt(dbenv, passwd, flags)
+ DB_ENV * dbenv;
+ const char * passwd;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_encrypt_msg msg;
+ __env_encrypt_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (passwd == NULL)
+ msg.passwd = "";
+ else
+ msg.passwd = (char *)passwd;
+ msg.flags = flags;
+
+ replyp = __db_env_encrypt_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_encrypt_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int,
+ * PUBLIC: int)));
+ */
+int
+__dbcl_env_set_feedback(dbenv, func0)
+ DB_ENV * dbenv;
+ void (*func0) __P((DB_ENV *, int, int));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "env_set_feedback"));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_flags __P((DB_ENV *, u_int32_t, int));
+ */
+int
+__dbcl_env_flags(dbenv, flags, onoff)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+ CLIENT *cl;
+ __env_flags_msg msg;
+ __env_flags_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ msg.flags = flags;
+ msg.onoff = onoff;
+
+ replyp = __db_env_flags_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_flags_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lg_bsize __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lg_bsize(dbenv, bsize)
+ DB_ENV * dbenv;
+ u_int32_t bsize;
+{
+ COMPQUIET(bsize, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_bsize"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lg_dir __P((DB_ENV *, const char *));
+ */
+int
+__dbcl_set_lg_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_dir"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lg_max __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lg_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_max"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lg_regionmax __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lg_regionmax(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lg_regionmax"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_conflict __P((DB_ENV *, u_int8_t *, int));
+ */
+int
+__dbcl_set_lk_conflict(dbenv, conflicts, modes)
+ DB_ENV * dbenv;
+ u_int8_t * conflicts;
+ int modes;
+{
+ COMPQUIET(conflicts, 0);
+ COMPQUIET(modes, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_conflict"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_detect __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lk_detect(dbenv, detect)
+ DB_ENV * dbenv;
+ u_int32_t detect;
+{
+ COMPQUIET(detect, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_detect"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_max __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lk_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lk_max_locks(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_locks"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lk_max_lockers(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_lockers"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_lk_max_objects(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_lk_max_objects"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_mp_mmapsize __P((DB_ENV *, size_t));
+ */
+int
+__dbcl_set_mp_mmapsize(dbenv, mmapsize)
+ DB_ENV * dbenv;
+ size_t mmapsize;
+{
+ COMPQUIET(mmapsize, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_mp_mmapsize"));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_open __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbcl_env_open(dbenv, home, flags, mode)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+{
+ CLIENT *cl;
+ __env_open_msg msg;
+ __env_open_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (home == NULL)
+ msg.home = "";
+ else
+ msg.home = (char *)home;
+ msg.flags = flags;
+ msg.mode = mode;
+
+ replyp = __db_env_open_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_env_open_ret(dbenv, home, flags, mode, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_open_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ */
+int
+__dbcl_env_paniccall(dbenv, func0)
+ DB_ENV * dbenv;
+ void (*func0) __P((DB_ENV *, int));
+{
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "env_paniccall"));
+}
+
+/*
+ * PUBLIC: int __dbcl_env_remove __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__dbcl_env_remove(dbenv, home, flags)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __env_remove_msg msg;
+ __env_remove_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (home == NULL)
+ msg.home = "";
+ else
+ msg.home = (char *)home;
+ msg.flags = flags;
+
+ replyp = __db_env_remove_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_env_remove_ret(dbenv, home, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___env_remove_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_set_shm_key __P((DB_ENV *, long));
+ */
+int
+__dbcl_set_shm_key(dbenv, shm_key)
+ DB_ENV * dbenv;
+ long shm_key;
+{
+ COMPQUIET(shm_key, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_shm_key"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_tas_spins __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_tas_spins(dbenv, tas_spins)
+ DB_ENV * dbenv;
+ u_int32_t tas_spins;
+{
+ COMPQUIET(tas_spins, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tas_spins"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_timeout __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__dbcl_set_timeout(dbenv, timeout, flags)
+ DB_ENV * dbenv;
+ u_int32_t timeout;
+ u_int32_t flags;
+{
+ COMPQUIET(timeout, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_timeout"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_tmp_dir __P((DB_ENV *, const char *));
+ */
+int
+__dbcl_set_tmp_dir(dbenv, dir)
+ DB_ENV * dbenv;
+ const char * dir;
+{
+ COMPQUIET(dir, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "set_tmp_dir"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_tx_max __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_set_tx_max(dbenv, max)
+ DB_ENV * dbenv;
+ u_int32_t max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tx_max"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_tx_timestamp __P((DB_ENV *, time_t *));
+ */
+int
+__dbcl_set_tx_timestamp(dbenv, max)
+ DB_ENV * dbenv;
+ time_t * max;
+{
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_tx_timestamp"));
+}
+
+/*
+ * PUBLIC: int __dbcl_set_verbose __P((DB_ENV *, u_int32_t, int));
+ */
+int
+__dbcl_set_verbose(dbenv, which, onoff)
+ DB_ENV * dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ COMPQUIET(which, 0);
+ COMPQUIET(onoff, 0);
+ return (__dbcl_rpc_illegal(dbenv, "set_verbose"));
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_abort __P((DB_TXN *));
+ */
+int
+__dbcl_txn_abort(txnp)
+ DB_TXN * txnp;
+{
+ CLIENT *cl;
+ __txn_abort_msg msg;
+ __txn_abort_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+
+ replyp = __db_txn_abort_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_txn_abort_ret(txnp, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_abort_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV * dbenv;
+ DB_TXN * parent;
+ DB_TXN ** txnpp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_begin_msg msg;
+ __txn_begin_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ if (parent == NULL)
+ msg.parentcl_id = 0;
+ else
+ msg.parentcl_id = parent->txnid;
+ msg.flags = flags;
+
+ replyp = __db_txn_begin_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_txn_begin_ret(dbenv, parent, txnpp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_begin_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_txn_checkpoint(dbenv, kbyte, min, flags)
+ DB_ENV * dbenv;
+ u_int32_t kbyte;
+ u_int32_t min;
+ u_int32_t flags;
+{
+ COMPQUIET(kbyte, 0);
+ COMPQUIET(min, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "txn_checkpoint"));
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_commit __P((DB_TXN *, u_int32_t));
+ */
+int
+__dbcl_txn_commit(txnp, flags)
+ DB_TXN * txnp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_commit_msg msg;
+ __txn_commit_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.flags = flags;
+
+ replyp = __db_txn_commit_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_txn_commit_ret(txnp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_commit_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_discard __P((DB_TXN *, u_int32_t));
+ */
+int
+__dbcl_txn_discard(txnp, flags)
+ DB_TXN * txnp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_discard_msg msg;
+ __txn_discard_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.flags = flags;
+
+ replyp = __db_txn_discard_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_txn_discard_ret(txnp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_discard_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_prepare __P((DB_TXN *, u_int8_t *));
+ */
+int
+__dbcl_txn_prepare(txnp, gid)
+ DB_TXN * txnp;
+ u_int8_t * gid;
+{
+ CLIENT *cl;
+ __txn_prepare_msg msg;
+ __txn_prepare_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = txnp->mgrp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ memcpy(msg.gid, gid, 128);
+
+ replyp = __db_txn_prepare_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_prepare_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_txn_recover(dbenv, preplist, count, retp, flags)
+ DB_ENV * dbenv;
+ DB_PREPLIST * preplist;
+ long count;
+ long * retp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __txn_recover_msg msg;
+ __txn_recover_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ msg.count = count;
+ msg.flags = flags;
+
+ replyp = __db_txn_recover_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___txn_recover_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ */
+int
+__dbcl_txn_stat(dbenv, statp, flags)
+ DB_ENV * dbenv;
+ DB_TXN_STAT ** statp;
+ u_int32_t flags;
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "txn_stat"));
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_timeout __P((DB_TXN *, u_int32_t, u_int32_t));
+ */
+int
+__dbcl_txn_timeout(txnp, timeout, flags)
+ DB_TXN * txnp;
+ u_int32_t timeout;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = txnp->mgrp->dbenv;
+ COMPQUIET(timeout, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "txn_timeout"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+ */
+int
+__dbcl_rep_elect(dbenv, nsites, pri, timeout, idp)
+ DB_ENV * dbenv;
+ int nsites;
+ int pri;
+ u_int32_t timeout;
+ int * idp;
+{
+ COMPQUIET(nsites, 0);
+ COMPQUIET(pri, 0);
+ COMPQUIET(timeout, 0);
+ COMPQUIET(idp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_elect"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_flush __P((DB_ENV *));
+ */
+int
+__dbcl_rep_flush(dbenv)
+ DB_ENV * dbenv;
+{
+ return (__dbcl_rpc_illegal(dbenv, "rep_flush"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+ */
+int
+__dbcl_rep_process_message(dbenv, rec, control, idp)
+ DB_ENV * dbenv;
+ DBT * rec;
+ DBT * control;
+ int * idp;
+{
+ COMPQUIET(rec, NULL);
+ COMPQUIET(control, NULL);
+ COMPQUIET(idp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_process_message"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__dbcl_rep_set_limit(dbenv, mbytes, bytes)
+ DB_ENV * dbenv;
+ u_int32_t mbytes;
+ u_int32_t bytes;
+{
+ COMPQUIET(mbytes, 0);
+ COMPQUIET(bytes, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_set_limit"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__dbcl_rep_set_request(dbenv, min, max)
+ DB_ENV * dbenv;
+ u_int32_t min;
+ u_int32_t max;
+{
+ COMPQUIET(min, 0);
+ COMPQUIET(max, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_set_request"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_set_rep_transport __P((DB_ENV *, int,
+ * PUBLIC: int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+ */
+int
+__dbcl_rep_set_rep_transport(dbenv, id, func0)
+ DB_ENV * dbenv;
+ int id;
+ int (*func0) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+{
+ COMPQUIET(id, 0);
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_set_rep_transport"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_start __P((DB_ENV *, DBT *, u_int32_t));
+ */
+int
+__dbcl_rep_start(dbenv, cdata, flags)
+ DB_ENV * dbenv;
+ DBT * cdata;
+ u_int32_t flags;
+{
+ COMPQUIET(cdata, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_start"));
+}
+
+/*
+ * PUBLIC: int __dbcl_rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ */
+int
+__dbcl_rep_stat(dbenv, statp, flags)
+ DB_ENV * dbenv;
+ DB_REP_STAT ** statp;
+ u_int32_t flags;
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "rep_stat"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_alloc __P((DB *, void *(*)(size_t), void *(*)(void *,
+ * PUBLIC: size_t), void (*)(void *)));
+ */
+int
+__dbcl_db_alloc(dbp, func0, func1, func2)
+ DB * dbp;
+ void *(*func0) __P((size_t));
+ void *(*func1) __P((void *, size_t));
+ void (*func2) __P((void *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ COMPQUIET(func1, 0);
+ COMPQUIET(func2, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_alloc"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *,
+ * PUBLIC: const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__dbcl_db_associate(dbp, txnp, sdbp, func0, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DB * sdbp;
+ int (*func0) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_associate_msg msg;
+ __db_associate_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (func0 != NULL) {
+ __db_err(dbenv, "User functions not supported in RPC");
+ return (EINVAL);
+ }
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ if (sdbp == NULL)
+ msg.sdbpcl_id = 0;
+ else
+ msg.sdbpcl_id = sdbp->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_associate_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_associate_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_bt_compare __P((DB *, int (*)(DB *, const DBT *,
+ * PUBLIC: const DBT *)));
+ */
+int
+__dbcl_db_bt_compare(dbp, func0)
+ DB * dbp;
+ int (*func0) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_bt_compare"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_bt_maxkey __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_bt_maxkey(dbp, maxkey)
+ DB * dbp;
+ u_int32_t maxkey;
+{
+ CLIENT *cl;
+ __db_bt_maxkey_msg msg;
+ __db_bt_maxkey_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.maxkey = maxkey;
+
+ replyp = __db_db_bt_maxkey_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_bt_maxkey_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_bt_minkey __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_bt_minkey(dbp, minkey)
+ DB * dbp;
+ u_int32_t minkey;
+{
+ CLIENT *cl;
+ __db_bt_minkey_msg msg;
+ __db_bt_minkey_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.minkey = minkey;
+
+ replyp = __db_db_bt_minkey_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_bt_minkey_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *,
+ * PUBLIC: const DBT *)));
+ */
+int
+__dbcl_db_bt_prefix(dbp, func0)
+ DB * dbp;
+ size_t (*func0) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_bt_prefix"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_set_append_recno __P((DB *, int (*)(DB *, DBT *,
+ * PUBLIC: db_recno_t)));
+ */
+int
+__dbcl_db_set_append_recno(dbp, func0)
+ DB * dbp;
+ int (*func0) __P((DB *, DBT *, db_recno_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_set_append_recno"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_cache_priority __P((DB *, DB_CACHE_PRIORITY));
+ */
+int
+__dbcl_db_cache_priority(dbp, priority)
+ DB * dbp;
+ DB_CACHE_PRIORITY priority;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(priority, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_cache_priority"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+ */
+int
+__dbcl_db_cachesize(dbp, gbytes, bytes, ncache)
+ DB * dbp;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ int ncache;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(gbytes, 0);
+ COMPQUIET(bytes, 0);
+ COMPQUIET(ncache, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_cachesize"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_close __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_close(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_close_msg msg;
+ __db_close_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_close_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_close_ret(dbp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_close_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_create __P((DB *, DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_db_create(dbp, dbenv, flags)
+ DB * dbp;
+ DB_ENV * dbenv;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_create_msg msg;
+ __db_create_reply *replyp = NULL;
+ int ret;
+
+ ret = 0;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(dbenv));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbenv == NULL)
+ msg.dbenvcl_id = 0;
+ else
+ msg.dbenvcl_id = dbenv->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_create_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_create_ret(dbp, dbenv, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_create_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__dbcl_db_del(dbp, txnp, key, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_del_msg msg;
+ __db_del_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.flags = flags;
+
+ replyp = __db_db_del_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_del_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_dup_compare __P((DB *, int (*)(DB *, const DBT *,
+ * PUBLIC: const DBT *)));
+ */
+int
+__dbcl_db_dup_compare(dbp, func0)
+ DB * dbp;
+ int (*func0) __P((DB *, const DBT *, const DBT *));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_dup_compare"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_encrypt __P((DB *, const char *, u_int32_t));
+ */
+int
+__dbcl_db_encrypt(dbp, passwd, flags)
+ DB * dbp;
+ const char * passwd;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_encrypt_msg msg;
+ __db_encrypt_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (passwd == NULL)
+ msg.passwd = "";
+ else
+ msg.passwd = (char *)passwd;
+ msg.flags = flags;
+
+ replyp = __db_db_encrypt_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_encrypt_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_extentsize __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_extentsize(dbp, extentsize)
+ DB * dbp;
+ u_int32_t extentsize;
+{
+ CLIENT *cl;
+ __db_extentsize_msg msg;
+ __db_extentsize_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.extentsize = extentsize;
+
+ replyp = __db_db_extentsize_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_extentsize_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_fd __P((DB *, int *));
+ */
+int
+__dbcl_db_fd(dbp, fdp)
+ DB * dbp;
+ int * fdp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(fdp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_fd"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_feedback __P((DB *, void (*)(DB *, int, int)));
+ */
+int
+__dbcl_db_feedback(dbp, func0)
+ DB * dbp;
+ void (*func0) __P((DB *, int, int));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_feedback"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_flags __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_flags(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_flags_msg msg;
+ __db_flags_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_flags_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_flags_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__dbcl_db_get(dbp, txnp, key, data, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_get_msg msg;
+ __db_get_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_db_get_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_get_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_h_ffactor __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_h_ffactor(dbp, ffactor)
+ DB * dbp;
+ u_int32_t ffactor;
+{
+ CLIENT *cl;
+ __db_h_ffactor_msg msg;
+ __db_h_ffactor_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.ffactor = ffactor;
+
+ replyp = __db_db_h_ffactor_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_h_ffactor_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_h_hash __P((DB *, u_int32_t(*)(DB *, const void *,
+ * PUBLIC: u_int32_t)));
+ */
+int
+__dbcl_db_h_hash(dbp, func0)
+ DB * dbp;
+ u_int32_t (*func0) __P((DB *, const void *, u_int32_t));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_h_hash"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_h_nelem __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_h_nelem(dbp, nelem)
+ DB * dbp;
+ u_int32_t nelem;
+{
+ CLIENT *cl;
+ __db_h_nelem_msg msg;
+ __db_h_nelem_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.nelem = nelem;
+
+ replyp = __db_db_h_nelem_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_h_nelem_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_db_key_range(dbp, txnp, key, range, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DB_KEY_RANGE * range;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_key_range_msg msg;
+ __db_key_range_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.flags = flags;
+
+ replyp = __db_db_key_range_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_key_range_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_lorder __P((DB *, int));
+ */
+int
+__dbcl_db_lorder(dbp, lorder)
+ DB * dbp;
+ int lorder;
+{
+ CLIENT *cl;
+ __db_lorder_msg msg;
+ __db_lorder_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.lorder = lorder;
+
+ replyp = __db_db_lorder_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_lorder_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_open __P((DB *, DB_TXN *, const char *, const char *,
+ * PUBLIC: DBTYPE, u_int32_t, int));
+ */
+int
+__dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode)
+ DB * dbp;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ CLIENT *cl;
+ __db_open_msg msg;
+ __db_open_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ if (name == NULL)
+ msg.name = "";
+ else
+ msg.name = (char *)name;
+ if (subdb == NULL)
+ msg.subdb = "";
+ else
+ msg.subdb = (char *)subdb;
+ msg.type = type;
+ msg.flags = flags;
+ msg.mode = mode;
+
+ replyp = __db_db_open_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_open_ret(dbp, txnp, name, subdb, type, flags, mode, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_open_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_pagesize __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_pagesize(dbp, pagesize)
+ DB * dbp;
+ u_int32_t pagesize;
+{
+ CLIENT *cl;
+ __db_pagesize_msg msg;
+ __db_pagesize_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.pagesize = pagesize;
+
+ replyp = __db_db_pagesize_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_pagesize_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_panic __P((DB *, void (*)(DB_ENV *, int)));
+ */
+int
+__dbcl_db_panic(dbp, func0)
+ DB * dbp;
+ void (*func0) __P((DB_ENV *, int));
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(func0, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_panic"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_db_pget(dbp, txnp, skey, pkey, data, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_pget_msg msg;
+ __db_pget_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.skeydlen = skey->dlen;
+ msg.skeydoff = skey->doff;
+ msg.skeyulen = skey->ulen;
+ msg.skeyflags = skey->flags;
+ msg.skeydata.skeydata_val = skey->data;
+ msg.skeydata.skeydata_len = skey->size;
+ msg.pkeydlen = pkey->dlen;
+ msg.pkeydoff = pkey->doff;
+ msg.pkeyulen = pkey->ulen;
+ msg.pkeyflags = pkey->flags;
+ msg.pkeydata.pkeydata_val = pkey->data;
+ msg.pkeydata.pkeydata_len = pkey->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_db_pget_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_pget_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__dbcl_db_put(dbp, txnp, key, data, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_put_msg msg;
+ __db_put_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_db_put_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_put_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_re_delim __P((DB *, int));
+ */
+int
+__dbcl_db_re_delim(dbp, delim)
+ DB * dbp;
+ int delim;
+{
+ CLIENT *cl;
+ __db_re_delim_msg msg;
+ __db_re_delim_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.delim = delim;
+
+ replyp = __db_db_re_delim_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_re_delim_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_re_len __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_re_len(dbp, len)
+ DB * dbp;
+ u_int32_t len;
+{
+ CLIENT *cl;
+ __db_re_len_msg msg;
+ __db_re_len_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.len = len;
+
+ replyp = __db_db_re_len_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_re_len_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_re_pad __P((DB *, int));
+ */
+int
+__dbcl_db_re_pad(dbp, pad)
+ DB * dbp;
+ int pad;
+{
+ CLIENT *cl;
+ __db_re_pad_msg msg;
+ __db_re_pad_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.pad = pad;
+
+ replyp = __db_db_re_pad_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_re_pad_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_re_source __P((DB *, const char *));
+ */
+int
+__dbcl_db_re_source(dbp, re_source)
+ DB * dbp;
+ const char * re_source;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(re_source, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "db_re_source"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_remove __P((DB *, const char *, const char *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_db_remove(dbp, name, subdb, flags)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_remove_msg msg;
+ __db_remove_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (name == NULL)
+ msg.name = "";
+ else
+ msg.name = (char *)name;
+ if (subdb == NULL)
+ msg.subdb = "";
+ else
+ msg.subdb = (char *)subdb;
+ msg.flags = flags;
+
+ replyp = __db_db_remove_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_remove_ret(dbp, name, subdb, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_remove_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_rename __P((DB *, const char *, const char *,
+ * PUBLIC: const char *, u_int32_t));
+ */
+int
+__dbcl_db_rename(dbp, name, subdb, newname, flags)
+ DB * dbp;
+ const char * name;
+ const char * subdb;
+ const char * newname;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_rename_msg msg;
+ __db_rename_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (name == NULL)
+ msg.name = "";
+ else
+ msg.name = (char *)name;
+ if (subdb == NULL)
+ msg.subdb = "";
+ else
+ msg.subdb = (char *)subdb;
+ if (newname == NULL)
+ msg.newname = "";
+ else
+ msg.newname = (char *)newname;
+ msg.flags = flags;
+
+ replyp = __db_db_rename_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_rename_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__dbcl_db_stat(dbp, sp, flags)
+ DB * dbp;
+ void * sp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_stat_msg msg;
+ __db_stat_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_stat_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_stat_ret(dbp, sp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_sync __P((DB *, u_int32_t));
+ */
+int
+__dbcl_db_sync(dbp, flags)
+ DB * dbp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_sync_msg msg;
+ __db_sync_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_sync_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_sync_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_truncate __P((DB *, DB_TXN *, u_int32_t *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_db_truncate(dbp, txnp, countp, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ u_int32_t * countp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_truncate_msg msg;
+ __db_truncate_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.flags = flags;
+
+ replyp = __db_db_truncate_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_truncate_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_upgrade __P((DB *, const char *, u_int32_t));
+ */
+int
+__dbcl_db_upgrade(dbp, fname, flags)
+ DB * dbp;
+ const char * fname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(fname, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_upgrade"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_verify __P((DB *, const char *, const char *, FILE *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_db_verify(dbp, fname, subdb, outfile, flags)
+ DB * dbp;
+ const char * fname;
+ const char * subdb;
+ FILE * outfile;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+ COMPQUIET(fname, NULL);
+ COMPQUIET(subdb, NULL);
+ COMPQUIET(outfile, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "db_verify"));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ */
+int
+__dbcl_db_cursor(dbp, txnp, dbcpp, flags)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBC ** dbcpp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_cursor_msg msg;
+ __db_cursor_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ if (txnp == NULL)
+ msg.txnpcl_id = 0;
+ else
+ msg.txnpcl_id = txnp->txnid;
+ msg.flags = flags;
+
+ replyp = __db_db_cursor_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_cursor_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_join __P((DB *, DBC **, DBC **, u_int32_t));
+ */
+int
+__dbcl_db_join(dbp, curs, dbcp, flags)
+ DB * dbp;
+ DBC ** curs;
+ DBC ** dbcp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __db_join_msg msg;
+ __db_join_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+ DBC ** cursp;
+ int cursi;
+ u_int32_t * cursq;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbp == NULL)
+ msg.dbpcl_id = 0;
+ else
+ msg.dbpcl_id = dbp->cl_id;
+ for (cursi = 0, cursp = curs; *cursp != 0; cursi++, cursp++)
+ ;
+ msg.curs.curs_len = cursi;
+ if ((ret = __os_calloc(dbenv,
+ msg.curs.curs_len, sizeof(u_int32_t), &msg.curs.curs_val)) != 0)
+ return (ret);
+ for (cursq = msg.curs.curs_val, cursp = curs; cursi--; cursq++, cursp++)
+ *cursq = (*cursp)->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_db_join_4001(&msg, cl);
+ __os_free(dbenv, msg.curs.curs_val);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___db_join_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_close __P((DBC *));
+ */
+int
+__dbcl_dbc_close(dbc)
+ DBC * dbc;
+{
+ CLIENT *cl;
+ __dbc_close_msg msg;
+ __dbc_close_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+
+ replyp = __db_dbc_close_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_close_ret(dbc, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_close_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_count __P((DBC *, db_recno_t *, u_int32_t));
+ */
+int
+__dbcl_dbc_count(dbc, countp, flags)
+ DBC * dbc;
+ db_recno_t * countp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_count_msg msg;
+ __dbc_count_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_dbc_count_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_count_ret(dbc, countp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_count_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_del __P((DBC *, u_int32_t));
+ */
+int
+__dbcl_dbc_del(dbc, flags)
+ DBC * dbc;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_del_msg msg;
+ __dbc_del_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_dbc_del_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = replyp->status;
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_del_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_dup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__dbcl_dbc_dup(dbc, dbcp, flags)
+ DBC * dbc;
+ DBC ** dbcp;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_dup_msg msg;
+ __dbc_dup_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.flags = flags;
+
+ replyp = __db_dbc_dup_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_dup_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__dbcl_dbc_get(dbc, key, data, flags)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_get_msg msg;
+ __dbc_get_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_dbc_get_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_get_ret(dbc, key, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_get_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__dbcl_dbc_pget(dbc, skey, pkey, data, flags)
+ DBC * dbc;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_pget_msg msg;
+ __dbc_pget_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.skeydlen = skey->dlen;
+ msg.skeydoff = skey->doff;
+ msg.skeyulen = skey->ulen;
+ msg.skeyflags = skey->flags;
+ msg.skeydata.skeydata_val = skey->data;
+ msg.skeydata.skeydata_len = skey->size;
+ msg.pkeydlen = pkey->dlen;
+ msg.pkeydoff = pkey->doff;
+ msg.pkeyulen = pkey->ulen;
+ msg.pkeyflags = pkey->flags;
+ msg.pkeydata.pkeydata_val = pkey->data;
+ msg.pkeydata.pkeydata_len = pkey->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_dbc_pget_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_pget_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_put __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__dbcl_dbc_put(dbc, key, data, flags)
+ DBC * dbc;
+ DBT * key;
+ DBT * data;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ __dbc_put_msg msg;
+ __dbc_put_reply *replyp = NULL;
+ int ret;
+ DB_ENV *dbenv;
+
+ ret = 0;
+ dbenv = dbc->dbp->dbenv;
+ if (dbenv == NULL || !RPC_ON(dbenv))
+ return (__dbcl_noserver(NULL));
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ if (dbc == NULL)
+ msg.dbccl_id = 0;
+ else
+ msg.dbccl_id = dbc->cl_id;
+ msg.keydlen = key->dlen;
+ msg.keydoff = key->doff;
+ msg.keyulen = key->ulen;
+ msg.keyflags = key->flags;
+ msg.keydata.keydata_val = key->data;
+ msg.keydata.keydata_len = key->size;
+ msg.datadlen = data->dlen;
+ msg.datadoff = data->doff;
+ msg.dataulen = data->ulen;
+ msg.dataflags = data->flags;
+ msg.datadata.datadata_val = data->data;
+ msg.datadata.datadata_len = data->size;
+ msg.flags = flags;
+
+ replyp = __db_dbc_put_4001(&msg, cl);
+ if (replyp == NULL) {
+ __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
+ ret = DB_NOSERVER;
+ goto out;
+ }
+ ret = __dbcl_dbc_put_ret(dbc, key, data, flags, replyp);
+out:
+ if (replyp != NULL)
+ xdr_free((xdrproc_t)xdr___dbc_put_reply, (void *)replyp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ */
+int
+__dbcl_lock_detect(dbenv, flags, atype, aborted)
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ u_int32_t atype;
+ int * aborted;
+{
+ COMPQUIET(flags, 0);
+ COMPQUIET(atype, 0);
+ COMPQUIET(aborted, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_detect"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_get __P((DB_ENV *, u_int32_t, u_int32_t,
+ * PUBLIC: const DBT *, db_lockmode_t, DB_LOCK *));
+ */
+int
+__dbcl_lock_get(dbenv, locker, flags, obj, mode, lock)
+ DB_ENV * dbenv;
+ u_int32_t locker;
+ u_int32_t flags;
+ const DBT * obj;
+ db_lockmode_t mode;
+ DB_LOCK * lock;
+{
+ COMPQUIET(locker, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(obj, NULL);
+ COMPQUIET(mode, 0);
+ COMPQUIET(lock, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_get"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_id __P((DB_ENV *, u_int32_t *));
+ */
+int
+__dbcl_lock_id(dbenv, idp)
+ DB_ENV * dbenv;
+ u_int32_t * idp;
+{
+ COMPQUIET(idp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_id"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_id_free __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbcl_lock_id_free(dbenv, id)
+ DB_ENV * dbenv;
+ u_int32_t id;
+{
+ COMPQUIET(id, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_id_free"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_put __P((DB_ENV *, DB_LOCK *));
+ */
+int
+__dbcl_lock_put(dbenv, lock)
+ DB_ENV * dbenv;
+ DB_LOCK * lock;
+{
+ COMPQUIET(lock, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_put"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ */
+int
+__dbcl_lock_stat(dbenv, statp, flags)
+ DB_ENV * dbenv;
+ DB_LOCK_STAT ** statp;
+ u_int32_t flags;
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_stat"));
+}
+
+/*
+ * PUBLIC: int __dbcl_lock_vec __P((DB_ENV *, u_int32_t, u_int32_t,
+ * PUBLIC: DB_LOCKREQ *, int, DB_LOCKREQ **));
+ */
+int
+__dbcl_lock_vec(dbenv, locker, flags, list, nlist, elistp)
+ DB_ENV * dbenv;
+ u_int32_t locker;
+ u_int32_t flags;
+ DB_LOCKREQ * list;
+ int nlist;
+ DB_LOCKREQ ** elistp;
+{
+ COMPQUIET(locker, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(list, 0);
+ COMPQUIET(nlist, 0);
+ COMPQUIET(elistp, 0);
+ return (__dbcl_rpc_illegal(dbenv, "lock_vec"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_archive __P((DB_ENV *, char ***, u_int32_t));
+ */
+int
+__dbcl_log_archive(dbenv, listp, flags)
+ DB_ENV * dbenv;
+ char *** listp;
+ u_int32_t flags;
+{
+ COMPQUIET(listp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_archive"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ */
+int
+__dbcl_log_cursor(dbenv, logcp, flags)
+ DB_ENV * dbenv;
+ DB_LOGC ** logcp;
+ u_int32_t flags;
+{
+ COMPQUIET(logcp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_cursor"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ */
+int
+__dbcl_log_file(dbenv, lsn, namep, len)
+ DB_ENV * dbenv;
+ const DB_LSN * lsn;
+ char * namep;
+ size_t len;
+{
+ COMPQUIET(lsn, NULL);
+ COMPQUIET(namep, NULL);
+ COMPQUIET(len, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_file"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_flush __P((DB_ENV *, const DB_LSN *));
+ */
+int
+__dbcl_log_flush(dbenv, lsn)
+ DB_ENV * dbenv;
+ const DB_LSN * lsn;
+{
+ COMPQUIET(lsn, NULL);
+ return (__dbcl_rpc_illegal(dbenv, "log_flush"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_put __P((DB_ENV *, DB_LSN *, const DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__dbcl_log_put(dbenv, lsn, data, flags)
+ DB_ENV * dbenv;
+ DB_LSN * lsn;
+ const DBT * data;
+ u_int32_t flags;
+{
+ COMPQUIET(lsn, 0);
+ COMPQUIET(data, NULL);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_put"));
+}
+
+/*
+ * PUBLIC: int __dbcl_log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+ */
+int
+__dbcl_log_stat(dbenv, statp, flags)
+ DB_ENV * dbenv;
+ DB_LOG_STAT ** statp;
+ u_int32_t flags;
+{
+ COMPQUIET(statp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "log_stat"));
+}
+
+/*
+ * PUBLIC: int __dbcl_memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ */
+int
+__dbcl_memp_fcreate(dbenv, mpf, flags)
+ DB_ENV * dbenv;
+ DB_MPOOLFILE ** mpf;
+ u_int32_t flags;
+{
+ COMPQUIET(mpf, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_fcreate"));
+}
+
+/*
+ * PUBLIC: int __dbcl_memp_register __P((DB_ENV *, int, int (*)(DB_ENV *,
+ * PUBLIC: db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ */
+int
+__dbcl_memp_register(dbenv, ftype, func0, func1)
+ DB_ENV * dbenv;
+ int ftype;
+ int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+{
+ COMPQUIET(ftype, 0);
+ COMPQUIET(func0, 0);
+ COMPQUIET(func1, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_register"));
+}
+
+/*
+ * PUBLIC: int __dbcl_memp_stat __P((DB_ENV *, DB_MPOOL_STAT **,
+ * PUBLIC: DB_MPOOL_FSTAT ***, u_int32_t));
+ */
+int
+__dbcl_memp_stat(dbenv, gstatp, fstatp, flags)
+ DB_ENV * dbenv;
+ DB_MPOOL_STAT ** gstatp;
+ DB_MPOOL_FSTAT *** fstatp;
+ u_int32_t flags;
+{
+ COMPQUIET(gstatp, 0);
+ COMPQUIET(fstatp, 0);
+ COMPQUIET(flags, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_stat"));
+}
+
+/*
+ * PUBLIC: int __dbcl_memp_sync __P((DB_ENV *, DB_LSN *));
+ */
+int
+__dbcl_memp_sync(dbenv, lsn)
+ DB_ENV * dbenv;
+ DB_LSN * lsn;
+{
+ COMPQUIET(lsn, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_sync"));
+}
+
+/*
+ * PUBLIC: int __dbcl_memp_trickle __P((DB_ENV *, int, int *));
+ */
+int
+__dbcl_memp_trickle(dbenv, pct, nwrotep)
+ DB_ENV * dbenv;
+ int pct;
+ int * nwrotep;
+{
+ COMPQUIET(pct, 0);
+ COMPQUIET(nwrotep, 0);
+ return (__dbcl_rpc_illegal(dbenv, "memp_trickle"));
+}
+
+#endif /* HAVE_RPC */
diff --git a/libdb/rpc_client/gen_client_ret.c b/libdb/rpc_client/gen_client_ret.c
new file mode 100644
index 0000000..70aae86
--- /dev/null
+++ b/libdb/rpc_client/gen_client_ret.c
@@ -0,0 +1,824 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+/*
+ * PUBLIC: int __dbcl_env_close_ret
+ * PUBLIC: __P((DB_ENV *, u_int32_t, __env_close_reply *));
+ */
+int
+__dbcl_env_close_ret(dbenv, flags, replyp)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(NULL, dbenv);
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_create_ret
+ * PUBLIC: __P((DB_ENV *, long, __env_create_reply *));
+ */
+int
+__dbcl_env_create_ret(dbenv, timeout, replyp)
+ DB_ENV * dbenv;
+ long timeout;
+ __env_create_reply *replyp;
+{
+
+ COMPQUIET(timeout, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbenv->cl_id = replyp->envcl_id;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_open_ret __P((DB_ENV *,
+ * PUBLIC: const char *, u_int32_t, int, __env_open_reply *));
+ */
+int
+__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ int mode;
+ __env_open_reply *replyp;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(mode, 0);
+
+ /*
+ * If error, return it.
+ */
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv->cl_id = replyp->envcl_id;
+ /*
+ * If the user requested transactions, then we have some
+ * local client-side setup to do also.
+ */
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+ dbenv->tx_handle = tmgrp;
+ }
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_remove_ret
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+ */
+int
+__dbcl_env_remove_ret(dbenv, home, flags, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(NULL, dbenv);
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+ */
+int
+__dbcl_txn_abort_ret(txnp, replyp)
+ DB_TXN *txnp;
+ __txn_abort_reply *replyp;
+{
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_begin_ret __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+ */
+int
+__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
+ DB_ENV *envp;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+ __dbcl_txn_setup(envp, txn, parent, replyp->txnidcl_id);
+ *txnpp = txn;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_commit_ret
+ * PUBLIC: __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+ */
+int
+__dbcl_txn_commit_ret(txnp, flags, replyp)
+ DB_TXN *txnp;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+int
+__dbcl_txn_discard_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long,
+ * PUBLIC: long *, u_int32_t, __txn_recover_reply *));
+ */
+int
+__dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp)
+ DB_ENV * dbenv;
+ DB_PREPLIST * preplist;
+ long count;
+ long * retp;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+{
+ DB_PREPLIST *prep;
+ DB_TXN *txnarray, *txn;
+ u_int32_t i, *txnid;
+ int ret;
+ u_int8_t *gid;
+
+ COMPQUIET(flags, 0);
+ COMPQUIET(count, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ *retp = (long) replyp->retcount;
+
+ if (replyp->retcount == 0)
+ return (replyp->status);
+
+ if ((ret = __os_calloc(dbenv, replyp->retcount, sizeof(DB_TXN),
+ &txnarray)) != 0)
+ return (ret);
+ /*
+ * We have a bunch of arrays that need to iterate in
+ * lockstep with each other.
+ */
+ i = 0;
+ txn = txnarray;
+ txnid = (u_int32_t *)replyp->txn.txn_val;
+ gid = (u_int8_t *)replyp->gid.gid_val;
+ prep = preplist;
+ while (i++ < replyp->retcount) {
+ __dbcl_txn_setup(dbenv, txn, NULL, *txnid);
+ prep->txn = txn;
+ memcpy(&prep->gid, gid, DB_XIDDATASIZE);
+ /*
+ * Now increment all our array pointers.
+ */
+ txn++;
+ gid += DB_XIDDATASIZE;
+ txnid++;
+ prep++;
+ }
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+ */
+int
+__dbcl_db_close_ret(dbp, flags, replyp)
+ DB *dbp;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_create_ret
+ * PUBLIC: __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
+ */
+int
+__dbcl_db_create_ret(dbp, dbenv, flags, replyp)
+ DB * dbp;
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+{
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbp->cl_id = replyp->dbcl_id;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_get_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+ */
+int
+__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
+ /*
+ * If an error on copying 'data' and we allocated for 'key'
+ * free it before returning the error.
+ */
+ if (ret && oldkey != NULL)
+ __os_free(dbenv, key->data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_key_range_ret __P((DB *, DB_TXN *,
+ * PUBLIC: DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+ */
+int
+__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key;
+ DB_KEY_RANGE *range;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ range->less = replyp->less;
+ range->equal = replyp->equal;
+ range->greater = replyp->greater;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+ */
+int
+__dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+ __db_open_reply *replyp;
+{
+ COMPQUIET(txn, NULL);
+ COMPQUIET(name, NULL);
+ COMPQUIET(subdb, NULL);
+ COMPQUIET(type, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(mode, 0);
+
+ if (replyp->status == 0) {
+ dbp->cl_id = replyp->dbcl_id;
+ dbp->type = replyp->type;
+ /*
+ * We get back the database's byteorder on the server.
+ * Determine if our byteorder is the same or not by
+ * calling __db_set_lorder.
+ *
+ * XXX
+ * This MUST come before we set the flags because
+ * __db_set_lorder checks that it is called before
+ * the open flag is set.
+ */
+ (void)__db_set_lorder(dbp, replyp->lorder);
+
+ /*
+ * XXX
+ * This is only for Tcl which peeks at the dbp flags.
+ * When dbp->get_flags exists, this should go away.
+ */
+ dbp->flags = replyp->dbflags;
+ }
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t, __db_pget_reply *));
+ */
+int
+__dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbp->my_rskey.data,
+ &dbp->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
+ /*
+ * If an error on copying 'data' and we allocated for '*key'
+ * free it before returning the error.
+ */
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_put_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+ */
+int
+__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(data, NULL);
+
+ ret = replyp->status;
+ if (replyp->status == 0 && (flags == DB_APPEND))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_remove_ret __P((DB *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_remove_reply *));
+ */
+int
+__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_rename_ret __P((DB *, const char *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_rename_reply *));
+ */
+int
+__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(newname, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_stat_ret
+ * PUBLIC: __P((DB *, void *, u_int32_t, __db_stat_reply *));
+ */
+int
+__dbcl_db_stat_ret(dbp, sp, flags, replyp)
+ DB *dbp;
+ void *sp;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+{
+ int len, ret;
+ u_int32_t i, *q, *p, *retsp;
+
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0 || sp == NULL)
+ return (replyp->status);
+
+ len = replyp->stats.stats_len * sizeof(u_int32_t);
+ if ((ret = __os_umalloc(dbp->dbenv, len, &retsp)) != 0)
+ return (ret);
+ for (i = 0, q = retsp, p = (u_int32_t *)replyp->stats.stats_val;
+ i < replyp->stats.stats_len; i++, q++, p++)
+ *q = *p;
+ *(u_int32_t **)sp = retsp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *,
+ * PUBLIC: u_int32_t, __db_truncate_reply *));
+ */
+int
+__dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ u_int32_t *countp, flags;
+ __db_truncate_reply *replyp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->count;
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_cursor_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+ */
+int
+__dbcl_db_cursor_ret(dbp, txnp, dbcp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBC **dbcp;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+{
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
+}
+
+/*
+ * PUBLIC: int __dbcl_db_join_ret
+ * PUBLIC: __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+ */
+int
+__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
+ DB *dbp;
+ DBC **curs, **dbcp;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+{
+ COMPQUIET(curs, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+ * We set this up as a normal cursor. We do not need
+ * to treat a join cursor any differently than a normal
+ * cursor, even though DB itself must. We only need the
+ * client-side cursor/db relationship to know what cursors
+ * are open in the db, and to store their ID. Nothing else.
+ */
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+ */
+int
+__dbcl_dbc_close_ret(dbc, replyp)
+ DBC *dbc;
+ __dbc_close_reply *replyp;
+{
+ __dbcl_c_refresh(dbc);
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_count_ret
+ * PUBLIC: __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+ */
+int
+__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
+ DBC *dbc;
+ db_recno_t *countp;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+{
+ COMPQUIET(dbc, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->dupcount;
+
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_dup_ret
+ * PUBLIC: __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+ */
+int
+__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
+ DBC *dbc, **dbcp;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbc->dbp, dbcp));
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_get_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+ */
+int
+__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbc->dbp->dbenv;
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
+
+ /*
+ * If an error on copying 'data' and we allocated for 'key'
+ * free it before returning the error.
+ */
+ if (ret && oldkey != NULL)
+ __os_free(dbenv, key->data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_pget_reply *));
+ */
+int
+__dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp)
+ DBC * dbc;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbc->dbp->dbenv;
+
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbc->my_rskey.data,
+ &dbc->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
+ /*
+ * If an error on copying 'data' and we allocated for '*key'
+ * free it before returning the error.
+ */
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_put_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+ */
+int
+__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+{
+ COMPQUIET(data, NULL);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if (replyp->status == 0 && dbc->dbp->type == DB_RECNO &&
+ (flags == DB_AFTER || flags == DB_BEFORE))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (replyp->status);
+}
+#endif /* HAVE_RPC */
diff --git a/libdb/rpc_server/c/db_server_proc.c b/libdb/rpc_server/c/db_server_proc.c
new file mode 100644
index 0000000..6056749
--- /dev/null
+++ b/libdb/rpc_server/c/db_server_proc.c
@@ -0,0 +1,2500 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/* BEGIN __env_cachesize_proc */
+/*
+ * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, __env_cachesize_reply *));
+ */
+void
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+/*
+ * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
+ */
+void
+__env_close_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+/*
+ * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));
+ */
+void
+__env_create_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *ctp;
+ int ret;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+ if ((ret = db_env_create(&dbenv, 0)) == 0) {
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+/*
+ * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: __env_dbremove_reply *));
+ */
+void
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __env_dbremove_reply *replyp;
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(dbenv, txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+/*
+ * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,
+ * PUBLIC: u_int32_t, __env_dbrename_reply *));
+ */
+void
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, newname, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __env_dbrename_reply *replyp;
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(dbenv, txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+/*
+ * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_encrypt_reply *));
+ */
+void
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)
+ long dbenvcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __env_encrypt_reply *replyp;
+/* END __env_encrypt_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(dbenv, passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+/*
+ * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __env_flags_reply *));
+ */
+void
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(dbenv, flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+/*
+ * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,
+ * PUBLIC: __env_open_reply *));
+ */
+void
+__env_open_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(dbenv, fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+/*
+ * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_remove_reply *));
+ */
+void
+__env_remove_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(dbenv, fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+/*
+ * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));
+ */
+void
+__txn_abort_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort(txnp);
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+/*
+ * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,
+ * PUBLIC: __txn_begin_reply *));
+ */
+void
+__txn_begin_proc(dbenvcl_id, parentcl_id,
+ flags, replyp)
+ long dbenvcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_proc */
+{
+ DB_ENV *dbenv;
+ DB_TXN *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(dbenv, parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+/*
+ * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_commit_reply *));
+ */
+void
+__txn_commit_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+/*
+ * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+void
+__txn_discard_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+/* END __txn_discard_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+/*
+ * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,
+ * PUBLIC: __txn_prepare_reply *));
+ */
+void
+__txn_prepare_proc(txnpcl_id, gid, replyp)
+ long txnpcl_id;
+ u_int8_t *gid;
+ __txn_prepare_reply *replyp;
+/* END __txn_prepare_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(txnp, gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+/*
+ * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __txn_recover_reply *, int *));
+ */
+void
+__txn_recover_proc(dbenvcl_id, count,
+ flags, replyp, freep)
+ long dbenvcl_id;
+ u_int32_t count;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+ int * freep;
+/* END __txn_recover_proc */
+{
+ DB_ENV *dbenv;
+ DB_PREPLIST *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ u_int8_t *gid;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ dbprep = NULL;
+ *freep = 0;
+
+ if ((ret =
+ __os_malloc(dbenv, count * sizeof(DB_PREPLIST), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbenv, dbprep, count, &retcount, flags)) != 0)
+ goto out;
+ /*
+ * If there is nothing, success, but it's easy.
+ */
+ replyp->retcount = retcount;
+ if (retcount == 0) {
+ replyp->txn.txn_val = NULL;
+ replyp->txn.txn_len = 0;
+ replyp->gid.gid_val = NULL;
+ replyp->gid.gid_len = 0;
+ }
+
+ /*
+ * We have our txn list. Now we need to allocate the space for
+ * the txn ID array and the GID array and set them up.
+ */
+ if ((ret = __os_calloc(dbenv, retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv, retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv, replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+ * Now walk through our results, creating parallel arrays
+ * to send back. For each entry we need to create a new
+ * txn ctp and then fill in the array info.
+ */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+ * If we get here, we have success and we have to set freep
+ * so it'll get properly freed next time.
+ */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+ * We had an error in the middle of creating our new txn
+ * ct entries. We have to unwind all that we have done. Ugh.
+ */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv, replyp->txn.txn_val);
+ __os_free(dbenv, replyp->gid.gid_val);
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+/*
+ * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_maxkey_reply *));
+ */
+void
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(dbp, maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+/*
+ * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,
+ * PUBLIC: __db_associate_reply *));
+ */
+void
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ long sdbpcl_id;
+ u_int32_t flags;
+ __db_associate_reply *replyp;
+/* END __db_associate_proc */
+{
+ DB *dbp, *sdbp;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (DB *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(dbp, txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+/*
+ * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_minkey_reply *));
+ */
+void
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(dbp, minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+/*
+ * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
+ */
+void
+__db_close_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+/*
+ * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
+ */
+void
+__db_create_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+/* END __db_create_proc */
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+ * We actually require env's for databases. The client should
+ * have caught it, but just in case.
+ */
+ DB_ASSERT(dbenv != NULL);
+ if ((ret = db_create(&dbp, dbenv, flags)) == 0) {
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+/*
+ * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
+ */
+void
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+
+ /* Set up key DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.flags = keyflags;
+ key.size = keysize;
+ key.data = keydata;
+
+ ret = dbp->del(dbp, txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+/*
+ * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __db_encrypt_reply *));
+ */
+void
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)
+ long dbpcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __db_encrypt_reply *replyp;
+/* END __db_encrypt_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(dbp, passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+/*
+ * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_extentsize_reply *));
+ */
+void
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(dbp, extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+/*
+ * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
+ */
+void
+__db_flags_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(dbp, flags);
+ dbp_ctp->ct_dbdp.setflags |= flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+/*
+ * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *));
+ */
+void
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_proc */
+{
+ DB *dbp;
+ DBT key, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.ulen = keyulen;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.ulen = dataulen;
+ /*
+ * Ignore memory related flags on server.
+ */
+ data.size = datasize;
+ data.data = datadata;
+ if (flags & DB_MULTIPLE) {
+ if (data.data == 0) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.ulen, &data.data);
+ if (ret != 0)
+ goto err;
+ bulk_alloc = 1;
+ }
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(dbp, txnp, &key, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ key.size, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->dbenv, data.data);
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+/*
+ * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_ffactor_reply *));
+ */
+void
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(dbp, ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+/*
+ * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_nelem_reply *));
+ */
+void
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(dbp, nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+/*
+ * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
+ */
+void
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_KEY_RANGE range;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.size = keysize;
+ key.data = keydata;
+ key.flags = keyflags;
+
+ ret = dbp->key_range(dbp, txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+/*
+ * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
+ */
+void
+__db_lorder_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(dbp, lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+/*
+ * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *));
+ */
+void
+__db_open_proc(dbpcl_id, txnpcl_id, name,
+ subdb, type, flags, mode, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_proc */
+{
+ DB *dbp;
+ DB_TXN *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(dbp, txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(dbp, &dbtype);
+ replyp->type = dbtype;
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(dbp, &isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+/*
+ * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_pagesize_reply *));
+ */
+void
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(dbp, pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+/*
+ * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *));
+ */
+void
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,
+ skeydoff, skeyulen, skeyflags, skeydata,
+ skeysize, pkeydlen, pkeydoff, pkeyulen,
+ pkeyflags, pkeydata, pkeysize, datadlen,
+ datadoff, dataulen, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+ int * freep;
+/* END __db_pget_proc */
+{
+ DB *dbp;
+ DBT skey, pkey, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&skey, 0, sizeof(skey));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data DBT */
+ skey.flags = DB_DBT_MALLOC;
+ skey.dlen = skeydlen;
+ skey.ulen = skeyulen;
+ skey.doff = skeydoff;
+ if (skeyflags & DB_DBT_PARTIAL)
+ skey.flags |= DB_DBT_PARTIAL;
+ skey.size = skeysize;
+ skey.data = skeydata;
+
+ pkey.flags = DB_DBT_MALLOC;
+ pkey.dlen = pkeydlen;
+ pkey.ulen = pkeyulen;
+ pkey.doff = pkeydoff;
+ if (pkeyflags & DB_DBT_PARTIAL)
+ pkey.flags |= DB_DBT_PARTIAL;
+ pkey.size = pkeysize;
+ pkey.data = pkeydata;
+
+ data.flags = DB_DBT_MALLOC;
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(dbp, txnp, &skey, &pkey, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.data == skeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ skey.size, &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.data,
+ skey.size);
+ } else
+ replyp->skeydata.skeydata_val = skey.data;
+
+ replyp->skeydata.skeydata_len = skey.size;
+
+ /*
+ * Primary key
+ */
+ if (pkey.data == pkeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ pkey.size, &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+ pkey.size);
+ } else
+ replyp->pkeydata.pkeydata_val = pkey.data;
+ replyp->pkeydata.pkeydata_len = pkey.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+/*
+ * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *));
+ */
+void
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+ int * freep;
+/* END __db_put_proc */
+{
+ DB *dbp;
+ DBT key, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(dbp, txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ key.size, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+/*
+ * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,
+ * PUBLIC: __db_re_delim_reply *));
+ */
+void
+__db_re_delim_proc(dbpcl_id, delim, replyp)
+ long dbpcl_id;
+ u_int32_t delim;
+ __db_re_delim_reply *replyp;
+/* END __db_re_delim_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(dbp, delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+/*
+ * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
+ */
+void
+__db_re_len_proc(dbpcl_id, len, replyp)
+ long dbpcl_id;
+ u_int32_t len;
+ __db_re_len_reply *replyp;
+/* END __db_re_len_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(dbp, len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+/*
+ * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
+ */
+void
+__db_re_pad_proc(dbpcl_id, pad, replyp)
+ long dbpcl_id;
+ u_int32_t pad;
+ __db_re_pad_reply *replyp;
+/* END __db_re_pad_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(dbp, pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+/*
+ * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,
+ * PUBLIC: __db_remove_reply *));
+ */
+void
+__db_remove_proc(dbpcl_id, name, subdb,
+ flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+/* END __db_remove_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(dbp, name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+/*
+ * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,
+ * PUBLIC: __db_rename_reply *));
+ */
+void
+__db_rename_proc(dbpcl_id, name, subdb,
+ newname, flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+/* END __db_rename_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(dbp, name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+/*
+ * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,
+ * PUBLIC: int *));
+ */
+void
+__db_stat_proc(dbpcl_id, flags, replyp, freep)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+ int * freep;
+/* END __db_stat_proc */
+{
+ DB *dbp;
+ DBTYPE type;
+ ct_entry *dbp_ctp;
+ u_int32_t *q, *p, *retsp;
+ int i, len, ret;
+ void *sp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->stat(dbp, &sp, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+ /*
+ * We get here, we have success. Allocate an array so that
+ * we can use the list generator. Generate the reply, free
+ * up the space.
+ */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ (void)dbp->get_type(dbp, &type);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT);
+ replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+ if ((ret = __os_umalloc(dbp->dbenv, len * replyp->stats.stats_len,
+ &retsp)) != 0)
+ goto out;
+ for (i = 0, q = retsp, p = sp; i < len;
+ i++, q++, p++)
+ *q = *p;
+ replyp->stats.stats_val = retsp;
+ __os_ufree(dbp->dbenv, sp);
+ if (ret == 0)
+ *freep = 1;
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_proc */
+/*
+ * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
+ */
+void
+__db_sync_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_sync_reply *replyp;
+/* END __db_sync_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(dbp, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+/*
+ * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_truncate_reply *));
+ */
+void
+__db_truncate_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_truncate_reply *replyp;
+/* END __db_truncate_proc */
+{
+ DB *dbp;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ u_int32_t count;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbp->truncate(dbp, txnp, &count, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->count = count;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+/*
+ * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_cursor_reply *));
+ */
+void
+__db_cursor_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+/* END __db_cursor_proc */
+{
+ DB *dbp;
+ DBC *dbc;
+ DB_TXN *txnp;
+ ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+/*
+ * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,
+ * PUBLIC: __db_join_reply *));
+ */
+void
+__db_join_proc(dbpcl_id, curs, curslen,
+ flags, replyp)
+ long dbpcl_id;
+ u_int32_t * curs;
+ u_int32_t curslen;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+/* END __db_join_proc */
+{
+ DB *dbp;
+ DBC **jcurs, **c;
+ DBC *dbc;
+ ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+ size_t size;
+ u_int32_t *cl, i;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ size = (curslen + 1) * sizeof(DBC *);
+ if ((ret = __os_calloc(dbp->dbenv,
+ curslen + 1, sizeof(DBC *), &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curs);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+	 * curslist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
+ *c = NULL;
+ if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curs; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(dbp->dbenv, jcurs);
+ return;
+}
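
The comments inside __db_join_proc above describe how the members of a join have their activity pointers redirected at the join cursor's own timestamp, so that touching the join keeps every member cursor from timing out while the join is live. A minimal, hedged model of that pointer-sharing idea follows; it uses toy types rather than the server's ct_entry and is purely illustrative, not part of the Berkeley DB source.

#include <stdio.h>
#include <time.h>

struct toy_ct {
	time_t ct_active;	/* this entry's own activity slot */
	time_t *ct_activep;	/* where activity is actually recorded */
};

int
main(void)
{
	struct toy_ct join, member;

	/* The join cursor records activity in its own slot. */
	join.ct_activep = &join.ct_active;
	/* A member cursor is redirected at the join cursor's slot. */
	member.ct_activep = &join.ct_active;

	/* Touching the join keeps every member "active" as well. */
	*join.ct_activep = time(NULL);
	printf("member last active: %ld\n", (long)*member.ct_activep);
	return (0);
}
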
+
+/* BEGIN __dbc_close_proc */
+/*
+ * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));
+ */
+void
+__dbc_close_proc(dbccl_id, replyp)
+ long dbccl_id;
+ __dbc_close_reply *replyp;
+/* END __dbc_close_proc */
+{
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/* BEGIN __dbc_count_proc */
+/*
+ * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
+ */
+void
+__dbc_count_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+/* END __dbc_count_proc */
+{
+ DBC *dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_count(dbc, &num, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/* BEGIN __dbc_del_proc */
+/*
+ * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
+ */
+void
+__dbc_del_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_del_reply *replyp;
+/* END __dbc_del_proc */
+{
+ DBC *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_del(dbc, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_proc */
+/*
+ * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
+ */
+void
+__dbc_dup_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+/* END __dbc_dup_proc */
+{
+ DBC *dbc, *newdbc;
+ ct_entry *dbc_ctp, *new_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_proc */
+/*
+ * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *));
+ */
+void
+__dbc_get_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+ int * freep;
+/* END __dbc_get_proc */
+{
+ DBC *dbc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, bulk_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbenv = dbc->dbp->dbenv;
+
+ *freep = 0;
+ bulk_alloc = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.size = datasize;
+ data.data = datadata;
+ if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+ if (data.data == 0) {
+ ret = __os_umalloc(dbenv, data.ulen, &data.data);
+ if (ret != 0)
+ goto err;
+ bulk_alloc = 1;
+ }
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->c_get(dbc, &key, &data, flags);
+
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbenv, key.size,
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, key.data);
+ __os_ufree(dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbenv, data.size,
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, key.data);
+ __os_ufree(dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbenv, replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbenv, data.data);
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_pget_proc */
+/*
+ * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,
+ * PUBLIC: u_int32_t, __dbc_pget_reply *, int *));
+ */
+void
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,
+ skeyulen, skeyflags, skeydata, skeysize,
+ pkeydlen, pkeydoff, pkeyulen, pkeyflags,
+ pkeydata, pkeysize, datadlen, datadoff,
+ dataulen, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+ int * freep;
+/* END __dbc_pget_proc */
+{
+ DBC *dbc;
+ DBT skey, pkey, data;
+ DB_ENV *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbenv = dbc->dbp->dbenv;
+
+ *freep = 0;
+ memset(&skey, 0, sizeof(skey));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data DBT */
+ skey.flags = DB_DBT_MALLOC;
+ skey.dlen = skeydlen;
+ skey.ulen = skeyulen;
+ skey.doff = skeydoff;
+ if (skeyflags & DB_DBT_PARTIAL)
+ skey.flags |= DB_DBT_PARTIAL;
+ skey.size = skeysize;
+ skey.data = skeydata;
+
+ pkey.flags = DB_DBT_MALLOC;
+ pkey.dlen = pkeydlen;
+ pkey.ulen = pkeyulen;
+ pkey.doff = pkeydoff;
+ if (pkeyflags & DB_DBT_PARTIAL)
+ pkey.flags |= DB_DBT_PARTIAL;
+ pkey.size = pkeysize;
+ pkey.data = pkeydata;
+
+ data.flags = DB_DBT_MALLOC;
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->c_pget(dbc, &skey, &pkey, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.data == skeydata) {
+ ret = __os_umalloc(dbenv,
+ skey.size, &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.data,
+ skey.size);
+ } else
+ replyp->skeydata.skeydata_val = skey.data;
+ replyp->skeydata.skeydata_len = skey.size;
+
+ /*
+ * Primary key
+ */
+ if (pkey.data == pkeydata) {
+ ret = __os_umalloc(dbenv,
+ pkey.size, &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+ pkey.size);
+ } else
+ replyp->pkeydata.pkeydata_val = pkey.data;
+ replyp->pkeydata.pkeydata_len = pkey.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_proc */
+/*
+ * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *));
+ */
+void
+__dbc_put_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+ int * freep;
+/* END __dbc_put_proc */
+{
+ DB *dbp;
+ DBC *dbc;
+ DBT key, data;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = 0;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->c_put(dbc, &key, &data, flags);
+
+ *freep = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
+ dbp->type == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = key.data;
+ replyp->keydata.keydata_len = key.size;
+ } else {
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
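
The get, pget and put procedures above share one reply-memory convention, spelled out in their XXX comments: when Berkeley DB hands back the very buffer that arrived in the RPC request, the reply must be given a private copy, because the reply will later be passed to xdr_free and must never release request-owned memory. A hedged sketch of that convention as a stand-alone helper follows; the name reply_dbt and its exact signature are illustrative and do not appear in the source, and it assumes the same internal headers the server sources include.

#include <string.h>

#include "db_int.h"	/* DB_ENV, DBT, __os_umalloc(), as used by the server sources */

/*
 * Sketch only: hand a DBT back through an RPC reply.  If DB reused the
 * request's buffer, copy it so xdr_free() of the reply cannot free
 * memory owned by the request; otherwise pass DB's allocation through.
 */
static int
reply_dbt(DB_ENV *dbenv, DBT *dbt, void *reqbuf, void **valp, u_int32_t *lenp)
{
	int ret;

	if (dbt->data == reqbuf) {
		/* DB reused the request buffer: copy it for the reply. */
		if ((ret = __os_umalloc(dbenv, dbt->size, valp)) != 0)
			return (ret);
		memcpy(*valp, dbt->data, dbt->size);
	} else
		/* DB allocated new memory: hand it to the reply as-is. */
		*valp = dbt->data;
	*lenp = dbt->size;
	return (0);
}
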
diff --git a/libdb/rpc_server/c/db_server_proc.sed b/libdb/rpc_server/c/db_server_proc.sed
new file mode 100644
index 0000000..e11b2c3
--- /dev/null
+++ b/libdb/rpc_server/c/db_server_proc.sed
@@ -0,0 +1,772 @@
+/^\/\* BEGIN __env_cachesize_proc/,/^\/\* END __env_cachesize_proc/c\
+/* BEGIN __env_cachesize_proc */\
+/*\
+\ * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, __env_cachesize_reply *));\
+\ */\
+void\
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,\
+\ \ ncache, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t gbytes;\
+\ u_int32_t bytes;\
+\ u_int32_t ncache;\
+\ __env_cachesize_reply *replyp;\
+/* END __env_cachesize_proc */
+/^\/\* BEGIN __env_close_proc/,/^\/\* END __env_close_proc/c\
+/* BEGIN __env_close_proc */\
+/*\
+\ * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));\
+\ */\
+void\
+__env_close_proc(dbenvcl_id, flags, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ __env_close_reply *replyp;\
+/* END __env_close_proc */
+/^\/\* BEGIN __env_create_proc/,/^\/\* END __env_create_proc/c\
+/* BEGIN __env_create_proc */\
+/*\
+\ * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));\
+\ */\
+void\
+__env_create_proc(timeout, replyp)\
+\ u_int32_t timeout;\
+\ __env_create_reply *replyp;\
+/* END __env_create_proc */
+/^\/\* BEGIN __env_dbremove_proc/,/^\/\* END __env_dbremove_proc/c\
+/* BEGIN __env_dbremove_proc */\
+/*\
+\ * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,\
+\ * PUBLIC: __env_dbremove_reply *));\
+\ */\
+void\
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,\
+\ \ subdb, flags, replyp)\
+\ long dbenvcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t flags;\
+\ __env_dbremove_reply *replyp;\
+/* END __env_dbremove_proc */
+/^\/\* BEGIN __env_dbrename_proc/,/^\/\* END __env_dbrename_proc/c\
+/* BEGIN __env_dbrename_proc */\
+/*\
+\ * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,\
+\ * PUBLIC: u_int32_t, __env_dbrename_reply *));\
+\ */\
+void\
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,\
+\ \ subdb, newname, flags, replyp)\
+\ long dbenvcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ char *newname;\
+\ u_int32_t flags;\
+\ __env_dbrename_reply *replyp;\
+/* END __env_dbrename_proc */
+/^\/\* BEGIN __env_encrypt_proc/,/^\/\* END __env_encrypt_proc/c\
+/* BEGIN __env_encrypt_proc */\
+/*\
+\ * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __env_encrypt_reply *));\
+\ */\
+void\
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)\
+\ long dbenvcl_id;\
+\ char *passwd;\
+\ u_int32_t flags;\
+\ __env_encrypt_reply *replyp;\
+/* END __env_encrypt_proc */
+/^\/\* BEGIN __env_flags_proc/,/^\/\* END __env_flags_proc/c\
+/* BEGIN __env_flags_proc */\
+/*\
+\ * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: __env_flags_reply *));\
+\ */\
+void\
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ u_int32_t onoff;\
+\ __env_flags_reply *replyp;\
+/* END __env_flags_proc */
+/^\/\* BEGIN __env_open_proc/,/^\/\* END __env_open_proc/c\
+/* BEGIN __env_open_proc */\
+/*\
+\ * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,\
+\ * PUBLIC: __env_open_reply *));\
+\ */\
+void\
+__env_open_proc(dbenvcl_id, home, flags,\
+\ \ mode, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __env_open_reply *replyp;\
+/* END __env_open_proc */
+/^\/\* BEGIN __env_remove_proc/,/^\/\* END __env_remove_proc/c\
+/* BEGIN __env_remove_proc */\
+/*\
+\ * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __env_remove_reply *));\
+\ */\
+void\
+__env_remove_proc(dbenvcl_id, home, flags, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ __env_remove_reply *replyp;\
+/* END __env_remove_proc */
+/^\/\* BEGIN __txn_abort_proc/,/^\/\* END __txn_abort_proc/c\
+/* BEGIN __txn_abort_proc */\
+/*\
+\ * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));\
+\ */\
+void\
+__txn_abort_proc(txnpcl_id, replyp)\
+\ long txnpcl_id;\
+\ __txn_abort_reply *replyp;\
+/* END __txn_abort_proc */
+/^\/\* BEGIN __txn_begin_proc/,/^\/\* END __txn_begin_proc/c\
+/* BEGIN __txn_begin_proc */\
+/*\
+\ * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __txn_begin_reply *));\
+\ */\
+void\
+__txn_begin_proc(dbenvcl_id, parentcl_id,\
+\ \ flags, replyp)\
+\ long dbenvcl_id;\
+\ long parentcl_id;\
+\ u_int32_t flags;\
+\ __txn_begin_reply *replyp;\
+/* END __txn_begin_proc */
+/^\/\* BEGIN __txn_commit_proc/,/^\/\* END __txn_commit_proc/c\
+/* BEGIN __txn_commit_proc */\
+/*\
+\ * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,\
+\ * PUBLIC: __txn_commit_reply *));\
+\ */\
+void\
+__txn_commit_proc(txnpcl_id, flags, replyp)\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __txn_commit_reply *replyp;\
+/* END __txn_commit_proc */
+/^\/\* BEGIN __txn_discard_proc/,/^\/\* END __txn_discard_proc/c\
+/* BEGIN __txn_discard_proc */\
+/*\
+\ * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,\
+\ * PUBLIC: __txn_discard_reply *));\
+\ */\
+void\
+__txn_discard_proc(txnpcl_id, flags, replyp)\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __txn_discard_reply *replyp;\
+/* END __txn_discard_proc */
+/^\/\* BEGIN __txn_prepare_proc/,/^\/\* END __txn_prepare_proc/c\
+/* BEGIN __txn_prepare_proc */\
+/*\
+\ * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,\
+\ * PUBLIC: __txn_prepare_reply *));\
+\ */\
+void\
+__txn_prepare_proc(txnpcl_id, gid, replyp)\
+\ long txnpcl_id;\
+\ u_int8_t *gid;\
+\ __txn_prepare_reply *replyp;\
+/* END __txn_prepare_proc */
+/^\/\* BEGIN __txn_recover_proc/,/^\/\* END __txn_recover_proc/c\
+/* BEGIN __txn_recover_proc */\
+/*\
+\ * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: __txn_recover_reply *, int *));\
+\ */\
+void\
+__txn_recover_proc(dbenvcl_id, count,\
+\ \ flags, replyp, freep)\
+\ long dbenvcl_id;\
+\ u_int32_t count;\
+\ u_int32_t flags;\
+\ __txn_recover_reply *replyp;\
+\ int * freep;\
+/* END __txn_recover_proc */
+/^\/\* BEGIN __db_associate_proc/,/^\/\* END __db_associate_proc/c\
+/* BEGIN __db_associate_proc */\
+/*\
+\ * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,\
+\ * PUBLIC: __db_associate_reply *));\
+\ */\
+void\
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ long sdbpcl_id;\
+\ u_int32_t flags;\
+\ __db_associate_reply *replyp;\
+/* END __db_associate_proc */
+/^\/\* BEGIN __db_bt_maxkey_proc/,/^\/\* END __db_bt_maxkey_proc/c\
+/* BEGIN __db_bt_maxkey_proc */\
+/*\
+\ * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_bt_maxkey_reply *));\
+\ */\
+void\
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t maxkey;\
+\ __db_bt_maxkey_reply *replyp;\
+/* END __db_bt_maxkey_proc */
+/^\/\* BEGIN __db_bt_minkey_proc/,/^\/\* END __db_bt_minkey_proc/c\
+/* BEGIN __db_bt_minkey_proc */\
+/*\
+\ * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_bt_minkey_reply *));\
+\ */\
+void\
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t minkey;\
+\ __db_bt_minkey_reply *replyp;\
+/* END __db_bt_minkey_proc */
+/^\/\* BEGIN __db_close_proc/,/^\/\* END __db_close_proc/c\
+/* BEGIN __db_close_proc */\
+/*\
+\ * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));\
+\ */\
+void\
+__db_close_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_close_reply *replyp;\
+/* END __db_close_proc */
+/^\/\* BEGIN __db_create_proc/,/^\/\* END __db_create_proc/c\
+/* BEGIN __db_create_proc */\
+/*\
+\ * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));\
+\ */\
+void\
+__db_create_proc(dbenvcl_id, flags, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ __db_create_reply *replyp;\
+/* END __db_create_proc */
+/^\/\* BEGIN __db_del_proc/,/^\/\* END __db_del_proc/c\
+/* BEGIN __db_del_proc */\
+/*\
+\ * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));\
+\ */\
+void\
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_del_reply *replyp;\
+/* END __db_del_proc */
+/^\/\* BEGIN __db_encrypt_proc/,/^\/\* END __db_encrypt_proc/c\
+/* BEGIN __db_encrypt_proc */\
+/*\
+\ * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __db_encrypt_reply *));\
+\ */\
+void\
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)\
+\ long dbpcl_id;\
+\ char *passwd;\
+\ u_int32_t flags;\
+\ __db_encrypt_reply *replyp;\
+/* END __db_encrypt_proc */
+/^\/\* BEGIN __db_extentsize_proc/,/^\/\* END __db_extentsize_proc/c\
+/* BEGIN __db_extentsize_proc */\
+/*\
+\ * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_extentsize_reply *));\
+\ */\
+void\
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t extentsize;\
+\ __db_extentsize_reply *replyp;\
+/* END __db_extentsize_proc */
+/^\/\* BEGIN __db_flags_proc/,/^\/\* END __db_flags_proc/c\
+/* BEGIN __db_flags_proc */\
+/*\
+\ * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));\
+\ */\
+void\
+__db_flags_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_flags_reply *replyp;\
+/* END __db_flags_proc */
+/^\/\* BEGIN __db_get_proc/,/^\/\* END __db_get_proc/c\
+/* BEGIN __db_get_proc */\
+/*\
+\ * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *));\
+\ */\
+void\
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, datadlen, datadoff, dataulen,\
+\ \ dataflags, datadata, datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_get_reply *replyp;\
+\ int * freep;\
+/* END __db_get_proc */
+/^\/\* BEGIN __db_h_ffactor_proc/,/^\/\* END __db_h_ffactor_proc/c\
+/* BEGIN __db_h_ffactor_proc */\
+/*\
+\ * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_h_ffactor_reply *));\
+\ */\
+void\
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t ffactor;\
+\ __db_h_ffactor_reply *replyp;\
+/* END __db_h_ffactor_proc */
+/^\/\* BEGIN __db_h_nelem_proc/,/^\/\* END __db_h_nelem_proc/c\
+/* BEGIN __db_h_nelem_proc */\
+/*\
+\ * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_h_nelem_reply *));\
+\ */\
+void\
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t nelem;\
+\ __db_h_nelem_reply *replyp;\
+/* END __db_h_nelem_proc */
+/^\/\* BEGIN __db_key_range_proc/,/^\/\* END __db_key_range_proc/c\
+/* BEGIN __db_key_range_proc */\
+/*\
+\ * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));\
+\ */\
+void\
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_key_range_reply *replyp;\
+/* END __db_key_range_proc */
+/^\/\* BEGIN __db_lorder_proc/,/^\/\* END __db_lorder_proc/c\
+/* BEGIN __db_lorder_proc */\
+/*\
+\ * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));\
+\ */\
+void\
+__db_lorder_proc(dbpcl_id, lorder, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t lorder;\
+\ __db_lorder_reply *replyp;\
+/* END __db_lorder_proc */
+/^\/\* BEGIN __db_open_proc/,/^\/\* END __db_open_proc/c\
+/* BEGIN __db_open_proc */\
+/*\
+\ * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *));\
+\ */\
+void\
+__db_open_proc(dbpcl_id, txnpcl_id, name,\
+\ \ subdb, type, flags, mode, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t type;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __db_open_reply *replyp;\
+/* END __db_open_proc */
+/^\/\* BEGIN __db_pagesize_proc/,/^\/\* END __db_pagesize_proc/c\
+/* BEGIN __db_pagesize_proc */\
+/*\
+\ * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_pagesize_reply *));\
+\ */\
+void\
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pagesize;\
+\ __db_pagesize_reply *replyp;\
+/* END __db_pagesize_proc */
+/^\/\* BEGIN __db_pget_proc/,/^\/\* END __db_pget_proc/c\
+/* BEGIN __db_pget_proc */\
+/*\
+\ * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *));\
+\ */\
+void\
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,\
+\ \ skeydoff, skeyulen, skeyflags, skeydata,\
+\ \ skeysize, pkeydlen, pkeydoff, pkeyulen,\
+\ \ pkeyflags, pkeydata, pkeysize, datadlen,\
+\ \ datadoff, dataulen, dataflags, datadata,\
+\ \ datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t skeydlen;\
+\ u_int32_t skeydoff;\
+\ u_int32_t skeyulen;\
+\ u_int32_t skeyflags;\
+\ void *skeydata;\
+\ u_int32_t skeysize;\
+\ u_int32_t pkeydlen;\
+\ u_int32_t pkeydoff;\
+\ u_int32_t pkeyulen;\
+\ u_int32_t pkeyflags;\
+\ void *pkeydata;\
+\ u_int32_t pkeysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_pget_reply *replyp;\
+\ int * freep;\
+/* END __db_pget_proc */
+/^\/\* BEGIN __db_put_proc/,/^\/\* END __db_put_proc/c\
+/* BEGIN __db_put_proc */\
+/*\
+\ * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *));\
+\ */\
+void\
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, datadlen, datadoff, dataulen,\
+\ \ dataflags, datadata, datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_put_reply *replyp;\
+\ int * freep;\
+/* END __db_put_proc */
+/^\/\* BEGIN __db_re_delim_proc/,/^\/\* END __db_re_delim_proc/c\
+/* BEGIN __db_re_delim_proc */\
+/*\
+\ * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_re_delim_reply *));\
+\ */\
+void\
+__db_re_delim_proc(dbpcl_id, delim, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t delim;\
+\ __db_re_delim_reply *replyp;\
+/* END __db_re_delim_proc */
+/^\/\* BEGIN __db_re_len_proc/,/^\/\* END __db_re_len_proc/c\
+/* BEGIN __db_re_len_proc */\
+/*\
+\ * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));\
+\ */\
+void\
+__db_re_len_proc(dbpcl_id, len, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t len;\
+\ __db_re_len_reply *replyp;\
+/* END __db_re_len_proc */
+/^\/\* BEGIN __db_re_pad_proc/,/^\/\* END __db_re_pad_proc/c\
+/* BEGIN __db_re_pad_proc */\
+/*\
+\ * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));\
+\ */\
+void\
+__db_re_pad_proc(dbpcl_id, pad, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pad;\
+\ __db_re_pad_reply *replyp;\
+/* END __db_re_pad_proc */
+/^\/\* BEGIN __db_remove_proc/,/^\/\* END __db_remove_proc/c\
+/* BEGIN __db_remove_proc */\
+/*\
+\ * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,\
+\ * PUBLIC: __db_remove_reply *));\
+\ */\
+void\
+__db_remove_proc(dbpcl_id, name, subdb,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t flags;\
+\ __db_remove_reply *replyp;\
+/* END __db_remove_proc */
+/^\/\* BEGIN __db_rename_proc/,/^\/\* END __db_rename_proc/c\
+/* BEGIN __db_rename_proc */\
+/*\
+\ * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,\
+\ * PUBLIC: __db_rename_reply *));\
+\ */\
+void\
+__db_rename_proc(dbpcl_id, name, subdb,\
+\ \ newname, flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ char *newname;\
+\ u_int32_t flags;\
+\ __db_rename_reply *replyp;\
+/* END __db_rename_proc */
+/^\/\* BEGIN __db_stat_proc/,/^\/\* END __db_stat_proc/c\
+/* BEGIN __db_stat_proc */\
+/*\
+\ * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,\
+\ * PUBLIC: int *));\
+\ */\
+void\
+__db_stat_proc(dbpcl_id, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_stat_reply *replyp;\
+\ int * freep;\
+/* END __db_stat_proc */
+/^\/\* BEGIN __db_sync_proc/,/^\/\* END __db_sync_proc/c\
+/* BEGIN __db_sync_proc */\
+/*\
+\ * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));\
+\ */\
+void\
+__db_sync_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_sync_reply *replyp;\
+/* END __db_sync_proc */
+/^\/\* BEGIN __db_truncate_proc/,/^\/\* END __db_truncate_proc/c\
+/* BEGIN __db_truncate_proc */\
+/*\
+\ * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __db_truncate_reply *));\
+\ */\
+void\
+__db_truncate_proc(dbpcl_id, txnpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __db_truncate_reply *replyp;\
+/* END __db_truncate_proc */
+/^\/\* BEGIN __db_cursor_proc/,/^\/\* END __db_cursor_proc/c\
+/* BEGIN __db_cursor_proc */\
+/*\
+\ * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __db_cursor_reply *));\
+\ */\
+void\
+__db_cursor_proc(dbpcl_id, txnpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __db_cursor_reply *replyp;\
+/* END __db_cursor_proc */
+/^\/\* BEGIN __db_join_proc/,/^\/\* END __db_join_proc/c\
+/* BEGIN __db_join_proc */\
+/*\
+\ * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,\
+\ * PUBLIC: __db_join_reply *));\
+\ */\
+void\
+__db_join_proc(dbpcl_id, curs, curslen,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t * curs;\
+\ u_int32_t curslen;\
+\ u_int32_t flags;\
+\ __db_join_reply *replyp;\
+/* END __db_join_proc */
+/^\/\* BEGIN __dbc_close_proc/,/^\/\* END __dbc_close_proc/c\
+/* BEGIN __dbc_close_proc */\
+/*\
+\ * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));\
+\ */\
+void\
+__dbc_close_proc(dbccl_id, replyp)\
+\ long dbccl_id;\
+\ __dbc_close_reply *replyp;\
+/* END __dbc_close_proc */
+/^\/\* BEGIN __dbc_count_proc/,/^\/\* END __dbc_count_proc/c\
+/* BEGIN __dbc_count_proc */\
+/*\
+\ * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));\
+\ */\
+void\
+__dbc_count_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_count_reply *replyp;\
+/* END __dbc_count_proc */
+/^\/\* BEGIN __dbc_del_proc/,/^\/\* END __dbc_del_proc/c\
+/* BEGIN __dbc_del_proc */\
+/*\
+\ * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));\
+\ */\
+void\
+__dbc_del_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_del_reply *replyp;\
+/* END __dbc_del_proc */
+/^\/\* BEGIN __dbc_dup_proc/,/^\/\* END __dbc_dup_proc/c\
+/* BEGIN __dbc_dup_proc */\
+/*\
+\ * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));\
+\ */\
+void\
+__dbc_dup_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_dup_reply *replyp;\
+/* END __dbc_dup_proc */
+/^\/\* BEGIN __dbc_get_proc/,/^\/\* END __dbc_get_proc/c\
+/* BEGIN __dbc_get_proc */\
+/*\
+\ * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *));\
+\ */\
+void\
+__dbc_get_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyulen, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataulen, dataflags,\
+\ \ datadata, datasize, flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_get_reply *replyp;\
+\ int * freep;\
+/* END __dbc_get_proc */
+/^\/\* BEGIN __dbc_pget_proc/,/^\/\* END __dbc_pget_proc/c\
+/* BEGIN __dbc_pget_proc */\
+/*\
+\ * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,\
+\ * PUBLIC: u_int32_t, __dbc_pget_reply *, int *));\
+\ */\
+void\
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,\
+\ \ skeyulen, skeyflags, skeydata, skeysize,\
+\ \ pkeydlen, pkeydoff, pkeyulen, pkeyflags,\
+\ \ pkeydata, pkeysize, datadlen, datadoff,\
+\ \ dataulen, dataflags, datadata, datasize,\
+\ \ flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t skeydlen;\
+\ u_int32_t skeydoff;\
+\ u_int32_t skeyulen;\
+\ u_int32_t skeyflags;\
+\ void *skeydata;\
+\ u_int32_t skeysize;\
+\ u_int32_t pkeydlen;\
+\ u_int32_t pkeydoff;\
+\ u_int32_t pkeyulen;\
+\ u_int32_t pkeyflags;\
+\ void *pkeydata;\
+\ u_int32_t pkeysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_pget_reply *replyp;\
+\ int * freep;\
+/* END __dbc_pget_proc */
+/^\/\* BEGIN __dbc_put_proc/,/^\/\* END __dbc_put_proc/c\
+/* BEGIN __dbc_put_proc */\
+/*\
+\ * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *));\
+\ */\
+void\
+__dbc_put_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyulen, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataulen, dataflags,\
+\ \ datadata, datasize, flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_put_reply *replyp;\
+\ int * freep;\
+/* END __dbc_put_proc */
diff --git a/libdb/rpc_server/c/db_server_svc.c b/libdb/rpc_server/c/db_server_svc.c
new file mode 100644
index 0000000..96dd959
--- /dev/null
+++ b/libdb/rpc_server/c/db_server_svc.c
@@ -0,0 +1,435 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <stdio.h>
+#include <stdlib.h> /* getenv, exit */
+#include <memory.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+#ifdef DEBUG
+#define RPC_SVC_FG
+#endif
+
+static void
+db_rpc_serverprog_4001(rqstp, transp)
+ struct svc_req *rqstp;
+ register SVCXPRT *transp;
+{
+ union {
+ __env_cachesize_msg __db_env_cachesize_4001_arg;
+ __env_close_msg __db_env_close_4001_arg;
+ __env_create_msg __db_env_create_4001_arg;
+ __env_dbremove_msg __db_env_dbremove_4001_arg;
+ __env_dbrename_msg __db_env_dbrename_4001_arg;
+ __env_encrypt_msg __db_env_encrypt_4001_arg;
+ __env_flags_msg __db_env_flags_4001_arg;
+ __env_open_msg __db_env_open_4001_arg;
+ __env_remove_msg __db_env_remove_4001_arg;
+ __txn_abort_msg __db_txn_abort_4001_arg;
+ __txn_begin_msg __db_txn_begin_4001_arg;
+ __txn_commit_msg __db_txn_commit_4001_arg;
+ __txn_discard_msg __db_txn_discard_4001_arg;
+ __txn_prepare_msg __db_txn_prepare_4001_arg;
+ __txn_recover_msg __db_txn_recover_4001_arg;
+ __db_associate_msg __db_db_associate_4001_arg;
+ __db_bt_maxkey_msg __db_db_bt_maxkey_4001_arg;
+ __db_bt_minkey_msg __db_db_bt_minkey_4001_arg;
+ __db_close_msg __db_db_close_4001_arg;
+ __db_create_msg __db_db_create_4001_arg;
+ __db_del_msg __db_db_del_4001_arg;
+ __db_encrypt_msg __db_db_encrypt_4001_arg;
+ __db_extentsize_msg __db_db_extentsize_4001_arg;
+ __db_flags_msg __db_db_flags_4001_arg;
+ __db_get_msg __db_db_get_4001_arg;
+ __db_h_ffactor_msg __db_db_h_ffactor_4001_arg;
+ __db_h_nelem_msg __db_db_h_nelem_4001_arg;
+ __db_key_range_msg __db_db_key_range_4001_arg;
+ __db_lorder_msg __db_db_lorder_4001_arg;
+ __db_open_msg __db_db_open_4001_arg;
+ __db_pagesize_msg __db_db_pagesize_4001_arg;
+ __db_pget_msg __db_db_pget_4001_arg;
+ __db_put_msg __db_db_put_4001_arg;
+ __db_re_delim_msg __db_db_re_delim_4001_arg;
+ __db_re_len_msg __db_db_re_len_4001_arg;
+ __db_re_pad_msg __db_db_re_pad_4001_arg;
+ __db_remove_msg __db_db_remove_4001_arg;
+ __db_rename_msg __db_db_rename_4001_arg;
+ __db_stat_msg __db_db_stat_4001_arg;
+ __db_sync_msg __db_db_sync_4001_arg;
+ __db_truncate_msg __db_db_truncate_4001_arg;
+ __db_cursor_msg __db_db_cursor_4001_arg;
+ __db_join_msg __db_db_join_4001_arg;
+ __dbc_close_msg __db_dbc_close_4001_arg;
+ __dbc_count_msg __db_dbc_count_4001_arg;
+ __dbc_del_msg __db_dbc_del_4001_arg;
+ __dbc_dup_msg __db_dbc_dup_4001_arg;
+ __dbc_get_msg __db_dbc_get_4001_arg;
+ __dbc_pget_msg __db_dbc_pget_4001_arg;
+ __dbc_put_msg __db_dbc_put_4001_arg;
+ } argument;
+ char *result;
+ bool_t (*xdr_argument)(), (*xdr_result)();
+ char *(*local)();
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp, (xdrproc_t)xdr_void,
+ (char *)NULL);
+ return;
+
+ case __DB_env_cachesize:
+ xdr_argument = xdr___env_cachesize_msg;
+ xdr_result = xdr___env_cachesize_reply;
+ local = (char *(*)()) __db_env_cachesize_4001;
+ break;
+
+ case __DB_env_close:
+ xdr_argument = xdr___env_close_msg;
+ xdr_result = xdr___env_close_reply;
+ local = (char *(*)()) __db_env_close_4001;
+ break;
+
+ case __DB_env_create:
+ xdr_argument = xdr___env_create_msg;
+ xdr_result = xdr___env_create_reply;
+ local = (char *(*)()) __db_env_create_4001;
+ break;
+
+ case __DB_env_dbremove:
+ xdr_argument = xdr___env_dbremove_msg;
+ xdr_result = xdr___env_dbremove_reply;
+ local = (char *(*)()) __db_env_dbremove_4001;
+ break;
+
+ case __DB_env_dbrename:
+ xdr_argument = xdr___env_dbrename_msg;
+ xdr_result = xdr___env_dbrename_reply;
+ local = (char *(*)()) __db_env_dbrename_4001;
+ break;
+
+ case __DB_env_encrypt:
+ xdr_argument = xdr___env_encrypt_msg;
+ xdr_result = xdr___env_encrypt_reply;
+ local = (char *(*)()) __db_env_encrypt_4001;
+ break;
+
+ case __DB_env_flags:
+ xdr_argument = xdr___env_flags_msg;
+ xdr_result = xdr___env_flags_reply;
+ local = (char *(*)()) __db_env_flags_4001;
+ break;
+
+ case __DB_env_open:
+ xdr_argument = xdr___env_open_msg;
+ xdr_result = xdr___env_open_reply;
+ local = (char *(*)()) __db_env_open_4001;
+ break;
+
+ case __DB_env_remove:
+ xdr_argument = xdr___env_remove_msg;
+ xdr_result = xdr___env_remove_reply;
+ local = (char *(*)()) __db_env_remove_4001;
+ break;
+
+ case __DB_txn_abort:
+ xdr_argument = xdr___txn_abort_msg;
+ xdr_result = xdr___txn_abort_reply;
+ local = (char *(*)()) __db_txn_abort_4001;
+ break;
+
+ case __DB_txn_begin:
+ xdr_argument = xdr___txn_begin_msg;
+ xdr_result = xdr___txn_begin_reply;
+ local = (char *(*)()) __db_txn_begin_4001;
+ break;
+
+ case __DB_txn_commit:
+ xdr_argument = xdr___txn_commit_msg;
+ xdr_result = xdr___txn_commit_reply;
+ local = (char *(*)()) __db_txn_commit_4001;
+ break;
+
+ case __DB_txn_discard:
+ xdr_argument = xdr___txn_discard_msg;
+ xdr_result = xdr___txn_discard_reply;
+ local = (char *(*)()) __db_txn_discard_4001;
+ break;
+
+ case __DB_txn_prepare:
+ xdr_argument = xdr___txn_prepare_msg;
+ xdr_result = xdr___txn_prepare_reply;
+ local = (char *(*)()) __db_txn_prepare_4001;
+ break;
+
+ case __DB_txn_recover:
+ xdr_argument = xdr___txn_recover_msg;
+ xdr_result = xdr___txn_recover_reply;
+ local = (char *(*)()) __db_txn_recover_4001;
+ break;
+
+ case __DB_db_associate:
+ xdr_argument = xdr___db_associate_msg;
+ xdr_result = xdr___db_associate_reply;
+ local = (char *(*)()) __db_db_associate_4001;
+ break;
+
+ case __DB_db_bt_maxkey:
+ xdr_argument = xdr___db_bt_maxkey_msg;
+ xdr_result = xdr___db_bt_maxkey_reply;
+ local = (char *(*)()) __db_db_bt_maxkey_4001;
+ break;
+
+ case __DB_db_bt_minkey:
+ xdr_argument = xdr___db_bt_minkey_msg;
+ xdr_result = xdr___db_bt_minkey_reply;
+ local = (char *(*)()) __db_db_bt_minkey_4001;
+ break;
+
+ case __DB_db_close:
+ xdr_argument = xdr___db_close_msg;
+ xdr_result = xdr___db_close_reply;
+ local = (char *(*)()) __db_db_close_4001;
+ break;
+
+ case __DB_db_create:
+ xdr_argument = xdr___db_create_msg;
+ xdr_result = xdr___db_create_reply;
+ local = (char *(*)()) __db_db_create_4001;
+ break;
+
+ case __DB_db_del:
+ xdr_argument = xdr___db_del_msg;
+ xdr_result = xdr___db_del_reply;
+ local = (char *(*)()) __db_db_del_4001;
+ break;
+
+ case __DB_db_encrypt:
+ xdr_argument = xdr___db_encrypt_msg;
+ xdr_result = xdr___db_encrypt_reply;
+ local = (char *(*)()) __db_db_encrypt_4001;
+ break;
+
+ case __DB_db_extentsize:
+ xdr_argument = xdr___db_extentsize_msg;
+ xdr_result = xdr___db_extentsize_reply;
+ local = (char *(*)()) __db_db_extentsize_4001;
+ break;
+
+ case __DB_db_flags:
+ xdr_argument = xdr___db_flags_msg;
+ xdr_result = xdr___db_flags_reply;
+ local = (char *(*)()) __db_db_flags_4001;
+ break;
+
+ case __DB_db_get:
+ xdr_argument = xdr___db_get_msg;
+ xdr_result = xdr___db_get_reply;
+ local = (char *(*)()) __db_db_get_4001;
+ break;
+
+ case __DB_db_h_ffactor:
+ xdr_argument = xdr___db_h_ffactor_msg;
+ xdr_result = xdr___db_h_ffactor_reply;
+ local = (char *(*)()) __db_db_h_ffactor_4001;
+ break;
+
+ case __DB_db_h_nelem:
+ xdr_argument = xdr___db_h_nelem_msg;
+ xdr_result = xdr___db_h_nelem_reply;
+ local = (char *(*)()) __db_db_h_nelem_4001;
+ break;
+
+ case __DB_db_key_range:
+ xdr_argument = xdr___db_key_range_msg;
+ xdr_result = xdr___db_key_range_reply;
+ local = (char *(*)()) __db_db_key_range_4001;
+ break;
+
+ case __DB_db_lorder:
+ xdr_argument = xdr___db_lorder_msg;
+ xdr_result = xdr___db_lorder_reply;
+ local = (char *(*)()) __db_db_lorder_4001;
+ break;
+
+ case __DB_db_open:
+ xdr_argument = xdr___db_open_msg;
+ xdr_result = xdr___db_open_reply;
+ local = (char *(*)()) __db_db_open_4001;
+ break;
+
+ case __DB_db_pagesize:
+ xdr_argument = xdr___db_pagesize_msg;
+ xdr_result = xdr___db_pagesize_reply;
+ local = (char *(*)()) __db_db_pagesize_4001;
+ break;
+
+ case __DB_db_pget:
+ xdr_argument = xdr___db_pget_msg;
+ xdr_result = xdr___db_pget_reply;
+ local = (char *(*)()) __db_db_pget_4001;
+ break;
+
+ case __DB_db_put:
+ xdr_argument = xdr___db_put_msg;
+ xdr_result = xdr___db_put_reply;
+ local = (char *(*)()) __db_db_put_4001;
+ break;
+
+ case __DB_db_re_delim:
+ xdr_argument = xdr___db_re_delim_msg;
+ xdr_result = xdr___db_re_delim_reply;
+ local = (char *(*)()) __db_db_re_delim_4001;
+ break;
+
+ case __DB_db_re_len:
+ xdr_argument = xdr___db_re_len_msg;
+ xdr_result = xdr___db_re_len_reply;
+ local = (char *(*)()) __db_db_re_len_4001;
+ break;
+
+ case __DB_db_re_pad:
+ xdr_argument = xdr___db_re_pad_msg;
+ xdr_result = xdr___db_re_pad_reply;
+ local = (char *(*)()) __db_db_re_pad_4001;
+ break;
+
+ case __DB_db_remove:
+ xdr_argument = xdr___db_remove_msg;
+ xdr_result = xdr___db_remove_reply;
+ local = (char *(*)()) __db_db_remove_4001;
+ break;
+
+ case __DB_db_rename:
+ xdr_argument = xdr___db_rename_msg;
+ xdr_result = xdr___db_rename_reply;
+ local = (char *(*)()) __db_db_rename_4001;
+ break;
+
+ case __DB_db_stat:
+ xdr_argument = xdr___db_stat_msg;
+ xdr_result = xdr___db_stat_reply;
+ local = (char *(*)()) __db_db_stat_4001;
+ break;
+
+ case __DB_db_sync:
+ xdr_argument = xdr___db_sync_msg;
+ xdr_result = xdr___db_sync_reply;
+ local = (char *(*)()) __db_db_sync_4001;
+ break;
+
+ case __DB_db_truncate:
+ xdr_argument = xdr___db_truncate_msg;
+ xdr_result = xdr___db_truncate_reply;
+ local = (char *(*)()) __db_db_truncate_4001;
+ break;
+
+ case __DB_db_cursor:
+ xdr_argument = xdr___db_cursor_msg;
+ xdr_result = xdr___db_cursor_reply;
+ local = (char *(*)()) __db_db_cursor_4001;
+ break;
+
+ case __DB_db_join:
+ xdr_argument = xdr___db_join_msg;
+ xdr_result = xdr___db_join_reply;
+ local = (char *(*)()) __db_db_join_4001;
+ break;
+
+ case __DB_dbc_close:
+ xdr_argument = xdr___dbc_close_msg;
+ xdr_result = xdr___dbc_close_reply;
+ local = (char *(*)()) __db_dbc_close_4001;
+ break;
+
+ case __DB_dbc_count:
+ xdr_argument = xdr___dbc_count_msg;
+ xdr_result = xdr___dbc_count_reply;
+ local = (char *(*)()) __db_dbc_count_4001;
+ break;
+
+ case __DB_dbc_del:
+ xdr_argument = xdr___dbc_del_msg;
+ xdr_result = xdr___dbc_del_reply;
+ local = (char *(*)()) __db_dbc_del_4001;
+ break;
+
+ case __DB_dbc_dup:
+ xdr_argument = xdr___dbc_dup_msg;
+ xdr_result = xdr___dbc_dup_reply;
+ local = (char *(*)()) __db_dbc_dup_4001;
+ break;
+
+ case __DB_dbc_get:
+ xdr_argument = xdr___dbc_get_msg;
+ xdr_result = xdr___dbc_get_reply;
+ local = (char *(*)()) __db_dbc_get_4001;
+ break;
+
+ case __DB_dbc_pget:
+ xdr_argument = xdr___dbc_pget_msg;
+ xdr_result = xdr___dbc_pget_reply;
+ local = (char *(*)()) __db_dbc_pget_4001;
+ break;
+
+ case __DB_dbc_put:
+ xdr_argument = xdr___dbc_put_msg;
+ xdr_result = xdr___dbc_put_reply;
+ local = (char *(*)()) __db_dbc_put_4001;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)&argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ result = (*local)(&argument, rqstp);
+ if (result != NULL && !svc_sendreply(transp, (xdrproc_t)xdr_result, result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)&argument)) {
+ fprintf(stderr, "unable to free arguments");
+ exit(1);
+ }
+ __dbsrv_timeout(0);
+ return;
+}
+
+void __dbsrv_main()
+{
+ register SVCXPRT *transp;
+
+ (void) pmap_unset(DB_RPC_SERVERPROG, DB_RPC_SERVERVERS);
+
+ transp = svctcp_create(RPC_ANYSOCK, 0, 0);
+ if (transp == NULL) {
+ fprintf(stderr, "cannot create tcp service.");
+ exit(1);
+ }
+ if (!svc_register(transp, DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, db_rpc_serverprog_4001, IPPROTO_TCP)) {
+ fprintf(stderr, "unable to register (DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, tcp).");
+ exit(1);
+ }
+
+ svc_run();
+ fprintf(stderr, "svc_run returned");
+ exit(1);
+ /* NOTREACHED */
+}
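
The generated dispatcher above decodes each request with svc_getargs() before calling the matching procedure, and its NULLPROC arm gives clients a cheap liveness probe. A hedged client-side sketch of such a probe follows, in the same spirit as the clnt_create() check that main() in db_server_util.c performs below; the function name and the five-second timeout are illustrative, while the program and version constants are the ones the server registers.

#include <rpc/rpc.h>

#include "dbinc_auto/db_server.h"	/* DB_RPC_SERVERPROG, DB_RPC_SERVERVERS */

/* Return nonzero if a Berkeley DB RPC server answers on "host". */
static int
db_server_is_up(const char *host)
{
	CLIENT *cl;
	struct timeval tv;
	enum clnt_stat st;

	if ((cl = clnt_create(host,
	    DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) == NULL)
		return (0);
	tv.tv_sec = 5;
	tv.tv_usec = 0;
	st = clnt_call(cl, NULLPROC,
	    (xdrproc_t)xdr_void, (caddr_t)NULL,
	    (xdrproc_t)xdr_void, (caddr_t)NULL, tv);
	clnt_destroy(cl);
	return (st == RPC_SUCCESS);
}
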
diff --git a/libdb/rpc_server/c/db_server_util.c b/libdb/rpc_server/c/db_server_util.c
new file mode 100644
index 0000000..869bf0c
--- /dev/null
+++ b/libdb/rpc_server/c/db_server_util.c
@@ -0,0 +1,815 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+extern int __dbsrv_main __P((void));
+static int add_home __P((char *));
+static int add_passwd __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+int
+main(argc, argv)
+ int argc;
+ char **argv;
+{
+ extern char *optarg;
+ CLIENT *cl;
+ int ch, ret;
+ char *passwd;
+
+ prog = argv[0];
+
+ version_check();
+
+ ret = 0;
+ /*
+ * Check whether another server is running or not. There
+ * is a race condition where two servers could be racing to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+	 * This neither solves nor prevents two servers being started
+	 * at the same time and running recovery at the same time on
+	 * the same environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ return (EXIT_FAILURE);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
+ break;
+ case 'T':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+ * Check default timeout against maximum timeout
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+ * Check default timeout against idle timeout
+ * It would be bad to timeout environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ return (EXIT_FAILURE);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+static void
+usage(prog)
+ char *prog;
+{
+ fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
+ "[-Vv] [-h home] [-P passwd]",
+ "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ exit(EXIT_FAILURE);
+}
+
+static void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/*
+ * PUBLIC: void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+ */
+void
+__dbsrv_settimeout(ctp, to)
+ ct_entry *ctp;
+ u_int32_t to;
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ else if (to <= 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+/*
+ * PUBLIC: void __dbsrv_timeout __P((int));
+ */
+void
+__dbsrv_timeout(force)
+ int force;
+{
+ static long to_hint = -1;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)((DB_TXN *)ctp->ct_anyp)->
+ abort((DB_TXN *)ctp->ct_anyp);
+ __dbdel_ctp(ctp);
+ /*
+				 * If we timed out a txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
+ /*
+ * If we timed out an env, we may have closed
+			 * all sorts of ctp's (maybe even all of them).
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+static void
+__dbclear_child(parent)
+ ct_entry *parent;
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+/*
+ * PUBLIC: void __dbclear_ctp __P((ct_entry *));
+ */
+void
+__dbclear_ctp(ctp)
+ ct_entry *ctp;
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(NULL, ctp);
+}
+
+/*
+ * PUBLIC: void __dbdel_ctp __P((ct_entry *));
+ */
+void
+__dbdel_ctp(parent)
+ ct_entry *parent;
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+/*
+ * PUBLIC: ct_entry *new_ct_ent __P((int *));
+ */
+ct_entry *
+new_ct_ent(errp)
+ int *errp;
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ memset(ctp, 0, sizeof(ct_entry));
+ /*
+	 * Use the current time as the ID. However, we may service more
+	 * than one request per second; if so, increment the id value
+	 * until we find an unused one. We insert entries in LRU fashion
+	 * at the head of the list, so if the first entry doesn't match,
+	 * we know for certain that our id is unused.
+ */
+ if ((t = time(NULL)) == -1) {
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
+ return (NULL);
+ }
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+/*
+ * PUBLIC: ct_entry *get_tableent __P((long));
+ */
+ct_entry *
+get_tableent(id)
+ long id;
+{
+ ct_entry *ctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries))
+ if (ctp->ct_id == id)
+ return (ctp);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t));
+ */
+ct_entry *
+__dbsrv_sharedb(db_ctp, name, subdb, type, flags)
+ ct_entry *db_ctp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * PUBLIC: ct_entry *__dbsrv_shareenv __P((ct_entry *, home_entry *, u_int32_t));
+ */
+ct_entry *
+__dbsrv_shareenv(env_ctp, home, flags)
+ ct_entry *env_ctp;
+ home_entry *home;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void __dbsrv_active __P((ct_entry *));
+ */
+void
+__dbsrv_active(ctp)
+ ct_entry *ctp;
+{
+ time_t t;
+ ct_entry *envctp;
+
+ if (ctp == NULL)
+ return;
+ if ((t = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = t;
+ if ((envctp = ctp->ct_envparent) == NULL)
+ return;
+ *(envctp->ct_activep) = t;
+ return;
+}
+
+/*
+ * PUBLIC: int __db_close_int __P((long, u_int32_t));
+ */
+int
+__db_close_int(id, flags)
+ long id;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(dbp, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbc_close_int __P((ct_entry *));
+ */
+int
+__dbc_close_int(dbc_ctp)
+ ct_entry *dbc_ctp;
+{
+ DBC *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_close(dbc);
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbenv_close_int __P((long, u_int32_t, int));
+ */
+int
+__dbenv_close_int(id, flags, force)
+ long id;
+ u_int32_t flags;
+ int force;
+{
+ DB_ENV *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
+ dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
+
+ ret = dbenv->close(dbenv, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+static int
+add_home(home)
+ char *home;
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
+ return (ret);
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ hp->passwd = NULL;
+ /*
+ * This loop is to remove any trailing path separators,
+ * to assure hp->name points to the last component.
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+ /*
+	 * Make sure no identical home name has already been added
+	 * before inserting this one into the list.
+ */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ __os_free(NULL, hp->home);
+ __os_free(NULL, hp);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+static int
+add_passwd(passwd)
+ char *passwd;
+{
+ home_entry *hp;
+
+ /*
+ * We add the passwd to the last given home dir. If there
+ * isn't a home dir, or the most recent one already has a
+ * passwd, then there is a user error.
+ */
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL || hp->passwd != NULL)
+ return (EINVAL);
+ /*
+ * We've already strdup'ed the passwd above, so we don't need
+ * to malloc new space, just point to it.
+ */
+ hp->passwd = passwd;
+ return (0);
+}
+
+/*
+ * PUBLIC: home_entry *get_home __P((char *));
+ */
+home_entry *
+get_home(name)
+ char *name;
+{
+ home_entry *hp;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries))
+ if (strcmp(name, hp->name) == 0)
+ return (hp);
+ return (NULL);
+}
+
+static int
+env_recover(progname)
+ char *progname;
+{
+ DB_ENV *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ exit(EXIT_FAILURE);
+ }
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(dbenv, hp->passwd,
+ DB_ENCRYPT_AES);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(dbenv, hp->home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto error;
+ }
+
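+		/*
+		 * Note: the following "if (0)" block is reached only via the
+		 * "goto error" above; on success it is skipped entirely.
+		 */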
+ if (0) {
+error: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
diff --git a/libdb/rpc_server/c/db_server_xdr.c b/libdb/rpc_server/c/db_server_xdr.c
new file mode 100644
index 0000000..bfe2b6c
--- /dev/null
+++ b/libdb/rpc_server/c/db_server_xdr.c
@@ -0,0 +1,1512 @@
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <rpc/rpc.h>
+
+#include <strings.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+
+bool_t
+xdr___env_cachesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->gbytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->bytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ncache))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_cachesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->timeout))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbremove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbremove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbremove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbremove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbrename_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbrename_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->newname, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbrename_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbrename_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_encrypt_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_encrypt_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->passwd, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_encrypt_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_encrypt_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->onoff))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->parentcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_discard_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_discard_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_discard_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_discard_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_prepare_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_prepare_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->gid, 128))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_prepare_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_prepare_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_recover_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_recover_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->count))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_recover_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_recover_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->txn.txn_val, (u_int *) &objp->txn.txn_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->gid.gid_val, (u_int *) &objp->gid.gid_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->retcount))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_associate_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_associate_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->sdbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_associate_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_associate_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->maxkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->minkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_encrypt_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_encrypt_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->passwd, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_encrypt_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_encrypt_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->extentsize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ffactor))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->nelem))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->less))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->equal))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->greater))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->lorder))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbflags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->lorder))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pagesize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pget_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_pget_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pget_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_pget_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->delim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pad))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->newname, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->stats.stats_val, (u_int *) &objp->stats.stats_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_truncate_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_truncate_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_truncate_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_truncate_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->count))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->curs.curs_val, (u_int *) &objp->curs.curs_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dupcount))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_pget_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_pget_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_pget_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_pget_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+#endif /* HAVE_RPC */
diff --git a/libdb/rpc_server/c/gen_db_server.c b/libdb/rpc_server/c/gen_db_server.c
new file mode 100644
index 0000000..d5a4cca
--- /dev/null
+++ b/libdb/rpc_server/c/gen_db_server.c
@@ -0,0 +1,1169 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+#include <rpc/xdr.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/*
+ * PUBLIC: __env_cachesize_reply *__db_env_cachesize_4001
+ * PUBLIC: __P((__env_cachesize_msg *, struct svc_req *));
+ */
+__env_cachesize_reply *
+__db_env_cachesize_4001(msg, req)
+ __env_cachesize_msg *msg;
+ struct svc_req *req;
+{
+ static __env_cachesize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_cachesize_proc(msg->dbenvcl_id,
+ msg->gbytes,
+ msg->bytes,
+ msg->ncache,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_close_reply *__db_env_close_4001 __P((__env_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_close_reply *
+__db_env_close_4001(msg, req)
+ __env_close_msg *msg;
+ struct svc_req *req;
+{
+ static __env_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_close_proc(msg->dbenvcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_create_reply *__db_env_create_4001 __P((__env_create_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_create_reply *
+__db_env_create_4001(msg, req)
+ __env_create_msg *msg;
+ struct svc_req *req;
+{
+ static __env_create_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_create_proc(msg->timeout,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_dbremove_reply *__db_env_dbremove_4001
+ * PUBLIC: __P((__env_dbremove_msg *, struct svc_req *));
+ */
+__env_dbremove_reply *
+__db_env_dbremove_4001(msg, req)
+ __env_dbremove_msg *msg;
+ struct svc_req *req;
+{
+ static __env_dbremove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_dbremove_proc(msg->dbenvcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_dbrename_reply *__db_env_dbrename_4001
+ * PUBLIC: __P((__env_dbrename_msg *, struct svc_req *));
+ */
+__env_dbrename_reply *
+__db_env_dbrename_4001(msg, req)
+ __env_dbrename_msg *msg;
+ struct svc_req *req;
+{
+ static __env_dbrename_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_dbrename_proc(msg->dbenvcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ (*msg->newname == '\0') ? NULL : msg->newname,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_encrypt_reply *__db_env_encrypt_4001
+ * PUBLIC: __P((__env_encrypt_msg *, struct svc_req *));
+ */
+__env_encrypt_reply *
+__db_env_encrypt_4001(msg, req)
+ __env_encrypt_msg *msg;
+ struct svc_req *req;
+{
+ static __env_encrypt_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_encrypt_proc(msg->dbenvcl_id,
+ (*msg->passwd == '\0') ? NULL : msg->passwd,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_flags_reply *__db_env_flags_4001 __P((__env_flags_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_flags_reply *
+__db_env_flags_4001(msg, req)
+ __env_flags_msg *msg;
+ struct svc_req *req;
+{
+ static __env_flags_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_flags_proc(msg->dbenvcl_id,
+ msg->flags,
+ msg->onoff,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_open_reply *__db_env_open_4001 __P((__env_open_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_open_reply *
+__db_env_open_4001(msg, req)
+ __env_open_msg *msg;
+ struct svc_req *req;
+{
+ static __env_open_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_open_proc(msg->dbenvcl_id,
+ (*msg->home == '\0') ? NULL : msg->home,
+ msg->flags,
+ msg->mode,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_remove_reply *__db_env_remove_4001 __P((__env_remove_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_remove_reply *
+__db_env_remove_4001(msg, req)
+ __env_remove_msg *msg;
+ struct svc_req *req;
+{
+ static __env_remove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_remove_proc(msg->dbenvcl_id,
+ (*msg->home == '\0') ? NULL : msg->home,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_abort_reply *__db_txn_abort_4001 __P((__txn_abort_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_abort_reply *
+__db_txn_abort_4001(msg, req)
+ __txn_abort_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_abort_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_abort_proc(msg->txnpcl_id,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_begin_reply *__db_txn_begin_4001 __P((__txn_begin_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_begin_reply *
+__db_txn_begin_4001(msg, req)
+ __txn_begin_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_begin_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_begin_proc(msg->dbenvcl_id,
+ msg->parentcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_commit_reply *__db_txn_commit_4001 __P((__txn_commit_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_commit_reply *
+__db_txn_commit_4001(msg, req)
+ __txn_commit_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_commit_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_commit_proc(msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_discard_reply *__db_txn_discard_4001
+ * PUBLIC: __P((__txn_discard_msg *, struct svc_req *));
+ */
+__txn_discard_reply *
+__db_txn_discard_4001(msg, req)
+ __txn_discard_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_discard_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_discard_proc(msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_prepare_reply *__db_txn_prepare_4001
+ * PUBLIC: __P((__txn_prepare_msg *, struct svc_req *));
+ */
+__txn_prepare_reply *
+__db_txn_prepare_4001(msg, req)
+ __txn_prepare_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_prepare_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_prepare_proc(msg->txnpcl_id,
+ msg->gid,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_recover_reply *__db_txn_recover_4001
+ * PUBLIC: __P((__txn_recover_msg *, struct svc_req *));
+ */
+__txn_recover_reply *
+__db_txn_recover_4001(msg, req)
+ __txn_recover_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_recover_reply reply; /* must be static */
+ static int __txn_recover_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__txn_recover_free)
+ xdr_free((xdrproc_t)xdr___txn_recover_reply, (void *)&reply);
+ __txn_recover_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.txn.txn_val = NULL;
+ reply.gid.gid_val = NULL;
+
+ __txn_recover_proc(msg->dbenvcl_id,
+ msg->count,
+ msg->flags,
+ &reply,
+ &__txn_recover_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_associate_reply *__db_db_associate_4001
+ * PUBLIC: __P((__db_associate_msg *, struct svc_req *));
+ */
+__db_associate_reply *
+__db_db_associate_4001(msg, req)
+ __db_associate_msg *msg;
+ struct svc_req *req;
+{
+ static __db_associate_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_associate_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->sdbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_bt_maxkey_reply *__db_db_bt_maxkey_4001
+ * PUBLIC: __P((__db_bt_maxkey_msg *, struct svc_req *));
+ */
+__db_bt_maxkey_reply *
+__db_db_bt_maxkey_4001(msg, req)
+ __db_bt_maxkey_msg *msg;
+ struct svc_req *req;
+{
+ static __db_bt_maxkey_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_bt_maxkey_proc(msg->dbpcl_id,
+ msg->maxkey,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_bt_minkey_reply *__db_db_bt_minkey_4001
+ * PUBLIC: __P((__db_bt_minkey_msg *, struct svc_req *));
+ */
+__db_bt_minkey_reply *
+__db_db_bt_minkey_4001(msg, req)
+ __db_bt_minkey_msg *msg;
+ struct svc_req *req;
+{
+ static __db_bt_minkey_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_bt_minkey_proc(msg->dbpcl_id,
+ msg->minkey,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_close_reply *__db_db_close_4001 __P((__db_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_close_reply *
+__db_db_close_4001(msg, req)
+ __db_close_msg *msg;
+ struct svc_req *req;
+{
+ static __db_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_close_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_create_reply *__db_db_create_4001 __P((__db_create_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_create_reply *
+__db_db_create_4001(msg, req)
+ __db_create_msg *msg;
+ struct svc_req *req;
+{
+ static __db_create_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_create_proc(msg->dbenvcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_del_reply *__db_db_del_4001 __P((__db_del_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_del_reply *
+__db_db_del_4001(msg, req)
+ __db_del_msg *msg;
+ struct svc_req *req;
+{
+ static __db_del_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_del_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_encrypt_reply *__db_db_encrypt_4001 __P((__db_encrypt_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_encrypt_reply *
+__db_db_encrypt_4001(msg, req)
+ __db_encrypt_msg *msg;
+ struct svc_req *req;
+{
+ static __db_encrypt_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_encrypt_proc(msg->dbpcl_id,
+ (*msg->passwd == '\0') ? NULL : msg->passwd,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_extentsize_reply *__db_db_extentsize_4001
+ * PUBLIC: __P((__db_extentsize_msg *, struct svc_req *));
+ */
+__db_extentsize_reply *
+__db_db_extentsize_4001(msg, req)
+ __db_extentsize_msg *msg;
+ struct svc_req *req;
+{
+ static __db_extentsize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_extentsize_proc(msg->dbpcl_id,
+ msg->extentsize,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_flags_reply *__db_db_flags_4001 __P((__db_flags_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_flags_reply *
+__db_db_flags_4001(msg, req)
+ __db_flags_msg *msg;
+ struct svc_req *req;
+{
+ static __db_flags_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_flags_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_get_reply *__db_db_get_4001 __P((__db_get_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_get_reply *
+__db_db_get_4001(msg, req)
+ __db_get_msg *msg;
+ struct svc_req *req;
+{
+ static __db_get_reply reply; /* must be static */
+ static int __db_get_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_get_free)
+ xdr_free((xdrproc_t)xdr___db_get_reply, (void *)&reply);
+ __db_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __db_get_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_get_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_h_ffactor_reply *__db_db_h_ffactor_4001
+ * PUBLIC: __P((__db_h_ffactor_msg *, struct svc_req *));
+ */
+__db_h_ffactor_reply *
+__db_db_h_ffactor_4001(msg, req)
+ __db_h_ffactor_msg *msg;
+ struct svc_req *req;
+{
+ static __db_h_ffactor_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_h_ffactor_proc(msg->dbpcl_id,
+ msg->ffactor,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_h_nelem_reply *__db_db_h_nelem_4001 __P((__db_h_nelem_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_h_nelem_reply *
+__db_db_h_nelem_4001(msg, req)
+ __db_h_nelem_msg *msg;
+ struct svc_req *req;
+{
+ static __db_h_nelem_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_h_nelem_proc(msg->dbpcl_id,
+ msg->nelem,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_key_range_reply *__db_db_key_range_4001
+ * PUBLIC: __P((__db_key_range_msg *, struct svc_req *));
+ */
+__db_key_range_reply *
+__db_db_key_range_4001(msg, req)
+ __db_key_range_msg *msg;
+ struct svc_req *req;
+{
+ static __db_key_range_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_key_range_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_lorder_reply *__db_db_lorder_4001 __P((__db_lorder_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_lorder_reply *
+__db_db_lorder_4001(msg, req)
+ __db_lorder_msg *msg;
+ struct svc_req *req;
+{
+ static __db_lorder_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_lorder_proc(msg->dbpcl_id,
+ msg->lorder,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_open_reply *__db_db_open_4001 __P((__db_open_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_open_reply *
+__db_db_open_4001(msg, req)
+ __db_open_msg *msg;
+ struct svc_req *req;
+{
+ static __db_open_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_open_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->type,
+ msg->flags,
+ msg->mode,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_pagesize_reply *__db_db_pagesize_4001
+ * PUBLIC: __P((__db_pagesize_msg *, struct svc_req *));
+ */
+__db_pagesize_reply *
+__db_db_pagesize_4001(msg, req)
+ __db_pagesize_msg *msg;
+ struct svc_req *req;
+{
+ static __db_pagesize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_pagesize_proc(msg->dbpcl_id,
+ msg->pagesize,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_pget_reply *__db_db_pget_4001 __P((__db_pget_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_pget_reply *
+__db_db_pget_4001(msg, req)
+ __db_pget_msg *msg;
+ struct svc_req *req;
+{
+ static __db_pget_reply reply; /* must be static */
+ static int __db_pget_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_pget_free)
+ xdr_free((xdrproc_t)xdr___db_pget_reply, (void *)&reply);
+ __db_pget_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.skeydata.skeydata_val = NULL;
+ reply.pkeydata.pkeydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __db_pget_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->skeydlen,
+ msg->skeydoff,
+ msg->skeyulen,
+ msg->skeyflags,
+ msg->skeydata.skeydata_val,
+ msg->skeydata.skeydata_len,
+ msg->pkeydlen,
+ msg->pkeydoff,
+ msg->pkeyulen,
+ msg->pkeyflags,
+ msg->pkeydata.pkeydata_val,
+ msg->pkeydata.pkeydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_pget_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_put_reply *__db_db_put_4001 __P((__db_put_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_put_reply *
+__db_db_put_4001(msg, req)
+ __db_put_msg *msg;
+ struct svc_req *req;
+{
+ static __db_put_reply reply; /* must be static */
+ static int __db_put_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_put_free)
+ xdr_free((xdrproc_t)xdr___db_put_reply, (void *)&reply);
+ __db_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __db_put_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_put_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_delim_reply *__db_db_re_delim_4001
+ * PUBLIC: __P((__db_re_delim_msg *, struct svc_req *));
+ */
+__db_re_delim_reply *
+__db_db_re_delim_4001(msg, req)
+ __db_re_delim_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_delim_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_delim_proc(msg->dbpcl_id,
+ msg->delim,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_len_reply *__db_db_re_len_4001 __P((__db_re_len_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_re_len_reply *
+__db_db_re_len_4001(msg, req)
+ __db_re_len_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_len_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_len_proc(msg->dbpcl_id,
+ msg->len,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_pad_reply *__db_db_re_pad_4001 __P((__db_re_pad_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_re_pad_reply *
+__db_db_re_pad_4001(msg, req)
+ __db_re_pad_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_pad_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_pad_proc(msg->dbpcl_id,
+ msg->pad,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_remove_reply *__db_db_remove_4001 __P((__db_remove_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_remove_reply *
+__db_db_remove_4001(msg, req)
+ __db_remove_msg *msg;
+ struct svc_req *req;
+{
+ static __db_remove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_remove_proc(msg->dbpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_rename_reply *__db_db_rename_4001 __P((__db_rename_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_rename_reply *
+__db_db_rename_4001(msg, req)
+ __db_rename_msg *msg;
+ struct svc_req *req;
+{
+ static __db_rename_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_rename_proc(msg->dbpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ (*msg->newname == '\0') ? NULL : msg->newname,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_stat_reply *__db_db_stat_4001 __P((__db_stat_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_stat_reply *
+__db_db_stat_4001(msg, req)
+ __db_stat_msg *msg;
+ struct svc_req *req;
+{
+ static __db_stat_reply reply; /* must be static */
+ static int __db_stat_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_stat_free)
+ xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)&reply);
+ __db_stat_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.stats.stats_val = NULL;
+
+ __db_stat_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply,
+ &__db_stat_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_sync_reply *__db_db_sync_4001 __P((__db_sync_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_sync_reply *
+__db_db_sync_4001(msg, req)
+ __db_sync_msg *msg;
+ struct svc_req *req;
+{
+ static __db_sync_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_sync_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_truncate_reply *__db_db_truncate_4001
+ * PUBLIC: __P((__db_truncate_msg *, struct svc_req *));
+ */
+__db_truncate_reply *
+__db_db_truncate_4001(msg, req)
+ __db_truncate_msg *msg;
+ struct svc_req *req;
+{
+ static __db_truncate_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_truncate_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_cursor_reply *__db_db_cursor_4001 __P((__db_cursor_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_cursor_reply *
+__db_db_cursor_4001(msg, req)
+ __db_cursor_msg *msg;
+ struct svc_req *req;
+{
+ static __db_cursor_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_cursor_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_join_reply *__db_db_join_4001 __P((__db_join_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_join_reply *
+__db_db_join_4001(msg, req)
+ __db_join_msg *msg;
+ struct svc_req *req;
+{
+ static __db_join_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_join_proc(msg->dbpcl_id,
+ msg->curs.curs_val,
+ msg->curs.curs_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_close_reply *__db_dbc_close_4001 __P((__dbc_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_close_reply *
+__db_dbc_close_4001(msg, req)
+ __dbc_close_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_close_proc(msg->dbccl_id,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_count_reply *__db_dbc_count_4001 __P((__dbc_count_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_count_reply *
+__db_dbc_count_4001(msg, req)
+ __dbc_count_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_count_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_count_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_del_reply *__db_dbc_del_4001 __P((__dbc_del_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_del_reply *
+__db_dbc_del_4001(msg, req)
+ __dbc_del_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_del_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_del_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_dup_reply *__db_dbc_dup_4001 __P((__dbc_dup_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_dup_reply *
+__db_dbc_dup_4001(msg, req)
+ __dbc_dup_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_dup_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_dup_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_get_reply *__db_dbc_get_4001 __P((__dbc_get_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_get_reply *
+__db_dbc_get_4001(msg, req)
+ __dbc_get_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_get_reply reply; /* must be static */
+ static int __dbc_get_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_get_free)
+ xdr_free((xdrproc_t)xdr___dbc_get_reply, (void *)&reply);
+ __dbc_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __dbc_get_proc(msg->dbccl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_get_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_pget_reply *__db_dbc_pget_4001 __P((__dbc_pget_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_pget_reply *
+__db_dbc_pget_4001(msg, req)
+ __dbc_pget_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_pget_reply reply; /* must be static */
+ static int __dbc_pget_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_pget_free)
+ xdr_free((xdrproc_t)xdr___dbc_pget_reply, (void *)&reply);
+ __dbc_pget_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.skeydata.skeydata_val = NULL;
+ reply.pkeydata.pkeydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __dbc_pget_proc(msg->dbccl_id,
+ msg->skeydlen,
+ msg->skeydoff,
+ msg->skeyulen,
+ msg->skeyflags,
+ msg->skeydata.skeydata_val,
+ msg->skeydata.skeydata_len,
+ msg->pkeydlen,
+ msg->pkeydoff,
+ msg->pkeyulen,
+ msg->pkeyflags,
+ msg->pkeydata.pkeydata_val,
+ msg->pkeydata.pkeydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_pget_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_put_reply *__db_dbc_put_4001 __P((__dbc_put_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_put_reply *
+__db_dbc_put_4001(msg, req)
+ __dbc_put_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_put_reply reply; /* must be static */
+ static int __dbc_put_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_put_free)
+ xdr_free((xdrproc_t)xdr___dbc_put_reply, (void *)&reply);
+ __dbc_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __dbc_put_proc(msg->dbccl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_put_free);
+ return (&reply);
+}
+
diff --git a/libdb/rpc_server/clsrv.html b/libdb/rpc_server/clsrv.html
new file mode 100644
index 0000000..599ad56
--- /dev/null
+++ b/libdb/rpc_server/clsrv.html
@@ -0,0 +1,453 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.76 [en] (X11; U; FreeBSD 4.3-RELEASE i386) [Netscape]">
+</head>
+<body>
+
+<center>
+<h1>
+&nbsp;Client/Server Interface for Berkeley DB</h1></center>
+
+<center><i>Susan LoVerso</i>
+<br><i>sue@sleepycat.com</i>
+<br><i>Rev 1.3</i>
+<br><i>1999 Nov 29</i></center>
+
+<p>We provide an interface allowing client/server access to Berkeley DB.&nbsp;&nbsp;
+Our goal is to provide a client and server library to allow users to separate
+the functionality of their applications yet still have access to the full
+benefits of Berkeley DB.&nbsp; The goal is to provide a totally seamless
+interface with minimal modification to existing applications as well.
+<p>The client/server interface for Berkeley DB can be broken up into several
+layers.&nbsp; At the lowest level there is the transport mechanism to send
+out the messages over the network.&nbsp; Above that layer is the messaging
+layer to interpret what comes over the wire, and bundle/unbundle message
+contents.&nbsp; The next layer is Berkeley DB itself.
+<p>The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).&nbsp;
+We declare our message types and operations supported by our program and
+the RPC library and utilities pretty much take care of the rest.&nbsp;
+The
+<i>rpcgen</i> program generates all of the low level code needed.&nbsp;
+We need to define both sides of the RPC.
+<br>&nbsp;
+<h2>
+<a NAME="DB Modifications"></a>DB Modifications</h2>
+To achieve the goal of a seamless interface, it is necessary to impose
+a constraint on the application. That constraint is simply that all database
+access must be done through an open environment.&nbsp; I.e. this model
+does not support standalone databases.&nbsp; The reason for this constraint
+is so that we have an environment structure internally to store our connection
+to the server.&nbsp; Imposing this constraint means that we can provide
+the seamless interface just by adding a single environment method: <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>.
+<p>The planned interface for this method is:
+<pre>DBENV->set_rpc_server(dbenv,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* DB_ENV structure */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; hostname,&nbsp;&nbsp; /* Host of server */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; cl_timeout, /* Client timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; srv_timeout,/* Server timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags);&nbsp;&nbsp;&nbsp;&nbsp; /* Flags: unused */</pre>
+This new method takes the hostname of the server, establishes our connection
+and an environment on the server.&nbsp; If a server timeout is specified,
+then we send that to the server as well (and the server may or may not
+choose to use that value).&nbsp; This timeout is how long the server will
+allow the environment to remain idle before declaring it dead and releasing
+resources on the server.&nbsp; The pointer to the connection is stored
+on the client in the DBENV structure and is used by all other methods to
+figure out with whom to communicate.&nbsp; If a client timeout is specified,
+it indicates how long the client is willing to wait for a reply from the
+server.&nbsp; If the values are 0, then defaults are used.&nbsp; Flags
+is currently unused, but exists because we always need to have a placeholder
+for flags and it would be used for specifying authentication desired (were
+we to provide an authentication scheme at some point) or other uses not
+thought of yet!
+<p>This client code is part of the monolithic DB library.&nbsp; The user
+accesses the client functions via a new flag to <a href="../docs/api_c/db_env_create.html">db_env_create()</a>.&nbsp;
+That flag is DB_CLIENT.&nbsp; By using this flag the user indicates they
+want to have the client methods rather than the standard methods for the
+environment.&nbsp; Also by issuing this flag, the user needs to connect
+to the server via the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.
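+<p>A minimal sketch of what a client application might look like follows.
+It assumes the planned interface shown above; the helper name
+<i>open_remote_env()</i> and the host name are made up for illustration and
+are not part of the actual interface.
+<pre>#include &lt;db.h&gt;
+
+int
+open_remote_env(DB_ENV **dbenvp)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	/* DB_CLIENT selects the client method functions. */
+	if ((ret = db_env_create(&amp;dbenv, DB_CLIENT)) != 0)
+		return (ret);
+
+	/*
+	 * Connect to a hypothetical server host.  Zero time-outs mean
+	 * "use the defaults"; flags are currently unused.
+	 */
+	if ((ret = dbenv->set_rpc_server(dbenv,
+	    "dbserver.example.com", 0, 0, 0)) != 0) {
+		(void)dbenv->close(dbenv, 0);
+		return (ret);
+	}
+	*dbenvp = dbenv;
+	return (0);
+}</pre>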
+<p>We need two new fields in the <i>DB_ENV </i>structure.&nbsp; One is
+the socket descriptor to communicate to the server, the other field is
+the client identifier the server gives to us.&nbsp; The <i>DB, </i>and<i>
+DBC </i>only need one additional field, the client identifier.&nbsp; The
+<i>DB_TXN</i>
+structure does not need modification, we are overloading the <i>txn_id
+</i>field.
+<h2>
+Issues</h2>
+We need to figure out what to do in case of client and server crashes.&nbsp;
+Both the client library and the server program are stateful.&nbsp; They
+both consume local resources during the lifetime of the connection.&nbsp;
+Should one end drop that connection, the other side needs to release those
+resources.
+<p>If the server crashes, then the client will get an error back.&nbsp;
+I have chosen to implement time-outs on the client side, using a default
+or allowing the application to specify one through the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.&nbsp; Either the current operation will time-out waiting for the
+reply or the next operation called will time out (or get back some other
+kind of error regarding the server's non-existence).&nbsp; In any case,
+if the client application gets back such an error, it should abort any
+open transactions locally, close any databases, and close its environment.&nbsp;
+It may then decide to retry to connect to the server periodically or whenever
+it comes back.&nbsp; If the last operation a client did was a transaction
+commit that did not return or timed out from the server, the client cannot
+determine if the transaction was committed or not but must release the
+local transaction resources. Once the server is back up, recovery must
+be run on the server.&nbsp;&nbsp; If the transaction commit completed on
+the server before the crash, then the operation is redone; if the transaction
+commit did not get to the server, the pieces of the transaction are undone
+on recovery.&nbsp; The client can then re-establish its connection and begin
+again.&nbsp; This is effectively like beginning over.&nbsp; The client
+cannot use ID's from its previous connection to the server.&nbsp; However,
+if recovery is run, then consistency is assured.
+<p>If the client crashes, the server needs to somehow figure this out.&nbsp;
+The server is just sitting there waiting for a request to come in.&nbsp;
+A server must be able to time-out a client.&nbsp; Similar to ftpd, if a
+connection is idle for N seconds, then the server decides the client is
+dead and releases that client's resources, aborting any open transactions,
+closing any open databases and environments.&nbsp;&nbsp; The server timing
+out a client is not a trivial issue however.&nbsp; The generated function
+for the server just calls <i>svc_run()</i>.&nbsp; The server code I write
+contains procedures to do specific things.&nbsp; We do not have access
+to the code calling <i>select()</i>.&nbsp; Timing out the select is not
+good enough even if we could do so.&nbsp; We want to time-out idle environments,
+not simply cause a time-out if the server is idle a while.&nbsp; See the
+discussion of the <a href="#The Server Program">server program</a> for
+a description of how we accomplish this.
+<p>Since rpcgen generates the main() function of the server, I do not yet
+know how we are going to have the server multi-threaded or multi-process
+without changing the generated code.&nbsp; The RPC book indicates that
+the only way to accomplish this is through modifying the generated code
+in the server.&nbsp; <b>For the moment we will ignore this issue while
+we get the core server working, as it is only a performance issue.</b>
+<p>We do not do any security or authentication.&nbsp; Someone could get
+the code and modify it to spoof messages, trick the server, etc.&nbsp;
+RPC has some amount of authentication built into it.&nbsp; I haven't yet
+looked into it much to know if we want to use it or just point a user at
+it.&nbsp; The changes to the client code are fairly minor, as are the changes
+to our server procs.&nbsp; We would have to add code to
+a <i>sed</i> script or <i>awk</i> script to change the generated server
+code (yet again) in the dispatch routine to perform authentication.
+<p>We will need to get an official program number from Sun.&nbsp; We can
+get this by sending mail to <i>rpc@sun.com</i> and presumably at some point
+they will send us back a program number that we will encode into our XDR
+description file.&nbsp; Until we release this we can use a program number
+in the "user defined" number space.
+<br>&nbsp;
+<h2>
+<a NAME="The Server Program"></a>The Server Program</h2>
+The server is a standalone program that the user builds and runs, probably
+as a daemon like process.&nbsp; This program is linked against the Berkeley
+DB library and the RPC library (which is part of the C library on my FreeBSD
+machine, others may have/need <i>-lrpclib</i>).&nbsp; The server basically
+is a slave to the client process.&nbsp; All messages from the client are
+synchronous and two-way.&nbsp; The server handles messages one at a time,
+and sends a reply back before getting another message.&nbsp; There are
+no asynchronous messages generated by the server to the client.
+<p>We have made a choice to modify the generated code for the server.&nbsp;
+The changes will be minimal, generally calling functions we write that
+are in other source files.&nbsp; The first change is adding a call to our
+time-out function as described below.&nbsp; The second change is changing
+the name of the generated <i>main()</i> function to <i>__dbsrv_main()</i>,
+and adding our own <i>main()</i> function so that we can parse options,
+and set up other initialization we require.&nbsp; I have a <i>sed</i> script
+that is run from the distribution scripts that massages the generated code
+to make these minor changes.
+<p>Primarily the code needed for the server is the collection of the specified
+RPC functions.&nbsp; Each function receives the structure indicated, and
+our code takes out what it needs and passes the information into DB itself.&nbsp;
+The server needs to maintain a translation table for identifiers that we
+pass back to the client for the environment, transaction and database handles.
+<p>The table that the server maintains, assuming one client per server
+process/thread, should contain the handle to the environment, database
+or transaction, a link to maintain parent/child relationships between transactions,
+or databases and cursors, this handle's identifier, a type so that we can
+error if the client passes us a bad id for this call, and a link to this
+handle's environment entry (for time out/activity purposes).&nbsp; The
+table contains, in entries used by environments, a time-out value and an
+activity time stamp.&nbsp; Its use is described below for timing out idle
+clients.
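+<p>A rough sketch of such a table entry, in C, might look like the following.
+The field names echo those used in the server sources (the structure there is
+called <i>ct_entry</i>), but the layout here is simplified and illustrative
+only; just the roles described above are intended to carry over.
+<pre>#include &lt;time.h&gt;
+
+typedef struct ct_entry_sketch {
+	long	 ct_id;		/* Identifier handed back to the client */
+	int	 ct_type;	/* Env, database, txn or cursor, for checking */
+	void	*ct_anyp;	/* The DB_ENV *, DB *, DB_TXN * or DBC * handle */
+	struct ct_entry_sketch *ct_parent;	/* Parent txn, or database for a cursor */
+	struct ct_entry_sketch *ct_envparent;	/* Owning environment entry */
+	long	 ct_timeout;	/* Idle time-out (environment entries only) */
+	time_t	 ct_active;	/* Activity time stamp (environment entries only) */
+} ct_entry_sketch;</pre>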
+<p>Here is how we time out clients in the server.&nbsp; We have to modify
+the generated server code, but only to add one line during the dispatch
+function to run the time-out function.&nbsp; The call is made right before
+the return of the dispatch function, after the reply is sent to the client,
+so that clients aren't kept waiting for server bookkeeping activities.&nbsp;
+This time-out function then runs every time the server processes a request.&nbsp;
+In the time-out function we maintain a time-out hint that is the youngest
+environment to time-out.&nbsp; If the current time is less than the hint
+we know we do not need to run through the list of open handles.&nbsp; If
+the hint is expired, then we go through the list of open environment handles,
+and if they are past their expiration, then we close them and clean up.&nbsp;
+If they are not, we set up the hint for the next time.
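+<p>In outline, the check called from the dispatch routine could look
+something like this.&nbsp; The array, the function name and the bookkeeping
+are illustrative only; the real code walks the server's open-handle table.
+<pre>#include &lt;time.h&gt;
+
+struct env_entry {
+	time_t	lastused;	/* Last activity for this environment */
+	long	timeout;	/* Allowed idle seconds */
+	int	open;		/* Still in use? */
+};
+
+static struct env_entry envs[64];	/* Hypothetical open-environment list */
+static time_t hint;			/* Youngest possible expiration */
+
+void
+check_timeouts(time_t now)
+{
+	time_t next, expires;
+	int i;
+
+	if (now &lt; hint)			/* Nothing can have expired yet. */
+		return;
+
+	next = 0;
+	for (i = 0; i &lt; 64; i++) {
+		if (!envs[i].open)
+			continue;
+		expires = envs[i].lastused + envs[i].timeout;
+		if (expires &lt;= now)
+			envs[i].open = 0;	/* Close and release the handles here. */
+		else if (next == 0 || expires &lt; next)
+			next = expires;
+	}
+	hint = next;			/* Set up the hint for the next time. */
+}</pre>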
+<p>Each entry in the open handle table has a pointer back to its environment's
+entry.&nbsp; Every operation within this environment can then update the
+single environment activity record.&nbsp; Every environment can have a
+different time-out.&nbsp; The <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server
+</a>call
+takes a server time-out value.&nbsp; If this value is 0 then a default
+(currently 5 minutes) is used.&nbsp; This time-out value is only a hint
+to the server.&nbsp; It may choose to disregard this value or set the time-out
+based on its own implementation.
+<p>For completeness, the flaws of this time-out implementation should be
+pointed out.&nbsp; First, it is possible that a client could crash with
+open handles, and no other requests come in to the server.&nbsp; Therefore
+the time-out function never gets run and those resources are not released
+(until a request does come in).&nbsp; Similarly, this time-out is not exact.&nbsp;
+The time-out function uses its hint and if it computes a hint on one run,
+an earlier time-out might be created before that time-out expires.&nbsp;
+This issue simply yields a handle that doesn't get released until that
+original hint expires.&nbsp; To illustrate, consider that at the time that
+the time-out function is run, the youngest time-out is 5 minutes in the
+future.&nbsp; Soon after, a new environment is opened that has a time-out
+of 1 minute.&nbsp; If this environment becomes idle (and other operations
+are going on), the time-out function will not release that environment
+until the original 5 minute hint expires.&nbsp; This is not a problem since
+the resources will eventually be released.
+<p>On a similar note, if a client crashes during an RPC, our reply generates
+a SIGPIPE, and our server crashes unless we catch it.&nbsp; Using <i>signal(SIGPIPE,
+SIG_IGN) </i>we can ignore it, and the server will go on.&nbsp; This is
+a call&nbsp; in our <i>main()</i> function that we write.&nbsp; Eventually
+this client's handles would be timed out as described above.&nbsp; We need
+this only for the unfortunate window of a client crashing during the RPC.
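+<p>The call itself is a single line; a sketch of where it sits in the
+hand-written <i>main()</i> follows (the surrounding code is illustrative).
+<pre>#include &lt;signal.h&gt;
+
+int
+main(int argc, char *argv[])
+{
+	(void)argc;
+	(void)argv;
+
+	/* A client dying mid-RPC must not take the server down with it. */
+	(void)signal(SIGPIPE, SIG_IGN);
+
+	/* ... parse options here, then hand control to the renamed __dbsrv_main(). */
+	return (0);
+}</pre>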
+<p>The options below are primarily for control of the program itself.&nbsp;
+Details relating to databases and environments should be passed from the
+client to the server, since the server can serve many clients, many environments
+and many databases.&nbsp; Therefore it makes more sense for the client
+to set the cache size of its own environment, rather than setting a default
+cachesize on the server that applies as a blanket to any environment it
+may be called upon to open.&nbsp; Options are:
+<ul>
+<li>
+<b>-t&nbsp;</b> to set the default time-out given to an environment.</li>
+
+<li>
+<b>-T</b> to set the maximum time-out allowed for the server.</li>
+
+<li>
+<b>-L</b> to log the execution of the server process to a specified file.</li>
+
+<li>
+<b>-v</b> to run in verbose mode.</li>
+
+<li>
+<b>-M</b>&nbsp; to specify the maximum number of outstanding child server
+processes/threads we can have at any given time.&nbsp; The default is 10.
+<b>[We
+are not yet doing multiple threads/processes.]</b></li>
+</ul>
+
+<h2>
+The Client Code</h2>
+The client code contains all of the supported functions and methods used
+in this model.&nbsp; There are several methods in the <i>__db_env
+</i>and
+<i>__db</i>
+structures that currently do not apply, such as the callbacks.&nbsp; Those
+fields that are not applicable to the client model point to NULL to notify
+the user of their error.&nbsp; Some method functions remain unchanged
+as well, such as the error calls.
+<p>The client code contains each method function that goes along with the
+<a href="#Remote Procedure Calls">RPC
+calls</a> described elsewhere.&nbsp; The client library also contains its
+own version of <a href="../docs/api_c/env_create.html">db_env_create()</a>,
+which does not result in any messages going over to the server (since we
+do not yet know what server we are talking to).&nbsp; This function sets
+up the pointers to the correct client functions.
+<p>All of the method functions that handle the messaging have a basic flow
+similar to this (a sketch in C follows the list):
+<ul>
+<li>
+Local arg parsing that may be needed</li>
+
+<li>
+Marshalling the message header and the arguments we need to send to the
+server</li>
+
+<li>
+Sending the message</li>
+
+<li>
+Receiving a reply</li>
+
+<li>
+Unmarshalling the reply</li>
+
+<li>
+Local results processing that may be needed</li>
+</ul>
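+The sketch below walks through that flow for one method, using DB->close as
+the example.&nbsp; The message, reply and stub names match the RPC
+definitions elsewhere in this document, but the wrapper
+<i>client_db_close()</i> and its error handling are illustrative only; in the
+generated client code the identifier and the CLIENT pointer come out of the
+DB and DB_ENV handles rather than being passed in.
+<pre>#include "db_config.h"
+
+#include &lt;sys/types.h&gt;
+#include &lt;rpc/rpc.h&gt;
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+
+int
+client_db_close(CLIENT *cl, long dbpcl_id, u_int32_t flags)
+{
+	__db_close_msg msg;
+	__db_close_reply *replyp;
+
+	/* Marshal the arguments we need to send to the server. */
+	msg.dbpcl_id = dbpcl_id;
+	msg.flags = flags;
+
+	/* Send the message and wait for the reply (rpcgen client stub). */
+	if ((replyp = __db_db_close_4001(&amp;msg, cl)) == NULL)
+		return (DB_NOSERVER);
+
+	/* Unmarshal and do local results processing: the status rides in the reply. */
+	return (replyp->status);
+}</pre>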
+
+<h2>
+Generated Code</h2>
+Almost all of the code is generated from a source file describing the interface
+and an <i>awk</i> script.&nbsp;&nbsp; This awk script generates six (6)
+files for us.&nbsp; It also modifies one.&nbsp; The files are:
+<ol>
+<li>
+Client file - The C source file created containing the client code.</li>
+
+<li>
+Client template file - The C template source file created containing interfaces
+for handling client-local issues such as resource allocation, but with
+a consistent interface with the client code generated.</li>
+
+<li>
+Server file - The C source file created containing the server code.</li>
+
+<li>
+Server template file - The C template source file created containing interfaces
+for handling server-local issues such as resource allocation, calling into
+the DB library but with a consistent interface with the server code generated.</li>
+
+<li>
+XDR file - The XDR message description file created.</li>
+
+<li>
+Server sed file - A sed script that contains commands to apply to the server
+procedure file (i.e. the real source file that the server template file
+becomes) so that minor interface changes can be consistently and easily
+applied to the real code.</li>
+
+<li>
+Server procedure file - This is the file that is modified by the sed script
+generated.&nbsp; It originated from the server template file.</li>
+</ol>
+The awk script reads a source file, <i>db_server/rpc.src </i>that describes
+each operation and what sorts of arguments it takes and what it returns
+from the server.&nbsp; The syntax of the source file describes the interface
+to that operation.&nbsp; There are four (4) parts to the syntax:
+<ol>
+<li>
+<b>BEGIN</b> <b><i>function version# codetype</i></b> - begins a new functional
+interface for the given <b><i>function</i></b>.&nbsp; Each function has
+a <b><i>version number</i></b>, currently all of them are at version number
+one (1).&nbsp; The <b><i>code type</i></b> indicates to the awk script
+what kind of code to generate.&nbsp; The choices are:</li>
+
+<ul>
+<li>
+<b>CODE </b>- Generate all code, and return a status value.&nbsp; If specified,
+the client code will simply return the status to the user upon completion
+of the RPC call.</li>
+
+<li>
+<b>RETCODE </b>- Generate all code and call a return function in the client
+template file to deal with client issues or with other returned items.&nbsp;
+If specified, the client code generated will call a function of the form
+<i>__dbcl_&lt;name>_ret()
+</i>where
+&lt;name> is replaced with the function name given here.&nbsp; This function
+is placed in the template file because this indicates that something special
+must occur on return.&nbsp; The arguments to this function are the same
+as those for the client function, with the addition of the reply message
+structure.</li>
+
+<li>
+<b>NOCLNTCODE - </b>Generate XDR and server code, but no corresponding
+client code. (This is used for functions that are not named the same thing
+on both sides.&nbsp; The only use of this at the moment is db_env_create
+and db_create.&nbsp; The environment create call to the server is actually
+called from the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.&nbsp; The db_create code exists elsewhere in the library and we
+modify that code for the client call.)</li>
+</ul>
+
+<li>
+<b>ARG <i>RPC-type C-type varname [list-type]</i></b>- each line of this
+describes an argument to the function.&nbsp; The argument is called <b><i>varname</i></b>.&nbsp;
+The <b><i>C-type</i></b> given is what it should look like in the C code
+generated, such as <b>DB *, u_int32_t, const char *</b>.&nbsp; The
+<b><i>RPC-type</i></b>
+is an indication about how the RPC request message should be constructed.&nbsp;
+The RPC-types allowed are described below.</li>
+
+<li>
+<b>RET <i>RPC-type C-type varname [list-type]</i></b>- each line of this
+describes what the server should return from this procedure call (in addition
+to a status, which is always returned and should not be specified).&nbsp;
+The argument is called <b><i>varname</i></b>.&nbsp; The <b><i>C-type</i></b>
+given is what it should look like in the C code generated, such as <b>DB
+*, u_int32_t, const char *</b>.&nbsp; The <b><i>RPC-type</i></b> is an
+indication about how the RPC reply message should be constructed.&nbsp;
+The RPC-types are described below.</li>
+
+<li>
+<b>END </b>- End the description of this function.&nbsp; The result is
+that when the awk script encounters the <b>END</b> tag, it now has all
+the information it needs to construct the generated code for this function.</li>
+</ol>
+The <b><i>RPC-type</i></b> must be one of the following:
+<ul>
+<li>
+<b>IGNORE </b>- This argument is not passed to the server and should be
+ignored when constructing the XDR code.&nbsp; <b>Only allowed for an ARG
+specification.</b></li>
+
+<li>
+<b>STRING</b> - This argument is a string.</li>
+
+<li>
+<b>INT </b>- This argument is an integer of some sort.</li>
+
+<li>
+<b>DBT </b>- This argument is a DBT, resulting in its decomposition into
+the request message.</li>
+
+<li>
+<b>LIST</b> - This argument is an opaque list passed to the server (NULL-terminated).&nbsp;
+If an argument of this type is given, it must have a <b><i>list-type</i></b>
+specified that is one of:</li>
+
+<ul>
+<li>
+<b>STRING</b></li>
+
+<li>
+<b>INT</b></li>
+
+<li>
+<b>ID</b>.</li>
+</ul>
+
+<li>
+<b>ID</b> - This argument is an identifier.</li>
+</ul>
+So, for example, the source for the DB->join RPC call looks like:
+<pre>BEGIN&nbsp;&nbsp; dbjoin&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; RETCODE
+ARG&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; DB *&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbp&nbsp;
+ARG&nbsp;&nbsp;&nbsp;&nbsp; LIST&nbsp;&nbsp;&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curs&nbsp;&nbsp;&nbsp; ID
+ARG&nbsp;&nbsp;&nbsp;&nbsp; IGNORE&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcpp&nbsp;
+ARG&nbsp;&nbsp;&nbsp;&nbsp; INT&nbsp;&nbsp;&nbsp;&nbsp; u_int32_t&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags
+RET&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; long&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcid
+END</pre>
+Our first line tells us we are writing the dbjoin function.&nbsp; It requires
+special code on the client so we indicate that with the RETCODE.&nbsp;
+This method takes four arguments.&nbsp; For the RPC request we need the
+database ID from the dbp, we construct a NULL-terminated list of IDs for
+the cursor list, we ignore the argument to return the cursor handle to
+the user, and we pass along the flags.&nbsp; On the return, the reply contains
+a status, by default, and additionally, it contains the ID of the newly
+created cursor.
+<h2>
+Building and Installing</h2>
+I need to verify with Don Anderson, but I believe we should just build
+the server program, just like we do for db_stat, db_checkpoint, etc.&nbsp;
+Basically it can be treated as a utility program from the building and
+installation perspective.
+<p>As mentioned early on, in the section on <a href="#DB Modifications">DB
+Modifications</a>, we have a single library, but allowing the user to access
+the client portion by sending a flag to <a href="../docs/api_c/env_create.html">db_env_create()</a>.&nbsp;
+The Makefile is modified to include the new files.
+<p>Testing is performed in two ways.&nbsp; First I have a new example program,
+that should become part of the example directory.&nbsp; It is basically
+a merging of ex_access.c and ex_env.c.&nbsp; This example is adequate to
+test basic functionality, as it just does database put/get calls and
+appropriate open and close calls.&nbsp; However, in order to test the full
+set of functions a more generalized scheme is required.&nbsp; For the moment,
+I am going to modify the Tcl interface to accept the server information.&nbsp;
+Nothing else should need to change in Tcl.&nbsp; Then we can either write
+our own test modules or use a subset of the existing ones to test functionality
+on a regular basis.
+</body>
+</html>
diff --git a/libdb/rpc_server/cxx/db_server_cxxproc.cpp b/libdb/rpc_server/cxx/db_server_cxxproc.cpp
new file mode 100644
index 0000000..aa5a733
--- /dev/null
+++ b/libdb/rpc_server/cxx/db_server_cxxproc.cpp
@@ -0,0 +1,2200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+}
+
+/* BEGIN __env_cachesize_proc */
+extern "C" void
+__env_cachesize_proc(
+ long dbenvcl_id,
+ u_int32_t gbytes,
+ u_int32_t bytes,
+ u_int32_t ncache,
+ __env_cachesize_reply *replyp)
+/* END __env_cachesize_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+extern "C" void
+__env_close_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __env_close_reply *replyp)
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+extern "C" void
+__env_create_proc(
+ u_int32_t timeout,
+ __env_create_reply *replyp)
+/* END __env_create_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *ctp;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+extern "C" void
+__env_dbremove_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __env_dbremove_reply *replyp)
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+void
+__env_dbrename_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __env_dbrename_reply *replyp)
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+extern "C" void
+__env_encrypt_proc(
+ long dbenvcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __env_encrypt_reply *replyp)
+/* END __env_encrypt_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+extern "C" void
+__env_flags_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ u_int32_t onoff,
+ __env_flags_reply *replyp)
+/* END __env_flags_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+extern "C" void
+__env_open_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ u_int32_t mode,
+ __env_open_reply *replyp)
+/* END __env_open_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+extern "C" void
+__env_remove_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ __env_remove_reply *replyp)
+/* END __env_remove_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+extern "C" void
+__txn_abort_proc(
+ long txnpcl_id,
+ __txn_abort_reply *replyp)
+/* END __txn_abort_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort();
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+extern "C" void
+__txn_begin_proc(
+ long dbenvcl_id,
+ long parentcl_id,
+ u_int32_t flags,
+ __txn_begin_reply *replyp)
+/* END __txn_begin_proc */
+{
+ DbEnv *dbenv;
+ DbTxn *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DbTxn *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+extern "C" void
+__txn_commit_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_commit_reply *replyp)
+/* END __txn_commit_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+extern "C" void
+__txn_discard_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_discard_reply *replyp)
+/* END __txn_discard_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+extern "C" void
+__txn_prepare_proc(
+ long txnpcl_id,
+ u_int8_t *gid,
+ __txn_prepare_reply *replyp)
+/* END __txn_prepare_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+extern "C" void
+__txn_recover_proc(
+ long dbenvcl_id,
+ u_int32_t count,
+ u_int32_t flags,
+ __txn_recover_reply *replyp,
+ int * freep)
+/* END __txn_recover_proc */
+{
+ DbEnv *dbenv;
+ DbPreplist *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ char *gid;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ *freep = 0;
+
+ if ((ret =
+ __os_malloc(dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0)
+ goto out;
+ /*
+ * If there is nothing, success, but it's easy.
+ */
+ replyp->retcount = retcount; // TODO: fix C++ txn_recover
+ if (retcount == 0) {
+ replyp->txn.txn_val = NULL;
+ replyp->txn.txn_len = 0;
+ replyp->gid.gid_val = NULL;
+ replyp->gid.gid_len = 0;
+ }
+
+ /*
+ * We have our txn list. Now we need to allocate the space for
+ * the txn ID array and the GID array and set them up.
+ */
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+ * Now walk through our results, creating parallel arrays
+ * to send back. For each entry we need to create a new
+ * txn ctp and then fill in the array info.
+ */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+ * If we get here, we have success and we have to set freep
+ * so it'll get properly freed next time.
+ */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+ * We had an error in the middle of creating our new txn
+ * ct entries. We have to unwind all that we have done. Ugh.
+ */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ __os_free(dbenv->get_DB_ENV(), replyp->gid.gid_val);
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+extern "C" void
+__db_bt_maxkey_proc(
+ long dbpcl_id,
+ u_int32_t maxkey,
+ __db_bt_maxkey_reply *replyp)
+/* END __db_bt_maxkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+extern "C" void
+__db_associate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ long sdbpcl_id,
+ u_int32_t flags,
+ __db_associate_reply *replyp)
+/* END __db_associate_proc */
+{
+ Db *dbp, *sdbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (Db *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+extern "C" void
+__db_bt_minkey_proc(
+ long dbpcl_id,
+ u_int32_t minkey,
+ __db_bt_minkey_reply *replyp)
+/* END __db_bt_minkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+extern "C" void
+__db_close_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_close_reply *replyp)
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+extern "C" void
+__db_create_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __db_create_reply *replyp)
+/* END __db_create_proc */
+{
+ Db *dbp;
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+ * We actually require env's for databases. The client should
+ * have caught it, but just in case.
+ */
+ DB_ASSERT(dbenv != NULL);
+ dbp = new Db(dbenv, flags);
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+extern "C" void
+__db_del_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_del_reply *replyp)
+/* END __db_del_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->del(txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+extern "C" void
+__db_encrypt_proc(
+ long dbpcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __db_encrypt_reply *replyp)
+/* END __db_encrypt_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+extern "C" void
+__db_extentsize_proc(
+ long dbpcl_id,
+ u_int32_t extentsize,
+ __db_extentsize_reply *replyp)
+/* END __db_extentsize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+extern "C" void
+__db_flags_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_flags_reply *replyp)
+/* END __db_flags_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(flags);
+ dbp_ctp->ct_dbdp.setflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+extern "C" void
+__db_get_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_get_reply *replyp,
+ int * freep)
+/* END __db_get_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ dataflags &= DB_DBT_PARTIAL;
+ if (flags & DB_MULTIPLE) {
+ if (data.get_data() == 0) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ dataulen, &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(txnp, &key, &data, flags);
+ /*
+ * On success, return the key and data; otherwise just return the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+extern "C" void
+__db_h_ffactor_proc(
+ long dbpcl_id,
+ u_int32_t ffactor,
+ __db_h_ffactor_reply *replyp)
+/* END __db_h_ffactor_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+extern "C" void
+__db_h_nelem_proc(
+ long dbpcl_id,
+ u_int32_t nelem,
+ __db_h_nelem_reply *replyp)
+/* END __db_h_nelem_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+extern "C" void
+__db_key_range_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_key_range_reply *replyp)
+/* END __db_key_range_proc */
+{
+ Db *dbp;
+ DB_KEY_RANGE range;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->key_range(txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+extern "C" void
+__db_lorder_proc(
+ long dbpcl_id,
+ u_int32_t lorder,
+ __db_lorder_reply *replyp)
+/* END __db_lorder_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+extern "C" void
+__db_open_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t type,
+ u_int32_t flags,
+ u_int32_t mode,
+ __db_open_reply *replyp)
+/* END __db_open_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, (DBTYPE)type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(&dbtype);
+ replyp->type = dbtype;
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->get_DB()->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(&isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+extern "C" void
+__db_pagesize_proc(
+ long dbpcl_id,
+ u_int32_t pagesize,
+ __db_pagesize_reply *replyp)
+/* END __db_pagesize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+extern "C" void
+__db_pget_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_pget_reply *replyp,
+ int * freep)
+/* END __db_pget_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(txnp, &skey, &pkey, &data, flags);
+ /*
+ * On success, return the keys and data; otherwise just return the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+extern "C" void
+__db_put_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_put_reply *replyp,
+ int * freep)
+/* END __db_put_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+extern "C" void
+__db_re_delim_proc(
+ long dbpcl_id,
+ u_int32_t delim,
+ __db_re_delim_reply *replyp)
+/* END __db_re_delim_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+extern "C" void
+__db_re_len_proc(
+ long dbpcl_id,
+ u_int32_t len,
+ __db_re_len_reply *replyp)
+/* END __db_re_len_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+extern "C" void
+__db_re_pad_proc(
+ long dbpcl_id,
+ u_int32_t pad,
+ __db_re_pad_reply *replyp)
+/* END __db_re_pad_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+extern "C" void
+__db_remove_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __db_remove_reply *replyp)
+/* END __db_remove_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+extern "C" void
+__db_rename_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __db_rename_reply *replyp)
+/* END __db_rename_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+extern "C" void
+__db_stat_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_stat_reply *replyp,
+ int * freep)
+/* END __db_stat_proc */
+{
+ Db *dbp;
+ DBTYPE type;
+ ct_entry *dbp_ctp;
+ u_int32_t *q, *p, *retsp;
+ int i, len, ret;
+ void *sp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->stat(&sp, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+ /*
+ * If we get here, we have success. Allocate an array so that
+ * we can use the list generator. Generate the reply, then free
+ * up the space.
+ */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ (void)dbp->get_type(&type);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT);
+ replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+ if ((ret = __os_umalloc(dbp->get_DB()->dbenv,
+ len * replyp->stats.stats_len, &retsp)) != 0)
+ goto out;
+ for (i = 0, q = retsp, p = (u_int32_t *)sp; i < len;
+ i++, q++, p++)
+ *q = *p;
+ replyp->stats.stats_val = retsp;
+ __os_ufree(dbp->get_DB()->dbenv, sp);
+ if (ret == 0)
+ *freep = 1;
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_proc */
+extern "C" void
+__db_sync_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_sync_reply *replyp)
+/* END __db_sync_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+extern "C" void
+__db_truncate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_truncate_reply *replyp)
+/* END __db_truncate_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ u_int32_t count;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbp->truncate(txnp, &count, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->count = count;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+extern "C" void
+__db_cursor_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_cursor_reply *replyp)
+/* END __db_cursor_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ DbTxn *txnp;
+ ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+extern "C" void
+__db_join_proc(
+ long dbpcl_id,
+ u_int32_t *curs,
+ u_int32_t curslen,
+ u_int32_t flags,
+ __db_join_reply *replyp)
+/* END __db_join_proc */
+{
+ Db *dbp;
+ Dbc **jcurs, **c;
+ Dbc *dbc;
+ ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+ size_t size;
+ u_int32_t *cl, i;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ size = (curslen + 1) * sizeof(Dbc *);
+ if ((ret = __os_calloc(dbp->get_DB()->dbenv,
+ curslen + 1, sizeof(Dbc *), &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curs);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+ * curslist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
+ *c = NULL;
+ if ((ret = dbp->join(jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curs; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(dbp->get_DB()->dbenv, jcurs);
+ return;
+}
+
+/* BEGIN __dbc_close_proc */
+extern "C" void
+__dbc_close_proc(
+ long dbccl_id,
+ __dbc_close_reply *replyp)
+/* END __dbc_close_proc */
+{
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/* BEGIN __dbc_count_proc */
+extern "C" void
+__dbc_count_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_count_reply *replyp)
+/* END __dbc_count_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->count(&num, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/* BEGIN __dbc_del_proc */
+extern "C" void
+__dbc_del_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_del_reply *replyp)
+/* END __dbc_del_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->del(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_proc */
+extern "C" void
+__dbc_dup_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_dup_reply *replyp)
+/* END __dbc_dup_proc */
+{
+ Dbc *dbc, *newdbc;
+ ct_entry *dbc_ctp, *new_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->dup(&newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_proc */
+extern "C" void
+__dbc_get_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_get_reply *replyp,
+ int * freep)
+/* END __dbc_get_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ dataflags &= DB_DBT_PARTIAL;
+ if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+ if (data.get_data() == NULL) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_ulen(), &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->get(&key, &data, flags);
+
+ /*
+ * On success, return the key and data; otherwise just return the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), key.get_size(),
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), data.get_size(),
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_pget_proc */
+extern "C" void
+__dbc_pget_proc(
+ long dbccl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_pget_reply *replyp,
+ int * freep)
+/* END __dbc_pget_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->pget(&skey, &pkey, &data, flags);
+ /*
+ * On success, return the keys and data; otherwise just return the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_proc */
+extern "C" void
+__dbc_put_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_put_reply *replyp,
+ int * freep)
+/* END __dbc_put_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+ DBTYPE dbtype;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbp = (Db *)dbc_ctp->ct_parent->ct_anyp;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->put(&key, &data, flags);
+
+ *freep = 0;
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE)) {
+ ret = dbp->get_type(&dbtype);
+ if (ret == 0 && dbtype == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = (char *)key.get_data();
+ replyp->keydata.keydata_len = key.get_size();
+ }
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
diff --git a/libdb/rpc_server/cxx/db_server_cxxutil.cpp b/libdb/rpc_server/cxx/db_server_cxxutil.cpp
new file mode 100644
index 0000000..f534b92
--- /dev/null
+++ b/libdb/rpc_server/cxx/db_server_cxxutil.cpp
@@ -0,0 +1,746 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+#include "dbinc_auto/clib_ext.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+extern int __dbsrv_main __P((void));
+}
+
+static int add_home __P((char *));
+static int add_passwd __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+int
+main(
+ int argc,
+ char **argv)
+{
+ extern char *optarg;
+ CLIENT *cl;
+ int ch, ret;
+ char *passwd;
+
+ prog = argv[0];
+
+ version_check();
+
+ /*
+ * Check whether another server is already running. There
+ * is a race condition in which two servers could race to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+ * This does not prevent two servers from being started at
+ * the same time and running recovery simultaneously on the
+ * same environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ return (EXIT_FAILURE);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: add_passwd: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
+ break;
+ case 'T':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+ * Check default timeout against maximum timeout
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+ * Check default timeout against idle timeout
+ * It would be bad to timeout environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ return (EXIT_FAILURE);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+static void
+usage(char *prog)
+{
+ fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
+ "[-Vv] [-h home] [-P passwd]",
+ "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ exit(EXIT_FAILURE);
+}
+
+static void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit(EXIT_FAILURE);
+ }
+}
+
+extern "C" void
+__dbsrv_settimeout(
+ ct_entry *ctp,
+ u_int32_t to)
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ else if (to <= 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+extern "C" void
+__dbsrv_timeout(int force)
+{
+ static long to_hint = -1;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)((DbTxn *)ctp->ct_anyp)->abort();
+ __dbdel_ctp(ctp);
+ /*
+ * If we timed out a txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
+ /*
+ * If we timed out an env, we may have closed
+ * all sorts of ctp's (maybe even all of them).
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+static void
+__dbclear_child(ct_entry *parent)
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+extern "C" void
+__dbclear_ctp(ct_entry *ctp)
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(NULL, ctp);
+}
+
+extern "C" void
+__dbdel_ctp(ct_entry *parent)
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+extern "C" ct_entry *
+new_ct_ent(int *errp)
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ memset(ctp, 0, sizeof(ct_entry));
+ /*
+ * Use the time as the ID. However, we may service more than one
+ * request per second; if so, increment the ID value until we
+ * find an unused one. We insert entries in LRU fashion at the
+ * head of the list, so if the first entry doesn't match, then
+ * we know for certain that we can use our entry.
+ */
+ if ((t = time(NULL)) == -1) {
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
+ return (NULL);
+ }
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+extern "C" ct_entry *
+get_tableent(long id)
+{
+ ct_entry *ctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries))
+ if (ctp->ct_id == id)
+ return (ctp);
+ return (NULL);
+}
+
+extern "C" ct_entry *
+__dbsrv_sharedb(ct_entry *db_ctp, const char *name, const char *subdb, DBTYPE type, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+extern "C" ct_entry *
+__dbsrv_shareenv(ct_entry *env_ctp, home_entry *home, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+extern "C" void
+__dbsrv_active(ct_entry *ctp)
+{
+ time_t t;
+ ct_entry *envctp;
+
+ if (ctp == NULL)
+ return;
+ if ((t = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = t;
+ if ((envctp = ctp->ct_envparent) == NULL)
+ return;
+ *(envctp->ct_activep) = t;
+ return;
+}
+
+extern "C" int
+__db_close_int(long id, u_int32_t flags)
+{
+ Db *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+extern "C" int
+__dbc_close_int(ct_entry *dbc_ctp)
+{
+ Dbc *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->close();
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+ return (ret);
+
+}
+
+extern "C" int
+__dbenv_close_int(long id, u_int32_t flags, int force)
+{
+ DbEnv *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
+ dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
+
+ ret = dbenv->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+static int
+add_home(char *home)
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
+ return (ret);
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ hp->passwd = NULL;
+ /*
+ * This loop is to remove any trailing path separators,
+ * to assure hp->name points to the last component.
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+ /*
+ * Before adding it to the list, make sure there are no
+ * identical names already present.
+ */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+static int
+add_passwd(char *passwd)
+{
+ home_entry *hp;
+
+ /*
+ * We add the passwd to the last given home dir. If there
+ * isn't a home dir, or the most recent one already has a
+ * passwd, then there is a user error.
+ */
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL || hp->passwd != NULL)
+ return (EINVAL);
+ /*
+ * We've already strdup'ed the passwd above, so we don't need
+ * to malloc new space, just point to it.
+ */
+ hp->passwd = passwd;
+ return (0);
+}
+
+extern "C" home_entry *
+get_home(char *name)
+{
+ home_entry *hp;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries))
+ if (strcmp(name, hp->name) == 0)
+ return (hp);
+ return (NULL);
+}
+
+static int
+env_recover(char *progname)
+{
+ DbEnv *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(hp->passwd, DB_ENCRYPT_AES);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(hp->home, flags, 0)) != 0) {
+ dbenv->err(ret, "DbEnv->open");
+ goto error;
+ }
+
+ if (0) {
+error: exitval = 1;
+ }
+ if ((ret = dbenv->close(0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
diff --git a/libdb/rpc_server/db_server.x b/libdb/rpc_server/db_server.x
new file mode 100644
index 0000000..5c7fd5e
--- /dev/null
+++ b/libdb/rpc_server/db_server.x
@@ -0,0 +1,651 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+
+struct __env_cachesize_msg {
+ unsigned int dbenvcl_id;
+ unsigned int gbytes;
+ unsigned int bytes;
+ unsigned int ncache;
+};
+
+struct __env_cachesize_reply {
+ int status;
+};
+
+struct __env_close_msg {
+ unsigned int dbenvcl_id;
+ unsigned int flags;
+};
+
+struct __env_close_reply {
+ int status;
+};
+
+struct __env_create_msg {
+ unsigned int timeout;
+};
+
+struct __env_create_reply {
+ int status;
+ unsigned int envcl_id;
+};
+
+struct __env_dbremove_msg {
+ unsigned int dbenvcl_id;
+ unsigned int txnpcl_id;
+ string name<>;
+ string subdb<>;
+ unsigned int flags;
+};
+
+struct __env_dbremove_reply {
+ int status;
+};
+
+struct __env_dbrename_msg {
+ unsigned int dbenvcl_id;
+ unsigned int txnpcl_id;
+ string name<>;
+ string subdb<>;
+ string newname<>;
+ unsigned int flags;
+};
+
+struct __env_dbrename_reply {
+ int status;
+};
+
+struct __env_encrypt_msg {
+ unsigned int dbenvcl_id;
+ string passwd<>;
+ unsigned int flags;
+};
+
+struct __env_encrypt_reply {
+ int status;
+};
+
+struct __env_flags_msg {
+ unsigned int dbenvcl_id;
+ unsigned int flags;
+ unsigned int onoff;
+};
+
+struct __env_flags_reply {
+ int status;
+};
+
+struct __env_open_msg {
+ unsigned int dbenvcl_id;
+ string home<>;
+ unsigned int flags;
+ unsigned int mode;
+};
+
+struct __env_open_reply {
+ int status;
+ unsigned int envcl_id;
+};
+
+struct __env_remove_msg {
+ unsigned int dbenvcl_id;
+ string home<>;
+ unsigned int flags;
+};
+
+struct __env_remove_reply {
+ int status;
+};
+
+struct __txn_abort_msg {
+ unsigned int txnpcl_id;
+};
+
+struct __txn_abort_reply {
+ int status;
+};
+
+struct __txn_begin_msg {
+ unsigned int dbenvcl_id;
+ unsigned int parentcl_id;
+ unsigned int flags;
+};
+
+struct __txn_begin_reply {
+ int status;
+ unsigned int txnidcl_id;
+};
+
+struct __txn_commit_msg {
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __txn_commit_reply {
+ int status;
+};
+
+struct __txn_discard_msg {
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __txn_discard_reply {
+ int status;
+};
+
+struct __txn_prepare_msg {
+ unsigned int txnpcl_id;
+ opaque gid[128];
+};
+
+struct __txn_prepare_reply {
+ int status;
+};
+
+struct __txn_recover_msg {
+ unsigned int dbenvcl_id;
+ unsigned int count;
+ unsigned int flags;
+};
+
+struct __txn_recover_reply {
+ int status;
+ unsigned int txn<>;
+ opaque gid<>;
+ unsigned int retcount;
+};
+
+struct __db_associate_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int sdbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_associate_reply {
+ int status;
+};
+
+struct __db_bt_maxkey_msg {
+ unsigned int dbpcl_id;
+ unsigned int maxkey;
+};
+
+struct __db_bt_maxkey_reply {
+ int status;
+};
+
+struct __db_bt_minkey_msg {
+ unsigned int dbpcl_id;
+ unsigned int minkey;
+};
+
+struct __db_bt_minkey_reply {
+ int status;
+};
+
+struct __db_close_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_close_reply {
+ int status;
+};
+
+struct __db_create_msg {
+ unsigned int dbenvcl_id;
+ unsigned int flags;
+};
+
+struct __db_create_reply {
+ int status;
+ unsigned int dbcl_id;
+};
+
+struct __db_del_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int flags;
+};
+
+struct __db_del_reply {
+ int status;
+};
+
+struct __db_encrypt_msg {
+ unsigned int dbpcl_id;
+ string passwd<>;
+ unsigned int flags;
+};
+
+struct __db_encrypt_reply {
+ int status;
+};
+
+struct __db_extentsize_msg {
+ unsigned int dbpcl_id;
+ unsigned int extentsize;
+};
+
+struct __db_extentsize_reply {
+ int status;
+};
+
+struct __db_flags_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_flags_reply {
+ int status;
+};
+
+struct __db_get_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __db_get_reply {
+ int status;
+ opaque keydata<>;
+ opaque datadata<>;
+};
+
+struct __db_h_ffactor_msg {
+ unsigned int dbpcl_id;
+ unsigned int ffactor;
+};
+
+struct __db_h_ffactor_reply {
+ int status;
+};
+
+struct __db_h_nelem_msg {
+ unsigned int dbpcl_id;
+ unsigned int nelem;
+};
+
+struct __db_h_nelem_reply {
+ int status;
+};
+
+struct __db_key_range_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int flags;
+};
+
+struct __db_key_range_reply {
+ int status;
+ double less;
+ double equal;
+ double greater;
+};
+
+struct __db_lorder_msg {
+ unsigned int dbpcl_id;
+ unsigned int lorder;
+};
+
+struct __db_lorder_reply {
+ int status;
+};
+
+struct __db_open_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ string name<>;
+ string subdb<>;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int mode;
+};
+
+struct __db_open_reply {
+ int status;
+ unsigned int dbcl_id;
+ unsigned int type;
+ unsigned int dbflags;
+ unsigned int lorder;
+};
+
+struct __db_pagesize_msg {
+ unsigned int dbpcl_id;
+ unsigned int pagesize;
+};
+
+struct __db_pagesize_reply {
+ int status;
+};
+
+struct __db_pget_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int skeydlen;
+ unsigned int skeydoff;
+ unsigned int skeyulen;
+ unsigned int skeyflags;
+ opaque skeydata<>;
+ unsigned int pkeydlen;
+ unsigned int pkeydoff;
+ unsigned int pkeyulen;
+ unsigned int pkeyflags;
+ opaque pkeydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __db_pget_reply {
+ int status;
+ opaque skeydata<>;
+ opaque pkeydata<>;
+ opaque datadata<>;
+};
+
+struct __db_put_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __db_put_reply {
+ int status;
+ opaque keydata<>;
+};
+
+struct __db_re_delim_msg {
+ unsigned int dbpcl_id;
+ unsigned int delim;
+};
+
+struct __db_re_delim_reply {
+ int status;
+};
+
+struct __db_re_len_msg {
+ unsigned int dbpcl_id;
+ unsigned int len;
+};
+
+struct __db_re_len_reply {
+ int status;
+};
+
+struct __db_re_pad_msg {
+ unsigned int dbpcl_id;
+ unsigned int pad;
+};
+
+struct __db_re_pad_reply {
+ int status;
+};
+
+struct __db_remove_msg {
+ unsigned int dbpcl_id;
+ string name<>;
+ string subdb<>;
+ unsigned int flags;
+};
+
+struct __db_remove_reply {
+ int status;
+};
+
+struct __db_rename_msg {
+ unsigned int dbpcl_id;
+ string name<>;
+ string subdb<>;
+ string newname<>;
+ unsigned int flags;
+};
+
+struct __db_rename_reply {
+ int status;
+};
+
+struct __db_stat_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_stat_reply {
+ int status;
+ unsigned int stats<>;
+};
+
+struct __db_sync_msg {
+ unsigned int dbpcl_id;
+ unsigned int flags;
+};
+
+struct __db_sync_reply {
+ int status;
+};
+
+struct __db_truncate_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __db_truncate_reply {
+ int status;
+ unsigned int count;
+};
+
+struct __db_cursor_msg {
+ unsigned int dbpcl_id;
+ unsigned int txnpcl_id;
+ unsigned int flags;
+};
+
+struct __db_cursor_reply {
+ int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __db_join_msg {
+ unsigned int dbpcl_id;
+ unsigned int curs<>;
+ unsigned int flags;
+};
+
+struct __db_join_reply {
+ int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __dbc_close_msg {
+ unsigned int dbccl_id;
+};
+
+struct __dbc_close_reply {
+ int status;
+};
+
+struct __dbc_count_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_count_reply {
+ int status;
+ unsigned int dupcount;
+};
+
+struct __dbc_del_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_del_reply {
+ int status;
+};
+
+struct __dbc_dup_msg {
+ unsigned int dbccl_id;
+ unsigned int flags;
+};
+
+struct __dbc_dup_reply {
+ int status;
+ unsigned int dbcidcl_id;
+};
+
+struct __dbc_get_msg {
+ unsigned int dbccl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __dbc_get_reply {
+ int status;
+ opaque keydata<>;
+ opaque datadata<>;
+};
+
+struct __dbc_pget_msg {
+ unsigned int dbccl_id;
+ unsigned int skeydlen;
+ unsigned int skeydoff;
+ unsigned int skeyulen;
+ unsigned int skeyflags;
+ opaque skeydata<>;
+ unsigned int pkeydlen;
+ unsigned int pkeydoff;
+ unsigned int pkeyulen;
+ unsigned int pkeyflags;
+ opaque pkeydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __dbc_pget_reply {
+ int status;
+ opaque skeydata<>;
+ opaque pkeydata<>;
+ opaque datadata<>;
+};
+
+struct __dbc_put_msg {
+ unsigned int dbccl_id;
+ unsigned int keydlen;
+ unsigned int keydoff;
+ unsigned int keyulen;
+ unsigned int keyflags;
+ opaque keydata<>;
+ unsigned int datadlen;
+ unsigned int datadoff;
+ unsigned int dataulen;
+ unsigned int dataflags;
+ opaque datadata<>;
+ unsigned int flags;
+};
+
+struct __dbc_put_reply {
+ int status;
+ opaque keydata<>;
+};
+program DB_RPC_SERVERPROG {
+ version DB_RPC_SERVERVERS {
+ __env_cachesize_reply __DB_env_cachesize(__env_cachesize_msg) = 1;
+ __env_close_reply __DB_env_close(__env_close_msg) = 2;
+ __env_create_reply __DB_env_create(__env_create_msg) = 3;
+ __env_dbremove_reply __DB_env_dbremove(__env_dbremove_msg) = 4;
+ __env_dbrename_reply __DB_env_dbrename(__env_dbrename_msg) = 5;
+ __env_encrypt_reply __DB_env_encrypt(__env_encrypt_msg) = 6;
+ __env_flags_reply __DB_env_flags(__env_flags_msg) = 7;
+ __env_open_reply __DB_env_open(__env_open_msg) = 8;
+ __env_remove_reply __DB_env_remove(__env_remove_msg) = 9;
+ __txn_abort_reply __DB_txn_abort(__txn_abort_msg) = 10;
+ __txn_begin_reply __DB_txn_begin(__txn_begin_msg) = 11;
+ __txn_commit_reply __DB_txn_commit(__txn_commit_msg) = 12;
+ __txn_discard_reply __DB_txn_discard(__txn_discard_msg) = 13;
+ __txn_prepare_reply __DB_txn_prepare(__txn_prepare_msg) = 14;
+ __txn_recover_reply __DB_txn_recover(__txn_recover_msg) = 15;
+ __db_associate_reply __DB_db_associate(__db_associate_msg) = 16;
+ __db_bt_maxkey_reply __DB_db_bt_maxkey(__db_bt_maxkey_msg) = 17;
+ __db_bt_minkey_reply __DB_db_bt_minkey(__db_bt_minkey_msg) = 18;
+ __db_close_reply __DB_db_close(__db_close_msg) = 19;
+ __db_create_reply __DB_db_create(__db_create_msg) = 20;
+ __db_del_reply __DB_db_del(__db_del_msg) = 21;
+ __db_encrypt_reply __DB_db_encrypt(__db_encrypt_msg) = 22;
+ __db_extentsize_reply __DB_db_extentsize(__db_extentsize_msg) = 23;
+ __db_flags_reply __DB_db_flags(__db_flags_msg) = 24;
+ __db_get_reply __DB_db_get(__db_get_msg) = 25;
+ __db_h_ffactor_reply __DB_db_h_ffactor(__db_h_ffactor_msg) = 26;
+ __db_h_nelem_reply __DB_db_h_nelem(__db_h_nelem_msg) = 27;
+ __db_key_range_reply __DB_db_key_range(__db_key_range_msg) = 28;
+ __db_lorder_reply __DB_db_lorder(__db_lorder_msg) = 29;
+ __db_open_reply __DB_db_open(__db_open_msg) = 30;
+ __db_pagesize_reply __DB_db_pagesize(__db_pagesize_msg) = 31;
+ __db_pget_reply __DB_db_pget(__db_pget_msg) = 32;
+ __db_put_reply __DB_db_put(__db_put_msg) = 33;
+ __db_re_delim_reply __DB_db_re_delim(__db_re_delim_msg) = 34;
+ __db_re_len_reply __DB_db_re_len(__db_re_len_msg) = 35;
+ __db_re_pad_reply __DB_db_re_pad(__db_re_pad_msg) = 36;
+ __db_remove_reply __DB_db_remove(__db_remove_msg) = 37;
+ __db_rename_reply __DB_db_rename(__db_rename_msg) = 38;
+ __db_stat_reply __DB_db_stat(__db_stat_msg) = 39;
+ __db_sync_reply __DB_db_sync(__db_sync_msg) = 40;
+ __db_truncate_reply __DB_db_truncate(__db_truncate_msg) = 41;
+ __db_cursor_reply __DB_db_cursor(__db_cursor_msg) = 42;
+ __db_join_reply __DB_db_join(__db_join_msg) = 43;
+ __dbc_close_reply __DB_dbc_close(__dbc_close_msg) = 44;
+ __dbc_count_reply __DB_dbc_count(__dbc_count_msg) = 45;
+ __dbc_del_reply __DB_dbc_del(__dbc_del_msg) = 46;
+ __dbc_dup_reply __DB_dbc_dup(__dbc_dup_msg) = 47;
+ __dbc_get_reply __DB_dbc_get(__dbc_get_msg) = 48;
+ __dbc_pget_reply __DB_dbc_pget(__dbc_pget_msg) = 49;
+ __dbc_put_reply __DB_dbc_put(__dbc_put_msg) = 50;
+ } = 4001;
+} = 351457;
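
The structs above, together with the closing program block, complete db_server.x: every Berkeley DB operation is described as a message/reply pair, and each procedure is assigned a number under program 351457, version 4001. jrpcgen compiles this specification into Java classes whose fields follow the usual XDR-to-Java mapping (unsigned int becomes int, string name<> becomes String, opaque keydata<> becomes byte[]), which is why the dispatcher and wrapper classes below can use args.dbpcl_id, args.name and args.keydata directly. As a rough sketch only, not the actual jrpcgen output, one generated message/reply pair looks approximately like this:

    // Sketch only: approximates the Java form jrpcgen derives for one
    // message/reply pair of db_server.x. Field names follow the XDR structs
    // above; the XDR (de)serialization code jrpcgen also emits is omitted.
    class __db_get_msg_sketch {
        int dbpcl_id;                        // client-side handle of the Db
        int txnpcl_id;                       // enclosing transaction handle, 0 if none
        int keydlen, keydoff, keyulen, keyflags;
        byte[] keydata;                      // XDR "opaque keydata<>"
        int datadlen, datadoff, dataulen, dataflags;
        byte[] datadata;
        int flags;
    }

    class __db_get_reply_sketch {
        int status;                          // 0 on success, else a DB error code
        byte[] keydata;
        byte[] datadata;
    }
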
diff --git a/libdb/rpc_server/java/DbDispatcher.java b/libdb/rpc_server/java/DbDispatcher.java
new file mode 100644
index 0000000..0be77af
--- /dev/null
+++ b/libdb/rpc_server/java/DbDispatcher.java
@@ -0,0 +1,590 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import org.acplt.oncrpc.OncRpcException;
+
+/**
+ * Dispatcher for RPC messages for the Java RPC server.
+ * These are hooks that translate between RPC msg/reply structures and
+ * DB calls, which keeps the real implementation code in Rpc* classes cleaner.
+ */
+public abstract class DbDispatcher extends DbServerStub
+{
+ abstract int addEnv(RpcDbEnv rdbenv);
+ abstract int addDb(RpcDb rdb);
+ abstract int addTxn(RpcDbTxn rtxn);
+ abstract int addCursor(RpcDbc rdbc);
+ abstract void delEnv(RpcDbEnv rdbenv);
+ abstract void delDb(RpcDb rdb);
+ abstract void delTxn(RpcDbTxn rtxn);
+ abstract void delCursor(RpcDbc rdbc);
+ abstract RpcDbEnv getEnv(int envid);
+ abstract RpcDb getDb(int dbid);
+ abstract RpcDbTxn getTxn(int txnbid);
+ abstract RpcDbc getCursor(int dbcid);
+
+ public DbDispatcher() throws IOException, OncRpcException
+ {
+ super();
+ }
+
+ //// Db methods
+
+ public __db_associate_reply __DB_db_associate_4001(__db_associate_msg args)
+ {
+ __db_associate_reply reply = new __db_associate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.associate(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg args)
+ {
+ __db_bt_maxkey_reply reply = new __db_bt_maxkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_maxkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg args)
+ {
+ __db_bt_minkey_reply reply = new __db_bt_minkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_minkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_close_reply __DB_db_close_4001(__db_close_msg args)
+ {
+ __db_close_reply reply = new __db_close_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.close(this, args, reply);
+ return reply;
+ }
+
+ public __db_create_reply __DB_db_create_4001(__db_create_msg args)
+ {
+ __db_create_reply reply = new __db_create_reply();
+ RpcDb rdb = new RpcDb(getEnv(args.dbenvcl_id));
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.create(this, args, reply);
+ return reply;
+ }
+
+ public __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg args)
+ {
+ __db_cursor_reply reply = new __db_cursor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.cursor(this, args, reply);
+ return reply;
+ }
+
+ public __db_del_reply __DB_db_del_4001(__db_del_msg args)
+ {
+ __db_del_reply reply = new __db_del_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.del(this, args, reply);
+ return reply;
+ }
+
+ public __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg args)
+ {
+ __db_encrypt_reply reply = new __db_encrypt_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg args)
+ {
+ __db_extentsize_reply reply = new __db_extentsize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_q_extentsize(this, args, reply);
+ return reply;
+ }
+
+ public __db_flags_reply __DB_db_flags_4001(__db_flags_msg args)
+ {
+ __db_flags_reply reply = new __db_flags_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __db_get_reply __DB_db_get_4001(__db_get_msg args)
+ {
+ __db_get_reply reply = new __db_get_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.get(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg args)
+ {
+ __db_h_ffactor_reply reply = new __db_h_ffactor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_ffactor(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg args)
+ {
+ __db_h_nelem_reply reply = new __db_h_nelem_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_nelem(this, args, reply);
+ return reply;
+ }
+
+ public __db_join_reply __DB_db_join_4001(__db_join_msg args)
+ {
+ __db_join_reply reply = new __db_join_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.join(this, args, reply);
+ return reply;
+ }
+
+ public __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg args)
+ {
+ __db_key_range_reply reply = new __db_key_range_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.key_range(this, args, reply);
+ return reply;
+ }
+
+ public __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg args)
+ {
+ __db_lorder_reply reply = new __db_lorder_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_lorder(this, args, reply);
+ return reply;
+ }
+
+ public __db_open_reply __DB_db_open_4001(__db_open_msg args)
+ {
+ __db_open_reply reply = new __db_open_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.open(this, args, reply);
+ return reply;
+ }
+
+ public __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg args)
+ {
+ __db_pagesize_reply reply = new __db_pagesize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_pagesize(this, args, reply);
+ return reply;
+ }
+
+ public __db_pget_reply __DB_db_pget_4001(__db_pget_msg args)
+ {
+ __db_pget_reply reply = new __db_pget_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.pget(this, args, reply);
+ return reply;
+ }
+
+ public __db_put_reply __DB_db_put_4001(__db_put_msg args)
+ {
+ __db_put_reply reply = new __db_put_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.put(this, args, reply);
+ return reply;
+ }
+
+ public __db_remove_reply __DB_db_remove_4001(__db_remove_msg args)
+ {
+ __db_remove_reply reply = new __db_remove_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.remove(this, args, reply);
+ return reply;
+ }
+
+ public __db_rename_reply __DB_db_rename_4001(__db_rename_msg args)
+ {
+ __db_rename_reply reply = new __db_rename_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.rename(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg args)
+ {
+ __db_re_delim_reply reply = new __db_re_delim_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_delim(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg args)
+ {
+ __db_re_len_reply reply = new __db_re_len_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_len(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg args)
+ {
+ __db_re_pad_reply reply = new __db_re_pad_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_pad(this, args, reply);
+ return reply;
+ }
+
+ public __db_stat_reply __DB_db_stat_4001(__db_stat_msg args)
+ {
+ __db_stat_reply reply = new __db_stat_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.stat(this, args, reply);
+ return reply;
+ }
+
+ public __db_sync_reply __DB_db_sync_4001(__db_sync_msg args)
+ {
+ __db_sync_reply reply = new __db_sync_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.sync(this, args, reply);
+ return reply;
+ }
+
+ public __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg args)
+ {
+ __db_truncate_reply reply = new __db_truncate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.truncate(this, args, reply);
+ return reply;
+ }
+
+ //// Cursor methods
+
+ public __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg args)
+ {
+ __dbc_close_reply reply = new __dbc_close_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.close(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg args)
+ {
+ __dbc_count_reply reply = new __dbc_count_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.count(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg args)
+ {
+ __dbc_del_reply reply = new __dbc_del_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.del(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg args)
+ {
+ __dbc_dup_reply reply = new __dbc_dup_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.dup(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg args)
+ {
+ __dbc_get_reply reply = new __dbc_get_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.get(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg args) {
+ __dbc_pget_reply reply = new __dbc_pget_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.pget(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg args) {
+ __dbc_put_reply reply = new __dbc_put_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.put(this, args, reply);
+ return reply;
+ }
+
+ //// Environment methods
+
+ public __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg args)
+ {
+ __env_cachesize_reply reply = new __env_cachesize_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_cachesize(this, args, reply);
+ return reply;
+ }
+
+ public __env_close_reply __DB_env_close_4001(__env_close_msg args)
+ {
+ __env_close_reply reply = new __env_close_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.close(this, args, reply);
+ return reply;
+ }
+
+ public __env_create_reply __DB_env_create_4001(__env_create_msg args)
+ {
+ __env_create_reply reply = new __env_create_reply();
+ RpcDbEnv rdbenv = new RpcDbEnv();
+ rdbenv.create(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg args)
+ {
+ __env_dbremove_reply reply = new __env_dbremove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbremove(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg args)
+ {
+ __env_dbrename_reply reply = new __env_dbrename_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbrename(this, args, reply);
+ return reply;
+ }
+
+ public __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg args)
+ {
+ __env_encrypt_reply reply = new __env_encrypt_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __env_flags_reply __DB_env_flags_4001(__env_flags_msg args)
+ {
+ __env_flags_reply reply = new __env_flags_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __env_open_reply __DB_env_open_4001(__env_open_msg args)
+ {
+ __env_open_reply reply = new __env_open_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.open(this, args, reply);
+ return reply;
+ }
+
+ public __env_remove_reply __DB_env_remove_4001(__env_remove_msg args)
+ {
+ __env_remove_reply reply = new __env_remove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.remove(this, args, reply);
+ return reply;
+ }
+
+ //// Transaction methods
+
+ public __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg args)
+ {
+ __txn_abort_reply reply = new __txn_abort_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.abort(this, args, reply);
+ return reply;
+ }
+
+ public __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg args)
+ {
+ __txn_begin_reply reply = new __txn_begin_reply();
+ RpcDbTxn rdbtxn = new RpcDbTxn(getEnv(args.dbenvcl_id), null);
+ rdbtxn.begin(this, args, reply);
+ return reply;
+ }
+
+ public __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg args)
+ {
+ __txn_commit_reply reply = new __txn_commit_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.commit(this, args, reply);
+ return reply;
+ }
+
+ public __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg args)
+ {
+ __txn_discard_reply reply = new __txn_discard_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.discard(this, args, reply);
+ return reply;
+ }
+
+ public __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg args)
+ {
+ __txn_prepare_reply reply = new __txn_prepare_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.prepare(this, args, reply);
+ return reply;
+ }
+
+ public __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg args)
+ {
+ __txn_recover_reply reply = new __txn_recover_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.txn_recover(this, args, reply);
+ return reply;
+ }
+}
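
Every handler in DbDispatcher follows the same template: allocate the reply, resolve the client-supplied integer id through the server's handle tables, report Db.DB_NOSERVER_ID if the handle is unknown or has been reclaimed, otherwise delegate to the Rpc* wrapper, and always return the reply, since errors travel back to the client as status codes rather than exceptions. The control flow, distilled into a self-contained sketch (the types and the error constant here are local stand-ins, not classes from the patch):

    // Distilled handler template; only the control flow is taken from
    // DbDispatcher above. All types and the constant are stand-ins.
    class HandlerTemplateSketch {
        static final int NOSERVER_ID = -1;       // placeholder for Db.DB_NOSERVER_ID

        static class Msg   { int dbpcl_id; }
        static class Reply { int status; }
        interface Wrapper  { void operate(Msg args, Reply reply); }

        Wrapper lookup(int id) { return null; }  // stand-in for getDb()/getEnv()/...

        Reply handle(Msg args) {
            Reply reply = new Reply();
            Wrapper w = lookup(args.dbpcl_id);   // resolve the client handle
            if (w == null)
                reply.status = NOSERVER_ID;      // stale or unknown handle
            else
                w.operate(args, reply);          // wrapper fills in reply.status
            return reply;                        // never throws across the wire
        }
    }
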
diff --git a/libdb/rpc_server/java/DbServer.java b/libdb/rpc_server/java/DbServer.java
new file mode 100644
index 0000000..b999cea
--- /dev/null
+++ b/libdb/rpc_server/java/DbServer.java
@@ -0,0 +1,301 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.*;
+import java.util.*;
+import org.acplt.oncrpc.OncRpcException;
+import org.acplt.oncrpc.server.OncRpcCallInformation;
+
+/**
+ * Main entry point for the Java version of the Berkeley DB RPC server
+ */
+public class DbServer extends DbDispatcher
+{
+ public static long idleto = 10 * 60 * 1000; // 10 minutes
+ public static long defto = 5 * 60 * 1000; // 5 minutes
+ public static long maxto = 60 * 60 * 1000; // 1 hour
+ public static String passwd = null;
+ public static PrintWriter err;
+
+ long now, hint; // updated each operation
+ FreeList env_list = new FreeList();
+ FreeList db_list = new FreeList();
+ FreeList txn_list = new FreeList();
+ FreeList cursor_list = new FreeList();
+
+ public DbServer() throws IOException, OncRpcException
+ {
+ super();
+ init_lists();
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program,
+ int version, int procedure) throws OncRpcException, IOException
+ {
+ long newnow = System.currentTimeMillis();
+ // DbServer.err.println("Dispatching RPC call " + procedure + " after delay of " + (newnow - now));
+ now = newnow;
+ // DbServer.err.flush();
+ super.dispatchOncRpcCall(call, program, version, procedure);
+
+ try {
+ doTimeouts();
+ } catch(Throwable t) {
+ System.err.println("Caught " + t + " during doTimeouts()");
+ t.printStackTrace(System.err);
+ }
+ }
+
+ // Internal methods to track context
+ private void init_lists()
+ {
+ // We do this so that getEnv/Db/etc(0) == null
+ env_list.add(null);
+ db_list.add(null);
+ txn_list.add(null);
+ cursor_list.add(null);
+ }
+
+ int addEnv(RpcDbEnv rdbenv)
+ {
+ rdbenv.timer.last_access = now;
+ int id = env_list.add(rdbenv);
+ return id;
+ }
+
+ int addDb(RpcDb rdb)
+ {
+ int id = db_list.add(rdb);
+ return id;
+ }
+
+ int addTxn(RpcDbTxn rtxn)
+ {
+ rtxn.timer.last_access = now;
+ int id = txn_list.add(rtxn);
+ return id;
+ }
+
+ int addCursor(RpcDbc rdbc)
+ {
+ rdbc.timer.last_access = now;
+ int id = cursor_list.add(rdbc);
+ return id;
+ }
+
+ void delEnv(RpcDbEnv rdbenv)
+ {
+ // cursors and transactions will already have been cleaned up
+ for(LocalIterator i = db_list.iterator(); i.hasNext(); ) {
+ RpcDb rdb = (RpcDb)i.next();
+ if (rdb != null && rdb.rdbenv == rdbenv)
+ delDb(rdb);
+ }
+
+ env_list.del(rdbenv);
+ rdbenv.dispose();
+ }
+
+ void delDb(RpcDb rdb)
+ {
+ db_list.del(rdb);
+ rdb.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rdb)
+ i.remove();
+ }
+ }
+
+ void delTxn(RpcDbTxn rtxn)
+ {
+ txn_list.del(rtxn);
+ rtxn.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rtxn)
+ i.remove();
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn_child = (RpcDbTxn)i.next();
+ if (rtxn_child != null && rtxn_child.timer == rtxn)
+ i.remove();
+ }
+ }
+
+ void delCursor(RpcDbc rdbc)
+ {
+ cursor_list.del(rdbc);
+ rdbc.dispose();
+ }
+
+ RpcDbEnv getEnv(int envid)
+ {
+ RpcDbEnv rdbenv = (RpcDbEnv)env_list.get(envid);
+ if (rdbenv != null)
+ rdbenv.timer.last_access = now;
+ return rdbenv;
+ }
+
+ RpcDb getDb(int dbid)
+ {
+ RpcDb rdb = (RpcDb)db_list.get(dbid);
+ if (rdb != null)
+ rdb.rdbenv.timer.last_access = now;
+ return rdb;
+ }
+
+ RpcDbTxn getTxn(int txnid)
+ {
+ RpcDbTxn rtxn = (RpcDbTxn)txn_list.get(txnid);
+ if (rtxn != null)
+ rtxn.timer.last_access = rtxn.rdbenv.timer.last_access = now;
+ return rtxn;
+ }
+
+ RpcDbc getCursor(int dbcid)
+ {
+ RpcDbc rdbc = (RpcDbc)cursor_list.get(dbcid);
+ if (rdbc != null)
+ rdbc.last_access = rdbc.timer.last_access = rdbc.rdbenv.timer.last_access = now;
+ return rdbc;
+ }
+
+ void doTimeouts()
+ {
+ if (now < hint) {
+ // DbServer.err.println("Skipping cleaner sweep - now = " + now + ", hint = " + hint);
+ return;
+ }
+
+ // DbServer.err.println("Starting a cleaner sweep");
+ hint = now + DbServer.maxto;
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc == null)
+ continue;
+
+ long end_time = rdbc.timer.last_access + rdbc.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rdbc + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbc);
+ delCursor(rdbc);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn = (RpcDbTxn)i.next();
+ if (rtxn == null)
+ continue;
+
+ long end_time = rtxn.timer.last_access + rtxn.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rtxn + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rtxn);
+ delTxn(rtxn);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = env_list.iterator(); i.hasNext(); ) {
+ RpcDbEnv rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv == null)
+ continue;
+
+ long end_time = rdbenv.timer.last_access + rdbenv.idletime;
+ // DbServer.err.println("Examining " + rdbenv + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbenv);
+ delEnv(rdbenv);
+ }
+ }
+
+ // if we didn't find anything, reset the hint
+ if (hint == now + DbServer.maxto)
+ hint = 0;
+
+ // DbServer.err.println("Finishing a cleaner sweep");
+ }
+
+ // Some constants that aren't available elsewhere
+ static final int DB_SERVER_FLAGMASK = Db.DB_LOCKDOWN |
+ Db.DB_PRIVATE | Db.DB_RECOVER | Db.DB_RECOVER_FATAL |
+ Db.DB_SYSTEM_MEM | Db.DB_USE_ENVIRON |
+ Db.DB_USE_ENVIRON_ROOT;
+ static final int DB_SERVER_ENVFLAGS = Db.DB_INIT_CDB |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL |
+ Db.DB_INIT_TXN | Db.DB_JOINENV;
+ static final int DB_SERVER_DBFLAGS = Db.DB_DIRTY_READ |
+ Db.DB_NOMMAP | Db.DB_RDONLY;
+ static final int DB_SERVER_DBNOSHARE = Db.DB_EXCL | Db.DB_TRUNCATE;
+
+ public static void main(String[] args)
+ {
+ System.out.println("Starting DbServer...");
+ for (int i = 0; i < args.length; i++) {
+ if (args[i].charAt(0) != '-')
+ usage();
+
+ switch (args[i].charAt(1)) {
+ case 'h':
+ ++i; // add_home(args[++i]);
+ break;
+ case 'I':
+ idleto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'P':
+ passwd = args[++i];
+ break;
+ case 't':
+ defto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'T':
+ maxto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'V':
+ // version;
+ break;
+ case 'v':
+ // verbose
+ break;
+ default:
+ usage();
+ }
+ }
+
+ try {
+ DbServer.err = new PrintWriter(new FileOutputStream("JavaRPCServer.trace", true));
+ DbServer server = new DbServer();
+ server.run();
+ } catch (Throwable e) {
+ System.out.println("DbServer exception:");
+ e.printStackTrace(DbServer.err);
+ } finally {
+ if (DbServer.err != null)
+ DbServer.err.close();
+ }
+
+ System.out.println("DbServer stopped.");
+ }
+
+ static void usage()
+ {
+ System.err.println("usage: java com.sleepycat.db.rpcserver.DbServer \\");
+ System.err.println("[-Vv] [-h home] [-P passwd] [-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ System.exit(1);
+ }
+}
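
DbServer keeps all live handles in FreeList tables so that the integer ids on the wire double as table indices, and it reclaims idle handles lazily: every lookup stamps the owning timer with the current dispatch time, and doTimeouts() only walks the tables once the current time has passed the cached hint, the earliest expiry found on the previous sweep. The expiry rule itself is a single comparison against last_access; a minimal restatement follows (the real Timer class is defined elsewhere in this patch, so the class below is only an illustration):

    // Minimal restatement of the expiry test used by doTimeouts() above.
    // The actual Timer class lives elsewhere in the patch; this sketch only
    // captures the rule "reclaim once last_access + timeout is in the past".
    class TimerSketch {
        long last_access;                        // refreshed on every RPC lookup

        boolean expired(long now, long timeout) {
            return last_access + timeout < now;  // same comparison as doTimeouts()
        }
    }
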
diff --git a/libdb/rpc_server/java/FreeList.java b/libdb/rpc_server/java/FreeList.java
new file mode 100644
index 0000000..fee2259
--- /dev/null
+++ b/libdb/rpc_server/java/FreeList.java
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Keep track of a list of objects by id with a free list.
+ * Intentionally package-protected exposure.
+ */
+class FreeList
+{
+ class FreeIndex {
+ int index;
+ FreeIndex(int index) { this.index = index; }
+ int getIndex() { return index; }
+ }
+
+ Vector items = new Vector();
+ FreeIndex free_head = null;
+
+ public synchronized int add(Object obj) {
+ int pos;
+ if (free_head == null) {
+ pos = items.size();
+ items.addElement(obj);
+ if (pos % 1000 == 0)
+ DbServer.err.println(this + " grew to size " + pos);
+ } else {
+ pos = free_head.getIndex();
+ free_head = (FreeIndex)items.elementAt(pos);
+ items.setElementAt(obj, pos);
+ }
+ return pos;
+ }
+
+ public synchronized void del(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj != null && obj instanceof FreeIndex)
+ throw new NoSuchElementException("index " + pos + " has already been freed");
+ items.setElementAt(free_head, pos);
+ free_head = new FreeIndex(pos);
+ }
+
+ public void del(Object obj) {
+ del(items.indexOf(obj));
+ }
+
+ public Object get(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj instanceof FreeIndex)
+ obj = null;
+ return obj;
+ }
+
+ public LocalIterator iterator() {
+ return new FreeListIterator();
+ }
+
+ /**
+ * Iterator for a FreeList. Note that this class doesn't implement
+ * java.util.Iterator, in order to maintain compatibility with Java 1.1.
+ * Intentionally package-protected exposure.
+ */
+ class FreeListIterator implements LocalIterator {
+ int current;
+
+ FreeListIterator() { current = findNext(-1); }
+
+ private int findNext(int start) {
+ int next = start;
+ while (++next < items.size()) {
+ Object obj = items.elementAt(next);
+ if (obj == null || !(obj instanceof FreeIndex))
+ break;
+ }
+ return next;
+ }
+
+ public boolean hasNext() {
+ return (findNext(current) < items.size());
+ }
+
+ public Object next() {
+ current = findNext(current);
+ if (current == items.size())
+ throw new NoSuchElementException("enumerated past end of FreeList");
+ return items.elementAt(current);
+ }
+
+ public void remove() {
+ del(current);
+ }
+ }
+}
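
FreeList is the id allocator behind those handle tables: a freed slot is overwritten with a FreeIndex recording the previous head of the free list, so the Vector itself doubles as a singly linked list of reusable positions, and ids handed to clients are recycled in LIFO order. A short usage sketch (not part of the patch; it must sit in the same package because FreeList is package-private, and DbServer.err is initialized first because add() logs growth through it, including on the very first insertion):

    // Usage sketch for FreeList. DbServer normally reserves index 0 for null
    // via init_lists(); this standalone example skips that convention.
    package com.sleepycat.db.rpcserver;

    import java.io.PrintWriter;

    class FreeListDemo {
        public static void main(String[] argv) {
            DbServer.err = new PrintWriter(System.err, true); // add() logs growth here
            FreeList handles = new FreeList();
            int a = handles.add("env-A");         // -> 0, first slot
            int b = handles.add("env-B");         // -> 1
            handles.del(a);                       // slot 0 joins the free list
            int c = handles.add("env-C");         // -> 0 again, slot reused
            System.out.println(handles.get(b));   // prints "env-B"
            System.out.println(c == a);           // true: the id was recycled
        }
    }
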
diff --git a/libdb/rpc_server/java/LocalIterator.java b/libdb/rpc_server/java/LocalIterator.java
new file mode 100644
index 0000000..8415de7
--- /dev/null
+++ b/libdb/rpc_server/java/LocalIterator.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Iterator interface. Note that this matches java.util.Iterator but is
+ * kept as a local interface to maintain compatibility with Java 1.1.
+ * Intentionally package-protected exposure.
+ */
+interface LocalIterator {
+ boolean hasNext();
+ Object next();
+ void remove();
+}
diff --git a/libdb/rpc_server/java/README b/libdb/rpc_server/java/README
new file mode 100644
index 0000000..c2d8f3a
--- /dev/null
+++ b/libdb/rpc_server/java/README
@@ -0,0 +1,24 @@
+Berkeley DB Java RPC server, copyright (C) 2002 Sleepycat Software
+
+The Java implementation of the Berkeley DB RPC server is intended
+primarily for testing purposes. It provides the same interface
+as the C and C++ RPC servers, but is implemented via the Java API
+rather than the C or C++ APIs. This allows the existing Tcl test
+suite to exercise the Java API without modification.
+
+The Java RPC server relies on a Java version of rpcgen to
+automatically generate appropriate Java classes from the RPC
+interface specification (../db_server.x). We use jrpcgen, which
+is part of the Remote Tea for Java project:
+ acplt.plt.rwth-aachen.de/ks/english/remotetea.html
+
+To rebuild the Java stubs from db_server.x, you will need to
+download the full Remote Tea package, but if you just want to
+compile the Java sources and run the Java RPC server, the runtime
+component of Remote Tea is included in oncrpc.jar. Building
+the Java RPC server is automatic when Berkeley DB is configured
+with both --enable-rpc and --enable-java.
+
+All of the Remote Tea project is licensed under the GNU Library
+General Public License (LGPL), and we have made no modifications to their
+released code.
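
Once built, the server is started through DbServer.main(), whose option parsing appears above: -I, -t and -T take the idle, default and maximum timeouts in seconds (converted to milliseconds internally), and -P supplies an encryption password. A small example, equivalent to running the class from the command line with those flags:

    // Starts the Java RPC server with a 10-minute idle timeout, 5-minute
    // default timeout and 1-hour maximum timeout, matching the defaults in
    // DbServer.java. Equivalent to:
    //   java com.sleepycat.db.rpcserver.DbServer -I 600 -t 300 -T 3600
    class StartDbServer {
        public static void main(String[] argv) {
            com.sleepycat.db.rpcserver.DbServer.main(
                new String[] { "-I", "600", "-t", "300", "-T", "3600" });
        }
    }
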
diff --git a/libdb/rpc_server/java/RpcDb.java b/libdb/rpc_server/java/RpcDb.java
new file mode 100644
index 0000000..55b0d42
--- /dev/null
+++ b/libdb/rpc_server/java/RpcDb.java
@@ -0,0 +1,694 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a db object for the Java RPC server.
+ */
+public class RpcDb extends Timer
+{
+ static final byte[] empty = new byte[0];
+ Db db;
+ RpcDbEnv rdbenv;
+ int refcount = 1;
+ String dbname, subdbname;
+ int type, setflags, openflags;
+
+ public RpcDb(RpcDbEnv rdbenv)
+ {
+ this.rdbenv = rdbenv;
+ }
+
+ void dispose()
+ {
+ if (db != null) {
+ try {
+ db.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ db = null;
+ }
+ }
+
+ public void associate(DbDispatcher server,
+ __db_associate_msg args, __db_associate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ db.associate(txn, server.getDb(args.sdbpcl_id).db, null, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __db_close_msg args, __db_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ db.close(args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __db_create_msg args, __db_create_reply reply)
+ {
+ try {
+ db = new Db(server.getEnv(args.dbenvcl_id).dbenv, args.flags);
+ reply.dbcl_id = server.addDb(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void cursor(DbDispatcher server,
+ __db_cursor_msg args, __db_cursor_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbc dbc = db.cursor(txn, args.flags);
+ RpcDbc rdbc = new RpcDbc(this, dbc, false);
+ rdbc.timer = (rtxn != null) ? rtxn.timer : this;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __db_del_msg args, __db_del_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ db.del(txn, key, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __db_get_msg args, __db_get_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ if ((args.flags & Db.DB_MULTIPLE) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = db.get(txn, key, data, args.flags);
+
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void join(DbDispatcher server,
+ __db_join_msg args, __db_join_reply reply)
+ {
+ try {
+ Dbc[] cursors = new Dbc[args.curs.length + 1];
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ if (rdbc == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ cursors[i] = rdbc.dbc;
+ }
+ cursors[args.curs.length] = null;
+
+ Dbc jdbc = db.join(cursors, args.flags);
+
+ RpcDbc rjdbc = new RpcDbc(this, jdbc, true);
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ RpcDbc rdbc0 = server.getCursor(args.curs[0]);
+ if (rdbc0.timer != rdbc0)
+ rjdbc.timer = rdbc0.timer;
+
+ /*
+ * All of the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+ * curlist cursors while the join cursor is active.
+ */
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ rdbc.orig_timer = rdbc.timer;
+ rdbc.timer = rjdbc;
+ }
+ reply.dbcidcl_id = server.addCursor(rjdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void key_range(DbDispatcher server,
+ __db_key_range_msg args, __db_key_range_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ DbKeyRange range = new DbKeyRange();
+
+ db.key_range(txn, key, range, args.flags);
+ reply.status = 0;
+ reply.less = range.less;
+ reply.equal = range.equal;
+ reply.greater = range.greater;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDb(DbDispatcher server, __db_open_reply reply)
+ throws DbException
+ {
+ RpcDb rdb = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).db_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdb = (RpcDb)i.next();
+ if (rdb != null && rdb != this && rdb.rdbenv == rdbenv &&
+ (type == Db.DB_UNKNOWN || rdb.type == type) &&
+ openflags == rdb.openflags &&
+ setflags == rdb.setflags &&
+ dbname != null && rdb.dbname != null &&
+ dbname.equals(rdb.dbname) &&
+ (subdbname == rdb.subdbname ||
+ (subdbname != null && rdb.subdbname != null &&
+ subdbname.equals(rdb.subdbname))))
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ ++rdb.refcount;
+ reply.dbcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.type = rdb.db.get_type();
+ reply.dbflags = rdb.db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = rdb.db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing Db: " + reply.dbcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __db_open_msg args, __db_open_reply reply)
+ {
+ try {
+ dbname = (args.name.length() > 0) ? args.name : null;
+ subdbname = (args.subdb.length() > 0) ? args.subdb : null;
+ type = args.type;
+ openflags = args.flags & DbServer.DB_SERVER_DBFLAGS;
+
+ if (findSharedDb(server, reply)) {
+ db.close(0);
+ db = null;
+ server.delDb(this);
+ } else {
+ DbServer.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")");
+ db.open(null, dbname, subdbname, args.type, args.flags, args.mode);
+
+ reply.dbcl_id = args.dbpcl_id;
+ reply.type = this.type = db.get_type();
+ reply.dbflags = db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("Db.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id);
+ }
+
+ public void pget(DbDispatcher server,
+ __db_pget_msg args, __db_pget_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ db.pget(txn, skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata ||
+ skey.get_data().length != skey.get_size()) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata ||
+ pkey.get_data().length != pkey.get_size()) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.skeydata = reply.pkeydata = reply.datadata = empty;
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __db_put_msg args, __db_put_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(args.dataflags);
+
+ reply.status = db.put(txn, key, data, args.flags);
+
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if ((args.flags & Db.DB_APPEND) != 0) {
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+ } else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ reply.keydata = empty;
+ reply.status = e.get_errno();
+ DbServer.err.println("Exception, setting status to " + reply.status);
+ e.printStackTrace(DbServer.err);
+ }
+ }
+
+ public void remove(DbDispatcher server,
+ __db_remove_msg args, __db_remove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ db.remove(args.name, args.subdb, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void rename(DbDispatcher server,
+ __db_rename_msg args, __db_rename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+ db.rename(args.name, args.subdb, args.newname, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void set_bt_maxkey(DbDispatcher server,
+ __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply)
+ {
+ try {
+ db.set_bt_maxkey(args.maxkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_bt_minkey(DbDispatcher server,
+ __db_bt_minkey_msg args, __db_bt_minkey_reply reply)
+ {
+ try {
+ db.set_bt_minkey(args.minkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __db_encrypt_msg args, __db_encrypt_reply reply)
+ {
+ try {
+ db.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __db_flags_msg args, __db_flags_reply reply)
+ {
+ try {
+ // DbServer.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")");
+ db.set_flags(args.flags);
+ setflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_ffactor(DbDispatcher server,
+ __db_h_ffactor_msg args, __db_h_ffactor_reply reply)
+ {
+ try {
+ db.set_h_ffactor(args.ffactor);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_nelem(DbDispatcher server,
+ __db_h_nelem_msg args, __db_h_nelem_reply reply)
+ {
+ try {
+ db.set_h_nelem(args.nelem);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_lorder(DbDispatcher server,
+ __db_lorder_msg args, __db_lorder_reply reply)
+ {
+ try {
+ db.set_lorder(args.lorder);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_pagesize(DbDispatcher server,
+ __db_pagesize_msg args, __db_pagesize_reply reply)
+ {
+ try {
+ db.set_pagesize(args.pagesize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_q_extentsize(DbDispatcher server,
+ __db_extentsize_msg args, __db_extentsize_reply reply)
+ {
+ try {
+ db.set_q_extentsize(args.extentsize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_delim(DbDispatcher server,
+ __db_re_delim_msg args, __db_re_delim_reply reply)
+ {
+ try {
+ db.set_re_delim(args.delim);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_len(DbDispatcher server,
+ __db_re_len_msg args, __db_re_len_reply reply)
+ {
+ try {
+ db.set_re_len(args.len);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_pad(DbDispatcher server,
+ __db_re_pad_msg args, __db_re_pad_reply reply)
+ {
+ try {
+ db.set_re_pad(args.pad);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void stat(DbDispatcher server,
+ __db_stat_msg args, __db_stat_reply reply)
+ {
+ try {
+ Object raw_stat = db.stat(args.flags);
+
+ if (raw_stat instanceof DbHashStat) {
+ DbHashStat hs = (DbHashStat)raw_stat;
+ int[] raw_stats = {
+ hs.hash_magic, hs.hash_version,
+ hs.hash_metaflags, hs.hash_nkeys,
+ hs.hash_ndata, hs.hash_pagesize,
+ hs.hash_ffactor, hs.hash_buckets,
+ hs.hash_free, hs.hash_bfree,
+ hs.hash_bigpages, hs.hash_big_bfree,
+ hs.hash_overflows, hs.hash_ovfl_free,
+ hs.hash_dup, hs.hash_dup_free
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbQueueStat) {
+ DbQueueStat qs = (DbQueueStat)raw_stat;
+ int[] raw_stats = {
+ qs.qs_magic, qs.qs_version,
+ qs.qs_metaflags, qs.qs_nkeys,
+ qs.qs_ndata, qs.qs_pagesize,
+ qs.qs_extentsize, qs.qs_pages,
+ qs.qs_re_len, qs.qs_re_pad,
+ qs.qs_pgfree, qs.qs_first_recno,
+ qs.qs_cur_recno
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbBtreeStat) {
+ DbBtreeStat bs = (DbBtreeStat)raw_stat;
+ int[] raw_stats = {
+ bs.bt_magic, bs.bt_version,
+ bs.bt_metaflags, bs.bt_nkeys,
+ bs.bt_ndata, bs.bt_pagesize,
+ bs.bt_maxkey, bs.bt_minkey,
+ bs.bt_re_len, bs.bt_re_pad,
+ bs.bt_levels, bs.bt_int_pg,
+ bs.bt_leaf_pg, bs.bt_dup_pg,
+ bs.bt_over_pg, bs.bt_free,
+ bs.bt_int_pgfree, bs.bt_leaf_pgfree,
+ bs.bt_dup_pgfree, bs.bt_over_pgfree
+ };
+ reply.stats = raw_stats;
+ } else
+ throw new DbException("Invalid return type from db.stat()", Db.DB_NOTFOUND);
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.stats = new int[0];
+ }
+ }
+
+ public void sync(DbDispatcher server,
+ __db_sync_msg args, __db_sync_reply reply)
+ {
+ try {
+ db.sync(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void truncate(DbDispatcher server,
+ __db_truncate_msg args, __db_truncate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ reply.count = db.truncate(txn, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
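
A detail worth noting in RpcDb: get(), pget() and put() all rebuild Dbt objects from the wire fields, ask Berkeley DB to allocate result buffers with DB_DBT_MALLOC, and then copy data into the reply only when the returned buffer either aliases the request bytes or is larger than the returned size, so the reply always carries an exactly sized, private byte array. The repeated test can be read as the following hypothetical helper (the method is not in the patch; only its logic is):

    // Hypothetical helper equivalent to the copy-back logic repeated in
    // RpcDb.get(), pget() and put(): a reply buffer must hold exactly
    // dbt.get_size() bytes and must not alias the buffer received over RPC.
    final class ReplyBytes {
        static byte[] of(com.sleepycat.db.Dbt dbt, byte[] requestBuf) {
            byte[] d = dbt.get_data();
            if (d == requestBuf || d.length != dbt.get_size()) {
                byte[] out = new byte[dbt.get_size()];
                System.arraycopy(d, 0, out, 0, dbt.get_size());
                return out;                  // trim/copy into an exact-size array
            }
            return d;     // DB_DBT_MALLOC already produced a private, exact buffer
        }
    }
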
diff --git a/libdb/rpc_server/java/RpcDbEnv.java b/libdb/rpc_server/java/RpcDbEnv.java
new file mode 100644
index 0000000..8b3b172
--- /dev/null
+++ b/libdb/rpc_server/java/RpcDbEnv.java
@@ -0,0 +1,269 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbenv for the Java RPC server.
+ */
+public class RpcDbEnv extends Timer
+{
+ DbEnv dbenv;
+ String home;
+ long idletime, timeout;
+ int openflags, onflags, offflags;
+ int refcount = 1;
+
+ void dispose()
+ {
+ if (dbenv != null) {
+ try {
+ dbenv.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbenv = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __env_close_msg args, __env_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ dbenv.close(args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __env_create_msg args, __env_create_reply reply)
+ {
+ this.idletime = (args.timeout != 0) ? args.timeout : DbServer.idleto;
+ this.timeout = DbServer.defto;
+ try {
+ dbenv = new DbEnv(0);
+ reply.envcl_id = server.addEnv(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbremove(DbDispatcher server,
+ __env_dbremove_msg args, __env_dbremove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbremove(txn, args.name, args.subdb, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbrename(DbDispatcher server,
+ __env_dbrename_msg args, __env_dbrename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbrename(txn, args.name, args.subdb, args.newname, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDbEnv(DbDispatcher server, __env_open_reply reply)
+ throws DbException
+ {
+ RpcDbEnv rdbenv = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).env_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv != null && rdbenv != this &&
+ (home == rdbenv.home ||
+ (home != null && home.equals(rdbenv.home))) &&
+ openflags == rdbenv.openflags &&
+ onflags == rdbenv.onflags &&
+ offflags == rdbenv.offflags)
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ /*
+ * The only thing left to check is the timeout.
+		 * Since the server timeout set by the client is only a hint, for
+		 * the purposes of sharing we give the environment the benefit of
+		 * the doubt and grant it the longer of the two timeouts.
+ */
+ if (rdbenv.timeout < timeout)
+ rdbenv.timeout = timeout;
+
+ ++rdbenv.refcount;
+ reply.envcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing DbEnv: " + reply.envcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __env_open_msg args, __env_open_reply reply)
+ {
+ try {
+ home = (args.home.length() > 0) ? args.home : null;
+
+ /*
+			 * If they are using locking, do deadlock detection for them
+			 * internally.
+ */
+ if ((args.flags & Db.DB_INIT_LOCK) != 0)
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+
+ // adjust flags for RPC
+ int newflags = (args.flags & ~DbServer.DB_SERVER_FLAGMASK);
+ openflags = (newflags & DbServer.DB_SERVER_ENVFLAGS);
+
+ if (findSharedDbEnv(server, reply)) {
+ dbenv.close(0);
+ dbenv = null;
+ server.delEnv(this);
+ } else {
+ // TODO: check home?
+ dbenv.open(home, newflags, args.mode);
+ reply.status = 0;
+ reply.envcl_id = args.dbenvcl_id;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("DbEnv.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id);
+ }
+
+ public void remove(DbDispatcher server,
+ __env_remove_msg args, __env_remove_reply reply)
+ {
+ try {
+ args.home = (args.home.length() > 0) ? args.home : null;
+ // TODO: check home?
+
+ dbenv.remove(args.home, args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void set_cachesize(DbDispatcher server,
+ __env_cachesize_msg args, __env_cachesize_reply reply)
+ {
+ try {
+ dbenv.set_cachesize(args.gbytes, args.bytes, args.ncache);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __env_encrypt_msg args, __env_encrypt_reply reply)
+ {
+ try {
+ dbenv.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __env_flags_msg args, __env_flags_reply reply)
+ {
+ try {
+ dbenv.set_flags(args.flags, args.onoff != 0);
+ if (args.onoff != 0)
+ onflags |= args.flags;
+ else
+ offflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ // txn_recover implementation
+ public void txn_recover(DbDispatcher server,
+ __txn_recover_msg args, __txn_recover_reply reply)
+ {
+ try {
+ DbPreplist[] prep_list = dbenv.txn_recover(args.count, args.flags);
+ if (prep_list != null && prep_list.length > 0) {
+ int count = prep_list.length;
+ reply.retcount = count;
+ reply.txn = new int[count];
+ reply.gid = new byte[count * Db.DB_XIDDATASIZE];
+
+ for(int i = 0; i < count; i++) {
+ reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].txn));
+ System.arraycopy(prep_list[i].gid, 0, reply.gid, i * Db.DB_XIDDATASIZE, Db.DB_XIDDATASIZE);
+ }
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/libdb/rpc_server/java/RpcDbTxn.java b/libdb/rpc_server/java/RpcDbTxn.java
new file mode 100644
index 0000000..b9c9688
--- /dev/null
+++ b/libdb/rpc_server/java/RpcDbTxn.java
@@ -0,0 +1,123 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a txn object for the Java RPC server.
+ */
+public class RpcDbTxn extends Timer
+{
+ RpcDbEnv rdbenv;
+ DbTxn txn;
+
+ public RpcDbTxn(RpcDbEnv rdbenv, DbTxn txn)
+ {
+ this.rdbenv = rdbenv;
+ this.txn = txn;
+ }
+
+ void dispose()
+ {
+ if (txn != null) {
+ try {
+ txn.abort();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ txn = null;
+ }
+ }
+
+ public void abort(DbDispatcher server,
+ __txn_abort_msg args, __txn_abort_reply reply)
+ {
+ try {
+ txn.abort();
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void begin(DbDispatcher server,
+ __txn_begin_msg args, __txn_begin_reply reply)
+ {
+ try {
+ if (rdbenv == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ DbEnv dbenv = rdbenv.dbenv;
+ RpcDbTxn rparent = server.getTxn(args.parentcl_id);
+ DbTxn parent = (rparent != null) ? rparent.txn : null;
+
+ txn = dbenv.txn_begin(parent, args.flags);
+
+ if (rparent != null)
+ timer = rparent.timer;
+ reply.txnidcl_id = server.addTxn(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void commit(DbDispatcher server,
+ __txn_commit_msg args, __txn_commit_reply reply)
+ {
+ try {
+ txn.commit(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void discard(DbDispatcher server,
+ __txn_discard_msg args, __txn_discard_reply reply)
+ {
+ try {
+ txn.discard(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void prepare(DbDispatcher server,
+ __txn_prepare_msg args, __txn_prepare_reply reply)
+ {
+ try {
+ txn.prepare(args.gid);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/libdb/rpc_server/java/RpcDbc.java b/libdb/rpc_server/java/RpcDbc.java
new file mode 100644
index 0000000..e3e6861
--- /dev/null
+++ b/libdb/rpc_server/java/RpcDbc.java
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbc object for the Java RPC server.
+ */
+public class RpcDbc extends Timer
+{
+ static final byte[] empty = new byte[0];
+ RpcDbEnv rdbenv;
+ RpcDb rdb;
+ Dbc dbc;
+ Timer orig_timer;
+ boolean isJoin;
+
+ public RpcDbc(RpcDb rdb, Dbc dbc, boolean isJoin)
+ {
+ this.rdb = rdb;
+ this.rdbenv = rdb.rdbenv;
+ this.dbc = dbc;
+ this.isJoin = isJoin;
+ }
+
+ void dispose()
+ {
+ if (dbc != null) {
+ try {
+ dbc.close();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbc = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __dbc_close_msg args, __dbc_close_reply reply)
+ {
+ try {
+ dbc.close();
+ dbc = null;
+
+ if (isJoin)
+ for(LocalIterator i = ((DbServer)server).cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+				// Unjoin cursors that were joined to create this join cursor.
+ if (rdbc != null && rdbc.timer == this)
+ rdbc.timer = rdbc.orig_timer;
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delCursor(this);
+ }
+ }
+
+ public void count(DbDispatcher server,
+ __dbc_count_msg args, __dbc_count_reply reply)
+ {
+ try {
+ reply.dupcount = dbc.count(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __dbc_del_msg args, __dbc_del_reply reply)
+ {
+ try {
+ reply.status = dbc.del(args.flags);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dup(DbDispatcher server,
+ __dbc_dup_msg args, __dbc_dup_reply reply)
+ {
+ try {
+ Dbc newdbc = dbc.dup(args.flags);
+ RpcDbc rdbc = new RpcDbc(rdb, newdbc, false);
+ /* If this cursor has a parent txn, we need to use it too. */
+ if (timer != this)
+ rdbc.timer = timer;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __dbc_get_msg args, __dbc_get_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ if ((args.flags & Db.DB_MULTIPLE) != 0 ||
+ (args.flags & Db.DB_MULTIPLE_KEY) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.get(key, data, args.flags);
+
+ if (key.get_data() == args.keydata) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void pget(DbDispatcher server,
+ __dbc_pget_msg args, __dbc_pget_reply reply)
+ {
+ try {
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.pget(skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __dbc_put_msg args, __dbc_put_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(args.keyflags & Db.DB_DBT_PARTIAL);
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ data.set_flags(args.dataflags);
+
+ reply.status = dbc.put(key, data, args.flags);
+
+ if (reply.status == 0 &&
+ (args.flags == Db.DB_AFTER || args.flags == Db.DB_BEFORE) &&
+ rdb.db.get_type() == Db.DB_RECNO)
+ reply.keydata = key.get_data();
+ else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = empty;
+ }
+ }
+}
diff --git a/libdb/rpc_server/java/Timer.java b/libdb/rpc_server/java/Timer.java
new file mode 100644
index 0000000..43421bc
--- /dev/null
+++ b/libdb/rpc_server/java/Timer.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.db.rpcserver;
+
+/**
+ * Class to keep track of access times. This is slightly devious by having
+ * both a last_access time and a reference to another Timer that can be
+ * used to group/share access times. This is done to keep the Java code
+ * close to the canonical C implementation of the RPC server.
+ */
+public class Timer
+{
+ Timer timer = this;
+ long last_access;
+}
diff --git a/libdb/rpc_server/java/gen/DbServerStub.java b/libdb/rpc_server/java/gen/DbServerStub.java
new file mode 100644
index 0000000..90fc13a
--- /dev/null
+++ b/libdb/rpc_server/java/gen/DbServerStub.java
@@ -0,0 +1,495 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+import org.acplt.oncrpc.server.*;
+
+/**
+ */
+public abstract class DbServerStub extends OncRpcServerStub implements OncRpcDispatchable {
+
+ public DbServerStub()
+ throws OncRpcException, IOException {
+ this(0);
+ }
+
+ public DbServerStub(int port)
+ throws OncRpcException, IOException {
+ info = new OncRpcServerTransportRegistrationInfo [] {
+ new OncRpcServerTransportRegistrationInfo(db_server.DB_RPC_SERVERPROG, 4001),
+ };
+ transports = new OncRpcServerTransport [] {
+ new OncRpcUdpServerTransport(this, port, info, 32768),
+ new OncRpcTcpServerTransport(this, port, info, 32768)
+ };
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program, int version, int procedure)
+ throws OncRpcException, IOException {
+ if ( version == 4001 ) {
+ switch ( procedure ) {
+ case 1: {
+ __env_cachesize_msg args$ = new __env_cachesize_msg();
+ call.retrieveCall(args$);
+ __env_cachesize_reply result$ = __DB_env_cachesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 2: {
+ __env_close_msg args$ = new __env_close_msg();
+ call.retrieveCall(args$);
+ __env_close_reply result$ = __DB_env_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 3: {
+ __env_create_msg args$ = new __env_create_msg();
+ call.retrieveCall(args$);
+ __env_create_reply result$ = __DB_env_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 4: {
+ __env_dbremove_msg args$ = new __env_dbremove_msg();
+ call.retrieveCall(args$);
+ __env_dbremove_reply result$ = __DB_env_dbremove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 5: {
+ __env_dbrename_msg args$ = new __env_dbrename_msg();
+ call.retrieveCall(args$);
+ __env_dbrename_reply result$ = __DB_env_dbrename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 6: {
+ __env_encrypt_msg args$ = new __env_encrypt_msg();
+ call.retrieveCall(args$);
+ __env_encrypt_reply result$ = __DB_env_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 7: {
+ __env_flags_msg args$ = new __env_flags_msg();
+ call.retrieveCall(args$);
+ __env_flags_reply result$ = __DB_env_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 8: {
+ __env_open_msg args$ = new __env_open_msg();
+ call.retrieveCall(args$);
+ __env_open_reply result$ = __DB_env_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 9: {
+ __env_remove_msg args$ = new __env_remove_msg();
+ call.retrieveCall(args$);
+ __env_remove_reply result$ = __DB_env_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 10: {
+ __txn_abort_msg args$ = new __txn_abort_msg();
+ call.retrieveCall(args$);
+ __txn_abort_reply result$ = __DB_txn_abort_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 11: {
+ __txn_begin_msg args$ = new __txn_begin_msg();
+ call.retrieveCall(args$);
+ __txn_begin_reply result$ = __DB_txn_begin_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 12: {
+ __txn_commit_msg args$ = new __txn_commit_msg();
+ call.retrieveCall(args$);
+ __txn_commit_reply result$ = __DB_txn_commit_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 13: {
+ __txn_discard_msg args$ = new __txn_discard_msg();
+ call.retrieveCall(args$);
+ __txn_discard_reply result$ = __DB_txn_discard_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 14: {
+ __txn_prepare_msg args$ = new __txn_prepare_msg();
+ call.retrieveCall(args$);
+ __txn_prepare_reply result$ = __DB_txn_prepare_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 15: {
+ __txn_recover_msg args$ = new __txn_recover_msg();
+ call.retrieveCall(args$);
+ __txn_recover_reply result$ = __DB_txn_recover_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 16: {
+ __db_associate_msg args$ = new __db_associate_msg();
+ call.retrieveCall(args$);
+ __db_associate_reply result$ = __DB_db_associate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 17: {
+ __db_bt_maxkey_msg args$ = new __db_bt_maxkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_maxkey_reply result$ = __DB_db_bt_maxkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 18: {
+ __db_bt_minkey_msg args$ = new __db_bt_minkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_minkey_reply result$ = __DB_db_bt_minkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 19: {
+ __db_close_msg args$ = new __db_close_msg();
+ call.retrieveCall(args$);
+ __db_close_reply result$ = __DB_db_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 20: {
+ __db_create_msg args$ = new __db_create_msg();
+ call.retrieveCall(args$);
+ __db_create_reply result$ = __DB_db_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 21: {
+ __db_del_msg args$ = new __db_del_msg();
+ call.retrieveCall(args$);
+ __db_del_reply result$ = __DB_db_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 22: {
+ __db_encrypt_msg args$ = new __db_encrypt_msg();
+ call.retrieveCall(args$);
+ __db_encrypt_reply result$ = __DB_db_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 23: {
+ __db_extentsize_msg args$ = new __db_extentsize_msg();
+ call.retrieveCall(args$);
+ __db_extentsize_reply result$ = __DB_db_extentsize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 24: {
+ __db_flags_msg args$ = new __db_flags_msg();
+ call.retrieveCall(args$);
+ __db_flags_reply result$ = __DB_db_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 25: {
+ __db_get_msg args$ = new __db_get_msg();
+ call.retrieveCall(args$);
+ __db_get_reply result$ = __DB_db_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 26: {
+ __db_h_ffactor_msg args$ = new __db_h_ffactor_msg();
+ call.retrieveCall(args$);
+ __db_h_ffactor_reply result$ = __DB_db_h_ffactor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 27: {
+ __db_h_nelem_msg args$ = new __db_h_nelem_msg();
+ call.retrieveCall(args$);
+ __db_h_nelem_reply result$ = __DB_db_h_nelem_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 28: {
+ __db_key_range_msg args$ = new __db_key_range_msg();
+ call.retrieveCall(args$);
+ __db_key_range_reply result$ = __DB_db_key_range_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 29: {
+ __db_lorder_msg args$ = new __db_lorder_msg();
+ call.retrieveCall(args$);
+ __db_lorder_reply result$ = __DB_db_lorder_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 30: {
+ __db_open_msg args$ = new __db_open_msg();
+ call.retrieveCall(args$);
+ __db_open_reply result$ = __DB_db_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 31: {
+ __db_pagesize_msg args$ = new __db_pagesize_msg();
+ call.retrieveCall(args$);
+ __db_pagesize_reply result$ = __DB_db_pagesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 32: {
+ __db_pget_msg args$ = new __db_pget_msg();
+ call.retrieveCall(args$);
+ __db_pget_reply result$ = __DB_db_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 33: {
+ __db_put_msg args$ = new __db_put_msg();
+ call.retrieveCall(args$);
+ __db_put_reply result$ = __DB_db_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 34: {
+ __db_re_delim_msg args$ = new __db_re_delim_msg();
+ call.retrieveCall(args$);
+ __db_re_delim_reply result$ = __DB_db_re_delim_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 35: {
+ __db_re_len_msg args$ = new __db_re_len_msg();
+ call.retrieveCall(args$);
+ __db_re_len_reply result$ = __DB_db_re_len_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 36: {
+ __db_re_pad_msg args$ = new __db_re_pad_msg();
+ call.retrieveCall(args$);
+ __db_re_pad_reply result$ = __DB_db_re_pad_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 37: {
+ __db_remove_msg args$ = new __db_remove_msg();
+ call.retrieveCall(args$);
+ __db_remove_reply result$ = __DB_db_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 38: {
+ __db_rename_msg args$ = new __db_rename_msg();
+ call.retrieveCall(args$);
+ __db_rename_reply result$ = __DB_db_rename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 39: {
+ __db_stat_msg args$ = new __db_stat_msg();
+ call.retrieveCall(args$);
+ __db_stat_reply result$ = __DB_db_stat_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 40: {
+ __db_sync_msg args$ = new __db_sync_msg();
+ call.retrieveCall(args$);
+ __db_sync_reply result$ = __DB_db_sync_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 41: {
+ __db_truncate_msg args$ = new __db_truncate_msg();
+ call.retrieveCall(args$);
+ __db_truncate_reply result$ = __DB_db_truncate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 42: {
+ __db_cursor_msg args$ = new __db_cursor_msg();
+ call.retrieveCall(args$);
+ __db_cursor_reply result$ = __DB_db_cursor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 43: {
+ __db_join_msg args$ = new __db_join_msg();
+ call.retrieveCall(args$);
+ __db_join_reply result$ = __DB_db_join_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 44: {
+ __dbc_close_msg args$ = new __dbc_close_msg();
+ call.retrieveCall(args$);
+ __dbc_close_reply result$ = __DB_dbc_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 45: {
+ __dbc_count_msg args$ = new __dbc_count_msg();
+ call.retrieveCall(args$);
+ __dbc_count_reply result$ = __DB_dbc_count_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 46: {
+ __dbc_del_msg args$ = new __dbc_del_msg();
+ call.retrieveCall(args$);
+ __dbc_del_reply result$ = __DB_dbc_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 47: {
+ __dbc_dup_msg args$ = new __dbc_dup_msg();
+ call.retrieveCall(args$);
+ __dbc_dup_reply result$ = __DB_dbc_dup_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 48: {
+ __dbc_get_msg args$ = new __dbc_get_msg();
+ call.retrieveCall(args$);
+ __dbc_get_reply result$ = __DB_dbc_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 49: {
+ __dbc_pget_msg args$ = new __dbc_pget_msg();
+ call.retrieveCall(args$);
+ __dbc_pget_reply result$ = __DB_dbc_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 50: {
+ __dbc_put_msg args$ = new __dbc_put_msg();
+ call.retrieveCall(args$);
+ __dbc_put_reply result$ = __DB_dbc_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ default:
+ call.failProcedureUnavailable();
+ }
+ } else {
+ call.failProcedureUnavailable();
+ }
+ }
+
+ public abstract __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg arg1);
+
+ public abstract __env_close_reply __DB_env_close_4001(__env_close_msg arg1);
+
+ public abstract __env_create_reply __DB_env_create_4001(__env_create_msg arg1);
+
+ public abstract __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg arg1);
+
+ public abstract __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg arg1);
+
+ public abstract __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg arg1);
+
+ public abstract __env_flags_reply __DB_env_flags_4001(__env_flags_msg arg1);
+
+ public abstract __env_open_reply __DB_env_open_4001(__env_open_msg arg1);
+
+ public abstract __env_remove_reply __DB_env_remove_4001(__env_remove_msg arg1);
+
+ public abstract __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg arg1);
+
+ public abstract __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg arg1);
+
+ public abstract __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg arg1);
+
+ public abstract __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg arg1);
+
+ public abstract __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg arg1);
+
+ public abstract __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg arg1);
+
+ public abstract __db_associate_reply __DB_db_associate_4001(__db_associate_msg arg1);
+
+ public abstract __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg arg1);
+
+ public abstract __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg arg1);
+
+ public abstract __db_close_reply __DB_db_close_4001(__db_close_msg arg1);
+
+ public abstract __db_create_reply __DB_db_create_4001(__db_create_msg arg1);
+
+ public abstract __db_del_reply __DB_db_del_4001(__db_del_msg arg1);
+
+ public abstract __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg arg1);
+
+ public abstract __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg arg1);
+
+ public abstract __db_flags_reply __DB_db_flags_4001(__db_flags_msg arg1);
+
+ public abstract __db_get_reply __DB_db_get_4001(__db_get_msg arg1);
+
+ public abstract __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg arg1);
+
+ public abstract __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg arg1);
+
+ public abstract __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg arg1);
+
+ public abstract __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg arg1);
+
+ public abstract __db_open_reply __DB_db_open_4001(__db_open_msg arg1);
+
+ public abstract __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg arg1);
+
+ public abstract __db_pget_reply __DB_db_pget_4001(__db_pget_msg arg1);
+
+ public abstract __db_put_reply __DB_db_put_4001(__db_put_msg arg1);
+
+ public abstract __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg arg1);
+
+ public abstract __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg arg1);
+
+ public abstract __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg arg1);
+
+ public abstract __db_remove_reply __DB_db_remove_4001(__db_remove_msg arg1);
+
+ public abstract __db_rename_reply __DB_db_rename_4001(__db_rename_msg arg1);
+
+ public abstract __db_stat_reply __DB_db_stat_4001(__db_stat_msg arg1);
+
+ public abstract __db_sync_reply __DB_db_sync_4001(__db_sync_msg arg1);
+
+ public abstract __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg arg1);
+
+ public abstract __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg arg1);
+
+ public abstract __db_join_reply __DB_db_join_4001(__db_join_msg arg1);
+
+ public abstract __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg arg1);
+
+ public abstract __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg arg1);
+
+ public abstract __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg arg1);
+
+ public abstract __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg arg1);
+
+ public abstract __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg arg1);
+
+ public abstract __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg arg1);
+
+ public abstract __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg arg1);
+
+}
+// End of DbServerStub.java
diff --git a/libdb/rpc_server/java/gen/__db_associate_msg.java b/libdb/rpc_server/java/gen/__db_associate_msg.java
new file mode 100644
index 0000000..8977303
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_associate_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 4/25/02 11:01 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int sdbpcl_id;
+ public int flags;
+
+ public __db_associate_msg() {
+ }
+
+ public __db_associate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(sdbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ sdbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_associate_reply.java b/libdb/rpc_server/java/gen/__db_associate_reply.java
new file mode 100644
index 0000000..476d086
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_associate_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_reply implements XdrAble {
+ public int status;
+
+ public __db_associate_reply() {
+ }
+
+ public __db_associate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_bt_maxkey_msg.java b/libdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
new file mode 100644
index 0000000..007ce16
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int maxkey;
+
+ public __db_bt_maxkey_msg() {
+ }
+
+ public __db_bt_maxkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(maxkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ maxkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_bt_maxkey_reply.java b/libdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
new file mode 100644
index 0000000..8555732
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_maxkey_reply() {
+ }
+
+ public __db_bt_maxkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_bt_minkey_msg.java b/libdb/rpc_server/java/gen/__db_bt_minkey_msg.java
new file mode 100644
index 0000000..c86ec38
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_bt_minkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int minkey;
+
+ public __db_bt_minkey_msg() {
+ }
+
+ public __db_bt_minkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(minkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ minkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_bt_minkey_reply.java b/libdb/rpc_server/java/gen/__db_bt_minkey_reply.java
new file mode 100644
index 0000000..4d944b6
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_bt_minkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_minkey_reply() {
+ }
+
+ public __db_bt_minkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_close_msg.java b/libdb/rpc_server/java/gen/__db_close_msg.java
new file mode 100644
index 0000000..ce8d213
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_close_msg() {
+ }
+
+ public __db_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_close_reply.java b/libdb/rpc_server/java/gen/__db_close_reply.java
new file mode 100644
index 0000000..a9380e9
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_reply implements XdrAble {
+ public int status;
+
+ public __db_close_reply() {
+ }
+
+ public __db_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_create_msg.java b/libdb/rpc_server/java/gen/__db_create_msg.java
new file mode 100644
index 0000000..d21ca50
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_create_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __db_create_msg() {
+ }
+
+ public __db_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_create_reply.java b/libdb/rpc_server/java/gen/__db_create_reply.java
new file mode 100644
index 0000000..e3dcbba
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+
+ public __db_create_reply() {
+ }
+
+ public __db_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_cursor_msg.java b/libdb/rpc_server/java/gen/__db_cursor_msg.java
new file mode 100644
index 0000000..60e09db
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_cursor_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_cursor_msg() {
+ }
+
+ public __db_cursor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_cursor_reply.java b/libdb/rpc_server/java/gen/__db_cursor_reply.java
new file mode 100644
index 0000000..bafd281
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_cursor_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_cursor_reply() {
+ }
+
+ public __db_cursor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_del_msg.java b/libdb/rpc_server/java/gen/__db_del_msg.java
new file mode 100644
index 0000000..fdf4790
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_del_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_del_msg() {
+ }
+
+ public __db_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_del_reply.java b/libdb/rpc_server/java/gen/__db_del_reply.java
new file mode 100644
index 0000000..8a55445
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_reply implements XdrAble {
+ public int status;
+
+ public __db_del_reply() {
+ }
+
+ public __db_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_encrypt_msg.java b/libdb/rpc_server/java/gen/__db_encrypt_msg.java
new file mode 100644
index 0000000..46d9f8e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_msg implements XdrAble {
+ public int dbpcl_id;
+ public String passwd;
+ public int flags;
+
+ public __db_encrypt_msg() {
+ }
+
+ public __db_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_encrypt_reply.java b/libdb/rpc_server/java/gen/__db_encrypt_reply.java
new file mode 100644
index 0000000..a97cc98
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __db_encrypt_reply() {
+ }
+
+ public __db_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_extentsize_msg.java b/libdb/rpc_server/java/gen/__db_extentsize_msg.java
new file mode 100644
index 0000000..41a51cf
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_extentsize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int extentsize;
+
+ public __db_extentsize_msg() {
+ }
+
+ public __db_extentsize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(extentsize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ extentsize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_extentsize_reply.java b/libdb/rpc_server/java/gen/__db_extentsize_reply.java
new file mode 100644
index 0000000..4096254
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_extentsize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_reply implements XdrAble {
+ public int status;
+
+ public __db_extentsize_reply() {
+ }
+
+ public __db_extentsize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_flags_msg.java b/libdb/rpc_server/java/gen/__db_flags_msg.java
new file mode 100644
index 0000000..d8752e2
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_flags_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_flags_msg() {
+ }
+
+ public __db_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_flags_reply.java b/libdb/rpc_server/java/gen/__db_flags_reply.java
new file mode 100644
index 0000000..c4ec253
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_reply implements XdrAble {
+ public int status;
+
+ public __db_flags_reply() {
+ }
+
+ public __db_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_get_msg.java b/libdb/rpc_server/java/gen/__db_get_msg.java
new file mode 100644
index 0000000..3dfe8e9
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_get_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_get_msg() {
+ }
+
+ public __db_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_get_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_get_reply.java b/libdb/rpc_server/java/gen/__db_get_reply.java
new file mode 100644
index 0000000..64ce525
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __db_get_reply() {
+ }
+
+ public __db_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_get_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_h_ffactor_msg.java b/libdb/rpc_server/java/gen/__db_h_ffactor_msg.java
new file mode 100644
index 0000000..8d2ed1b
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_h_ffactor_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int ffactor;
+
+ public __db_h_ffactor_msg() {
+ }
+
+ public __db_h_ffactor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(ffactor);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ ffactor = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_h_ffactor_reply.java b/libdb/rpc_server/java/gen/__db_h_ffactor_reply.java
new file mode 100644
index 0000000..1885ec5
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_h_ffactor_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_reply implements XdrAble {
+ public int status;
+
+ public __db_h_ffactor_reply() {
+ }
+
+ public __db_h_ffactor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_h_nelem_msg.java b/libdb/rpc_server/java/gen/__db_h_nelem_msg.java
new file mode 100644
index 0000000..7d08435
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_h_nelem_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_msg implements XdrAble {
+ public int dbpcl_id;
+ public int nelem;
+
+ public __db_h_nelem_msg() {
+ }
+
+ public __db_h_nelem_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(nelem);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ nelem = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_h_nelem_reply.java b/libdb/rpc_server/java/gen/__db_h_nelem_reply.java
new file mode 100644
index 0000000..20c5c77
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_h_nelem_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_reply implements XdrAble {
+ public int status;
+
+ public __db_h_nelem_reply() {
+ }
+
+ public __db_h_nelem_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_join_msg.java b/libdb/rpc_server/java/gen/__db_join_msg.java
new file mode 100644
index 0000000..88c72db
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_join_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_msg implements XdrAble {
+ public int dbpcl_id;
+ public int [] curs;
+ public int flags;
+
+ public __db_join_msg() {
+ }
+
+ public __db_join_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeIntVector(curs);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ curs = xdr.xdrDecodeIntVector();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_join_reply.java b/libdb/rpc_server/java/gen/__db_join_reply.java
new file mode 100644
index 0000000..80980e2
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_join_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_join_reply() {
+ }
+
+ public __db_join_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_key_range_msg.java b/libdb/rpc_server/java/gen/__db_key_range_msg.java
new file mode 100644
index 0000000..233077e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_key_range_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_key_range_msg() {
+ }
+
+ public __db_key_range_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_key_range_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_key_range_reply.java b/libdb/rpc_server/java/gen/__db_key_range_reply.java
new file mode 100644
index 0000000..09244c1
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_key_range_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_reply implements XdrAble {
+ public int status;
+ public double less;
+ public double equal;
+ public double greater;
+
+ public __db_key_range_reply() {
+ }
+
+ public __db_key_range_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDouble(less);
+ xdr.xdrEncodeDouble(equal);
+ xdr.xdrEncodeDouble(greater);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ less = xdr.xdrDecodeDouble();
+ equal = xdr.xdrDecodeDouble();
+ greater = xdr.xdrDecodeDouble();
+ }
+
+}
+// End of __db_key_range_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_lorder_msg.java b/libdb/rpc_server/java/gen/__db_lorder_msg.java
new file mode 100644
index 0000000..3399ad8
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_lorder_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_msg implements XdrAble {
+ public int dbpcl_id;
+ public int lorder;
+
+ public __db_lorder_msg() {
+ }
+
+ public __db_lorder_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_lorder_reply.java b/libdb/rpc_server/java/gen/__db_lorder_reply.java
new file mode 100644
index 0000000..cdcda4d
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_lorder_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_reply implements XdrAble {
+ public int status;
+
+ public __db_lorder_reply() {
+ }
+
+ public __db_lorder_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_open_msg.java b/libdb/rpc_server/java/gen/__db_open_msg.java
new file mode 100644
index 0000000..14dbd9e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_open_msg.java
@@ -0,0 +1,50 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int type;
+ public int flags;
+ public int mode;
+
+ public __db_open_msg() {
+ }
+
+ public __db_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ type = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_open_reply.java b/libdb/rpc_server/java/gen/__db_open_reply.java
new file mode 100644
index 0000000..d90c375
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_open_reply.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+ public int type;
+ public int dbflags;
+ public int lorder;
+
+ public __db_open_reply() {
+ }
+
+ public __db_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(dbflags);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ type = xdr.xdrDecodeInt();
+ dbflags = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_pagesize_msg.java b/libdb/rpc_server/java/gen/__db_pagesize_msg.java
new file mode 100644
index 0000000..a452ea4
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_pagesize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pagesize;
+
+ public __db_pagesize_msg() {
+ }
+
+ public __db_pagesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pagesize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pagesize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_pagesize_reply.java b/libdb/rpc_server/java/gen/__db_pagesize_reply.java
new file mode 100644
index 0000000..830b207
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_pagesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_reply implements XdrAble {
+ public int status;
+
+ public __db_pagesize_reply() {
+ }
+
+ public __db_pagesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_pget_msg.java b/libdb/rpc_server/java/gen/__db_pget_msg.java
new file mode 100644
index 0000000..11d27ca
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_pget_msg.java
@@ -0,0 +1,83 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_pget_msg() {
+ }
+
+ public __db_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pget_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_pget_reply.java b/libdb/rpc_server/java/gen/__db_pget_reply.java
new file mode 100644
index 0000000..86c9c21
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __db_pget_reply() {
+ }
+
+ public __db_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_pget_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_put_msg.java b/libdb/rpc_server/java/gen/__db_put_msg.java
new file mode 100644
index 0000000..b6159cf
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_put_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_put_msg() {
+ }
+
+ public __db_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_put_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_put_reply.java b/libdb/rpc_server/java/gen/__db_put_reply.java
new file mode 100644
index 0000000..fc89ae1
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __db_put_reply() {
+ }
+
+ public __db_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_put_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_re_delim_msg.java b/libdb/rpc_server/java/gen/__db_re_delim_msg.java
new file mode 100644
index 0000000..c386bdd
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_delim_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_msg implements XdrAble {
+ public int dbpcl_id;
+ public int delim;
+
+ public __db_re_delim_msg() {
+ }
+
+ public __db_re_delim_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(delim);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ delim = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_re_delim_reply.java b/libdb/rpc_server/java/gen/__db_re_delim_reply.java
new file mode 100644
index 0000000..aa8a797
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_delim_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_reply implements XdrAble {
+ public int status;
+
+ public __db_re_delim_reply() {
+ }
+
+ public __db_re_delim_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_re_len_msg.java b/libdb/rpc_server/java/gen/__db_re_len_msg.java
new file mode 100644
index 0000000..664de5c
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_len_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_msg implements XdrAble {
+ public int dbpcl_id;
+ public int len;
+
+ public __db_re_len_msg() {
+ }
+
+ public __db_re_len_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(len);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ len = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_re_len_reply.java b/libdb/rpc_server/java/gen/__db_re_len_reply.java
new file mode 100644
index 0000000..dda27c8
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_len_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_reply implements XdrAble {
+ public int status;
+
+ public __db_re_len_reply() {
+ }
+
+ public __db_re_len_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_re_pad_msg.java b/libdb/rpc_server/java/gen/__db_re_pad_msg.java
new file mode 100644
index 0000000..2c1290b
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_pad_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pad;
+
+ public __db_re_pad_msg() {
+ }
+
+ public __db_re_pad_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pad);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pad = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_re_pad_reply.java b/libdb/rpc_server/java/gen/__db_re_pad_reply.java
new file mode 100644
index 0000000..f0aaa9a
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_re_pad_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_reply implements XdrAble {
+ public int status;
+
+ public __db_re_pad_reply() {
+ }
+
+ public __db_re_pad_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_remove_msg.java b/libdb/rpc_server/java/gen/__db_remove_msg.java
new file mode 100644
index 0000000..dfa9066
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_remove_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __db_remove_msg() {
+ }
+
+ public __db_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_remove_reply.java b/libdb/rpc_server/java/gen/__db_remove_reply.java
new file mode 100644
index 0000000..a2b86c0
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_reply implements XdrAble {
+ public int status;
+
+ public __db_remove_reply() {
+ }
+
+ public __db_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_rename_msg.java b/libdb/rpc_server/java/gen/__db_rename_msg.java
new file mode 100644
index 0000000..12b434e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_rename_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __db_rename_msg() {
+ }
+
+ public __db_rename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_rename_reply.java b/libdb/rpc_server/java/gen/__db_rename_reply.java
new file mode 100644
index 0000000..4e4a22b
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_rename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_reply implements XdrAble {
+ public int status;
+
+ public __db_rename_reply() {
+ }
+
+ public __db_rename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_stat_msg.java b/libdb/rpc_server/java/gen/__db_stat_msg.java
new file mode 100644
index 0000000..af536b5
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_stat_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_stat_msg() {
+ }
+
+ public __db_stat_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_stat_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_stat_reply.java b/libdb/rpc_server/java/gen/__db_stat_reply.java
new file mode 100644
index 0000000..8df1460
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_stat_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_reply implements XdrAble {
+ public int status;
+ public int [] stats;
+
+ public __db_stat_reply() {
+ }
+
+ public __db_stat_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(stats);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ stats = xdr.xdrDecodeIntVector();
+ }
+
+}
+// End of __db_stat_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_sync_msg.java b/libdb/rpc_server/java/gen/__db_sync_msg.java
new file mode 100644
index 0000000..c659467
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_sync_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_sync_msg() {
+ }
+
+ public __db_sync_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_sync_reply.java b/libdb/rpc_server/java/gen/__db_sync_reply.java
new file mode 100644
index 0000000..d0a8bc8
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_sync_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_reply implements XdrAble {
+ public int status;
+
+ public __db_sync_reply() {
+ }
+
+ public __db_sync_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_reply.java
diff --git a/libdb/rpc_server/java/gen/__db_truncate_msg.java b/libdb/rpc_server/java/gen/__db_truncate_msg.java
new file mode 100644
index 0000000..38810d6
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_truncate_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_truncate_msg() {
+ }
+
+ public __db_truncate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_msg.java
diff --git a/libdb/rpc_server/java/gen/__db_truncate_reply.java b/libdb/rpc_server/java/gen/__db_truncate_reply.java
new file mode 100644
index 0000000..c4f6886
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__db_truncate_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_reply implements XdrAble {
+ public int status;
+ public int count;
+
+ public __db_truncate_reply() {
+ }
+
+ public __db_truncate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(count);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_close_msg.java b/libdb/rpc_server/java/gen/__dbc_close_msg.java
new file mode 100644
index 0000000..eb1ca7f
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_close_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_msg implements XdrAble {
+ public int dbccl_id;
+
+ public __dbc_close_msg() {
+ }
+
+ public __dbc_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_close_reply.java b/libdb/rpc_server/java/gen/__dbc_close_reply.java
new file mode 100644
index 0000000..47459aa
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_reply implements XdrAble {
+ public int status;
+
+ public __dbc_close_reply() {
+ }
+
+ public __dbc_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_count_msg.java b/libdb/rpc_server/java/gen/__dbc_count_msg.java
new file mode 100644
index 0000000..5f554e1
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_count_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_count_msg() {
+ }
+
+ public __dbc_count_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_count_reply.java b/libdb/rpc_server/java/gen/__dbc_count_reply.java
new file mode 100644
index 0000000..4daecdd
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_count_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_reply implements XdrAble {
+ public int status;
+ public int dupcount;
+
+ public __dbc_count_reply() {
+ }
+
+ public __dbc_count_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dupcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dupcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_del_msg.java b/libdb/rpc_server/java/gen/__dbc_del_msg.java
new file mode 100644
index 0000000..bc4bd05
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_del_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_del_msg() {
+ }
+
+ public __dbc_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_del_reply.java b/libdb/rpc_server/java/gen/__dbc_del_reply.java
new file mode 100644
index 0000000..e55ac9f
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_reply implements XdrAble {
+ public int status;
+
+ public __dbc_del_reply() {
+ }
+
+ public __dbc_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_dup_msg.java b/libdb/rpc_server/java/gen/__dbc_dup_msg.java
new file mode 100644
index 0000000..9a3894e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_dup_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_dup_msg() {
+ }
+
+ public __dbc_dup_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_dup_reply.java b/libdb/rpc_server/java/gen/__dbc_dup_reply.java
new file mode 100644
index 0000000..6b942f1
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_dup_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __dbc_dup_reply() {
+ }
+
+ public __dbc_dup_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_get_msg.java b/libdb/rpc_server/java/gen/__dbc_get_msg.java
new file mode 100644
index 0000000..672ace4
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_get_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_get_msg() {
+ }
+
+ public __dbc_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_get_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_get_reply.java b/libdb/rpc_server/java/gen/__dbc_get_reply.java
new file mode 100644
index 0000000..8671fec
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __dbc_get_reply() {
+ }
+
+ public __dbc_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_get_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_pget_msg.java b/libdb/rpc_server/java/gen/__dbc_pget_msg.java
new file mode 100644
index 0000000..8ca3c61
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_pget_msg.java
@@ -0,0 +1,80 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_msg implements XdrAble {
+ public int dbccl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_pget_msg() {
+ }
+
+ public __dbc_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_pget_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_pget_reply.java b/libdb/rpc_server/java/gen/__dbc_pget_reply.java
new file mode 100644
index 0000000..16cc795
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __dbc_pget_reply() {
+ }
+
+ public __dbc_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_pget_reply.java
diff --git a/libdb/rpc_server/java/gen/__dbc_put_msg.java b/libdb/rpc_server/java/gen/__dbc_put_msg.java
new file mode 100644
index 0000000..98d1242
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_put_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_put_msg() {
+ }
+
+ public __dbc_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_put_msg.java
diff --git a/libdb/rpc_server/java/gen/__dbc_put_reply.java b/libdb/rpc_server/java/gen/__dbc_put_reply.java
new file mode 100644
index 0000000..385f9f7
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__dbc_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __dbc_put_reply() {
+ }
+
+ public __dbc_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_put_reply.java
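All of these jrpcgen-generated message classes follow the same shape: plain public fields plus xdrEncode()/xdrDecode() methods that walk the fields in declaration order, with DBT payloads carried as dynamic opaques. A minimal sketch of how the __dbc_put pair above could be marshaled by hand (hypothetical helper class; a real client would normally go through the generated Remote Tea client stub rather than encode messages itself):

```java
package com.sleepycat.db.rpcserver;

import java.io.IOException;
import org.acplt.oncrpc.OncRpcException;
import org.acplt.oncrpc.XdrDecodingStream;
import org.acplt.oncrpc.XdrEncodingStream;

// Hypothetical helpers showing the encode/decode round trip for __dbc_put.
class DbcPutExample {
    // Fill a request and write it to an already-prepared encoding stream.
    static void encodePut(XdrEncodingStream xdr, int cursorId,
                          byte[] key, byte[] data, int flags)
            throws OncRpcException, IOException {
        __dbc_put_msg msg = new __dbc_put_msg();
        msg.dbccl_id = cursorId;  // server-side cursor handle (cl_id)
        msg.keydata = key;        // DBT payloads travel as dynamic opaques
        msg.datadata = data;
        msg.flags = flags;        // the *dlen/*doff/*ulen/*flags ints default to 0
        msg.xdrEncode(xdr);       // fields are written in declaration order
    }

    // Read the matching reply; status is the Berkeley DB return code.
    static int decodePutReply(XdrDecodingStream xdr)
            throws OncRpcException, IOException {
        __dbc_put_reply reply = new __dbc_put_reply(xdr); // constructor calls xdrDecode()
        return reply.status;
    }
}
```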
diff --git a/libdb/rpc_server/java/gen/__env_cachesize_msg.java b/libdb/rpc_server/java/gen/__env_cachesize_msg.java
new file mode 100644
index 0000000..d1fce1f
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_cachesize_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int gbytes;
+ public int bytes;
+ public int ncache;
+
+ public __env_cachesize_msg() {
+ }
+
+ public __env_cachesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(gbytes);
+ xdr.xdrEncodeInt(bytes);
+ xdr.xdrEncodeInt(ncache);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ gbytes = xdr.xdrDecodeInt();
+ bytes = xdr.xdrDecodeInt();
+ ncache = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_cachesize_reply.java b/libdb/rpc_server/java/gen/__env_cachesize_reply.java
new file mode 100644
index 0000000..193f835
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_cachesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_reply implements XdrAble {
+ public int status;
+
+ public __env_cachesize_reply() {
+ }
+
+ public __env_cachesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_close_msg.java b/libdb/rpc_server/java/gen/__env_close_msg.java
new file mode 100644
index 0000000..5e657ba
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __env_close_msg() {
+ }
+
+ public __env_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_close_reply.java b/libdb/rpc_server/java/gen/__env_close_reply.java
new file mode 100644
index 0000000..11e61f7
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_reply implements XdrAble {
+ public int status;
+
+ public __env_close_reply() {
+ }
+
+ public __env_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_create_msg.java b/libdb/rpc_server/java/gen/__env_create_msg.java
new file mode 100644
index 0000000..dbe546a
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_create_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_msg implements XdrAble {
+ public int timeout;
+
+ public __env_create_msg() {
+ }
+
+ public __env_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(timeout);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ timeout = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_create_reply.java b/libdb/rpc_server/java/gen/__env_create_reply.java
new file mode 100644
index 0000000..5427fc4
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_create_reply() {
+ }
+
+ public __env_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_dbremove_msg.java b/libdb/rpc_server/java/gen/__env_dbremove_msg.java
new file mode 100644
index 0000000..9730a92
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_dbremove_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __env_dbremove_msg() {
+ }
+
+ public __env_dbremove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_dbremove_reply.java b/libdb/rpc_server/java/gen/__env_dbremove_reply.java
new file mode 100644
index 0000000..75cc5a9
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_dbremove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_reply implements XdrAble {
+ public int status;
+
+ public __env_dbremove_reply() {
+ }
+
+ public __env_dbremove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_dbrename_msg.java b/libdb/rpc_server/java/gen/__env_dbrename_msg.java
new file mode 100644
index 0000000..0bbda26
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_dbrename_msg.java
@@ -0,0 +1,47 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __env_dbrename_msg() {
+ }
+
+ public __env_dbrename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_dbrename_reply.java b/libdb/rpc_server/java/gen/__env_dbrename_reply.java
new file mode 100644
index 0000000..0cc8882
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_dbrename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_reply implements XdrAble {
+ public int status;
+
+ public __env_dbrename_reply() {
+ }
+
+ public __env_dbrename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_encrypt_msg.java b/libdb/rpc_server/java/gen/__env_encrypt_msg.java
new file mode 100644
index 0000000..84e9a36
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String passwd;
+ public int flags;
+
+ public __env_encrypt_msg() {
+ }
+
+ public __env_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_encrypt_reply.java b/libdb/rpc_server/java/gen/__env_encrypt_reply.java
new file mode 100644
index 0000000..e202a30
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __env_encrypt_reply() {
+ }
+
+ public __env_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_flags_msg.java b/libdb/rpc_server/java/gen/__env_flags_msg.java
new file mode 100644
index 0000000..25cd5f8
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_flags_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+ public int onoff;
+
+ public __env_flags_msg() {
+ }
+
+ public __env_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(onoff);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ onoff = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_flags_reply.java b/libdb/rpc_server/java/gen/__env_flags_reply.java
new file mode 100644
index 0000000..d348a92
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_reply implements XdrAble {
+ public int status;
+
+ public __env_flags_reply() {
+ }
+
+ public __env_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_open_msg.java b/libdb/rpc_server/java/gen/__env_open_msg.java
new file mode 100644
index 0000000..e4649b4
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_open_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+ public int mode;
+
+ public __env_open_msg() {
+ }
+
+ public __env_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_open_reply.java b/libdb/rpc_server/java/gen/__env_open_reply.java
new file mode 100644
index 0000000..1994afb
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_open_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_open_reply() {
+ }
+
+ public __env_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_reply.java
diff --git a/libdb/rpc_server/java/gen/__env_remove_msg.java b/libdb/rpc_server/java/gen/__env_remove_msg.java
new file mode 100644
index 0000000..b32d758
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_remove_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+
+ public __env_remove_msg() {
+ }
+
+ public __env_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_msg.java
diff --git a/libdb/rpc_server/java/gen/__env_remove_reply.java b/libdb/rpc_server/java/gen/__env_remove_reply.java
new file mode 100644
index 0000000..19e4d52
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__env_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_reply implements XdrAble {
+ public int status;
+
+ public __env_remove_reply() {
+ }
+
+ public __env_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_reply.java
diff --git a/libdb/rpc_server/java/gen/__txn_abort_msg.java b/libdb/rpc_server/java/gen/__txn_abort_msg.java
new file mode 100644
index 0000000..ff44c53
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_abort_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_msg implements XdrAble {
+ public int txnpcl_id;
+
+ public __txn_abort_msg() {
+ }
+
+ public __txn_abort_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_abort_reply.java b/libdb/rpc_server/java/gen/__txn_abort_reply.java
new file mode 100644
index 0000000..58f275c
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_abort_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_reply implements XdrAble {
+ public int status;
+
+ public __txn_abort_reply() {
+ }
+
+ public __txn_abort_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_reply.java
diff --git a/libdb/rpc_server/java/gen/__txn_begin_msg.java b/libdb/rpc_server/java/gen/__txn_begin_msg.java
new file mode 100644
index 0000000..877031e
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_begin_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int parentcl_id;
+ public int flags;
+
+ public __txn_begin_msg() {
+ }
+
+ public __txn_begin_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(parentcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ parentcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_begin_reply.java b/libdb/rpc_server/java/gen/__txn_begin_reply.java
new file mode 100644
index 0000000..65a0c40
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_begin_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_reply implements XdrAble {
+ public int status;
+ public int txnidcl_id;
+
+ public __txn_begin_reply() {
+ }
+
+ public __txn_begin_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(txnidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txnidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_reply.java
diff --git a/libdb/rpc_server/java/gen/__txn_commit_msg.java b/libdb/rpc_server/java/gen/__txn_commit_msg.java
new file mode 100644
index 0000000..4b988d0
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_commit_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_commit_msg() {
+ }
+
+ public __txn_commit_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_commit_reply.java b/libdb/rpc_server/java/gen/__txn_commit_reply.java
new file mode 100644
index 0000000..b26937b
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_commit_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_reply implements XdrAble {
+ public int status;
+
+ public __txn_commit_reply() {
+ }
+
+ public __txn_commit_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_reply.java
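The various *cl_id integers are how the protocol chains calls together: each reply that creates a server-side object hands back an id, and that id becomes the handle field of the next request. A hedged sketch (hypothetical helper, not part of the generated sources) wiring an environment id through txn_begin and the returned transaction id through txn_commit:

```java
package com.sleepycat.db.rpcserver;

import java.io.IOException;
import org.acplt.oncrpc.OncRpcException;
import org.acplt.oncrpc.XdrEncodingStream;

// Hypothetical illustration of handle chaining: env id -> txn_begin -> txn id -> txn_commit.
class TxnHandleExample {
    static void beginTxn(XdrEncodingStream xdr, int envId, int flags)
            throws OncRpcException, IOException {
        __txn_begin_msg begin = new __txn_begin_msg();
        begin.dbenvcl_id = envId;   // envcl_id from a previous __env_create/__env_open reply
        begin.parentcl_id = 0;      // 0 = no parent transaction
        begin.flags = flags;
        begin.xdrEncode(xdr);
    }

    static void commitTxn(XdrEncodingStream xdr, __txn_begin_reply beginReply, int flags)
            throws OncRpcException, IOException {
        __txn_commit_msg commit = new __txn_commit_msg();
        commit.txnpcl_id = beginReply.txnidcl_id; // handle returned by txn_begin
        commit.flags = flags;
        commit.xdrEncode(xdr);
    }
}
```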
diff --git a/libdb/rpc_server/java/gen/__txn_discard_msg.java b/libdb/rpc_server/java/gen/__txn_discard_msg.java
new file mode 100644
index 0000000..87f5d4f
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_discard_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_discard_msg() {
+ }
+
+ public __txn_discard_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_discard_reply.java b/libdb/rpc_server/java/gen/__txn_discard_reply.java
new file mode 100644
index 0000000..9792211
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_discard_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_reply implements XdrAble {
+ public int status;
+
+ public __txn_discard_reply() {
+ }
+
+ public __txn_discard_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_reply.java
diff --git a/libdb/rpc_server/java/gen/__txn_prepare_msg.java b/libdb/rpc_server/java/gen/__txn_prepare_msg.java
new file mode 100644
index 0000000..6e09f2c
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_prepare_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_msg implements XdrAble {
+ public int txnpcl_id;
+ public byte [] gid;
+
+ public __txn_prepare_msg() {
+ }
+
+ public __txn_prepare_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeOpaque(gid, 128);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ gid = xdr.xdrDecodeOpaque(128);
+ }
+
+}
+// End of __txn_prepare_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_prepare_reply.java b/libdb/rpc_server/java/gen/__txn_prepare_reply.java
new file mode 100644
index 0000000..d759011
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_prepare_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_reply implements XdrAble {
+ public int status;
+
+ public __txn_prepare_reply() {
+ }
+
+ public __txn_prepare_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_prepare_reply.java
diff --git a/libdb/rpc_server/java/gen/__txn_recover_msg.java b/libdb/rpc_server/java/gen/__txn_recover_msg.java
new file mode 100644
index 0000000..6515333
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_recover_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int count;
+ public int flags;
+
+ public __txn_recover_msg() {
+ }
+
+ public __txn_recover_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(count);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_msg.java
diff --git a/libdb/rpc_server/java/gen/__txn_recover_reply.java b/libdb/rpc_server/java/gen/__txn_recover_reply.java
new file mode 100644
index 0000000..0161ec9
--- /dev/null
+++ b/libdb/rpc_server/java/gen/__txn_recover_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_reply implements XdrAble {
+ public int status;
+ public int [] txn;
+ public byte [] gid;
+ public int retcount;
+
+ public __txn_recover_reply() {
+ }
+
+ public __txn_recover_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(txn);
+ xdr.xdrEncodeDynamicOpaque(gid);
+ xdr.xdrEncodeInt(retcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txn = xdr.xdrDecodeIntVector();
+ gid = xdr.xdrDecodeDynamicOpaque();
+ retcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_reply.java
diff --git a/libdb/rpc_server/java/gen/db_server.java b/libdb/rpc_server/java/gen/db_server.java
new file mode 100644
index 0000000..a14a770
--- /dev/null
+++ b/libdb/rpc_server/java/gen/db_server.java
@@ -0,0 +1,67 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+/**
+ * A collection of constants used by the "db_server" ONC/RPC program.
+ */
+public interface db_server {
+ public static final int __DB_db_close_4001 = 19;
+ public static final int __DB_db_flags_4001 = 24;
+ public static final int __DB_dbc_dup_4001 = 47;
+ public static final int __DB_db_encrypt_4001 = 22;
+ public static final int __DB_env_dbrename_4001 = 5;
+ public static final int __DB_env_remove_4001 = 9;
+ public static final int __DB_dbc_pget_4001 = 49;
+ public static final int __DB_env_cachesize_4001 = 1;
+ public static final int __DB_db_lorder_4001 = 29;
+ public static final int __DB_db_key_range_4001 = 28;
+ public static final int __DB_db_bt_minkey_4001 = 18;
+ public static final int __DB_db_sync_4001 = 40;
+ public static final int __DB_dbc_close_4001 = 44;
+ public static final int __DB_db_join_4001 = 43;
+ public static final int __DB_db_pagesize_4001 = 31;
+ public static final int DB_RPC_SERVERVERS = 4001;
+ public static final int __DB_db_open_4001 = 30;
+ public static final int __DB_dbc_get_4001 = 48;
+ public static final int __DB_db_cursor_4001 = 42;
+ public static final int __DB_txn_commit_4001 = 12;
+ public static final int __DB_dbc_del_4001 = 46;
+ public static final int __DB_env_create_4001 = 3;
+ public static final int __DB_env_open_4001 = 8;
+ public static final int __DB_txn_prepare_4001 = 14;
+ public static final int __DB_db_pget_4001 = 32;
+ public static final int __DB_db_stat_4001 = 39;
+ public static final int __DB_db_h_nelem_4001 = 27;
+ public static final int __DB_db_remove_4001 = 37;
+ public static final int __DB_db_re_delim_4001 = 34;
+ public static final int __DB_db_re_pad_4001 = 36;
+ public static final int __DB_txn_abort_4001 = 10;
+ public static final int __DB_txn_recover_4001 = 15;
+ public static final int __DB_db_get_4001 = 25;
+ public static final int __DB_db_extentsize_4001 = 23;
+ public static final int DB_RPC_SERVERPROG = 351457;
+ public static final int __DB_dbc_put_4001 = 50;
+ public static final int __DB_db_truncate_4001 = 41;
+ public static final int __DB_db_del_4001 = 21;
+ public static final int __DB_db_bt_maxkey_4001 = 17;
+ public static final int __DB_env_dbremove_4001 = 4;
+ public static final int __DB_txn_discard_4001 = 13;
+ public static final int __DB_db_re_len_4001 = 35;
+ public static final int __DB_env_close_4001 = 2;
+ public static final int __DB_env_flags_4001 = 7;
+ public static final int __DB_db_rename_4001 = 38;
+ public static final int __DB_db_associate_4001 = 16;
+ public static final int __DB_txn_begin_4001 = 11;
+ public static final int __DB_env_encrypt_4001 = 6;
+ public static final int __DB_db_h_ffactor_4001 = 26;
+ public static final int __DB_db_put_4001 = 33;
+ public static final int __DB_db_create_4001 = 20;
+ public static final int __DB_dbc_count_4001 = 45;
+}
+// End of db_server.java
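db_server itself only carries the ONC/RPC program and version numbers plus one procedure number per method; the server stub generated by the s_jrpcgen script later in this patch dispatches on them. A small, purely illustrative helper (not part of the generated sources) that maps a few of the numbers back to readable names for logging:

```java
package com.sleepycat.db.rpcserver;

// Hypothetical logging helper: resolve a handful of db_server procedure numbers.
class ProcNames {
    static String name(int proc) {
        switch (proc) {
            case db_server.__DB_env_create_4001: return "env_create";
            case db_server.__DB_env_open_4001:   return "env_open";
            case db_server.__DB_txn_begin_4001:  return "txn_begin";
            case db_server.__DB_dbc_put_4001:    return "dbc_put";
            case db_server.__DB_dbc_pget_4001:   return "dbc_pget";
            default:                             return "proc#" + proc;
        }
    }
}
```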
diff --git a/libdb/rpc_server/java/jrpcgen.jar b/libdb/rpc_server/java/jrpcgen.jar
new file mode 100644
index 0000000..338825b
Binary files /dev/null and b/libdb/rpc_server/java/jrpcgen.jar differ
diff --git a/libdb/rpc_server/java/oncrpc.jar b/libdb/rpc_server/java/oncrpc.jar
new file mode 100644
index 0000000..e0f5cfa
Binary files /dev/null and b/libdb/rpc_server/java/oncrpc.jar differ
diff --git a/libdb/rpc_server/java/s_jrpcgen b/libdb/rpc_server/java/s_jrpcgen
new file mode 100644
index 0000000..fed8cbf
--- /dev/null
+++ b/libdb/rpc_server/java/s_jrpcgen
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s DbServerStub ../db_server.x
diff --git a/libdb/rpc_server/rpc.src b/libdb/rpc_server/rpc.src
new file mode 100644
index 0000000..f15638f
--- /dev/null
+++ b/libdb/rpc_server/rpc.src
@@ -0,0 +1,718 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Syntax:
+# BEGIN function_name {CODE | RETCODE | NOFUNC}
+# CODE: generate XDR and client code, return status
+# Used for functions that just return a status and nothing else.
+# RETCODE: generate XDR and client code, call return function
+# (generate template return function)
+# Used for functions that return data.
+# NOFUNC: generate a client "unsupported function" with right args
+# Used for unsupported functions.
+#
+# ARG {IGNORE | STRING | INT | DBT | LIST | ID | GID | CONST} C-type varname
+# IGNORE: not passed to server
+# STRING: string passed to server
+# DBT: DBT arg passed to server
+# LIST: list passed to server (NULL-terminated list of something)
+# INT: integer passed to server
+# ID: cl_id from arg passed to server
+# GID: global id passed to server
+# CONST: do not generate COMPQUIET (for NOFUNC only)
+# FUNCPROT prototype
+# FUNCARG functiontype
+# These two *MUST* go together and FUNCPROT *MUST* be first. These
+# are for the tricky user-supplied functions to some methods. They
+# are not supported in RPC, so will be ignored, but the complicated
+# syntax of their argument requires we have a special flag for them
+# that contains the verbatim text to use in the prototype and the
+# c-type, respectively. The FUNCARG must include the function, and
+# must call it 'funcN', where N is the count of functions. Almost
+# always it must be func0. A *very* few methods have more than one
+# user-supplied function; in those cases, they must be func0, func1, etc.
+#
+# All messages automatically return "status" and return that from
+# the call to the function. RET's are additional things the server
+# may return. RET is like ARG but does not need the IGNORE option.
+# RET {STRING | INT | DBT | LIST | ID} varname [GID | INT | ID]
+# STRING: string from server
+# DBT: DBT arg from server
+# LIST: list from server (NULL-terminated list)
+# Must have list type of GID, ID or INT specified
+# INT: integer from server
+# ID: id from server stored in cl_id
+# END function end.
+
+#
+# Environment functions
+#
+BEGIN env_alloc NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN set_app_dispatch NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)
+FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops))
+END
+BEGIN env_cachesize CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN env_close RETCODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+END
+BEGIN env_create RETCODE
+ARG IGNORE DB_ENV * dbenv
+ARG INT long timeout
+RET ID long env
+END
+BEGIN set_data_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN env_dbremove CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN env_dbrename CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN env_encrypt CODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN env_set_feedback NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int, int)
+FUNCARG void (*func0) __P((DB_ENV *, int, int))
+END
+BEGIN env_flags CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT int onoff
+END
+BEGIN set_lg_bsize NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t bsize
+END
+BEGIN set_lg_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_lg_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lg_regionmax NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_conflict NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int8_t * conflicts
+ARG INT int modes
+END
+BEGIN set_lk_detect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t detect
+END
+BEGIN set_lk_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_locks NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_lockers NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_objects NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_mp_mmapsize NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT size_t mmapsize
+END
+BEGIN env_open RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+ARG INT int mode
+RET ID long env
+END
+BEGIN env_paniccall NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN env_remove RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+END
+BEGIN set_shm_key NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT long shm_key
+END
+BEGIN set_tas_spins NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t tas_spins
+END
+BEGIN set_timeout NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+BEGIN set_tmp_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_tx_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_tx_timestamp NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT time_t * max
+END
+BEGIN set_verbose NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t which
+ARG INT int onoff
+END
+#
+# Transaction functions
+#
+BEGIN txn_abort RETCODE
+ARG ID DB_TXN * txnp
+END
+BEGIN txn_begin RETCODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * parent
+ARG IGNORE DB_TXN ** txnpp
+ARG INT u_int32_t flags
+RET ID long txnid
+END
+BEGIN txn_checkpoint NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t kbyte
+ARG INT u_int32_t min
+ARG INT u_int32_t flags
+END
+BEGIN txn_commit RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
+END
+BEGIN txn_discard RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
+END
+BEGIN txn_prepare CODE
+ARG ID DB_TXN * txnp
+ARG GID u_int8_t * gid
+END
+BEGIN txn_recover RETCODE
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_PREPLIST * preplist
+ARG INT long count
+ARG IGNORE long * retp
+ARG INT u_int32_t flags
+RET LIST DB_TXN * txn ID
+RET LIST u_int8_t * gid GID
+RET INT long retcount
+END
+BEGIN txn_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_TXN_STAT ** statp
+ARG INT u_int32_t flags
+END
+BEGIN txn_timeout NOFUNC
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+#
+# Replication functions
+#
+BEGIN rep_elect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int nsites
+ARG INT int pri
+ARG INT u_int32_t timeout
+ARG IGNORE int * idp
+END
+BEGIN rep_flush NOFUNC
+ARG ID DB_ENV * dbenv
+END
+BEGIN rep_process_message NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * rec
+ARG DBT DBT * control
+ARG IGNORE int * idp
+END
+BEGIN rep_set_limit NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t mbytes
+ARG INT u_int32_t bytes
+END
+BEGIN rep_set_request NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t min
+ARG INT u_int32_t max
+END
+BEGIN rep_set_rep_transport NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int id
+FUNCPROT int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)
+FUNCARG int (*func0) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t))
+END
+BEGIN rep_start NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * cdata
+ARG INT u_int32_t flags
+END
+BEGIN rep_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_REP_STAT ** statp
+ARG INT u_int32_t flags
+END
+
+#
+# Database functions
+#
+BEGIN db_alloc NOFUNC
+ARG ID DB * dbp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN db_associate CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG ID DB * sdbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *, DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *, DBT *))
+ARG INT u_int32_t flags
+END
+BEGIN db_bt_compare NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_bt_maxkey CODE
+ARG ID DB * dbp
+ARG INT u_int32_t maxkey
+END
+BEGIN db_bt_minkey CODE
+ARG ID DB * dbp
+ARG INT u_int32_t minkey
+END
+BEGIN db_bt_prefix NOFUNC
+ARG ID DB * dbp
+FUNCPROT size_t(*)(DB *, const DBT *, const DBT *)
+FUNCARG size_t (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_set_append_recno NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, DBT *, db_recno_t)
+FUNCARG int (*func0) __P((DB *, DBT *, db_recno_t))
+END
+BEGIN db_cache_priority NOFUNC
+ARG ID DB * dbp
+ARG INT DB_CACHE_PRIORITY priority
+END
+BEGIN db_cachesize NOFUNC
+ARG ID DB * dbp
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN db_close RETCODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_create RETCODE
+ARG IGNORE DB * dbp
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+RET ID long db
+END
+BEGIN db_del CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG INT u_int32_t flags
+END
+BEGIN db_dup_compare NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_encrypt CODE
+ARG ID DB * dbp
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN db_extentsize CODE
+ARG ID DB * dbp
+ARG INT u_int32_t extentsize
+END
+BEGIN db_fd NOFUNC
+ARG ID DB * dbp
+ARG IGNORE int * fdp
+END
+BEGIN db_feedback NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB *, int, int)
+FUNCARG void (*func0) __P((DB *, int, int))
+END
+BEGIN db_flags CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_get RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN db_h_ffactor CODE
+ARG ID DB * dbp
+ARG INT u_int32_t ffactor
+END
+BEGIN db_h_hash NOFUNC
+ARG ID DB * dbp
+FUNCPROT u_int32_t(*)(DB *, const void *, u_int32_t)
+FUNCARG u_int32_t (*func0) __P((DB *, const void *, u_int32_t))
+END
+BEGIN db_h_nelem CODE
+ARG ID DB * dbp
+ARG INT u_int32_t nelem
+END
+BEGIN db_key_range RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG IGNORE DB_KEY_RANGE * range
+ARG INT u_int32_t flags
+RET DBL double less
+RET DBL double equal
+RET DBL double greater
+END
+BEGIN db_lorder CODE
+ARG ID DB * dbp
+ARG INT int lorder
+END
+# XXX
+# The line:
+# RET INT u_int32_t dbflags
+# should go away when a get_flags method exists. It is
+# needed now because Tcl looks at dbp->flags.
+#
+BEGIN db_open RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT DBTYPE type
+ARG INT u_int32_t flags
+ARG INT int mode
+RET ID long db
+RET INT DBTYPE type
+RET INT u_int32_t dbflags
+RET INT int lorder
+END
+BEGIN db_pagesize CODE
+ARG ID DB * dbp
+ARG INT u_int32_t pagesize
+END
+BEGIN db_panic NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN db_pget RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
+END
+BEGIN db_put RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+BEGIN db_re_delim CODE
+ARG ID DB * dbp
+ARG INT int delim
+END
+BEGIN db_re_len CODE
+ARG ID DB * dbp
+ARG INT u_int32_t len
+END
+BEGIN db_re_pad CODE
+ARG ID DB * dbp
+ARG INT int pad
+END
+BEGIN db_re_source NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * re_source
+END
+BEGIN db_remove RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN db_rename RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN db_stat RETCODE
+ARG ID DB * dbp
+ARG IGNORE void * sp
+ARG INT u_int32_t flags
+RET LIST u_int32_t * stats INT
+END
+BEGIN db_sync CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_truncate RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE u_int32_t * countp
+ARG INT u_int32_t flags
+RET INT u_int32_t count
+END
+BEGIN db_upgrade NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * fname
+ARG INT u_int32_t flags
+END
+BEGIN db_verify NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * fname
+ARG STRING const char * subdb
+ARG IGNORE FILE * outfile
+ARG INT u_int32_t flags
+END
+#
+# Cursor functions
+#
+BEGIN db_cursor RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE DBC ** dbcpp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN db_join RETCODE
+ARG ID DB * dbp
+ARG LIST DBC ** curs ID
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_close RETCODE
+ARG ID DBC * dbc
+END
+BEGIN dbc_count RETCODE
+ARG ID DBC * dbc
+ARG IGNORE db_recno_t * countp
+ARG INT u_int32_t flags
+RET INT db_recno_t dupcount
+END
+BEGIN dbc_del CODE
+ARG ID DBC * dbc
+ARG INT u_int32_t flags
+END
+BEGIN dbc_dup RETCODE
+ARG ID DBC * dbc
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_get RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN dbc_pget RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
+END
+BEGIN dbc_put RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+
+#
+# Unsupported environment subsystems
+#
+#
+# Locking subsystem
+#
+BEGIN lock_detect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT u_int32_t atype
+ARG IGNORE int * aborted
+END
+BEGIN lock_get NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG CONST const DBT * obj
+ARG INT db_lockmode_t mode
+ARG IGNORE DB_LOCK * lock
+END
+BEGIN lock_id NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t * idp
+END
+BEGIN lock_id_free NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t id
+END
+BEGIN lock_put NOFUNC
+ARG ID DB_ENV * dbenv
+ARG ID DB_LOCK * lock
+END
+BEGIN lock_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOCK_STAT ** statp
+ARG INT u_int32_t flags
+END
+BEGIN lock_vec NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG IGNORE DB_LOCKREQ * list
+ARG INT int nlist
+ARG IGNORE DB_LOCKREQ ** elistp
+END
+#
+# Logging subsystem
+#
+BEGIN log_archive NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE char *** listp
+ARG INT u_int32_t flags
+END
+BEGIN log_cursor NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOGC ** logcp
+ARG INT u_int32_t flags
+END
+#
+# Don't do log_compare. It doesn't have an env we can get at,
+# and it doesn't manipulate DB internal information.
+#
+BEGIN log_file NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+ARG STRING char * namep
+ARG INT size_t len
+END
+BEGIN log_flush NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+END
+BEGIN log_put NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+ARG DBT const DBT * data
+ARG INT u_int32_t flags
+END
+BEGIN log_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOG_STAT ** statp
+ARG INT u_int32_t flags
+END
+#
+# Mpool Subsystem
+#
+BEGIN memp_fcreate NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_MPOOLFILE ** mpf
+ARG IGNORE u_int32_t flags
+END
+BEGIN memp_register NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int ftype
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+END
+BEGIN memp_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_MPOOL_STAT ** gstatp
+ARG IGNORE DB_MPOOL_FSTAT *** fstatp
+ARG INT u_int32_t flags
+END
+BEGIN memp_sync NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+END
+BEGIN memp_trickle NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int pct
+ARG IGNORE int * nwrotep
+END
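rpc.src describes every RPC-able method in the small DSL explained in its header comment, and each BEGIN/ARG/RET/END entry lines up field-for-field with one of the generated msg/reply pairs earlier in this patch: ARG ID handles become *cl_id ints, ARG INT/STRING/DBT arguments become like-named fields, and CODE entries get a reply that carries only a status. A hypothetical sketch of the request produced by the "BEGIN env_cachesize CODE" entry, using the __env_cachesize_msg class shown above:

```java
package com.sleepycat.db.rpcserver;

import java.io.IOException;
import org.acplt.oncrpc.OncRpcException;
import org.acplt.oncrpc.XdrEncodingStream;

// Hypothetical sketch: "BEGIN env_cachesize CODE" maps ARG ID -> dbenvcl_id and
// its ARG INT arguments to like-named int fields; CODE means the reply is status-only.
class CachesizeExample {
    static void encode(XdrEncodingStream xdr, int envId)
            throws OncRpcException, IOException {
        __env_cachesize_msg msg = new __env_cachesize_msg();
        msg.dbenvcl_id = envId;      // ARG ID  DB_ENV * dbenv
        msg.gbytes = 0;              // ARG INT u_int32_t gbytes
        msg.bytes = 4 * 1024 * 1024; // ARG INT u_int32_t bytes (example value)
        msg.ncache = 1;              // ARG INT int ncache
        msg.xdrEncode(xdr);
    }
}
```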
diff --git a/libdb/tcl/docs/db.html b/libdb/tcl/docs/db.html
new file mode 100644
index 0000000..4f04c2c
--- /dev/null
+++ b/libdb/tcl/docs/db.html
@@ -0,0 +1,263 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Database Commands"></A>Database Commands</H2>
+The database commands provide a fairly straightforward mapping to the
+DB method functions.
+
+<P>
+<B>> berkdb open</B>
+<dl>
+
+<dt><B>[-btcompare <I>proc</I>]</B><dd>
+Sets the Btree comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_bt_compare.html">DB->set_bt_compare</A>
+method.
+
+<dt><B>[-btree|-hash|-recno|-queue|-unknown]</B><dd>
+Selects the database type:<br>
+DB_BTREE, DB_HASH, DB_RECNO, DB_QUEUE or DB_UNKNOWN.
+
+
+<dt><B>[-cachesize {<I>gbytes bytes ncaches</I>}]</B><dd>
+Sets the size of the database cache to the size specified by
+<I>gbytes</I> and <I>bytes</I>, broken up into <I>ncaches</I> number of
+caches using the
+<A HREF="../../docs/api_c/db_set_cachesize.html">DB->set_cachesize</A>
+method.
+
+<dt><B>[-create]</B><dd>
+Selects the DB_CREATE flag to create underlying files.
+
+<dt><B>[-delim <I>delim</I>]</B><dd>
+Sets the delimiting byte for variable length records to <I>delim</I>
+using the
+<A HREF="../../docs/api_c/db_set_re_delim.html">DB->set_re_delim</A>
+method.
+
+<dt><B>[-dup]</B><dd>
+Selects the DB_DUP flag to permit duplicates in the database.
+
+<dt><B>[-dupcompare <I>proc</I>]</B><dd>
+Sets the duplicate data comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_dup_compare.html">DB->set_dup_compare</A>
+method.
+
+<dt><B>[-dupsort]</B><dd>
+Selects the DB_DUPSORT flag to support sorted duplicates.
+
+<dt><B>[-env <I>env</I>]</B><dd>
+The database environment.
+
+<dt><B>[-errfile <I>filename</I>]</B><dd>
+Specifies the error file to use for this environment to <I>filename</I>
+by calling
+<A HREF="../../docs/api_c/db_set_errfile.html">DB->set_errfile</A>.
+If the file already exists then we will append to the end of the file.
+
+<dt><B>[-excl]</B><dd>
+Selects the DB_EXCL flag to exclusively create underlying files.
+
+<dt><B>[-extent <I>size</I>]</B><dd>
+Sets the size of a Queue database extent to the given <I>size</I> using
+the
+<A HREF="../../docs/api_c/db_set_q_extentsize.html">DB->set_q_extentsize</A>
+method.
+
+<dt><B>[-ffactor <I>density</I>]</B><dd>
+Sets the hash table key density to the given <I>density</I> using the
+<A HREF="../../docs/api_c/db_set_h_ffactor.html">DB->set_h_ffactor</A>
+method.
+
+<dt><B>[-hashproc <I>proc</I>]</B><dd>
+Sets a user-defined hash function to the Tcl procedure named <I>proc</I>
+using the
+<A HREF="../../docs/api_c/db_set_h_hash.html">DB->set_h_hash</A> method.
+
+<dt><B>[-len <I>len</I>]</B><dd>
+Sets the length of fixed-length records to <I>len</I> using the
+<A HREF="../../docs/api_c/db_set_re_len.html">DB->set_re_len</A>
+method.
+
+<dt><B>[-lorder <I>order</I>]</B><dd>
+Sets the byte order for integers stored in the database meta-data to
+the given <I>order</I> using the
+<A HREF="../../docs/api_c/db_set_lorder.html">DB->set_lorder</A>
+method.
+
+<dt><B>[-minkey <I>minkey</I>]</B><dd>
+Sets the minimum number of keys per Btree page to <I>minkey</I> using
+the
+<A HREF="../../docs/api_c/db_set_bt_minkey.html">DB->set_bt_minkey</A>
+method.
+
+<dt><B>[-mode <I>mode</I>]</B><dd>
+Specifies the mode for created files.
+
+<dt><B>[-nelem <I>size</I>]</B><dd>
+Sets the hash table size estimate to the given <I>size</I> using the
+<A HREF="../../docs/api_c/db_set_h_nelem.html">DB->set_h_nelem</A>
+method.
+
+<dt><B>[-nommap]</B><dd>
+Selects the DB_NOMMAP flag to forbid mmaping of files.
+
+<dt><B>[-pad <I>pad</I>]</B><dd>
+Sets the pad character used for fixed length records to <I>pad</I> using
+the
+<A HREF="../../docs/db_set_re_pad.html">DB->set_re_pad</A> method.
+
+<dt><B>[-pagesize <I>pagesize</I>]</B><dd>
+Sets the size of the database page to <I>pagesize</I> using the
+<A HREF="../../docs/api_c/db_set_pagesize.html">DB->set_pagesize</A>
+method.
+
+<dt><B>[-rdonly]</B><dd>
+Selects the DB_RDONLY flag for opening in read-only mode.
+
+<dt><B>[-recnum]</B><dd>
+Selects the DB_RECNUM flag to support record numbers in Btrees.
+
+<dt><B>[-renumber]</B><dd>
+Selects the DB_RENUMBER flag to support mutable record numbers.
+
+<dt><B>[-revsplitoff]</B><dd>
+Selects the DB_REVSPLITOFF flag to suppress reverse splitting of pages
+on deletion.
+
+<dt><B>[-snapshot]</B><dd>
+Selects the DB_SNAPSHOT flag to support database snapshots.
+
+<dt><B>[-source <I>file</I>]</B><dd>
+Sets the backing source file name to <I>file</I> using the
+<A HREF="../../docs/api_c/db_set_re_source.html">DB->set_re_source</A>
+method.
+
+<dt><B>[-truncate]</B><dd>
+Selects the DB_TRUNCATE flag to truncate the database.
+
+<dt><B>[--]</B><dd>
+Terminate the list of options and use remaining arguments as the file
+or subdb names (thus allowing the use of filenames beginning with a dash
+'-').
+
+<dt><B>[<I>filename </I>[<I>subdbname</I>]]</B><dd>
+The names of the database and sub-database.
+</dl>
+
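+<P>
+A minimal usage sketch (illustrative only; it assumes the Berkeley DB Tcl
+interface is already loaded, and <B>mydb.db</B> is a placeholder file name;
+the <B>put</B>, <B>get</B> and <B>close</B> subcommands on the returned
+handle are the standard Tcl access method commands, shown only for
+illustration):
+<PRE>
+# Open a Btree database, creating it if necessary, with sorted
+# duplicates and an 8KB page size.
+set db [berkdb open -create -btree -dupsort -pagesize 8192 -- mydb.db]
+
+# The handle is bound to a new Tcl command (db0, db1, ...).
+$db put fruit apple
+puts [$db get fruit]
+$db close
+</PRE>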
+<HR WIDTH="100%">
+<B>> berkdb upgrade [-dupsort] [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_upgrade.html">DB->upgrade</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly upgrade the database filename within the context of that
+environment. The <B>-dupsort</B> option selects the DB_DUPSORT flag for
+upgrading. The use of --<B> </B>terminates the list of options, thus allowing
+filenames beginning with a dash.
+<P>
+
+<HR WIDTH="100%">
+<B>> berkdb verify [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_verify.html">DB->verify</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly verify the database filename within the context of that
+environment.&nbsp; The use of --<B> </B>terminates the list of options,
+thus allowing filenames beginning with a dash.
+<P>
+
+<HR WIDTH="100%"><B>> <I>db</I> del</B>
+<P>There are no undocumented options.
+
+<HR WIDTH="100%">
+<B>> <I>db</I> join [-nosort] <I>db0.c0 db1.c0</I> ...</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_join.html">db_join</A>
+function.&nbsp; After it successfully joins a database, we bind it to a
+new Tcl command of the form <B><I>dbN.cX, </I></B>where X is an integer
+starting at 0 (e.g. <B>db2.c0, db3.c0, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I>
+to create the top level database function.&nbsp; It is through this cursor
+handle that the user can access the joined data items.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort -</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
+<P>
+The <B>berkdb open</B> command will invoke the
+<A HREF="../../docs/api_c/db_create.html">db_create</A> function. If
+the command is given the <B>-env</B> option, then we will
+create the database within the context of that environment. After it
+successfully gets a handle to a database, we bind it to a new Tcl
+command of the form <B><I>dbX, </I></B>where X is an integer starting
+at 0 (e.g. <B>db0, db1, </B>etc).
+
+<p>
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level
+database function. It is through this handle that the user can access
+all of the commands described in the <A HREF="#Database Commands">
+Database Commands</A> section. Internally, the database handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future database calls access the appropriate handle.
+
+<P>
+After parsing all of the optional arguments affecting the setup of the
+database and making the appropriate calls to DB to manipulate those
+values, we open the database for the user.  This translates to the
+<A HREF="../../docs/api_c/db_open.html">DB->open</A> method call.  We
+automatically set the DB_THREAD flag.  The arguments are those listed
+under <B>berkdb open</B> above.
+
+<HR WIDTH="100%">
+<B>> <I>db</I> get_join [-nosort] {db key} {db key} ...</B>
+<P>This command performs a join operation on the keys specified and returns
+a list of the joined {key data} pairs.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
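+<P>
+For example (an illustrative sketch only; <B>$primarydb</B>, <B>$colordb</B>,
+<B>$weightdb</B> and the key values are hypothetical handles and data):
+<PRE>
+# Join two secondary indices: return the {key data} pairs from the
+# primary database whose entries match "blue" in the color index
+# and "heavy" in the weight index.
+set pairs [$primarydb get_join [list $colordb blue] [list $weightdb heavy]]
+foreach pair $pairs {
+    puts "key=[lindex $pair 0] data=[lindex $pair 1]"
+}
+</PRE>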
+<HR WIDTH="100%">
+<B>> <I>db</I> keyrange [-txn <I>id</I>] key</B>
+<P>This command returns the range for the given <B>key</B>.&nbsp; It returns
+a list of 3 double elements of the form {<B><I>less equal greater</I></B>}
+where <B><I>less</I></B> is the percentage of keys less than the given
+key, <B><I>equal</I></B> is the percentage equal to the given key and <B><I>greater</I></B>
+is the percentage greater than the given key.&nbsp; If the -txn option
+is specified it performs this operation under transaction protection.
+
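+<P>
+For example (illustrative only; <B>db0</B> and <B>somekey</B> are placeholder
+names):
+<PRE>
+# The result is a three-element list {less equal greater}, as
+# described above.
+set r [db0 keyrange somekey]
+foreach {less equal greater} $r {break}
+puts "less=$less equal=$equal greater=$greater"
+</PRE>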
+<HR WIDTH="100%"><B>> <I>db</I> put</B>
+<P>The <B>undocumented</B> options are:
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
+
+<HR WIDTH="100%"><B>> <I>dbc</I> put</B>
+<P>The <B>undocumented</B> options are:
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
+
+</BODY>
+</HTML>
diff --git a/libdb/tcl/docs/env.html b/libdb/tcl/docs/env.html
new file mode 100644
index 0000000..79c3498
--- /dev/null
+++ b/libdb/tcl/docs/env.html
@@ -0,0 +1,354 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+Environment Commands</h2>
+Environments provide a structure for creating a consistent environment
+for processes using one or more of the features of Berkeley DB.&nbsp; Unlike
+some of the database commands, the environment commands are very low level.
+<br>
+<hr WIDTH="100%">
+<p>The user may create and open a new DB environment&nbsp; by invoking:
+<p><b>> berkdb env</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-create] [-home<i> directory</i>] [-mode <i>mode</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-data_dir <i>directory</i>] [-log_dir <i>directory</i>]
+[-tmp_dir <i>directory</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-nommap] [-private] [-recover] [-recover_fatal]
+[-system_mem] [-errfile <i>filename</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-use_environ] [-use_environ_root] [-verbose
+{<i>which </i>on|off}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-region_init]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cachesize {<i>gbytes bytes ncaches</i>}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-mmapsize<i> size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_buffer <i>size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_conflict {<i>nmodes </i>{<i>matrix</i>}}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_detect default|oldest|random|youngest]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_locks <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_lockers <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_objects <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-overwrite]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-client_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server <i>hostname</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_master] [-rep_client]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_transport <i>{ machineid sendproc }</i>]</b>
+<br>&nbsp;
+<p>This command opens up an environment.&nbsp;&nbsp; We automatically set
+the DB_THREAD and the DB_INIT_MPOOL flags.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-cdb</b> selects the DB_INIT_CDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-cdb_alldb</b> selects the DB_CDB_ALLDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-lock</b> selects the DB_INIT_LOCK flag for the locking subsystem</li>
+
+<li>
+<b>-log</b> selects the DB_INIT_LOG flag for the logging subsystem</li>
+
+<li>
+<b>-txn</b> selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags
+for the transaction subsystem.&nbsp; If <b>nosync</b> is specified, then
+it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits</li>
+
+<li>
+<b>-create </b>selects the DB_CREATE flag to create underlying files</li>
+
+<li>
+<b>-home <i>directory </i></b>selects the home directory of the environment</li>
+
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
+
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
+
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</a>.</li>
+
+<li>
+<b>-mode <i>mode </i></b>sets the permissions of created files to <b><i>mode</i></b></li>
+
+<li>
+<b>-nommap</b> selects the DB_NOMMAP flag to disallow using mmap'ed files</li>
+
+<li>
+<b>-private</b> selects the DB_PRIVATE flag for a private environment</li>
+
+<li>
+<b>-recover</b> selects the DB_RECOVER flag for recovery</li>
+
+<li>
+<b>-recover_fatal</b> selects the DB_RECOVER_FATAL flag for catastrophic
+recovery</li>
+
+<li>
+<b>-system_mem</b> selects the DB_SYSTEM_MEM flag to use system memory</li>
+
+<li>
+<b>-errfile </b>specifies the error file to use for this environment to
+<b><i>filename</i></b>
+by calling <a href="../../docs/api_c/env_set_errfile.html">DBENV->set_errfile</a><b><i>.
+</i></b>If
+the file already exists then we will append to the end of the file</li>
+
+<li>
+<b>-use_environ</b> selects the DB_USE_ENVIRON flag to affect file naming</li>
+
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to have the
+root environment affect file naming</li>
+
+<li>
+<b>-verbose</b> produces verbose error output for the given which subsystem,
+using the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
+method.&nbsp;&nbsp; See the description of <a href="#> <env> verbose which on|off">verbose</a>
+below for valid <b><i>which </i></b>values</li>
+
+<li>
+<b>-region_init </b>specifies that the user wants to page fault the region
+in on startup using the <a href="../../docs/api_c/env_set_region_init.html">DBENV->set_region_init</a>
+method call</li>
+
+<li>
+<b>-cachesize </b>sets the size of the database cache to the size&nbsp;
+specified by <b><i>gbytes </i></b>and <b><i>bytes, </i></b>broken up into
+<b><i>ncaches</i></b>
+number of caches using the <a href="../../docs/api_c/env_set_cachesize.html">DBENV->set_cachesize</a>
+method</li>
+
+<li>
+<b>-mmapsize </b>sets the maximum file size to be mapped into the process
+address space to <b><i>size </i></b>using
+the <a href="../../docs/api_c/env_set_mp_mmapsize.html">DBENV->set_mp_mmapsize</a>
+method</li>
+
+<li>
+<b>-log_max </b>sets the maximum size of the log file to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_max.html">DBENV->set_lg_max</a>
+call</li>
+
+<li>
+<b>-log_regionmax </b>sets the size of the log region to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_regionmax.html">DBENV->set_lg_regionmax</a>
+call</li>
+
+<li>
+<b>-log_buffer </b>sets the size of the in-memory log buffer in bytes to <b><i>size</i></b>
+using the <a href="../../docs/api_c/env_set_lg_bsize.html">DBENV->set_lg_bsize</a>
+call</li>
+
+<li>
+<b>-lock_conflict </b>sets the number of lock modes to <b><i>nmodes</i></b>
+and sets the locking policy for those modes to the <b><i>conflict_matrix</i></b>
+given using the <a href="../../docs/api_c/env_set_lk_conflict.html">DBENV->set_lk_conflict</a>
+method call</li>
+
+<li>
+<b>-lock_detect </b>sets the deadlock detection policy to the given policy
+using the <a href="../../docs/env_set_lk_detect.html">DBENV->set_lk_detect</a>
+method call.&nbsp; The policy choices are:</li>
+
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
+<li>
+<b>-lock_max </b>sets the maximum size of the lock table to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max.html">DBENV->set_lk_max</a>
+method call</li>
+
+<li>
+<b>-lock_max_locks </b>sets the maximum number of locks to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max_locks.html">DBENV->set_lk_max_locks</a>
+method call</li>
+
+<li>
+<b>-lock_max_lockers </b>sets the maximum number of locking entities to
+<b><i>max
+</i></b>using the <a href="../../docs/api_c/env_set_lk_max_lockers.html">DBENV->set_lk_max_lockers</a>
+method call</li>
+
+<li>
+<b>-lock_max_objects </b>sets the maximum number of simultaneously locked
+objects to <b><i>max </i></b>using the <a href="../../docs/api_c/env_set_lk_max_objects.html">DBENV->set_lk_max_objects</a>
+method call</li>
+
+<li>
+<b>-lock_timeout </b>sets the timeout for locks in the environment</li>
+
+<li>
+<b>-overwrite </b>sets DB_OVERWRITE flag</li>
+
+<li>
+<b>-txn_max </b>sets the maximum size of the transaction table to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_txn_max.html">DBENV->set_txn_max</a>
+method call</li>
+
+<li>
+<b>-txn_timeout </b>sets the timeout for transactions in the environment</li>
+
+<li>
+<b>-client_timeout</b> sets the timeout value for the client waiting for
+a reply from the server for RPC operations to <b><i>seconds</i></b>.</li>
+
+<li>
+<b>-server_timeout</b> sets the timeout value, in <b><i>seconds</i></b>, after
+which the server considers an idle client to be gone.</li>
+
+<li>
+<b>-server </b>specifies the <b><i>hostname</i></b> of the server
+to connect to in the <a href="../../docs/api_c/env_set_server.html">DBENV->set_server</a>
+call.</li>
+
+<li>
+<b>-rep_client </b>sets the newly created environment to be a
+replication client, using the <a href="../../docs/api_c/rep_client.html">
+DBENV->rep_client</a> call.</li>
+
+<li>
+<b>-rep_master </b>sets the newly created environment to be a
+replication master, using the <a href="../../docs/api_c/rep_master.html">
+DBENV->rep_master</a> call.</li>
+
+<li>
+<b>-rep_transport </b>specifies the replication transport function,
+using the
+<a href="../../docs/api_c/rep_transport.html">DBENV->set_rep_transport</a>
+call. This site's machine ID is set to <b><i>machineid</i></b> and
+the send function, a Tcl proc, is set to <b><i>sendproc</i></b>.</li>
+
+</ul>
+
+This command will invoke the <a href="../../docs/api_c/env_create.html">db_env_create</a>
+function.&nbsp; After it successfully gets a handle to an environment,
+we bind it to a new Tcl command of the form <b><i>envX</i></b>, where X
+is an integer starting at&nbsp; 0 (e.g. <b>env0, env1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level environment
+command function.&nbsp; It is through this handle that the user can access
+all the commands described in the <a href="#Environment Commands">Environment
+Commands</a> section.&nbsp; Internally, the handle we get back from DB
+will be stored as the <i>ClientData</i> portion of the new command set
+so that all future environment calls will have that handle readily available.&nbsp;
+Then we call the <a href="../../docs/api_c/env_open.html">DBENV->open</a>
+method call and possibly some number of setup calls as described above.
+<p>
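+A short usage sketch (illustrative only; the home directory <b>TESTDIR</b>
+is a placeholder and must already exist):
+<pre>
+# Create a transactional environment with a 4MB cache and verbose
+# deadlock reporting.
+set env [berkdb env -create -home TESTDIR -txn \
+    -cachesize {0 4194304 1} -verbose {deadlock on}]
+
+# The handle is bound to a new Tcl command (env0, env1, ...); close
+# it when finished.
+$env close
+</pre>
+<p>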
+<hr WIDTH="100%">
+<br><a NAME="> <env> verbose which on|off"></a><b>> &lt;env> verbose <i>which</i>
+on|off</b>
+<p>This command controls the use of debugging output for the environment.&nbsp;
+This command directly translates to a call to the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The user specifies
+<b><i>which</i></b>
+subsystem to control, and indicates whether debug messages should be turned
+<b>on</b>
+or <b>off</b> for that subsystem.&nbsp; The value of <b><i>which</i></b>
+must be one of the following:
+<ul>
+<li>
+<b>chkpt</b> - Chooses the checkpointing code by using the DB_VERB_CHKPOINT
+value</li>
+
+<li>
+<b>deadlock </b>- Chooses the deadlocking code by using the DB_VERB_DEADLOCK
+value</li>
+
+<li>
+<b>recovery </b>- Chooses the recovery code by using the DB_VERB_RECOVERY
+value</li>
+
+<li>
+<b>wait </b>- Chooses the waitsfor code by using the DB_VERB_WAITSFOR value</li>
+</ul>
+
+<hr WIDTH="100%">
+<p><a NAME="> <env> close"></a><b>> &lt;env> close</b>
+<p>This command closes an environment and deletes the handle.&nbsp; This
+command directly translates to a call to the <a href="../../docs/api_c/env_close.html">DBENV->close</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<p>Additionally, since the handle is no longer valid, we will call <i>Tcl_DeleteCommand()
+</i>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<p>Also, the close command will automatically abort any <a href="txn.html">transactions</a>
+and close any <a href="mpool.html">mpool</a> memory files.&nbsp; As such
+we must maintain a list of open transaction and mpool handles so that we
+can call <i>Tcl_DeleteCommand</i> on those as well.
+<p>
+<hr WIDTH="100%">
+
+<b>> berkdb envremove<br>
+[-data_dir <i>directory</i>]<br>
+[-force]<br>
+[-home <i>directory</i>]<br>
+[-log_dir <i>directory</i>]<br>
+[-overwrite]<br>
+[-tmp_dir <i>directory</i>]<br>
+[-use_environ]<br>
+[-use_environ_root]</b>
+
+<p>This command removes the environment if it is not in use and deletes
+the handle.&nbsp; This command directly translates to a call to the <a href="../../docs/api_c/env_remove.html">DBENV->remove</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-force</b> selects the DB_FORCE flag to remove even if other processes
+have the environment open</li>
+
+<li>
+<b>-home <i>directory</i> </b>specifies the home directory of the environment</li>
+
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
+
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
+
+<li>
+<b>-overwrite </b>sets DB_OVERWRITE flag</li>
+
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</a>.</li>
+
+<li>
+<b>-use_environ </b>selects the DB_USE_ENVIRON flag to affect file naming</li>
+
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to affect
+file naming</li>
+</ul>
+
+</body>
+</html>
diff --git a/libdb/tcl/docs/historic.html b/libdb/tcl/docs/historic.html
new file mode 100644
index 0000000..85f474f
--- /dev/null
+++ b/libdb/tcl/docs/historic.html
@@ -0,0 +1,169 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Compatibility Commands"></A>Compatibility Commands</H2>
+The compatibility commands for old Dbm and Ndbm are described in the <A HREF="../../docs/api_c/dbm.html">dbm</A>
+manpage.
+<P><B>> berkdb dbminit <I>filename</I></B>
+<P>This command will invoke the dbminit function.&nbsp;&nbsp; <B><I>Filename</I></B>
+is used as the name of the database.
+<P>
+<HR WIDTH="100%"><B>> berkdb dbmclose</B>
+<P>This command will invoke the dbmclose function.
+<P>
+<HR WIDTH="100%"><B>> berkdb fetch <I>key</I></B>
+<P>This command will invoke the fetch function.&nbsp;&nbsp; It will return
+the data associated with the given <B><I>key </I></B>or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb store <I>key data</I></B>
+<P>This command will invoke the store function.&nbsp;&nbsp; It will store
+the <B><I>key/data</I></B> pair.&nbsp; It will return a 0 on success or
+throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb delete <I>key</I></B>
+<P>This command will invoke the delete function.&nbsp;&nbsp; It will delete
+the <B><I>key</I></B> from the database.&nbsp; It will return a 0 on success
+or throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb firstkey</B>
+<P>This command will invoke the firstkey function.&nbsp;&nbsp; It will
+return the first key in the database or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb nextkey <I>key</I></B>
+<P>This command will invoke the nextkey function.&nbsp;&nbsp; It will return
+the next key after the given <B><I>key</I></B> or a Tcl error.
+<P>
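+A short sketch of the dbm-style commands above (illustrative only;
+<B>mydbm</B> is a placeholder file name):
+<PRE>
+# Open an old-style dbm database, store and fetch a pair, then
+# close it.
+berkdb dbminit mydbm
+berkdb store fruit apple
+puts [berkdb fetch fruit]
+puts [berkdb firstkey]
+berkdb delete fruit
+berkdb dbmclose
+</PRE>
+<P>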
+<HR WIDTH="100%"><B>> berkdb hcreate <I>nelem</I></B>
+<P>This command will invoke the hcreate function with <B><I>nelem</I></B>
+elements.&nbsp; It will return a 0 on success or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hsearch <I>key data action</I></B>
+<P>This command will invoke the hsearch function with <B><I>key</I></B>
+and <B><I>data</I></B>.&nbsp; The <B><I>action</I></B> must be either <B>find</B>
+or <B>enter</B>.&nbsp; If it is <B>find</B>, it will return the resultant
+data.&nbsp; If it is <B>enter</B>, it will return a 0 on success or a Tcl
+error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hdestroy</B>
+<P>This command will invoke the hdestroy function.&nbsp; It will return
+a 0.
+<HR WIDTH="100%"><B>> berkdb ndbm_open [-create] [-rdonly] [-truncate]
+[-mode
+<I>mode</I>] [--] <I>filename</I></B>
+<P>This command will invoke the dbm_open function.&nbsp;&nbsp;&nbsp; After
+it successfully gets a handle to a database, we bind it to a new Tcl command
+of the form <B><I>ndbmX, </I></B>where X is an integer starting at 0 (e.g.
+<B>ndbm0,
+ndbm1, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to
+create the top level database function.&nbsp; It is through this handle
+that the user can access all of the commands described below.&nbsp; Internally,
+the database handle is sent as the <I>ClientData</I> portion of the new
+command set so that all future database calls access the appropriate handle.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-- </B>- Terminate the list of options and use remaining arguments as
+the file or subdb names (thus allowing the use of filenames beginning with
+a dash '-')</LI>
+
+<LI>
+<B>-create</B> selects the O_CREAT flag&nbsp; to create underlying files</LI>
+
+<LI>
+<B>-rdonly</B> selects the O_RDONLY flag for opening in read-only mode</LI>
+
+<LI>
+<B>-truncate</B> selects the O_TRUNC flag to truncate the database</LI>
+
+<LI>
+<B>-mode<I> mode</I></B> specifies the mode for created files</LI>
+
+<LI>
+<B><I>filename</I></B> indicates the name of the database</LI>
+</UL>
+
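+<P>
+For example (illustrative only; <B>myndbm</B> is a placeholder file name):
+<PRE>
+# Open an ndbm-style database, insert and fetch a pair, then close
+# the handle.
+set ndbm [berkdb ndbm_open -create -mode 0644 -- myndbm]
+$ndbm store fruit apple insert
+puts [$ndbm fetch fruit]
+$ndbm close
+</PRE>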
+<P><BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> close</B>
+<P>This command closes the database and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the dbm_close function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> clearerr</B>
+<P>This command clears the error condition on the database.&nbsp;&nbsp; This command
+directly translates to the dbm_clearerr function call.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> delete <I>key</I></B>
+<P>This command deletes the <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_delete function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> dirfno</B>
+<P>This command directly translates to the dbm_dirfno function call.&nbsp;
+It returns either the results,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> error</B>
+<P>This command returns the last error.&nbsp;&nbsp; This command directly
+translates to the dbm_error function call.&nbsp; It returns an error string.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> fetch <I>key</I></B>
+<P>This command gets the given <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_fetch function call.&nbsp;
+It returns either the data,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> firstkey</B>
+<P>This command returns the first key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_firstkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> nextkey</B>
+<P>This command returns the next key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_nextkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> pagfno</B>
+<P>This command directly translates to the dbm_pagfno function call.&nbsp;
+It returns either the results,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> rdonly</B>
+<P>This command changes the database to readonly.&nbsp;&nbsp; This command
+directly translates to the dbm_rdonly function call.&nbsp; It returns either
+a 0 (for success),&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> store <I>key data </I>insert|replace</B>
+<P>This command puts the given <B><I>key</I></B> and <B><I>data</I></B>
+pair into the database.&nbsp;&nbsp; This command directly translates to
+the dbm_store function call.&nbsp; It will either <B>insert</B> or <B>replace</B>
+the data based on the action given in the third argument.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+</BODY>
+</HTML>
diff --git a/libdb/tcl/docs/index.html b/libdb/tcl/docs/index.html
new file mode 100644
index 0000000..845b6ca
--- /dev/null
+++ b/libdb/tcl/docs/index.html
@@ -0,0 +1,51 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<CENTER>
+<H1>
+Complete Tcl Interface for Berkeley DB</H1></CENTER>
+
+<UL type=disc>
+<LI>
+<A HREF="../../docs/api_tcl/tcl_index.html">General use Berkeley DB commands</A></LI>
+</UL>
+
+<UL type=disc>
+<LI>
+<A HREF="./env.html">Environment commands</A></LI>
+
+<LI>
+<A HREF="./lock.html">Locking commands</A></LI>
+
+<LI>
+<A HREF="./log.html">Logging commands</A></LI>
+
+<LI>
+<A HREF="./mpool.html">Memory Pool commands</A></LI>
+
+<LI>
+<A HREF="./rep.html">Replication commands</A></LI>
+
+<LI>
+<A HREF="./txn.html">Transaction commands</A></LI>
+</UL>
+
+<UL>
+<LI>
+<A HREF="./db.html">Access Method commands</A></LI>
+
+<LI>
+<A HREF="./test.html">Debugging and Testing</A></LI>
+
+<LI>
+<A HREF="./historic.html">Compatibility commands</A></LI>
+
+<LI>
+<A HREF="./library.html">Convenience commands</A></LI>
+</UL>
diff --git a/libdb/tcl/docs/library.html b/libdb/tcl/docs/library.html
new file mode 100644
index 0000000..bfb1588
--- /dev/null
+++ b/libdb/tcl/docs/library.html
@@ -0,0 +1,27 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+<HR WIDTH="100%">
+<H2>
+<A NAME="Convenience Commands"></A>Convenience Commands</H2>
+The convenience commands are provided for ease of use with the DB test
+suite.
+<P><B>> berkdb rand</B>
+<P>This command will invoke the rand function and return the random number.
+<P>
+<HR WIDTH="100%"><B>> berkdb random_int <I>low high</I></B>
+<P>This command will invoke the rand function and return a number between
+<B><I>low</I></B>
+and <B><I>high</I></B>.
+<P>
+<HR WIDTH="100%">
+<P><B>> berkdb srand <I>seed</I></B>
+<P>This command will invoke the srand function with the given <B><I>seed</I></B>
+and return 0.
+<P>
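+For example (illustrative only):
+<PRE>
+# Seed the generator, then draw a value between 1 and 100.
+berkdb srand 12345
+puts [berkdb random_int 1 100]
+</PRE>
+<P>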
+<HR WIDTH="100%">
diff --git a/libdb/tcl/docs/lock.html b/libdb/tcl/docs/lock.html
new file mode 100644
index 0000000..d65142b
--- /dev/null
+++ b/libdb/tcl/docs/lock.html
@@ -0,0 +1,207 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+<a NAME="Locking Commands"></a>Locking Commands</h2>
+Most locking commands work with the environment handle.&nbsp; However,
+when a user gets a lock we create a new lock handle that they then use,
+in a similar manner to all the other handles, to release the lock.&nbsp;
+We present the general locking functions first, and then those that manipulate
+locks.
+<p><b>> &lt;env> lock_detect [default|oldest|youngest|random]</b>
+<p>This command runs the deadlock detector.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_detect.html">lock_detect</a> DB call.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The first argument sets the policy
+for deadlock as follows:
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection
+(default if not specified)</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> lock_stat</b>
+<p>This command returns a list of name/value pairs where the names correspond
+to the C-structure field names of DB_LOCK_STAT and the values are the data
+returned.&nbsp; This command is a direct translation of the <a href="../../docs/api_c/lock_stat.html">lock_stat</a>
+DB call.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id</b>
+<p>This command returns a unique locker ID value.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_id.html">lock_id</a> DB call.
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_free&nbsp; </b><i>locker</i>
+<p>This command frees the locker allocated by the lock_id call. It directly
+translates to the&nbsp; <a href="../../docs/api_c/lock_id.html">lock_id_free
+</a>DB
+call.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_set&nbsp; </b><i>current
+max</i>
+<p>This is a diagnostic command to set the locker id that will get
+allocated next and the maximum id that will trigger the id reclaim
+algorithm.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_get"></a><b>> &lt;env> lock_get [-nowait]<i>lockmode
+locker obj</i></b>
+<p>This command gets a lock. It will invoke the <a href="../../docs/api_c/lock_get.html">lock_get</a>
+function.&nbsp; After it successfully gets a handle to a lock, we bind
+it to a new Tcl command of the form <b><i>$env.lockX</i></b>, where X is
+an integer starting at&nbsp; 0 (e.g. <b>$env.lock0, $env.lock1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level locking
+command function.&nbsp; It is through this handle that the user can release
+the lock.&nbsp; Internally, the handle we get back from DB will be stored
+as the <i>ClientData</i> portion of the new command set so that future
+locking calls will have that handle readily available.
+<p>The arguments are:
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
+
+<li>
+<b><i>obj</i></b> specifies an object to lock</li>
+
+<li>
+the <b><i>lock mode</i></b> is specified as one of the following:</li>
+
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
+
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
+
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
+
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
+
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
+
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
+
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
+</ul>
+
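+<p>
+A short sketch (illustrative only; it assumes an environment handle
+<b>env0</b> opened with the locking subsystem enabled, and
+<b>myobject</b> is an arbitrary application-chosen object name):
+<pre>
+# Allocate a locker ID, take a write lock on an object, then
+# release the lock and free the locker.
+set locker [env0 lock_id]
+set lock [env0 lock_get write $locker myobject]
+$lock put
+env0 lock_id_free $locker
+</pre>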
+<hr WIDTH="100%">
+<br><b>> &lt;lock> put</b>
+<p>This command releases the lock referenced by the command.&nbsp; It is
+a direct translation of the <a href="../../docs/api_c/lock_put.html">lock_put</a>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; Additionally, since
+the handle is no longer valid, we will call
+<i>Tcl_DeleteCommand()
+</i>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_vec"></a><b>> &lt;env> lock_vec [-nowait] <i>locker
+</i>{get|put|put_all|put_obj
+[<i>obj</i>] [<i>lockmode</i>] [<i>lock</i>]} ...</b>
+<p>This command performs a series of lock calls.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/lock_vec.html">lock_vec</a> function.&nbsp;
+This command will return a list of the return values from each operation
+specified in the argument list.&nbsp; For the 'put' operations the entry
+in the return value list is either a 0 (for success) or an error.&nbsp;
+For the 'get' operation, the entry is the lock widget handle, <b>$env.lockN</b>
+(as described above in <a href="#> <env> lock_get">&lt;env> lock_get</a>)
+or an error.&nbsp; If an error occurs, the return list will contain the
+return values for all the successful operations up to the erroneous one and
+the error code for that operation.&nbsp; Subsequent operations will be
+ignored.
+<p>As for the other operations, if we are doing a 'get' we will create
+the commands and if we are doing a 'put' we will have to delete the commands.&nbsp;
+Additionally, we will have to do this after the call to the DB lock_vec
+and iterate over the results, creating and/or deleting Tcl commands.&nbsp;
+It is possible that we may return a lock widget from a get operation that
+is considered invalid, if, for instance, there was a <b>put_all</b> operation
+performed later in the vector of operations.&nbsp; The arguments are:
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
+
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
+
+<li>
+the lock vectors are tuples consisting of {an operation, lock object, lock
+mode, lock handle} where what is required is based on the operation desired:</li>
+
+<ul>
+<li>
+<b>get</b> specifies DB_LOCK_GET to get a lock.&nbsp; Requires a tuple <b>{get
+<i>obj mode</i>}
+</b>where
+<b><i>mode</i></b>
+is:</li>
+
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
+
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
+
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
+
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
+
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
+
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
+
+<li>
+<b>put</b> specifies DB_LOCK_PUT to release a <b><i>lock</i></b>.&nbsp;
+Requires a tuple <b>{put <i>lock}</i></b></li>
+
+<li>
+<b>put_all </b>specifies DB_LOCK_PUT_ALL to release all locks held by <b><i>locker</i></b>.&nbsp;
+Requires a tuple <b>{put_all}</b></li>
+
+<li>
+<b>put_obj</b> specifies DB_LOCK_PUT_OBJ to release all locks held by <b><i>locker</i></b>
+associated with the given <b><i>obj</i></b>.&nbsp; Requires a tuple <b>{put_obj
+<i>obj}</i></b></li>
+</ul>
+</ul>
+
+<hr WIDTH="100%">
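+<p>
+For example (illustrative only, reusing the hypothetical <b>env0</b>
+handle and <b>$locker</b> ID from the sketch above; the object names are
+placeholders):
+<pre>
+# Get two locks in a single call, then release everything this
+# locker holds.
+set res [env0 lock_vec $locker {get object1 read} {get object2 write}]
+env0 lock_vec $locker {put_all}
+</pre>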
+<br><a NAME="> <env> lock_vec"></a><b>> &lt;env> lock_timeout <i>timeout</i></b>
+<p>This command sets the lock timeout for all future locks in this environment.&nbsp;
+The timeout is in microseconds.
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/libdb/tcl/docs/log.html b/libdb/tcl/docs/log.html
new file mode 100644
index 0000000..49f2f0a
--- /dev/null
+++ b/libdb/tcl/docs/log.html
@@ -0,0 +1,124 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Logging Commands"></A>Logging Commands</H2>
+Logging commands work from the environment handle to control the use of
+the log files.&nbsp; Log files are opened when the environment is opened
+and closed when the environment is closed.&nbsp; In all of the commands
+in the logging subsystem that take or return a log sequence number, it
+is of the form:
+<BR><B>{<I>fileid offset</I>}</B>
+<BR>where the <B><I>fileid</I></B> is an identifier of the log file, as
+returned from the <A HREF="#> <env> log_get">log_get</A> call.
+<P><B>> &lt;env> log_archive [-arch_abs] [-arch_data] [-arch_log]</B>
+<P>This command returns&nbsp; a list of log files that are no longer in
+use.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_archive.html">log_archive</A>
+function. The arguments are:
+<UL>
+<LI>
+<B>-arch_abs </B>selects DB_ARCH_ABS to return all pathnames as absolute
+pathnames</LI>
+
+<LI>
+<B>-arch_data </B>selects DB_ARCH_DATA to return a list of database files</LI>
+
+<LI>
+<B>-arch_log </B>selects DB_ARCH_LOG to return a list of log files</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_compare <I>lsn1 lsn2</I></B>
+<P>This command compares two log sequence numbers, given as <B><I>lsn1</I></B>
+and <B><I>lsn2</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_compare.html">log_compare</A>
+function.&nbsp; It will return a -1, 0, 1 to indicate if <B><I>lsn1</I></B>
+is less than, equal to or greater than <B><I>lsn2</I></B> respectively.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_file <I>lsn</I></B>
+<P>This command returns&nbsp; the file name associated with the given <B><I>lsn</I></B>.&nbsp;
+It is a direct call to the <A HREF="../../docs/api_c/log_file.html">log_file</A>
+function.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_flush [<I>lsn</I>]</B>
+<P>This command flushes the log up to the specified <B><I>lsn</I></B>
+or flushes all records if none is given.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/log_flush.html">log_flush</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><A NAME="<env> log_get"></A><B>> &lt;env> log_get<I> </I>[-checkpoint]
+[-current] [-first] [-last] [-next] [-prev] [-set <I>lsn</I>]</B>
+<P>This command retrieves a record from the log according to the <B><I>lsn</I></B>
+given and returns it and the data.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_get.html">log_get</A>
+function.&nbsp; It is a way of implementing a manner of log iteration similar
+to <A HREF="../../docs/api_tcl/db_cursor.html">cursors</A>.&nbsp;&nbsp;
+The information we return is similar to database information.&nbsp; We
+return a list where the first item is the LSN (which is a list itself)
+and the second item is the data.&nbsp; So it looks like, fully expanded,
+<B>{{<I>fileid</I>
+<I>offset</I>}
+<I>data</I>}.</B>&nbsp;
+In the case where DB_NOTFOUND is returned, we return an empty list <B>{}</B>.&nbsp;
+All other errors return a Tcl error.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag to return the LSN/data
+pair of the last record written through <A HREF="#> <env> log_put">log_put</A>
+with DB_CHECKPOINT specified</LI>
+
+<LI>
+<B>-current</B> selects the DB_CURRENT flag to return the current record</LI>
+
+<LI>
+<B>-first</B> selects the DB_FIRST flag to return the first record in the
+log.</LI>
+
+<LI>
+<B>-last </B>selects the DB_LAST flag to return the last record in the
+log.</LI>
+
+<LI>
+<B>-next</B> selects the DB_NEXT flag to return the next record in the
+log.</LI>
+
+<LI>
+<B>-prev </B>selects the DB_PREV flag to return the&nbsp; previous record
+in the log.</LI>
+
+<LI>
+<B>-set</B> selects the DB_SET flag to return the record specified by the
+given <B><I>lsn</I></B></LI>
+</UL>
+
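+<P>
+For example, a simple forward walk of the log (illustrative only; it
+assumes an environment handle <B>env0</B> opened with logging enabled):
+<PRE>
+# Walk the log from the first record onward.  Each record is
+# {{fileid offset} data}; an empty list marks the end of the log.
+for {set rec [env0 log_get -first]} {[llength $rec] != 0} \
+        {set rec [env0 log_get -next]} {
+    puts "lsn=[lindex $rec 0] bytes=[string length [lindex $rec 1]]"
+}
+</PRE>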
+<HR WIDTH="100%">
+<BR><A NAME="> <env> log_put"></A><B>> &lt;env> log_put<I> </I>[-checkpoint]
+[-flush] <I>record</I></B>
+<P>This command stores a <B><I>record</I></B> into the log and returns
+the LSN of the log record.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_put.html">log_put</A>
+function.&nbsp; It returns either an LSN or it throws a Tcl error with
+a system message.&nbsp;<B> </B>The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag</LI>
+
+<LI>
+<B>-flush </B>selects the DB_FLUSH flag to flush the log to disk.</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_stat</B>
+<P>This command returns&nbsp; the statistics associated with the logging
+subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_stat.html">log_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_LOG_STAT
+structure.
+</BODY>
+</HTML>
diff --git a/libdb/tcl/docs/mpool.html b/libdb/tcl/docs/mpool.html
new file mode 100644
index 0000000..7f2359b
--- /dev/null
+++ b/libdb/tcl/docs/mpool.html
@@ -0,0 +1,190 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Memory Pool Commands"></A>Memory Pool Commands</H2>
+Memory pools are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the pool and&nbsp; then use it for a variety of operations.&nbsp;
+Some of the memory pool commands use the environment instead. Those are
+presented first.
+<P><B>> &lt;env> mpool_stat</B>
+<P>This command returns&nbsp; the statistics associated with the memory
+pool subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_stat.html">memp_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_MPOOL_STAT
+structure.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_sync <I>lsn</I></B>
+<P>This command flushes the memory pool for all pages with a log sequence
+number less than <B><I>lsn</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_sync.html">memp_sync&nbsp;</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_trickle <I>percent</I></B>
+<P>This command tells DB to ensure that at least <B><I>percent</I></B>
+percent of the pages are clean by writing out enough dirty pages to
+achieve that percentage.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_trickle.html">memp_trickle</A>
+function.&nbsp; The command will return the number of pages actually written.&nbsp;
+It returns either the number of pages on success, or it throws a Tcl error
+with a system message.
+<BR>
+<HR WIDTH="100%">
+<P><B>> &lt;env> mpool [-create] [-nommap] [-rdonly] [-mode <I>mode</I>]
+-pagesize <I>size</I> [<I>file</I>]</B>
+<P>This command creates a new memory pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fopen.html">memp_fopen</A>
+function.&nbsp; After it successfully gets a handle to a memory pool, we
+bind it to a new Tcl command of the form <B><I>$env.mpX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0, $env.mp1, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level memory
+pool functions.&nbsp; It is through this handle that the user can manipulate
+the pool.&nbsp; Internally, the handle we get back from DB will be stored
+as the <I>ClientData</I> portion of the new command set so that future
+memory pool calls will have that handle readily available.&nbsp; Additionally,
+we need to maintain this handle in relation to the environment so that
+if the user calls <A HREF="../../docs/api_tcl/env_close.html">&lt;env> close</A> without closing
+the memory pool we can properly clean up.&nbsp; The arguments are:
+<UL>
+<LI>
+<B><I>file</I></B> is the name of the file to open</LI>
+
+<LI>
+<B>-create </B>selects the DB_CREATE flag to create underlying file</LI>
+
+<LI>
+<B>-mode <I>mode </I></B>sets the permissions of created file to <B><I>mode</I></B></LI>
+
+<LI>
+<B>-nommap</B> selects the DB_NOMMAP flag to disallow using mmap'ed files</LI>
+
+<LI>
+<B>-pagesize</B> sets the underlying file page size to <B><I>size</I></B></LI>
+
+<LI>
+<B>-rdonly </B>selects the DB_RDONLY flag for read only access</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> close</B>
+<P>This command closes the memory pool.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/memp_fclose.html">memp_close</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<P>Additionally, since the handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the environment.&nbsp;
+We will go through the list of pinned pages that were acquired by the <A HREF="#> <mp> get">get</A>
+command and
+<A HREF="#> <pg> put">put</A> them back.
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> fsync</B>
+<P>This command flushes all of the file's dirty pages to disk.&nbsp; It
+is a direct call to the <A HREF="../../docs/api_c/memp_fsync.html">memp_fsync</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<HR WIDTH="100%">
+<BR><A NAME="> <mp> get"></A><B>> &lt;mp> get [-create] [-last] [-new]
+[<I>pgno</I>]</B>
+<P>This command gets the&nbsp; <B><I>pgno </I></B>page from the memory
+pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fget.html">memp_fget</A>
+function and possibly the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A>
+function if any options are chosen to set the page characteristics.&nbsp;
+After it successfully gets a handle to a page,&nbsp; we bind it to and
+return a new Tcl command of the form <B><I>$env.mpN.pX</I></B>, where X
+is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0.p0, $env.mp1.p0, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level page functions.&nbsp;
+It is through this handle that the user can manipulate the page.&nbsp;
+Internally, the handle we get back from DB will be stored as the <I>ClientData</I>
+portion of the new command set.&nbsp; We need to store this handle in&nbsp;
+relation to the memory pool handle so that if the memory pool is closed,
+we will <A HREF="#> <pg> put">put</A> back the pages (setting the discard
+flag) and delete that set of commands.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-create </B>selects the DB_MPOOL_CREATE flag&nbsp; to create the page
+if it does not exist.</LI>
+
+<LI>
+<B>-last</B> selects the DB_MPOOL_LAST flag to return the last page in
+the file</LI>
+
+<LI>
+<B>-new</B> selects the DB_MPOOL_NEW flag to create a new page</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> pgnum</B>
+<P>This command returns the page number associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> pgsize</B>
+<P>This command returns the page size associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> set [-clean] [-dirty] [-discard]</B>
+<P>This command sets the characteristics of the page.&nbsp; It is a direct
+call to the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><A NAME="> <pg> put"></A><B>> &lt;pg> put [-clean] [-dirty] [-discard]</B>
+<P>This command will put back the page to the memory pool.&nbsp; It is
+a direct call to the <A HREF="../../docs/api_c/memp_fput.html">memp_fput</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message. Additionally, since the
+handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so that
+further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the memory pool.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
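+<P>
+Putting these commands together (an illustrative sketch only; <B>env0</B>
+and <B>mpfile</B> are placeholder names):
+<PRE>
+# Open a memory pool file, create page 1, put it back dirty, then
+# flush and close the pool.
+set mp [env0 mpool -create -pagesize 512 mpfile]
+set pg [$mp get -create 1]
+puts "page [$pg pgnum], size [$pg pgsize]"
+$pg put -dirty
+$mp fsync
+$mp close
+</PRE>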
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> init <I>val|string</I></B>
+<P>This command initializes the page to the <B><I>val</I></B> given or
+places the <B><I>string</I></B> given at the beginning of the page.&nbsp;
+It returns a 0 for success or it throws a Tcl error with an error message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> is_setto <I>val|string</I></B>
+<P>This command verifies the page contains the <B><I>val</I></B> given
+or checks that the <B>string</B> given is at the beginning of the page.&nbsp;
+It returns a 1 if the page is correctly set to the value and a 0 otherwise.
diff --git a/libdb/tcl/docs/rep.html b/libdb/tcl/docs/rep.html
new file mode 100644
index 0000000..079fe44
--- /dev/null
+++ b/libdb/tcl/docs/rep.html
@@ -0,0 +1,51 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <title>Replication commands</title>
+</head>
+<body>
+
+<h2>
+<a NAME="Replication Commands"></a>Replication Commands</h2>
+Replication commands are invoked from the environment handle, after
+it has been opened with the appropriate flags defined
+<a href="./env.html">here</a>.<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> rep_process_message <i>machid</i> <i>control</i>
+<i>rec</i></b>
+<p>This command processes a single incoming replication message.&nbsp; It
+is a direct translation of the <a
+href="../../docs/api_c/rep_process_message.html">rep_process_message</a>
+function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>machid </b>is the machine ID of the machine that <i>sent</i> this
+message.</li>
+
+<li>
+<b>control</b> is a binary string containing the exact contents of the
+<b><i>control</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+
+<li>
+<b>rec</b> is a binary string containing the exact contents of the
+<b><i>rec</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+</ul>
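+<p>A sketch of typical use (assuming the variables <i>machid</i>,
+<i>control</i> and <i>rec</i> hold whatever the application's transport
+callback delivered from another site; they are not defined by this
+interface):
+<pre>
+# Feed one incoming replication message into the local environment.
+$env rep_process_message $machid $control $rec
+</pre>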
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> rep_elect <i>nsites</i> <i>pri</i> <i>wait</i>
+<i>sleep</i></b>
+<p>This command causes a replication election.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/rep_elect.html">rep_elect</a> function.&nbsp;
+Its arguments, all integers, correspond exactly to that C function's
+parameters.
+It will return a list containing two integers, which contain,
+respectively, the integer values returned in the C function's
+<i><b>midp</b></i> and <i><b>selfp</b></i> parameters.
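+<p>For example (an illustrative sketch only; the integer arguments follow
+the C function's parameters):
+<pre>
+# Hold an election among 3 sites with priority 1, wait 2, sleep 2.
+set res [$env rep_elect 3 1 2 2]
+set midp  [lindex $res 0]
+set selfp [lindex $res 1]
+</pre>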
+</body>
+</html>
diff --git a/libdb/tcl/docs/test.html b/libdb/tcl/docs/test.html
new file mode 100644
index 0000000..603ae56
--- /dev/null
+++ b/libdb/tcl/docs/test.html
@@ -0,0 +1,150 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Debugging"></A>Debugging and Testing</H2>
+We have imported the debugging system from the old test suite into the
+new interface to aid in debugging problems.&nbsp; There are several variables
+that are available both as globals to the C code, visible in gdb, and as
+variables in Tcl that the user can set.&nbsp; These variables are linked together
+so that changes in one venue are reflected in the other.&nbsp; The names
+of the variables have been modified a bit to reduce the likelihood
+of namespace trampling.&nbsp; We have added a double underscore to
+all the names.
+<P>The variables are all initialized to zero (0) thus resulting in debugging
+being turned off.&nbsp; The purpose of the debugging, fundamentally, is
+to allow the user to set a breakpoint prior to making a DB call.&nbsp;
+This breakpoint is set in the <I>__db_loadme() </I>function.&nbsp; The
+user may selectively turn on various debugging areas each controlled by
+a separate variable (note they all have two (2) underscores prepended to
+the name):
+<UL>
+<LI>
+<B>__debug_on</B> - Turns on the debugging system.&nbsp; This must be on
+for any debugging to occur</LI>
+
+<LI>
+<B>__debug_print</B> - Turns on printing a debug count statement on each
+call</LI>
+
+<LI>
+<B>__debug_test</B> - Hits the breakpoint in <I>__db_loadme</I> on the
+specific iteration</LI>
+
+<LI>
+<B>__debug_stop</B> - Hits the breakpoint in <I>__db_loadme</I> on every
+(or the next) iteration</LI>
+</UL>
+<B>Note to developers:</B>&nbsp; Anyone extending this interface must place
+a call to <B>_debug_check()</B> (no arguments) before every call into the
+DB library.
+<P>There is also a command available that will force a call to the _debug_check
+function.
+<P><B>> berkdb debug_check</B>
+<P>
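+<P>A minimal sketch of how these pieces fit together from the Tcl side
+(the iteration count below is arbitrary):
+<PRE>
+set __debug_on 1         ;# enable the debugging system
+set __debug_print 1      ;# print a debug count on each call
+set __debug_test 25      ;# hit the __db_loadme breakpoint on call 25
+berkdb debug_check       ;# force an immediate _debug_check call
+</PRE>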
+<HR WIDTH="100%">
+<BR>For testing purposes we have added several hooks into the DB library
+and a small interface into the environment and/or database commands to
+manipulate the hooks.&nbsp; This command interface, the hooks, and everything
+that goes with them are only enabled when the test option is configured into
+DB.
+<P><B>> &lt;env> test copy <I>location</I></B>
+<BR><B>> &lt;db> test copy <I>location</I></B>
+<BR><B>> &lt;env> test abort <I>location</I></B>
+<BR><B>> &lt;db> test abort <I>location</I></B>
+<P>In order to test recovery we need to be able to abort the creation or
+deletion process at various points.&nbsp; We also want to invoke a copy
+function at those same points to copy the database file(s) so
+that we can obtain before/after snapshots of the databases.&nbsp; The interface
+provides the test command to specify a <B><I>location</I></B> where we
+wish to invoke a <B>copy</B> or an <B>abort</B>.&nbsp; The command is available
+from either the environment or the database for convenience.&nbsp; The
+<B><I>location</I></B>
+can be one of the following:
+<UL>
+<LI>
+<B>none -</B> Clears the location</LI>
+
+<LI>
+<B>preopen -</B> Sets the location prior to the __os_open call in the creation
+process</LI>
+
+<LI>
+<B>postopen</B> - Sets the location to immediately following the __os_open
+call in creation</LI>
+
+<LI>
+<B>postlogmeta</B> - Sets the location to immediately following the __db_log_page
+call to log the meta data in creation.&nbsp; Only valid for Btree.</LI>
+
+<LI>
+<B>postlog</B> - Sets the location to immediately following the last (or
+only) __db_log_page call in creation.</LI>
+
+<LI>
+<B>postsync</B> - Sets the location to immediately following the sync of
+the log page in creation.</LI>
+
+<LI>
+<B>prerename</B> - Sets the location prior to the __os_rename call in the
+deletion process.</LI>
+
+<LI>
+<B>postrename</B> - Sets the location to immediately following the __os_rename
+call in deletion</LI>
+</UL>
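+<P>For example, to exercise recovery one might arrange for the creation
+path to abort just after the meta-data page is logged, and later clear the
+hook again (an illustrative sketch; <B><I>$db</I></B> is an open database
+handle):
+<PRE>
+$db test abort postlogmeta      ;# abort creation after __db_log_page
+# ... run the create operation being tested, then recover ...
+$db test abort none             ;# clear the location
+</PRE>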
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mutex <I>mode nitems</I></B>
+<P>This command creates a mutex region for testing.&nbsp; It sets the mode
+of the region to <B><I>mode</I></B> and sets up for <B><I>nitems</I></B>
+number of mutex entries.&nbsp; After we successfully get a handle to a
+mutex we create a command of the form <B><I>$env.mutexX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mutex0, $env.mutex1,
+</B>etc).&nbsp;&nbsp;
+We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to create the top level
+mutex function.&nbsp; It is through this handle that the user can access
+all of the commands described below.&nbsp; Internally, the mutex handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future mutex calls access the appropriate handle.
+<P>
+<HR WIDTH="100%"><B>> &lt;mutex> close</B>
+<P>This command closes the mutex and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the __db_r_detach function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%"><B>> &lt;mutex> get <I>id</I></B>
+<P>This command locks the mutex identified by <B><I>id</I></B>.&nbsp; It
+returns either a 0 (for success),&nbsp; or it throws a Tcl error with a
+system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> release <I>id</I></B>
+<P>This command releases the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> getval <I>id</I></B>
+<P>This command gets the value stored for the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either the value,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> setval <I>id val</I></B>
+<P>This command sets the value stored for the mutex identified by <B><I>id
+</I></B>to
+<B><I>val</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%">
+<BR>&nbsp;
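+<P>A sketch of a complete mutex session follows.&nbsp; It assumes, as with
+the other handle-creating commands in this interface, that the mutex command
+returns the name of the new handle (e.g. <B>$env.mutex0</B>):
+<PRE>
+set m [$env mutex 0644 10]      ;# region mode 0644, 10 mutex entries
+$m get 0                        ;# lock mutex 0
+$m setval 0 42                  ;# store a value for mutex 0
+set v [$m getval 0]             ;# read it back
+$m release 0                    ;# unlock mutex 0
+$m close
+</PRE>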
+</BODY>
+</HTML>
diff --git a/libdb/tcl/docs/txn.html b/libdb/tcl/docs/txn.html
new file mode 100644
index 0000000..07c88c0
--- /dev/null
+++ b/libdb/tcl/docs/txn.html
@@ -0,0 +1,67 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+<a NAME="Transaction Commands"></a>Transaction Commands</h2>
+Transactions are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the transaction and&nbsp; then use it for a variety
+of operations.&nbsp; Some of the transaction commands use the environment
+instead.&nbsp; Those are presented first.&nbsp; The transaction command
+handle returned is the handle used by the various commands that can be
+transaction protected, such as <a href="../../docs/api_tcl/db_cursor.html">cursors</a>.
+<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> txn_checkpoint [-kbyte <i>kb</i>] [-min <i>min</i>]</b>
+<p>This command causes a checkpoint of the transaction region.&nbsp; It
+is a direct translation of the <a href="../../docs/api_c/txn_checkpoint.html">txn_checkpoint
+</a>function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-kbyte </b>causes the checkpoint to occur only if <b><i>kb</i></b> kilobytes
+of log data has been written since the last checkpoint</li>
+
+<li>
+<b>-min</b> causes the checkpoint to occur only if <b><i>min</i></b> minutes
+have passed since the last checkpoint</li>
+</ul>
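+<p>For example (a sketch only), to checkpoint only when at least 512
+kilobytes of log or 5 minutes have accumulated since the last checkpoint:
+<pre>
+$env txn_checkpoint -kbyte 512 -min 5
+</pre>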
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_stat</b>
+<p>This command returns transaction statistics.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/txn_stat.html">txn_stat</a> function.&nbsp;
+It will return a list of name/value pairs that correspond to the DB_TXN_STAT
+structure.
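+<p>A brief sketch of consuming the result (assuming each element is a
+{name value} pair, as with the other stat commands in this interface):
+<pre>
+foreach pair [$env txn_stat] {
+	puts "[lindex $pair 0]: [lindex $pair 1]"
+}
+</pre>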
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_id_set&nbsp;</b><i> current max</i>
+<p>This is a diagnostic command that sets the next transaction id to be
+allocated and the maximum transaction id, which is the point at which the
+reclamation algorithm is triggered.
+<hr WIDTH="100%">
+<br><b>>&nbsp; &lt;txn> id</b>
+<p>This command returns the transaction id.&nbsp; It is a direct call to
+the <a href="../../docs/api_c/txn_id.html">txn_id</a> function.&nbsp; The
+typical use of this identifier is as the <b><i>locker</i></b> value for
+the <a href="lock.html">lock_get</a> and <a href="lock.html">lock_vec</a>
+calls.
+<hr WIDTH="100%">
+<br><b>> &lt;txn> prepare</b>
+<p>This command initiates a two-phase commit.&nbsp; It is a direct call
+to the <a href="../../docs/api_c/txn_prepare.html">txn_prepare</a> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.
+<hr WIDTH="100%"><a NAME="> <env> lock_vec"></a><b>> &lt;env> txn_timeout
+<i>timeout</i></b>
+<p>This command sets the transaction timeout for transactions started in
+the future in this environment.&nbsp; The timeout is in microseconds.
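+<p>For example (a sketch only), to give future transactions a five second
+timeout:
+<pre>
+$env txn_timeout 5000000        ;# 5,000,000 microseconds = 5 seconds
+</pre>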
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/libdb/tcl/tcl_compat.c b/libdb/tcl/tcl_compat.c
new file mode 100644
index 0000000..d2dda70
--- /dev/null
+++ b/libdb/tcl/tcl_compat.c
@@ -0,0 +1,746 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#if CONFIG_TEST
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * bdb_HCommand --
+ * Implements h* functions.
+ *
+ * PUBLIC: int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_HCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *hcmds[] = {
+ "hcreate",
+ "hdestroy",
+ "hsearch",
+ NULL
+ };
+ enum hcmds {
+ HHCREATE,
+ HHDESTROY,
+ HHSEARCH
+ };
+ static char *srchacts[] = {
+ "enter",
+ "find",
+ NULL
+ };
+ enum srchacts {
+ ACT_ENTER,
+ ACT_FIND
+ };
+ ENTRY item, *hres;
+ ACTION action;
+ int actindex, cmdindex, nelem, result, ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], hcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum hcmds)cmdindex) {
+ case HHCREATE:
+ /*
+ * Must be 1 arg, nelem. Error if not.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "nelem");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &nelem);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = hcreate(nelem) == 0 ? 1: 0;
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "hcreate");
+ }
+ break;
+ case HHSEARCH:
+ /*
+ * 3 args for this. Error if different.
+ */
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ item.key = Tcl_GetStringFromObj(objv[2], NULL);
+ item.data = Tcl_GetStringFromObj(objv[3], NULL);
+ if (Tcl_GetIndexFromObj(interp, objv[4], srchacts,
+ "action", TCL_EXACT, &actindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum srchacts)actindex) {
+ case ACT_ENTER:
+ action = ENTER;
+ break;
+ default:
+ case ACT_FIND:
+ action = FIND;
+ break;
+ }
+ _debug_check();
+ hres = hsearch(item, action);
+ if (hres == NULL)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else if (action == FIND)
+ Tcl_SetResult(interp, (char *)hres->data, TCL_STATIC);
+ else
+ /* action is ENTER */
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+
+ break;
+ case HHDESTROY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)hdestroy();
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * bdb_NdbmOpen --
+ * Opens an ndbm database.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+ * PUBLIC: #endif
+ */
+int
+bdb_NdbmOpen(interp, objc, objv, dbpp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBM **dbpp; /* Dbm pointer */
+{
+ static char *ndbopen[] = {
+ "-create",
+ "-mode",
+ "-rdonly",
+ "-truncate",
+ "--",
+ NULL
+ };
+ enum ndbopen {
+ NDB_CREATE,
+ NDB_MODE,
+ NDB_RDONLY,
+ NDB_TRUNC,
+ NDB_ENDARG
+ };
+
+ u_int32_t open_flags;
+ int endarg, i, mode, optindex, read_only, result, ret;
+ char *arg, *db;
+
+ result = TCL_OK;
+ open_flags = 0;
+ endarg = mode = 0;
+ read_only = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], ndbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum ndbopen)optindex) {
+ case NDB_CREATE:
+ open_flags |= O_CREAT;
+ break;
+ case NDB_RDONLY:
+ read_only = 1;
+ break;
+ case NDB_TRUNC:
+ open_flags |= O_TRUNC;
+ break;
+ case NDB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case NDB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+ * Any args we have left, (better be 0, or 1 left) is a
+ * file name. If we have 0, then an in-memory db. If
+ * there is 1, a db name.
+ */
+ db = NULL;
+ if (i != objc && i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (i != objc)
+ db = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the database. Everything
+ * is okay so far, no errors, if we get here.
+ *
+ * Now open the database.
+ */
+ if (read_only)
+ open_flags |= O_RDONLY;
+ else
+ open_flags |= O_RDWR;
+ _debug_check();
+ if ((*dbpp = dbm_open(db, open_flags, mode)) == NULL) {
+ ret = Tcl_GetErrno();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db open");
+ goto error;
+ }
+ return (TCL_OK);
+
+error:
+ *dbpp = NULL;
+ return (result);
+}
+
+/*
+ * bdb_DbmCommand --
+ * Implements "dbm" commands.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_DbmCommand
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+ * PUBLIC: #endif
+ */
+int
+bdb_DbmCommand(interp, objc, objv, flag, dbm)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ int flag; /* Which db interface */
+ DBM *dbm; /* DBM pointer */
+{
+ static char *dbmcmds[] = {
+ "dbmclose",
+ "dbminit",
+ "delete",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "store",
+ NULL
+ };
+ enum dbmcmds {
+ DBMCLOSE,
+ DBMINIT,
+ DBMDELETE,
+ DBMFETCH,
+ DBMFIRST,
+ DBMNEXT,
+ DBMSTORE
+ };
+ static char *stflag[] = {
+ "insert", "replace",
+ NULL
+ };
+ enum stflag {
+ STINSERT, STREPLACE
+ };
+ datum key, data;
+ void *dtmp, *ktmp;
+ u_int32_t size;
+ int cmdindex, freedata, freekey, stindex, result, ret;
+ char *name, *t;
+
+ result = TCL_OK;
+ freekey = freedata = 0;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbmcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum dbmcmds)cmdindex) {
+ case DBMCLOSE:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = dbmclose();
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose");
+ break;
+ case DBMINIT:
+ /*
+ * Must be 1 arg - file.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "file");
+ return (TCL_ERROR);
+ }
+ name = Tcl_GetStringFromObj(objv[2], NULL);
+ if (flag == DBTCL_DBM)
+ ret = dbminit(name);
+ else {
+ Tcl_SetResult(interp, "Bad interface flag for command",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit");
+ break;
+ case DBMFETCH:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ data = fetch(key);
+ else if (flag == DBTCL_NDBM)
+ data = dbm_fetch(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ case DBMSTORE:
+ /*
+ * 2 args for this. Error if different.
+ */
+ if (objc != 4 && flag == DBTCL_DBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data");
+ return (TCL_ERROR);
+ }
+ if (objc != 5 && flag == DBTCL_NDBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ if ((ret = _CopyObjBytes(
+ interp, objv[3], &dtmp, &size, &freedata)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ data.dsize = size;
+ data.dptr = (char *)dtmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = store(key, data);
+ else if (flag == DBTCL_NDBM) {
+ if (Tcl_GetIndexFromObj(interp, objv[4], stflag,
+ "flag", TCL_EXACT, &stindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum stflag)stindex) {
+ case STINSERT:
+ flag = DBM_INSERT;
+ break;
+ case STREPLACE:
+ flag = DBM_REPLACE;
+ break;
+ }
+ ret = dbm_store(dbm, key, data, flag);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store");
+ break;
+ case DBMDELETE:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = delete(key);
+ else if (flag == DBTCL_NDBM)
+ ret = dbm_delete(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete");
+ break;
+ case DBMFIRST:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ key = firstkey();
+ else if (flag == DBTCL_NDBM)
+ key = dbm_firstkey(dbm);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (key.dptr == NULL ||
+ (ret = __os_malloc(NULL, key.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, key.dptr, key.dsize);
+ t[key.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ case DBMNEXT:
+ /*
+ * 0 or 1 arg for this. Error if different.
+ */
+ _debug_check();
+ if (flag == DBTCL_DBM) {
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ data = nextkey(key);
+ } else if (flag == DBTCL_NDBM) {
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ data = dbm_nextkey(dbm);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ }
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+}
+
+/*
+ * ndbm_Cmd --
+ * Implements the "ndbm" widget.
+ *
+ * PUBLIC: int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+ndbm_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *ndbcmds[] = {
+ "clearerr",
+ "close",
+ "delete",
+ "dirfno",
+ "error",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "pagfno",
+ "rdonly",
+ "store",
+ NULL
+ };
+ enum ndbcmds {
+ NDBCLRERR,
+ NDBCLOSE,
+ NDBDELETE,
+ NDBDIRFNO,
+ NDBERR,
+ NDBFETCH,
+ NDBFIRST,
+ NDBNEXT,
+ NDBPAGFNO,
+ NDBRDONLY,
+ NDBSTORE
+ };
+ DBM *dbp;
+ DBTCL_INFO *dbip;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ dbp = (DBM *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], ndbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum ndbcmds)cmdindex) {
+ case NDBCLOSE:
+ _debug_check();
+ dbm_close(dbp);
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ res = Tcl_NewIntObj(0);
+ break;
+ case NDBDELETE:
+ case NDBFETCH:
+ case NDBFIRST:
+ case NDBNEXT:
+ case NDBSTORE:
+ result = bdb_DbmCommand(interp, objc, objv, DBTCL_NDBM, dbp);
+ break;
+ case NDBCLRERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_clearerr(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "clearerr");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBDIRFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_dirfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBPAGFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_pagfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_error(dbp);
+ Tcl_SetErrno(ret);
+ Tcl_SetResult(interp, Tcl_PosixError(interp), TCL_STATIC);
+ break;
+ case NDBRDONLY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_rdonly(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "rdonly");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+#endif /* CONFIG_TEST */
diff --git a/libdb/tcl/tcl_db.c b/libdb/tcl/tcl_db.c
new file mode 100644
index 0000000..12344cd
--- /dev/null
+++ b/libdb/tcl/tcl_db.c
@@ -0,0 +1,2421 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbAssociate __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbClose __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *));
+static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, int));
+static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbTruncate __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCursor __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbJoin __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbGetjoin __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCount __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_second_call __P((DB *, const DBT *, const DBT *, DBT *));
+
+/*
+ * _DbInfoDelete --
+ *
+ * PUBLIC: void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_DbInfoDelete(interp, dbip)
+ Tcl_Interp *interp;
+ DBTCL_INFO *dbip;
+{
+ DBTCL_INFO *nextp, *p;
+ /*
+ * First we have to close any open cursors. Then we close
+ * our db.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ nextp = LIST_NEXT(p, entries);
+ /*
+ * Check if this is a cursor info structure and if
+ * it is, if it belongs to this DB. If so, remove
+ * its commands and info structure.
+ */
+ if (p->i_parent == dbip && p->i_type == I_DBC) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+}
+
+/*
+ *
+ * PUBLIC: int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * db_Cmd --
+ * Implements the "db" widget.
+ */
+int
+db_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *dbcmds[] = {
+#if CONFIG_TEST
+ "keyrange",
+ "pget",
+ "rpcid",
+ "test",
+#endif
+ "associate",
+ "close",
+ "count",
+ "cursor",
+ "del",
+ "get",
+ "get_join",
+ "get_type",
+ "is_byteswapped",
+ "join",
+ "put",
+ "stat",
+ "sync",
+ "truncate",
+ NULL
+ };
+ enum dbcmds {
+#if CONFIG_TEST
+ DBKEYRANGE,
+ DBPGET,
+ DBRPCID,
+ DBTEST,
+#endif
+ DBASSOCIATE,
+ DBCLOSE,
+ DBCOUNT,
+ DBCURSOR,
+ DBDELETE,
+ DBGET,
+ DBGETJOIN,
+ DBGETTYPE,
+ DBSWAPPED,
+ DBJOIN,
+ DBPUT,
+ DBSTAT,
+ DBSYNC,
+ DBTRUNCATE
+ };
+ DB *dbp;
+ DBC *dbc;
+ DBTCL_INFO *dbip;
+ DBTCL_INFO *ip;
+ DBTYPE type;
+ Tcl_Obj *res;
+ int cmdindex, isswapped, result, ret;
+ char newname[MSG_SIZE];
+
+ Tcl_ResetResult(interp);
+ dbp = (DB *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ memset(newname, 0, MSG_SIZE);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum dbcmds)cmdindex) {
+#if CONFIG_TEST
+ case DBKEYRANGE:
+ result = tcl_DbKeyRange(interp, objc, objv, dbp);
+ break;
+ case DBPGET:
+ result = tcl_DbGet(interp, objc, objv, dbp, 1);
+ break;
+ case DBRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbp handle directly.
+ * This is for testing purposes only. It is dbp-private data.
+ */
+ res = Tcl_NewLongObj(dbp->cl_id);
+ break;
+ case DBTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbp->dbenv);
+ break;
+#endif
+ case DBASSOCIATE:
+ result = tcl_DbAssociate(interp, objc, objv, dbp);
+ break;
+ case DBCLOSE:
+ result = tcl_DbClose(interp, objc, objv, dbp, dbip);
+ break;
+ case DBDELETE:
+ result = tcl_DbDelete(interp, objc, objv, dbp);
+ break;
+ case DBGET:
+ result = tcl_DbGet(interp, objc, objv, dbp, 0);
+ break;
+ case DBPUT:
+ result = tcl_DbPut(interp, objc, objv, dbp);
+ break;
+ case DBCOUNT:
+ result = tcl_DbCount(interp, objc, objv, dbp);
+ break;
+ case DBSWAPPED:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_byteswapped(dbp, &isswapped);
+ res = Tcl_NewIntObj(isswapped);
+ break;
+ case DBGETTYPE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_type(dbp, &type);
+ if (type == DB_BTREE)
+ res = Tcl_NewStringObj("btree", strlen("btree"));
+ else if (type == DB_HASH)
+ res = Tcl_NewStringObj("hash", strlen("hash"));
+ else if (type == DB_RECNO)
+ res = Tcl_NewStringObj("recno", strlen("recno"));
+ else if (type == DB_QUEUE)
+ res = Tcl_NewStringObj("queue", strlen("queue"));
+ else {
+ Tcl_SetResult(interp,
+ "db gettype: Returned unknown type\n", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBSTAT:
+ result = tcl_DbStat(interp, objc, objv, dbp);
+ break;
+ case DBSYNC:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->sync(dbp, 0);
+ res = Tcl_NewIntObj(ret);
+ if (ret != 0) {
+ Tcl_SetObjResult(interp, res);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbCursor(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBJOIN:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbJoin(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGETJOIN:
+ result = tcl_DbGetjoin(interp, objc, objv, dbp);
+ break;
+ case DBTRUNCATE:
+ result = tcl_DbTruncate(interp, objc, objv, dbp);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_db_stat --
+ */
+static int
+tcl_DbStat(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ void *sp;
+ Tcl_Obj *res, *flaglist, *myobjv[2];
+ DBTYPE type;
+ u_int32_t flag;
+ int result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-faststat?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-faststat") == 0)
+ flag = DB_FAST_STAT;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbp->stat(dbp, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ (void)dbp->get_type(dbp, &type);
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ if (type == DB_HASH) {
+ hsp = (DB_HASH_STAT *)sp;
+ MAKE_STAT_LIST("Magic", hsp->hash_magic);
+ MAKE_STAT_LIST("Version", hsp->hash_version);
+ MAKE_STAT_LIST("Page size", hsp->hash_pagesize);
+ MAKE_STAT_LIST("Number of keys", hsp->hash_nkeys);
+ MAKE_STAT_LIST("Number of records", hsp->hash_ndata);
+ MAKE_STAT_LIST("Fill factor", hsp->hash_ffactor);
+ MAKE_STAT_LIST("Buckets", hsp->hash_buckets);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Free pages", hsp->hash_free);
+ MAKE_STAT_LIST("Bytes free", hsp->hash_bfree);
+ MAKE_STAT_LIST("Number of big pages",
+ hsp->hash_bigpages);
+ MAKE_STAT_LIST("Big pages bytes free",
+ hsp->hash_big_bfree);
+ MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows);
+ MAKE_STAT_LIST("Overflow bytes free",
+ hsp->hash_ovfl_free);
+ MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ hsp->hash_dup_free);
+ }
+ } else if (type == DB_QUEUE) {
+ qsp = (DB_QUEUE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", qsp->qs_magic);
+ MAKE_STAT_LIST("Version", qsp->qs_version);
+ MAKE_STAT_LIST("Page size", qsp->qs_pagesize);
+ MAKE_STAT_LIST("Extent size", qsp->qs_extentsize);
+ MAKE_STAT_LIST("Number of records", qsp->qs_nkeys);
+ MAKE_STAT_LIST("Record length", qsp->qs_re_len);
+ MAKE_STAT_LIST("Record pad", qsp->qs_re_pad);
+ MAKE_STAT_LIST("First record number", qsp->qs_first_recno);
+ MAKE_STAT_LIST("Last record number", qsp->qs_cur_recno);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Number of pages", qsp->qs_pages);
+ MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree);
+ }
+ } else { /* BTREE and RECNO are same stats */
+ bsp = (DB_BTREE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", bsp->bt_magic);
+ MAKE_STAT_LIST("Version", bsp->bt_version);
+ MAKE_STAT_LIST("Number of keys", bsp->bt_nkeys);
+ MAKE_STAT_LIST("Number of records", bsp->bt_ndata);
+ MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey);
+ MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len);
+ MAKE_STAT_LIST("Record pad", bsp->bt_re_pad);
+ MAKE_STAT_LIST("Page size", bsp->bt_pagesize);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Levels", bsp->bt_levels);
+ MAKE_STAT_LIST("Internal pages", bsp->bt_int_pg);
+ MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg);
+ MAKE_STAT_LIST("Duplicate pages", bsp->bt_dup_pg);
+ MAKE_STAT_LIST("Overflow pages", bsp->bt_over_pg);
+ MAKE_STAT_LIST("Pages on freelist", bsp->bt_free);
+ MAKE_STAT_LIST("Internal pages bytes free",
+ bsp->bt_int_pgfree);
+ MAKE_STAT_LIST("Leaf pages bytes free",
+ bsp->bt_leaf_pgfree);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ bsp->bt_dup_pgfree);
+ MAKE_STAT_LIST("Bytes free in overflow pages",
+ bsp->bt_over_pgfree);
+ }
+ }
+
+ /*
+ * Construct a {name {flag1 flag2 ... flagN}} list for the
+ * dbp flags. These aren't access-method dependent, but they
+ * include all the interesting flags, and the integer value
+ * isn't useful from Tcl--return the strings instead.
+ */
+ myobjv[0] = Tcl_NewStringObj("Flags", strlen("Flags"));
+ myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_inmemdbflags);
+ flaglist = Tcl_NewListObj(2, myobjv);
+ if (flaglist == NULL) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ if ((result =
+ Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK)
+ goto error;
+
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_db_close --
+ */
+static int
+tcl_DbClose(interp, objc, objv, dbp, dbip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBTCL_INFO *dbip; /* Info pointer */
+{
+ static char *dbclose[] = {
+ "-nosync", "--", NULL
+ };
+ enum dbclose {
+ TCL_DBCLOSE_NOSYNC,
+ TCL_DBCLOSE_ENDARG
+ };
+ u_int32_t flag;
+ int endarg, i, optindex, result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ endarg = 0;
+ flag = 0;
+ if (objc > 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nosync?");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbclose,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbclose)optindex) {
+ case TCL_DBCLOSE_NOSYNC:
+ flag = DB_NOSYNC;
+ break;
+ case TCL_DBCLOSE_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ return (result);
+ if (endarg)
+ break;
+ }
+ _DbInfoDelete(interp, dbip);
+ _debug_check();
+
+ /* Paranoia. */
+ dbp->api_internal = NULL;
+
+ ret = (dbp)->close(dbp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db close");
+ return (result);
+}
+
+/*
+ * tcl_db_put --
+ */
+static int
+tcl_DbPut(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbputopts[] = {
+#if CONFIG_TEST
+ "-nodupdata",
+#endif
+ "-append",
+ "-auto_commit",
+ "-nooverwrite",
+ "-partial",
+ "-txn",
+ NULL
+ };
+ enum dbputopts {
+#if CONFIG_TEST
+ DBGET_NODUPDATA,
+#endif
+ DBPUT_APPEND,
+ DBPUT_AUTO_COMMIT,
+ DBPUT_NOOVER,
+ DBPUT_PART,
+ DBPUT_TXN
+ };
+ static char *dbputapp[] = {
+ "-append", NULL
+ };
+ enum dbputapp { DBPUT_APPEND0 };
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
+ db_recno_t recno;
+ u_int32_t flag;
+ int auto_commit, elemc, end, freekey, freedata;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc <= 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key data");
+ return (TCL_ERROR);
+ }
+
+ freekey = freedata = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * If it is a QUEUE or RECNO database, the key is a record number
+ * and must be setup up to contain a db_recno_t. Otherwise the
+ * key is a "string".
+ */
+ (void)dbp->get_type(dbp, &type);
+
+ /*
+ * We need to determine where the end of required args are. If we
+ * are using a QUEUE/RECNO db and -append, then there is just one
+ * req arg (data). Otherwise there are two (key data).
+ *
+ * We preparse the list to determine this since we need to know
+ * to properly check # of args for other options below.
+ */
+ end = objc - 2;
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ i = 2;
+ while (i < objc - 1) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], dbputapp,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ continue;
+ switch ((enum dbputapp)optindex) {
+ case DBPUT_APPEND0:
+ end = objc - 1;
+ break;
+ }
+ }
+ }
+ Tcl_ResetResult(interp);
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ auto_commit = 0;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ dbputopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum dbputopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
+ case DBPUT_TXN:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBPUT_AUTO_COMMIT:
+ auto_commit = 1;
+ break;
+ case DBPUT_APPEND:
+ FLAG_CHECK(flag);
+ flag = DB_APPEND;
+ break;
+ case DBPUT_NOOVER:
+ FLAG_CHECK(flag);
+ flag = DB_NOOVERWRITE;
+ break;
+ case DBPUT_PART:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags = DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (auto_commit)
+ flag |= DB_AUTO_COMMIT;
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * If we are a recno db and we are NOT using append, then the 2nd
+ * last arg is the key.
+ */
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ key.data = &recno;
+ key.ulen = key.size = sizeof(db_recno_t);
+ key.flags = DB_DBT_USERMEM;
+ if (flag == DB_APPEND)
+ recno = 0;
+ else {
+ result = _GetUInt32(interp, objv[objc-2], &recno);
+ if (result != TCL_OK)
+ return (result);
+ }
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ goto out;
+ }
+ data.data = dtmp;
+ _debug_check();
+ ret = dbp->put(dbp, txn, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put");
+ if (ret == 0 &&
+ (type == DB_RECNO || type == DB_QUEUE) && flag == DB_APPEND) {
+ res = Tcl_NewLongObj((long)recno);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_db_get --
+ */
+static int
+tcl_DbGet(interp, objc, objv, dbp, ispget)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ int ispget; /* 1 for pget, 0 for get */
+{
+ static char *dbgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-multi",
+#endif
+ "-consume",
+ "-consume_wait",
+ "-get_both",
+ "-glob",
+ "-partial",
+ "-recno",
+ "-rmw",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum dbgetopts {
+#if CONFIG_TEST
+ DBGET_DIRTY,
+ DBGET_MULTI,
+#endif
+ DBGET_CONSUME,
+ DBGET_CONSUME_WAIT,
+ DBGET_BOTH,
+ DBGET_GLOB,
+ DBGET_PART,
+ DBGET_RECNO,
+ DBGET_RMW,
+ DBGET_TXN,
+ DBGET_ENDARG
+ };
+ DBC *dbc;
+ DBT key, pkey, data, save;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *retlist;
+ void *dtmp, *ktmp;
+ u_int32_t flag, cflag, isdup, mflag, rmw;
+ int bufsize, elemc, end, endarg, freekey, freedata, i;
+ int optindex, result, ret, useglob, useprecno, userecno;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+ db_recno_t precno, recno;
+
+ result = TCL_OK;
+ freekey = freedata = 0;
+ cflag = endarg = flag = mflag = rmw = 0;
+ useglob = userecno = useprecno = 0;
+ txn = NULL;
+ pattern = prefix = NULL;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ memset(&save, 0, sizeof(save));
+
+ /* For the primary key in a pget call. */
+ memset(&pkey, 0, sizeof(pkey));
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ (void)dbp->get_type(dbp, &type);
+ end = objc;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbgetopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto out;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbgetopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_DIRTY:
+ rmw |= DB_DIRTY_READ;
+ break;
+ case DBGET_MULTI:
+ mflag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
+ case DBGET_BOTH:
+ /*
+ * Change 'end' and make sure we aren't already past
+ * the new end.
+ */
+ if (i > objc - 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-get_both key data?");
+ result = TCL_ERROR;
+ break;
+ }
+ end = objc - 2;
+ FLAG_CHECK(flag);
+ flag = DB_GET_BOTH;
+ break;
+ case DBGET_TXN:
+ if (i >= end) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Get: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGET_GLOB:
+ useglob = 1;
+ end = objc - 1;
+ break;
+ case DBGET_CONSUME:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME;
+ break;
+ case DBGET_CONSUME_WAIT:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME_WAIT;
+ break;
+ case DBGET_RECNO:
+ end = objc - 1;
+ userecno = 1;
+ if (type != DB_RECNO && type != DB_QUEUE) {
+ FLAG_CHECK(flag);
+ flag = DB_SET_RECNO;
+ }
+ break;
+ case DBGET_RMW:
+ rmw |= DB_RMW;
+ break;
+ case DBGET_PART:
+ end = objc - 1;
+ if (i == end) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ save.flags = DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &save.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &save.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ case DBGET_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+ if (result != TCL_OK)
+ break;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ if (type == DB_RECNO || type == DB_QUEUE)
+ userecno = 1;
+
+ /*
+ * Check args we have left versus the flags we were given.
+ * We might have 0, 1 or 2 left. If we have 0, it must
+ * be DB_CONSUME*, if 2, then DB_GET_BOTH, all others should
+ * be 1.
+ */
+ if (((flag == DB_CONSUME || flag == DB_CONSUME_WAIT) && i != objc) ||
+ (flag == DB_GET_BOTH && i != objc - 2)) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given based on flags specified\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else if (flag == 0 && i != objc - 1) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given\n", TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ /*
+ * XXX
+ * We technically shouldn't be looking inside the dbp like this,
+ * but this is the only way to figure out whether the primary
+ * key should also be a recno.
+ */
+ if (ispget) {
+ if (dbp->s_primary != NULL &&
+ (dbp->s_primary->type == DB_RECNO ||
+ dbp->s_primary->type == DB_QUEUE))
+ useprecno = 1;
+ }
+
+ /*
+ * Check for illegal combos of options.
+ */
+ if (useglob && (userecno || flag == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE)) {
+ Tcl_SetResult(interp,
+ "Cannot use -glob and record numbers.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (useglob && flag == DB_GET_BOTH) {
+ Tcl_SetResult(interp,
+ "Only one of -glob or -get_both can be specified.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ if (useglob)
+ pattern = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * This is the list we return
+ */
+ retlist = Tcl_NewListObj(0, NULL);
+ save.flags |= DB_DBT_MALLOC;
+
+ /*
+ * isdup is used to know if we support duplicates. If not, we
+ * can just do a db->get call and avoid using cursors.
+ * XXX
+ * When there is a db->get_flags method, it should be used.
+ * isdup = dbp->get_flags(dbp) & DB_DUP;
+ * For now we illegally peek.
+ * XXX
+ */
+ isdup = dbp->flags & DB_AM_DUP;
+
+ /*
+ * If the database doesn't support duplicates or we're performing
+ * ops that don't require returning multiple items, use DB->get
+ * instead of a cursor operation.
+ */
+ if (pattern == NULL && (isdup == 0 || mflag != 0 ||
+ flag == DB_SET_RECNO || flag == DB_GET_BOTH ||
+ flag == DB_CONSUME || flag == DB_CONSUME_WAIT)) {
+ if (flag == DB_GET_BOTH) {
+ if (userecno) {
+ result = _GetUInt32(interp,
+ objv[(objc - 2)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ /*
+ * Already checked args above. Fill in key and save.
+ * Save is used in the dbp->get call below to fill in
+ * data.
+ *
+ * If the "data" here is really a primary key--that
+ * is, if we're in a pget--and that primary key
+ * is a recno, treat it appropriately as an int.
+ */
+ if (useprecno) {
+ result = _GetUInt32(interp,
+ objv[objc - 1], &precno);
+ if (result == TCL_OK) {
+ save.data = &precno;
+ save.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &save.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ save.data = dtmp;
+ }
+ } else if (flag != DB_CONSUME && flag != DB_CONSUME_WAIT) {
+ if (userecno) {
+ result = _GetUInt32(
+ interp, objv[(objc - 1)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ if (mflag & DB_MULTIPLE) {
+ if ((ret = __os_malloc(dbp->dbenv,
+ bufsize, &save.data)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ goto out;
+ }
+ save.ulen = bufsize;
+ F_CLR(&save, DB_DBT_MALLOC);
+ F_SET(&save, DB_DBT_USERMEM);
+ }
+ }
+
+ data = save;
+
+ if (ispget) {
+ if (flag == DB_GET_BOTH) {
+ pkey.data = save.data;
+ pkey.size = save.size;
+ data.data = NULL;
+ data.size = 0;
+ }
+ F_SET(&pkey, DB_DBT_MALLOC);
+ _debug_check();
+ ret = dbp->pget(dbp,
+ txn, &key, &pkey, &data, flag | rmw);
+ } else {
+ _debug_check();
+ ret = dbp->get(dbp,
+ txn, &key, &data, flag | rmw | mflag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret),
+ "db get");
+ if (ret == 0) {
+ /*
+ * Success. Return a list of the form {name value}
+ * If it was a recno in key.data, we need to convert
+ * into a string/object representation of that recno.
+ */
+ if (mflag & DB_MULTIPLE)
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if (type == DB_RECNO || type == DB_QUEUE)
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 1, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListRecnoElem(interp,
+ retlist, *(db_recno_t *)key.data,
+ data.data, data.size);
+ else {
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 0, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ }
+ }
+ /*
+ * Free space from DBT.
+ *
+ * If we set DB_DBT_MALLOC, we need to free the space if
+ * and only if we succeeded (and thus if DB allocated
+ * anything). If DB_DBT_MALLOC is not set, this is a bulk
+ * get buffer, and needs to be freed no matter what.
+ */
+ if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0)
+ __os_ufree(dbp->dbenv, data.data);
+ else if (!F_ISSET(&data, DB_DBT_MALLOC))
+ __os_free(dbp->dbenv, data.data);
+ if (ispget && ret == 0)
+ __os_ufree(dbp->dbenv, pkey.data);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+ goto out;
+ }
+
+ if (userecno) {
+ result = _GetUInt32(interp, objv[(objc - 1)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db cursor");
+ if (result == TCL_ERROR)
+ goto out;
+
+ /*
+ * At this point, we have a cursor, if we have a pattern,
+ * we go to the nearest one and step forward until we don't
+ * have any more that match the pattern prefix. If we have
+ * an exact key, we go to that key position, and step through
+ * all the duplicates. In either case we build up a list of
+ * the form {{key data} {key data}...} along the way.
+ */
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ if (pattern) {
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out1;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ /*
+ * If they give us an empty pattern string
+ * (i.e. -glob *), go through entire DB.
+ */
+ if (strlen(prefix) == 0)
+ cflag = DB_FIRST;
+ else
+ cflag = DB_SET_RANGE;
+ } else
+ cflag = DB_SET;
+ if (ispget) {
+ _debug_check();
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else {
+ _debug_check();
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+ "db get (cursor)");
+ if (result == TCL_ERROR)
+ goto out1;
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ free(data.data);
+ goto out1;
+ }
+ if (pattern)
+ cflag = DB_NEXT;
+ else
+ cflag = DB_NEXT_DUP;
+
+ while (ret == 0 && result == TCL_OK) {
+ /*
+ * Build up our {name value} sublist
+ */
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &pkey, useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ if (ispget)
+ free(pkey.data);
+ free(data.data);
+ if (result != TCL_OK)
+ break;
+		/*
+		 * Reset the DBTs and fetch the next matching item.
+		 */
+ memset(&key, 0, sizeof(key));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ if (ispget) {
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ free(data.data);
+ break;
+ }
+ }
+out1:
+ dbc->c_close(dbc);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ if (prefix != NULL)
+ __os_free(dbp->dbenv, prefix);
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_db_delete --
+ */
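+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * usual widget sub-command name "del", a session might look like:
+ *
+ *	db0 del somekey			(delete one exact key)
+ *	db0 del -txn $t somekey		(the same, inside a transaction)
+ *	db0 del -glob prefix*		(delete all keys sharing "prefix")
+ */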
+static int
+tcl_DbDelete(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbdelopts[] = {
+ "-auto_commit",
+ "-glob",
+ "-txn",
+ NULL
+ };
+ enum dbdelopts {
+ DBDEL_AUTO_COMMIT,
+ DBDEL_GLOB,
+ DBDEL_TXN
+ };
+ DBC *dbc;
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ void *ktmp;
+ db_recno_t recno;
+ int freekey, i, optindex, result, ret;
+ u_int32_t flag;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ freekey = 0;
+ flag = 0;
+ pattern = prefix = NULL;
+ txn = NULL;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ /*
+ * The first arg must be -auto_commit, -glob, -txn or a list of keys.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbdelopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * If we don't have a -auto_commit, -glob or -txn,
+ * then the remaining args must be exact keys.
+ * Reset the result so we don't get an errant error
+ * message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbdelopts)optindex) {
+ case DBDEL_TXN:
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Delete: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBDEL_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBDEL_GLOB:
+ /*
+ * Get the pattern. Get the prefix and use cursors to
+ * get all the data items.
+ */
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ pattern = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+
+ if (result != TCL_OK)
+ goto out;
+ /*
+ * XXX
+	 * For consistency with get, we have decided, for the moment, to
+	 * allow -glob or one key, not many.  The code was originally
+	 * written to take many keys and we'll leave it that way, because
+	 * tcl_DbGet may one day accept many disjoint keys to get, rather
+	 * than one, and at that time we'd make delete consistent.  In
+	 * any case, the code is already here and there is no need to remove
+	 * it; just check that we only have one arg left.
+ *
+ * If we have a pattern AND more keys to process, there is an error.
+ * Either we have some number of exact keys, or we have a pattern.
+ *
+ * If we have a pattern and an auto commit flag, there is an error.
+ */
+ if (pattern == NULL) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ } else {
+ if (i != objc) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (flag & DB_AUTO_COMMIT) {
+ Tcl_SetResult(interp,
+ "Cannot use -auto_commit and patterns.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+
+ /*
+ * If we have remaining args, they are all exact keys. Call
+ * DB->del on each of those keys.
+ *
+ * If it is a RECNO database, the key is a record number and must be
+ * setup up to contain a db_recno_t. Otherwise the key is a "string".
+ */
+ (void)dbp->get_type(dbp, &type);
+ ret = 0;
+ while (i < objc && ret == 0) {
+ memset(&key, 0, sizeof(key));
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[i++], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBDEL(ret), "db del");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ _debug_check();
+ ret = dbp->del(dbp, txn, &key, flag);
+ /*
+ * If we have any error, set up return result and stop
+ * processing keys.
+ */
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ if (ret != 0)
+ break;
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBDEL(ret), "db del");
+
+ /*
+ * At this point we've either finished or, if we have a pattern,
+ * we go to the nearest one and step forward until we don't
+ * have any more that match the pattern prefix.
+ */
+ if (pattern) {
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor");
+ goto out;
+ }
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ if (strlen(prefix) == 0)
+ flag = DB_FIRST;
+ else
+ flag = DB_SET_RANGE;
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ while (ret == 0 &&
+ memcmp(key.data, prefix, strlen(prefix)) == 0) {
+ /*
+ * Each time through here the cursor is pointing
+ * at the current valid item. Delete it and
+ * move ahead.
+ */
+ _debug_check();
+ ret = dbc->c_del(dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCDEL(ret), "db c_del");
+ break;
+ }
+ /*
+ * Deleted the current, now move to the next item
+ * in the list, check if it matches the prefix pattern.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT);
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ __os_free(dbp->dbenv, prefix);
+ dbc->c_close(dbc);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db del");
+ }
+out:
+ return (result);
+}
+
+/*
+ * tcl_db_cursor --
+ */
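+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "cursor", a new cursor widget is returned:
+ *
+ *	set c [db0 cursor -txn $t]
+ *	$c get -first
+ *	$c close
+ */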
+static int
+tcl_DbCursor(interp, objc, objv, dbp, dbcp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBC **dbcp; /* Return cursor pointer */
+{
+ static char *dbcuropts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-update",
+#endif
+ "-txn",
+ NULL
+ };
+ enum dbcuropts {
+#if CONFIG_TEST
+ DBCUR_DIRTY,
+ DBCUR_UPDATE,
+#endif
+ DBCUR_TXN
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ txn = NULL;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto out;
+ }
+ i++;
+ switch ((enum dbcuropts)optindex) {
+#if CONFIG_TEST
+ case DBCUR_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCUR_UPDATE:
+ flag |= DB_WRITECURSOR;
+ break;
+#endif
+ case DBCUR_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Cursor: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ _debug_check();
+ ret = dbp->cursor(dbp, txn, dbcp, flag);
+ if (ret != 0)
+ result = _ErrorSetup(interp, ret, "db cursor");
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbAssociate --
+ * Call DB->associate().
+ */
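+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "associate", a secondary database is attached
+ * by naming a Tcl proc that extracts the secondary key from the data:
+ *
+ *	proc extract_skey {pkey data} { return [string range $data 0 3] }
+ *	db0 associate -create extract_skey db1
+ */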
+static int
+tcl_DbAssociate(interp, objc, objv, dbp)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB *dbp;
+{
+ static char *dbaopts[] = {
+ "-auto_commit",
+ "-create",
+ "-txn",
+ NULL
+ };
+ enum dbaopts {
+ DBA_AUTO_COMMIT,
+ DBA_CREATE,
+ DBA_TXN
+ };
+ DB *sdbp;
+ DB_TXN *txn;
+ DBTCL_INFO *sdbip;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+ u_int32_t flag;
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "[callback] secondary");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbaopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbaopts)optindex) {
+ case DBA_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBA_CREATE:
+ flag |= DB_CREATE;
+ break;
+ case DBA_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+
+ /*
+ * Better be 1 or 2 args left. The last arg must be the sdb
+ * handle. If 2 args then objc-2 is the callback proc, else
+ * we have a NULL callback.
+ */
+ /* Get the secondary DB handle. */
+ arg = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+ sdbp = NAME_TO_DB(arg);
+ if (sdbp == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid database handle: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * The callback is simply a Tcl object containing the name
+ * of the callback proc, which is the second-to-last argument.
+ *
+ * Note that the callback needs to go in the *secondary* DB handle's
+ * info struct; we may have multiple secondaries with different
+ * callbacks.
+ */
+ sdbip = (DBTCL_INFO *)sdbp->api_internal;
+ if (i != objc - 1) {
+ /*
+ * We have 2 args, get the callback.
+ */
+ sdbip->i_second_call = objv[objc - 2];
+ Tcl_IncrRefCount(sdbip->i_second_call);
+
+ /* Now call associate. */
+ _debug_check();
+ ret = dbp->associate(dbp, txn, sdbp, tcl_second_call, flag);
+ } else {
+ /*
+ * We have a NULL callback.
+ */
+ sdbip->i_second_call = NULL;
+ ret = dbp->associate(dbp, txn, sdbp, NULL, flag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "associate");
+
+ return (result);
+}
+
+/*
+ * tcl_second_call --
+ * Callback function for secondary indices. Get the callback
+ * out of ip->i_second_call and call it.
+ */
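+/*
+ * Calling convention (illustrative note, not part of the original
+ * source): the proc registered via "associate" is invoked as
+ *
+ *	procname pkey data
+ *
+ * with both arguments passed as Tcl byte arrays; whatever the proc
+ * returns is copied below into skey as the secondary key.
+ */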
+static int
+tcl_second_call(dbp, pkey, data, skey)
+ DB *dbp;
+ const DBT *pkey, *data;
+ DBT *skey;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *pobj, *dobj, *objv[3];
+ int len, result, ret;
+ void *retbuf, *databuf;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = ip->i_second_call;
+
+ /*
+ * Create two ByteArray objects, with the contents of the pkey
+ * and data DBTs that are our inputs.
+ */
+ pobj = Tcl_NewByteArrayObj(pkey->data, pkey->size);
+ Tcl_IncrRefCount(pobj);
+ dobj = Tcl_NewByteArrayObj(data->data, data->size);
+ Tcl_IncrRefCount(dobj);
+
+ objv[1] = pobj;
+ objv[2] = dobj;
+
+ result = Tcl_EvalObjv(interp, 3, objv, 0);
+
+ Tcl_DecrRefCount(pobj);
+ Tcl_DecrRefCount(dobj);
+
+ if (result != TCL_OK) {
+ __db_err(dbp->dbenv,
+ "Tcl callback function failed with code %d", result);
+ return (EINVAL);
+ }
+
+ retbuf =
+ Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &len);
+
+ /*
+ * retbuf is owned by Tcl; copy it into malloc'ed memory.
+	 * We need to use __os_umalloc rather than __os_malloc because this will
+ * be freed by DB using __os_ufree--the DB_DBT_APPMALLOC flag
+ * tells DB to free application-allocated memory.
+ */
+ if ((ret = __os_umalloc(dbp->dbenv, len, &databuf)) != 0)
+ return (ret);
+ memcpy(databuf, retbuf, len);
+
+ skey->data = databuf;
+ skey->size = len;
+ F_SET(skey, DB_DBT_APPMALLOC);
+
+ return (0);
+}
+
+/*
+ * tcl_db_join --
+ */
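+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "join", each argument is an open cursor that
+ * has already been positioned in a secondary database, and the result
+ * is a join cursor over the primary:
+ *
+ *	set c1 [db_byage cursor]
+ *	$c1 get -set 25
+ *	set c2 [db_bycity cursor]
+ *	$c2 get -set Boston
+ *	set jc [db0 join $c1 $c2]
+ *
+ * The db_byage/db_bycity handles above are assumed example names.
+ */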
+static int
+tcl_DbJoin(interp, objc, objv, dbp, dbcp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBC **dbcp; /* Cursor pointer */
+{
+ static char *dbjopts[] = {
+ "-nosort",
+ NULL
+ };
+ enum dbjopts {
+ DBJ_NOSORT
+ };
+ DBC **listp;
+ u_int32_t flag;
+ int adj, i, j, optindex, size, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "curs1 curs2 ...");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ adj = i;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbjopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbjopts)optindex) {
+ case DBJ_NOSORT:
+ flag |= DB_JOIN_NOSORT;
+ adj++;
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ /*
+ * Allocate one more for NULL ptr at end of list.
+ */
+ size = sizeof(DBC *) * ((objc - adj) + 1);
+ ret = __os_malloc(dbp->dbenv, size, &listp);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ memset(listp, 0, size);
+ for (j = 0, i = adj; i < objc; i++, j++) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ listp[j] = NAME_TO_DBC(arg);
+ if (listp[j] == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Join: Invalid cursor: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+ listp[j] = NULL;
+ _debug_check();
+ ret = dbp->join(dbp, listp, dbcp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
+
+out:
+ __os_free(dbp->dbenv, listp);
+ return (result);
+}
+
+/*
+ * tcl_db_getjoin --
+ */
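+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "get_join", each argument is a {db key} pair;
+ * the cursors and the join itself are handled internally and a list of
+ * {key data} pairs from the primary is returned:
+ *
+ *	db0 get_join {db_byage 25} {db_bycity Boston}
+ *
+ * The db_byage/db_bycity handles above are assumed example names.
+ */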
+static int
+tcl_DbGetjoin(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbgetjopts[] = {
+#if CONFIG_TEST
+ "-nosort",
+#endif
+ "-txn",
+ NULL
+ };
+ enum dbgetjopts {
+#if CONFIG_TEST
+ DBGETJ_NOSORT,
+#endif
+ DBGETJ_TXN
+ };
+ DB_TXN *txn;
+ DB *elemdbp;
+ DBC **listp;
+ DBC *dbc;
+ DBT key, data;
+ Tcl_Obj **elemv, *retlist;
+ void *ktmp;
+ u_int32_t flag;
+ int adj, elemc, freekey, i, j, optindex, result, ret, size;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ...");
+ return (TCL_ERROR);
+ }
+
+ txn = NULL;
+ i = 2;
+ adj = i;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbgetjopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbgetjopts)optindex) {
+#if CONFIG_TEST
+ case DBGETJ_NOSORT:
+ flag |= DB_JOIN_NOSORT;
+ adj++;
+ break;
+#endif
+ case DBGETJ_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ adj += 2;
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "GetJoin: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ size = sizeof(DBC *) * ((objc - adj) + 1);
+ ret = __os_malloc(NULL, size, &listp);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ memset(listp, 0, size);
+ for (j = 0, i = adj; i < objc; i++, j++) {
+ /*
+ * Get each sublist as {db key}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp, "Lists must be {db key}",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * Get a pointer to that open db. Then, open a cursor in
+ * that db, and go to the "key" place.
+ */
+ elemdbp = NAME_TO_DB(Tcl_GetStringFromObj(elemv[0], NULL));
+ if (elemdbp == NULL) {
+ snprintf(msg, MSG_SIZE, "Get_join: Invalid db: %s\n",
+ Tcl_GetStringFromObj(elemv[0], NULL));
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto out;
+ }
+ ret = elemdbp->cursor(elemdbp, txn, &listp[j], 0);
+ if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor")) == TCL_ERROR)
+ goto out;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = _CopyObjBytes(interp, elemv[elemc-1], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db join");
+ goto out;
+ }
+ key.data = ktmp;
+ ret = (listp[j])->c_get(listp[j], &key, &data, DB_SET);
+ if ((result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+ "db cget")) == TCL_ERROR)
+ goto out;
+ }
+ listp[j] = NULL;
+ _debug_check();
+ ret = dbp->join(dbp, listp, &dbc, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
+ if (result == TCL_ERROR)
+ goto out;
+
+ retlist = Tcl_NewListObj(0, NULL);
+ while (ret == 0 && result == TCL_OK) {
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.flags |= DB_DBT_MALLOC;
+ data.flags |= DB_DBT_MALLOC;
+ ret = dbc->c_get(dbc, &key, &data, 0);
+ /*
+ * Build up our {name value} sublist
+ */
+ if (ret == 0) {
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ free(key.data);
+ free(data.data);
+ }
+ }
+ dbc->c_close(dbc);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ while (j) {
+ if (listp[j])
+ (listp[j])->c_close(listp[j]);
+ j--;
+ }
+ __os_free(dbp->dbenv, listp);
+ return (result);
+}
+
+/*
+ * tcl_DbCount --
+ */
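+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "count", the number of duplicate data items
+ * stored for the given key is returned (0 if the key is not present):
+ *
+ *	db0 count somekey
+ */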
+static int
+tcl_DbCount(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ Tcl_Obj *res;
+ DBC *dbc;
+ DBT key, data;
+ void *ktmp;
+ db_recno_t count, recno;
+ int freekey, result, ret;
+
+ result = TCL_OK;
+ count = 0;
+ freekey = 0;
+ res = NULL;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+
+ /*
+ * Get the count for our key.
+	 * We do this by getting a cursor for this DB, moving the cursor
+	 * to the key's location, and getting a count on that cursor.
+ */
+ ret = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+ * If it's a queue or recno database, we must make sure to
+ * treat the key as a recno rather than as a byte string.
+ */
+ if (dbp->type == DB_RECNO || dbp->type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[2], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db count");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ _debug_check();
+ ret = dbp->cursor(dbp, NULL, &dbc, 0);
+	if (ret != 0) {
+		result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+		    "db cursor");
+		/* No cursor was created, so do not reach c_close at "out". */
+		if (freekey)
+			(void)__os_free(dbp->dbenv, ktmp);
+		return (result);
+	}
+ /*
+ * Move our cursor to the key.
+ */
+ ret = dbc->c_get(dbc, &key, &data, DB_SET);
+ if (ret == DB_NOTFOUND)
+ count = 0;
+ else {
+ ret = dbc->c_count(dbc, &count, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db c count");
+ goto out;
+ }
+ }
+ res = Tcl_NewLongObj((long)count);
+ Tcl_SetObjResult(interp, res);
+out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ (void)dbc->c_close(dbc);
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_DbKeyRange --
+ */
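+/*
+ * Illustrative usage (not part of the original source, CONFIG_TEST
+ * builds only); assuming the widget sub-command name "keyrange", the
+ * {less equal greater} proportions from DB->key_range are returned:
+ *
+ *	db0 keyrange somekey
+ */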
+static int
+tcl_DbKeyRange(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbkeyropts[] = {
+ "-txn",
+ NULL
+ };
+ enum dbkeyropts {
+ DBKEYR_TXN
+ };
+ DB_TXN *txn;
+ DB_KEY_RANGE range;
+ DBT key;
+ DBTYPE type;
+ Tcl_Obj *myobjv[3], *retlist;
+ void *ktmp;
+ db_recno_t recno;
+ u_int32_t flag;
+ int freekey, i, myobjc, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? key");
+ return (TCL_ERROR);
+ }
+
+ txn = NULL;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbkeyropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbkeyropts)optindex) {
+ case DBKEYR_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "KeyRange: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+ (void)dbp->get_type(dbp, &type);
+ ret = 0;
+ /*
+ * Make sure we have a key.
+ */
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ memset(&key, 0, sizeof(key));
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[i], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db keyrange");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ _debug_check();
+ ret = dbp->key_range(dbp, txn, &key, &range, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db keyrange");
+ if (result == TCL_ERROR)
+ goto out;
+
+ /*
+ * If we succeeded, set up return list.
+ */
+ myobjc = 3;
+ myobjv[0] = Tcl_NewDoubleObj(range.less);
+ myobjv[1] = Tcl_NewDoubleObj(range.equal);
+ myobjv[2] = Tcl_NewDoubleObj(range.greater);
+ retlist = Tcl_NewListObj(myobjc, myobjv);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+#endif
+
+/*
+ * tcl_DbTruncate --
+ */
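+/*
+ * Illustrative usage (not part of the original source); assuming the
+ * widget sub-command name "truncate", all records are discarded and the
+ * count of discarded records is returned:
+ *
+ *	db0 truncate -txn $t
+ */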
+static int
+tcl_DbTruncate(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbcuropts[] = {
+ "-auto_commit",
+ "-txn",
+ NULL
+ };
+ enum dbcuropts {
+ DBTRUNC_AUTO_COMMIT,
+ DBTRUNC_TXN
+ };
+ DB_TXN *txn;
+ Tcl_Obj *res;
+ u_int32_t count, flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ txn = NULL;
+ flag = 0;
+ result = TCL_OK;
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto out;
+ }
+ i++;
+ switch ((enum dbcuropts)optindex) {
+ case DBTRUNC_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBTRUNC_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Truncate: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ _debug_check();
+ ret = dbp->truncate(dbp, txn, &count, flag);
+ if (ret != 0)
+ result = _ErrorSetup(interp, ret, "db truncate");
+
+ else {
+ res = Tcl_NewLongObj((long)count);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ return (result);
+}
diff --git a/libdb/tcl/tcl_db_pkg.c b/libdb/tcl/tcl_db_pkg.c
new file mode 100644
index 0000000..96bf184
--- /dev/null
+++ b/libdb/tcl/tcl_db_pkg.c
@@ -0,0 +1,3117 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#if CONFIG_TEST
+#define DB_DBM_HSEARCH 1
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/tcl_db.h"
+
+/* XXX we must declare global data in just one place */
+DBTCL_GLOBAL __dbtcl_global;
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int berkdb_Cmd __P((ClientData, Tcl_Interp *, int,
+ Tcl_Obj * CONST*));
+static int bdb_EnvOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB_ENV **));
+static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB **));
+static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+static int tcl_bt_compare __P((DB *, const DBT *, const DBT *));
+static int tcl_compare_callback __P((DB *, const DBT *, const DBT *,
+ Tcl_Obj *, char *));
+static int tcl_dup_compare __P((DB *, const DBT *, const DBT *));
+static u_int32_t tcl_h_hash __P((DB *, const void *, u_int32_t));
+static int tcl_rep_send __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+
+#ifdef TEST_ALLOC
+static void * tcl_db_malloc __P((size_t));
+static void * tcl_db_realloc __P((void *, size_t));
+static void tcl_db_free __P((void *));
+#endif
+
+/*
+ * Db_tcl_Init --
+ *
+ * This is a package initialization procedure, which is called by Tcl when
+ * this package is to be added to an interpreter. The name is based on the
+ * name of the shared library, currently libdb_tcl-X.Y.so, which Tcl uses
+ * to determine the name of this function.
+ */
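+/*
+ * For illustration (not part of the original source): a Tcl script
+ * would typically pull the package in with something like
+ *
+ *	load $libdir/libdb_tcl-X.Y.so
+ *
+ * ($libdir being wherever the library was installed), after which the
+ * "berkdb" command created below becomes available, along with the
+ * linked __debug_* variables.
+ */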
+int
+Db_tcl_Init(interp)
+ Tcl_Interp *interp; /* Interpreter in which the package is
+ * to be made available. */
+{
+ int code;
+
+ code = Tcl_PkgProvide(interp, "Db_tcl", "1.0");
+ if (code != TCL_OK)
+ return (code);
+
+ Tcl_CreateObjCommand(interp, "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd,
+ (ClientData)0, NULL);
+ /*
+ * Create shared global debugging variables
+ */
+ Tcl_LinkVar(interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_print", (char *)&__debug_print,
+ TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_stop", (char *)&__debug_stop,
+ TCL_LINK_INT);
+ Tcl_LinkVar(interp, "__debug_test", (char *)&__debug_test,
+ TCL_LINK_INT);
+ LIST_INIT(&__db_infohead);
+ return (TCL_OK);
+}
+
+/*
+ * berkdb_cmd --
+ * Implements the "berkdb" command.
+ * This command supports several sub commands; the primary ones are:
+ * berkdb version - Returns a list {major minor patch}
+ * berkdb env - Creates a new DB_ENV and returns a binding
+ * to a new command of the form dbenvX, where X is an
+ * integer starting at 0 (dbenv0, dbenv1, ...)
+ * berkdb open - Creates a new DB (optionally within
+ *		the given environment).  Returns a binding to a new
+ * command of the form dbX, where X is an integer
+ * starting at 0 (db0, db1, ...)
+ */
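+/*
+ * A short illustrative session (not part of the original source):
+ *
+ *	berkdb version
+ *	set env [berkdb env -create -home /tmp/dbhome -txn]
+ *	set db  [berkdb open -create -btree -env $env mydata.db]
+ *	berkdb dbremove -env $env mydata.db
+ *
+ * The env and open forms return widget commands (env0, db0, ...) that
+ * are dispatched through env_Cmd and db_Cmd (see the BDB_ENV and
+ * BDB_OPEN cases below).
+ */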
+static int
+berkdb_Cmd(notused, interp, objc, objv)
+ ClientData notused; /* Not used. */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *berkdbcmds[] = {
+#if CONFIG_TEST
+ "dbverify",
+ "handles",
+ "upgrade",
+#endif
+ "dbremove",
+ "dbrename",
+ "env",
+ "envremove",
+ "open",
+ "version",
+#if CONFIG_TEST
+ /* All below are compatibility functions */
+ "hcreate", "hsearch", "hdestroy",
+ "dbminit", "fetch", "store",
+ "delete", "firstkey", "nextkey",
+ "ndbm_open", "dbmclose",
+#endif
+ /* All below are convenience functions */
+ "rand", "random_int", "srand",
+ "debug_check",
+ NULL
+ };
+ /*
+ * All commands enums below ending in X are compatibility
+ */
+ enum berkdbcmds {
+#if CONFIG_TEST
+ BDB_DBVERIFY,
+ BDB_HANDLES,
+ BDB_UPGRADE,
+#endif
+ BDB_DBREMOVE,
+ BDB_DBRENAME,
+ BDB_ENV,
+ BDB_ENVREMOVE,
+ BDB_OPEN,
+ BDB_VERSION,
+#if CONFIG_TEST
+ BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX,
+ BDB_DBMINITX, BDB_FETCHX, BDB_STOREX,
+ BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX,
+ BDB_NDBMOPENX, BDB_DBMCLOSEX,
+#endif
+ BDB_RANDX, BDB_RAND_INTX, BDB_SRANDX,
+ BDB_DBGCKX
+ };
+ static int env_id = 0;
+ static int db_id = 0;
+
+ DB *dbp;
+#if CONFIG_TEST
+ DBM *ndbmp;
+ static int ndbm_id = 0;
+#endif
+ DBTCL_INFO *ip;
+ DB_ENV *envp;
+ Tcl_Obj *res;
+ int cmdindex, result;
+ char newname[MSG_SIZE];
+
+ COMPQUIET(notused, NULL);
+
+ Tcl_ResetResult(interp);
+ memset(newname, 0, MSG_SIZE);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the berkdbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], berkdbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ res = NULL;
+ switch ((enum berkdbcmds)cmdindex) {
+#if CONFIG_TEST
+ case BDB_DBVERIFY:
+ result = bdb_DbVerify(interp, objc, objv);
+ break;
+ case BDB_HANDLES:
+ result = bdb_Handles(interp, objc, objv);
+ break;
+ case BDB_UPGRADE:
+ result = bdb_DbUpgrade(interp, objc, objv);
+ break;
+#endif
+ case BDB_VERSION:
+ _debug_check();
+ result = bdb_Version(interp, objc, objv);
+ break;
+ case BDB_ENV:
+ snprintf(newname, sizeof(newname), "env%d", env_id);
+ ip = _NewInfo(interp, NULL, newname, I_ENV);
+ if (ip != NULL) {
+ result = bdb_EnvOpen(interp, objc, objv, ip, &envp);
+ if (result == TCL_OK && envp != NULL) {
+ env_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)env_Cmd,
+ (ClientData)envp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, envp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case BDB_DBREMOVE:
+ result = bdb_DbRemove(interp, objc, objv);
+ break;
+ case BDB_DBRENAME:
+ result = bdb_DbRename(interp, objc, objv);
+ break;
+ case BDB_ENVREMOVE:
+ result = tcl_EnvRemove(interp, objc, objv, NULL, NULL);
+ break;
+ case BDB_OPEN:
+ snprintf(newname, sizeof(newname), "db%d", db_id);
+ ip = _NewInfo(interp, NULL, newname, I_DB);
+ if (ip != NULL) {
+ result = bdb_DbOpen(interp, objc, objv, ip, &dbp);
+ if (result == TCL_OK && dbp != NULL) {
+ db_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)db_Cmd,
+ (ClientData)dbp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+#if CONFIG_TEST
+ case BDB_HCREATEX:
+ case BDB_HSEARCHX:
+ case BDB_HDESTROYX:
+ result = bdb_HCommand(interp, objc, objv);
+ break;
+ case BDB_DBMINITX:
+ case BDB_DBMCLOSEX:
+ case BDB_FETCHX:
+ case BDB_STOREX:
+ case BDB_DELETEX:
+ case BDB_FIRSTKEYX:
+ case BDB_NEXTKEYX:
+ result = bdb_DbmCommand(interp, objc, objv, DBTCL_DBM, NULL);
+ break;
+ case BDB_NDBMOPENX:
+ snprintf(newname, sizeof(newname), "ndbm%d", ndbm_id);
+ ip = _NewInfo(interp, NULL, newname, I_NDBM);
+ if (ip != NULL) {
+ result = bdb_NdbmOpen(interp, objc, objv, &ndbmp);
+ if (result == TCL_OK) {
+ ndbm_id++;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)ndbm_Cmd,
+ (ClientData)ndbmp, NULL);
+ /* Use ip->i_name - newname is overwritten */
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, ndbmp);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+#endif
+ case BDB_RANDX:
+ case BDB_RAND_INTX:
+ case BDB_SRANDX:
+ result = bdb_RandCommand(interp, objc, objv);
+ break;
+ case BDB_DBGCKX:
+ _debug_check();
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+	 * For each different arg we call a different function to create
+	 * new commands (or, if it was version, get/return it).
+ */
+ if (result == TCL_OK && res != NULL)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * bdb_EnvOpen -
+ * Implements the environment open command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 1. Call db_env_create to create the env handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DB_ENV->open to open the env.
+ * 5. Return env widget handle to user.
+ */
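+/*
+ * Illustrative invocation (not part of the original source), using a
+ * few of the flags parsed below; the home directory is an assumed
+ * example path:
+ *
+ *	berkdb env -create -home /tmp/dbhome -txn \
+ *	    -cachesize {0 1048576 1} -mode 0644
+ */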
+static int
+bdb_EnvOpen(interp, objc, objv, ip, env)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB_ENV **env; /* Environment pointer */
+{
+ static char *envopen[] = {
+#if CONFIG_TEST
+ "-auto_commit",
+ "-cdb",
+ "-cdb_alldb",
+ "-client_timeout",
+ "-lock",
+ "-lock_conflict",
+ "-lock_detect",
+ "-lock_max",
+ "-lock_max_locks",
+ "-lock_max_lockers",
+ "-lock_max_objects",
+ "-lock_timeout",
+ "-log",
+ "-log_buffer",
+ "-log_max",
+ "-log_regionmax",
+ "-mmapsize",
+ "-nommap",
+ "-overwrite",
+ "-region_init",
+ "-rep_client",
+ "-rep_logsonly",
+ "-rep_master",
+ "-rep_transport",
+ "-server",
+ "-server_timeout",
+ "-txn_timeout",
+ "-txn_timestamp",
+ "-verbose",
+ "-wrnosync",
+#endif
+ "-cachesize",
+ "-create",
+ "-data_dir",
+ "-encryptaes",
+ "-encryptany",
+ "-errfile",
+ "-errpfx",
+ "-home",
+ "-log_dir",
+ "-mode",
+ "-private",
+ "-recover",
+ "-recover_fatal",
+ "-shm_key",
+ "-system_mem",
+ "-tmp_dir",
+ "-txn",
+ "-txn_max",
+ "-use_environ",
+ "-use_environ_root",
+ NULL
+ };
+ /*
+ * !!!
+ * These have to be in the same order as the above,
+ * which is close to but not quite alphabetical.
+ */
+ enum envopen {
+#if CONFIG_TEST
+ ENV_AUTO_COMMIT,
+ ENV_CDB,
+ ENV_CDB_ALLDB,
+ ENV_CLIENT_TO,
+ ENV_LOCK,
+ ENV_CONFLICT,
+ ENV_DETECT,
+ ENV_LOCK_MAX,
+ ENV_LOCK_MAX_LOCKS,
+ ENV_LOCK_MAX_LOCKERS,
+ ENV_LOCK_MAX_OBJECTS,
+ ENV_LOCK_TIMEOUT,
+ ENV_LOG,
+ ENV_LOG_BUFFER,
+ ENV_LOG_MAX,
+ ENV_LOG_REGIONMAX,
+ ENV_MMAPSIZE,
+ ENV_NOMMAP,
+ ENV_OVERWRITE,
+ ENV_REGION_INIT,
+ ENV_REP_CLIENT,
+ ENV_REP_LOGSONLY,
+ ENV_REP_MASTER,
+ ENV_REP_TRANSPORT,
+ ENV_SERVER,
+ ENV_SERVER_TO,
+ ENV_TXN_TIMEOUT,
+ ENV_TXN_TIME,
+ ENV_VERBOSE,
+ ENV_WRNOSYNC,
+#endif
+ ENV_CACHESIZE,
+ ENV_CREATE,
+ ENV_DATA_DIR,
+ ENV_ENCRYPT_AES,
+ ENV_ENCRYPT_ANY,
+ ENV_ERRFILE,
+ ENV_ERRPFX,
+ ENV_HOME,
+ ENV_LOG_DIR,
+ ENV_MODE,
+ ENV_PRIVATE,
+ ENV_RECOVER,
+ ENV_RECOVER_FATAL,
+ ENV_SHM_KEY,
+ ENV_SYSTEM_MEM,
+ ENV_TMP_DIR,
+ ENV_TXN,
+ ENV_TXN_MAX,
+ ENV_USE_ENVIRON,
+ ENV_USE_ENVIRON_ROOT
+ };
+ Tcl_Obj **myobjv, **myobjv1;
+ time_t timestamp;
+ u_int32_t detect, gbytes, bytes, ncaches, logbufset, logmaxset;
+ u_int32_t open_flags, rep_flags, set_flags, size, uintarg;
+ u_int8_t *conflicts;
+ int i, intarg, j, mode, myobjc, nmodes, optindex;
+ int result, ret, temp;
+ long client_to, server_to, shm;
+ char *arg, *home, *passwd, *server;
+
+ result = TCL_OK;
+ mode = 0;
+ rep_flags = set_flags = 0;
+ home = NULL;
+
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here in all cases. For now, turn it on only when testing
+ * so that we exercise MUTEX_THREAD_LOCK cases.
+ *
+ * Historically, a key stumbling block was the log_get interface,
+ * which could only do relative operations in a non-threaded
+ * environment. This is no longer an issue, thanks to log cursors,
+ * but we need to look at making sure DBTCL_INFO structs
+ * are safe to share across threads (they're not mutex-protected)
+ * before we declare the Tcl interface thread-safe. Meanwhile,
+ * there's no strong reason to enable DB_THREAD.
+ */
+ open_flags = DB_JOINENV |
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
+ logmaxset = logbufset = 0;
+
+ if (objc <= 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Server code must go before the call to db_env_create.
+ */
+ server = NULL;
+ server_to = client_to = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum envopen)optindex) {
+#if CONFIG_TEST
+ case ENV_SERVER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server hostname");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_SERVER_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &server_to);
+ break;
+ case ENV_CLIENT_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-client_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &client_to);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ if (server != NULL) {
+ ret = db_env_create(env, DB_CLIENT);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ if ((ret = (*env)->set_rpc_server((*env), NULL, server,
+ client_to, server_to, 0)) != 0) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ } else {
+ /*
+ * Create the environment handle before parsing the args
+ * since we'll be modifying the environment as we parse.
+ */
+ ret = db_env_create(env, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ }
+
+ /* Hang our info pointer on the env handle, so we can do callbacks. */
+ (*env)->app_private = ip;
+
+ /*
+ * Use a Tcl-local alloc and free function so that we're sure to
+ * test whether we use umalloc/ufree in the right places.
+ */
+#ifdef TEST_ALLOC
+ (*env)->set_alloc(*env, tcl_db_malloc, tcl_db_realloc, tcl_db_free);
+#endif
+
+ /*
+ * Get the command name index from the object based on the bdbcmds
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ Tcl_ResetResult(interp);
+ if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envopen)optindex) {
+#if CONFIG_TEST
+ case ENV_SERVER:
+ case ENV_SERVER_TO:
+ case ENV_CLIENT_TO:
+ /*
+ * Already handled these, skip them and their arg.
+ */
+ i++;
+ break;
+ case ENV_AUTO_COMMIT:
+ FLD_SET(set_flags, DB_AUTO_COMMIT);
+ break;
+ case ENV_CDB:
+ FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_CDB_ALLDB:
+ FLD_SET(set_flags, DB_CDB_ALLDB);
+ break;
+ case ENV_LOCK:
+ FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_CONFLICT:
+ /*
+ * Get conflict list. List is:
+ * {nmodes {matrix}}
+ *
+ * Where matrix must be nmodes*nmodes big.
+ * Set up conflicts array to pass.
+ */
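+			/*
+			 * Illustrative only (not part of the original
+			 * source): a two-mode read/write matrix could be
+			 * passed as -lock_conflict {2 {0 1 1 1}}, i.e.
+			 * only read/read does not conflict.
+			 */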
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &nmodes);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_ListObjGetElements(interp, myobjv[1],
+ &myobjc, &myobjv1);
+ if (myobjc != (nmodes * nmodes)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ size = sizeof(u_int8_t) * nmodes*nmodes;
+ ret = __os_malloc(*env, size, &conflicts);
+ if (ret != 0) {
+ result = TCL_ERROR;
+ break;
+ }
+ for (j = 0; j < myobjc; j++) {
+ result = Tcl_GetIntFromObj(interp, myobjv1[j],
+ &temp);
+ conflicts[j] = temp;
+ if (result != TCL_OK) {
+ __os_free(NULL, conflicts);
+ break;
+ }
+ }
+ _debug_check();
+ ret = (*env)->set_lk_conflicts(*env,
+ (u_int8_t *)conflicts, nmodes);
+ __os_free(NULL, conflicts);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lk_conflicts");
+ break;
+ case ENV_DETECT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_detect policy?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (strcmp(arg, "default") == 0)
+ detect = DB_LOCK_DEFAULT;
+ else if (strcmp(arg, "expire") == 0)
+ detect = DB_LOCK_EXPIRE;
+ else if (strcmp(arg, "maxlocks") == 0)
+ detect = DB_LOCK_MAXLOCKS;
+ else if (strcmp(arg, "minlocks") == 0)
+ detect = DB_LOCK_MINLOCKS;
+ else if (strcmp(arg, "minwrites") == 0)
+ detect = DB_LOCK_MINWRITE;
+ else if (strcmp(arg, "oldest") == 0)
+ detect = DB_LOCK_OLDEST;
+ else if (strcmp(arg, "youngest") == 0)
+ detect = DB_LOCK_YOUNGEST;
+ else if (strcmp(arg, "random") == 0)
+ detect = DB_LOCK_RANDOM;
+ else {
+ Tcl_AddErrorInfo(interp,
+ "lock_detect: illegal policy");
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (*env)->set_lk_detect(*env, detect);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_detect");
+ break;
+ case ENV_LOCK_MAX:
+ case ENV_LOCK_MAX_LOCKS:
+ case ENV_LOCK_MAX_LOCKERS:
+ case ENV_LOCK_MAX_OBJECTS:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ switch ((enum envopen)optindex) {
+ case ENV_LOCK_MAX:
+ ret = (*env)->set_lk_max(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_LOCKS:
+ ret = (*env)->set_lk_max_locks(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_LOCKERS:
+ ret = (*env)->set_lk_max_lockers(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_OBJECTS:
+ ret = (*env)->set_lk_max_objects(*env,
+ uintarg);
+ break;
+ default:
+ break;
+ }
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock_max");
+ }
+ break;
+ case ENV_TXN_TIME:
+ case ENV_TXN_TIMEOUT:
+ case ENV_LOCK_TIMEOUT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)&timestamp);
+ if (result == TCL_OK) {
+ _debug_check();
+ if (optindex == ENV_TXN_TIME)
+ ret = (*env)->
+ set_tx_timestamp(*env, &timestamp);
+ else
+ ret = (*env)->set_timeout(*env,
+ (db_timeout_t)timestamp,
+ optindex == ENV_TXN_TIMEOUT ?
+ DB_SET_TXN_TIMEOUT :
+ DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "txn_timestamp");
+ }
+ break;
+ case ENV_LOG:
+ FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_LOG_BUFFER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_buffer size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_bsize(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_bsize");
+ logbufset = 1;
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env,
+ logmaxset);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logmaxset = 0;
+ logbufset = 0;
+ }
+ }
+ break;
+ case ENV_LOG_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK && logbufset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logbufset = 0;
+ } else
+ logmaxset = uintarg;
+ break;
+ case ENV_LOG_REGIONMAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_regionmax size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_regionmax(*env, uintarg);
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_regionmax");
+ }
+ break;
+ case ENV_MMAPSIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mmapsize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_mp_mmapsize(*env,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "mmapsize");
+ }
+ break;
+ case ENV_NOMMAP:
+ FLD_SET(set_flags, DB_NOMMAP);
+ break;
+ case ENV_OVERWRITE:
+ FLD_SET(set_flags, DB_OVERWRITE);
+ break;
+ case ENV_REGION_INIT:
+ _debug_check();
+ ret = (*env)->set_flags(*env, DB_REGION_INIT, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "region_init");
+ break;
+ case ENV_REP_CLIENT:
+ rep_flags = DB_REP_CLIENT;
+ break;
+ case ENV_REP_LOGSONLY:
+ rep_flags = DB_REP_LOGSONLY;
+ break;
+ case ENV_REP_MASTER:
+ rep_flags = DB_REP_MASTER;
+ break;
+ case ENV_REP_TRANSPORT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-rep_transport {envid sendproc}");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the objects containing the machine ID
+ * and the procedure name. We don't need to crack
+ * the send procedure out now, but we do convert the
+ * machine ID to an int, since set_rep_transport needs
+ * it. Even so, it'll be easier later to deal with
+ * the Tcl_Obj *, so we save that, not the int.
+ *
+ * Note that we Tcl_IncrRefCount both objects
+ * independently; Tcl is free to discard the list
+ * that they're bundled into.
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (myobjc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {envid sendproc}",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Check that the machine ID is an int. Note that
+ * we do want to use GetIntFromObj; the machine
+ * ID is explicitly an int, not a u_int32_t.
+ */
+ ip->i_rep_eid = myobjv[0];
+ Tcl_IncrRefCount(ip->i_rep_eid);
+ result = Tcl_GetIntFromObj(interp,
+ ip->i_rep_eid, &intarg);
+ if (result != TCL_OK)
+ break;
+
+ ip->i_rep_send = myobjv[1];
+ Tcl_IncrRefCount(ip->i_rep_send);
+ _debug_check();
+ ret = (*env)->set_rep_transport(*env,
+ intarg, tcl_rep_send);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rep_transport");
+ break;
+ case ENV_VERBOSE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-verbose {which on|off}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = tcl_EnvVerbose(interp, *env,
+ myobjv[0], myobjv[1]);
+ break;
+ case ENV_WRNOSYNC:
+ FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC);
+ break;
+#endif
+ case ENV_TXN:
+ FLD_SET(open_flags, DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
+ FLD_CLR(open_flags, DB_JOINENV);
+ /* Make sure we have an arg to check against! */
+ if (i < objc) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (strcmp(arg, "nosync") == 0) {
+ FLD_SET(set_flags, DB_TXN_NOSYNC);
+ i++;
+ }
+ }
+ break;
+ case ENV_CREATE:
+ FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case ENV_PRIVATE:
+ FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_RECOVER:
+ FLD_SET(open_flags, DB_RECOVER);
+ break;
+ case ENV_RECOVER_FATAL:
+ FLD_SET(open_flags, DB_RECOVER_FATAL);
+ break;
+ case ENV_SYSTEM_MEM:
+ FLD_SET(open_flags, DB_SYSTEM_MEM);
+ break;
+ case ENV_USE_ENVIRON_ROOT:
+ FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
+ break;
+ case ENV_USE_ENVIRON:
+ FLD_SET(open_flags, DB_USE_ENVIRON);
+ break;
+ case ENV_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*env)->set_cachesize(*env, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_cachesize");
+ break;
+ case ENV_SHM_KEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-shm_key key?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_shm_key(*env, shm);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "shm_key");
+ }
+ break;
+ case ENV_TXN_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_tx_max(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "txn_max");
+ }
+ break;
+ case ENV_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (ip->i_err != NULL)
+ fclose(ip->i_err);
+ ip->i_err = fopen(arg, "a");
+ if (ip->i_err != NULL) {
+ _debug_check();
+ (*env)->set_errfile(*env, ip->i_err);
+ }
+ break;
+ case ENV_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (ip->i_errpfx != NULL)
+ __os_free(NULL, ip->i_errpfx);
+ if ((ret =
+ __os_strdup(*env, arg, &ip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ if (ip->i_errpfx != NULL) {
+ _debug_check();
+ (*env)->set_errpfx(*env, ip->i_errpfx);
+ }
+ break;
+ case ENV_DATA_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_data_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
+ break;
+ case ENV_LOG_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_lg_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lg_dir");
+ break;
+ case ENV_TMP_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_tmp_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+ * We have to check this here. We want to set the log buffer
+ * size first, if it is specified. So if the user did so,
+ * then we took care of it above. But, if we get out here and
+ * logmaxset is non-zero, then they set the log_max without
+ * resetting the log buffer size, so we now have to do the
+ * call to set_lg_max, since we didn't do it above.
+ */
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, (u_int32_t)logmaxset);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_max");
+ }
+
+ if (result != TCL_OK)
+ goto error;
+
+ if (set_flags) {
+ ret = (*env)->set_flags(*env, set_flags, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the environment. Everything
+	 * is okay so far; there have been no errors.
+ *
+ * Now open the environment.
+ */
+ _debug_check();
+ ret = (*env)->open(*env, home, open_flags, mode);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env open");
+
+ if (rep_flags != 0 && result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->rep_start(*env, NULL, rep_flags);
+ result = _ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "rep_start");
+ }
+
+error: if (result == TCL_ERROR) {
+ if (ip->i_err) {
+ fclose(ip->i_err);
+ ip->i_err = NULL;
+ }
+ (void)(*env)->close(*env, 0);
+ *env = NULL;
+ }
+ return (result);
+}
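+
+/*
+ * A minimal sketch of the environment open handled above.  The option
+ * names follow the tables parsed by this routine; the "berkdb env"
+ * spelling, the home directory and the sizes are illustrative
+ * assumptions only, not a definitive reference.
+ *
+ *	set myenv [berkdb env -create -home /tmp/dbhome \
+ *	    -cachesize {0 1048576 1} -txn_max 100]
+ */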
+
+/*
+ * bdb_DbOpen --
+ * Implements the "db_create/db_open" command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 0. Preparse args to determine if we have -env.
+ * 1. Call db_create to create the db handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DB->open to open the database.
+ * 5. Return db widget handle to user.
+ */
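+/*
+ * A minimal sketch of the Tcl-level usage this routine backs.  The
+ * option names follow the bdbopen table below; the "berkdb open"
+ * spelling and the file names are illustrative assumptions only.
+ *
+ *	set db [berkdb open -create -btree -mode 0644 my.db]
+ *	set db [berkdb open -env $myenv -auto_commit -hash my.db sub1]
+ */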
+static int
+bdb_DbOpen(interp, objc, objv, ip, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB **dbp; /* DB handle */
+{
+ static char *bdbenvopen[] = {
+ "-env", NULL
+ };
+ enum bdbenvopen {
+ TCL_DB_ENV0
+ };
+ static char *bdbopen[] = {
+#if CONFIG_TEST
+ "-btcompare",
+ "-dirty",
+ "-dupcompare",
+ "-hashproc",
+ "-lorder",
+ "-minkey",
+ "-nommap",
+ "-revsplitoff",
+ "-test",
+#endif
+ "-auto_commit",
+ "-btree",
+ "-cachesize",
+ "-chksum",
+ "-create",
+ "-delim",
+ "-dup",
+ "-dupsort",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "-excl",
+ "-extent",
+ "-ffactor",
+ "-hash",
+ "-len",
+ "-mode",
+ "-nelem",
+ "-pad",
+ "-pagesize",
+ "-queue",
+ "-rdonly",
+ "-recno",
+ "-recnum",
+ "-renumber",
+ "-snapshot",
+ "-source",
+ "-truncate",
+ "-txn",
+ "-unknown",
+ "--",
+ NULL
+ };
+ enum bdbopen {
+#if CONFIG_TEST
+ TCL_DB_BTCOMPARE,
+ TCL_DB_DIRTY,
+ TCL_DB_DUPCOMPARE,
+ TCL_DB_HASHPROC,
+ TCL_DB_LORDER,
+ TCL_DB_MINKEY,
+ TCL_DB_NOMMAP,
+ TCL_DB_REVSPLIT,
+ TCL_DB_TEST,
+#endif
+ TCL_DB_AUTO_COMMIT,
+ TCL_DB_BTREE,
+ TCL_DB_CACHESIZE,
+ TCL_DB_CHKSUM,
+ TCL_DB_CREATE,
+ TCL_DB_DELIM,
+ TCL_DB_DUP,
+ TCL_DB_DUPSORT,
+ TCL_DB_ENCRYPT,
+ TCL_DB_ENCRYPT_AES,
+ TCL_DB_ENCRYPT_ANY,
+ TCL_DB_ENV,
+ TCL_DB_ERRFILE,
+ TCL_DB_ERRPFX,
+ TCL_DB_EXCL,
+ TCL_DB_EXTENT,
+ TCL_DB_FFACTOR,
+ TCL_DB_HASH,
+ TCL_DB_LEN,
+ TCL_DB_MODE,
+ TCL_DB_NELEM,
+ TCL_DB_PAD,
+ TCL_DB_PAGESIZE,
+ TCL_DB_QUEUE,
+ TCL_DB_RDONLY,
+ TCL_DB_RECNO,
+ TCL_DB_RECNUM,
+ TCL_DB_RENUMBER,
+ TCL_DB_SNAPSHOT,
+ TCL_DB_SOURCE,
+ TCL_DB_TRUNCATE,
+ TCL_DB_TXN,
+ TCL_DB_UNKNOWN,
+ TCL_DB_ENDARG
+ };
+
+ DBTCL_INFO *envip, *errip;
+ DB_TXN *txn;
+ DBTYPE type;
+ DB_ENV *envp;
+ Tcl_Obj **myobjv;
+ u_int32_t gbytes, bytes, ncaches, open_flags, uintarg;
+ int endarg, i, intarg, mode, myobjc;
+ int optindex, result, ret, set_err, set_flags, set_pfx, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *passwd, *subdb, msg[MSG_SIZE];
+
+ type = DB_UNKNOWN;
+ endarg = mode = set_err = set_flags = set_pfx = 0;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here in all cases. See comment in bdb_EnvOpen().
+ * For now, just turn it on when testing so that we exercise
+ * MUTEX_THREAD_LOCK cases.
+ */
+ open_flags =
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
+ envp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], bdbenvopen,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum bdbenvopen)optindex) {
+ case TCL_DB_ENV0:
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db open: illegal environment", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+ break;
+ }
+
+ /*
+ * Create the db handle before parsing the args
+ * since we'll be modifying the database options as we parse.
+ */
+ ret = db_create(dbp, envp, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create"));
+
+ /* Hang our info pointer on the DB handle, so we can do callbacks. */
+ (*dbp)->api_internal = ip;
+
+ /*
+ * XXX Remove restriction when err stuff is not tied to env.
+ *
+ * The DB->set_err* functions actually overwrite in the
+ * environment. So, if we are explicitly using an env,
+ * don't overwrite what we have already set up. If we are
+ * not using one, then we set up since we get a private
+ * default env.
+ */
+ /* XXX - remove this conditional if/when err is not tied to env */
+ if (envp == NULL) {
+ (*dbp)->set_errpfx((*dbp), ip->i_name);
+ (*dbp)->set_errcall((*dbp), _ErrorFunc);
+ }
+ envip = _PtrToInfo(envp); /* XXX */
+ /*
+ * If we are using an env, we keep track of err info in the env's ip.
+ * Otherwise use the DB's ip.
+ */
+ if (envip)
+ errip = envip;
+ else
+ errip = ip;
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ Tcl_ResetResult(interp);
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbopen)optindex) {
+#if CONFIG_TEST
+ case TCL_DB_BTCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-btcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * We don't need to crack it out now--we'll want
+ * to bundle it up to pass into Tcl_EvalObjv anyway.
+ * Tcl's object refcounting will--I hope--take care
+ * of the memory management here.
+ */
+ ip->i_btcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_btcompare);
+ _debug_check();
+ ret = (*dbp)->set_bt_compare(*dbp, tcl_bt_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_bt_compare");
+ break;
+ case TCL_DB_DIRTY:
+ open_flags |= DB_DIRTY_READ;
+ break;
+ case TCL_DB_DUPCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-dupcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_dupcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_dupcompare);
+ _debug_check();
+ ret = (*dbp)->set_dup_compare(*dbp, tcl_dup_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_dup_compare");
+ break;
+ case TCL_DB_HASHPROC:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-hashproc hashproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_hashproc = objv[i++];
+ Tcl_IncrRefCount(ip->i_hashproc);
+ _debug_check();
+ ret = (*dbp)->set_h_hash(*dbp, tcl_h_hash);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_h_hash");
+ break;
+ case TCL_DB_LORDER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-lorder 1234|4321");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_lorder(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_lorder");
+ }
+ break;
+ case TCL_DB_MINKEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-minkey minkey");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_bt_minkey(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_bt_minkey");
+ }
+ break;
+ case TCL_DB_NOMMAP:
+ open_flags |= DB_NOMMAP;
+ break;
+ case TCL_DB_REVSPLIT:
+ set_flags |= DB_REVSPLITOFF;
+ break;
+ case TCL_DB_TEST:
+ (*dbp)->set_h_hash(*dbp, __ham_test);
+ break;
+#endif
+ case TCL_DB_AUTO_COMMIT:
+ open_flags |= DB_AUTO_COMMIT;
+ break;
+ case TCL_DB_ENV:
+ /*
+ * Already parsed this, skip it and the env pointer.
+ */
+ i++;
+ continue;
+ case TCL_DB_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "Open: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case TCL_DB_BTREE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_BTREE;
+ break;
+ case TCL_DB_HASH:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_HASH;
+ break;
+ case TCL_DB_RECNO:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_RECNO;
+ break;
+ case TCL_DB_QUEUE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_QUEUE;
+ break;
+ case TCL_DB_UNKNOWN:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ break;
+ case TCL_DB_CREATE:
+ open_flags |= DB_CREATE;
+ break;
+ case TCL_DB_EXCL:
+ open_flags |= DB_EXCL;
+ break;
+ case TCL_DB_RDONLY:
+ open_flags |= DB_RDONLY;
+ break;
+ case TCL_DB_TRUNCATE:
+ open_flags |= DB_TRUNCATE;
+ break;
+ case TCL_DB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case TCL_DB_DUP:
+ set_flags |= DB_DUP;
+ break;
+ case TCL_DB_DUPSORT:
+ set_flags |= DB_DUPSORT;
+ break;
+ case TCL_DB_RECNUM:
+ set_flags |= DB_RECNUM;
+ break;
+ case TCL_DB_RENUMBER:
+ set_flags |= DB_RENUMBER;
+ break;
+ case TCL_DB_SNAPSHOT:
+ set_flags |= DB_SNAPSHOT;
+ break;
+ case TCL_DB_CHKSUM:
+ set_flags |= DB_CHKSUM_SHA1;
+ break;
+ case TCL_DB_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ break;
+ case TCL_DB_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case TCL_DB_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case TCL_DB_FFACTOR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-ffactor density");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_ffactor(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_h_ffactor");
+ }
+ break;
+ case TCL_DB_NELEM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-nelem nelem");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_nelem(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_h_nelem");
+ }
+ break;
+ case TCL_DB_DELIM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-delim delim");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_delim(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_delim");
+ }
+ break;
+ case TCL_DB_LEN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-len length");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_len(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_len");
+ }
+ break;
+ case TCL_DB_PAD:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-pad pad");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_pad(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_pad");
+ }
+ break;
+ case TCL_DB_SOURCE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-source file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_re_source(*dbp, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_re_source");
+ break;
+ case TCL_DB_EXTENT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-extent size");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_q_extentsize(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_q_extentsize");
+ }
+ break;
+ case TCL_DB_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (result != TCL_OK)
+ break;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_cachesize");
+ break;
+ case TCL_DB_PAGESIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_pagesize(*dbp,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set pagesize");
+ }
+ break;
+ case TCL_DB_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errip->i_err != NULL)
+ fclose(errip->i_err);
+ errip->i_err = fopen(arg, "a");
+ if (errip->i_err != NULL) {
+ _debug_check();
+ (*dbp)->set_errfile(*dbp, errip->i_err);
+ set_err = 1;
+ }
+ break;
+ case TCL_DB_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errip->i_errpfx != NULL)
+ __os_free(NULL, errip->i_errpfx);
+ if ((ret = __os_strdup((*dbp)->dbenv,
+ arg, &errip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ if (errip->i_errpfx != NULL) {
+ _debug_check();
+ (*dbp)->set_errpfx(*dbp, errip->i_errpfx);
+ set_pfx = 1;
+ }
+ break;
+ case TCL_DB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+	 * Any args we have left (there had better be 0, 1 or 2) are
+	 * file names. If we have 0, it is an in-memory db; if there is
+	 * 1, a db name; if 2, a db and subdb name.
+ */
+ if (i != objc) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp,
+ subdblen + 1, &subdb)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret),
+ TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ }
+ if (set_flags) {
+ ret = (*dbp)->set_flags(*dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+
+ /*
+ * When we get here, we have already parsed all of our args and made
+	 * all our calls to set up the database. Everything is okay so far;
+	 * there have been no errors.
+ */
+ _debug_check();
+
+ /* Open the database. */
+ ret = (*dbp)->open(*dbp, txn, db, subdb, type, open_flags, mode);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db open");
+
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (result == TCL_ERROR) {
+ (void)(*dbp)->close(*dbp, 0);
+ /*
+ * If we opened and set up the error file in the environment
+ * on this open, but we failed for some other reason, clean
+ * up and close the file.
+ *
+ * XXX when err stuff isn't tied to env, change to use ip,
+ * instead of envip. Also, set_err is irrelevant when that
+ * happens. It will just read:
+ * if (ip->i_err)
+ * fclose(ip->i_err);
+ */
+ if (set_err && errip && errip->i_err != NULL) {
+ fclose(errip->i_err);
+ errip->i_err = NULL;
+ }
+ if (set_pfx && errip && errip->i_errpfx != NULL) {
+ __os_free(envp, errip->i_errpfx);
+ errip->i_errpfx = NULL;
+ }
+ *dbp = NULL;
+ }
+ return (result);
+}
+
+/*
+ * bdb_DbRemove --
+ *	Implements the DB_ENV->dbremove and DB->remove commands.
+ */
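+/*
+ * A minimal usage sketch; the "berkdb dbremove" spelling and the file
+ * names are illustrative assumptions, the options follow the bdbrem
+ * table below.
+ *
+ *	berkdb dbremove my.db
+ *	berkdb dbremove -env $myenv -auto_commit my.db sub1
+ */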
+static int
+bdb_DbRemove(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbrem[] = {
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum bdbrem {
+ TCL_DBREM_AUTOCOMMIT,
+ TCL_DBREM_ENCRYPT,
+ TCL_DBREM_ENCRYPT_AES,
+ TCL_DBREM_ENCRYPT_ANY,
+ TCL_DBREM_ENV,
+ TCL_DBREM_TXN,
+ TCL_DBREM_ENDARG
+ };
+ DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_int32_t enc_flag, iflags, set_flags;
+ u_char *subdbtmp;
+ char *arg, *db, msg[MSG_SIZE], *passwd, *subdb;
+
+ db = subdb = NULL;
+ dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbrem)optindex) {
+ case TCL_DBREM_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBREM_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBREM_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db remove: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBREM_ENDARG:
+ endarg = 1;
+ break;
+ case TCL_DBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "Remove: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+	 * Any args we have left (there had better be 1 or 2) are file
+	 * names. If there is 1, it is a db name; if 2, a db and subdb
+	 * name.
+	 */
+	if ((i == (objc - 1)) || (i == (objc - 2))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+			if ((ret = __os_malloc(envp, subdblen + 1,
+			    &subdb)) != 0) {
+				Tcl_SetResult(interp,
+				    db_strerror(ret), TCL_STATIC);
+				return (0);
+			}
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ }
+
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ _debug_check();
+ if (dbp == NULL)
+ ret = envp->dbremove(envp, txn, db, subdb, iflags);
+ else
+ ret = dbp->remove(dbp, db, subdb, 0);
+
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db remove");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (result == TCL_ERROR && dbp != NULL)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+/*
+ * bdb_DbRename --
+ *	Implements the DB_ENV->dbrename and DB->rename commands.
+ */
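+/*
+ * A minimal usage sketch; spellings and names are illustrative
+ * assumptions, the options follow the bdbmv table below.  The last
+ * argument is the new name, optionally preceded by a subdatabase name.
+ *
+ *	berkdb dbrename my.db new.db
+ *	berkdb dbrename -env $myenv my.db sub1 sub2
+ */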
+static int
+bdb_DbRename(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbmv[] = {
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum bdbmv {
+ TCL_DBMV_AUTOCOMMIT,
+ TCL_DBMV_ENCRYPT,
+ TCL_DBMV_ENCRYPT_AES,
+ TCL_DBMV_ENCRYPT_ANY,
+ TCL_DBMV_ENV,
+ TCL_DBMV_TXN,
+ TCL_DBMV_ENDARG
+ };
+ DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
+ u_int32_t enc_flag, iflags, set_flags;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, msg[MSG_SIZE], *newname, *passwd, *subdb;
+
+ db = newname = subdb = NULL;
+ dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp,
+ 3, objv, "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbmv)optindex) {
+ case TCL_DBMV_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBMV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBMV_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db rename: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBMV_ENDARG:
+ endarg = 1;
+ break;
+ case TCL_DBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "Rename: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+	 * Any args we have left (there had better be 2 or 3) are file
+	 * names. If there are 2, a file and a new name; if 3, a file,
+	 * a subdb and a new name.
+	 */
+	if ((i == (objc - 2)) || (i == (objc - 3))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(envp, newlen + 1,
+ &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(
+ interp, 3, objv, "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ }
+
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ if (dbp == NULL)
+ ret = envp->dbrename(envp, txn, db, subdb, newname, iflags);
+ else
+ ret = dbp->rename(dbp, db, subdb, newname, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db rename");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (newname)
+ __os_free(envp, newname);
+ if (result == TCL_ERROR && dbp != NULL)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * bdb_DbVerify --
+ * Implements the DB->verify command.
+ */
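+/*
+ * A minimal usage sketch; the "berkdb dbverify" spelling and file name
+ * are illustrative assumptions, the options follow the bdbverify table
+ * below.
+ *
+ *	berkdb dbverify my.db
+ *	berkdb dbverify -env $myenv -errpfx VERIFY my.db
+ */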
+static int
+bdb_DbVerify(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbverify[] = {
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "--",
+ NULL
+ };
+ enum bdbvrfy {
+ TCL_DBVRFY_ENCRYPT,
+ TCL_DBVRFY_ENCRYPT_AES,
+ TCL_DBVRFY_ENCRYPT_ANY,
+ TCL_DBVRFY_ENV,
+ TCL_DBVRFY_ERRFILE,
+ TCL_DBVRFY_ERRPFX,
+ TCL_DBVRFY_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ FILE *errf;
+ u_int32_t enc_flag, flags, set_flags;
+ int endarg, i, optindex, result, ret;
+ char *arg, *db, *errpfx, *passwd;
+
+ envp = NULL;
+ dbp = NULL;
+ passwd = NULL;
+ result = TCL_OK;
+ db = errpfx = NULL;
+ errf = NULL;
+ flags = endarg = 0;
+ enc_flag = set_flags = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbverify,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbvrfy)optindex) {
+ case TCL_DBVRFY_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBVRFY_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBVRFY_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBVRFY_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db verify: illegal environment",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errf != NULL)
+ fclose(errf);
+ errf = fopen(arg, "a");
+ break;
+ case TCL_DBVRFY_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errpfx != NULL)
+ __os_free(envp, errpfx);
+ if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ if (errf != NULL)
+ dbp->set_errfile(dbp, errf);
+ if (errpfx != NULL)
+ dbp->set_errpfx(dbp, errpfx);
+
+ ret = dbp->verify(dbp, db, NULL, NULL, flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db verify");
+error:
+ if (errf != NULL)
+ fclose(errf);
+ if (errpfx != NULL)
+ __os_free(envp, errpfx);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+#endif
+
+/*
+ * bdb_Version --
+ * Implements the version command.
+ */
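+/*
+ * A minimal usage sketch (command spelling assumed): without -string a
+ * {major minor patch} list is returned, with -string the full version
+ * string.
+ *
+ *	berkdb version
+ *	berkdb version -string
+ */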
+static int
+bdb_Version(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbver[] = {
+ "-string", NULL
+ };
+ enum bdbver {
+ TCL_VERSTRING
+ };
+ int i, optindex, maj, min, patch, result, string, verobjc;
+ char *arg, *v;
+ Tcl_Obj *res, *verobjv[3];
+
+ result = TCL_OK;
+ string = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbver,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbver)optindex) {
+ case TCL_VERSTRING:
+ string = 1;
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ v = db_version(&maj, &min, &patch);
+ if (string)
+ res = Tcl_NewStringObj(v, strlen(v));
+ else {
+ verobjc = 3;
+ verobjv[0] = Tcl_NewIntObj(maj);
+ verobjv[1] = Tcl_NewIntObj(min);
+ verobjv[2] = Tcl_NewIntObj(patch);
+ res = Tcl_NewListObj(verobjc, verobjv);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * bdb_Handles --
+ * Implements the handles command.
+ */
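+/*
+ * A minimal usage sketch (command spelling assumed); returns the names
+ * of all currently open handles tracked in the info list.
+ *
+ *	berkdb handles
+ */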
+static int
+bdb_Handles(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DBTCL_INFO *p;
+ Tcl_Obj *res, *handle;
+
+ /*
+ * No args. Error if we have some
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return (TCL_ERROR);
+ }
+ res = Tcl_NewListObj(0, NULL);
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries)) {
+ handle = Tcl_NewStringObj(p->i_name, strlen(p->i_name));
+ if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK)
+ return (TCL_ERROR);
+ }
+ Tcl_SetObjResult(interp, res);
+ return (TCL_OK);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * bdb_DbUpgrade --
+ * Implements the DB->upgrade command.
+ */
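+/*
+ * A minimal usage sketch; the "berkdb upgrade" spelling and file name
+ * are illustrative assumptions, the options follow the bdbupg table
+ * below.
+ *
+ *	berkdb upgrade -dupsort my.db
+ *	berkdb upgrade -env $myenv my.db
+ */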
+static int
+bdb_DbUpgrade(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbupg[] = {
+ "-dupsort", "-env", "--", NULL
+ };
+ enum bdbupg {
+ TCL_DBUPG_DUPSORT,
+ TCL_DBUPG_ENV,
+ TCL_DBUPG_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ u_int32_t flags;
+ int endarg, i, optindex, result, ret;
+ char *arg, *db;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ db = NULL;
+ flags = endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbupg,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbupg)optindex) {
+ case TCL_DBUPG_DUPSORT:
+ flags |= DB_DUPSORT;
+ break;
+ case TCL_DBUPG_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db upgrade: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBUPG_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+		 * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ ret = dbp->upgrade(dbp, db, flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db upgrade");
+error:
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+#endif
+
+/*
+ * tcl_bt_compare and tcl_dup_compare --
+ * These two are basically identical internally, so may as well
+ * share code. The only differences are the name used in error
+ * reporting and the Tcl_Obj representing their respective procs.
+ */
+static int
+tcl_bt_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_btcompare, "bt_compare"));
+}
+
+static int
+tcl_dup_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_dupcompare, "dup_compare"));
+}
+
+/*
+ * tcl_compare_callback --
+ * Tcl callback for set_bt_compare and set_dup_compare. What this
+ * function does is stuff the data fields of the two DBTs into Tcl ByteArray
+ * objects, then call the procedure stored in ip->i_btcompare on the two
+ * objects. Then we return that procedure's result as the comparison.
+ */
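+/*
+ * A sketch of the Tcl side of this callback, assuming a comparison
+ * proc registered with -btcompare or -dupcompare at open time.  The
+ * proc receives the two keys (or data items) as byte arrays and must
+ * return an integer <0, 0 or >0; the proc and file names here are
+ * illustrative only.
+ *
+ *	proc my_bt_compare {a b} {
+ *		return [string compare $a $b]
+ *	}
+ *	set db [berkdb open -create -btree -btcompare my_bt_compare my.db]
+ */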
+static int
+tcl_compare_callback(dbp, dbta, dbtb, procobj, errname)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+ Tcl_Obj *procobj;
+ char *errname;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *a, *b, *resobj, *objv[3];
+ int result, cmp;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = procobj;
+
+ /*
+	 * Create two ByteArray objects from the two DBTs we've been passed.
+ * This will involve a copy, which is unpleasantly slow, but there's
+ * little we can do to avoid this (I think).
+ */
+ a = Tcl_NewByteArrayObj(dbta->data, dbta->size);
+ Tcl_IncrRefCount(a);
+ b = Tcl_NewByteArrayObj(dbtb->data, dbtb->size);
+ Tcl_IncrRefCount(b);
+
+ objv[1] = a;
+ objv[2] = b;
+
+ result = Tcl_EvalObjv(interp, 3, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * If this or the next Tcl call fails, we're doomed.
+ * There's no way to return an error from comparison functions,
+ * no way to determine what the correct sort order is, and
+ * so no way to avoid corrupting the database if we proceed.
+ * We could play some games stashing return values on the
+ * DB handle, but it's not worth the trouble--no one with
+ * any sense is going to be using this other than for testing,
+ * and failure typically means that the bt_compare proc
+ * had a syntax error in it or something similarly dumb.
+ *
+ * So, drop core. If we're not running with diagnostic
+ * mode, panic--and always return a negative number. :-)
+ */
+panic: __db_err(dbp->dbenv, "Tcl %s callback failed", errname);
+ DB_ASSERT(0);
+ return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
+ }
+
+ resobj = Tcl_GetObjResult(interp);
+ result = Tcl_GetIntFromObj(interp, resobj, &cmp);
+ if (result != TCL_OK)
+ goto panic;
+
+ Tcl_DecrRefCount(a);
+ Tcl_DecrRefCount(b);
+ return (cmp);
+}
+
+/*
+ * tcl_h_hash --
+ * Tcl callback for the hashing function. See tcl_compare_callback--
+ * this works much the same way, only we're given a buffer and a length
+ * instead of two DBTs.
+ */
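+/*
+ * A sketch of the Tcl side of this callback, assuming a hash proc
+ * registered with -hashproc at open time.  The proc receives the key
+ * bytes and must return an integer hash value; the proc name and the
+ * (deliberately trivial) hash are illustrative only.
+ *
+ *	proc my_hash {key} {
+ *		return [string length $key]
+ *	}
+ *	set db [berkdb open -create -hash -hashproc my_hash my.db]
+ */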
+static u_int32_t
+tcl_h_hash(dbp, buf, len)
+ DB *dbp;
+ const void *buf;
+ u_int32_t len;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *objv[2];
+ int result, hval;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = ip->i_hashproc;
+
+ /*
+ * Create a ByteArray for the buffer.
+ */
+ objv[1] = Tcl_NewByteArrayObj((void *)buf, len);
+ Tcl_IncrRefCount(objv[1]);
+ result = Tcl_EvalObjv(interp, 2, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * We drop core on error. See the comment in
+ * tcl_compare_callback.
+ */
+panic: __db_err(dbp->dbenv, "Tcl h_hash callback failed");
+ DB_ASSERT(0);
+ return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
+ }
+
+ result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval);
+ if (result != TCL_OK)
+ goto panic;
+
+ Tcl_DecrRefCount(objv[1]);
+ return (hval);
+}
+
+/*
+ * tcl_rep_send --
+ * Replication send callback.
+ */
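+/*
+ * A sketch of the Tcl side of this callback, based on the objv setup
+ * below: the registered proc is called with the control and rec byte
+ * arrays plus the from and to environment IDs, and its integer return
+ * value is handed back to the replication code.  The proc name and
+ * body are illustrative assumptions only.
+ *
+ *	proc my_rep_send {control rec fromid toid} {
+ *		# deliver the message to site $toid here
+ *		return 0
+ *	}
+ */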
+static int
+tcl_rep_send(dbenv, control, rec, eid, flags)
+ DB_ENV *dbenv;
+ const DBT *control, *rec;
+ int eid;
+ u_int32_t flags;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *control_o, *eid_o, *origobj, *rec_o, *resobj, *objv[5];
+ int result, ret;
+
+ COMPQUIET(flags, 0);
+
+ ip = (DBTCL_INFO *)dbenv->app_private;
+ interp = ip->i_interp;
+ objv[0] = ip->i_rep_send;
+
+ control_o = Tcl_NewByteArrayObj(control->data, control->size);
+ Tcl_IncrRefCount(control_o);
+
+ rec_o = Tcl_NewByteArrayObj(rec->data, rec->size);
+ Tcl_IncrRefCount(rec_o);
+
+ eid_o = Tcl_NewIntObj(eid);
+ Tcl_IncrRefCount(eid_o);
+
+ objv[1] = control_o;
+ objv[2] = rec_o;
+ objv[3] = ip->i_rep_eid; /* From ID */
+ objv[4] = eid_o; /* To ID */
+
+ /*
+ * We really want to return the original result to the
+ * user. So, save the result obj here, and then after
+ * we've taken care of the Tcl_EvalObjv, set the result
+ * back to this original result.
+ */
+ origobj = Tcl_GetObjResult(interp);
+ Tcl_IncrRefCount(origobj);
+ result = Tcl_EvalObjv(interp, 5, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * This probably isn't the right error behavior, but
+ * this error should only happen if the Tcl callback is
+ * somehow invalid, which is a fatal scripting bug.
+ */
+err: __db_err(dbenv, "Tcl rep_send failure");
+ return (EINVAL);
+ }
+
+ resobj = Tcl_GetObjResult(interp);
+ result = Tcl_GetIntFromObj(interp, resobj, &ret);
+ if (result != TCL_OK)
+ goto err;
+
+ Tcl_SetObjResult(interp, origobj);
+ Tcl_DecrRefCount(origobj);
+ Tcl_DecrRefCount(control_o);
+ Tcl_DecrRefCount(rec_o);
+ Tcl_DecrRefCount(eid_o);
+
+ return (ret);
+}
+
+#ifdef TEST_ALLOC
+/*
+ * tcl_db_malloc, tcl_db_realloc, tcl_db_free --
+ * Tcl-local malloc, realloc, and free functions to use for user data
+ * to exercise umalloc/urealloc/ufree. Allocate the memory as a Tcl object
+ * so we're sure to exacerbate and catch any shared-library issues.
+ */
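+/*
+ * A rough sketch of the allocation layout used below: the backing
+ * Tcl_Obj pointer is stashed immediately before the address handed to
+ * the caller, so tcl_db_realloc and tcl_db_free can recover the object
+ * from the user pointer.
+ *
+ *	[ Tcl_Obj *obj ][ caller data ... ]
+ *	^ Tcl string     ^ returned pointer
+ */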
+static void *
+tcl_db_malloc(size)
+ size_t size;
+{
+ Tcl_Obj *obj;
+ void *buf;
+
+ obj = Tcl_NewObj();
+ if (obj == NULL)
+ return (NULL);
+ Tcl_IncrRefCount(obj);
+
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+ buf = Tcl_GetString(obj);
+ memcpy(buf, &obj, sizeof(&obj));
+
+ buf = (Tcl_Obj **)buf + 1;
+ return (buf);
+}
+
+static void *
+tcl_db_realloc(ptr, size)
+ void *ptr;
+ size_t size;
+{
+ Tcl_Obj *obj;
+
+ if (ptr == NULL)
+ return (tcl_db_malloc(size));
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+
+ ptr = Tcl_GetString(obj);
+ memcpy(ptr, &obj, sizeof(&obj));
+
+ ptr = (Tcl_Obj **)ptr + 1;
+ return (ptr);
+}
+
+static void
+tcl_db_free(ptr)
+ void *ptr;
+{
+ Tcl_Obj *obj;
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_DecrRefCount(obj);
+}
+#endif
diff --git a/libdb/tcl/tcl_dbcursor.c b/libdb/tcl/tcl_dbcursor.c
new file mode 100644
index 0000000..c698892
--- /dev/null
+++ b/libdb/tcl/tcl_dbcursor.c
@@ -0,0 +1,924 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *, int));
+static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+
+/*
+ * PUBLIC: int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * dbc_Cmd --
+ * Implements the cursor command.
+ */
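+/*
+ * A minimal sketch of cursor usage; the subcommand names follow the
+ * dbccmds table below, while the handle names and the way the cursor
+ * is obtained from the db widget are illustrative assumptions.
+ *
+ *	set dbc [$db cursor]
+ *	$dbc get -first
+ *	$dbc put -current $newdata
+ *	$dbc close
+ */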
+int
+dbc_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Cursor handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *dbccmds[] = {
+#if CONFIG_TEST
+ "pget",
+#endif
+ "close",
+ "del",
+ "dup",
+ "get",
+ "put",
+ NULL
+ };
+ enum dbccmds {
+#if CONFIG_TEST
+ DBCPGET,
+#endif
+ DBCCLOSE,
+ DBCDELETE,
+ DBCDUP,
+ DBCGET,
+ DBCPUT
+ };
+ DBC *dbc;
+ DBTCL_INFO *dbip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ dbc = (DBC *)clientData;
+ dbip = _PtrToInfo((void *)dbc);
+ result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbc == NULL) {
+ Tcl_SetResult(interp, "NULL dbc pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL dbc info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the dbccmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], dbccmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ switch ((enum dbccmds)cmdindex) {
+#if CONFIG_TEST
+ case DBCPGET:
+ result = tcl_DbcGet(interp, objc, objv, dbc, 1);
+ break;
+#endif
+ case DBCCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbc->c_close(dbc);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "dbc close");
+ if (result == TCL_OK) {
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ }
+ break;
+ case DBCDELETE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbc->c_del(dbc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCDEL(ret),
+ "dbc delete");
+ break;
+ case DBCDUP:
+ result = tcl_DbcDup(interp, objc, objv, dbc);
+ break;
+ case DBCGET:
+ result = tcl_DbcGet(interp, objc, objv, dbc, 0);
+ break;
+ case DBCPUT:
+ result = tcl_DbcPut(interp, objc, objv, dbc);
+ break;
+ }
+ return (result);
+}
+
+/*
+ * tcl_DbcPut --
+ */
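+/*
+ * A minimal usage sketch; flag spellings follow the dbcutopts table
+ * below, handle and variable names are illustrative.  -after, -before
+ * and -current take only a data argument, the other flags take a key
+ * and a data argument.
+ *
+ *	$dbc put -current $newdata
+ *	$dbc put -keyfirst $key $data
+ */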
+static int
+tcl_DbcPut(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcutopts[] = {
+#if CONFIG_TEST
+ "-nodupdata",
+#endif
+ "-after",
+ "-before",
+ "-current",
+ "-keyfirst",
+ "-keylast",
+ "-partial",
+ NULL
+ };
+ enum dbcutopts {
+#if CONFIG_TEST
+ DBCPUT_NODUPDATA,
+#endif
+ DBCPUT_AFTER,
+ DBCPUT_BEFORE,
+ DBCPUT_CURRENT,
+ DBCPUT_KEYFIRST,
+ DBCPUT_KEYLAST,
+ DBCPUT_PART
+ };
+ DB *thisdbp;
+ DBT key, data;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE type;
+ Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
+ db_recno_t recno;
+ u_int32_t flag;
+ int elemc, freekey, freedata, i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = freedata = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < (objc - 1)) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcutopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcutopts)optindex) {
+#if CONFIG_TEST
+ case DBCPUT_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
+ case DBCPUT_AFTER:
+ FLAG_CHECK(flag);
+ flag = DB_AFTER;
+ break;
+ case DBCPUT_BEFORE:
+ FLAG_CHECK(flag);
+ flag = DB_BEFORE;
+ break;
+ case DBCPUT_CURRENT:
+ FLAG_CHECK(flag);
+ flag = DB_CURRENT;
+ break;
+ case DBCPUT_KEYFIRST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYFIRST;
+ break;
+ case DBCPUT_KEYLAST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYLAST;
+ break;
+ case DBCPUT_PART:
+ if (i > (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+			result = Tcl_ListObjGetElements(interp, objv[i++],
+			    &elemc, &elemv);
+			if (result != TCL_OK)
+				break;
+			if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+			 * for result. (See the check for data.doff, a few
+ * lines above and copy that.)
+ */
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database or not. If we are,
+ * then key.data is a recno, not a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL)
+ type = DB_UNKNOWN;
+ else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ return (result);
+ }
+ thisdbp = dbip->i_dbp;
+ (void)thisdbp->get_type(thisdbp, &type);
+ }
+ /*
+ * When we get here, we better have:
+ * 1 arg if -after, -before or -current
+ * 2 args in all other cases
+ */
+ if (flag == DB_AFTER || flag == DB_BEFORE || flag == DB_CURRENT) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * We want to get the key back, so we need to set
+ * up the location to get it back in.
+ */
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ recno = 0;
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ }
+ } else {
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? key data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[objc-2], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ }
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ data.data = dtmp;
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ goto out;
+ }
+ _debug_check();
+ ret = dbc->c_put(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCPUT(ret),
+ "dbc put");
+ if (ret == 0 &&
+ (flag == DB_AFTER || flag == DB_BEFORE) && type == DB_RECNO) {
+ res = Tcl_NewLongObj((long)*(db_recno_t *)key.data);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_dbc_get --
+ */
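+/*
+ * A minimal usage sketch; flag spellings follow the dbcgetopts table
+ * below, handle and variable names are illustrative.  Each call
+ * returns a list of {key data} pairs.
+ *
+ *	$dbc get -first
+ *	$dbc get -next
+ *	$dbc get -set $key
+ *	$dbc get -get_both $key $data
+ */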
+static int
+tcl_DbcGet(interp, objc, objv, dbc, ispget)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+ int ispget; /* 1 for pget, 0 for get */
+{
+ static char *dbcgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-get_both_range",
+ "-multi",
+ "-multi_key",
+#endif
+ "-current",
+ "-first",
+ "-get_both",
+ "-get_recno",
+ "-join_item",
+ "-last",
+ "-next",
+ "-nextdup",
+ "-nextnodup",
+ "-partial",
+ "-prev",
+ "-prevnodup",
+ "-rmw",
+ "-set",
+ "-set_range",
+ "-set_recno",
+ NULL
+ };
+ enum dbcgetopts {
+#if CONFIG_TEST
+ DBCGET_DIRTY,
+ DBCGET_BOTH_RANGE,
+ DBCGET_MULTI,
+ DBCGET_MULTI_KEY,
+#endif
+ DBCGET_CURRENT,
+ DBCGET_FIRST,
+ DBCGET_BOTH,
+ DBCGET_RECNO,
+ DBCGET_JOIN,
+ DBCGET_LAST,
+ DBCGET_NEXT,
+ DBCGET_NEXTDUP,
+ DBCGET_NEXTNODUP,
+ DBCGET_PART,
+ DBCGET_PREV,
+ DBCGET_PREVNODUP,
+ DBCGET_RMW,
+ DBCGET_SET,
+ DBCGET_SETRANGE,
+ DBCGET_SETRECNO
+ };
+ DB *thisdbp;
+ DBT key, data, pdata;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE ptype, type;
+ Tcl_Obj **elemv, *myobj, *retlist;
+ void *dtmp, *ktmp;
+ db_recno_t precno, recno;
+ u_int32_t flag, op;
+ int bufsize, elemc, freekey, freedata, i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = freedata = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcgetopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcgetopts)optindex) {
+#if CONFIG_TEST
+ case DBCGET_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCGET_BOTH_RANGE:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_BOTH_RANGE;
+ break;
+ case DBCGET_MULTI:
+ flag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+ case DBCGET_MULTI_KEY:
+ flag |= DB_MULTIPLE_KEY;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
+ case DBCGET_RMW:
+ flag |= DB_RMW;
+ break;
+ case DBCGET_CURRENT:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_CURRENT;
+ break;
+ case DBCGET_FIRST:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_FIRST;
+ break;
+ case DBCGET_LAST:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_LAST;
+ break;
+ case DBCGET_NEXT:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT;
+ break;
+ case DBCGET_PREV:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_PREV;
+ break;
+ case DBCGET_PREVNODUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_PREV_NODUP;
+ break;
+ case DBCGET_NEXTNODUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT_NODUP;
+ break;
+ case DBCGET_NEXTDUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT_DUP;
+ break;
+ case DBCGET_BOTH:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_BOTH;
+ break;
+ case DBCGET_RECNO:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_RECNO;
+ break;
+ case DBCGET_JOIN:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_JOIN_ITEM;
+ break;
+ case DBCGET_SET:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET;
+ break;
+ case DBCGET_SETRANGE:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET_RANGE;
+ break;
+ case DBCGET_SETRECNO:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET_RECNO;
+ break;
+ case DBCGET_PART:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that.  If you
+ * add code here, you WILL need to add the check
+ * for result.  (See the check for data.doff, a few
+ * lines above, and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database
+ * or not. If we are, then key.data is a recno, not
+ * a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL) {
+ type = DB_UNKNOWN;
+ ptype = DB_UNKNOWN;
+ } else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ thisdbp = dbip->i_dbp;
+ (void)thisdbp->get_type(thisdbp, &type);
+ if (ispget && thisdbp->s_primary != NULL)
+ (void)thisdbp->
+ s_primary->get_type(thisdbp->s_primary, &ptype);
+ else
+ ptype = DB_UNKNOWN;
+ }
+ /*
+ * When we get here, we better have:
+ * 2 args, key and data if GET_BOTH/GET_BOTH_RANGE was specified.
+ * 1 arg if -set, -set_range or -set_recno
+ * 0 in all other cases.
+ */
+ op = flag & DB_OPFLAGS_MASK;
+ switch (op) {
+ case DB_GET_BOTH:
+#if CONFIG_TEST
+ case DB_GET_BOTH_RANGE:
+#endif
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? -get_both key data");
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(
+ interp, objv[objc-2], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ if (ptype == DB_RECNO || ptype == DB_QUEUE) {
+ result = _GetUInt32(
+ interp, objv[objc-1], &precno);
+ if (result == TCL_OK) {
+ data.data = &precno;
+ data.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ goto out;
+ }
+ data.data = dtmp;
+ }
+ }
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_SET_RECNO:
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (op == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[objc - 1], &recno);
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ break;
+ default:
+ if (i != objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ result = TCL_ERROR;
+ goto out;
+ }
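+ /*
+ * No key argument was given (e.g. -first, -next, -current), so
+ * have DB allocate the returned key and, unless a bulk buffer
+ * was requested, the returned data as well.
+ */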
+ key.flags |= DB_DBT_MALLOC;
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ }
+
+ _debug_check();
+ memset(&pdata, 0, sizeof(DBT));
+ if (ispget) {
+ F_SET(&pdata, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &data, &pdata, flag);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), "dbc get");
+ if (result == TCL_ERROR)
+ goto out;
+
+ retlist = Tcl_NewListObj(0, NULL);
+ if (ret == DB_NOTFOUND)
+ goto out1;
+ if (op == DB_GET_RECNO) {
+ recno = *((db_recno_t *)data.data);
+ myobj = Tcl_NewLongObj((long)recno);
+ result = Tcl_ListObjAppendElement(interp, retlist, myobj);
+ } else {
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if ((type == DB_RECNO || type == DB_QUEUE) &&
+ key.data != NULL) {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 1,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListRecnoElem(interp, retlist,
+ *(db_recno_t *)key.data,
+ data.data, data.size);
+ } else {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ }
+ }
+ if (key.data != NULL && F_ISSET(&key, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, key.data);
+ if (data.data != NULL && F_ISSET(&data, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, data.data);
+ if (pdata.data != NULL && F_ISSET(&pdata, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, pdata.data);
+out1:
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ if (data.data != NULL && flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ __os_free(dbc->dbp->dbenv, data.data);
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+
+}
+
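+/*
+ * Example usage (a sketch; the command that creates the cursor handle
+ * is defined elsewhere in this Tcl interface):
+ *
+ *	set pair [$dbc get -first]	;# => {{key data}}
+ *	set pair [$dbc get -set $key]	;# position on an exact key
+ *	set rec  [$dbc get -get_recno]	;# => {recno}
+ *
+ * For DB_RECNO and DB_QUEUE databases the key element of each returned
+ * pair is the record number; for pget each pair becomes a
+ * {key pkey data} triple.
+ */
+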
+/*
+ * tcl_DbcDup --
+ */
+static int
+tcl_DbcDup(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcdupopts[] = {
+ "-position",
+ NULL
+ };
+ enum dbcdupopts {
+ DBCDUP_POS
+ };
+ DBC *newdbc;
+ DBTCL_INFO *dbcip, *newdbcip, *dbip;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ res = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcdupopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcdupopts)optindex) {
+ case DBCDUP_POS:
+ flag = DB_POSITION;
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * Find the cursor's info structure and its parent database's
+ * info structure; we need the parent to generate the name and
+ * id for the new cursor command.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL) {
+ Tcl_SetResult(interp, "Cursor without info structure",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+ /*
+ * Now duplicate the cursor. If successful, we need to create
+ * a new cursor command.
+ */
+
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ newdbcip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (newdbcip != NULL) {
+ ret = dbc->c_dup(dbc, &newdbc, flag);
+ if (ret == 0) {
+ dbip->i_dbdbcid++;
+ newdbcip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)newdbc, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(newdbcip, newdbc);
+ Tcl_SetObjResult(interp, res);
+ } else {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db dup");
+ _DeleteInfo(newdbcip);
+ }
+ } else {
+ Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+out:
+ return (result);
+
+}
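+
+/*
+ * Example usage (a sketch): duplicating a cursor from Tcl, e.g.
+ *
+ *	set dbc2 [$dbc dup -position]
+ *
+ * returns the name of a new cursor command (<db>.cN) positioned on the
+ * same key/data item as the original; without -position the duplicate
+ * starts out uninitialized.
+ */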
diff --git a/libdb/tcl/tcl_env.c b/libdb/tcl/tcl_env.c
new file mode 100644
index 0000000..f720079
--- /dev/null
+++ b/libdb/tcl/tcl_env.c
@@ -0,0 +1,1310 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+static int env_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+static int env_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+
+/*
+ * PUBLIC: int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * env_Cmd --
+ * Implements the "env" command.
+ */
+int
+env_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Env handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *envcmds[] = {
+#if CONFIG_TEST
+ "attributes",
+ "lock_detect",
+ "lock_id",
+ "lock_id_free",
+ "lock_id_set",
+ "lock_get",
+ "lock_stat",
+ "lock_timeout",
+ "lock_vec",
+ "log_archive",
+ "log_compare",
+ "log_cursor",
+ "log_file",
+ "log_flush",
+ "log_get",
+ "log_put",
+ "log_stat",
+ "mpool",
+ "mpool_stat",
+ "mpool_sync",
+ "mpool_trickle",
+ "mutex",
+ "rep_elect",
+ "rep_flush",
+ "rep_limit",
+ "rep_process_message",
+ "rep_request",
+ "rep_start",
+ "rep_stat",
+ "rpcid",
+ "test",
+ "txn_checkpoint",
+ "txn_id_set",
+ "txn_recover",
+ "txn_stat",
+ "txn_timeout",
+ "verbose",
+#endif
+ "close",
+ "dbremove",
+ "dbrename",
+ "txn",
+ NULL
+ };
+ enum envcmds {
+#if CONFIG_TEST
+ ENVATTR,
+ ENVLKDETECT,
+ ENVLKID,
+ ENVLKFREEID,
+ ENVLKSETID,
+ ENVLKGET,
+ ENVLKSTAT,
+ ENVLKTIMEOUT,
+ ENVLKVEC,
+ ENVLOGARCH,
+ ENVLOGCMP,
+ ENVLOGCURSOR,
+ ENVLOGFILE,
+ ENVLOGFLUSH,
+ ENVLOGGET,
+ ENVLOGPUT,
+ ENVLOGSTAT,
+ ENVMP,
+ ENVMPSTAT,
+ ENVMPSYNC,
+ ENVTRICKLE,
+ ENVMUTEX,
+ ENVREPELECT,
+ ENVREPFLUSH,
+ ENVREPLIMIT,
+ ENVREPPROCMESS,
+ ENVREPREQUEST,
+ ENVREPSTART,
+ ENVREPSTAT,
+ ENVRPCID,
+ ENVTEST,
+ ENVTXNCKP,
+ ENVTXNSETID,
+ ENVTXNRECOVER,
+ ENVTXNSTAT,
+ ENVTXNTIMEOUT,
+ ENVVERB,
+#endif
+ ENVCLOSE,
+ ENVDBREMOVE,
+ ENVDBRENAME,
+ ENVTXN
+ };
+ DBTCL_INFO *envip, *logcip;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ Tcl_Obj *res;
+ char newname[MSG_SIZE];
+ int cmdindex, result, ret;
+ u_int32_t newval;
+#if CONFIG_TEST
+ u_int32_t otherval;
+#endif
+
+ Tcl_ResetResult(interp);
+ dbenv = (DB_ENV *)clientData;
+ envip = _PtrToInfo((void *)dbenv);
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbenv == NULL) {
+ Tcl_SetResult(interp, "NULL env pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "NULL env info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the envcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], envcmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ res = NULL;
+ switch ((enum envcmds)cmdindex) {
+#if CONFIG_TEST
+ case ENVLKDETECT:
+ result = tcl_LockDetect(interp, objc, objv, dbenv);
+ break;
+ case ENVLKSTAT:
+ result = tcl_LockStat(interp, objc, objv, dbenv);
+ break;
+ case ENVLKTIMEOUT:
+ result = tcl_LockTimeout(interp, objc, objv, dbenv);
+ break;
+ case ENVLKID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbenv->lock_id(dbenv, &newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_id");
+ if (result == TCL_OK)
+ res = Tcl_NewLongObj((long)newval);
+ break;
+ case ENVLKFREEID:
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_free(dbenv, newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_free");
+ break;
+ case ENVLKSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_set");
+ break;
+ case ENVLKGET:
+ result = tcl_LockGet(interp, objc, objv, dbenv);
+ break;
+ case ENVLKVEC:
+ result = tcl_LockVec(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGARCH:
+ result = tcl_LogArchive(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGCMP:
+ result = tcl_LogCompare(interp, objc, objv);
+ break;
+ case ENVLOGCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.logc%d", envip->i_name, envip->i_envlogcid);
+ logcip = _NewInfo(interp, NULL, newname, I_LOGC);
+ if (logcip != NULL) {
+ ret = dbenv->log_cursor(dbenv, &logc, 0);
+ if (ret == 0) {
+ result = TCL_OK;
+ envip->i_envlogcid++;
+ /*
+ * We do NOT want to set i_parent to
+ * envip here because log cursors are
+ * not "tied" to the env. That is, they
+ * are NOT closed if the env is closed.
+ */
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)logc_Cmd,
+ (ClientData)logc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(logcip, logc);
+ } else {
+ _DeleteInfo(logcip);
+ result = _ErrorSetup(interp, ret, "log cursor");
+ }
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case ENVLOGFILE:
+ result = tcl_LogFile(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGFLUSH:
+ result = tcl_LogFlush(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGGET:
+ result = tcl_LogGet(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGPUT:
+ result = tcl_LogPut(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGSTAT:
+ result = tcl_LogStat(interp, objc, objv, dbenv);
+ break;
+ case ENVMPSTAT:
+ result = tcl_MpStat(interp, objc, objv, dbenv);
+ break;
+ case ENVMPSYNC:
+ result = tcl_MpSync(interp, objc, objv, dbenv);
+ break;
+ case ENVTRICKLE:
+ result = tcl_MpTrickle(interp, objc, objv, dbenv);
+ break;
+ case ENVMP:
+ result = tcl_Mp(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVREPELECT:
+ result = tcl_RepElect(interp, objc, objv, dbenv);
+ break;
+ case ENVREPFLUSH:
+ result = tcl_RepFlush(interp, objc, objv, dbenv);
+ break;
+ case ENVREPLIMIT:
+ result = tcl_RepLimit(interp, objc, objv, dbenv);
+ break;
+ case ENVREPPROCMESS:
+ result = tcl_RepProcessMessage(interp, objc, objv, dbenv);
+ break;
+ case ENVREPREQUEST:
+ result = tcl_RepRequest(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTART:
+ result = tcl_RepStart(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTAT:
+ result = tcl_RepStat(interp, objc, objv, dbenv);
+ break;
+ case ENVRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbenv handle directly.
+ * This is for testing purposes only.  It is dbenv-private data.
+ */
+ res = Tcl_NewLongObj(dbenv->cl_id);
+ break;
+ case ENVTXNCKP:
+ result = tcl_TxnCheckpoint(interp, objc, objv, dbenv);
+ break;
+ case ENVTXNSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->txn_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn id_set");
+ break;
+ case ENVTXNRECOVER:
+ result = tcl_TxnRecover(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVTXNSTAT:
+ result = tcl_TxnStat(interp, objc, objv, dbenv);
+ break;
+ case ENVTXNTIMEOUT:
+ result = tcl_TxnTimeout(interp, objc, objv, dbenv);
+ break;
+ case ENVMUTEX:
+ result = tcl_Mutex(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVATTR:
+ result = tcl_EnvAttr(interp, objc, objv, dbenv);
+ break;
+ case ENVTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbenv);
+ break;
+ case ENVVERB:
+ /*
+ * Two args for this. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = tcl_EnvVerbose(interp, dbenv, objv[2], objv[3]);
+ break;
+#endif
+ case ENVCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * Any transactions will be aborted, and any mpools
+ * closed automatically. We must delete any txn
+ * and mp widgets we have here too for this env.
+ * NOTE: envip is freed when we come back from
+ * this function. Set it to NULL to make sure no
+ * one tries to use it later.
+ */
+ _debug_check();
+ ret = dbenv->close(dbenv, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env close");
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ break;
+ case ENVDBREMOVE:
+ result = env_DbRemove(interp, objc, objv, dbenv);
+ break;
+ case ENVDBRENAME:
+ result = env_DbRename(interp, objc, objv, dbenv);
+ break;
+ case ENVTXN:
+ result = tcl_Txn(interp, objc, objv, dbenv, envip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * PUBLIC: int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ * PUBLIC: DB_ENV *, DBTCL_INFO *));
+ *
+ * tcl_EnvRemove --
+ */
+int
+tcl_EnvRemove(interp, objc, objv, dbenv, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *envremopts[] = {
+#if CONFIG_TEST
+ "-overwrite",
+ "-server",
+#endif
+ "-data_dir",
+ "-encryptaes",
+ "-encryptany",
+ "-force",
+ "-home",
+ "-log_dir",
+ "-tmp_dir",
+ "-use_environ",
+ "-use_environ_root",
+ NULL
+ };
+ enum envremopts {
+#if CONFIG_TEST
+ ENVREM_OVERWRITE,
+ ENVREM_SERVER,
+#endif
+ ENVREM_DATADIR,
+ ENVREM_ENCRYPT_AES,
+ ENVREM_ENCRYPT_ANY,
+ ENVREM_FORCE,
+ ENVREM_HOME,
+ ENVREM_LOGDIR,
+ ENVREM_TMPDIR,
+ ENVREM_USE_ENVIRON,
+ ENVREM_USE_ENVIRON_ROOT
+ };
+ DB_ENV *e;
+ u_int32_t cflag, enc_flag, flag, forceflag, sflag;
+ int i, optindex, result, ret;
+ char *datadir, *home, *logdir, *passwd, *server, *tmpdir;
+
+ result = TCL_OK;
+ cflag = flag = forceflag = sflag = 0;
+ home = NULL;
+ passwd = NULL;
+ datadir = logdir = tmpdir = NULL;
+ server = NULL;
+ enc_flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envremopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envremopts)optindex) {
+#if CONFIG_TEST
+ case ENVREM_SERVER:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server name?");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ cflag = DB_CLIENT;
+ break;
+#endif
+ case ENVREM_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case ENVREM_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case ENVREM_FORCE:
+ forceflag |= DB_FORCE;
+ break;
+ case ENVREM_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+#if CONFIG_TEST
+ case ENVREM_OVERWRITE:
+ sflag |= DB_OVERWRITE;
+ break;
+#endif
+ case ENVREM_USE_ENVIRON:
+ flag |= DB_USE_ENVIRON;
+ break;
+ case ENVREM_USE_ENVIRON_ROOT:
+ flag |= DB_USE_ENVIRON_ROOT;
+ break;
+ case ENVREM_DATADIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ datadir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_LOGDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ logdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_TMPDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ tmpdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+ * If dbenv is NULL, we don't have an open env and we need to open
+ * one for the user.  Don't bother with the info stuff.
+ */
+ if (dbenv == NULL) {
+ if ((ret = db_env_create(&e, cflag)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create");
+ goto error;
+ }
+ if (server != NULL) {
+ _debug_check();
+ ret = e->set_rpc_server(e, NULL, server, 0, 0, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rpc_server");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (datadir != NULL) {
+ _debug_check();
+ ret = e->set_data_dir(e, datadir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (logdir != NULL) {
+ _debug_check();
+ ret = e->set_lg_dir(e, logdir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_log_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (tmpdir != NULL) {
+ _debug_check();
+ ret = e->set_tmp_dir(e, tmpdir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = e->set_encrypt(e, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (sflag != 0 && (ret = e->set_flags(e, sflag, 1)) != 0) {
+ _debug_check();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result != TCL_OK)
+ goto error;
+ }
+ } else {
+ /*
+ * We have to clean up any info associated with this env,
+ * regardless of the result of the remove so do it first.
+ * NOTE: envip is freed when we come back from this function.
+ */
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ e = dbenv;
+ }
+
+ flag |= forceflag;
+ /*
+ * When we get here we have parsed all the args. Now remove
+ * the environment.
+ */
+ _debug_check();
+ ret = e->remove(e, home, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env remove");
+error:
+ return (result);
+}
+
+static void
+_EnvInfoDelete(interp, envip)
+ Tcl_Interp *interp; /* Tcl Interpreter */
+ DBTCL_INFO *envip; /* Info for env */
+{
+ DBTCL_INFO *nextp, *p;
+
+ /*
+ * Before we can delete the environment info, we must close
+ * any open subsystems in this env. We will:
+ * 1. Abort any transactions (which aborts any nested txns).
+ * 2. Close any mpools (which will put back any pages they hold).
+ * 3. Put any locks and close log cursors.
+ * 4. Close the error file.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * env. If so, remove its commands and info structure.
+ * We do not close/abort/whatever here, because we
+ * don't want to replicate DB behavior.
+ *
+ * NOTE: Only those types that can nest need to be
+ * itemized in the switch below. That is txns and mps.
+ * Other types like log cursors and locks will just
+ * get cleaned up here.
+ */
+ if (p->i_parent == envip) {
+ switch (p->i_type) {
+ case I_TXN:
+ _TxnInfoDelete(interp, p);
+ break;
+ case I_MP:
+ _MpInfoDelete(interp, p);
+ break;
+ default:
+ Tcl_SetResult(interp,
+ "_EnvInfoDelete: bad info type",
+ TCL_STATIC);
+ break;
+ }
+ nextp = LIST_NEXT(p, entries);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ } else
+ nextp = LIST_NEXT(p, entries);
+ }
+ (void)Tcl_DeleteCommand(interp, envip->i_name);
+ _DeleteInfo(envip);
+}
+
+#if CONFIG_TEST
+/*
+ * PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *,
+ * PUBLIC: Tcl_Obj *));
+ *
+ * tcl_EnvVerbose --
+ */
+int
+tcl_EnvVerbose(interp, dbenv, which, onoff)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *dbenv; /* Env pointer */
+ Tcl_Obj *which; /* Which subsystem */
+ Tcl_Obj *onoff; /* On or off */
+{
+ static char *verbwhich[] = {
+ "chkpt",
+ "deadlock",
+ "recovery",
+ "rep",
+ "wait",
+ NULL
+ };
+ enum verbwhich {
+ ENVVERB_CHK,
+ ENVVERB_DEAD,
+ ENVVERB_REC,
+ ENVVERB_REP,
+ ENVVERB_WAIT
+ };
+ static char *verbonoff[] = {
+ "off",
+ "on",
+ NULL
+ };
+ enum verbonoff {
+ ENVVERB_OFF,
+ ENVVERB_ON
+ };
+ int on, optindex, ret;
+ u_int32_t wh;
+
+ if (Tcl_GetIndexFromObj(interp, which, verbwhich, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(which));
+
+ switch ((enum verbwhich)optindex) {
+ case ENVVERB_CHK:
+ wh = DB_VERB_CHKPOINT;
+ break;
+ case ENVVERB_DEAD:
+ wh = DB_VERB_DEADLOCK;
+ break;
+ case ENVVERB_REC:
+ wh = DB_VERB_RECOVERY;
+ break;
+ case ENVVERB_REP:
+ wh = DB_VERB_REPLICATION;
+ break;
+ case ENVVERB_WAIT:
+ wh = DB_VERB_WAITSFOR;
+ break;
+ default:
+ return (TCL_ERROR);
+ }
+ if (Tcl_GetIndexFromObj(interp, onoff, verbonoff, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(onoff));
+ switch ((enum verbonoff)optindex) {
+ case ENVVERB_OFF:
+ on = 0;
+ break;
+ case ENVVERB_ON:
+ on = 1;
+ break;
+ default:
+ return (TCL_ERROR);
+ }
+ ret = dbenv->set_verbose(dbenv, wh, on);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set verbose"));
+}
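+
+/*
+ * Example usage (a sketch, CONFIG_TEST builds only):
+ *
+ *	$env verbose deadlock on
+ *	$env verbose rep off
+ *
+ * maps the subsystem name to the matching DB_VERB_* flag and turns the
+ * corresponding verbose output on or off.
+ */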
+#endif
+
+#if CONFIG_TEST
+/*
+ * PUBLIC: int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvAttr --
+ * Return a list of the env's attributes
+ */
+int
+tcl_EnvAttr(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+{
+ int result;
+ Tcl_Obj *myobj, *retlist;
+
+ result = TCL_OK;
+
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ retlist = Tcl_NewListObj(0, NULL);
+ /*
+ * XXX
+ * We peek at the dbenv to determine what subsystems
+ * we have available in this env.
+ */
+ myobj = Tcl_NewStringObj("-home", strlen("-home"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ myobj = Tcl_NewStringObj(dbenv->db_home, strlen(dbenv->db_home));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ if (CDB_LOCKING(dbenv)) {
+ myobj = Tcl_NewStringObj("-cdb", strlen("-cdb"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (CRYPTO_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-crypto", strlen("-crypto"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (LOCKING_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-lock", strlen("-lock"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (LOGGING_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-log", strlen("-log"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (MPOOL_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-mpool", strlen("-mpool"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (RPC_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-rpc", strlen("-rpc"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (TXN_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-txn", strlen("-txn"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ Tcl_SetObjResult(interp, retlist);
+err:
+ return (result);
+}
+
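+/*
+ * Example (a sketch): for a typical transactional environment the
+ * attributes command might return something like
+ *
+ *	{-home /some/dir -lock -log -mpool -txn}
+ *
+ * with -cdb, -crypto and -rpc appearing only when those features are
+ * configured.
+ */
+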
+/*
+ * PUBLIC: int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvTest --
+ */
+int
+tcl_EnvTest(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+{
+ static char *envtestcmd[] = {
+ "abort",
+ "copy",
+ NULL
+ };
+ enum envtestcmd {
+ ENVTEST_ABORT,
+ ENVTEST_COPY
+ };
+ static char *envtestat[] = {
+ "electinit",
+ "electsend",
+ "electvote1",
+ "electvote2",
+ "electwait1",
+ "electwait2",
+ "none",
+ "predestroy",
+ "preopen",
+ "postdestroy",
+ "postlog",
+ "postlogmeta",
+ "postopen",
+ "postsync",
+ "subdb_lock",
+ NULL
+ };
+ enum envtestat {
+ ENVTEST_ELECTINIT,
+ ENVTEST_ELECTSEND,
+ ENVTEST_ELECTVOTE1,
+ ENVTEST_ELECTVOTE2,
+ ENVTEST_ELECTWAIT1,
+ ENVTEST_ELECTWAIT2,
+ ENVTEST_NONE,
+ ENVTEST_PREDESTROY,
+ ENVTEST_PREOPEN,
+ ENVTEST_POSTDESTROY,
+ ENVTEST_POSTLOG,
+ ENVTEST_POSTLOGMETA,
+ ENVTEST_POSTOPEN,
+ ENVTEST_POSTSYNC,
+ ENVTEST_SUBDB_LOCKS
+ };
+ int *loc, optindex, result, testval;
+
+ result = TCL_OK;
+ loc = NULL;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "abort|copy location");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * This must be the "copy" or "abort" portion of the command.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[2], envtestcmd, "command",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[2]);
+ return (result);
+ }
+ switch ((enum envtestcmd)optindex) {
+ case ENVTEST_ABORT:
+ loc = &dbenv->test_abort;
+ break;
+ case ENVTEST_COPY:
+ loc = &dbenv->test_copy;
+ break;
+ default:
+ Tcl_SetResult(interp, "Illegal store location", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * This must be the location portion of the command.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[3], envtestat, "location",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[3]);
+ return (result);
+ }
+ switch ((enum envtestat)optindex) {
+ case ENVTEST_ELECTINIT:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTINIT;
+ break;
+ case ENVTEST_ELECTSEND:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTSEND;
+ break;
+ case ENVTEST_ELECTVOTE1:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTVOTE1;
+ break;
+ case ENVTEST_ELECTVOTE2:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTVOTE2;
+ break;
+ case ENVTEST_ELECTWAIT1:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTWAIT1;
+ break;
+ case ENVTEST_ELECTWAIT2:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTWAIT2;
+ break;
+ case ENVTEST_NONE:
+ testval = 0;
+ break;
+ case ENVTEST_PREOPEN:
+ testval = DB_TEST_PREOPEN;
+ break;
+ case ENVTEST_PREDESTROY:
+ testval = DB_TEST_PREDESTROY;
+ break;
+ case ENVTEST_POSTLOG:
+ testval = DB_TEST_POSTLOG;
+ break;
+ case ENVTEST_POSTLOGMETA:
+ testval = DB_TEST_POSTLOGMETA;
+ break;
+ case ENVTEST_POSTOPEN:
+ testval = DB_TEST_POSTOPEN;
+ break;
+ case ENVTEST_POSTDESTROY:
+ testval = DB_TEST_POSTDESTROY;
+ break;
+ case ENVTEST_POSTSYNC:
+ testval = DB_TEST_POSTSYNC;
+ break;
+ case ENVTEST_SUBDB_LOCKS:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_SUBDB_LOCKS;
+ break;
+ default:
+ Tcl_SetResult(interp, "Illegal test location", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ *loc = testval;
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+ return (result);
+}
+#endif
+
+/*
+ * env_DbRemove --
+ * Implements the ENV->dbremove command.
+ */
+static int
+env_DbRemove(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbrem[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbrem {
+ TCL_EDBREM_COMMIT,
+ TCL_EDBREM_TXN,
+ TCL_EDBREM_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Parse the option flags and any transaction handle first; the
+ * remaining args are the file and (optional) database names passed
+ * to DB_ENV->dbremove.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbrem)optindex) {
+ case TCL_EDBREM_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbremove: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBREM_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (there had better be 1 or 2) are
+ * file names.  If there is 1, it is a db name; if 2, a db
+ * name and a subdb name.
+ */
+ if ((i == (objc - 1)) || (i == (objc - 2))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbremove(dbenv, txn, db, subdb, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbremove");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ return (result);
+}
+
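+/*
+ * Example usage (a sketch):
+ *
+ *	$env dbremove -auto_commit mydata.db
+ *	$env dbremove -txn $t -- mydata.db mysubdb
+ *
+ * where the optional second name selects a subdatabase within the file;
+ * the file and txn names here are illustrative only.
+ */
+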
+/*
+ * env_DbRename --
+ * Implements the ENV->dbrename command.
+ */
+static int
+env_DbRename(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbmv[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbmv {
+ TCL_EDBMV_COMMIT,
+ TCL_EDBMV_TXN,
+ TCL_EDBMV_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *newname, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = newname = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Parse the option flags and any transaction handle first; the
+ * remaining args are the file, (optional) database and new names
+ * passed to DB_ENV->dbrename.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbmv)optindex) {
+ case TCL_EDBMV_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbrename: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBMV_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (there had better be 2 or 3) are
+ * file names.  If there are 2, a db name and a new name; if 3,
+ * a db name, a subdb name and a new name.
+ */
+ if ((i == (objc - 2)) || (i == (objc - 3))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(dbenv, newlen + 1,
+ &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbrename(dbenv, txn, db, subdb, newname, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbrename");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ if (newname)
+ __os_free(dbenv, newname);
+ return (result);
+}
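+
+/*
+ * Example usage (a sketch):
+ *
+ *	$env dbrename -auto_commit old.db new.db
+ *	$env dbrename -txn $t -- data.db oldsub newsub
+ *
+ * The first form renames the file itself, the second renames a
+ * subdatabase within the file; the names here are illustrative only.
+ */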
diff --git a/libdb/tcl/tcl_internal.c b/libdb/tcl/tcl_internal.c
new file mode 100644
index 0000000..ffe9b8a
--- /dev/null
+++ b/libdb/tcl/tcl_internal.c
@@ -0,0 +1,717 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc_auto/db_ext.h"
+
+/*
+ *
+ * internal.c --
+ *
+ * This file contains internal functions we need to maintain
+ * state for our Tcl interface.
+ *
+ * NOTE: This all uses a linear linked list.  If we end up with
+ * so many info structs that searching it becomes a performance
+ * hit, it should be redone using hashes or one list per type.
+ * The assumption is that the user won't have more than a few
+ * dozen info structs in operation at any given point in time,
+ * so even a complicated application with a few environments,
+ * nested transactions, locking, and several databases open with
+ * cursors should not notice the cost of searching the list to
+ * get/manipulate an info structure.
+ */
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static void tcl_flag_callback __P((u_int32_t, const FN *, void *));
+
+/*
+ * Private structure type used to pass both an interp and an object into
+ * a callback's single void *.
+ */
+struct __tcl_callback_bundle {
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+};
+
+#define GLOB_CHAR(c) ((c) == '*' || (c) == '?')
+
+/*
+ * PUBLIC: DBTCL_INFO *_NewInfo __P((Tcl_Interp *,
+ * PUBLIC: void *, char *, enum INFOTYPE));
+ *
+ * _NewInfo --
+ *
+ * This function will create a new info structure and fill it in
+ * with the name and pointer, id and type.
+ */
+DBTCL_INFO *
+_NewInfo(interp, anyp, name, type)
+ Tcl_Interp *interp;
+ void *anyp;
+ char *name;
+ enum INFOTYPE type;
+{
+ DBTCL_INFO *p;
+ int i, ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), &p)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (NULL);
+ }
+
+ if ((ret = __os_strdup(NULL, name, &p->i_name)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ __os_free(NULL, p);
+ return (NULL);
+ }
+ p->i_interp = interp;
+ p->i_anyp = anyp;
+ p->i_data = 0;
+ p->i_data2 = 0;
+ p->i_type = type;
+ p->i_parent = NULL;
+ p->i_err = NULL;
+ p->i_errpfx = NULL;
+ p->i_lockobj.data = NULL;
+ p->i_btcompare = NULL;
+ p->i_dupcompare = NULL;
+ p->i_hashproc = NULL;
+ p->i_second_call = NULL;
+ p->i_rep_eid = NULL;
+ p->i_rep_send = NULL;
+ for (i = 0; i < MAX_ID; i++)
+ p->i_otherid[i] = 0;
+
+ LIST_INSERT_HEAD(&__db_infohead, p, entries);
+ return (p);
+}
+
+/*
+ * PUBLIC: void *_NameToPtr __P((CONST char *));
+ */
+void *
+_NameToPtr(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p->i_anyp);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_PtrToInfo __P((CONST void *));
+ */
+DBTCL_INFO *
+_PtrToInfo(ptr)
+ CONST void *ptr;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (p->i_anyp == ptr)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_NameToInfo __P((CONST char *));
+ */
+DBTCL_INFO *
+_NameToInfo(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void _SetInfoData __P((DBTCL_INFO *, void *));
+ */
+void
+_SetInfoData(p, data)
+ DBTCL_INFO *p;
+ void *data;
+{
+ if (p == NULL)
+ return;
+ p->i_anyp = data;
+ return;
+}
+
+/*
+ * PUBLIC: void _DeleteInfo __P((DBTCL_INFO *));
+ */
+void
+_DeleteInfo(p)
+ DBTCL_INFO *p;
+{
+ if (p == NULL)
+ return;
+ LIST_REMOVE(p, entries);
+ if (p->i_lockobj.data != NULL)
+ __os_free(NULL, p->i_lockobj.data);
+ if (p->i_err != NULL) {
+ fclose(p->i_err);
+ p->i_err = NULL;
+ }
+ if (p->i_errpfx != NULL)
+ __os_free(NULL, p->i_errpfx);
+ if (p->i_btcompare != NULL)
+ Tcl_DecrRefCount(p->i_btcompare);
+ if (p->i_dupcompare != NULL)
+ Tcl_DecrRefCount(p->i_dupcompare);
+ if (p->i_hashproc != NULL)
+ Tcl_DecrRefCount(p->i_hashproc);
+ if (p->i_second_call != NULL)
+ Tcl_DecrRefCount(p->i_second_call);
+ if (p->i_rep_eid != NULL)
+ Tcl_DecrRefCount(p->i_rep_eid);
+ if (p->i_rep_send != NULL)
+ Tcl_DecrRefCount(p->i_rep_send);
+ __os_free(NULL, p->i_name);
+ __os_free(NULL, p);
+
+ return;
+}
+
+/*
+ * PUBLIC: int _SetListElem __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, void *, int, void *, int));
+ */
+int
+_SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1, *elem2;
+ int e1cnt, e2cnt;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, e1cnt);
+ myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, e2cnt);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+ */
+int
+_SetListElemInt(interp, list, elem1, elem2)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1;
+ int elem2;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, strlen((char *)elem1));
+ myobjv[1] = Tcl_NewIntObj(elem2);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
+
+/*
+ * PUBLIC: int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *,
+ * PUBLIC: db_recno_t, u_char *, int));
+ */
+int
+_SetListRecnoElem(interp, list, elem1, elem2, e2size)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ db_recno_t elem1;
+ u_char *elem2;
+ int e2size;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewLongObj((long)elem1);
+ myobjv[1] = Tcl_NewByteArrayObj(elem2, e2size);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * _Set3DBTList --
+ * This is really analogous to both _SetListElem and
+ * _SetListRecnoElem--it's used for three-DBT lists returned by
+ * DB->pget and DBC->pget(). We'd need a family of four functions
+ * to handle all the recno/non-recno cases, however, so we make
+ * this a little more aware of the internals and do the logic inside.
+ *
+ * XXX
+ * One of these days all these functions should probably be cleaned up
+ * to eliminate redundancy and bring them into the standard DB
+ * function namespace.
+ *
+ * PUBLIC: int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int,
+ * PUBLIC: DBT *, int, DBT *));
+ */
+int
+_Set3DBTList(interp, list, elem1, is1recno, elem2, is2recno, elem3)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ DBT *elem1, *elem2, *elem3;
+ int is1recno, is2recno;
+{
+
+ Tcl_Obj *myobjv[3], *thislist;
+
+ if (is1recno)
+ myobjv[0] = Tcl_NewLongObj((long)*(db_recno_t *)elem1->data);
+ else
+ myobjv[0] =
+ Tcl_NewByteArrayObj((u_char *)elem1->data, elem1->size);
+
+ if (is2recno)
+ myobjv[1] = Tcl_NewLongObj((long)*(db_recno_t *)elem2->data);
+ else
+ myobjv[1] =
+ Tcl_NewByteArrayObj((u_char *)elem2->data, elem2->size);
+
+ myobjv[2] = Tcl_NewByteArrayObj((u_char *)elem3->data, elem3->size);
+
+ thislist = Tcl_NewListObj(3, myobjv);
+
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
+
+/*
+ * _SetMultiList -- build a list for return from multiple get.
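+ *	The pairs appended to the list are {key data} ({recno data} for
+ *	DB_RECNO and DB_QUEUE databases), one pair per element found in
+ *	the bulk buffer.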
+ *
+ * PUBLIC: int _SetMultiList __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, DBT *, DBT*, int, int));
+ */
+int
+_SetMultiList(interp, list, key, data, type, flag)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ DBT *key, *data;
+ int type, flag;
+{
+ db_recno_t recno;
+ u_int32_t dlen, klen;
+ int result;
+ void *pointer, *dp, *kp;
+
+ recno = 0;
+ dlen = 0;
+ kp = NULL;
+
+ DB_MULTIPLE_INIT(pointer, data);
+ result = TCL_OK;
+
+ if (type == DB_RECNO || type == DB_QUEUE)
+ recno = *(db_recno_t *) key->data;
+ else
+ kp = key->data;
+ klen = key->size;
+ do {
+ if (flag & DB_MULTIPLE_KEY) {
+ if (type == DB_RECNO || type == DB_QUEUE)
+ DB_MULTIPLE_RECNO_NEXT(pointer,
+ data, recno, dp, dlen);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ data, kp, klen, dp, dlen);
+ } else
+ DB_MULTIPLE_NEXT(pointer, data, dp, dlen);
+
+ if (pointer == NULL)
+ break;
+
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result =
+ _SetListRecnoElem(interp, list, recno, dp, dlen);
+ recno++;
+ } else
+ result = _SetListElem(interp, list, kp, klen, dp, dlen);
+ } while (result == TCL_OK);
+
+ return (result);
+}
+/*
+ * PUBLIC: int _GetGlobPrefix __P((char *, char **));
+ */
+int
+_GetGlobPrefix(pattern, prefix)
+ char *pattern;
+ char **prefix;
+{
+ int i, j;
+ char *p;
+
+ /*
+ * Duplicate it, we get enough space and most of the work is done.
+ */
+ if (__os_strdup(NULL, pattern, prefix) != 0)
+ return (1);
+
+ p = *prefix;
+ for (i = 0, j = 0; p[i] && !GLOB_CHAR(p[i]); i++, j++)
+ /*
+ * Check for an escaped character and adjust
+ */
+ if (p[i] == '\\' && p[i+1]) {
+ p[j] = p[i+1];
+ i++;
+ } else
+ p[j] = p[i];
+ p[j] = 0;
+ return (0);
+}
+
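+/*
+ * Worked example for _GetGlobPrefix() above: for the pattern
+ * "env\*lock*" the escaped '*' is copied literally and scanning stops
+ * at the first unescaped glob character, so *prefix is set to
+ * "env*lock".
+ */
+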
+/*
+ * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, int, char *));
+ */
+int
+_ReturnSetup(interp, ret, ok, errmsg)
+ Tcl_Interp *interp;
+ int ret, ok;
+ char *errmsg;
+{
+ char *msg;
+
+ if (ret > 0)
+ return (_ErrorSetup(interp, ret, errmsg));
+
+ /*
+ * We either have success or a DB error. If a DB error, set up the
+ * string. We return an error if not one of the errors we catch.
+ * If anyone wants to reset the result to return anything different,
+ * then the calling function is responsible for doing so via
+ * Tcl_ResetResult or another Tcl_SetObjResult.
+ */
+ if (ret == 0) {
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+ return (TCL_OK);
+ }
+
+ msg = db_strerror(ret);
+ Tcl_AppendResult(interp, msg, NULL);
+
+ if (ok)
+ return (TCL_OK);
+ else {
+ Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL);
+ return (TCL_ERROR);
+ }
+}
+
+/*
+ * PUBLIC: int _ErrorSetup __P((Tcl_Interp *, int, char *));
+ */
+int
+_ErrorSetup(interp, ret, errmsg)
+ Tcl_Interp *interp;
+ int ret;
+ char *errmsg;
+{
+ Tcl_SetErrno(ret);
+ Tcl_AppendResult(interp, errmsg, ":", Tcl_PosixError(interp), NULL);
+ return (TCL_ERROR);
+}
+
+/*
+ * PUBLIC: void _ErrorFunc __P((CONST char *, char *));
+ */
+void
+_ErrorFunc(pfx, msg)
+ CONST char *pfx;
+ char *msg;
+{
+ DBTCL_INFO *p;
+ Tcl_Interp *interp;
+ int size;
+ char *err;
+
+ p = _NameToInfo(pfx);
+ if (p == NULL)
+ return;
+ interp = p->i_interp;
+
+ size = strlen(pfx) + strlen(msg) + 4;
+ /*
+ * If we cannot allocate enough to put together the prefix
+ * and message then give them just the message.
+ */
+ if (__os_malloc(NULL, size, &err) != 0) {
+ Tcl_AddErrorInfo(interp, msg);
+ Tcl_AppendResult(interp, msg, "\n", NULL);
+ return;
+ }
+ snprintf(err, size, "%s: %s", pfx, msg);
+ Tcl_AddErrorInfo(interp, err);
+ Tcl_AppendResult(interp, err, "\n", NULL);
+ __os_free(NULL, err);
+ return;
+}
+
+#define INVALID_LSNMSG "Invalid LSN with %d parts. Should have 2.\n"
+
+/*
+ * PUBLIC: int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+ */
+int
+_GetLsn(interp, obj, lsn)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ DB_LSN *lsn;
+{
+ Tcl_Obj **myobjv;
+ char msg[MSG_SIZE];
+ int myobjc, result;
+ u_int32_t tmp;
+
+ result = Tcl_ListObjGetElements(interp, obj, &myobjc, &myobjv);
+ if (result == TCL_ERROR)
+ return (result);
+ if (myobjc != 2) {
+ result = TCL_ERROR;
+ snprintf(msg, MSG_SIZE, INVALID_LSNMSG, myobjc);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (result);
+ }
+ result = _GetUInt32(interp, myobjv[0], &tmp);
+ if (result == TCL_ERROR)
+ return (result);
+ lsn->file = tmp;
+ result = _GetUInt32(interp, myobjv[1], &tmp);
+ lsn->offset = tmp;
+ return (result);
+}
+
+/*
+ * _GetUInt32 --
+ * Get a u_int32_t from a Tcl object. Tcl_GetIntFromObj does the
+ * right thing most of the time, but on machines where a long is 8 bytes
+ * and an int is 4 bytes, it errors on integers between the maximum
+ * int32_t and the maximum u_int32_t. This is correct, but we generally
+ * want a u_int32_t in the end anyway, so we use Tcl_GetLongFromObj and do
+ * the bounds checking ourselves.
+ *
+ * This code looks much like Tcl_GetIntFromObj, only with a different
+ * bounds check. It's essentially Tcl_GetUnsignedIntFromObj, which
+ * unfortunately doesn't exist.
+ *
+ * PUBLIC: int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *));
+ */
+int
+_GetUInt32(interp, obj, resp)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ u_int32_t *resp;
+{
+ int result;
+ long ltmp;
+
+ result = Tcl_GetLongFromObj(interp, obj, &ltmp);
+ if (result != TCL_OK)
+ return (result);
+
+ if ((unsigned long)ltmp != (u_int32_t)ltmp) {
+ if (interp != NULL) {
+ Tcl_ResetResult(interp);
+ Tcl_AppendToObj(Tcl_GetObjResult(interp),
+ "integer value too large for u_int32_t", -1);
+ }
+ return (TCL_ERROR);
+ }
+
+ *resp = (u_int32_t)ltmp;
+ return (TCL_OK);
+}
+
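+/*
+ * Worked example for _GetUInt32() above, assuming an LP64 platform
+ * (8-byte long): 3000000000 overflows a signed int but fits in a
+ * u_int32_t, so the long-based check accepts it, while 5000000000
+ * fails the (unsigned long) != (u_int32_t) comparison and produces the
+ * "integer value too large for u_int32_t" error.
+ */
+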
+/*
+ * tcl_flag_callback --
+ * Callback for db_pr.c functions that contain the FN struct mapping
+ * flag values to meaningful strings. This function appends a Tcl_Obj
+ * containing each pertinent flag string to the specified Tcl list.
+ */
+static void
+tcl_flag_callback(flags, fn, vtcbp)
+ u_int32_t flags;
+ const FN *fn;
+ void *vtcbp;
+{
+ const FN *fnp;
+ Tcl_Interp *interp;
+ Tcl_Obj *newobj, *listobj;
+ int result;
+ struct __tcl_callback_bundle *tcbp;
+
+ tcbp = (struct __tcl_callback_bundle *)vtcbp;
+ interp = tcbp->interp;
+ listobj = tcbp->obj;
+
+ for (fnp = fn; fnp->mask != 0; ++fnp)
+ if (LF_ISSET(fnp->mask)) {
+ newobj = Tcl_NewStringObj(fnp->name, strlen(fnp->name));
+ result =
+ Tcl_ListObjAppendElement(interp, listobj, newobj);
+
+ /*
+ * Tcl_ListObjAppendElement is defined to return TCL_OK
+ * unless listobj isn't actually a list (or convertible
+ * into one). If this is the case, we screwed up badly
+ * somehow.
+ */
+ DB_ASSERT(result == TCL_OK);
+ }
+}
+
+/*
+ * _GetFlagsList --
+ * Get a new Tcl object, containing a list of the string values
+ * associated with a particular set of flag values, given a function
+ * that can extract the right names for the right flags.
+ *
+ * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t,
+ * PUBLIC: void (*)(u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *))));
+ */
+Tcl_Obj *
+_GetFlagsList(interp, flags, func)
+ Tcl_Interp *interp;
+ u_int32_t flags;
+ void (*func)
+ __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)));
+{
+ Tcl_Obj *newlist;
+ struct __tcl_callback_bundle tcb;
+
+ newlist = Tcl_NewObj();
+
+ memset(&tcb, 0, sizeof(tcb));
+ tcb.interp = interp;
+ tcb.obj = newlist;
+
+ func(flags, &tcb, tcl_flag_callback);
+
+ return (newlist);
+}
+
+int __debug_stop, __debug_on, __debug_print, __debug_test;
+
+/*
+ * PUBLIC: void _debug_check __P((void));
+ */
+void
+_debug_check()
+{
+ if (__debug_on == 0)
+ return;
+
+ if (__debug_print != 0) {
+ printf("\r%7d:", __debug_on);
+ fflush(stdout);
+ }
+ if (__debug_on++ == __debug_test || __debug_stop)
+ __db_loadme();
+}
+
+/*
+ * XXX
+ * Tcl 8.1+ Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
+ *
+ * There is a bug in Tcl 8.1+ involving byte arrays: if an object is
+ * used both as a byte array and as something else, such as an int,
+ * then calling Tcl_GetByteArrayFromObj followed by Tcl_GetIntFromObj
+ * frees the byte-array representation out from under you.
+ *
+ * The workaround is that for every byte array we want to keep, if the
+ * object can also be represented as an integer, we copy the bytes so
+ * that we don't lose them.
+ */
+/*
+ * PUBLIC: int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **,
+ * PUBLIC: u_int32_t *, int *));
+ */
+int
+_CopyObjBytes(interp, obj, newp, sizep, freep)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ void **newp;
+ u_int32_t *sizep;
+ int *freep;
+{
+ void *tmp, *new;
+ int i, len, ret;
+
+ /*
+ * If the object is not an int, then just return the byte
+ * array because it won't be transformed out from under us.
+ * If it is a number, we need to copy it.
+ */
+ *freep = 0;
+ ret = Tcl_GetIntFromObj(interp, obj, &i);
+ tmp = Tcl_GetByteArrayFromObj(obj, &len);
+ *sizep = len;
+ if (ret == TCL_ERROR) {
+ Tcl_ResetResult(interp);
+ *newp = tmp;
+ return (0);
+ }
+
+ /*
+ * If we get here, we have an integer that might be reused
+ * at some other point so we cannot count on GetByteArray
+ * keeping our pointer valid.
+ */
+ if ((ret = __os_malloc(NULL, len, &new)) != 0)
+ return (ret);
+ memcpy(new, tmp, len);
+ *newp = new;
+ *freep = 1;
+ return (0);
+}
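+
+/*
+ * Illustrative caller pattern only, not part of the original source;
+ * it mirrors how the lock and log commands in this directory use the
+ * routine above (the "copy bytes" message string is made up):
+ *
+ *	if ((ret = _CopyObjBytes(interp,
+ *	    obj, &otmp, &dbt.size, &freeobj)) != 0)
+ *		return (_ReturnSetup(interp,
+ *		    ret, DB_RETOK_STD(ret), "copy bytes"));
+ *	dbt.data = otmp;
+ *	... use the DBT, then free the copy if one was made ...
+ *	if (freeobj)
+ *		(void)__os_free(env, otmp);
+ */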
diff --git a/libdb/tcl/tcl_lock.c b/libdb/tcl/tcl_lock.c
new file mode 100644
index 0000000..c70abd5
--- /dev/null
+++ b/libdb/tcl/tcl_lock.c
@@ -0,0 +1,739 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int lock_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int _LockMode __P((Tcl_Interp *, Tcl_Obj *, db_lockmode_t *));
+static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t,
+ u_int32_t, DBT *, db_lockmode_t, char *));
+static void _LockPutInfo __P((Tcl_Interp *, db_lockop_t, DB_LOCK *,
+ u_int32_t, DBT *));
+#if CONFIG_TEST
+static char *lkmode[] = {
+ "ng",
+ "read",
+ "write",
+ "iwrite",
+ "iread",
+ "iwr",
+ NULL
+};
+enum lkmode {
+ LK_NG,
+ LK_READ,
+ LK_WRITE,
+ LK_IWRITE,
+ LK_IREAD,
+ LK_IWR
+};
+
+/*
+ * tcl_LockDetect --
+ *
+ * PUBLIC: int tcl_LockDetect __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockDetect(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *ldopts[] = {
+ "expire",
+ "default",
+ "maxlocks",
+ "minlocks",
+ "minwrites",
+ "oldest",
+ "random",
+ "youngest",
+ NULL
+ };
+ enum ldopts {
+ LD_EXPIRE,
+ LD_DEFAULT,
+ LD_MAXLOCKS,
+ LD_MINLOCKS,
+ LD_MINWRITES,
+ LD_OLDEST,
+ LD_RANDOM,
+ LD_YOUNGEST
+ };
+ u_int32_t flag, policy;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = policy = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ ldopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum ldopts)optindex) {
+ case LD_EXPIRE:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_EXPIRE;
+ break;
+ case LD_DEFAULT:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_DEFAULT;
+ break;
+ case LD_MAXLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MAXLOCKS;
+ break;
+ case LD_MINWRITES:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINWRITE;
+ break;
+ case LD_MINLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINLOCKS;
+ break;
+ case LD_OLDEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_OLDEST;
+ break;
+ case LD_YOUNGEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_YOUNGEST;
+ break;
+ case LD_RANDOM:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_RANDOM;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = envp->lock_detect(envp, flag, policy, NULL);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock detect");
+ return (result);
+}
+
+/*
+ * tcl_LockGet --
+ *
+ * PUBLIC: int tcl_LockGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *lgopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lgopts {
+ LGNOWAIT
+ };
+ DBT obj;
+ Tcl_Obj *res;
+ void *otmp;
+ db_lockmode_t mode;
+ u_int32_t flag, lockid;
+ int freeobj, optindex, result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ freeobj = 0;
+ memset(newname, 0, MSG_SIZE);
+ if (objc != 5 && objc != 6) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nowait? mode id obj");
+ return (TCL_ERROR);
+ }
+ /*
+ * Work back from required args.
+ * Last arg is obj.
+ * Second last is lock id.
+ * Third last is lock mode.
+ */
+ memset(&obj, 0, sizeof(obj));
+
+ if ((result =
+ _GetUInt32(interp, objv[objc-2], &lockid)) != TCL_OK)
+ return (result);
+
+ ret = _CopyObjBytes(interp, objv[objc-1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock get");
+ return (result);
+ }
+ obj.data = otmp;
+ if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK)
+ goto out;
+
+ /*
+	 * Any leftover arg is the flag.
+ */
+ flag = 0;
+ if (objc == 6) {
+ if (Tcl_GetIndexFromObj(interp, objv[(objc - 4)],
+ lgopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[(objc - 4)]));
+ switch ((enum lgopts)optindex) {
+ case LGNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ }
+
+ result = _GetThisLock(interp, envp, lockid, flag, &obj, mode, newname);
+ if (result == TCL_OK) {
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freeobj)
+ (void)__os_free(envp, otmp);
+ return (result);
+}
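+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source;
+ * the "env0" handle and the script variables are hypothetical.  On
+ * success the command returns a new lock widget ("env0.lockN") whose
+ * only subcommand, "put", releases the lock and deletes the widget:
+ *
+ *	set lock [env0 lock_get -nowait write $lockerid $objname]
+ *	...
+ *	$lock put
+ */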
+
+/*
+ * tcl_LockStat --
+ *
+ * PUBLIC: int tcl_LockStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOCK_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->lock_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock stat");
+ if (result == TCL_ERROR)
+ return (result);
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Last allocated locker ID", sp->st_id);
+ MAKE_STAT_LIST("Current maximum unused locker ID", sp->st_cur_maxid);
+ MAKE_STAT_LIST("Maximum locks", sp->st_maxlocks);
+ MAKE_STAT_LIST("Maximum lockers", sp->st_maxlockers);
+ MAKE_STAT_LIST("Maximum objects", sp->st_maxobjects);
+ MAKE_STAT_LIST("Lock modes", sp->st_nmodes);
+ MAKE_STAT_LIST("Current number of locks", sp->st_nlocks);
+ MAKE_STAT_LIST("Maximum number of locks so far", sp->st_maxnlocks);
+ MAKE_STAT_LIST("Current number of lockers", sp->st_nlockers);
+ MAKE_STAT_LIST("Maximum number of lockers so far", sp->st_maxnlockers);
+ MAKE_STAT_LIST("Current number of objects", sp->st_nobjects);
+ MAKE_STAT_LIST("Maximum number of objects so far", sp->st_maxnobjects);
+ MAKE_STAT_LIST("Number of conflicts", sp->st_nconflicts);
+ MAKE_STAT_LIST("Lock requests", sp->st_nrequests);
+ MAKE_STAT_LIST("Lock releases", sp->st_nreleases);
+ MAKE_STAT_LIST("Lock requests that would have waited", sp->st_nnowaits);
+ MAKE_STAT_LIST("Deadlocks detected", sp->st_ndeadlocks);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Lock timeout value", sp->st_locktimeout);
+ MAKE_STAT_LIST("Number of lock timeouts", sp->st_nlocktimeouts);
+ MAKE_STAT_LIST("Transaction timeout value", sp->st_txntimeout);
+ MAKE_STAT_LIST("Number of transaction timeouts", sp->st_ntxntimeouts);
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_LockTimeout --
+ *
+ * PUBLIC: int tcl_LockTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock timeout");
+ return (result);
+}
+
+/*
+ * lock_Cmd --
+ * Implements the "lock" widget.
+ */
+static int
+lock_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Lock handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *lkcmds[] = {
+ "put",
+ NULL
+ };
+ enum lkcmds {
+ LKPUT
+ };
+ DB_ENV *env;
+ DB_LOCK *lock;
+ DBTCL_INFO *lkip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ lock = (DB_LOCK *)clientData;
+ lkip = _PtrToInfo((void *)lock);
+ result = TCL_OK;
+
+ if (lock == NULL) {
+ Tcl_SetResult(interp, "NULL lock", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (lkip == NULL) {
+ Tcl_SetResult(interp, "NULL lock info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ env = NAME_TO_ENV(lkip->i_parent->i_name);
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+	 * Get the command name index from the object based on the lkcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], lkcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum lkcmds)cmdindex) {
+ case LKPUT:
+ _debug_check();
+ ret = env->lock_put(env, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock put");
+ (void)Tcl_DeleteCommand(interp, lkip->i_name);
+ _DeleteInfo(lkip);
+ __os_free(env, lock);
+ break;
+ }
+ return (result);
+}
+
+/*
+ * tcl_LockVec --
+ *
+ * PUBLIC: int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockVec(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* environment pointer */
+{
+ static char *lvopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lvopts {
+ LVNOWAIT
+ };
+ static char *lkops[] = {
+ "get",
+ "put",
+ "put_all",
+ "put_obj",
+ "timeout",
+ NULL
+ };
+ enum lkops {
+ LKGET,
+ LKPUT,
+ LKPUTALL,
+ LKPUTOBJ,
+ LKTIMEOUT
+ };
+ DB_LOCK *lock;
+ DB_LOCKREQ list;
+ DBT obj;
+ Tcl_Obj **myobjv, *res, *thisop;
+ void *otmp;
+ u_int32_t flag, lockid;
+ int freeobj, i, myobjc, optindex, result, ret;
+ char *lockname, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ flag = 0;
+ freeobj = 0;
+
+ /*
+	 * If -nowait is given, it MUST be the first arg.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ lvopts, "option", TCL_EXACT, &optindex) == TCL_OK) {
+ switch ((enum lvopts)optindex) {
+ case LVNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ i = 3;
+ } else {
+ if (IS_HELP(objv[2]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ i = 2;
+ }
+
+ /*
+ * Our next arg MUST be the locker ID.
+ */
+ result = _GetUInt32(interp, objv[i++], &lockid);
+ if (result != TCL_OK)
+ return (result);
+
+ /*
+	 * All remaining args are operation tuples.  Go through them
+	 * sequentially, decoding and executing each one and building
+	 * up the list of return values.
+ */
+ res = Tcl_NewListObj(0, NULL);
+ while (i < objc) {
+ /*
+ * Get the list of the tuple.
+ */
+ lock = NULL;
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ /*
+ * First we will set up the list of requests.
+ * We will make a "second pass" after we get back
+ * the results from the lock_vec call to create
+ * the return list.
+ */
+ if (Tcl_GetIndexFromObj(interp, myobjv[0],
+ lkops, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(myobjv[0]);
+ goto error;
+ }
+ switch ((enum lkops)optindex) {
+ case LKGET:
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{get obj mode}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = _LockMode(interp, myobjv[2], &list.mode);
+ if (result != TCL_OK)
+ goto error;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
+ ret = _GetThisLock(interp, envp, lockid, flag,
+ &obj, list.mode, newname);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ thisop = Tcl_NewIntObj(ret);
+ (void)Tcl_ListObjAppendElement(interp, res,
+ thisop);
+ goto error;
+ }
+ thisop = Tcl_NewStringObj(newname, strlen(newname));
+ (void)Tcl_ListObjAppendElement(interp, res, thisop);
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
+ continue;
+ case LKPUT:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put lock}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT;
+ lockname = Tcl_GetStringFromObj(myobjv[1], NULL);
+ lock = NAME_TO_LOCK(lockname);
+ if (lock == NULL) {
+ snprintf(msg, MSG_SIZE, "Invalid lock: %s\n",
+ lockname);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.lock = *lock;
+ break;
+ case LKPUTALL:
+ if (myobjc != 1) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_all}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_ALL;
+ break;
+ case LKPUTOBJ:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_obj obj}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_OBJ;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
+ list.obj = &obj;
+ break;
+ case LKTIMEOUT:
+ list.op = DB_LOCK_TIMEOUT;
+ break;
+
+ }
+ /*
+		 * If we get here, the request is set up; now call
+		 * lock_vec.
+ */
+ _debug_check();
+ ret = envp->lock_vec(envp, lockid, flag, &list, 1, NULL);
+ /*
+ * Now deal with whether or not the operation succeeded.
+		 * Gets were handled above; everything here is a put.
+ */
+ thisop = Tcl_NewIntObj(ret);
+ result = Tcl_ListObjAppendElement(interp, res, thisop);
+ if (ret != 0 && result == TCL_OK)
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock put");
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
+ /*
+ * We did a put of some kind. Since we did that,
+ * we have to delete the commands associated with
+ * any of the locks we just put.
+ */
+ _LockPutInfo(interp, list.op, lock, lockid, &obj);
+ }
+
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
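+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source;
+ * handle names are hypothetical.  Each trailing argument is an
+ * operation tuple, and the result is a list with one element per
+ * tuple: a lock widget name for a get, the lock_vec return code for
+ * the put forms.
+ *
+ *	env0 lock_vec -nowait $lockerid \
+ *	    [list get $objname read] [list put $lock] put_all
+ */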
+
+static int
+_LockMode(interp, obj, mode)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ db_lockmode_t *mode;
+{
+ int optindex;
+
+ if (Tcl_GetIndexFromObj(interp, obj, lkmode, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(obj));
+ switch ((enum lkmode)optindex) {
+ case LK_NG:
+ *mode = DB_LOCK_NG;
+ break;
+ case LK_READ:
+ *mode = DB_LOCK_READ;
+ break;
+ case LK_WRITE:
+ *mode = DB_LOCK_WRITE;
+ break;
+ case LK_IREAD:
+ *mode = DB_LOCK_IREAD;
+ break;
+ case LK_IWRITE:
+ *mode = DB_LOCK_IWRITE;
+ break;
+ case LK_IWR:
+ *mode = DB_LOCK_IWR;
+ break;
+ }
+ return (TCL_OK);
+}
+
+static void
+_LockPutInfo(interp, op, lock, lockid, objp)
+ Tcl_Interp *interp;
+ db_lockop_t op;
+ DB_LOCK *lock;
+ u_int32_t lockid;
+ DBT *objp;
+{
+ DBTCL_INFO *p, *nextp;
+ int found;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ found = 0;
+ nextp = LIST_NEXT(p, entries);
+ if ((op == DB_LOCK_PUT && (p->i_lock == lock)) ||
+ (op == DB_LOCK_PUT_ALL && p->i_locker == lockid) ||
+ (op == DB_LOCK_PUT_OBJ && p->i_lockobj.data &&
+ memcmp(p->i_lockobj.data, objp->data, objp->size) == 0))
+ found = 1;
+ if (found) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ __os_free(NULL, p->i_lock);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+static int
+_GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *envp; /* Env handle */
+ u_int32_t lockid; /* Locker ID */
+ u_int32_t flag; /* Lock flag */
+ DBT *objp; /* Object to lock */
+ db_lockmode_t mode; /* Lock mode */
+ char *newname; /* New command name */
+{
+ DB_LOCK *lock;
+ DBTCL_INFO *envip, *ip;
+ int result, ret;
+
+ result = TCL_OK;
+ envip = _PtrToInfo((void *)envp);
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "Could not find env info\n", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ snprintf(newname, MSG_SIZE, "%s.lock%d",
+ envip->i_name, envip->i_envlockid);
+ ip = _NewInfo(interp, NULL, newname, I_LOCK);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ ret = __os_malloc(envp, sizeof(DB_LOCK), &lock);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->lock_get(envp, lockid, flag, objp, mode, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock get");
+ if (result == TCL_ERROR) {
+ __os_free(envp, lock);
+ _DeleteInfo(ip);
+ return (result);
+ }
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this lock.
+ */
+ ret = __os_malloc(envp, objp->size, &ip->i_lockobj.data);
+ if (ret != 0) {
+ Tcl_SetResult(interp, "Could not duplicate obj",
+ TCL_STATIC);
+ (void)envp->lock_put(envp, lock);
+ __os_free(envp, lock);
+ _DeleteInfo(ip);
+ result = TCL_ERROR;
+ goto error;
+ }
+ memcpy(ip->i_lockobj.data, objp->data, objp->size);
+ ip->i_lockobj.size = objp->size;
+ envip->i_envlockid++;
+ ip->i_parent = envip;
+ ip->i_locker = lockid;
+ _SetInfoData(ip, lock);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)lock_Cmd, (ClientData)lock, NULL);
+error:
+ return (result);
+}
+#endif
diff --git a/libdb/tcl/tcl_log.c b/libdb/tcl/tcl_log.c
new file mode 100644
index 0000000..cdc56cf
--- /dev/null
+++ b/libdb/tcl/tcl_log.c
@@ -0,0 +1,610 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/txn.h"
+
+#ifdef CONFIG_TEST
+static int tcl_LogcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_LOGC *));
+
+/*
+ * tcl_LogArchive --
+ *
+ * PUBLIC: int tcl_LogArchive __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogArchive(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *archopts[] = {
+ "-arch_abs", "-arch_data", "-arch_log",
+ NULL
+ };
+ enum archopts {
+ ARCH_ABS, ARCH_DATA, ARCH_LOG
+ };
+ Tcl_Obj *fileobj, *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char **file, **list;
+
+ result = TCL_OK;
+ flag = 0;
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ archopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum archopts)optindex) {
+ case ARCH_ABS:
+ flag |= DB_ARCH_ABS;
+ break;
+ case ARCH_DATA:
+ flag |= DB_ARCH_DATA;
+ break;
+ case ARCH_LOG:
+ flag |= DB_ARCH_LOG;
+ break;
+ }
+ }
+ _debug_check();
+ list = NULL;
+ ret = envp->log_archive(envp, &list, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log archive");
+ if (result == TCL_OK) {
+ res = Tcl_NewListObj(0, NULL);
+ for (file = list; file != NULL && *file != NULL; file++) {
+ fileobj = Tcl_NewStringObj(*file, strlen(*file));
+ result = Tcl_ListObjAppendElement(interp, res, fileobj);
+ if (result != TCL_OK)
+ break;
+ }
+ Tcl_SetObjResult(interp, res);
+ }
+ if (list != NULL)
+ __os_ufree(envp, list);
+ return (result);
+}
+
+/*
+ * tcl_LogCompare --
+ *
+ * PUBLIC: int tcl_LogCompare __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*));
+ */
+int
+tcl_LogCompare(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DB_LSN lsn0, lsn1;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 4 args.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn1 lsn2");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn0);
+ if (result == TCL_ERROR)
+ return (result);
+ result = _GetLsn(interp, objv[3], &lsn1);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = log_compare(&lsn0, &lsn1);
+ res = Tcl_NewIntObj(ret);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_LogFile --
+ *
+ * PUBLIC: int tcl_LogFile __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFile(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn;
+ Tcl_Obj *res;
+ size_t len;
+ int result, ret;
+ char *name;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+
+ len = MSG_SIZE;
+ ret = ENOMEM;
+ name = NULL;
+ while (ret == ENOMEM) {
+ if (name != NULL)
+ __os_free(envp, name);
+ ret = __os_malloc(envp, len, &name);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ break;
+ }
+ _debug_check();
+ ret = envp->log_file(envp, &lsn, name, len);
+ len *= 2;
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_file");
+ if (ret == 0) {
+ res = Tcl_NewStringObj(name, strlen(name));
+ Tcl_SetObjResult(interp, res);
+ }
+
+ if (name != NULL)
+ __os_free(envp, name);
+
+ return (result);
+}
+
+/*
+ * tcl_LogFlush --
+ *
+ * PUBLIC: int tcl_LogFlush __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFlush(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn, *lsnp;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 2 or 3 args.
+ */
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?lsn?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ lsnp = &lsn;
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ } else
+ lsnp = NULL;
+
+ _debug_check();
+ ret = envp->log_flush(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_flush");
+ return (result);
+}
+
+/*
+ * tcl_LogGet --
+ *
+ * PUBLIC: int tcl_LogGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ COMPQUIET(objv, NULL);
+ COMPQUIET(objc, 0);
+ COMPQUIET(envp, NULL);
+
+ Tcl_SetResult(interp, "FAIL: log_get deprecated\n", TCL_STATIC);
+ return (TCL_ERROR);
+}
+
+/*
+ * tcl_LogPut --
+ *
+ * PUBLIC: int tcl_LogPut __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogPut(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *logputopts[] = {
+ "-flush",
+ NULL
+ };
+ enum logputopts {
+ LOGPUT_FLUSH
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *intobj, *res;
+ void *dtmp;
+ u_int32_t flag;
+ int freedata, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freedata = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? record");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Data/record must be the last arg.
+ */
+ memset(&data, 0, sizeof(data));
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log put");
+ return (result);
+ }
+ data.data = dtmp;
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ if (objc == 4) {
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ logputopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[2]));
+ }
+ switch ((enum logputopts)optindex) {
+ case LOGPUT_FLUSH:
+ flag = DB_FLUSH;
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = envp->log_put(envp, &lsn, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_put");
+ if (result == TCL_ERROR)
+ return (result);
+ res = Tcl_NewListObj(0, NULL);
+ intobj = Tcl_NewLongObj((long)lsn.file);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ intobj = Tcl_NewLongObj((long)lsn.offset);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ Tcl_SetObjResult(interp, res);
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ return (result);
+}
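+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source;
+ * the handle name is hypothetical.  The record is written as an opaque
+ * byte string and the new LSN comes back as a {file offset} pair:
+ *
+ *	set lsn [env0 log_put -flush $record]
+ *	set file [lindex $lsn 0]
+ *	set offset [lindex $lsn 1]
+ */
+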
+/*
+ * tcl_LogStat --
+ *
+ * PUBLIC: int tcl_LogStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOG_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->log_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Magic", sp->st_magic);
+ MAKE_STAT_LIST("Log file Version", sp->st_version);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Log file mode", sp->st_mode);
+ MAKE_STAT_LIST("Log record cache size", sp->st_lg_bsize);
+ MAKE_STAT_LIST("Current log file size", sp->st_lg_size);
+ MAKE_STAT_LIST("Mbytes written", sp->st_w_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb)", sp->st_w_bytes);
+ MAKE_STAT_LIST("Mbytes written since checkpoint", sp->st_wc_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb) since checkpoint",
+ sp->st_wc_bytes);
+ MAKE_STAT_LIST("Times log written", sp->st_wcount);
+ MAKE_STAT_LIST("Times log written because cache filled up",
+ sp->st_wcount_fill);
+ MAKE_STAT_LIST("Times log flushed", sp->st_scount);
+ MAKE_STAT_LIST("Current log file number", sp->st_cur_file);
+ MAKE_STAT_LIST("Current log file offset", sp->st_cur_offset);
+ MAKE_STAT_LIST("On-disk log file number", sp->st_disk_file);
+ MAKE_STAT_LIST("On-disk log file offset", sp->st_disk_offset);
+ MAKE_STAT_LIST("Max commits in a log flush", sp->st_maxcommitperflush);
+ MAKE_STAT_LIST("Min commits in a log flush", sp->st_mincommitperflush);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * logc_Cmd --
+ * Implements the log cursor command.
+ *
+ * PUBLIC: int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+logc_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Cursor handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *logccmds[] = {
+ "close",
+ "get",
+ NULL
+ };
+ enum logccmds {
+ LOGCCLOSE,
+ LOGCGET
+ };
+ DB_LOGC *logc;
+ DBTCL_INFO *logcip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ logc = (DB_LOGC *)clientData;
+ logcip = _PtrToInfo((void *)logc);
+ result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (logc == NULL) {
+ Tcl_SetResult(interp, "NULL logc pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (logcip == NULL) {
+ Tcl_SetResult(interp, "NULL logc info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the logccmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], logccmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ switch ((enum logccmds)cmdindex) {
+ case LOGCCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = logc->close(logc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "logc close");
+ if (result == TCL_OK) {
+ (void)Tcl_DeleteCommand(interp, logcip->i_name);
+ _DeleteInfo(logcip);
+ }
+ break;
+ case LOGCGET:
+ result = tcl_LogcGet(interp, objc, objv, logc);
+ break;
+ }
+ return (result);
+}
+
+static int
+tcl_LogcGet(interp, objc, objv, logc)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj * CONST *objv;
+ DB_LOGC *logc;
+{
+ static char *logcgetopts[] = {
+ "-current",
+ "-first",
+ "-last",
+ "-next",
+ "-prev",
+ "-set",
+ NULL
+ };
+ enum logcgetopts {
+ LOGCGET_CURRENT,
+ LOGCGET_FIRST,
+ LOGCGET_LAST,
+ LOGCGET_NEXT,
+ LOGCGET_PREV,
+ LOGCGET_SET
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res;
+ u_int32_t flag;
+ int i, myobjc, optindex, result, ret;
+
+ result = TCL_OK;
+ res = NULL;
+ flag = 0;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ logcgetopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum logcgetopts)optindex) {
+ case LOGCGET_CURRENT:
+ FLAG_CHECK(flag);
+ flag |= DB_CURRENT;
+ break;
+ case LOGCGET_FIRST:
+ FLAG_CHECK(flag);
+ flag |= DB_FIRST;
+ break;
+ case LOGCGET_LAST:
+ FLAG_CHECK(flag);
+ flag |= DB_LAST;
+ break;
+ case LOGCGET_NEXT:
+ FLAG_CHECK(flag);
+ flag |= DB_NEXT;
+ break;
+ case LOGCGET_PREV:
+ FLAG_CHECK(flag);
+ flag |= DB_PREV;
+ break;
+ case LOGCGET_SET:
+ FLAG_CHECK(flag);
+ flag |= DB_SET;
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetLsn(interp, objv[i++], &lsn);
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ memset(&data, 0, sizeof(data));
+
+ _debug_check();
+ ret = logc->get(logc, &lsn, &data, flag);
+
+ res = Tcl_NewListObj(0, NULL);
+ if (res == NULL)
+ goto memerr;
+
+ if (ret == 0) {
+ /*
+ * Success. Set up return list as {LSN data} where LSN
+ * is a sublist {file offset}.
+ */
+ myobjc = 2;
+ myobjv[0] = Tcl_NewLongObj((long)lsn.file);
+ myobjv[1] = Tcl_NewLongObj((long)lsn.offset);
+ lsnlist = Tcl_NewListObj(myobjc, myobjv);
+ if (lsnlist == NULL)
+ goto memerr;
+
+ result = Tcl_ListObjAppendElement(interp, res, lsnlist);
+ dataobj = Tcl_NewStringObj(data.data, data.size);
+ if (dataobj == NULL) {
+ goto memerr;
+ }
+ result = Tcl_ListObjAppendElement(interp, res, dataobj);
+ } else
+ result = _ReturnSetup(interp, ret, DB_RETOK_LGGET(ret),
+ "DB_LOGC->get");
+
+ Tcl_SetObjResult(interp, res);
+
+ if (0) {
+memerr: if (res != NULL)
+ Tcl_DecrRefCount(res);
+ Tcl_SetResult(interp, "allocation failed", TCL_STATIC);
+ }
+
+ return (result);
+}
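+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source,
+ * assuming $logc is an open log-cursor widget bound to logc_Cmd above.
+ * A successful get returns a two-element list, {{file offset} data}:
+ *
+ *	set rec [$logc get -first]
+ *	set lsn [lindex $rec 0]
+ *	set data [lindex $rec 1]
+ *	$logc close
+ */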
+#endif
diff --git a/libdb/tcl/tcl_mp.c b/libdb/tcl/tcl_mp.c
new file mode 100644
index 0000000..c734670
--- /dev/null
+++ b/libdb/tcl/tcl_mp.c
@@ -0,0 +1,864 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mp_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int pg_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int tcl_MpGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DB_MPOOLFILE *, DBTCL_INFO *));
+static int tcl_Pg __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DB_MPOOLFILE *, DBTCL_INFO *, int));
+static int tcl_PgInit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+static int tcl_PgIsset __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+
+/*
+ * _MpInfoDelete --
+ * Removes "sub" mp page info structures that are children
+ * of this mp.
+ *
+ * PUBLIC: void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_MpInfoDelete(interp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *mpip; /* Info for mp */
+{
+ DBTCL_INFO *nextp, *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * mp. Remove its commands and info structure.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == mpip && p->i_type == I_PG) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_MpSync --
+ *
+ * PUBLIC: int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpSync(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ DB_LSN lsn, *lsnp;
+ int result, ret;
+
+ result = TCL_OK;
+ lsnp = NULL;
+ /*
+	 * No flags; 2 or 3 args (the lsn is optional).
+ */
+ if (objc == 3) {
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ lsnp = &lsn;
+	} else if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ _debug_check();
+ ret = envp->memp_sync(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync");
+ return (result);
+}
+
+/*
+ * tcl_MpTrickle --
+ *
+ * PUBLIC: int tcl_MpTrickle __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpTrickle(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ int pages;
+ int percent;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "percent");
+ return (TCL_ERROR);
+ }
+
+ result = Tcl_GetIntFromObj(interp, objv[2], &percent);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = envp->memp_trickle(envp, percent, &pages);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp trickle");
+ if (result == TCL_ERROR)
+ return (result);
+
+ res = Tcl_NewIntObj(pages);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+
+}
+
+/*
+ * tcl_Mp --
+ *
+ * PUBLIC: int tcl_Mp __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Mp(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *mpopts[] = {
+ "-create",
+ "-mode",
+ "-nommap",
+ "-pagesize",
+ "-rdonly",
+ NULL
+ };
+ enum mpopts {
+ MPCREATE,
+ MPMODE,
+ MPNOMMAP,
+ MPPAGE,
+ MPRDONLY
+ };
+ DBTCL_INFO *ip;
+ DB_MPOOLFILE *mpf;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, pgsize, mode, optindex, result, ret;
+ char *file, newname[MSG_SIZE];
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ mode = 0;
+ pgsize = 0;
+ memset(newname, 0, MSG_SIZE);
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the file name.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpopts)optindex) {
+ case MPCREATE:
+ flag |= DB_CREATE;
+ break;
+ case MPNOMMAP:
+ flag |= DB_NOMMAP;
+ break;
+ case MPPAGE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+			 * the page size is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &pgsize);
+ break;
+ case MPRDONLY:
+ flag |= DB_RDONLY;
+ break;
+ case MPMODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+	 * Any leftover arg is a file name.  It had better be the last arg.
+ */
+ file = NULL;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ file = Tcl_GetStringFromObj(objv[i++], NULL);
+ }
+
+ snprintf(newname, sizeof(newname), "%s.mp%d",
+ envip->i_name, envip->i_envmpid);
+ ip = _NewInfo(interp, NULL, newname, I_MP);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ _debug_check();
+ if ((ret = envp->memp_fcreate(envp, &mpf, 0)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
+ _DeleteInfo(ip);
+ goto error;
+ }
+
+ /*
+ * XXX
+ * Interface doesn't currently support DB_MPOOLFILE configuration.
+ */
+ if ((ret = mpf->open(mpf, file, flag, mode, (size_t)pgsize)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
+ _DeleteInfo(ip);
+
+ (void)mpf->close(mpf, 0);
+ goto error;
+ }
+
+ /*
+ * Success. Set up return. Set up new info and command widget for
+ * this mpool.
+ */
+ envip->i_envmpid++;
+ ip->i_parent = envip;
+ ip->i_pgsz = pgsize;
+ _SetInfoData(ip, mpf);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+error:
+ return (result);
+}
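+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source;
+ * it assumes the environment command dispatches an "mpool" subcommand
+ * to the routine above, and the handle and file names are made up.
+ * The result is a new mpool widget ("env0.mpN") implemented by mp_Cmd
+ * below:
+ *
+ *	set mp [env0 mpool -create -pagesize 1024 -mode 0644 mpfile]
+ *	$mp fsync
+ *	$mp close
+ */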
+
+/*
+ * tcl_MpStat --
+ *
+ * PUBLIC: int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_MPOOL_STAT *sp;
+ DB_MPOOL_FSTAT **fsp, **savefsp;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+ Tcl_Obj *res1;
+
+ result = TCL_OK;
+ savefsp = NULL;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->memp_stat(envp, &sp, &fsp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Cache size (gbytes)", sp->st_gbytes);
+ MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes);
+ MAKE_STAT_LIST("Number of caches", sp->st_ncache);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Pages mapped into address space", sp->st_map);
+ MAKE_STAT_LIST("Cache hits", sp->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", sp->st_cache_miss);
+ MAKE_STAT_LIST("Pages created", sp->st_page_create);
+ MAKE_STAT_LIST("Pages read in", sp->st_page_in);
+ MAKE_STAT_LIST("Pages written", sp->st_page_out);
+ MAKE_STAT_LIST("Clean page evictions", sp->st_ro_evict);
+ MAKE_STAT_LIST("Dirty page evictions", sp->st_rw_evict);
+ MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle);
+ MAKE_STAT_LIST("Cached pages", sp->st_pages);
+ MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean);
+ MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty);
+ MAKE_STAT_LIST("Hash buckets", sp->st_hash_buckets);
+ MAKE_STAT_LIST("Hash lookups", sp->st_hash_searches);
+ MAKE_STAT_LIST("Longest hash chain found", sp->st_hash_longest);
+ MAKE_STAT_LIST("Hash elements examined", sp->st_hash_examined);
+ MAKE_STAT_LIST("Number of hash bucket nowaits", sp->st_hash_nowait);
+ MAKE_STAT_LIST("Number of hash bucket waits", sp->st_hash_wait);
+ MAKE_STAT_LIST("Maximum number of hash bucket waits",
+ sp->st_hash_max_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Page allocations", sp->st_alloc);
+ MAKE_STAT_LIST("Buckets examined during allocation",
+ sp->st_alloc_buckets);
+ MAKE_STAT_LIST("Maximum buckets examined during allocation",
+ sp->st_alloc_max_buckets);
+ MAKE_STAT_LIST("Pages examined during allocation", sp->st_alloc_pages);
+ MAKE_STAT_LIST("Maximum pages examined during allocation",
+ sp->st_alloc_max_pages);
+
+ /*
+ * Save global stat list as res1. The MAKE_STAT_LIST
+ * macro assumes 'res' so we'll use that to build up
+ * our per-file sublist.
+ */
+ res1 = res;
+ for (savefsp = fsp; fsp != NULL && *fsp != NULL; fsp++) {
+ res = Tcl_NewObj();
+ result = _SetListElem(interp, res, "File Name",
+ strlen("File Name"), (*fsp)->file_name,
+ strlen((*fsp)->file_name));
+ if (result != TCL_OK)
+ goto error;
+ MAKE_STAT_LIST("Page size", (*fsp)->st_pagesize);
+ MAKE_STAT_LIST("Pages mapped into address space",
+ (*fsp)->st_map);
+ MAKE_STAT_LIST("Cache hits", (*fsp)->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", (*fsp)->st_cache_miss);
+ MAKE_STAT_LIST("Pages created", (*fsp)->st_page_create);
+ MAKE_STAT_LIST("Pages read in", (*fsp)->st_page_in);
+ MAKE_STAT_LIST("Pages written", (*fsp)->st_page_out);
+ /*
+ * Now that we have a complete "per-file" stat list, append
+ * that to the other list.
+ */
+ result = Tcl_ListObjAppendElement(interp, res1, res);
+ if (result != TCL_OK)
+ goto error;
+ }
+ Tcl_SetObjResult(interp, res1);
+error:
+ free(sp);
+ if (savefsp != NULL)
+ free(savefsp);
+ return (result);
+}
+
+/*
+ * mp_Cmd --
+ * Implements the "mp" widget.
+ */
+static int
+mp_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mp handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mpcmds[] = {
+ "close",
+ "fsync",
+ "get",
+ NULL
+ };
+ enum mpcmds {
+ MPCLOSE,
+ MPFSYNC,
+ MPGET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result, ret;
+ DBTCL_INFO *mpip;
+ Tcl_Obj *res;
+ char *obj_name;
+
+ Tcl_ResetResult(interp);
+ mp = (DB_MPOOLFILE *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ mpip = _NameToInfo(obj_name);
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the mpcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mpcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mpcmds)cmdindex) {
+ case MPCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = mp->close(mp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "mp close");
+ _MpInfoDelete(interp, mpip);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ break;
+ case MPFSYNC:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = mp->sync(mp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case MPGET:
+ result = tcl_MpGet(interp, objc, objv, mp, mpip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_MpGet --
+ */
+static int
+tcl_MpGet(interp, objc, objv, mp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_MPOOLFILE *mp; /* mp pointer */
+ DBTCL_INFO *mpip; /* mp info pointer */
+{
+ static char *mpget[] = {
+ "-create",
+ "-last",
+ "-new",
+ NULL
+ };
+ enum mpget {
+ MPGET_CREATE,
+ MPGET_LAST,
+ MPGET_NEW
+ };
+
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ db_pgno_t pgno;
+ u_int32_t flag;
+ int i, ipgno, optindex, result, ret;
+ char newname[MSG_SIZE];
+ void *page;
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpget, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the page number.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpget)optindex) {
+ case MPGET_CREATE:
+ flag |= DB_MPOOL_CREATE;
+ break;
+ case MPGET_LAST:
+ flag |= DB_MPOOL_LAST;
+ break;
+ case MPGET_NEW:
+ flag |= DB_MPOOL_NEW;
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+	 * Any leftover arg is a page number.  It had better be the last arg.
+ */
+ ipgno = 0;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?pgno?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &ipgno);
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ snprintf(newname, sizeof(newname), "%s.pg%d",
+ mpip->i_name, mpip->i_mppgid);
+ ip = _NewInfo(interp, NULL, newname, I_PG);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ pgno = ipgno;
+ ret = mp->get(mp, &pgno, flag, &page);
+ result = _ReturnSetup(interp, ret, DB_RETOK_MPGET(ret), "mpool get");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mpool.
+ */
+ mpip->i_mppgid++;
+ ip->i_parent = mpip;
+ ip->i_pgno = pgno;
+ ip->i_pgsz = mpip->i_pgsz;
+ _SetInfoData(ip, page);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)pg_Cmd, (ClientData)page, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+error:
+ return (result);
+}
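+
+/*
+ * Illustrative Tcl-level usage only, not part of the original source;
+ * handle names are hypothetical.  A successful get returns a page
+ * widget ("<mp>.pgN") implemented by pg_Cmd below:
+ *
+ *	set pg [$mp get -create 1]
+ *	$pg init 0
+ *	puts "[$pg pgnum] [$pg pgsize] [$pg is_setto 0]"
+ *	$pg put -dirty
+ */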
+
+/*
+ * pg_Cmd --
+ * Implements the "pg" widget.
+ */
+static int
+pg_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Page handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *pgcmds[] = {
+ "init",
+ "is_setto",
+ "pgnum",
+ "pgsize",
+ "put",
+ "set",
+ NULL
+ };
+ enum pgcmds {
+ PGINIT,
+ PGISSET,
+ PGNUM,
+ PGSIZE,
+ PGPUT,
+ PGSET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result;
+ char *obj_name;
+ void *page;
+ DBTCL_INFO *pgip;
+ Tcl_Obj *res;
+
+ Tcl_ResetResult(interp);
+ page = (void *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ pgip = _NameToInfo(obj_name);
+ mp = NAME_TO_MP(pgip->i_parent->i_name);
+ result = TCL_OK;
+
+ if (page == NULL) {
+ Tcl_SetResult(interp, "NULL page pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (pgip == NULL) {
+ Tcl_SetResult(interp, "NULL page info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the pgcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], pgcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum pgcmds)cmdindex) {
+ case PGNUM:
+ res = Tcl_NewLongObj((long)pgip->i_pgno);
+ break;
+ case PGSIZE:
+ res = Tcl_NewLongObj(pgip->i_pgsz);
+ break;
+ case PGSET:
+ case PGPUT:
+ result = tcl_Pg(interp, objc, objv, page, mp, pgip,
+ cmdindex == PGSET ? 0 : 1);
+ break;
+ case PGINIT:
+ result = tcl_PgInit(interp, objc, objv, page, pgip);
+ break;
+ case PGISSET:
+ result = tcl_PgIsset(interp, objc, objv, page, pgip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_Pg(interp, objc, objv, page, mp, pgip, putop)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DB_MPOOLFILE *mp; /* Mpool pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+ int putop; /* Operation */
+{
+ static char *pgopt[] = {
+ "-clean",
+ "-dirty",
+ "-discard",
+ NULL
+ };
+ enum pgopt {
+ PGCLEAN,
+ PGDIRTY,
+ PGDISCARD
+ };
+ u_int32_t flag;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ pgopt, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum pgopt)optindex) {
+ case PGCLEAN:
+ flag |= DB_MPOOL_CLEAN;
+ break;
+ case PGDIRTY:
+ flag |= DB_MPOOL_DIRTY;
+ break;
+ case PGDISCARD:
+ flag |= DB_MPOOL_DISCARD;
+ break;
+ }
+ }
+
+ _debug_check();
+ if (putop)
+ ret = mp->put(mp, page, flag);
+ else
+ ret = mp->set(mp, page, flag);
+
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "page");
+
+ if (putop) {
+ (void)Tcl_DeleteCommand(interp, pgip->i_name);
+ _DeleteInfo(pgip);
+ }
+ return (result);
+}
+
+static int
+tcl_PgInit(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ s = Tcl_GetByteArrayFromObj(objv[2], &length);
+ if (s == NULL)
+ return (TCL_ERROR);
+ memcpy(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz);
+ result = TCL_OK;
+ } else {
+ p = (long *)page;
+ for (endp = p + (pgsz / sizeof(long)); p < endp; p++)
+ *p = newval;
+ }
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_PgIsset(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ if ((s = Tcl_GetByteArrayFromObj(objv[2], &length)) == NULL)
+ return (TCL_ERROR);
+ result = TCL_OK;
+
+ if (memcmp(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz ) != 0) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ } else {
+ p = (long *)page;
+ /*
+ * If any value is not the same, return 0 (is not set to
+ * this value). Otherwise, if we finish the loop, we return 1
+ * (is set to this value).
+ */
+ for (endp = p + (pgsz/sizeof(long)); p < endp; p++)
+ if (*p != newval) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ }
+
+ res = Tcl_NewIntObj(1);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+#endif
diff --git a/libdb/tcl/tcl_rep.c b/libdb/tcl/tcl_rep.c
new file mode 100644
index 0000000..f5d3e5e
--- /dev/null
+++ b/libdb/tcl/tcl_rep.c
@@ -0,0 +1,405 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+#if CONFIG_TEST
+/*
+ * tcl_RepElect --
+ * Call DB_ENV->rep_elect().
+ *
+ * PUBLIC: int tcl_RepElect
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepElect(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int eid, nsites, pri, result, ret;
+ u_int32_t timeout;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "nsites pri timeout");
+ return (TCL_ERROR);
+ }
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &nsites)) != TCL_OK)
+ return (result);
+ if ((result = Tcl_GetIntFromObj(interp, objv[3], &pri)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[4], &timeout)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->rep_elect(dbenv, nsites, pri, timeout, &eid)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env rep_elect"));
+
+ Tcl_SetObjResult(interp, Tcl_NewIntObj(eid));
+
+ return (TCL_OK);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepFlush --
+ * Call DB_ENV->rep_flush().
+ *
+ * PUBLIC: int tcl_RepFlush
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepFlush(interp, objc, objv, dbenv)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+
+ _debug_check();
+ ret = dbenv->rep_flush(dbenv);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_flush"));
+}
+#endif
+#if CONFIG_TEST
+/*
+ * tcl_RepLimit --
+ * Call DB_ENV->set_rep_limit().
+ *
+ * PUBLIC: int tcl_RepLimit
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepLimit(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t bytes, gbytes;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "gbytes bytes");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &gbytes)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &bytes)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_limit(dbenv, gbytes, bytes)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_limit"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_limit"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepRequest --
+ * Call DB_ENV->set_rep_request().
+ *
+ * PUBLIC: int tcl_RepRequest
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepRequest(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t min, max;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "min max");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &min)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &max)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_request(dbenv, min, max)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_request"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_request"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStart --
+ * Call DB_ENV->rep_start().
+ *
+ * PUBLIC: int tcl_RepStart
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ *
+ * Note that this normally can/should be achieved as an argument to
+ * berkdb env, but we need to test forcible upgrading of clients, which
+ * involves calling this on an open environment handle.
+ */
+int
+tcl_RepStart(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *tclrpstrt[] = {
+ "-client",
+ "-master",
+ NULL
+ };
+ enum tclrpstrt {
+ TCL_RPSTRT_CLIENT,
+ TCL_RPSTRT_MASTER
+ };
+ char *arg;
+ int i, optindex, ret;
+ u_int32_t flag;
+
+ flag = 0;
+
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, "[-master/-client]");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], tclrpstrt,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum tclrpstrt)optindex) {
+ case TCL_RPSTRT_CLIENT:
+ flag |= DB_REP_CLIENT;
+ break;
+ case TCL_RPSTRT_MASTER:
+ flag |= DB_REP_MASTER;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_start(dbenv, NULL, flag);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_start"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepProcessMessage --
+ * Call DB_ENV->rep_process_message().
+ *
+ * PUBLIC: int tcl_RepProcessMessage
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepProcessMessage(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ DBT control, rec;
+ Tcl_Obj *res;
+ void *ctmp, *rtmp;
+ int eid;
+ int freectl, freerec, result, ret;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "id control rec");
+ return (TCL_ERROR);
+ }
+ freectl = freerec = 0;
+
+ memset(&control, 0, sizeof(control));
+ memset(&rec, 0, sizeof(rec));
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &eid)) != TCL_OK)
+ return (result);
+
+ ret = _CopyObjBytes(interp, objv[3], &ctmp,
+ &control.size, &freectl);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ return (result);
+ }
+ control.data = ctmp;
+ ret = _CopyObjBytes(interp, objv[4], &rtmp,
+ &rec.size, &freerec);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ goto out;
+ }
+ rec.data = rtmp;
+ _debug_check();
+ ret = dbenv->rep_process_message(dbenv, &control, &rec, &eid);
+ result = _ReturnSetup(interp, ret, DB_RETOK_REPPMSG(ret),
+ "env rep_process_message");
+
+ /*
+ * If we have a new master, return its environment ID.
+ *
+ * XXX
+ * We should do something prettier to differentiate success
+ * from an env ID, and figure out how to represent HOLDELECTION.
+ */
+ if (result == TCL_OK && ret == DB_REP_NEWMASTER) {
+ res = Tcl_NewIntObj(eid);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freectl)
+ (void)__os_free(NULL, ctmp);
+ if (freerec)
+ (void)__os_free(NULL, rtmp);
+
+ return (result);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStat --
+ * Call DB_ENV->rep_stat().
+ *
+ * PUBLIC: int tcl_RepStat
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepStat(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ DB_REP_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t flag;
+ int myobjc, result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-clear") == 0)
+ flag = DB_STAT_CLEAR;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_stat(dbenv, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "rep stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_* assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LSN("Next LSN expected", &sp->st_next_lsn);
+ MAKE_STAT_LSN("First missed LSN", &sp->st_waiting_lsn);
+ MAKE_STAT_LIST("Duplicate master conditions", sp->st_dupmasters);
+ MAKE_STAT_LIST("Environment ID", sp->st_env_id);
+ MAKE_STAT_LIST("Environment priority", sp->st_env_priority);
+ MAKE_STAT_LIST("Generation number", sp->st_gen);
+ MAKE_STAT_LIST("Duplicate log records received", sp->st_log_duplicated);
+ MAKE_STAT_LIST("Current log records queued", sp->st_log_queued);
+ MAKE_STAT_LIST("Maximum log records queued", sp->st_log_queued_max);
+ MAKE_STAT_LIST("Total log records queued", sp->st_log_queued_total);
+ MAKE_STAT_LIST("Log records received", sp->st_log_records);
+ MAKE_STAT_LIST("Log records requested", sp->st_log_requested);
+ MAKE_STAT_LIST("Master environment ID", sp->st_master);
+ MAKE_STAT_LIST("Master changes", sp->st_master_changes);
+ MAKE_STAT_LIST("Messages with bad generation number",
+ sp->st_msgs_badgen);
+ MAKE_STAT_LIST("Messages processed", sp->st_msgs_processed);
+ MAKE_STAT_LIST("Messages ignored for recovery", sp->st_msgs_recover);
+ MAKE_STAT_LIST("Message send failures", sp->st_msgs_send_failures);
+ MAKE_STAT_LIST("Messages sent", sp->st_msgs_sent);
+ MAKE_STAT_LIST("New site messages", sp->st_newsites);
+ MAKE_STAT_LIST("Transmission limited", sp->st_nthrottles);
+ MAKE_STAT_LIST("Outdated conditions", sp->st_outdated);
+ MAKE_STAT_LIST("Transactions applied", sp->st_txns_applied);
+ MAKE_STAT_LIST("Elections held", sp->st_elections);
+ MAKE_STAT_LIST("Elections won", sp->st_elections_won);
+ MAKE_STAT_LIST("Election phase", sp->st_election_status);
+ MAKE_STAT_LIST("Election winner", sp->st_election_cur_winner);
+ MAKE_STAT_LIST("Election generation number", sp->st_election_gen);
+ MAKE_STAT_LSN("Election max LSN", &sp->st_election_lsn);
+ MAKE_STAT_LIST("Election sites", sp->st_election_nsites);
+ MAKE_STAT_LIST("Election priority", sp->st_election_priority);
+ MAKE_STAT_LIST("Election tiebreaker", sp->st_election_tiebreaker);
+ MAKE_STAT_LIST("Election votes", sp->st_election_votes);
+
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+#endif
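The Tcl commands above (tcl_RepStart, tcl_RepProcessMessage, tcl_RepElect) are
thin shims over the DB_ENV replication methods they call.  As a rough
orientation, the C sketch below shows the message loop those wrappers let the
test scripts drive by hand; the recv_from_peer() transport hook and the literal
nsites/priority/timeout values passed to rep_elect() are illustrative
assumptions, not part of Berkeley DB or of this patch.

	#include <string.h>
	#include "db.h"

	/* Hypothetical transport hook: fill control/rec from a peer message. */
	extern int recv_from_peer(DBT *control, DBT *rec, int *eidp);

	static int
	client_message_loop(DB_ENV *dbenv)
	{
		DBT control, rec;
		int eid, ret;

		memset(&control, 0, sizeof(control));
		memset(&rec, 0, sizeof(rec));

		/* Join the replication group as a client. */
		if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_CLIENT)) != 0)
			return (ret);

		while (recv_from_peer(&control, &rec, &eid) == 0)
			switch (ret = dbenv->rep_process_message(dbenv,
			    &control, &rec, &eid)) {
			case 0:
				break;
			case DB_REP_NEWMASTER:
				/*
				 * eid now names the new master; this is what
				 * tcl_RepProcessMessage hands back to the
				 * script.
				 */
				break;
			case DB_REP_HOLDELECTION:
				/*
				 * Illustrative election parameters: 3 sites,
				 * priority 1, arbitrary timeout.
				 */
				if ((ret = dbenv->rep_elect(dbenv,
				    3, 1, 5000000, &eid)) != 0)
					return (ret);
				break;
			default:
				return (ret);
			}
		return (0);
	}

The rep001-rep005 scripts described in libdb/test/TESTS below exercise this
same loop, but from Tcl, through the wrappers in this file.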
diff --git a/libdb/tcl/tcl_txn.c b/libdb/tcl/tcl_txn.c
new file mode 100644
index 0000000..1526cb1
--- /dev/null
+++ b/libdb/tcl/tcl_txn.c
@@ -0,0 +1,657 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+static int tcl_TxnCommit __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST *, DB_TXN *, DBTCL_INFO *));
+static int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST *));
+
+/*
+ * _TxnInfoDelete --
+ * Removes nested txn info structures that are children
+ * of this txn.
+ * RECURSIVE: Transactions can be arbitrarily nested, so we
+ * must recurse down until we get them all.
+ *
+ * PUBLIC: void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_TxnInfoDelete(interp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *txnip; /* Info for txn */
+{
+ DBTCL_INFO *nextp, *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * txn. Remove its commands and info structure.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == txnip && p->i_type == I_TXN) {
+ _TxnInfoDelete(interp, p);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+/*
+ * tcl_TxnCheckpoint --
+ *
+ * PUBLIC: int tcl_TxnCheckpoint __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnCheckpoint(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *txnckpopts[] = {
+ "-kbyte", "-min",
+ NULL
+ };
+ enum txnckpopts {
+ TXNCKP_KB, TXNCKP_MIN
+ };
+ int i, kb, min, optindex, result, ret;
+
+ result = TCL_OK;
+ kb = min = 0;
+
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnckpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnckpopts)optindex) {
+ case TXNCKP_KB:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-kbyte kb?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &kb);
+ break;
+ case TXNCKP_MIN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-min min?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &min);
+ break;
+ }
+ }
+ _debug_check();
+ ret = envp->txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn checkpoint");
+ return (result);
+}
+
+/*
+ * tcl_Txn --
+ *
+ * PUBLIC: int tcl_Txn __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Txn(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *txnopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-lock_timeout",
+ "-txn_timeout",
+#endif
+ "-nosync",
+ "-nowait",
+ "-parent",
+ "-sync",
+ NULL
+ };
+ enum txnopts {
+#if CONFIG_TEST
+ TXNDIRTY,
+ TXN_LOCK_TIMEOUT,
+ TXN_TIMEOUT,
+#endif
+ TXNNOSYNC,
+ TXNNOWAIT,
+ TXNPARENT,
+ TXNSYNC
+ };
+ DBTCL_INFO *ip;
+ DB_TXN *parent;
+ DB_TXN *txn;
+ Tcl_Obj *res;
+ db_timeout_t lk_time, tx_time;
+ u_int32_t flag, lk_timeflag, tx_timeflag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+
+ parent = NULL;
+ flag = 0;
+ lk_timeflag = tx_timeflag = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnopts)optindex) {
+#if CONFIG_TEST
+ case TXNDIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case TXN_LOCK_TIMEOUT:
+ lk_timeflag = DB_SET_LOCK_TIMEOUT;
+ goto getit;
+ case TXN_TIMEOUT:
+ tx_timeflag = DB_SET_TXN_TIMEOUT;
+getit:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)(optindex == TXN_LOCK_TIMEOUT ?
+ &lk_time : &tx_time));
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ break;
+#endif
+ case TXNNOSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOSYNC;
+ break;
+ case TXNNOWAIT:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOWAIT;
+ break;
+ case TXNPARENT:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-parent txn?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ parent = NAME_TO_TXN(arg);
+ if (parent == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Invalid parent txn: %s\n",
+ arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TXNSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_SYNC;
+ break;
+ }
+ }
+ snprintf(newname, sizeof(newname), "%s.txn%d",
+ envip->i_name, envip->i_envtxnid);
+ ip = _NewInfo(interp, NULL, newname, I_TXN);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_begin(envp, parent, &txn, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this txn.
+ */
+ envip->i_envtxnid++;
+ if (parent)
+ ip->i_parent = _PtrToInfo(parent);
+ else
+ ip->i_parent = envip;
+ _SetInfoData(ip, txn);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ if (tx_timeflag != 0) {
+ ret = txn->set_timeout(txn, tx_time, tx_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
+ if (lk_timeflag != 0) {
+ ret = txn->set_timeout(txn, lk_time, lk_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
+ }
+ return (result);
+}
+
+/*
+ * tcl_TxnStat --
+ *
+ * PUBLIC: int tcl_TxnStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DBTCL_INFO *ip;
+ DB_TXN_ACTIVE *p;
+ DB_TXN_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t i;
+ int myobjc, result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LSN("LSN of last checkpoint", &sp->st_last_ckp);
+ MAKE_STAT_LIST("Time of last checkpoint", sp->st_time_ckp);
+ MAKE_STAT_LIST("Last txn ID allocated", sp->st_last_txnid);
+ MAKE_STAT_LIST("Max Txns", sp->st_maxtxns);
+ MAKE_STAT_LIST("Number aborted txns", sp->st_naborts);
+ MAKE_STAT_LIST("Number active txns", sp->st_nactive);
+ MAKE_STAT_LIST("Maximum active txns", sp->st_maxnactive);
+ MAKE_STAT_LIST("Number txns begun", sp->st_nbegins);
+ MAKE_STAT_LIST("Number committed txns", sp->st_ncommits);
+ MAKE_STAT_LIST("Number restored txns", sp->st_nrestores);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ for (i = 0, p = sp->st_txnarray; i < sp->st_nactive; i++, p++)
+ for (ip = LIST_FIRST(&__db_infohead); ip != NULL;
+ ip = LIST_NEXT(ip, entries)) {
+ if (ip->i_type != I_TXN)
+ continue;
+ if (ip->i_type == I_TXN &&
+ (ip->i_txnp->id(ip->i_txnp) == p->txnid)) {
+ MAKE_STAT_LSN(ip->i_name, &p->lsn);
+ if (p->parentid != 0)
+ MAKE_STAT_STRLIST("Parent",
+ ip->i_parent->i_name);
+ else
+ MAKE_STAT_LIST("Parent", 0);
+ break;
+ }
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_TxnTimeout --
+ *
+ * PUBLIC: int tcl_TxnTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_TXN_TIMEOUT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+	    "txn timeout");
+ return (result);
+}
+
+/*
+ * txn_Cmd --
+ * Implements the "txn" widget.
+ */
+static int
+txn_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Txn handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *txncmds[] = {
+#if CONFIG_TEST
+ "discard",
+ "id",
+ "prepare",
+#endif
+ "abort",
+ "commit",
+ NULL
+ };
+ enum txncmds {
+#if CONFIG_TEST
+ TXNDISCARD,
+ TXNID,
+ TXNPREPARE,
+#endif
+ TXNABORT,
+ TXNCOMMIT
+ };
+ DBTCL_INFO *txnip;
+ DB_TXN *txnp;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+ u_int8_t *gid;
+
+ Tcl_ResetResult(interp);
+ txnp = (DB_TXN *)clientData;
+ txnip = _PtrToInfo((void *)txnp);
+ result = TCL_OK;
+ if (txnp == NULL) {
+ Tcl_SetResult(interp, "NULL txn pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (txnip == NULL) {
+ Tcl_SetResult(interp, "NULL txn info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], txncmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum txncmds)cmdindex) {
+#if CONFIG_TEST
+ case TXNDISCARD:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->discard(txnp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn discard");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNID:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->id(txnp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case TXNPREPARE:
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ gid = (u_int8_t *)Tcl_GetByteArrayFromObj(objv[2], NULL);
+ ret = txnp->prepare(txnp, gid);
+ /*
+ * !!!
+ * DB_TXN->prepare commits all outstanding children. But it
+ * does NOT destroy the current txn handle. So, we must call
+ * _TxnInfoDelete to recursively remove all nested txn handles,
+		 * but we do not call _DeleteInfo on ourselves.
+ */
+ _TxnInfoDelete(interp, txnip);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn prepare");
+ break;
+#endif
+ case TXNABORT:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->abort(txnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn abort");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNCOMMIT:
+ result = tcl_TxnCommit(interp, objc, objv, txnp, txnip);
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_TxnCommit(interp, objc, objv, txnp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_TXN *txnp; /* Transaction pointer */
+ DBTCL_INFO *txnip; /* Info pointer */
+{
+ static char *commitopt[] = {
+ "-nosync",
+ "-sync",
+ NULL
+ };
+ enum commitopt {
+		COMNOSYNC,
+		COMSYNC
+ };
+ u_int32_t flag;
+ int optindex, result, ret;
+
+ COMPQUIET(txnip, NULL);
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc != 2 && objc != 3) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ if (Tcl_GetIndexFromObj(interp, objv[2], commitopt,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[2]));
+ switch ((enum commitopt)optindex) {
+ case COMSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_SYNC;
+ break;
+ case COMNOSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_NOSYNC;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = txnp->commit(txnp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn commit");
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_TxnRecover --
+ *
+ * PUBLIC: int tcl_TxnRecover __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_TxnRecover(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+#define DO_PREPLIST(count) \
+for (i = 0; i < count; i++) { \
+ snprintf(newname, sizeof(newname), "%s.txn%d", \
+ envip->i_name, envip->i_envtxnid); \
+ ip = _NewInfo(interp, NULL, newname, I_TXN); \
+ if (ip == NULL) { \
+ Tcl_SetResult(interp, "Could not set up info", \
+ TCL_STATIC); \
+ return (TCL_ERROR); \
+ } \
+ envip->i_envtxnid++; \
+ ip->i_parent = envip; \
+ p = &prep[i]; \
+ _SetInfoData(ip, p->txn); \
+ Tcl_CreateObjCommand(interp, newname, \
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)p->txn, NULL); \
+ result = _SetListElem(interp, res, newname, strlen(newname), \
+ p->gid, DB_XIDDATASIZE); \
+ if (result != TCL_OK) \
+ goto error; \
+}
+
+ DBTCL_INFO *ip;
+ DB_PREPLIST prep[DBTCL_PREP], *p;
+ Tcl_Obj *res;
+ long count, i;
+ int result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_recover(envp, prep, DBTCL_PREP, &count, DB_FIRST);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ res = Tcl_NewObj();
+ DO_PREPLIST(count);
+
+ /*
+ * If count returned is the maximum size we have, then there
+ * might be more. Keep going until we get them all.
+ */
+ while (count == DBTCL_PREP) {
+ ret = envp->txn_recover(
+ envp, prep, DBTCL_PREP, &count, DB_NEXT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ DO_PREPLIST(count);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+#endif
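tcl_TxnRecover above drains DB_ENV->txn_recover() in batches, starting with
DB_FIRST and switching to DB_NEXT while each batch comes back full, and builds
a txn widget for every prepared transaction it finds.  A minimal C sketch of
the same drain loop follows; the BATCH constant and the blanket decision to
abort every prepared transaction are illustrative assumptions (the Tcl code
uses DBTCL_PREP for the batch size and leaves the commit-or-abort decision to
the test script).

	#include "db.h"

	#define	BATCH	16		/* Illustrative batch size. */

	static int
	resolve_prepared(DB_ENV *dbenv)
	{
		DB_PREPLIST prep[BATCH], *p;
		long count, i;
		u_int32_t flags;
		int ret;

		for (flags = DB_FIRST;; flags = DB_NEXT) {
			if ((ret = dbenv->txn_recover(dbenv,
			    prep, BATCH, &count, flags)) != 0)
				return (ret);
			for (i = 0, p = prep; i < count; i++, p++) {
				/*
				 * p->gid holds the DB_XIDDATASIZE-byte global
				 * ID; a real transaction manager would match
				 * it against its own log before choosing
				 * between commit and abort.
				 */
				if ((ret = p->txn->abort(p->txn)) != 0)
					return (ret);
			}
			if (count < BATCH)	/* Short batch: all drained. */
				break;
		}
		return (0);
	}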
diff --git a/libdb/tcl/tcl_util.c b/libdb/tcl/tcl_util.c
new file mode 100644
index 0000000..943beab
--- /dev/null
+++ b/libdb/tcl/tcl_util.c
@@ -0,0 +1,381 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+/*
+ * bdb_RandCommand --
+ * Implements rand* functions.
+ *
+ * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_RandCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *rcmds[] = {
+ "rand", "random_int", "srand",
+ NULL
+ };
+ enum rcmds {
+ RRAND, RRAND_INT, RSRAND
+ };
+ long t;
+ int cmdindex, hi, lo, result, ret;
+ Tcl_Obj *res;
+ char msg[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum rcmds)cmdindex) {
+ case RRAND:
+ /*
+ * Must be 0 args. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ ret = rand();
+ res = Tcl_NewIntObj(ret);
+ break;
+ case RRAND_INT:
+ /*
+ * Must be 4 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lo hi");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &hi);
+ if (result == TCL_OK) {
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ if (t > RAND_MAX) {
+ snprintf(msg, MSG_SIZE,
+ "Max random is higher than %ld\n",
+ (long)RAND_MAX);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ res = Tcl_NewIntObj(ret);
+ }
+ break;
+ case RSRAND:
+ /*
+ * Must be 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "seed");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result == TCL_OK) {
+ srand((u_int)lo);
+ res = Tcl_NewIntObj(0);
+ }
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * tcl_Mutex --
+ * Opens an env mutex.
+ *
+ * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
+ * PUBLIC: DBTCL_INFO *));
+ */
+int
+tcl_Mutex(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ _MUTEX_DATA *md;
+ int i, mode, nitems, result, ret;
+ char newname[MSG_SIZE];
+
+ md = NULL;
+ result = TCL_OK;
+ mode = nitems = ret = 0;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "mode nitems");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &mode);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ result = Tcl_GetIntFromObj(interp, objv[3], &nitems);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+
+ snprintf(newname, sizeof(newname),
+ "%s.mutex%d", envip->i_name, envip->i_envmutexid);
+ ip = _NewInfo(interp, NULL, newname, I_MUTEX);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /*
+ * Set up mutex.
+ */
+ /*
+ * Map in the region.
+ *
+ * XXX
+ * We don't bother doing this "right", i.e., using the shalloc
+ * functions, just grab some memory knowing that it's correctly
+ * aligned.
+ */
+ _debug_check();
+ if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0)
+ goto posixout;
+ md->env = envp;
+ md->n_mutex = nitems;
+ md->size = sizeof(_MUTEX_ENTRY) * nitems;
+
+ md->reginfo.type = REGION_TYPE_MUTEX;
+ md->reginfo.id = INVALID_REGION_TYPE;
+ md->reginfo.mode = mode;
+ md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK;
+ if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0)
+ goto posixout;
+ md->marray = md->reginfo.addr;
+
+ /* Initialize a created region. */
+ if (F_ISSET(&md->reginfo, REGION_CREATE))
+ for (i = 0; i < nitems; i++) {
+ md->marray[i].val = 0;
+ if ((ret = __db_mutex_init_int(envp,
+ &md->marray[i].m, i, 0)) != 0)
+ goto posixout;
+ }
+ R_UNLOCK(envp, &md->reginfo);
+
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mutex.
+ */
+ envip->i_envmutexid++;
+ ip->i_parent = envip;
+ _SetInfoData(ip, md);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+ return (TCL_OK);
+
+posixout:
+ if (ret > 0)
+ Tcl_PosixError(interp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mutex");
+ _DeleteInfo(ip);
+
+ if (md != NULL) {
+ if (md->reginfo.addr != NULL)
+ (void)__db_r_detach(md->env,
+ &md->reginfo, F_ISSET(&md->reginfo, REGION_CREATE));
+ __os_free(md->env, md);
+ }
+ return (result);
+}
+
+/*
+ * mutex_Cmd --
+ * Implements the "mutex" widget.
+ */
+static int
+mutex_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mutex handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mxcmds[] = {
+ "close",
+ "get",
+ "getval",
+ "release",
+ "setval",
+ NULL
+ };
+ enum mxcmds {
+ MXCLOSE,
+ MXGET,
+ MXGETVAL,
+ MXRELE,
+ MXSETVAL
+ };
+ DB_ENV *dbenv;
+ DBTCL_INFO *envip, *mpip;
+ _MUTEX_DATA *mp;
+ Tcl_Obj *res;
+ int cmdindex, id, result, newval;
+
+ Tcl_ResetResult(interp);
+ mp = (_MUTEX_DATA *)clientData;
+ mpip = _PtrToInfo((void *)mp);
+ envip = mpip->i_parent;
+ dbenv = envip->i_envp;
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mxcmds)cmdindex) {
+ case MXCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)__db_r_detach(mp->env, &mp->reginfo, 0);
+ res = Tcl_NewIntObj(0);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ __os_free(mp->env, mp);
+ break;
+ case MXRELE:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_UNLOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGET:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_LOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGETVAL:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ res = Tcl_NewLongObj((long)mp->marray[id].val);
+ break;
+ case MXSETVAL:
+ /*
+ * Check for 2 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id val");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &newval);
+ if (result != TCL_OK)
+ break;
+ mp->marray[id].val = newval;
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
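The "berkdb random_int lo hi" branch of bdb_RandCommand above maps a raw
rand() value into the inclusive range [lo, hi] by scaling against
RAND_MAX + 1.  The same computation, pulled out as a standalone helper for
clarity (the name random_int_range is illustrative, not a Berkeley DB
function):

	#include <stdlib.h>

	/* Return a pseudo-random integer in [lo, hi]; assumes hi >= lo. */
	static int
	random_int_range(int lo, int hi)
	{
		long t = rand();

		/*
		 * Dividing by RAND_MAX + 1 (not RAND_MAX) keeps the quotient
		 * strictly below 1, so the scaled result never exceeds hi.
		 */
		return ((int)(((double)t / ((double)RAND_MAX + 1)) *
		    (hi - lo + 1)) + lo);
	}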
diff --git a/libdb/test/TESTS b/libdb/test/TESTS
new file mode 100644
index 0000000..eac6396
--- /dev/null
+++ b/libdb/test/TESTS
@@ -0,0 +1,1437 @@
+# Automatically built by dist/s_test; may require local editing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+bigfile001
+ Create a database greater than 4 GB in size. Close, verify.
+ Grow the database somewhat. Close, reverify. Lather, rinse,
+ repeat. Since it will not work on all systems, this test is
+ not run by default.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+bigfile002
+ This one should be faster and not require so much disk space,
+ although it doesn't test as extensively. Create an mpool file
+ with 1K pages. Dirty page 6000000. Sync.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dbm
+ Historic DBM interface test. Use the first 1000 entries from the
+ dictionary. Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Then reopen the file, re-retrieve everything. Finally, delete
+ everything.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead001
+ Use two different configurations to test deadlock detection among a
+ variable number of processes. One configuration has the processes
+ deadlocked in a ring. The other has the processes all deadlocked on
+ a single resource.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead002
+ Same test as dead001, but use "detect on every collision" instead
+	of a separate deadlock detector.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead003
+
+ Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+ DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead006
+	Use timeouts rather than the normal deadlock detector (dd) algorithm.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead007
+	Use timeouts rather than the normal deadlock detector (dd) algorithm.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env001
+ Test of env remove interface (formerly env_remove).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env002
+ Test of DB_LOG_DIR and env name resolution.
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the set_lg_dir option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- db_config and a file are present,
+ only the file is respected (see doc/env/naming.html).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env003
+ Test DB_TMP_DIR and env name resolution
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the DB_TMP_DIR config file option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the -tmp_dir config option is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- -tmp_dir and a file are present,
+ only the file is respected (see doc/env/naming.html).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env004
+ Test multiple data directories. Do a bunch of different opens
+ to make sure that the files are detected in different directories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env005
+ Test that using subsystems without initializing them correctly
+ returns an error. Cannot test mpool, because it is assumed in
+ the Tcl code.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env006
+ Make sure that all the utilities exist and run.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env007
+ Test various DB_CONFIG config file options.
+ 1) Make sure command line option is respected
+ 2) Make sure that config file option is respected
+ 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+ method is used, only the file is respected.
+ Then test all known config options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env008
+ Test environments and subdirectories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env009
+ Test calls to all the various stat functions. We have several
+ sprinkled throughout the test suite, but this will ensure that
+ we run all of them at least once.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env010
+ Run recovery in an empty directory, and then make sure we can still
+ create a database in that directory.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env011
+ Run with region overwrite flag.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+jointest
+ Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+ with differing index orders and selectivity.
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those
+ work, everything else does as well. We'll create test databases
+ called join1.db, join2.db, join3.db, and join4.db. The number on
+ the database describes the duplication -- duplicates are of the
+ form 0, N, 2N, 3N, ... where N is the number of the database.
+ Primary.db is the primary database, and null.db is the database
+ that has no matching duplicates.
+
+ We should test this on all btrees, all hash, and a combination thereof
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock001
+ Make sure that the basic lock tests work. Do some simple gets
+ and puts for a single locker.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock002
+ Exercise basic multi-process aspects of lock.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock003
+ Exercise multi-process aspects of lock. Generate a bunch of parallel
+ testers that try to randomly obtain locks; make sure that the locks
+ correctly protect corresponding objects.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock004
+	Test locker IDs wrapping around.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock005
+ Check that page locks are being released properly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log001
+ Read/write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log002
+ Tests multiple logs
+ Log truncation
+ LSN comparison and file functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log003
+ Verify that log_flush is flushing records correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log004
+ Make sure that if we do PREVs on a log, but the beginning of the
+ log has been truncated, we do the right thing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log005
+ Check that log file sizes can change on the fly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp001
+ Randomly updates pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp002
+ Tests multiple processes accessing and modifying the same files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp003
+ Test reader-only/writer process combinations; we use the access methods
+ for testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex001
+ Test basic mutex functionality
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex002
+ Test basic mutex synchronization
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex003
+ Generate a bunch of parallel testers that try to randomly obtain locks.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd001
+ Per-operation recovery tests for non-duplicate, non-split
+ messages. Makes sure that we exercise redo, undo, and do-nothing
+ condition. Any test that appears with the message (change state)
+ indicates that we've already run the particular test, but we are
+ running it again so that we can change the state of the data base
+	running it again so that we can change the state of the database
+ tests as well).
+
+ These are the most basic recovery tests. We do individual recovery
+ tests for each operation in the access method interface. First we
+ create a file and capture the state of the database (i.e., we copy
+	it).  Then we run a transaction containing a single operation.  In
+ one test, we abort the transaction and compare the outcome to the
+ original copy of the file. In the second test, we restore the
+ original copy of the database and then run recovery and compare
+ this against the actual database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd002
+ Split recovery tests. For every known split log message, makes sure
+ that we exercise redo, undo, and do-nothing condition.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd003
+ Duplicate recovery tests. For every known duplicate log message,
+ makes sure that we exercise redo, undo, and do-nothing condition.
+
+ Test all the duplicate log messages and recovery operations. We make
+ sure that we exercise all possible recovery actions: redo, undo, undo
+ but no fix necessary and redo but no fix necessary.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd004
+ Big key test where big key gets elevated to internal page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd005
+ Verify reuse of file ids works on catastrophic recovery.
+
+ Make sure that we can do catastrophic recovery even if we open
+ files using the same log file id.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd006
+ Nested transactions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd007
+ File create/delete tests.
+
+ This is a recovery test for create/delete of databases. We have
+ hooks in the database so that we can abort the process at various
+ points and make sure that the transaction doesn't commit. We
+	then need to recover and make sure the file correctly exists
+ or not, as the case may be.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd008
+ Test deeply nested transactions and many-child transactions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd009
+ Verify record numbering across split/reverse splits and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd010
+ Test stability of btree duplicates across btree off-page dup splits
+ and reverse splits and across recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd011
+ Verify that recovery to a specific timestamp works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd012
+ Test of log file ID management. [#2288]
+ Test recovery handling of file opens and closes.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd013
+ Test of cursor adjustment on child transaction aborts. [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd014
+ This is a recovery test for create/delete of queue extents. We
+	then need to recover and make sure the file correctly exists
+ or not, as the case may be.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd015
+ This is a recovery test for testing lots of prepared txns.
+	This test forces txn_recover to be called with the DB_FIRST flag
+	and then with DB_NEXT.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd016
+	This is a recovery test for running recovery while recovery is
+	already running.  While bad things may or may not
+ happen, if recovery is then run properly, things should be correct.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd017
+ Test recovery and security. This is basically a watered
+ down version of recd001 just to verify that encrypted environments
+ can be recovered.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd018
+ Test recover of closely interspersed checkpoints and commits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd019
+ Test txn id wrap-around and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd020
+ Test recovery after checksum error.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep001
+ Replication rename and forced-upgrade test.
+
+ Run a modified version of test001 in a replicated master environment;
+ verify that the database on the client is correct.
+ Next, remove the database, close the master, upgrade the
+ client, reopen the master, and make sure the new master can correctly
+ run test001 and propagate it in the other direction.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep002
+ Basic replication election test.
+
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, in various scenarios.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep003
+ Repeated shutdown/restart replication test
+
+ Run a quick put test in a replicated master environment; start up,
+ shut down, and restart client processes, with and without recovery.
+ To ensure that environment state is transient, use DB_PRIVATE.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep004
+ Test of DB_REP_LOGSONLY.
+
+ Run a quick put test in a master environment that has one logs-only
+ client. Shut down, then run catastrophic recovery in the logs-only
+ client and check that the database is present and populated.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep005
+ Replication election test with error handling.
+
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, forcing errors at various
+ locations in the election path.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc001
+ Test RPC server timeouts for cursor, txn and env handles.
+ Test RPC specifics, primarily that unsupported functions return
+ errors and such.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc002
+ Test invalid RPC functions and make sure we error them correctly
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc004
+ Test RPC server and security
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc005
+ Test RPC server handle ID sharing
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc001
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc002
+ Recno backing file test #2: test of set_re_delim. Specify a backing
+ file with colon-delimited records, and make sure they are correctly
+ interpreted.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc003
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc004
+ Recno backing file test for EOF-terminated records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+scr###
+	The scr### directories contain shell scripts that test a variety of
+ things, including things about the distribution itself. These
+ tests won't run on most systems, so don't even try to run them.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest001
+ Tests multiple access methods in one subdb
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+ Rotate methods and repeat [#762].
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest002
+ Tests multiple access methods in one subdb access by multiple
+ processes.
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Fork off several child procs to each delete selected
+ data from their subdb and then exit
+ Dump file, verify contents of each subdb is correct
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+	Fork off some child procs to each manipulate one subdb and when
+ they are finished, verify the contents of the databases.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec001
+ Test of security interface
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec002
+ Test of security interface and catching errors in the
+ face of attackers overwriting parts of existing files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex001
+ Basic secondary index put/delete test
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex002
+ Basic cursor-based secondary index put/delete test
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex003
+ sindex001 with secondaries created and closed mid-test
+ Basic secondary index put/delete test with secondaries
+ created mid-test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex004
+ sindex002 with secondaries created and closed mid-test
+ Basic cursor-based secondary index put/delete test, with
+ secondaries created mid-test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex006
+ Basic secondary index put/delete test with transactions
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb001
+ Tests mixing db and subdb operations
+ Create a db, add data, try to create a subdb.
+ Test naming db and subdb with a leading - for correct parsing
+ Existence check -- test use of -excl with subdbs
+
+ Test non-subdb and subdb operations
+ Test naming (filenames begin with -)
+ Test existence (cannot create subdb of same name with -excl)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb002
+ Tests basic subdb functionality
+ Small keys, small data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+ Then repeat using an environment.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb003
+ Tests many subdbs
+ Creates many subdbs and puts a small amount of
+ data in each (many defaults to 2000)
+
+ Use the first 10,000 entries from the dictionary as subdbnames.
+ Insert each with entry as name of subdatabase and a partial list
+ as key/data. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb004
+ Tests large subdb names
+ subdb name = filecontents,
+ key = filename, data = filecontents
+ Put/get per key
+ Dump file
+ Dump subdbs, verify data and subdb name match
+
+ Create 1 db with many large subdbs. Use the contents as subdb names.
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb005
+ Tests cursor operations in subdbs
+ Put/get per key
+ Verify cursor operations work within subdb
+ Verify cursor operations do not work across subdbs
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb006
+ Tests intra-subdb join
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+ everything else does as well. We'll create test databases called
+ sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+ describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+ ... where N is the number of the database. Primary.db is the primary
+ database, and sub0.db is the database that has no matching duplicates.
+ All of these are within a single database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb007
+ Tests page size difference errors between subdbs.
+ Test 3 different scenarios for page sizes.
+ 1. Create/open with a default page size, 2nd subdb create with
+ specified different one, should error.
+ 2. Create/open with specific page size, 2nd subdb create with
+ different one, should error.
+ 3. Create/open with specified page size, 2nd subdb create with
+ same specified size, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb008
+ Tests lorder difference errors between subdbs.
+ Test 3 different scenarios for lorder.
+ 1. Create/open with specific lorder, 2nd subdb create with
+ different one, should error.
+	2. Create/open with a default lorder, 2nd subdb create with
+ specified different one, should error.
+ 3. Create/open with specified lorder, 2nd subdb create with
+ same specified lorder, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb009
+ Test DB->rename() method for subdbs
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb010
+ Test DB->remove() method and DB->truncate() for subdbs
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb011
+ Test deleting Subdbs with overflow pages
+ Create 1 db with many large subdbs.
+ Test subdatabases with overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb012
+ Test subdbs with locking and transactions
+	Tests that creating and removing subdbs while handles
+ are open works correctly, and in the face of txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test001
+ Small keys/data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test002
+ Small keys/medium data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test003
+ Small keys/large data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Take the source files and dbtest executable and enter their names
+ as the key with their contents as data. After all are entered,
+ retrieve all; compare output to original. Close file, reopen, do
+ retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test004
+ Small keys/medium data
+ Put/get per key
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database.
+ Read through the database sequentially using cursors and
+ delete each element.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test005
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database; close
+ it and reopen it. Then read through the database sequentially
+ using cursors and delete each element.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test006
+ Small keys/medium data
+ Put/get per key
+ Keyed delete and verify
+
+ Keyed delete test.
+ Create database.
+ Go through database, deleting all entries by key.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test007
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Keyed delete
+
+ Check that delete operations work. Create a database; close
+ database and reopen it. Then issues delete by key for each
+ entry.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test008
+ Small keys/large data
+ Put/get per key
+ Loop through keys by steps (which change)
+ ... delete each key at step
+ ... add each key back
+ ... change step
+ Confirm that overflow pages are getting reused
+
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, begin
+	looping through the entries, deleting some pairs and then re-adding them.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test009
+ Small keys/large data
+ Same as test008; close and reopen database
+
+ Check that we reuse overflow pages. Create database with lots of
+ big key/data pairs. Go through and delete and add keys back
+ randomly. Then close the DB and make sure that we have everything
+ we think we should.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test010
+ Duplicate test
+ Small key/data pairs.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test011
+ Duplicate test
+ Small key/data pairs.
+ Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+ To test off-page duplicates, run with small pagesize.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ Then do some key_first/key_last add_before, add_after operations.
+ This does not work for recno
+
+ To test if dups work when they fall off the main page, run this with
+ a very tiny page size.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test012
+ Large keys/small data
+ Same as test003 except use big keys (source files and
+ executables) and small data (the file/executable names).
+
+ Take the source files and dbtest executable and enter their contents
+ as the key with their names as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test013
+ Partial put test
+ Overwrite entire records using partial puts.
+	Make sure that the NOOVERWRITE flag works.
+
+ 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+ 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+ 3. Actually overwrite each one with its datum reversed.
+
+ No partial testing here.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test014
+ Exercise partial puts on short data
+ Run 5 combinations of numbers of characters to replace,
+ and number of times to increase the size by.
+
+ Partial put test, small data, replacing with same size. The data set
+ consists of the first nentries of the dictionary. We will insert them
+ (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+ we'll try to perform partial puts of some characters at the beginning,
+ some at the end, and some at the middle.
+
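+	A rough sketch of a partial put through the Tcl wrappers this suite
+	uses (the handle, file name, and offsets are purely illustrative;
+	-partial takes a {doff dlen} pair):
+
+		set db [berkdb_open -create -btree $testdir/t014.db]
+		$db put key0001 "abcdefghij"
+		# replace 3 bytes starting at offset 2 with "XYZ"
+		$db put -partial {2 3} key0001 "XYZ"
+		$db close
+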
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test015
+ Partial put test
+ Partial put test where the key does not initially exist.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test016
+ Partial put test
+ Partial put where the datum gets shorter as a result of the put.
+
+ Partial put test where partial puts make the record smaller.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, go back and do partial puts,
+ replacing a random-length string with the key value.
+ Then verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test017
+ Basic offpage duplicate test.
+
+ Run duplicates with small page size so that we test off page duplicates.
+ Then after we have an off-page database, test with overflow pages too.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test018
+ Offpage duplicate test
+ Key_{first,last,before,after} offpage duplicates.
+ Run duplicates with small page size so that we test off page
+ duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test019
+ Partial get test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test020
+ In-Memory database tests.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test021
+ Btree range tests.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self, reversed as key and self as data.
+ After all are entered, retrieve each using a cursor SET_RANGE, and
+ getting about 20 keys sequentially after it (in some cases we'll
+ run out towards the end of the file).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test022
+ Test of DB->getbyteswapped().
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test023
+ Duplicate test
+ Exercise deletes and cursor operations within a duplicate set.
+ Add a key with duplicates (first time on-page, second time off-page)
+ Number the dups.
+ Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test024
+ Record number retrieval test.
+ Test the Btree and Record number get-by-number functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test025
+ DB_APPEND flag test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test026
+ Small keys/medium data w/duplicates
+ Put/get per key.
+ Loop through keys -- delete each key
+ ... test that cursors delete duplicates correctly
+
+	Keyed delete test through a cursor. If ndups is small, this will
+ test on-page dups; if it's large, it will test off-page dups.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test027
+ Off-page duplicate test
+ Test026 with parameters to force off-page duplicates.
+
+	Check that delete operations work. Create a database; close
+	the database and reopen it. Then issue a delete by key for
+	each entry.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test028
+ Cursor delete test
+ Test put operations after deleting through a cursor.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test029
+ Test the Btree and Record number renumbering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test030
+ Test DB_NEXT_DUP Functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test031
+ Duplicate sorting functionality
+ Make sure DB_NODUPDATA works.
+
+ Use the first 10,000 entries from the dictionary.
+	Insert each with self as key and "ndups" duplicates.
+	For the data field, prepend random five-char strings (see test032)
+	so that we force the duplicate sorting code to do something.
+ Along the way, test that we cannot insert duplicate duplicates
+ using DB_NODUPDATA.
+
+ By setting ndups large, we can make this an off-page test
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test032
+ DB_GET_BOTH, DB_GET_BOTH_RANGE
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH functionality by retrieving each dup in the file
+ explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+ the unique key prefix (cursor only). Finally test the failure case.
+
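+	A rough sketch of the two flags through the Tcl wrappers (handle and
+	variable names are illustrative):
+
+		# exact key/data match, on a DB or cursor handle
+		set ret [$db get -get_both $key $datum]
+		# cursor only: match the key and the smallest datum >= a prefix
+		set dbc [$db cursor]
+		set ret [$dbc get -get_both_range $key $prefix]
+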
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test033
+ DB_GET_BOTH without comparison function
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and data; add duplicate records for each. After all are
+ entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+ DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+ nonexistent keys.
+
+ XXX
+ This does not work for rbtree.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test034
+ test032 with off-page duplicates
+ DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test035
+ Test033 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test036
+ Test KEYFIRST and KEYLAST when the key doesn't exist
+ Put nentries key/data pairs (from the dictionary) using a cursor
+	and KEYFIRST and KEYLAST (this tests the case where we use cursor
+ put for non-existent keys).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test037
+ Test DB_RMW
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test038
+ DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+	this an off-page test.
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test039
+ DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+ function.
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test040
+ Test038 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test041
+ Test039 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test042
+ Concurrent Data Store test (CDB)
+
+ Multiprocess DB test; verify that locking is working for the
+ concurrent access method product.
+
+ Use the first "nentries" words from the dictionary. Insert each with
+ self as key and a fixed, medium length data string. Then fire off
+ multiple processes that bang on the database. Each one should try to
+ read and write random keys. When they rewrite, they'll append their
+	pid to the data string (sometimes doing a rewrite, sometimes doing a
+ partial put). Some will use cursors to traverse through a few keys
+ before finding one to write.
+
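+	A rough sketch of the environment setup, assuming berkdb_env accepts
+	a -cdb flag for the concurrent access product (paths and handle names
+	are illustrative):
+
+		set env [berkdb_env -create -cdb -home $testdir]
+		set db [berkdb_open -create -btree -env $env data042.db]
+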
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test043
+ Recno renumbering and implicit creation test
+ Test the Record number implicit creation and renumbering options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test044
+ Small system integration tests
+ Test proper functioning of the checkpoint daemon,
+ recovery, transactions, etc.
+
+ System integration DB test: verify that locking, recovery, checkpoint,
+ and all the other utilities basically work.
+
+ The test consists of $nprocs processes operating on $nfiles files. A
+ transaction consists of adding the same key/data pair to some random
+ number of these files. We generate a bimodal distribution in key size
+ with 70% of the keys being small (1-10 characters) and the remaining
+ 30% of the keys being large (uniform distribution about mean $key_avg).
+ If we generate a key, we first check to make sure that the key is not
+ already in the dataset. If it is, we do a lookup.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test045
+ Small random tester
+ Runs a number of random add/delete/retrieve operations.
+ Tests both successful conditions and error conditions.
+
+ Run the random db tester on the specified access method.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test046
+ Overwrite test of small/big key/data with cursor checks.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test047
+ DBcursor->c_get get test with SET_RANGE option.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test048
+ Cursor stability across Btree splits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test049
+ Cursor operations on uninitialized cursors.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test050
+ Overwrite test of small/big key/data with cursor checks for Recno.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test051
+ Fixed-length record Recno test.
+ 0. Test various flags (legal and illegal) to open
+ 1. Test partial puts where dlen != size (should fail)
+ 2. Partial puts for existent record -- replaces at beg, mid, and
+ end of record, as well as full replace
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test052
+ Renumbering record Recno test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test053
+ Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+ methods.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test054
+ Cursor maintenance during key/data deletion.
+
+ This test checks for cursor maintenance in the presence of deletes.
+	There are ten different scenarios to test; scenario 1 is sketched after the list:
+ 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+ 2. No duplicates. Cursor is positioned right before key K, Delete K,
+ do a next on the cursor.
+ 3. No duplicates. Cursor is positioned on key K, do a regular delete
+ of K, do a current get on K.
+ 4. Repeat 3 but do a next instead of current.
+ 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+ does a delete. Then we do a non-cursor get.
+ 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ do a delete of the entire Key. Test cursor current.
+ 7. Continue last test and try cursor next.
+ 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ Cursor B is in the same duplicate set and deletes a different item.
+ Verify that the cursor is in the right place.
+	9. Cursors A and B are in the same place in the same duplicate set. A
+ deletes its item. Do current on B.
+ 10. Continue 8 and do a next on B.
+
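+	A rough Tcl sketch of scenario 1, using wrappers that appear in the
+	other scripts in this change (handle names are illustrative):
+
+		set dbc [$db cursor]
+		set ret [$dbc get -set $key]	;# position cursor A on the key
+		error_check_good c_del [$dbc del] 0
+		set ret [$db get $key]		;# non-cursor get; expect no data
+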
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test055
+ Basic cursor operations.
+ This test checks basic cursor operations.
+	There are three different scenarios to test:
+ 1. (no dups) Set cursor, retrieve current.
+ 2. (no dups) Set cursor, retrieve next.
+ 3. (no dups) Set cursor, retrieve prev.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test056
+ Cursor maintenance during deletes.
+ Check if deleting a key when a cursor is on a duplicate of that
+ key works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test057
+ Cursor maintenance during key deletes.
+ Check if we handle the case where we delete a key with the cursor on
+ it and then add the same key. The cursor should not get the new item
+ returned, but the item shouldn't disappear.
+	Run two tests, one where the overwriting put is done with a put and
+ one where it's done with a cursor put.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test058
+ Verify that deleting and reading duplicates results in correct ordering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test059
+ Cursor ops work with a partial length of 0.
+ Make sure that we handle retrieves of zero-length data items correctly.
+	The following ops should allow a partial data retrieve of 0-length.
+ db_get
+ db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test060
+ Test of the DB_EXCL flag to DB->open().
+ 1) Attempt to open and create a nonexistent database; verify success.
+ 2) Attempt to reopen it; verify failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test061
+ Test of txn abort and commit for in-memory databases.
+ a) Put + abort: verify absence of data
+ b) Put + commit: verify presence of data
+ c) Overwrite + abort: verify that data is unchanged
+ d) Overwrite + commit: verify that data has changed
+ e) Delete + abort: verify that data is still present
+ f) Delete + commit: verify that data has been deleted
+
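+	A rough sketch of case (a), using the Tcl wrappers seen elsewhere in
+	this change (handle names are illustrative):
+
+		set t [$env txn]
+		$db put -txn $t $key $data
+		$t abort
+		set ret [$db get $key]	;# expect an empty result
+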
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test062
+ Test of partial puts (using DB_CURRENT) onto duplicate pages.
+	Insert the first 200 words from the dictionary 200 times each with
+ self as key and <random letter>:self as data. Use partial puts to
+ append self again to data; verify correctness.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test063
+ Test of the DB_RDONLY flag to DB->open
+ Attempt to both DB->put and DBC->c_put into a database
+ that has been opened DB_RDONLY, and check for failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test064
+ Test of DB->get_type
+ Create a database of type specified by method.
+ Make sure DB->get_type returns the right thing with both a normal
+ and DB_UNKNOWN open.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test065
+ Test of DB->stat(DB_FASTSTAT)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test066
+ Test of cursor overwrites of DB_CURRENT w/ duplicates.
+
+ Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+ database with duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test067
+ Test of DB_CURRENT partial puts onto almost empty duplicate
+ pages, with and without DB_DUP_SORT.
+
+ Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+ This test was written to address the following issue, #2 in the
+ list of issues relating to bug #0820:
+
+ 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+ In Btree, the DB_CURRENT overwrite of off-page duplicate records
+ first deletes the record and then puts the new one -- this could
+ be a problem if the removal of the record causes a reverse split.
+ Suggested solution is to acquire a cursor to lock down the current
+ record, put a new record after that record, and then delete using
+ the held cursor.
+
+ It also tests the following, #5 in the same list of issues:
+ 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+ set, duplicate comparison routine specified.
+ The partial change does not change how data items sort, but the
+	record to be put isn't built yet, and the record that is supplied is
+	the one checked for ordering compatibility.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test068
+ Test of DB_BEFORE and DB_AFTER with partial puts.
+ Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+ check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test069
+ Test of DB_CURRENT partial puts without duplicates-- test067 w/
+ small ndups to ensure that partial puts to DB_CURRENT work
+ correctly in the absence of duplicate pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test070
+ Test of DB_CONSUME (Four consumers, 1000 items.)
+
+ Fork off six processes, four consumers and two producers.
+ The producers will each put 20000 records into a queue;
+ the consumers will each get 10000.
+ Then, verify that no record was lost or retrieved twice.
+
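+	The producer and consumer halves map onto the Tcl wrappers roughly
+	as below; conscript.tcl in this change uses the same calls:
+
+		set rno [$db put -append $data]	;# producer: returns the record number
+		set ret [$db get -consume]	;# consumer: dequeues the head record
+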
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test071
+ Test of DB_CONSUME (One consumer, 10000 items.)
+	This is DB Test 70, with one consumer, one producer, and 10000 items.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test072
+ Test of cursor stability when duplicates are moved off-page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test073
+ Test of cursor stability on duplicate pages.
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first.
+ occur relative to the new datum)
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test074
+ Test of DB_NEXT_NODUP.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test075
+ Test of DB->rename().
+ (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test076
+ Test creation of many small databases in a single environment. [#1528].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test077
+ Test of DB_GET_RECNO [#1206].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test078
+ Test of DBC->c_count(). [#303]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test079
+ Test of deletes in large trees. (test006 w/ sm. pagesize).
+
+ Check that delete operations work in large btrees. 10000 entries
+ and a pagesize of 512 push this out to a four-level btree, with a
+ small fraction of the entries going on overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test080
+ Test of DB->remove()
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test081
+ Test off-page duplicates and overflow pages together with
+ very large keys (key/data as file contents).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test082
+ Test of DB_PREV_NODUP (uses test074).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test083
+ Test of DB->key_range.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test084
+ Basic sanity test (test001) with large (64K) pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test085
+ Test of cursor behavior when a cursor is pointing to a deleted
+ btree key which then has duplicates added. [#2473]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test086
+ Test of cursor stability across btree splits/rsplits with
+ subtransaction aborts (a variant of test048). [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test087
+ Test of cursor stability when converting to and modifying
+ off-page duplicate pages with subtransaction aborts. [#2373]
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each. Do each put twice,
+ first aborting, then committing, so we're sure to abort the move
+ to off-page dups at some point.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first.
+ occur relative to the new datum)
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test088
+ Test of cursor stability across btree splits with very
+ deep trees (a variant of test048). [#2514]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test089
+ Concurrent Data Store test (CDB)
+
+ Enhanced CDB testing to test off-page dups, cursor dups and
+ cursor operations like c_del then c_get.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test090
+ Test for functionality near the end of the queue using test001.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test091
+ Test of DB_CONSUME_WAIT.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test092
+ Test of DB_DIRTY_READ [#3395]
+
+ We set up a database with nentries in it. We then open the
+	database read-only twice, once with dirty read and once without.
+ We open the database for writing and update some entries in it.
+ Then read those new entries via db->get (clean and dirty), and
+ via cursors (clean and dirty).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test093
+ Test using set_bt_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test094
+ Test using set_dup_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test095
+ Bulk get test. [#2934]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test096
+ Db->truncate test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test097
+ Open up a large set of database files simultaneously.
+ Adjust for local file descriptor resource limits.
+ Then use the first 1000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test098
+ Test of DB_GET_RECNO and secondary indices. Open a primary and
+ a secondary, and do a normal cursor get followed by a get_recno.
+ (This is a smoke test for "Bug #1" in [#5811].)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test099
+
+ Test of DB->get and DBC->c_get with set_recno and get_recno.
+
+ Populate a small btree -recnum database.
+ After all are entered, retrieve each using -recno with DB->get.
+ Open a cursor and do the same for DBC->c_get with set_recno.
+ Verify that set_recno sets the record number position properly.
+ Verify that get_recno returns the correct record numbers.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test100
+ Test for functionality near the end of the queue
+ using test025 (DB_APPEND).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test101
+ Test for functionality near the end of the queue
+ using test070 (DB_CONSUME).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn001
+ Begin, commit, abort testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn002
+ Verify that read-only transactions do not write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn003
+ Test abort/commit/prepare of txns with outstanding child txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn004
+ Test of wraparound txnids (txn001)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn005
+ Test transaction ID wraparound and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn008
+ Test of wraparound txnids (txn002)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn009
+ Test of wraparound txnids (txn003)
diff --git a/libdb/test/archive.tcl b/libdb/test/archive.tcl
new file mode 100644
index 0000000..e1250d0
--- /dev/null
+++ b/libdb/test/archive.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Options are:
+# -checkrec <checkpoint frequency>
+# -dir <dbhome directory>
+# -maxfilesize <maxsize of log file>
+proc archive { args } {
+ global alphabet
+ source ./include.tcl
+
+ # Set defaults
+ set maxbsize [expr 8 * 1024]
+ set maxfile [expr 32 * 1024]
+ set checkrec 500
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* { incr i; set checkrec [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ default {
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
+ return
+ }
+
+ }
+ }
+
+ # Clean out old log if it existed
+ puts "Archive: Log archive test"
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ set eflags "-create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_bad dbenv $dbenv NULL
+ error_check_good dbenv [is_substr $dbenv env] 1
+
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
+ # The basic test structure here is that we write a lot of log
+	# records (enough to fill up 100 log files; each log file is
+ # small). We take periodic checkpoints. Between each pair
+ # of checkpoints, we refer to 2 files, overlapping them each
+ # checkpoint. We also start transactions and let them overlap
+ # checkpoints as well. The pattern that we try to create is:
+ # ---- write log records----|||||--- write log records ---
+ # -T1 T2 T3 --- D1 D2 ------CHECK--- CT1 --- D2 D3 CD1 ----CHECK
+	# where Tx is begin transaction, CTx is commit transaction, Dx is
+	# open data file and CDx is close data file.
+
+ set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
+ set nrecs $maxfile
+ set rec 0:$baserec
+
+ # Begin transaction and write a log record
+ set t1 [$dbenv txn]
+ error_check_good t1:txn_begin [is_substr $t1 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ error_check_bad l1:log_put [llength $l1] 0
+
+ set lsnlist [list [lindex $l1 0]]
+
+ set t2 [$dbenv txn]
+ error_check_good t2:txn_begin [is_substr $t2 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set t3 [$dbenv txn]
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set txnlist [list $t1 $t2 $t3]
+ set db1 [eval {berkdb_open} "-create -mode 0644 -hash -env $dbenv ar1"]
+ set db2 [eval {berkdb_open} "-create -mode 0644 -btree -env $dbenv ar2"]
+ set dbcount 3
+ set dblist [list $db1 $db2]
+
+ for { set i 1 } { $i <= $nrecs } { incr i } {
+ set rec $i:$baserec
+ set lsn [$dbenv log_put $rec]
+ error_check_bad log_put [llength $lsn] 0
+ if { [expr $i % $checkrec] == 0 } {
+ # Take a checkpoint
+ $dbenv txn_checkpoint
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
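+			# db_archive is exercised in several modes below: with
+			# no options it lists log files no longer in use, -a
+			# makes the names absolute, -l lists all log files, and
+			# -s lists the database files that would need a backup.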
+ catch { archive_command -h $testdir -a } res_log_full
+ if { [string first db_archive $res_log_full] == 0 } {
+ set res_log_full ""
+ }
+ catch { archive_command -h $testdir } res_log
+ if { [string first db_archive $res_log] == 0 } {
+ set res_log ""
+ }
+ catch { archive_command -h $testdir -l } res_alllog
+ catch { archive_command -h $testdir -a -s } \
+ res_data_full
+ catch { archive_command -h $testdir -s } res_data
+ error_check_good nlogfiles [llength $res_alllog] \
+ [lindex [lindex [$logc get -last] 0] 0]
+ error_check_good logs_match [llength $res_log_full] \
+ [llength $res_log]
+ error_check_good data_match [llength $res_data_full] \
+ [llength $res_data]
+
+ # Check right number of log files
+ error_check_good nlogs [llength $res_log] \
+ [expr [lindex $lsnlist 0] - 1]
+
+ # Check that the relative names are a subset of the
+ # full names
+ set n 0
+ foreach x $res_log {
+ error_check_bad log_name_match:$res_log \
+ [string first $x \
+ [lindex $res_log_full $n]] -1
+ incr n
+ }
+
+ set n 0
+ foreach x $res_data {
+ error_check_bad log_name_match:$res_data \
+ [string first $x \
+ [lindex $res_data_full $n]] -1
+ incr n
+ }
+
+ # Begin/commit any transactions
+ set t [lindex $txnlist 0]
+ if { [string length $t] != 0 } {
+ error_check_good txn_commit:$t [$t commit] 0
+ set txnlist [lrange $txnlist 1 end]
+ }
+ set lsnlist [lrange $lsnlist 1 end]
+
+ if { [llength $txnlist] == 0 } {
+ set t1 [$dbenv txn]
+ error_check_bad tx_begin $t1 NULL
+ error_check_good \
+ tx_begin [is_substr $t1 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t2 [$dbenv txn]
+ error_check_bad tx_begin $t2 NULL
+ error_check_good \
+ tx_begin [is_substr $t2 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t3 [$dbenv txn]
+ error_check_bad tx_begin $t3 NULL
+ error_check_good \
+ tx_begin [is_substr $t3 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set txnlist [list $t1 $t2 $t3]
+ }
+
+ # Open/close some DB files
+ if { [expr $dbcount % 2] == 0 } {
+ set type "-hash"
+ } else {
+ set type "-btree"
+ }
+ set db [eval {berkdb_open} \
+ "-create -mode 0644 $type -env $dbenv ar$dbcount"]
+ error_check_bad db_open:$dbcount $db NULL
+ error_check_good db_open:$dbcount [is_substr $db db] 1
+ incr dbcount
+
+ lappend dblist $db
+ set db [lindex $dblist 0]
+ error_check_good db_close:$db [$db close] 0
+ set dblist [lrange $dblist 1 end]
+
+ }
+ }
+ # Commit any transactions still running.
+ puts "\tArchive.b: Commit any transactions still running."
+ foreach t $txnlist {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Close any files that are still open.
+ puts "\tArchive.c: Close open files."
+ foreach d $dblist {
+		error_check_good db_close:$d [$d close] 0
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
+ reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
+
+ if { $is_windows_test == 1 || 1 } {
+		# Convert any backslashes in filenames to forward slashes;
+		# this only matters for Windows paths but is harmless elsewhere.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
+}
+
+proc min { a b } {
+ if {$a < $b} {
+ return $a
+ } else {
+ return $b
+ }
+}
diff --git a/libdb/test/bigfile001.tcl b/libdb/test/bigfile001.tcl
new file mode 100644
index 0000000..5abafd8
--- /dev/null
+++ b/libdb/test/bigfile001.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+proc bigfile001 { method \
+ { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts -nonewline "\tBigfile.a: Creating database...0%..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ set more_than_ten_already 0
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 5000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts -nonewline "\b\b\b\b\b"
+ if { $pct >= 10 } {
+ if { $more_than_ten_already } {
+ puts -nonewline "\b"
+ } else {
+ set more_than_ten_already 1
+ }
+ }
+
+ puts -nonewline "$pct%..."
+ flush stdout
+ }
+ }
+ puts "\b\b\b\b\b\b100%..."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
diff --git a/libdb/test/bigfile002.tcl b/libdb/test/bigfile002.tcl
new file mode 100644
index 0000000..8839fd4
--- /dev/null
+++ b/libdb/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+
+ puts -nonewline \
+ "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put -dirty] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ puts "succeeded."
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/byteorder.tcl b/libdb/test/byteorder.tcl
new file mode 100644
index 0000000..ede13bf
--- /dev/null
+++ b/libdb/test/byteorder.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Byte Order Test
+# Use existing tests and run with both byte orders.
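+# (A -lorder of 1234 requests little-endian databases, 4321 big-endian.)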
+proc byteorder { method {nentries 1000} } {
+ source ./include.tcl
+ puts "Byteorder: $method $nentries"
+
+ eval {test001 $method $nentries 0 "01" 0 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 10 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 10 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 11 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 11 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 4321}
+ eval {verify_dir $testdir}
+}
diff --git a/libdb/test/conscript.tcl b/libdb/test/conscript.tcl
new file mode 100644
index 0000000..ccef73c
--- /dev/null
+++ b/libdb/test/conscript.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Script for DB_CONSUME test (test070.tcl).
+# Usage: conscript dir file runtype nitems outputfile tnum args
+# dir: DBHOME directory
+# file: db file on which to operate
+# runtype: PRODUCE or CONSUME--which am I?
+# nitems: number of items to put or get
+# outputfile: where to log consumer results
+# tnum: test number
+
+proc consumescript_produce { db_cmd nitems tnum args } {
+ source ./include.tcl
+ global mydata
+
+ set pid [pid]
+ puts "\tTest0$tnum: Producer $pid starting, producing $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oret -1
+ set ret 0
+ for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
+ set oret $ret
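+		# Record numbers from -append can wrap around 32 bits; fold
+		# the previous value back into signed 32-bit range once it has
+		# gone past 0x7fffffff so the increasing-record-number check
+		# below compares consistently on this build.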
+ if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+ incr oret [expr 0 - 0x100000000]
+ }
+ set ret [$db put -append [chop_data q $mydata]]
+ error_check_good db_put \
+ [expr $ret > 0 ? $oret < $ret : \
+ $oret < 0 ? $oret < $ret : $oret > $ret] 1
+
+ }
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest0$tnum: Producer $pid finished."
+}
+
+proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
+ source ./include.tcl
+ global mydata
+ set pid [pid]
+ puts "\tTest0$tnum: Consumer $pid starting, seeking $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oid [open $outputfile w]
+
+ for { set ndx 0 } { $ndx < $nitems } { } {
+ set ret [$db get $mode]
+ if { [llength $ret] > 0 } {
+ error_check_good correct_data:$pid \
+ [lindex [lindex $ret 0] 1] [pad_data q $mydata]
+ set rno [lindex [lindex $ret 0] 0]
+ puts $oid $rno
+ incr ndx
+ } else {
+ # No data to consume; wait.
+ }
+ }
+
+ error_check_good output_close:$pid [close $oid] ""
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest0$tnum: Consumer $pid finished."
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "conscript.tcl dir file runtype nitems outputfile tnum args"
+
+# Verify usage
+if { $argc < 6 } {
+	puts stderr "FAIL:[timestamp] Usage: $usage"
+	exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set file [lindex $argv 1]
+set runtype [lindex $argv 2]
+set nitems [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set tnum [lindex $argv 5]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 6 end] 0]
+
+set mydata "consumer data"
+
+# Open env
+set dbenv [berkdb_env -home $dir ]
+error_check_good db_env_create [is_valid_env $dbenv] TRUE
+
+# Figure out db opening command.
+set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
+ $dbenv $args $file]
+
+# Invoke consumescript_produce or consumescript_consume based on $runtype
+if { $runtype == "PRODUCE" } {
+ # Producers have nothing to log; make sure outputfile is null.
+ error_check_good no_producer_outputfile $outputfile ""
+ consumescript_produce $db_cmd $nitems $tnum $args
+} elseif { $runtype == "CONSUME" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
+} elseif { $runtype == "WAIT" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
+ $args
+} else {
+ error_check_good bad_args $runtype "either PRODUCE, CONSUME or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
diff --git a/libdb/test/dbm.tcl b/libdb/test/dbm.tcl
new file mode 100644
index 0000000..ada3ecc
--- /dev/null
+++ b/libdb/test/dbm.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
+proc dbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "DBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/dbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good dbminit [berkdb dbminit $testfile] 0
+ set did [open $dict]
+
+ set flags ""
+ set txn ""
+ set count 0
+ set skippednullkey 0
+
+ puts "\tDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # DBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [berkdb store $str $str]
+ error_check_good dbm_store $ret 0
+
+ set d [berkdb fetch $str]
+ error_check_good dbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tDBM.c: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good dbminit2 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tDBM.d: sequential scan and delete"
+
+ error_check_good dbminit3 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set ret [berkdb delete $key]
+ error_check_good dbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ error_check_good "dbm_close" [berkdb dbmclose] 0
+}
diff --git a/libdb/test/dbscript.tcl b/libdb/test/dbscript.tcl
new file mode 100644
index 0000000..ed535bc
--- /dev/null
+++ b/libdb/test/dbscript.tcl
@@ -0,0 +1,357 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Random db tester.
+# Usage: dbscript method file numops ncurs min_del max_add key_avg data_avg dups errpct
+# method: method (we pass this in so that fixed-length records work)
+# file: db file on which to operate
+# numops: number of operations to do
+# ncurs: number of cursors
+# min_del: minimum number of keys before you disable deletes.
+# max_add: maximum number of keys before you disable adds.
+# key_avg: average key size
+# data_avg: average data size
+# dups: 1 indicates dups allowed, 0 indicates no dups
+# errpct: What percent of operations should generate errors
+# seed: Random number generator seed (-1 means use pid)
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "dbscript method file numops ncurs min_del max_add key_avg data_avg dups errpct"
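+# A hypothetical invocation, with every value purely illustrative:
+#	tclsh dbscript.tcl btree ops.db 1000 5 10 5000 20 20 1 10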
+
+# Verify usage
+if { $argc != 10 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
+
+berkdb srand $rand_init
+
+puts "Beginning execution for [pid]"
+puts "$file database"
+puts "$numops Operations"
+puts "$ncurs cursors"
+puts "$min_del keys before deletes allowed"
+puts "$max_add or fewer keys to add"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+if { $dups != 1 } {
+ puts "No dups"
+} else {
+ puts "Dups allowed"
+}
+puts "$errpct % Errors"
+
+flush stdout
+
+set db [berkdb_open $file]
+set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+# set method [$db get_type]
+set record_based [is_record_based $method]
+
+# Initialize globals including data
+global nkeys
+global l_keys
+global a_keys
+
+set nkeys [db_init $db 1]
+puts "Initial number of keys: $nkeys"
+
+set pflags ""
+set gflags ""
+set txn ""
+
+# Open the cursors
+set curslist {}
+for { set i 0 } { $i < $ncurs } { incr i } {
+ set dbc [$db cursor]
+ set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ lappend curslist $dbc
+
+}
+
+# On each iteration we're going to generate random keys and
+# data.  We'll select a get, put, or delete operation, except that
+# delete is not an option when there are fewer than min_del keys and
+# add is not an option when there are more than max_add keys.
+# The tcl global arrays a_keys and l_keys keep track
+# of key-data pairs indexed by key and a list of keys, accessed
+# by integer.
+set adds 0
+set puts 0
+set gets 0
+set dels 0
+set bad_adds 0
+set bad_puts 0
+set bad_gets 0
+set bad_dels 0
+
+for { set iter 0 } { $iter < $numops } { incr iter } {
+ set op [pick_op $min_del $max_add $nkeys]
+ set err [is_err $errpct]
+
+ # The op0's indicate that there aren't any duplicates, so we
+ # exercise regular operations. If dups is 1, then we'll use
+ # cursor ops.
+ switch $op$dups$err {
+ add00 {
+ incr adds
+
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ newpair $k [pad_data $method $data]
+ }
+ add01 {
+ incr bad_adds
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ add10 {
+ incr adds
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ if { [berkdb random_int 1 2] == 1 } {
+ # Add a new key
+ set k [random_data $key_avg 1 a_keys \
+ $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn \
+ {-keyfirst $k $data}]
+ newpair $k [pad_data $method $data]
+ } else {
+ # Add a new duplicate
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+
+ set op [pick_cursput]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn {$op $k $data}]
+ adddup $k [lindex $dbcinfo 2] $data
+ }
+ }
+ add11 {
+ # TODO
+ incr bad_adds
+ set ret 1
+ }
+ put00 {
+ incr puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn {$k $data}]
+ changepair $k [pad_data $method $data]
+ }
+ put01 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ put10 {
+ incr puts
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+
+ set ret [eval {$dbc put} $txn {-current $data}]
+ changedup $k [lindex $dbcinfo 2] $data
+ }
+ put11 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set dbc [$db cursor]
+ set ret [eval {$dbc put} $txn {-current $data}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ get00 {
+ incr gets
+ set k [random_key]
+ set val [eval {$db get} $txn {$k}]
+ set data [pad_data $method [lindex [lindex $val 0] 1]]
+ if { $data == $a_keys($k) } {
+ set ret 0
+ } else {
+ set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
+ }
+ # Get command requires no state change
+ }
+ get01 {
+ incr bad_gets
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db get} $txn {$k}]
+ # Error case so no change to data state
+ }
+ get10 {
+ incr gets
+ set dbcinfo [random_cursor $curslist]
+		if { [llength $dbcinfo] == 3 } {
+			set ret 0
+		} else {
+			set ret 0
+		}
+ # Get command requires no state change
+ }
+ get11 {
+ incr bad_gets
+ set k [random_key]
+ set dbc [$db cursor]
+ if { [berkdb random_int 1 2] == 1 } {
+ set dir -next
+ } else {
+ set dir -prev
+ }
+ set ret [eval {$dbc get} $txn {-next $k}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error and get case so no change to data state
+ }
+ del00 {
+ incr dels
+ set k [random_key]
+ set ret [eval {$db del} $txn {$k}]
+ rempair $k
+ }
+ del01 {
+ incr bad_dels
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db del} $txn {$k}]
+ # Error case so no change to data state
+ }
+ del10 {
+ incr dels
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set ret [eval {$dbc del} $txn]
+		remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
+ }
+ del11 {
+ incr bad_dels
+ set c [$db cursor]
+ set ret [eval {$c del} $txn]
+ set cerr [catch {error_check_good curs_close \
+ [$c close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ }
+ if { $err == 1 } {
+ # Verify failure.
+ set cerr [catch {error_check_good $op$dups$err:$k \
+ [is_substr Error $ret] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ } else {
+ # Verify success
+ set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ }
+
+ flush stdout
+}
+
+# Close cursors and file
+foreach i $curslist {
+ set r [$i close]
+ set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+}
+
+set r [$db close]
+set cerr [catch {error_check_good db_close:$db $r 0} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
+puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
+flush stdout
+
+filecheck $file $txn
+
+exit
diff --git a/libdb/test/ddoyscript.tcl b/libdb/test/ddoyscript.tcl
new file mode 100644
index 0000000..726d11c
--- /dev/null
+++ b/libdb/test/ddoyscript.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs oldoryoung myid
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# oldoryoung: "o" or "y", selecting the oldest or youngest variant of the test
+# myid: id of this process --
+#	the processes are created in the same order in which their
+#	lockerids were allocated, so the locker age relationship is
+#	isomorphic to the ordering of the myids.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung myid"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+ set waitobj NULL
+ set ret 0
+
+ if { $myid == 0 } {
+ set objid 2
+ if { $old_or_young == "o" } {
+ set waitobj [expr $numprocs - 1]
+ }
+ } else {
+ if { $old_or_young == "y" } {
+ set waitobj 0
+ }
+ set objid 4
+ }
+
+ # Acquire own read lock
+ if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+ }
+
+ # Acquire read lock
+ if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 10
+
+ if { $waitobj == "NULL" } {
+ # Sleep for a good long while
+ tclsleep 90
+ } else {
+ # Acquire write lock
+ if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+ != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockget:$waitobj \
+ [is_substr $lock2 $myenv] 1
+
+ # Now release it
+ if {[catch {$lock2 put} err] != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ }
+ }
+
+ }
+
+ # Release self lock
+ if {[catch {$selflock put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good selfput:oy:$myid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+ # Release first lock
+ if {[catch {$lock1 put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+} else {
+ # Make sure that we succeed if we're locking the same object as
+ # oldest or youngest.
+ if { [expr $myid % 2] == 0 } {
+ set mode read
+ } else {
+ set mode write
+ }
+ # Obtain first lock (should always succeed).
+ if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+
+ set nextobj [expr $myid + 1]
+ if { $nextobj == [expr $numprocs - 1] } {
+ set nextobj 1
+ }
+
+ set ret 1
+ if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$nextobj $lock2 NULL
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
diff --git a/libdb/test/ddscript.tcl b/libdb/test/ddscript.tcl
new file mode 100644
index 0000000..3594eee
--- /dev/null
+++ b/libdb/test/ddscript.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Deadlock detector script tester.
+# Usage: ddscript dir test lockerid objid numprocs
+# dir: DBHOME directory
+# test: Which test to run
+# lockerid: Lock id for this locker
+# objid: Object id to lock.
+# numprocs: Total number of processes running
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddscript dir test lockerid objid numprocs"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set tnum [ lindex $argv 1 ]
+set lockerid [ lindex $argv 2 ]
+set objid [ lindex $argv 3 ]
+set numprocs [ lindex $argv 4 ]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+puts [eval $tnum $myenv $lockerid $objid $numprocs]
+
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+
+exit
diff --git a/libdb/test/dead001.tcl b/libdb/test/dead001.tcl
new file mode 100644
index 0000000..6307adf
--- /dev/null
+++ b/libdb/test/dead001.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum "001"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "Dead$tnum: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create \
+ -mode 0644 -lock -txn_timeout $timeout -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ if {$timeout == 0 } {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -h $testdir >& $testdir/dd.out &]
+ } else {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -ae -h $testdir >& $testdir/dd.out &]
+ }
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ # Windows needs files closed before deleting files, so pause a little
+ tclsleep 3
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
diff --git a/libdb/test/dead002.tcl b/libdb/test/dead002.tcl
new file mode 100644
index 0000000..d78bd4a
--- /dev/null
+++ b/libdb/test/dead002.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
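+# TEST
+# TEST Here the environment itself is opened with -lock_detect, so
+# TEST conflicting lock requests are resolved as they are made and no
+# TEST separate db_deadlock utility process is started.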
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum 002} } {
+ source ./include.tcl
+
+ puts "Dead$tnum: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set lmode "default"
+ if { $timeout != 0 } {
+ set lmode "expire"
+ }
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir \
+ -lock -txn_timeout $timeout -lock_detect $lmode]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
diff --git a/libdb/test/dead003.tcl b/libdb/test/dead003.tcl
new file mode 100644
index 0000000..14c1f18
--- /dev/null
+++ b/libdb/test/dead003.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set detects { oldest youngest }
+ puts "Dead003: Deadlock detector tests: $detects"
+
+ # Create the environment.
+ foreach d $detects {
+ env_cleanup $testdir
+ puts "\tDead003.a: creating environment for $d"
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir -lock -lock_detect $d]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead003: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path\
+ $test_path/ddscript.tcl $testdir \
+ $t $locker $i $n >& \
+ $testdir/dead003.log.$i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl \
+ $testdir/dead003.log.$i $testdir \
+ $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead003.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n 0 $dead $clean $other
+ #
+ # If we get here we know we have the
+ # correct number of dead/clean procs, as
+ # checked by dead_check above. Now verify
+ # that the right process was the one.
+ puts "\tDead003: Verify $d locks were aborted"
+ set l ""
+ if { $d == "oldest" } {
+ set l [expr $n - 1]
+ }
+ if { $d == "youngest" } {
+ set l 0
+ }
+ set did [open $testdir/dead003.log.$l]
+ while { [gets $did val] != -1 } {
+ error_check_good check_abort \
+ $val 1
+ }
+ close $did
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead003.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/libdb/test/dead004.tcl b/libdb/test/dead004.tcl
new file mode 100644
index 0000000..f490b4d
--- /dev/null
+++ b/libdb/test/dead004.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The set up is that we have a total of 6 processes. The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it. The rest form a ring, obtaining lock N and requesting
+# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
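+#
+# Schematically, with the six processes the wait-for cycle is
+#
+#	locker 1 -> 2 -> 3 -> 4 -> 1
+#
+# while lockers 0 (oldest) and 5 (youngest) hold locks outside the cycle,
+# so the detector must abort locker 1 or 4, never 0 or 5.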
+
+proc dead004 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead004: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead004.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead004: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead004.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead004.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead004.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+ # Now verify that neither the oldest nor the
+ # youngest were the deadlock.
+ set did [open $testdir/dead004.log.0]
+ error_check_bad file:young [gets $did val] -1
+ error_check_good read:young $val 1
+ close $did
+
+ set did [open $testdir/dead004.log.[expr $procs - 1]]
+ error_check_bad file:old [gets $did val] -1
+ error_check_good read:old $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead004.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/libdb/test/dead005.tcl b/libdb/test/dead005.tcl
new file mode 100644
index 0000000..2338a77
--- /dev/null
+++ b/libdb/test/dead005.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Deadlock Test 5.
+# Test out the minlocks, maxlocks, and minwrites options
+# to the deadlock detector.
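+#
+# The detector is run as db_deadlock -a n, -a w, or -a m for minlocks,
+# minwrites, and maxlocks respectively, and the log of the expected victim
+# -- process 0 for minlocks, 1 for minwrites, the last (n - 1) for
+# maxlocks -- is checked for a DEADLOCK line.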
+proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
+ source ./include.tcl
+
+ puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
+ foreach t $tests {
+ puts "Dead005.$t: creating environment"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ minlocks { set to n }
+ maxlocks { set to m }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead005: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead005.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead005.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead005.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ minlocks {set f 0}
+ minwrites {set f 1}
+ maxlocks {set f [expr $n - 1]}
+ }
+ set did [open $testdir/dead005.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead005.log.$i
+ }
+ }
+}
diff --git a/libdb/test/dead006.tcl b/libdb/test/dead006.tcl
new file mode 100644
index 0000000..8fee8c8
--- /dev/null
+++ b/libdb/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dead006
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/libdb/test/dead007.tcl b/libdb/test/dead007.tcl
new file mode 100644
index 0000000..5e8a220
--- /dev/null
+++ b/libdb/test/dead007.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST dead007
+# TEST Test deadlock detection when the lock id space wraps around or is resized.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/libdb/test/env001.tcl b/libdb/test/env001.tcl
new file mode 100644
index 0000000..59cb616
--- /dev/null
+++ b/libdb/test/env001.tcl
@@ -0,0 +1,154 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
+proc env001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile $testdir/env.db
+ set t1 $testdir/t1
+
+ puts "Env001: Test of environment remove interface."
+ env_cleanup $testdir
+
+ # Try opening without Create flag should error
+ puts "\tEnv001.a: Open without create (should fail)."
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
+ error_check_good env:fail [is_substr $ret "no such file"] 1
+
+ # Now try opening with create
+ puts "\tEnv001.b: Open with create."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # Make sure that close works.
+ puts "\tEnv001.c: Verify close."
+ error_check_good env:close:$env [$env close] 0
+
+ # Make sure we can reopen -- this doesn't work on Windows
+ # because if there is only one opener, the region disappears
+ # when it is closed. We can't do a second opener, because
+ # that will fail on HP-UX.
+ puts "\tEnv001.d: Remove on closed environments."
+ if { $is_windows_test != 1 } {
+ puts "\t\tEnv001.d.1: Verify re-open."
+ set env [berkdb_env -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # remove environment
+ puts "\t\tEnv001.d.2: Close environment."
+ error_check_good env:close [$env close] 0
+ puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
+ error_check_good \
+ envremove [berkdb envremove -force -home $testdir] 0
+ }
+
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ puts "\tEnv001.e: Remove on open environments."
+ puts "\t\tEnv001.e.1: Env is open by single proc,\
+ remove no force."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -home $testdir} ret]
+ error_check_good env:remove $stat 1
+ error_check_good env:close [$env close] 0
+ }
+
+ puts \
+ "\t\tEnv001.e.2: Env is open by single proc, remove with force."
+ # Now that envremove doesn't do a close, this won't work on Windows.
+ if { $is_windows_test != 1 && $is_hp_test != 1} {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -force -home $testdir} ret]
+ error_check_good env:remove(force) $ret 0
+ #
+ # Even though the underlying env is gone, we need to close
+ # the handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
+ }
+
+ puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
+ # should fail
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+ # First close our env, but leave remote open
+ error_check_good env:close [$env close] 0
+ catch {berkdb envremove -home $testdir} ret
+ error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
+ #
+ # even though it failed, $env is no longer valid, so remove it in
+ # the remote process
+ set remote_close [send_cmd $f1 "$remote_env close"]
+ error_check_good remote_close $remote_close 0
+
+ # exit remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+
+ puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
+ # You cannot do this on Windows because you can't remove files that
+ # are open, so we skip this test for Windows. On UNIX, it should
+ # succeed
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ catch {berkdb envremove -force -home $testdir} ret
+ error_check_good envremove:2procs:force $ret 0
+ #
+ # We still need to close our handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
+
+ # Close down remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+ }
+
+ # Try opening in a different dir
+ puts "\tEnv001.f: Try opening env in another directory."
+ if { [file exists $testdir/NEWDIR] != 1 } {
+ file mkdir $testdir/NEWDIR
+ }
+ set eflags "-create -home $testdir/NEWDIR -mode 0644"
+ set env [eval {berkdb_env} $eflags]
+ error_check_bad env:open $env NULL
+ error_check_good env:close [$env close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/NEWDIR] 0
+
+ puts "\tEnv001 complete."
+}
diff --git a/libdb/test/env002.tcl b/libdb/test/env002.tcl
new file mode 100644
index 0000000..7675091
--- /dev/null
+++ b/libdb/test/env002.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
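+# TEST
+# TEST The DB_CONFIG file written by env002_make_config is simply, e.g.:
+# TEST	set_data_dir .
+# TEST	set_lg_dir logs_in_here
+# TEST and the check is that log.0000000001 shows up under the expected
+# TEST log directory.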
+proc env002 { } {
+ # env002 is essentially just a small driver that runs
+ # env002_body--formerly the entire test--twice; once, it
+ # supplies a "home" argument to use with environment opens,
+ # and the second time it sets DB_HOME instead.
+ # Note that env002_body itself calls env002_run_test to run
+ # the body of the actual test and check for the presence
+ # of logs. The nesting, I hope, makes this test's structure simpler.
+
+ global env
+ source ./include.tcl
+
+ puts "Env002: set_lg_dir test."
+
+ puts "\tEnv002: Running with -home argument to berkdb_env."
+ env002_body "-home $testdir"
+
+ puts "\tEnv002: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env002_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv002: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env002_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env002_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set logdir "logs_in_here"
+
+ file mkdir $testdir/$logdir
+
+ # Set up full path to $logdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$logdir
+ set fulllogdir [pwd]
+ cd $curdir
+
+ env002_make_config $logdir
+
+ # Run the meat of the test.
+ env002_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # Run the test again
+ env002_run_test a 2 "absolute path, config file" $home_arg \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ file mkdir $testdir/$logdir
+ env002_run_test b 1 "relative path, db_config" "$home_arg \
+ -log_dir $logdir -data_dir ." \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ # absolute
+ file mkdir $fulllogdir
+ env002_run_test b 2 "absolute path, db_config" "$home_arg \
+ -log_dir $fulllogdir -data_dir ." \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/$logdir
+ env002_make_config $logdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -log_dir $testdir/bogus \
+ -data_dir ." $testdir/$logdir
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -log_dir $fulllogdir/bogus \
+ -data_dir ." $fulllogdir
+}
+
+proc env002_run_test { major minor msg env_args log_path} {
+ global testdir
+ set testfile "env002.db"
+
+ puts "\t\tEnv002.$major.$minor: $msg"
+
+ # Create an environment, with logging, and scribble some
+ # stuff in a [btree] database in it.
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set key "some_key"
+ set data "some_data"
+
+ error_check_good db_put \
+ [$db put $key [chop_data btree $data]] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now make sure the log file is where we want it to be.
+ error_check_good db_exists [file exists $testdir/$testfile] 1
+ error_check_good log_exists \
+ [file exists $log_path/log.0000000001] 1
+}
+
+proc env002_make_config { logdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_lg_dir $logdir"
+ close $cid
+}
diff --git a/libdb/test/env003.tcl b/libdb/test/env003.tcl
new file mode 100644
index 0000000..422ddf9
--- /dev/null
+++ b/libdb/test/env003.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
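+# TEST
+# TEST The DB_CONFIG file written by env003_make_config is analogous, e.g.:
+# TEST	set_data_dir .
+# TEST	set_tmp_dir tmpfiles_in_here
+# TEST and each case overflows a small cache so that a temporary file
+# TEST (BDB*) must be created in the configured directory.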
+proc env003 { } {
+ # env003 is essentially just a small driver that runs
+ # env003_body twice. First, it supplies a "home" argument
+ # to use with environment opens, and the second time it sets
+ # DB_HOME instead.
+ # Note that env003_body itself calls env003_run_test to run
+ # the body of the actual test.
+
+ global env
+ source ./include.tcl
+
+ puts "Env003: DB_TMP_DIR test."
+
+ puts "\tEnv003: Running with -home argument to berkdb_env."
+ env003_body "-home $testdir"
+
+ puts "\tEnv003: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env003_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv003: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env003_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+}
+
+proc env003_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set tmpdir "tmpfiles_in_here"
+ file mkdir $testdir/$tmpdir
+
+ # Set up full path to $tmpdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$tmpdir
+ set fulltmpdir [pwd]
+ cd $curdir
+
+ # Create DB_CONFIG
+ env003_make_config $tmpdir
+
+ # Run the meat of the test.
+ env003_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$tmpdir
+
+ env003_make_config $fulltmpdir
+
+ # Run the test again
+ env003_run_test a 2 "absolute path, config file" $home_arg \
+ $fulltmpdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ env003_run_test b 1 "relative path, db_config" "$home_arg \
+ -tmp_dir $tmpdir -data_dir ." \
+ $testdir/$tmpdir
+
+ # absolute paths
+ env003_run_test b 2 "absolute path, db_config" "$home_arg \
+ -tmp_dir $fulltmpdir -data_dir ." \
+ $fulltmpdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/bogus
+ env003_make_config $tmpdir
+
+ env003_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
+ $testdir/$tmpdir
+
+ file mkdir $fulltmpdir/bogus
+ env003_make_config $fulltmpdir
+
+ env003_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
+ $fulltmpdir
+}
+
+proc env003_run_test { major minor msg env_args tmp_path} {
+ global testdir
+ global alphabet
+ global errorCode
+
+ puts "\t\tEnv003.$major.$minor: $msg"
+
+ # Create an environment and small-cached in-memory database to
+ # use.
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ set db [berkdb_open -env $dbenv -create -btree]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Fill the database with more than its cache can fit.
+ #
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
+
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+
+ # Can't remove temp file until db is closed on Windows.
+ error_check_good db_close [$db close] 0
+ fileremove -f $ret
+ error_check_good env_close [$dbenv close] 0
+
+}
+
+proc env003_make_config { tmpdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_tmp_dir $tmpdir"
+ close $cid
+}
diff --git a/libdb/test/env004.tcl b/libdb/test/env004.tcl
new file mode 100644
index 0000000..d401548
--- /dev/null
+++ b/libdb/test/env004.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
+proc env004 { } {
+ source ./include.tcl
+
+ set method "hash"
+ set omethod [convert_method $method]
+ set args [convert_args $method ""]
+
+ puts "Env004: Multiple data directory test."
+
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
+
+ # Create a config file
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_data_dir data1"
+ puts $cid "set_data_dir data2"
+ puts $cid "set_data_dir data3"
+ close $cid
+
+ # Now get pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ set e [berkdb_env -create -private -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ # Now call dbenv with config specified
+ set e [berkdb_env -create -private \
+ -data_dir . -data_dir data1 -data_dir data2 \
+ -data_dir data3 -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ env_cleanup $testdir
+}
+
+proc ddir_test { fulldir method e args } {
+ source ./include.tcl
+
+ set args [convert_args $args]
+ set omethod [convert_method $method]
+
+ # Now create one file in each directory
+ set db1 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Close the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+
+ # Now, reopen the files without complete pathnames and make
+ # sure that we find them.
+
+ set db1 [berkdb_open -env $e $fulldir/data1/datafile1.db]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [berkdb_open -env $e $fulldir/data2/datafile2.db]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [berkdb_open -env $e $fulldir/data3/datafile3.db]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Finally close all the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+}
diff --git a/libdb/test/env005.tcl b/libdb/test/env005.tcl
new file mode 100644
index 0000000..539a983
--- /dev/null
+++ b/libdb/test/env005.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env005
+# TEST Test that using subsystems without initializing them correctly
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
+proc env005 { } {
+ source ./include.tcl
+
+ puts "Env005: Uninitialized env subsystems test."
+
+ env_cleanup $testdir
+ puts "\tEnv005.a: Creating env with no subsystems."
+
+ set e [berkdb_env_noerr -create -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [berkdb_open -create -btree $testdir/env005.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rlist {
+ { "lock_detect" "Env005.b0"}
+ { "lock_get read 1 1" "Env005.b1"}
+ { "lock_id" "Env005.b2"}
+ { "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
+ { "log_archive" "Env005.c0"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
+ { "log_put record" "Env005.c4"}
+ { "log_stat" "Env005.c5"}
+ { "txn" "Env005.d0"}
+ { "txn_checkpoint" "Env005.d1"}
+ { "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $ret invalid] 1
+ }
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/libdb/test/env006.tcl b/libdb/test/env006.tcl
new file mode 100644
index 0000000..290501d
--- /dev/null
+++ b/libdb/test/env006.tcl
@@ -0,0 +1,42 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
+proc env006 { } {
+ source ./include.tcl
+
+ puts "Env006: Run underlying utilities."
+
+ set rlist {
+ { "db_archive" "Env006.a"}
+ { "db_checkpoint" "Env006.b"}
+ { "db_deadlock" "Env006.c"}
+ { "db_dump" "Env006.d"}
+ { "db_load" "Env006.e"}
+ { "db_printlog" "Env006.f"}
+ { "db_recover" "Env006.g"}
+ { "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.h"}
+ { "db_verify" "Env006.h"}
+ }
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+
+ puts "\t$msg: $cmd"
+
+ set stat [catch {exec $util_path/$cmd -?} ret]
+ error_check_good $cmd $stat 1
+
+ #
+ # Check for "usage", but only check "sage" so that
+ # we can handle either Usage or usage.
+ #
+ error_check_good $cmd.err [is_substr $ret sage] 1
+ }
+}
diff --git a/libdb/test/env007.tcl b/libdb/test/env007.tcl
new file mode 100644
index 0000000..37410e2
--- /dev/null
+++ b/libdb/test/env007.tcl
@@ -0,0 +1,223 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env007
+# TEST Test various DB_CONFIG config file options.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
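+# TEST
+# TEST For example, opening with "-txn_max 19" on the command line while
+# TEST DB_CONFIG contains "set_tx_max 31" must leave txn_stat reporting
+# TEST a "Max Txns" value of 31, because the DB_CONFIG setting wins.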
+proc env007 { } {
+ global errorInfo
+
+ # env007 is essentially just a small driver that runs
+ # env007_body twice. First, it supplies a "set" argument
+ # to use with environment opens, and the second time it sets
+ # DB_CONFIG instead.
+ # Note that env007_body itself calls env007_run_test to run
+ # the body of the actual test.
+
+ source ./include.tcl
+
+ puts "Env007: DB_CONFIG test."
+
+ #
+ # Test only those options we can easily check via stat
+ #
+ set rlist {
+ { " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
+ "txn_stat" "Max Txns"}
+ { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
+ "lock_stat" "Maximum locks"}
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.d: Max Objects" "lock_stat" "Maximum objects"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
+ "log_stat" "Log record cache size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
+ "log_stat" "Current log file size"}
+ }
+
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $rlist {
+ set envarg [lindex $item 0]
+ set configarg [lindex $item 1]
+ set envval [lindex $item 2]
+ set configval [lindex $item 3]
+ set msg [lindex $item 4]
+ set statcmd [lindex $item 5]
+ set statstr [lindex $item 6]
+
+ env_cleanup $testdir
+ # First verify using just env args
+ puts "\t$msg Environment argument only"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:0 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $envval
+ error_check_good envclose:0 [$env close] 0
+
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t$msg Config file only"
+ set env [eval $e]
+ error_check_good envopen:1 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:1 [$env close] 0
+
+ # First verify using just env args
+ puts "\t$msg Environment arg and config file"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:2 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:2 [$env close] 0
+ }
+
+ #
+ # Test all options. For all config options, write it out
+ # to the file and make sure we can open the env. We cannot
+ # necessarily check via stat that it worked but this execs
+ # the config file code itself.
+ #
+ set cfglist {
+ { "set_cachesize" "0 1048576 0" }
+ { "set_data_dir" "." }
+ { "set_flags" "db_cdb_alldb" }
+ { "set_flags" "db_direct_db" }
+ { "set_flags" "db_direct_log" }
+ { "set_flags" "db_nolocking" }
+ { "set_flags" "db_nommap" }
+ { "set_flags" "db_nopanic" }
+ { "set_flags" "db_overwrite" }
+ { "set_flags" "db_region_init" }
+ { "set_flags" "db_txn_nosync" }
+ { "set_flags" "db_txn_write_nosync" }
+ { "set_flags" "db_yieldcpu" }
+ { "set_lg_bsize" "65536" }
+ { "set_lg_dir" "." }
+ { "set_lg_max" "8388608" }
+ { "set_lg_regionmax" "65536" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_lk_detect" "db_lock_expire" }
+ { "set_lk_detect" "db_lock_maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" }
+ { "set_lk_detect" "db_lock_oldest" }
+ { "set_lk_detect" "db_lock_random" }
+ { "set_lk_detect" "db_lock_youngest" }
+ { "set_lk_max" "50" }
+ { "set_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "29" }
+ { "set_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" }
+ { "set_mp_mmapsize" "12582912" }
+ { "set_region_init" "1" }
+ { "set_shm_key" "15" }
+ { "set_tas_spins" "15" }
+ { "set_tmp_dir" "." }
+ { "set_tx_max" "31" }
+ { "set_txn_timeout" "100" }
+ { "set_verbose" "db_verb_chkpoint" }
+ { "set_verbose" "db_verb_deadlock" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_waitsfor" }
+ }
+
+ puts "\tEnv007.g: Config file settings"
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set env [eval $e]
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max" "db_xxx" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+ puts "\tEnv007.h: Config value errors"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "incorrect arguments for name-value pair"] 1
+ }
+
+ puts "\tEnv007.i: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
+}
+
+proc env007_check { env statcmd statstr testval } {
+ set stat [$env $statcmd]
+ set checked 0
+ foreach statpair $stat {
+ if {$checked == 1} {
+ break
+ }
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $statstr] != 0} {
+ set checked 1
+ error_check_good $statstr:ck $statval $testval
+ }
+ }
+ error_check_good $statstr:test $checked 1
+}
+
+proc env007_make_config { carg cval } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/libdb/test/env008.tcl b/libdb/test/env008.tcl
new file mode 100644
index 0000000..9affa72
--- /dev/null
+++ b/libdb/test/env008.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env008
+# TEST Test environments and subdirectories.
+proc env008 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ env_cleanup $testdir
+
+ set subdir 1/1
+ set subdir1 1/2
+ file mkdir $testdir/$subdir $testdir/$subdir1
+ set testfile $subdir/env.db
+
+ puts "Env008: Test of environments and subdirectories."
+
+ puts "\tEnv008.a: Create env and db."
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv008.b: Remove db in subdir."
+ env008_db $env $testfile
+ error_check_good dbremove:$testfile \
+ [berkdb dbremove -env $env $testfile] 0
+
+ #
+ # Rather than remaking the db every time for the renames
+ # just move around the new file name to another new file
+ # name.
+ #
+ puts "\tEnv008.c: Rename db in subdir."
+ env008_db $env $testfile
+ set newfile $subdir/new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.d: Rename db to parent dir."
+ set newfile $subdir/../new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.e: Rename db to child dir."
+ set newfile $subdir/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.f: Rename db to another dir."
+ set newfile $subdir1/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+
+ error_check_good envclose [$env close] 0
+ puts "\tEnv008 complete."
+}
+
+proc env008_db { env testfile } {
+ set db [berkdb_open -env $env -create -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db put key data]
+ error_check_good dbput $ret 0
+ error_check_good dbclose [$db close] 0
+}
diff --git a/libdb/test/env009.tcl b/libdb/test/env009.tcl
new file mode 100644
index 0000000..0ffca77
--- /dev/null
+++ b/libdb/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+ set dbq [berkdb_open -create -btree $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+ puts "\tEnv009.f: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.g: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.h: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/libdb/test/env010.tcl b/libdb/test/env010.tcl
new file mode 100644
index 0000000..870b5ae
--- /dev/null
+++ b/libdb/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+ # The method doesn't matter, so picked btree arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/libdb/test/env011.tcl b/libdb/test/env011.tcl
new file mode 100644
index 0000000..f9b0dc2
--- /dev/null
+++ b/libdb/test/env011.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST env011
+# TEST Run with region overwrite flag.
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} -auto_commit "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} -auto_commit "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/libdb/test/hsearch.tcl b/libdb/test/hsearch.tcl
new file mode 100644
index 0000000..d3cc5e5
--- /dev/null
+++ b/libdb/test/hsearch.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Historic Hsearch interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc hsearch { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "HSEARCH interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good hcreate [berkdb hcreate $nentries] 0
+ set did [open $dict]
+ set count 0
+
+ puts "\tHSEARCH.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ret [berkdb hsearch $str $str enter]
+ error_check_good hsearch:enter $ret 0
+
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+
+ puts "\tHSEARCH.b: re-get loop"
+ set did [open $dict]
+ set count 0
+ # Here is the loop where we retrieve each key
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+ error_check_good hdestroy [berkdb hdestroy] 0
+}
diff --git a/libdb/test/include.tcl b/libdb/test/include.tcl
new file mode 100644
index 0000000..173793d
--- /dev/null
+++ b/libdb/test/include.tcl
@@ -0,0 +1,23 @@
+# Automatically built by dist/s_test; may require local editing.
+
+set tclsh_path @TCL_TCLSH@
+set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@MODSUFFIX@
+
+set rpc_server localhost
+set rpc_path .
+set rpc_testdir $rpc_path/TESTDIR
+
+set src_root @srcdir@/..
+set test_path @srcdir@/../test
+
+global testdir
+set testdir ./TESTDIR
+
+global dict
+global util_path
+
+global is_hp_test
+global is_qnx_test
+global is_windows_test
+
+set KILL "@db_cv_path_kill@"
diff --git a/libdb/test/join.tcl b/libdb/test/join.tcl
new file mode 100644
index 0000000..3c1fd69
--- /dev/null
+++ b/libdb/test/join.tcl
@@ -0,0 +1,455 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
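+# TEST
+# TEST For a key present in all of them, join2.db would carry the duplicate
+# TEST data items 0, 2, 4, ... and join3.db 0, 3, 6, ..., so a join across
+# TEST those two yields 0, 6, 12, ...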
+proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
+ global testdir
+ global rand_init
+ source ./include.tcl
+
+ env_cleanup $testdir
+ berkdb srand $rand_init
+
+ # Use one environment for all database opens so we don't
+ # need oodles of regions.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # With the new offpage duplicate code, we don't support
+ # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
+ # is greater than one, run only with "-dup".
+ if { $with_dup_dups > 1 } {
+ set doptarray {"-dup"}
+ } else {
+ set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
+ }
+
+ # NB: these flags are internal only, ok
+ foreach m "DB_BTREE DB_HASH DB_BOTH" {
+ # run with two different random mixes.
+ foreach dopt $doptarray {
+ set opt [list "-env" $env $dopt]
+
+ puts "Join test: ($m $dopt) psize $psize,\
+ $with_dup_dups dup\
+ dups, flags $flags."
+
+ build_all $m $psize $opt oa $with_dup_dups
+
+ # null.db is db_built fifth but is referenced by
+ # zero; set up the option array appropriately.
+ set oa(0) $oa(5)
+
+ # Build the primary
+ puts "\tBuilding the primary database $m"
+ set oflags "-create -truncate -mode 0644 -env $env\
+ [conv $m [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join primary.db "1 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "0 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 0 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 2 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups "3" $str2
+
+ # You really don't want to run this section
+ # with $with_dup_dups > 2.
+ if { $with_dup_dups <= 2 } {
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2 3" $str\
+ oa $flags $with_dup_dups "3 3 1" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "4 0 2" $str\
+ oa $flags $with_dup_dups "4 3 3" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "3 2 1" $str\
+ oa $flags $with_dup_dups "0 2" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "1 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "0 0 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "2 4 4" $str
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "0 0 4 4" $str
+ }
+ close $did
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
+ global testdir
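+ # Each joinN.db stores the duplicate data items 0, N, 2N, ... under
+ # every key, so an m-way join returns the data values that are
+ # multiples of every chosen interval.  null.db is built with no
+ # duplicates, so joins against it match nothing.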
+ db_build join1.db $nentries 50 1 [conv $method 1]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join2.db $nentries 25 2 [conv $method 2]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join3.db $nentries 16 3 [conv $method 3]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join4.db $nentries 12 4 [conv $method 4]\
+ $psize $opt $oaname $with_dup_dups
+ db_build null.db $nentries 0 5 [conv $method 5]\
+ $psize $opt $oaname $with_dup_dups
+}
+
+proc conv { m i } {
+ switch -- $m {
+ DB_HASH { return "-hash"}
+ "-hash" { return "-hash"}
+ DB_BTREE { return "-btree"}
+ "-btree" { return "-btree"}
+ DB_BOTH {
+ if { [expr $i % 2] == 0 } {
+ return "-hash";
+ } else {
+ return "-btree";
+ }
+ }
+ }
+}
+
+proc random_opts { } {
+ set j [berkdb random_int 0 1]
+ if { $j == 0 } {
+ return " -dup"
+ } else {
+ return " -dup -dupsort"
+ }
+}
+
+proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
+ with_dup_dups } {
+ source ./include.tcl
+
+ # Get array of arg names (from two levels up the call stack)
+ upvar 2 $oaname oa
+
+ # Search for "RANDOMMIX" in $opt, and if present, replace
+ # with " -dup" or " -dup -dupsort" at random.
+ set i [lsearch $lopt RANDOMMIX]
+ if { $i != -1 } {
+ set lopt [lreplace $lopt $i $i [random_opts]]
+ }
+
+ # Save off db_open arguments for this database.
+ set opt [eval concat $lopt]
+ set oa($dup_interval) $opt
+
+ # Create the database and open the dictionary
+ set oflags "-create -truncate -mode 0644 $method\
+ -pagesize $psize"
+ set db [eval {berkdb_open} $oflags $opt $name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ puts -nonewline "\tBuilding $name: $nkeys keys "
+ puts -nonewline "with $ndups duplicates at interval of $dup_interval"
+ if { $with_dup_dups > 0 } {
+ puts ""
+ puts "\t\tand $with_dup_dups duplicate duplicates."
+ } else {
+ puts "."
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ set str $str$name
+ # We need to make sure that the dups are inserted in a
+ # random, or near random, order. Do this by generating
+ # them and putting each in a list, then sorting the list
+ # at random.
+ set duplist {}
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ lappend duplist $data
+ }
+ # Shuffle the list (a deterministic near-random swap; the truly
+ # random swap is commented out below).
+ for { set i 0 } { $i < $ndups } {incr i } {
+ # set j [berkdb random_int $i [expr $ndups - 1]]
+ set j [expr ($i % 2) + $i]
+ if { $j >= $ndups } { set j $i }
+ set dupi [lindex $duplist $i]
+ set dupj [lindex $duplist $j]
+ set duplist [lreplace $duplist $i $i $dupj]
+ set duplist [lreplace $duplist $j $j $dupi]
+ }
+ foreach data $duplist {
+ if { $with_dup_dups != 0 } {
+ for { set j 0 }\
+ { $j < $with_dup_dups }\
+ {incr j} {
+ set ret [$db put $str $data]
+ error_check_good put$j $ret 0
+ }
+ } else {
+ set ret [$db put $str $data]
+ error_check_good put $ret 0
+ }
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str NODUP]
+ error_check_good put $ret 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
+ global testdir
+ source ./include.tcl
+
+ upvar $oanm oa
+
+ puts -nonewline "\tJoining: $dbs on $key"
+ if { $dbs2 == "" } {
+ puts ""
+ } else {
+ puts " with $dbs2 on $key2"
+ }
+
+ # Open all the databases
+ set p [berkdb_open -unknown $testdir/$primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ set ndx [llength $dbs]
+
+ foreach i [concat $dbs $dbs2] {
+ set opt $oa($i)
+ set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
+ error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
+ set curs [$db cursor]
+ error_check_good "$db cursor" \
+ [is_substr $curs "$db.c"] 1
+ lappend dblist $db
+ lappend curslist $curs
+
+ if { $ndx > 0 } {
+ set realkey [concat $key[n_to_name $i]]
+ } else {
+ set realkey [concat $key2[n_to_name $i]]
+ }
+
+ set pair [$curs get -set $realkey]
+ error_check_good cursor_set:$realkey:$pair \
+ [llength [lindex $pair 0]] 2
+
+ incr ndx -1
+ }
+
+ set join_curs [eval {$p join} $curslist]
+ error_check_good join_cursor \
+ [is_substr $join_curs "$p.c"] 1
+
+ # Calculate how many dups we expect.
+ # We go through the list of indices. If we find a 0, then we
+ # expect 0 dups.  For everything else, we look at pairs of numbers:
+ # if one number divides the other, take the number of times the larger
+ # goes into 50; otherwise take their least common multiple and figure
+ # out how many times that goes into 50.
+ set expected 50
+ set last 1
+ foreach n [concat $dbs $dbs2] {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
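+ # For example, with indices "2 3" the loop above ends with last =
+ # lcm(2,3) = 6 and expected = 50 / 6 = 8 (integer division); with
+ # "2 4", 4 is a multiple of 2, so last = 4 and expected = 50 / 4 = 12.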
+
+ # If $with_dup_dups is greater than zero, each datum has
+ # been inserted $with_dup_dups times. So we expect the number
+ # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
+
+ if { $with_dup_dups > 0 } {
+ foreach n [concat $dbs $dbs2] {
+ set expected [expr $expected * $with_dup_dups]
+ }
+ }
+
+ set ndups 0
+ if { $flags == " -join_item"} {
+ set l 1
+ } else {
+ set flags ""
+ set l 2
+ }
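+ # Walk the join cursor: every key it returns must be a multiple of
+ # each index in $dbs (after leading zeros are stripped), and the total
+ # number of matches must equal $expected.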
+ for { set pair [eval {$join_curs get} $flags] } { \
+ [llength [lindex $pair 0]] == $l } {
+ set pair [eval {$join_curs get} $flags] } {
+ set k [lindex [lindex $pair 0] 0]
+ foreach i $dbs {
+ error_check_bad valid_dup:$i:$dbs $i 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
+ }
+ incr ndups
+ }
+ error_check_good number_of_dups:$dbs $ndups $expected
+
+ error_check_good close_primary [$p close] 0
+ foreach i $curslist {
+ error_check_good close_cursor:$i [$i close] 0
+ }
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_name { n } {
+ global testdir
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return join$n.db;
+ }
+}
+
+proc gcd { a b } {
+ set g 1
+
+ for { set i 2 } { $i <= $a } { incr i } {
+ if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
+ set g $i
+ }
+ }
+ return $g
+}
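+# (The trial-division loop above is fine for the tiny index values used
+# here.  For reference only -- not used by the test -- an equivalent
+# Euclidean version could be sketched as:
+#	proc gcd_euclid { a b } {
+#		while { $b != 0 } {
+#			set t $b
+#			set b [expr {$a % $b}]
+#			set a $t
+#		}
+#		return $a
+#	}
+# The name gcd_euclid is purely illustrative and appears nowhere else.)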
diff --git a/libdb/test/lock001.tcl b/libdb/test/lock001.tcl
new file mode 100644
index 0000000..cda58c3
--- /dev/null
+++ b/libdb/test/lock001.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} {maxlocks 1000} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Set defaults
+ # Adjusted to make exact match of isqrt
+ #set conflicts { 3 0 0 0 0 0 1 0 1 1}
+ #set conflicts { 3 0 0 0 0 1 0 1 1}
+
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set nmodes [isqrt [llength $conflicts]]
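+ # The conflicts list is a flattened nmodes x nmodes matrix (3x3 here,
+ # hence the isqrt above); entry [i*nmodes + j] is nonzero when a
+ # requested lock of mode i conflicts with a lock of mode j already held.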
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir -mode 0644 \
+ -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
+
+ puts "Lock001: test basic lock operations"
+ set locker [$env lock_id]
+ # Get and release each type of lock
+ puts "\tLock001.a: get and release each type of lock"
+ foreach m {ng write read} {
+ set obj obj$m
+ set lockp [$env lock_get $m $locker $obj]
+ error_check_good lock_get:a [is_blocked $lockp] 0
+ error_check_good lock_get:a [is_substr $lockp $env] 1
+ set ret [ $lockp put ]
+ error_check_good lock_put $ret 0
+ }
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
+ foreach m {ng write read} {
+ set lockp [$env lock_get $m $locker $obj ]
+ lappend locklist $lockp
+ error_check_good lock_get:b [is_blocked $lockp] 0
+ error_check_good lock_get:b [is_substr $lockp $env] 1
+ }
+ release_list $locklist
+
+ set locklist {}
+ # Check that reference counted locks work
+ puts "\tLock001.c: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_get -nowait write $locker $obj]
+ error_check_good lock_get:c [is_blocked $lockp] 0
+ error_check_good lock_get:c [is_substr $lockp $env] 1
+ lappend locklist $lockp
+ }
+ release_list $locklist
+
+ # Finally try some failing locks
+ set locklist {}
+ foreach i {ng write read} {
+ set lockp [$env lock_get $i $locker $obj]
+ lappend locklist $lockp
+ error_check_good lock_get:d [is_blocked $lockp] 0
+ error_check_good lock_get:d [is_substr $lockp $env] 1
+ }
+
+ # Change the locker
+ set locker [$env lock_id]
+ set blocklist {}
+ # Skip NO_LOCK lock.
+ puts "\tLock001.d: Change the locker, acquire read and write."
+ foreach i {write read} {
+ catch {$env lock_get -nowait $i $locker $obj} ret
+ error_check_good lock_get:e [is_substr $ret "not granted"] 1
+ #error_check_good lock_get:e [is_substr $lockp $env] 1
+ #error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+ # Now release original locks
+ release_list $locklist
+
+ # Now re-acquire blocking locks
+ set locklist {}
+ puts "\tLock001.e: Re-acquire blocking locks."
+ foreach i {write read} {
+ set lockp [$env lock_get -nowait $i $locker $obj ]
+ error_check_good lock_get:f [is_substr $lockp $env] 1
+ error_check_good lock_get:f [is_blocked $lockp] 0
+ lappend locklist $lockp
+ }
+
+ # Now release new locks
+ release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
+
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
diff --git a/libdb/test/lock002.tcl b/libdb/test/lock002.tcl
new file mode 100644
index 0000000..3003b9f
--- /dev/null
+++ b/libdb/test/lock002.tcl
@@ -0,0 +1,157 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
+proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
+ source ./include.tcl
+
+ puts "Lock002: Basic multi-process lock tests."
+
+ env_cleanup $testdir
+
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Open the lock
+ mlock_open $maxlocks $nmodes $conflicts
+ mlock_wait
+}
+
+# Make sure that we can create a region; destroy it, attach to it,
+# detach from it, etc.
+proc mlock_open { maxl nmodes conflicts } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "\tLock002.a multi-process open/close test"
+
+ # Open/Create region here. Then close it and try to open from
+ # other test process.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Open from other test process
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Now make sure that we can reopen the region.
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Try closing the remote region
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:lock_close $ret 0
+
+ # Try opening for create. Will succeed because region exists.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good remote:env_open [is_valid_env $local_env] TRUE
+
+ # close locally
+ reset_env $local_env
+
+ # Close and exit remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ catch { close $f1 } result
+}
+
+proc mlock_wait { } {
+ source ./include.tcl
+
+ puts "\tLock002.b multi-process get/put wait test"
+
+ # Open region locally
+ set env_cmd "berkdb_env -lock -home $testdir"
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ # Open region remotely
+ set f1 [open |$tclsh_path r+]
+
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Get a write lock locally; try for the read lock
+ # remotely. We hold the locks for several seconds
+ # so that we can use timestamps to figure out if the
+ # other process waited.
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now request a lock that we expect to hang; generate
+ # timestamps so we can tell if it actually hangs.
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
+ set remote_lock [send_timed_cmd $f1 1 \
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
+
+ # Now sleep before releasing lock
+ tclsleep 5
+ set result [$local_lock put]
+ error_check_good lock_put $result 0
+
+ # Now get the result from the other script
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
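+ # ($result is the elapsed time reported back for the remote lock_get;
+ # more than 4 seconds means the remote process really did block until
+ # the put above released the write lock.)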
+
+ # Now get the remote lock
+ set remote_lock [send_cmd $f1 "puts \$lock"]
+ error_check_good remote:lock_get \
+ [is_valid_lock $remote_lock $remote_env] TRUE
+
+ # Now make the other process wait 5 seconds and then release its
+ # lock while we try to get a write lock on it
+ set start [timestamp -r]
+
+ set ret [send_cmd $f1 "tclsleep 5"]
+
+ set ret [send_cmd $f1 "$remote_lock put"]
+
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get:time \
+ [expr [expr [timestamp -r] - $start] > 2] 1
+ error_check_good lock_get:local \
+ [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now check remote's result
+ set result [rcv_result $f1]
+ error_check_good lock_put:remote $result 0
+
+ # Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ close $f1
+
+ # Now close up locally
+ set ret [$local_lock put]
+ error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
+
+ reset_env $local_env
+}
diff --git a/libdb/test/lock003.tcl b/libdb/test/lock003.tcl
new file mode 100644
index 0000000..7164fcf
--- /dev/null
+++ b/libdb/test/lock003.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set seeds {}
+
+ puts "Lock003: Multi-process random lock test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
+
+ error_check_good env_close [$e close] 0
+
+ # Now spawn off processes
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { [llength $seeds] == $procs } {
+ set s [lindex $seeds $i]
+ }
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ lockscript.tcl $testdir/lock003.$i.out \
+ $testdir $iter $objs $wait $ldegree $reads &]
+ lappend pidlist $p
+ }
+
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/lock003.*.out]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Remove log files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
+ }
+}
+
diff --git a/libdb/test/lock004.tcl b/libdb/test/lock004.tcl
new file mode 100644
index 0000000..600b9a5
--- /dev/null
+++ b/libdb/test/lock004.tcl
@@ -0,0 +1,29 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/libdb/test/lock005.tcl b/libdb/test/lock005.tcl
new file mode 100644
index 0000000..77541df
--- /dev/null
+++ b/libdb/test/lock005.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Check that records are being locked by trying to
+ # fetch a record from the wrong transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "Lock not granted"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
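+# how_many_locks --
+# Scan the environment's lock_stat output for "Current number of locks"
+# and check that it matches the expected count.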
+proc how_many_locks { expected env } {
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
diff --git a/libdb/test/lockscript.tcl b/libdb/test/lockscript.tcl
new file mode 100644
index 0000000..a17144d
--- /dev/null
+++ b/libdb/test/lockscript.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Random lock tester.
+# Usage: lockscript dir numiters numobjs sleepint degree readratio
+# dir: lock directory.
+# numiters: Total number of iterations.
+# numobjs: Number of objects on which to lock.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+# readratio: Percent of locks that should be reads.
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "lockscript dir numiters numobjs sleepint degree readratio"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set numobjs [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set readratio [ lindex $argv 5 ]
+
+# Initialize random number generator
+global rand_init
+berkdb srand $rand_init
+
+
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
+puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
+puts "$sleepint $degree $readratio"
+flush stdout
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set locklist {}
+ set objlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj $numobjs]
+ set lastobj [expr $obj + 1]
+ set x [berkdb random_int 1 100 ]
+ if { $x <= $readratio } {
+ set rw read
+ } else {
+ set rw write
+ }
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
+
+ # Do get; add to list
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+ # Create a file to flag that we've a lock of the given
+ # type, after making sure only other read locks exist
+ # (if we're read locking) or no other locks exist (if
+ # we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
+ lappend locklist $lockp
+ if {$lastobj > $numobjs} {
+ break
+ }
+ }
+ # Pick sleep interval
+ puts "[timestamp -c] $locker sleeping"
+ # We used to sleep 1 to $sleepint seconds.  That made the test
+ # run for hours.  Instead, make it sleep for 10 to $sleepint * 100
+ # milliseconds, for a maximum sleep time of 0.5 s.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
+
+ # Now release locks
+ puts "[timestamp -c] $locker released locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
+ release_list $locklist
+ flush stdout
+}
+
+set ret [$e close]
+error_check_good env_close $ret 0
+
+puts "[timestamp -c] $locker Complete"
+flush stdout
+
+exit
diff --git a/libdb/test/log001.tcl b/libdb/test/log001.tcl
new file mode 100644
index 0000000..b17577b
--- /dev/null
+++ b/libdb/test/log001.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log001
+# TEST Read/write log records.
+proc log001 { } {
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+ set max [expr 1024 * 128]
+ log001_body $max $iter 1
+ log001_body $max $iter 0
+ log001_body $max $iter 1 "-encryptaes $passwd"
+ log001_body $max $iter 0 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 1
+ log001_body $max [expr $iter * 15] 0
+ log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd"
+}
+
+proc log001_body { max nrecs fixedlength {encargs ""} } {
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records "
+ if { $fixedlength == 1 } {
+ puts "(fixed-length $encargs)"
+ } else {
+ puts "(variable-length $encargs)"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/libdb/test/log002.tcl b/libdb/test/log002.tcl
new file mode 100644
index 0000000..c85cc1c
--- /dev/null
+++ b/libdb/test/log002.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+ set env [berkdb_env -create -home $testdir -mode 0644 \
+ -log -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/libdb/test/log003.tcl b/libdb/test/log003.tcl
new file mode 100644
index 0000000..1153b29
--- /dev/null
+++ b/libdb/test/log003.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb_env \
+ -create -log -home $testdir -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env \
+ -home $testdir -create -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
diff --git a/libdb/test/log004.tcl b/libdb/test/log004.tcl
new file mode 100644
index 0000000..89efe6c
--- /dev/null
+++ b/libdb/test/log004.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
diff --git a/libdb/test/log005.tcl b/libdb/test/log005.tcl
new file mode 100644
index 0000000..aacb9d2
--- /dev/null
+++ b/libdb/test/log005.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+ source ./include.tcl
+
+ puts "Log005: Check that log file sizes can change."
+ env_cleanup $testdir
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set env [berkdb_env \
+ -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+# Return the current log statistics.
+proc log005_stat { env s } {
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
diff --git a/libdb/test/logtrack.list b/libdb/test/logtrack.list
new file mode 100644
index 0000000..631c7b3
--- /dev/null
+++ b/libdb/test/logtrack.list
@@ -0,0 +1,46 @@
+PREFIX __crdel
+BEGIN metasub 142
+PREFIX __db
+BEGIN addrem 41
+BEGIN big 43
+BEGIN ovref 44
+BEGIN relink 45
+BEGIN debug 47
+BEGIN noop 48
+BEGIN pg_alloc 49
+BEGIN pg_free 50
+BEGIN cksum 51
+PREFIX __dbreg
+BEGIN register 2
+PREFIX __bam
+BEGIN split 62
+BEGIN rsplit 63
+BEGIN adj 55
+BEGIN cadjust 56
+BEGIN cdel 57
+BEGIN repl 58
+BEGIN root 59
+BEGIN curadj 64
+BEGIN rcuradj 65
+PREFIX __ham
+BEGIN insdel 21
+BEGIN newpage 22
+BEGIN splitdata 24
+BEGIN replace 25
+BEGIN copypage 28
+BEGIN metagroup 29
+BEGIN groupalloc 32
+BEGIN curadj 33
+BEGIN chgpg 34
+PREFIX __qam
+BEGIN incfirst 84
+BEGIN mvptr 85
+BEGIN del 79
+BEGIN add 80
+BEGIN delext 83
+PREFIX __txn
+BEGIN regop 10
+BEGIN ckp 11
+BEGIN child 12
+BEGIN xa_regop 13
+BEGIN recycle 14
diff --git a/libdb/test/logtrack.tcl b/libdb/test/logtrack.tcl
new file mode 100644
index 0000000..314f9b3
--- /dev/null
+++ b/libdb/test/logtrack.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# logtrack.tcl: A collection of routines, formerly implemented in Perl
+# as log.pl, to track which log record types the test suite hits.
+
+set ltsname "logtrack_seen.db"
+set ltlist $test_path/logtrack.list
+set tmpname "logtrack_tmp"
+
+proc logtrack_clean { } {
+ global ltsname
+
+ file delete -force $ltsname
+
+ return
+}
+
+proc logtrack_init { } {
+ global ltsname
+
+ logtrack_clean
+
+ # Create an empty tracking database.
+ [berkdb_open -create -truncate -btree $ltsname] close
+
+ return
+}
+
+# Dump the logs for directory dirname and record which log
+# records were seen.
+proc logtrack_read { dirname } {
+ global ltsname tmpname util_path
+ global encrypt passwd
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+
+ file delete -force $tmpname
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
+ error_check_good printlog $ret 0
+ error_check_good tmpfile_exists [file exists $tmpname] 1
+
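+ # db_printlog output lines generally look like
+ #	[file][offset]prefix_recordtype: fields...
+ # (e.g. "[1][28]__db_pg_alloc: ...").  The regexp below pulls out the
+ # prefix_recordtype piece and records it in the "seen" database.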
+ set f [open $tmpname r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
+ }
+ close $f
+ file delete -force $tmpname
+
+ error_check_good seendb_close [$seendb close] 0
+}
+
+# Print the log record types that were seen but should not have been
+# seen and the log record types that were not seen but should have been seen.
+proc logtrack_summary { } {
+ global ltsname ltlist testdir
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+ set existdb [berkdb_open -create -btree]
+ error_check_good existdb_open [is_valid_db $existdb] TRUE
+ set deprecdb [berkdb_open -create -btree]
+ error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
+
+ error_check_good ltlist_exists [file exists $ltlist] 1
+ set f [open $ltlist r]
+ set pref ""
+ while { [gets $f line] >= 0 } {
+ # Get the keyword, the first thing on the line:
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
+ set keyword [lindex $line 0]
+
+ if { [string compare $keyword PREFIX] == 0 } {
+ # New prefix.
+ set pref [lindex $line 1]
+ } elseif { [string compare $keyword BEGIN] == 0 } {
+ # A log type we care about; put it on our list.
+
+ # Skip noop and debug.
+ if { [string compare [lindex $line 1] noop] == 0 } {
+ continue
+ }
+ if { [string compare [lindex $line 1] debug] == 0 } {
+ continue
+ }
+
+ error_check_good exist_put [$existdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
+ error_check_good deprec_put [$deprecdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ }
+ }
+
+ error_check_good exist_curs \
+ [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
+ while { [llength [set dbt [$ec get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$seendb count $rec] == 0 } {
+ puts "FAIL: log record type $rec not seen"
+ }
+ }
+ error_check_good exist_curs_close [$ec close] 0
+
+ error_check_good seen_curs \
+ [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
+ while { [llength [set dbt [$sc get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$existdb count $rec] == 0 } {
+ if { [$deprecdb count $rec] == 0 } {
+ puts "FAIL: unknown log record type $rec seen"
+ } else {
+ puts "FAIL: deprecated log record type $rec seen"
+ }
+ }
+ }
+ error_check_good seen_curs_close [$sc close] 0
+
+ error_check_good seendb_close [$seendb close] 0
+ error_check_good existdb_close [$existdb close] 0
+ error_check_good deprecdb_close [$deprecdb close] 0
+
+ logtrack_clean
+}
diff --git a/libdb/test/mdbscript.tcl b/libdb/test/mdbscript.tcl
new file mode 100644
index 0000000..b610cb3
--- /dev/null
+++ b/libdb/test/mdbscript.tcl
@@ -0,0 +1,384 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Process script for the multi-process db tester.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+global dbenv
+global klock
+global l_keys
+global procid
+global alphabet
+
+# In Tcl, when there are multiple catch handlers, *all* handlers
+# are called, so we have to resort to this hack.
+#
+global exception_handled
+
+set exception_handled 0
+
+set datastr $alphabet$alphabet
+
+# Usage: mdbscript method dir file nentries iter procid procs
+# method: access method to use
+# dir: DBHOME directory
+# file: db file on which to operate
+# nentries: number of entries taken from dictionary
+# iter: number of operations to run
+# procid: this process's id number
+# procs: total number of processes running
+set usage "mdbscript method dir file nentries iter procid procs"
+
+# Verify usage
+if { $argc != 7 } {
+ puts "FAIL:[timestamp] test042: Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set dir [lindex $argv 1]
+set file [lindex $argv 2]
+set nentries [ lindex $argv 3 ]
+set iter [ lindex $argv 4 ]
+set procid [ lindex $argv 5 ]
+set procs [ lindex $argv 6 ]
+
+set pflags ""
+set gflags ""
+set txn ""
+
+set renum [is_rrecno $method]
+set omethod [convert_method $method]
+
+if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+}
+
+# Initialize seed
+global rand_init
+
+# We want repeatable results, but we also want each instance of mdbscript
+# to do something different. So we add the procid to the fixed seed.
+# (Note that this is a serial number given by the caller, not a pid.)
+berkdb srand [expr $rand_init + $procid]
+
+puts "Beginning execution for [pid] $method"
+puts "$dir db_home"
+puts "$file database"
+puts "$nentries data elements"
+puts "$iter iterations"
+puts "$procid process id"
+puts "$procs processes"
+
+set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000 at least with Tcl version 8.3.2. So we'll avoid
+# flushes in the main part of the loop below.
+flush stdout
+
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+set locker [ $dbenv lock_id ]
+
+set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Init globals (no data)
+set nkeys [db_init $db 0]
+puts "Initial number of keys: $nkeys"
+error_check_good db_init $nkeys $nentries
+tclsleep 5
+
+proc get_lock { k } {
+ global dbenv
+ global procid
+ global locker
+ global klock
+ global DB_LOCK_WRITE
+ global DB_LOCK_NOWAIT
+ global errorInfo
+ global exception_handled
+ # Make sure that the key isn't in the middle of
+ # a delete operation
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
+ set exception_handled 1
+
+ error_check_good \
+ get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
+ puts "Warning: key $k locked"
+ set klock NOLOCK
+ return 1
+ } else {
+ error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
+ }
+ return 0
+}
+
+# On each iteration we're going to randomly pick a key.
+# 1. We'll either get it (verifying that its contents are reasonable).
+# 2. Put it (using an overwrite to make the data be datastr:ID).
+# 3. Get it and do a put through the cursor, tacking our ID onto the end.
+# 4. Get it, read forward some random number of keys.
+# 5. Get it, read forward some random number of keys and do a put (replace).
+# 6. Get it, read forward some random number of keys and do a del. And then
+# do a put of the key.
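+# (In the switch statement below these correspond to op values 0 through 5.)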
+set gets 0
+set getput 0
+set overwrite 0
+set seqread 0
+set seqput 0
+set seqdel 0
+set dlen [string length $datastr]
+
+for { set i 0 } { $i < $iter } { incr i } {
+ set op [berkdb random_int 0 5]
+ puts "iteration $i operation $op"
+ set close_cursor 0
+ if {[catch {
+ switch $op {
+ 0 {
+ incr gets
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ continue;
+ }
+
+ set rec [eval {$db get} $txn $gflags {$key}]
+ error_check_bad "$db get $key" [llength $rec] 0
+ set partial [string range \
+ [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
+ error_check_good \
+ "$db get $key" $partial [pad_data $method $datastr]
+ }
+ 1 {
+ incr overwrite
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ set data $datastr:$procid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good "$db put $key" $ret 0
+ }
+ 2 {
+ incr getput
+ set dbc [$db cursor -update]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 [expr $dlen - 1]]
+ error_check_good \
+ "$dbc get $key" $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put \
+ -current [chop_data $method $rec]]
+ error_check_good "$dbc put $key" $ret 0
+ error_check_good "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ 3 -
+ 4 -
+ 5 {
+ if { $op == 3 } {
+ set flags ""
+ } else {
+ set flags -update
+ }
+ set dbc [eval {$db cursor} $flags]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+
+ # Now read a few keys sequentially
+ set nloop [berkdb random_int 0 10]
+ if { [berkdb random_int 0 1] == 0 } {
+ set flags -next
+ } else {
+ set flags -prev
+ }
+ while { $nloop > 0 } {
+ set lastret $ret
+ set ret [eval {$dbc get} $flags]
+ # Might read beginning/end of file
+ if { [llength $ret] == 0} {
+ set ret $lastret
+ break
+ }
+ incr nloop -1
+ }
+ switch $op {
+ 3 {
+ incr seqread
+ }
+ 4 {
+ incr seqput
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 \
+ [expr $dlen - 1]]
+ error_check_good "$dbc get $key" \
+ $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put -current \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $key" $ret 0
+ }
+ 5 {
+ incr seqdel
+ set k [lindex [lindex $ret 0] 0]
+ # We need to lock the item we're
+ # deleting so that someone else can't
+ # try to do a get while we're
+ # deleting it.
+ error_check_good "$klock put" \
+ [$klock put] 0
+ set klock NOLOCK
+ set cur [$dbc get -current]
+ error_check_bad get_current \
+ [llength $cur] 0
+ set key [lindex [lindex $cur 0] 0]
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue
+ }
+ set ret [$dbc del]
+ error_check_good "$dbc del" $ret 0
+ set rec $datastr
+ append rec ":$procid"
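+ # Re-insert the deleted key: renumbering recno
+ # databases use a cursor put -before, fixed
+ # record-based databases a plain db put on the
+ # old record number, and everything else a
+ # cursor put -keylast with the original key.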
+ if { $renum == 1 } {
+ set ret [$dbc put -before \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret $k
+ } elseif { \
+ [is_record_based $method] == 1 } {
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ set ret [$db put $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$db put $k" $ret 0
+ } else {
+ set ret [$dbc put -keylast $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret 0
+ }
+ }
+ }
+ if { $close_cursor == 1 } {
+ error_check_good \
+ "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+ global exception_handled;
+
+ puts $errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+
+ if { [string compare $klock NOLOCK] != 0 } {
+ catch {$klock put}
+ }
+ if {$close_cursor == 1} {
+ catch {$dbc close}
+ set close_cursor 0
+ }
+
+ if {[string first FAIL $theError] == 0 && \
+ $exception_handled != 1} {
+ flush stdout
+ error "FAIL:[timestamp] test042: key $k: $theError"
+ }
+ set exception_handled 0
+ } else {
+ if { [string compare $klock NOLOCK] != 0 } {
+ error_check_good "$klock put" [$klock put] 0
+ set klock NOLOCK
+ }
+ }
+}
+
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: "
+puts "\t$gets gets"
+puts "\t$overwrite overwrites"
+puts "\t$getput getputs"
+puts "\t$seqread seqread"
+puts "\t$seqput seqput"
+puts "\t$seqdel seqdel"
+flush stdout
+exit
diff --git a/libdb/test/memp001.tcl b/libdb/test/memp001.tcl
new file mode 100644
index 0000000..baff0c6
--- /dev/null
+++ b/libdb/test/memp001.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+
+ memp001_body 1 ""
+ memp001_body 3 ""
+ memp001_body 1 -private
+ memp001_body 3 -private
+ memp001_body 1 "-system_mem -shm_key 1"
+ memp001_body 3 "-system_mem -shm_key 1"
+
+}
+
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
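+ # -cachesize takes {gbytes bytes ncache}: here a single 400KB pool
+ # split into $ncache cache regions.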
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put all the pages we have here or
+ # else they end up pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [$mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put -dirty]
+ error_check_good page_put $ret 0
+
+ set p2 [$mpool get $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
+
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
diff --git a/libdb/test/memp002.tcl b/libdb/test/memp002.tcl
new file mode 100644
index 0000000..a59c461
--- /dev/null
+++ b/libdb/test/memp002.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+ #
+ # Multiple processes are not supported with private memory, so don't
+ # run memp002_body with -private.
+ #
+ memp002_body ""
+ memp002_body "-system_mem -shm_key 1"
+}
+
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/libdb/test/memp003.tcl b/libdb/test/memp003.tcl
new file mode 100644
index 0000000..d9e8cb7
--- /dev/null
+++ b/libdb/test/memp003.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+ #
+ # Multiple processes are not supported with private memory, so don't
+ # run memp003_body with -private.
+ #
+ memp003_body ""
+ memp003_body "-system_mem -shm_key 1"
+}
+
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+ set ret [eval {$db get} $txn {$k}]
+ error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/libdb/test/mpoolscript.tcl b/libdb/test/mpoolscript.tcl
new file mode 100644
index 0000000..423d023
--- /dev/null
+++ b/libdb/test/mpoolscript.tcl
@@ -0,0 +1,171 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Random multiple process mpool tester.
+# Usage: mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags
+# dir: lock directory.
+# id: Unique identifier for this process.
+# maxprocs: Number of procs in this test.
+# numiters: Total number of iterations.
+# pgsizes: Pagesizes for the different files. Length of this item indicates
+# how many files to use.
+# numpages: Number of pages per file.
+# sleepint: Maximum sleep interval.
+# flags: Flags for env open
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage \
+ "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set maxprocs [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+set pgsizes [ lindex $argv 4 ]
+set numpages [ lindex $argv 5 ]
+set sleepint [ lindex $argv 6 ]
+set flags [ lindex $argv 7]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+# Give time for all processes to start up.
+tclsleep 10
+
+puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
+puts " $pgsizes $numpages $sleepint"
+flush stdout
+
+# Figure out how small/large to make the cache
+set max 0
+foreach i $pgsizes {
+ if { $i > $max } {
+ set max $i
+ }
+}
+
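+# The -cachesize spec is {gbytes bytes ncache}; the single cache is sized
+# so every process can hold one page of the first file plus one page of
+# the largest page size.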
+set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
+set e [eval $env_cmd $flags]
+error_check_good env_open [is_valid_env $e] TRUE
+
+# Now open files
+set mpools {}
+set nfiles 0
+foreach psize $pgsizes {
+ set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
+ error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
+ lappend mpools $mp
+ incr nfiles
+}
+
+puts "Establishing long-term pin on file 0 page $id for process $id"
+
+# Set up the long-pin page
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
+error_check_good lock_get [is_valid_lock $lock $e] TRUE
+
+set mp [lindex $mpools 0]
+set master_page [$mp get -create $id]
+error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
+
+set r [$master_page init MASTER$id]
+error_check_good page_init $r 0
+
+# Release the lock but keep the page pinned
+set r [$lock put]
+error_check_good lock_put $r 0
+
+# Main loop. On each iteration, we'll check every page in each of
+# the files. On any file, if we see the appropriate tag in the
+# field, we'll rewrite the page, else we won't. Keep track of
+# how many pages we actually process.
+set pages 0
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ puts "[timestamp]: iteration $iter, $pages pages set so far"
+ flush stdout
+ for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
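+ # The tag we expect on each page alternates between the previous
+ # process's id (even-numbered files) and the next process's id
+ # (odd-numbered files), computed modulo maxprocs.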
+ if { [expr $fnum % 2 ] == 0 } {
+ set pred [expr ($id + $maxprocs - 1) % $maxprocs]
+ } else {
+ set pred [expr ($id + $maxprocs + 1) % $maxprocs]
+ }
+
+ set mpf [lindex $mpools $fnum]
+ for { set p 0 } { $p < $numpages } { incr p } {
+ set lock [$e lock_get write $locker $fnum:$p]
+ error_check_good lock_get:$fnum:$p \
+ [is_valid_lock $lock $e] TRUE
+
+ # Now, get the page
+ set pp [$mpf get -create $p]
+ error_check_good page_get:$fnum:$p \
+ [is_valid_page $pp $mpf] TRUE
+
+ if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
+ # Set page to self.
+ set r [$pp init $id]
+ error_check_good page_init:$fnum:$p $r 0
+ incr pages
+ set r [$pp put -dirty]
+ error_check_good page_put:$fnum:$p $r 0
+ } else {
+ error_check_good page_put:$fnum:$p [$pp put] 0
+ }
+ error_check_good lock_put:$fnum:$p [$lock put] 0
+ }
+ }
+ tclsleep [berkdb random_int 1 $sleepint]
+}
+
+# Now verify your master page, release its pin, then verify everyone else's
+puts "$id: End of run verification of master page"
+set r [$master_page is_setto MASTER$id]
+error_check_good page_check $r 1
+set r [$master_page put -dirty]
+error_check_good page_put $r 0
+
+set i [expr ($id + 1) % $maxprocs]
+set mpf [lindex $mpools 0]
+
+while { $i != $id } {
+ set p [$mpf get -create $i]
+ error_check_good mp_get [is_valid_page $p $mpf] TRUE
+
+ if { [$p is_setto MASTER$i] != 1 } {
+ puts "Warning: Master page $i not set."
+ }
+ error_check_good page_put:$p [$p put] 0
+
+ set i [expr ($i + 1) % $maxprocs]
+}
+
+# Close files
+foreach i $mpools {
+ set r [$i close]
+ error_check_good mpf_close $r 0
+}
+
+# Close environment system
+set r [$e close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/libdb/test/mutex001.tcl b/libdb/test/mutex001.tcl
new file mode 100644
index 0000000..e632403
--- /dev/null
+++ b/libdb/test/mutex001.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST mutex001
+# TEST Test basic mutex functionality
+proc mutex001 { } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb_env -lock -home $testdir} env] 0
+
+ puts "\tMutex001.a: Create lock env"
+ # Now open for real
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "\tMutex001.b: Create $nlocks mutexes"
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
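+ # The mutex handle supports get/release to acquire and drop an
+ # individual mutex, and setval/getval to store and read back a value
+ # associated with it; the loops below exercise each of these.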
+
+ # Get and set each mutex; sleep, then get the values and release.
+ puts "\tMutex001.c: Get/set loop"
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+ error_check_good mutex_release $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/mutex002.tcl b/libdb/test/mutex002.tcl
new file mode 100644
index 0000000..a8c808f
--- /dev/null
+++ b/libdb/test/mutex002.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST mutex002
+# TEST Test basic mutex synchronization
+proc mutex002 { } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ puts "\tMutex002.a: Open local and remote env"
+ set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ puts "\tMutex002.b: Local and remote get/set"
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+ error_check_good mutex_setval $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ puts "\tMutex002.c: Close remote"
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+ # Not sure why we need this, but we do... an extra blank line
+ # somehow gets output somewhere.
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+}
diff --git a/libdb/test/mutex003.tcl b/libdb/test/mutex003.tcl
new file mode 100644
index 0000000..0d42b39
--- /dev/null
+++ b/libdb/test/mutex003.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST mutex003
+# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
+proc mutex003 { } {
+ source ./include.tcl
+
+ set nmutex 20
+ set iter 500
+ set procs 5
+ set mdegree 3
+ set wait 2
+ puts "Mutex003: Multi-process random mutex test"
+
+ env_cleanup $testdir
+
+ puts "\tMutex003.a: Create environment"
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ puts "\tMutex003.b: Create $procs processes"
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $testdir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $testdir\
+ $iter $nmutex $wait $mdegree &]
+ lappend pidlist $p
+ }
+ puts "\tMutex003.c: $procs independent processes now running"
+ watch_procs $pidlist
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/$i.mutexout
+ }
+}
diff --git a/libdb/test/mutexscript.tcl b/libdb/test/mutexscript.tcl
new file mode 100644
index 0000000..cb895ce
--- /dev/null
+++ b/libdb/test/mutexscript.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Random mutex tester.
+# Usage: mutexscript dir numiters nmutex sleepint degree
+# dir: dir in which all the mutexes live.
+# numiters: Total number of iterations.
+# nmutex: Total number of mutexes.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "mutexscript dir numiters nmutex sleepint degree"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set nmutex [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set locker [pid]
+set mypid [sanitized_pid]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts -nonewline "Mutexscript: Beginning execution for $locker:"
+puts " $numiters $nmutex $sleepint $degree"
+flush stdout
+
+# Open the environment and the mutex
+set e [berkdb_env -create -mode 0644 -lock -home $dir]
+error_check_good env_open [is_valid_env $e] TRUE
+
+set mutex [$e mutex 0644 $nmutex]
+error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE
+
+# Sleep for a while to make sure that everyone has gotten in
+tclsleep 5
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set mlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj [expr $nmutex - 1]]
+ set lastobj [expr $obj + 1]
+ puts "[timestamp] $locker $lnum: $obj"
+
+ # Do get, set its val to own pid, and then add to list
+ error_check_good mutex_get:$obj [$mutex get $obj] 0
+ error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0
+ lappend mlist $obj
+ if {$lastobj >= $nmutex} {
+ break
+ }
+ }
+
+ # Sleep for 10 to (100*$sleepint) ms.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+
+ # Now release locks
+ foreach i $mlist {
+ error_check_good mutex_getval:$i [$mutex getval $i] $mypid
+ error_check_good mutex_setval:$i \
+ [$mutex setval $i [expr 0 - $mypid]] 0
+ error_check_good mutex_release:$i [$mutex release $i] 0
+ }
+ puts "[timestamp] $locker released mutexes"
+ flush stdout
+}
+
+puts "[timestamp] $locker Complete"
+flush stdout
diff --git a/libdb/test/ndbm.tcl b/libdb/test/ndbm.tcl
new file mode 100644
index 0000000..6f86cb4
--- /dev/null
+++ b/libdb/test/ndbm.tcl
@@ -0,0 +1,144 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Historic NDBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc ndbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "NDBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/ndbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
+ error_check_good ndbm_open [is_substr $db ndbm] 1
+ set did [open $dict]
+
+ error_check_good rdonly_false [$db rdonly] 0
+
+ set flags 0
+ set txn 0
+ set count 0
+ set skippednullkey 0
+
+ puts "\tNDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # NDBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [$db store $str $str insert]
+ error_check_good ndbm_store $ret 0
+
+ set d [$db fetch $str]
+ error_check_good ndbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tNDBM.b: dump file"
+ set oid [open $t1 w]
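+ # firstkey/nextkey walk the database in the historic NDBM style,
+ # returning -1 once the keys are exhausted.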
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # File descriptor tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
+
+ puts "\tNDBM.d: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open -rdonly $testfile]
+ error_check_good ndbm_open2 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch2 $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tNDBM.e: sequential scan and delete"
+
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open $testfile]
+ error_check_good ndbm_open3 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set ret [$db delete $key]
+ error_check_good ndbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+ error_check_good ndbm_close [$db close] 0
+}
diff --git a/libdb/test/parallel.tcl b/libdb/test/parallel.tcl
new file mode 100644
index 0000000..690d47f
--- /dev/null
+++ b/libdb/test/parallel.tcl
@@ -0,0 +1,295 @@
+# Code to load up the tests into the queue database
+# $Id$
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
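+ # For a queue database, put -append allocates the next record number
+ # and returns it, so the i-th successful put should come back as i+1.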
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+# file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set pidlist {}
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
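+ # get -consume atomically removes and returns the head record of the
+ # queue, so concurrent run_queue processes never pick up the same
+ # test; after 5 consecutive empty reads we assume the queue is drained.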
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
+
+proc run_ptest { nprocs test args } {
+ global parms
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method \
+ "hash queue queueext recno rbtree frecno rrecno btree" {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Test $test succeeded all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
diff --git a/libdb/test/recd001.tcl b/libdb/test/recd001.tcl
new file mode 100644
index 0000000..5c9e4a2
--- /dev/null
+++ b/libdb/test/recd001.tcl
@@ -0,0 +1,242 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST condition. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the data base
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it). Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
+proc recd001 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd001: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+ # op. If all goes well, this allows each command to depend on
+ # exactly one successful iteration of the previous command.
+ set testfile recd001.db
+ set testfile2 recd001-2.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd001.b: put"}
+ { {DB del -txn TXNID $key} "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
+ }
+
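+ # Each {CMD MSG} pair is handed to op_recover, which runs CMD inside
+ # a transaction and then exercises the abort, commit, and prepare
+ # variants, comparing the recovered database against a saved copy as
+ # described in the TEST comments above.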
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+ # Check partial extensions. If we add a key/data to the database
+ # and then expand it using -partial, then recover, recovery was
+ # failing in #3944. Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+ # If we run it without cleaning up the env in between, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg
+ }
+ return
+}
diff --git a/libdb/test/recd002.tcl b/libdb/test/recd002.tcl
new file mode 100644
index 0000000..27bce81
--- /dev/null
+++ b/libdb/test/recd002.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing conditions.
+proc recd002 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd002: skipping for specific pagesizes"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Queues don't do splits, so we don't really need the small page
+ # size; moreover, the small page size is smaller than the record,
+ # which would be a problem.
+ if { [string compare $omethod "-queue"] == 0 } {
+ set pagesize 4096
+ } else {
+ set pagesize 512
+ }
+ puts "Recd002: $method split recovery tests"
+
+ env_cleanup $testdir
+ set testfile recd002.db
+ set testfile2 recd002-2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -home $testdir"
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that splits
+ # happen fairly quickly.
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/libdb/test/recd003.tcl b/libdb/test/recd003.tcl
new file mode 100644
index 0000000..59e415f
--- /dev/null
+++ b/libdb/test/recd003.tcl
@@ -0,0 +1,119 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing conditions.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary and redo but no fix necessary.
+proc recd003 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd003 skipping for method $method"
+ return
+ }
+ puts "Recd003: $method duplicate recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ # See comment in recd001.tcl for why there are two database files...
+ set testfile recd003.db
+ set testfile2 recd003-2.db
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd003.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases.
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+ set n 10
+ set dupn 2000
+ set bign 500
+
+ # List of recovery tests: {CMD MSG} pairs
+ set dlist {
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.b: add dups"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.c: remove dups all at once"}
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.d: add dups (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.e: remove dups 1 at a time"}
+ { {populate DB $omethod TXNID $dupn 1 0}
+ "Recd003.f: dup split"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.g: remove dups (change state)"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.h: add big dup"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.i: remove big dup all at once"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.j: add big dup (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.k: remove big dup 1 at a time"}
+ { {populate DB $omethod TXNID $bign 1 1}
+ "Recd003.l: split big dup"}
+ }
+
+ foreach pair $dlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd003.m: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/libdb/test/recd004.tcl b/libdb/test/recd004.tcl
new file mode 100644
index 0000000..bafe9b2
--- /dev/null
+++ b/libdb/test/recd004.tcl
@@ -0,0 +1,95 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
+proc recd004 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd004: skipping for specific pagesizes"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Recd004 skipping for method $method"
+ return
+ }
+ puts "Recd004: $method big-key on internal page recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ set testfile recd004.db
+ set testfile2 recd004-2.db
+ set eflags "-create -txn -home $testdir"
+ puts "\tRecd004.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+	# Create the databases.  We use a small page size so that big keys
+	# are elevated to internal pages quickly.
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
+ { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd004.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/libdb/test/recd005.tcl b/libdb/test/recd005.tcl
new file mode 100644
index 0000000..eab16df
--- /dev/null
+++ b/libdb/test/recd005.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
+proc recd005 { method args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd005: $method catastrophic recovery"
+
+ berkdb srand $rand_init
+
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} {commit abort} \
+ {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags \
+ "-create -mode 0644 -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2
+
+ puts "\tRecd005.$tnum.c: Verifying initial population"
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+
+ # Now, close the environment (so that recovery will work
+ # on NT which won't allow delete of an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline \
+ "\tRecd005.$tnum.d: About to run recovery ... "
+ flush stdout
+
+ set stat [catch \
+ {exec $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ # Substitute a file that will need recovery and try
+ # running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e:\
+ About to run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
+ }
+}
+
+proc do_one_file { dir method env env_cmd filename num op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ # Save the initial file and open the environment and the first file
+ file copy -force $dir/$filename $dir/$filename.init
+ copy_extent_file $dir $filename init
+ set oflags "-auto_commit -unknown -env $env"
+ set db [eval {berkdb_open} $oflags $filename]
+
+ # Dump out file contents for initial case
+ open_and_dump_file $filename $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set txn [$env txn]
+ error_check_bad txn_begin $txn NULL
+ error_check_good txn_begin [is_substr $txn $env] 1
+
+ # Now fill in the db and the txnid in the command
+ populate $db $method $txn $num 0 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+ file copy -force $dir/$filename $dir/$filename.afterop
+ copy_extent_file $dir $filename afterop
+ open_and_dump_file $testdir/$filename.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+ error_check_good txn_$op:$txn [$txn $op] 0
+
+ if { $op == "commit" } {
+ puts "\t\tFile $filename executed and committed."
+ } else {
+ puts "\t\tFile $filename executed and aborted."
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ file copy -force $dir/$filename $dir/$filename.final
+ copy_extent_file $dir $filename final
+
+ # If this is an abort, it should match the original file.
+ # If this was a commit, then this file should match the
+ # afterop file.
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+ error_check_good close:$db [$db close] 0
+}
+
+proc check_file { dir env_cmd filename op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+}
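+
+# Illustrative helper, not part of the upstream test: it encodes the
+# snapshot rule that do_one_file and check_file above apply -- after an
+# abort the database must match the .init copy taken before the
+# transaction, and after a commit it must match the .afterop copy taken
+# just before the transaction was resolved.  The proc name is
+# hypothetical.
+proc recd005_expected_snapshot { op } {
+	if { [string compare $op "abort"] == 0 } {
+		return init
+	} else {
+		return afterop
+	}
+}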
diff --git a/libdb/test/recd006.tcl b/libdb/test/recd006.tcl
new file mode 100644
index 0000000..8d8c1a0
--- /dev/null
+++ b/libdb/test/recd006.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd006
+# TEST Nested transactions.
+proc recd006 { method {select 0} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd006 skipping for method $method"
+ return
+ }
+ puts "Recd006: $method nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd006.db
+ set testfile $testdir/$dbfile
+
+ puts "\tRecd006.a: create database"
+ set oflags "-create $args $omethod $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [$db put -nooverwrite $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Variables used below:
+ # p1: a pair of keys that are likely to be on the same page.
+ # p2: a pair of keys that are likely to be on the same page,
+ # but on a page different than those in p1.
+ set dbc [$db cursor]
+ error_check_good dbc [is_substr $dbc $db] 1
+
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:DB_FIRST [llength $ret] 0
+ set p1 [lindex [lindex $ret 0] 0]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:DB_NEXT [llength $ret] 0
+ lappend p1 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -last]
+ error_check_bad dbc_get:DB_LAST [llength $ret] 0
+ set p2 [lindex [lindex $ret 0] 0]
+ set kvals($p2) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:DB_PREV [llength $ret] 0
+ lappend p2 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now create the full transaction environment.
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd006.b: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Reset the environment.
+ reset_env $dbenv
+
+ set p1 [list $p1]
+ set p2 [list $p2]
+
+ # List of recovery tests: {CMD MSG} pairs
+ set rlist {
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
+ "Recd006.c: children (commit commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
+ "Recd006.d: children (commit commit)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
+ "Recd006.e: children (commit abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
+ "Recd006.f: children (commit abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
+ "Recd006.g: children (abort abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
+ "Recd006.h: children (abort abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
+ "Recd006.i: children (abort commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
+ "Recd006.j: children (abort commit)"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ }
+
+ puts "\tRecd006.k: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# Do the nested transaction test.
+# We want to make sure that children inherit properly from their
+# parents, that locks are properly handed back to parents, and that
+# the right thing happens on commit/abort.
+# In particular:
+# Write lock on parent, properly acquired by child.
+# Committed operation on child gives lock to parent so that
+# other child can also get the lock.
+# Aborted op by child releases lock so other child can get it.
+# Correct database state if child commits
+# Correct database state if child aborts
+proc nesttest { db parent env do p1 p2 child1 child2} {
+ global kvals
+ source ./include.tcl
+
+ if { $do == 1 } {
+ set func toupper
+ } else {
+ set func tolower
+ }
+
+ # Do an RMW on the parent to get a write lock.
+ set p10 [lindex $p1 0]
+ set p11 [lindex $p1 1]
+ set p20 [lindex $p2 0]
+ set p21 [lindex $p2 1]
+
+ set ret [$db get -rmw -txn $parent $p10]
+ set res $ret
+ set Dret [lindex [lindex $ret 0] 1]
+ if { [string compare $Dret $kvals($p10)] == 0 ||
+ [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
+ set val 0
+ } else {
+ set val $Dret
+ }
+ error_check_good get_parent_RMW $val 0
+
+ # OK, do child 1
+ set kid1 [$env txn -parent $parent]
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
+
+ # Reading write-locked parent object should be OK
+ #puts "\tRead write-locked parent object for kid1."
+ set ret [$db get -txn $kid1 $p10]
+ error_check_good kid1_get10 $ret $res
+
+ # Now update this child
+ set data [lindex [lindex [string $func $ret] 0] 1]
+ set ret [$db put -txn $kid1 $p10 $data]
+ error_check_good kid1_put10 $ret 0
+
+ #puts "\tKid1 successful put."
+
+ # Now start child2
+ #puts "\tBegin txn for kid2."
+ set kid2 [$env txn -parent $parent]
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
+
+ # Getting anything in the p1 set should deadlock, so let's
+ # work on the p2 set.
+ set data [string $func $kvals($p20)]
+ #puts "\tPut data for kid2."
+ set ret [$db put -txn $kid2 $p20 $data]
+ error_check_good kid2_put20 $ret 0
+
+ #puts "\tKid2 data put successful."
+
+ # Now let's do the right thing to kid1
+ puts -nonewline "\tKid1 $child1..."
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good kid1_commit [$kid1 commit] 0
+ } else {
+ error_check_good kid1_abort [$kid1 abort] 0
+ }
+ puts "complete"
+
+ # In either case, child2 should now be able to get the
+ # lock, either because it is inherited by the parent
+ # (commit) or because it was released (abort).
+ set data [string $func $kvals($p11)]
+ set ret [$db put -txn $kid2 $p11 $data]
+ error_check_good kid2_put11 $ret 0
+
+ # Now let's do the right thing to kid2
+ puts -nonewline "\tKid2 $child2..."
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good kid2_commit [$kid2 commit] 0
+ } else {
+ error_check_good kid2_abort [$kid2 abort] 0
+ }
+ puts "complete"
+
+ # Now, let parent check that the right things happened.
+ # First get all four values
+ set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
+ set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
+ set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
+ set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
+
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good parent_kid1 $p10_check \
+ [string tolower [string $func $kvals($p10)]]
+ } else {
+ error_check_good \
+ parent_kid1 $p10_check [string tolower $kvals($p10)]
+ }
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good parent_kid2 $p11_check \
+ [string tolower [string $func $kvals($p11)]]
+ error_check_good parent_kid2 $p20_check \
+ [string tolower [string $func $kvals($p20)]]
+ } else {
+ error_check_good parent_kid2 $p11_check $kvals($p11)
+ error_check_good parent_kid2 $p20_check $kvals($p20)
+ }
+
+	# Now do a write on the parent for p21, whose lock the parent
+	# should either already hold or be able to acquire.
+ set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
+ error_check_good parent_put21 $ret 0
+
+ return 0
+}
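+
+# Minimal sketch, not part of the upstream test, of the parent/child
+# transaction pattern that nesttest exercises above: a child created with
+# "-parent" can read and write data the parent has write-locked, and on
+# commit its locks are handed back to the parent so a sibling can acquire
+# them.  Only calls already used in this file appear here; the proc name
+# is hypothetical.
+proc recd006_nest_sketch { env db key data } {
+	set parent [$env txn]
+	set child [$env txn -parent $parent]
+	# The child may update data the parent has write-locked.
+	error_check_good child_put [$db put -txn $child $key $data] 0
+	# Committing the child gives its locks to the parent ...
+	error_check_good child_commit [$child commit] 0
+	# ... which can then commit (or abort) the whole nest.
+	error_check_good parent_commit [$parent commit] 0
+}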
diff --git a/libdb/test/recd007.tcl b/libdb/test/recd007.tcl
new file mode 100644
index 0000000..7c9e3e5
--- /dev/null
+++ b/libdb/test/recd007.tcl
@@ -0,0 +1,886 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file is correctly existing
+# TEST or not, as the case may be.
+proc recd007 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd007: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd007.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd007.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ set env [eval $env_cmd]
+
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
+ error_check_good envclose [$env close] 0
+
+ # Convert the args again because fixed_len is now real.
+ set opts [convert_args $method ""]
+
+ # List of recovery tests: {HOOKS MSG} pairs
+ # Where each HOOK is a list of {COPY ABORT}
+ #
+ set rlist {
+ { {"none" "preopen"} "Recd007.b0: none/preopen"}
+ { {"none" "postopen"} "Recd007.b1: none/postopen"}
+ { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
+ { {"none" "postlog"} "Recd007.b3: none/postlog"}
+ { {"none" "postsync"} "Recd007.b4: none/postsync"}
+ { {"postopen" "none"} "Recd007.c0: postopen/none"}
+ { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
+ { {"postlog" "none"} "Recd007.c2: postlog/none"}
+ { {"postsync" "none"} "Recd007.c3: postsync/none"}
+ { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
+ { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
+ { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
+ { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
+ { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
+ { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
+ { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
+ { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
+ }
+
+	# Read through the operation table and run the recovery tests.
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg
+ }
+
+ set rlist {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+ foreach op { dbremove dbrename dbtruncate } {
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $op
+ }
+ }
+
+ if { $is_windows_test != 1 } {
+ set env_cmd "berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
+ }
+
+ puts "\tRecd007.r: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Creating just a database
+ # 2. Creating a database with a subdb
+ # 3. Creating a 2nd subdb in a database
+ puts "\t$msg create with a database"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg create with a database and subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg
+ puts "\t$msg create with a database and 2nd subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg
+
+}
+
+proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ set dflags "-dar"
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_create_loc $copy] 1
+ error_check_good abort_location [is_valid_create_loc $abort] 1
+
+ if {([string first "logmeta" $copy] != -1 || \
+ [string first "logmeta" $abort] != -1) && \
+ [is_btree $method] == 0 } {
+ puts "\tSkipping for method $method"
+ $env test copy none
+ $env test abort none
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ # Basically non-existence is our initial state. When we
+ # abort, it is also our final state.
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ puts "\t\tExecuting command"
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ # Sync the mpool so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ $env mpool_sync
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and aborted."
+		error_check_bad db_open $ret 0
+
+ #
+ # Check that the file does not exist. Final state.
+ #
+ if { $sub != 2 } {
+ error_check_good db_open:exists \
+ [file exists $dir/$dbfile] 0
+ } else {
+ error_check_good \
+ diff(init,postcreate):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ #
+ # Check that the file exists.
+ #
+ error_check_good db_open [file exists $dir/$dbfile] 1
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on sub)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Deleting/Renaming just a database
+ # 2. Deleting/Renaming a database with a subdb
+ # 3. Deleting/Renaming a 2nd subdb in a database
+ puts "\t$msg $op with a database"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $op
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg $op with a database and subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $op
+ puts "\t$msg $op with a database and 2nd subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $op
+
+}
+
+proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_delete_loc $copy] 1
+ error_check_good abort_location [is_valid_delete_loc $abort] 1
+
+ if { [is_record_based $method] == 1 } {
+ set key1 1
+ set key2 2
+ } else {
+ set key1 recd007_key1
+ set key2 recd007_key2
+ }
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
+
+ #
+	# Depending on what sort of subdb we want, if any, our
+	# args to the open call will be different (and if we
+	# want a 2nd subdb, we create the first here).
+ #
+ # XXX
+ # For dbtruncate, we want oflags to have "$env" in it,
+ # not have the value currently in 'env'. That is why
+ # the '$' is protected below. Later on we use oflags
+ # but with a new $env we just opened.
+ #
+ switch $sub {
+ 0 {
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
+ }
+ 1 {
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ $env mpool_sync
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed.
+ #
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
+ puts "\t\tCommand executed and aborted."
+ error_check_good $op $ret 1
+
+ #
+ # Check that the file exists. Final state.
+ # Compare against initial file.
+ #
+ error_check_good post$op.1 [file exists $dir/$dbfile] 1
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ $env mpool_sync
+ error_check_good env_close [$env close] 0
+ catch { file copy -force $dir/$dbfile $init_file } res
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on abort)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+
+ puts "complete"
+
+ if { $abort == "none" } {
+ #
+		# Operation was committed.
+ #
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ berkdb debug_check
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now restore the .afterop file(s) to their original name.
+ # Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { [string first "none" $abort] != -1} {
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+#
+# This function tests a specific case of recovering after a db removal.
+# This is for SR #2538.  Basically we want to test the following sequence:
+# - Make an env.
+# - Make/close a db.
+# - Remove the db.
+# - Create another db of same name.
+# - Sync db but leave open.
+# - Run recovery.
+# - Verify no recovery errors and that new db is there.
+proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+ set omethod [convert_method $method]
+
+ puts "\tRecd007.q: Delete and recreate a database"
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.1 [file exists $dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ error_check_good db_recover $stat 0
+ error_check_good db_recover.1 [file exists $dir/$dbfile] 1
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls. Basically they are there to clean
+ # up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
+ set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+}
+
+proc is_valid_create_loc { loc } {
+ switch $loc {
+ none -
+ preopen -
+ postopen -
+ postlogmeta -
+ postlog -
+ postsync
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+proc is_valid_delete_loc { loc } {
+ switch $loc {
+ none -
+ predestroy -
+ postdestroy -
+ postremcall
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
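+
+# Illustrative sketch, not part of the upstream test: the "$env test copy"
+# and "$env test abort" calls used throughout this file appear to install
+# the hooks the header comment describes -- the library snapshots the
+# database file when it reaches the named point, or aborts the operation
+# there, so recovery can be checked against both states.  The location
+# names are the ones accepted by is_valid_create_loc above; the proc name
+# is hypothetical.
+proc recd007_hook_sketch { env } {
+	# Snapshot the file once the create's log record has been written ...
+	error_check_good test_copy [$env test copy postlog] 0
+	# ... and force the operation itself to fail just after the sync.
+	error_check_good test_abort [$env test abort postsync] 0
+}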
+
+# Do a logical diff on the db dump files. We expect that either
+# the files are identical, or if they differ, that it is exactly
+# just a free/invalid page.
+# Return 1 if they are different, 0 if logically the same (or identical).
+#
+proc dbdump_diff { flags initfile dir dbfile } {
+ source ./include.tcl
+
+ set initdump $initfile.dump
+ set dbdump $dbfile.dump
+
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
+ $initfile} ret]
+ error_check_good dbdump.init $stat 0
+
+ # Do a dump without the freelist which should eliminate any
+ # recovery differences.
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
+ error_check_good dbdump.db $stat 0
+
+ set stat [filecmp $dir/$dbdump $initdump]
+
+ if {$stat == 0} {
+ return 0
+ }
+ puts "diff: $dbdump $initdump gives:\n$ret"
+ return 1
+}
+
+proc recd007_check { op sub dir dbfile subdb new env oflags } {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist \
+ [file exists $dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc copy_afterop { dir } {
+ set r [catch { set filecopy [glob $dir/*.afterop] } res]
+ if { $r == 1 } {
+ return
+ }
+ foreach f $filecopy {
+ set orig [string range $f 0 \
+ [expr [string last "." $f] - 1]]
+ catch { file rename -force $f $orig} res
+ }
+}
diff --git a/libdb/test/recd008.tcl b/libdb/test/recd008.tcl
new file mode 100644
index 0000000..090e8c4
--- /dev/null
+++ b/libdb/test/recd008.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
+proc recd008 { method {breadth 4} {depth 4} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Recd008 skipping for method $method"
+ return
+ }
+ puts "Recd008: $method $breadth X $depth deeply nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd008.db
+
+ puts "\tRecd008.a: create database"
+ set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $count == 500} {
+ set p1 $key
+ set kvals($p1) $str
+ }
+ set ret [$db put $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ set txn_max [expr int([expr pow($breadth,$depth)])]
+ if { $txn_max < 20 } {
+ set txn_max 20
+ }
+ puts "\tRecd008.b: create environment for $txn_max transactions"
+
+ set eflags "-mode 0644 -create -txn_max $txn_max \
+ -txn -home $testdir"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ reset_env $dbenv
+
+ set rlist {
+ { {recd008_parent abort ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.c: child abort parent" }
+ { {recd008_parent commit ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.d: child commit parent" }
+ }
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ }
+
+ puts "\tRecd008.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc recd008_setkval { dbfile p1 } {
+ global kvals
+ source ./include.tcl
+
+ set db [berkdb_open $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get $p1]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+}
+
+# This is a lot like the op_recover procedure. We cannot use that
+# because it was not meant to be called recursively. This proc
+# knows about depth/breadth and file naming so that recursive calls
+# don't overwrite various initial and afterop files, etc.
+#
+# The basic flow of this is:
+# (Initial file)
+# Parent begin transaction (in op_recover)
+# Parent starts children
+# Recursively call recd008_recover
+# (children modify p1)
+# Parent modifies p1
+# (Afterop file)
+# Parent commit/abort (in op_recover)
+# (Final file)
+# Recovery test (in op_recover)
+proc recd008_parent { op env db p1key parent b0 d0 breadth depth } {
+ global kvals
+ source ./include.tcl
+
+ #
+ # Save copy of original data
+ # Acquire lock on data
+ #
+ set olddata $kvals($p1key)
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Parent spawns off children
+ #
+ set ret [recd008_txn $op $env $db $p1key $parent \
+ $b0 $d0 $breadth $depth]
+
+ puts "Child runs complete. Parent modifies data."
+
+ #
+ # Parent modifies p1
+ #
+ set newdata $olddata.parent
+ set ret [$db put -txn $parent $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Save value in kvals for later comparison
+ #
+ switch $op {
+ "commit" {
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ set kvals($p1key) $olddata
+ }
+ }
+ return 0
+}
+
+proc recd008_txn { op env db p1key parent b0 d0 breadth depth } {
+ global log_log_record_types
+ global kvals
+ source ./include.tcl
+
+ for {set d 1} {$d < $d0} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
+
+ # Save the initial file and open the environment and the file
+ for {set b $b0} {$b <= $breadth} {incr b} {
+ #
+ # Begin child transaction
+ #
+ set t [$env txn -parent $parent]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ set startd [expr $d0 + 1]
+ set child $b:$startd:$t
+ set olddata $kvals($p1key)
+ set newdata $olddata.$child
+ set ret [$db get -rmw -txn $t $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Recursively call to set up nested transactions/children
+ #
+ for {set d $startd} {$d <= $depth} {incr d} {
+ set ret [recd008_txn commit $env $db $p1key $t \
+ $b $d $breadth $depth]
+ set ret [recd008_txn abort $env $db $p1key $t \
+ $b $d $breadth $depth]
+ }
+ #
+ # Modifies p1.
+ #
+ set ret [$db put -txn $t $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Commit or abort
+ #
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Executing txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ switch $op {
+ "commit" {
+ puts "Command executed and committed."
+ error_check_good get_parent_RMW $Dret $newdata
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ puts "Command executed and aborted."
+ error_check_good get_parent_RMW $Dret $olddata
+ set kvals($p1key) $olddata
+ }
+ }
+ }
+ return 0
+}
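+
+# Illustrative helper, not part of the upstream test: it captures how
+# Recd008.b sizes the transaction region above -- a nest of transactions
+# with the given breadth and depth needs on the order of breadth^depth
+# transactions, and the test never asks for fewer than 20.  The proc name
+# is hypothetical.
+proc recd008_txn_budget { breadth depth } {
+	set n [expr {int(pow($breadth, $depth))}]
+	if { $n < 20 } {
+		set n 20
+	}
+	return $n
+}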
diff --git a/libdb/test/recd009.tcl b/libdb/test/recd009.tcl
new file mode 100644
index 0000000..498d12d
--- /dev/null
+++ b/libdb/test/recd009.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
+proc recd009 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
+ puts "Recd009 skipping for method $method."
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd009: Test record numbers across splits and recovery"
+
+ set testfile recd009.db
+ env_cleanup $testdir
+ set mkeys 1000
+ set nkeys 5
+ set data "data"
+
+ puts "\tRecd009.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_recno $method] == 1 } {
+ set key $i
+ } else {
+ set key key000$i
+ }
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ set newnkeys [expr $nkeys + 1]
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
+ "Recd009.c: split"}
+ { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
+ "Recd009.d: reverse split"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ if { $reverse == -1 } {
+ set abortkeys $nkeys
+ set commitkeys $mkeys
+ set abortpg 0
+ set commitpg 1
+ } else {
+ set abortkeys $mkeys
+ set commitkeys $nkeys
+ set abortpg 1
+ set commitpg 0
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts \
+ $commitkeys $commitpg
+ }
+ puts "\tRecd009.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
+ set stat [$db stat]
+ error_check_bad stat:check-split [is_substr $stat \
+ "{{Internal pages} 0}"] $numpg
+
+ set type [$db get_type]
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ set i 1
+ puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ if { [is_btree $type] } {
+ set thisi [$dbc get -get_recno]
+ } else {
+ set thisi [lindex [lindex $d 0] 0]
+ }
+ error_check_good recno_check $i $thisi
+ error_check_good record_count [expr $i <= $numkeys] 1
+ incr i
+ }
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd009_split { db txn split method nkeys mkeys } {
+ global errorCode
+ source ./include.tcl
+
+ set data "data"
+
+ set isrecno [is_recno $method]
+ # if mkeys is above 1000, need to adjust below for lexical order
+ if { $split == 1 } {
+ puts "\tRecd009_split: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i <= $mkeys } { incr i } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ set ret [$db put -txn $txn $key $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ } else {
+ puts "\tRecd009_split: Delete added keys to force reverse split."
+ # Since rrecno renumbers, we delete downward.
+ for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ error_check_good db_del:$i [$db del -txn $txn $key] 0
+ }
+ }
+ return 0
+}
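+
+# Illustrative helper, not part of the upstream test: the hand-rolled
+# zero padding in recd009_split above keeps string keys in the same order
+# as their numeric index, which is also why the comment there warns about
+# mkeys above 1000.  A format-based equivalent, assuming plain Tcl; the
+# proc name is hypothetical.
+proc recd009_pad_key { i } {
+	# key0001 ... key0999, lexically ordered for i < 1000
+	return [format "key%04d" $i]
+}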
diff --git a/libdb/test/recd010.tcl b/libdb/test/recd010.tcl
new file mode 100644
index 0000000..5cfc4fb
--- /dev/null
+++ b/libdb/test/recd010.tcl
@@ -0,0 +1,257 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
+proc recd010 { method {select 0} args} {
+ if { [is_btree $method] != 1 } {
+ puts "Recd010 skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd010: skipping for specific pagesizes"
+ return
+ }
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
+
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set opts [convert_args $method $largs]
+ set method [convert_method $method]
+
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
+
+ set testfile recd010.db
+ env_cleanup $testdir
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set mkeys 1000
+ set firstkeys 5
+ set data "data"
+ set key "recd010_key"
+
+ puts "\tRecd010.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd010.b: Fill page with $firstkeys small dups."
+ for { set i 1 } { $i <= $firstkeys } { incr i } {
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ set kvals 1
+ set kvals_dups $firstkeys
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # List of recovery tests: {CMD MSG} pairs.
+ if { $mkeys < 100 } {
+ puts "Recd010 mkeys of $mkeys too small"
+ return
+ }
+ set rlist {
+ { {recd010_split DB TXNID 1 2 $mkeys}
+ "Recd010.c: btree split 2 large dups"}
+ { {recd010_split DB TXNID 0 2 $mkeys}
+ "Recd010.d: btree reverse split 2 large dups"}
+ { {recd010_split DB TXNID 1 10 $mkeys}
+ "Recd010.e: btree split 10 dups"}
+ { {recd010_split DB TXNID 0 10 $mkeys}
+ "Recd010.f: btree reverse split 10 dups"}
+ { {recd010_split DB TXNID 1 100 $mkeys}
+ "Recd010.g: btree split 100 dups"}
+ { {recd010_split DB TXNID 0 100 $mkeys}
+ "Recd010.h: btree reverse split 100 dups"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts abort $reverse $firstkeys
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts commit $reverse $firstkeys
+ }
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database contains exactly numkeys keys
+# and that they are in order.
+#
+proc recd010_check { tdir testfile opts op reverse origdups } {
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set data "data"
+
+ if { $reverse == -1 } {
+ puts "\tRecd010_check: Verify split after $op"
+ } else {
+ puts "\tRecd010_check: Verify reverse split after $op"
+ }
+
+ set stat [$db stat]
+ if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
+ ([string compare $op "commit"] == 0 && $reverse != -1)]} {
+ set numkeys 0
+ set allkeys [expr $numkeys + 1]
+ set numdups $origdups
+ #
+ # If we abort the adding of dups, or commit
+ # the removal of dups, either way check that
+ # we are back at the beginning. Check that:
+ # - We have 0 internal pages.
+ # - We have only 1 key (the original we primed the db
+ # with at the beginning of the test).
+ # - We have only the original number of dups we primed
+ # the db with at the beginning of the test.
+ #
+ error_check_good stat:orig0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:orig1 [is_substr $stat \
+ "{{Number of keys} 1}"] 1
+ error_check_good stat:orig2 [is_substr $stat \
+ "{{Number of records} $origdups}"] 1
+ } else {
+ set numkeys $kvals
+ set allkeys [expr $numkeys + 1]
+ set numdups $kvals_dups
+ #
+ # If we abort the removal of dups, or commit the
+ # addition of dups, check that:
+ # - We have > 0 internal pages.
+ # - We have the number of keys.
+ #
+ error_check_bad stat:new0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:new1 [is_substr $stat \
+ "{{Number of keys} $allkeys}"] 1
+ }
+
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ puts "\tRecd010_check: Checking key and duplicate values"
+ set key "recd010_key"
+ #
+ # Check dups are there as they should be.
+ #
+ for {set ki 0} {$ki < $numkeys} {incr ki} {
+ set datacnt 0
+ for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
+ set d [$dbc get -nextdup]} {
+ set thisdata [lindex [lindex $d 0] 1]
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
+ incr datacnt
+ }
+ error_check_good dup_count $datacnt $numdups
+ }
+ #
+	# Check that the expected number of keys (allkeys) matches
+	# the number of keys that actually exist in the database.
+ #
+ set dupkeys 0
+ set lastkey ""
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ set thiskey [lindex [lindex $d 0] 0]
+ if { [string compare $lastkey $thiskey] != 0 } {
+ incr dupkeys
+ }
+ set lastkey $thiskey
+ }
+ error_check_good key_check $allkeys $dupkeys
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd010_split { db txn split nkeys mkeys } {
+ global errorCode
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set data "data"
+ set key "recd010_key"
+
+ set numdups [expr $mkeys / $nkeys]
+
+ set kvals $nkeys
+ set kvals_dups $numdups
+ if { $split == 1 } {
+ puts \
+"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ for {set i 0} { $i < $numdups } { incr i } {
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
+ error_check_good dbput:more $ret 0
+ }
+ }
+ } else {
+ puts \
+"\tRecd010_split: Delete $nkeys keys to force reverse split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ error_check_good db_del:$k [$db del -txn $txn $key$k] 0
+ }
+ }
+ return 0
+}
diff --git a/libdb/test/recd011.tcl b/libdb/test/recd011.tcl
new file mode 100644
index 0000000..4dbdb97
--- /dev/null
+++ b/libdb/test/recd011.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
+proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 11
+
+ puts "Recd0$tnum ($args): Test recovery to a specific timestamp."
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key KEY
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Main loop: every second or so, increment the db in a txn.
+ puts "\t\tInitial Checkpoint"
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b ($niter iterations):\
+ Transaction-protected increment loop."
+ for { set i 0 } { $i <= $niter } { incr i } {
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ set timeof($i) [timestamp -r]
+
+ # If an appropriate period has elapsed, checkpoint.
+ if { $i % $ckpt_freq == $ckpt_freq - 1 } {
+ puts "\t\tIteration $i: Checkpointing."
+ error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
+ }
+
+	# Sleep for $sleep_time seconds.
+ tclsleep $sleep_time
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
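+	# (db_recover's -t option takes a time of the form [[CC]YY]MMDDhhmm[.SS],
+	# which is why the timestamps below are formatted with "%y%m%d%H%M.%S".)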
+ puts "\tRecd0$tnum.c: Recover to each timestamp and check."
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
+
+ # Run db_recover.
+ set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover($i,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+ error_check_good timestamp_recover $datum [pad_data $method $i]
+
+ error_check_good db_close [$db close] 0
+ }
+
+ # Finally, recover to a time well before the first timestamp
+ # and well after the last timestamp. The latter should
+ # be just like the timestamp of the last test performed;
+ # the former should fail.
+ puts "\tRecd0$tnum.d: Recover to before the first timestamp."
+ set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_bad db_recover(before,$t) $ret 0
+
+ puts "\tRecd0$tnum.e: Recover to after the last timestamp."
+ set t [clock format \
+ [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover(after,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open(after) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum2 [lindex [lindex $dbt 0] 1]
+
+ error_check_good timestamp_recover $datum2 $datum
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/recd012.tcl b/libdb/test/recd012.tcl
new file mode 100644
index 0000000..2457d66
--- /dev/null
+++ b/libdb/test/recd012.tcl
@@ -0,0 +1,432 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
+proc recd012 { method {start 0} \
+ {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
+ source ./include.tcl
+
+ set tnum 12
+ set pagesize 512
+
+ if { $is_qnx_test } {
+ set niter 40
+ }
+
+ puts "Recd0$tnum $method ($args): Test recovery file management."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd012: skipping for specific pagesizes"
+ return
+ }
+
+ for { set i $start } { $i <= $niter } { incr i } {
+ env_cleanup $testdir
+
+ # For repeatability, we pass in the iteration number
+ # as a parameter and use that in recd012_body to seed
+ # the random number generator to randomize our operations.
+ # This lets us re-run a potentially failing iteration
+ # without having to start from the beginning and work
+ # our way to it.
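+	# (For instance, as a purely hypothetical usage, "recd012 btree 37 37"
+	# would re-run just iteration 37.)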
+ #
+	# (If the commented-out line below were enabled, the number of
+	# databases would range from 4 to 8 as a function of $i.)
+ # set ndbs [expr ($i % 5) + 4]
+
+ recd012_body \
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ }
+}
+
+proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
+ global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ puts "\tRecd0$tnum $method ($largs): Iteration $iter"
+ puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
+ set flags "-create -txn -home $testdir"
+ set env_cmd "berkdb_env $flags"
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Initialize random number generator based on $iter.
+ berkdb srand [expr $iter + $rand_init]
+
+ # Initialize database that keeps track of number of open files (so
+ # we don't run out of descriptors).
+ set ofname of.db
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
+ -create -dup -mode 0644 -btree -pagesize 512 $ofname]
+ error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
+ error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_txn_commit [$oftxn commit] 0
+ error_check_good of_close [$ofdb close] 0
+
+ # Create ndbs databases to work in, and a file listing db names to
+ # pick from.
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ # 50-50 chance of being a subdb, unless we're a queue.
+ if { [berkdb random_int 0 1] || [is_queue $method] } {
+ # not a subdb
+ set dbname recd0$tnum-$i.db
+ } else {
+ # subdb
+ set dbname "recd0$tnum-subdb.db s$i"
+ }
+ puts $f $dbname
+ set db [eval berkdb_open $oflags $dbname]
+ error_check_good db($i) [is_valid_db $db] TRUE
+ error_check_good db($i)_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close [$dbenv close] 0
+
+ # Now we get to the meat of things. Our goal is to do some number
+ # of opens, closes, updates, and shutdowns (simulated here by a
+ # close of all open handles and a close/reopen of the environment,
+ # with or without an envremove), matching the regular expression
+ #
+ # ((O[OUC]+S)+R+V)
+ #
+ # We'll repeat the inner + a random number up to $niniter times,
+ # and the outer + a random number up to $noutiter times.
+ #
+ # In order to simulate shutdowns, we'll perform the opens, closes,
+ # and updates in a separate process, which we'll exit without closing
+ # all handles properly. The environment will be left lying around
+ # before we run recovery 50% of the time.
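+	# As a rough illustration only (not an actual trace from this test),
+	# one outer iteration might look like:  O U C O U S  followed by
+	# zero to three R passes; the single final V happens once, after all
+	# the outer iterations are done.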
+ set out [berkdb random_int 1 $noutiter]
+ puts \
+ "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
+ for { set i 0 } { $i < $out } { incr i } {
+ set child [open "|$tclsh_path" w]
+
+ # For performance, don't source everything,
+ # just what we'll need.
+ puts $child "load $tcllib"
+ puts $child "set fixed_len $fixed_len"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd0$tnum.tcl"
+
+ set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
+
+ # Go.
+ berkdb debug_check
+ puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ $ndbs $tnum $method $ofname $largs"
+ close $child
+
+ # Run recovery 0-3 times.
+ set nrecs [berkdb random_int 0 3]
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover \
+ -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ file mkdir /tmp/12out
+ set fd [open /tmp/12out/[pid] w]
+ puts $fd $res
+ close $fd
+ }
+ error_check_good recover($j) $ret 0
+ }
+ }
+
+ # Run recovery one final time; it doesn't make sense to
+ # check integrity if we do not.
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ puts $res
+ }
+
+ # Make sure each datum is the correct filename.
+ puts "\t\tRecd0$tnum.c: Checking data integrity."
+ set dbenv [berkdb_env -create -private -home $testdir]
+ error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbinfo] > 0 } {
+ set db [eval berkdb_open -env $dbenv $dbinfo]
+ error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good integrity [lindex [lindex $dbt 0] 1] \
+ [pad_data $method $dbinfo]
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close_integrity [$dbenv close] 0
+
+ # Verify
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
+}
+
+proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
+ ofname args } {
+ global recd012_ofkey
+ source ./include.tcl
+ if { [is_record_based $method] } {
+ set keybase ""
+ } else {
+ set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
+ }
+
+ # Initialize our random number generator, repeatably based on an arg.
+ berkdb srand $rnd
+
+ # Open our env.
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Find out how many databases appear to be open in the log--we
+ # don't want recovery to run out of filehandles.
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
+ error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
+ set nopenfiles [lindex [lindex $dbt 0] 1]
+
+ error_check_good of_commit [$oftxn commit] 0
+
+ # Read our dbnames
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbname($i)] > 0 } {
+ incr i
+ }
+ close $f
+
+ # We now have $ndbs extant databases.
+ # Open one of them, just to get us started.
+ set opendbs {}
+ set oflags "-env $dbenv $args"
+
+ # Start a transaction, just to get us started.
+ set curtxn [$dbenv txn]
+ error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
+
+ # Inner loop. Do $in iterations of a random open, close, or
+ # update, where $in is between 1 and $niniter.
+ set in [berkdb random_int 1 $niniter]
+ for { set j 0 } { $j < $in } { incr j } {
+ set op [berkdb random_int 0 2]
+ switch $op {
+ 0 {
+ # Open.
+ recd012_open
+ }
+ 1 {
+ # Update. Put random-number$keybase as key,
+ # filename as data, into random database.
+				set num_open [llength $opendbs]
+				if { $num_open == 0 } {
+					# If none are open, do an open first,
+					# then recount the handles so the
+					# random index below stays valid.
+					recd012_open
+					set num_open [llength $opendbs]
+				}
+ set n [berkdb random_int 0 [expr $num_open - 1]]
+ set pair [lindex $opendbs $n]
+ set udb [lindex $pair 0]
+ set uname [lindex $pair 1]
+
+ set key [berkdb random_int 1000 1999]$keybase
+ set data [chop_data $method $uname]
+ error_check_good put($uname,$udb,$key,$data) \
+ [$udb put -txn $curtxn $key $data] 0
+
+				# One time in four, commit the transaction
+				# (currently disabled by the "&& 0" below).
+ if { [berkdb random_int 0 3] == 0 && 0 } {
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+ 2 {
+ # Close.
+ if { [llength $opendbs] == 0 } {
+ # If none are open, open instead of closing.
+ recd012_open
+ continue
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 \
+ [expr [llength $opendbs] - 1]]
+
+ set db [lindex [lindex $opendbs $which] 0]
+ error_check_good db_choice [is_valid_db $db] TRUE
+ global errorCode errorInfo
+
+ error_check_good db_close \
+ [[lindex [lindex $opendbs $which] 0] close] 0
+
+ set opendbs [lreplace $opendbs $which $which]
+ incr nopenfiles -1
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+
+ # One time in two hundred, checkpoint.
+ if { [berkdb random_int 0 199] == 0 } {
+ puts "\t\t\tRecd0$tnum:\
+ Random checkpoint after operation $outiter.$j."
+ error_check_good txn_ckpt \
+ [$dbenv txn_checkpoint] 0
+ set nopenfiles \
+ [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
+ }
+ }
+
+ # We have to commit curtxn. It'd be kind of nice not to, but
+ # if we start in again without running recovery, we may block
+ # ourselves.
+ error_check_good curtxn_commit [$curtxn commit] 0
+
+ # Put back the new number of open files.
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
+ error_check_good of_put \
+ [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
+ error_check_good of_commit [$oftxn commit] 0
+ error_check_good ofdb_close [$ofdb close] 0
+}
+
+proc recd012_open { } {
+ # This is basically an inline and has to modify curtxn,
+ # so use upvars.
+ upvar curtxn curtxn
+ upvar ndbs ndbs
+ upvar dbname dbname
+ upvar dbenv dbenv
+ upvar oflags oflags
+ upvar opendbs opendbs
+ upvar nopenfiles nopenfiles
+
+ # Return without an open if we've already opened too many files--
+ # we don't want to make recovery run out of filehandles.
+ if { $nopenfiles > 30 } {
+ #puts "skipping--too many open files"
+ return -code break
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 [expr $ndbs - 1]]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
+ lappend opendbs [list $db $dbname($which)]
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
+
+ incr nopenfiles
+}
+
+# Update the database containing the number of files that db_recover has
+# to contend with--we want to avoid letting it run out of file descriptors.
+# We do this by keeping track of the number of unclosed opens since the
+# checkpoint before last.
+# $recd012_ofkey stores this current value; the two dups available
+# at $recd012_ofckptkey store the number of opens during each of the two
+# previous checkpoint intervals.
+# Thus, if the current value is 17 when we do a checkpoint, and the
+# stored values are 3 and 8, the new current value (which we return)
+# is 14, and the new stored values are 8 and 6.
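+# (In other words, with those numbers: new current = 17 - 3 = 14, and the
+# new second dup = 14 - 8 = 6, matching the arithmetic in the proc below.)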
+proc recd012_nopenfiles_ckpt { env db nopenfiles } {
+ global recd012_ofckptkey
+ set txn [$env txn]
+ error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
+
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get the first ckpt value and delete it.
+ set dbt [$dbc get -set $recd012_ofckptkey]
+ error_check_good set [llength $dbt] 1
+
+ set discard [lindex [lindex $dbt 0] 1]
+ error_check_good del [$dbc del] 0
+
+ set nopenfiles [expr $nopenfiles - $discard]
+
+ # Get the next ckpt value
+ set dbt [$dbc get -nextdup]
+ error_check_good set2 [llength $dbt] 1
+
+ # Calculate how many opens we've had since this checkpoint before last.
+ set onlast [lindex [lindex $dbt 0] 1]
+ set sincelast [expr $nopenfiles - $onlast]
+
+ # Put this new number at the end of the dup set.
+ error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
+
+ # We should never deadlock since we're the only one in this db.
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ return $nopenfiles
+}
+
+# globals -- it's not worth passing these around, as they're constants
+set recd012_ofkey OPENFILES
+set recd012_ofckptkey CKPTS
diff --git a/libdb/test/recd013.tcl b/libdb/test/recd013.tcl
new file mode 100644
index 0000000..047fa0b
--- /dev/null
+++ b/libdb/test/recd013.tcl
@@ -0,0 +1,287 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
+#
+# XXX
+# Other tests that cover more specific variants of the same issue
+# are in the access method tests for now. This is probably wrong; we
+# put this one here because they're closely based on and intertwined
+# with other, non-transactional cursor stability tests that are among
+# the access method tests, and because we need at least one test to
+# fit under recd and keep logtrack from complaining. We'll sort out the mess
+# later; the important thing, for now, is that everything that needs testing
+# gets tested. (This really shouldn't be under recd at all, since it doesn't
+# run recovery!)
+proc recd013 { method { nitems 100 } args } {
+ source ./include.tcl
+ global alphabet log_log_record_types
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 13
+ set pgsz 512
+
+ puts "Recd0$tnum $method ($args): Test of aborted cursor adjustments."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd013: skipping for specific pagesizes"
+ return
+ }
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set keybase ""
+ } else {
+ set keybase "key"
+ }
+
+ puts "\tRecd0$tnum.a:\
+ Create environment, database, and parent transaction."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Create a database containing $nitems items, numbered with odds.
+ # We'll then put the even numbers during the body of the test.
+ set txn [$env txn]
+ error_check_good init_txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+
+ # First, try to put the item in a child transaction,
+ # then abort and verify all the cursors we've done up until
+ # now.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
+ error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+ }
+
+ puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\tRecd0$tnum.b: Put test."
+ puts "\t\tRecd0$tnum.b.1: Put items."
+ set ctxn [$env txn -parent $txn]
+ error_check_good txn [is_valid_txn $ctxn $env] TRUE
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
+
+ # If we're a renumbering recno, this is uninteresting.
+ # Stir things up by putting a few additional records at
+ # the beginning.
+ if { [is_rrecno $method] == 1 } {
+ set curs [$db cursor -txn $ctxn]
+ error_check_bad llength_get_first \
+ [llength [$curs get -first]] 0
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+ # expect a recno!
+ error_check_good rrecno_put($i) \
+ [$curs put -before ADDITIONAL.$i] 1
+ error_check_good curs_close [$curs close] 0
+ }
+ }
+
+ puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
+
+ # Now put back all the even records, this time in the parent.
+ # Commit and re-begin the transaction so we can abort and
+ # get back to a nice full database.
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Delete test. Set a cursor to each record. Delete the even ones
+ # in the parent and check cursor stability. Then open a child
+ # transaction, and delete the odd ones. Verify that the database
+ # is empty.
+ puts "\tRecd0$tnum.c: Delete test."
+ unset dbc
+
+ # Create cursors pointing at each item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
+ TRUE
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
+
+ if { [is_rrecno $method] != 1 } {
+ set init 2
+ set bound [expr 2 * $nitems]
+ set step 2
+ } else {
+ # In rrecno, deletes will renumber the items, so we have
+ # to take that into account when we delete by recno.
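+		# (Hypothetical illustration: with 2 * nitems = 6 records,
+		# deleting recnos 2, 3 and 4 in turn removes the records
+		# originally numbered 2, 4 and 6, since each delete shifts
+		# the remaining records down by one.)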
+ set init 2
+ set bound [expr $nitems + 1]
+ set step 1
+ }
+
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
+
+ set ctxn [$env txn -parent $txn]
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ # If this is an rrecno, just delete the first
+ # item repeatedly--the renumbering will make
+ # that delete everything.
+ set j 1
+ }
+ error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
+ }
+
+ # Verify that everyone's deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good get_deleted($i) \
+ [llength [$db get -txn $ctxn $keybase$i]] 0
+ }
+
+ puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
+
+ puts "\tRecd0$tnum.d: Clean up."
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
+
+ if { $log_log_record_types == 1 } {
+ logtrack_read $testdir
+ }
+}
diff --git a/libdb/test/recd014.tcl b/libdb/test/recd014.tcl
new file mode 100644
index 0000000..9d274cc
--- /dev/null
+++ b/libdb/test/recd014.tcl
@@ -0,0 +1,445 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd014
+# TEST This is a recovery test for create/delete of queue extents. After
+# TEST each operation we recover and make sure the extent file does or
+# TEST does not exist, as the case may be.
+proc recd014 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+	if { [is_queueext $method] != 1 } {
+ puts "Recd014: Skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd014: skipping for specific pagesizes"
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ #
+ # We will use 512-byte pages, to be able to control
+ # when extents get created/removed.
+ #
+ set fixed_len 300
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ #
+ # We want to set -extent 1 instead of what
+ # convert_args gave us.
+ #
+ set exti [lsearch -exact $opts "-extent"]
+ incr exti
+ set opts [lreplace $opts $exti $exti 1]
+
+ puts "Recd014: $method extent creation/deletion tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd014.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd014.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ puts "\tRecd014.b: Create test commit"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.b: Create test abort"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ puts "\tRecd014.c: Consume test commit"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.c: Consume test abort"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ set fixed_len $orig_fixed_len
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
+ global log_log_record_types
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set init_file $dir/$dbfile.init
+ set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set oflags "-env $env $noenvflags"
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ #
+ # The command to execute to create an extent is a put.
+ # We are just creating the first one, so our extnum is 0.
+ #
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+ puts "\t\tExecuting command"
+ set putrecno [$db put -txn $t -append $data]
+ error_check_good db_put $putrecno 1
+
+ # Sync the db so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ error_check_good extput:exists1 [file exists $dbq] 1
+ set ret [$db get $putrecno]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted. Verify our entry is not there.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good db_get [llength $ret] 0
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_get [llength $ret] 1
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ }
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still does/n't exist when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ #
+ # Verify it did not change.
+ #
+ error_check_good extput:exists2 [file exists $dbq] 1
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # Undo.
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+ move_file_extent $dir $dbfile afterop copy
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # To redo, remove the dbfiles. Run recovery again.
+ #
+ catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
+ copy_extent_file $dir $dbfile renamed rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ #
+ # !!!
+ # Even though db_recover exits with status 0, it should print out
+ # a warning because the file didn't exist. Db_recover writes this
+ # to stderr. Tcl assumes that ANYTHING written to stderr is an
+ # error, so even though we exit with 0 status, we still get an
+ # error back from 'catch'. Look for the warning.
+ #
+ if { $stat == 1 && [is_substr $result "warning"] == 0 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ #
+ # Verify it was redone. However, since we removed the files
+ # to begin with, recovery with abort will not recreate the
+ # extent. Recovery with commit will.
+ #
+ if {$txncmd == "abort"} {
+ error_check_good extput:exists3 [file exists $dbq] 0
+ } else {
+ error_check_good extput:exists3 [file exists $dbq] 1
+ }
+}
+
+proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ if { $txncmd == "commit" } {
+ #
+ # Operation was committed. Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation aborted. The file is there, but make
+ # sure the item is not.
+ #
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set ret [$xdb get $putrecno]
+ error_check_good db_get [llength $ret] 0
+ error_check_good db_close [$xdb close] 0
+ }
+}
+
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
+ global log_log_record_types
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
+ -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set putrecno [$db put -txn $txn -append $data]
+ error_check_good db_put $putrecno 1
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\t\tExecuting command"
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed until recovery is run.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set dbcmd "$db get -txn $t -consume"
+ set ret [eval $dbcmd]
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted, verify ext did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+
+ #
+ # Check that the file exists. Final state.
+ # Since we aborted the txn, we should be able
+ # to get to our original entry.
+ #
+ error_check_good postconsume.1 [file exists $dbq] 1
+ error_check_good \
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ #
+ # Check file existence. Consume operations remove
+ # the extent when we move off, which we should have
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here on what we ended up with. Should be a no-op.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Run recovery here. Re-do the operation.
+ # Verify that the file doesn't exist
+ # (if we committed) or change (if we aborted)
+ # when we are done.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover2 [file exists $dbq] 0
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover3 [file exists $dbq] 0
+ }
+}
diff --git a/libdb/test/recd015.tcl b/libdb/test/recd015.tcl
new file mode 100644
index 0000000..624495d
--- /dev/null
+++ b/libdb/test/recd015.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd015
+# TEST This is a recovery test for testing lots of prepared txns.
+# TEST It forces txn_recover to be called first with the DB_FIRST flag
+# TEST and then with DB_NEXT.
+proc recd015 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ puts "$msg Simple test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 250
+ set testfile recd015.db
+ set txnmax [expr $numtxns + 5]
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ env_cleanup $testdir
+ set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set msg "\tRecd015.b"
+ puts "$msg Large test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
diff --git a/libdb/test/recd016.tcl b/libdb/test/recd016.tcl
new file mode 100644
index 0000000..bb07948
--- /dev/null
+++ b/libdb/test/recd016.tcl
@@ -0,0 +1,183 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd016
+# TEST This is a recovery test for running recovery while another
+# TEST recovery is already in progress. Bad things may or may not happen
+# TEST during the concurrent runs, but once recovery is afterwards run
+# TEST properly, the database should be correct.
+proc recd016 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd016: $method ($args) simultaneous recovery test"
+ puts "Recd016: Skipping; waiting on SR #6277"
+ return
+
+ # Create the database and environment.
+ set testfile recd016.db
+
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ cleanup $testdir NULL
+
+ #
+ # Use a smaller log to make more files and slow down recovery.
+ #
+ set gflags ""
+ set pflags ""
+ set log_max [expr 256 * 1024]
+ set nentries 10000
+ set nrec 6
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ # Since we are using txns, we need at least 1 lock per
+ # record (for queue). So set lock_max accordingly.
+ set lkmax [expr $nentries * 2]
+
+ puts "\tRecd016.a: Create environment and database"
+ set env_cmd "berkdb_env -create -log_max $log_max \
+ -lock_max $lkmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} \
+ $omethod -auto_commit -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set abid [open $t4 w]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc recd016_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc recd016.check
+ }
+ puts "\tRecd016.b: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+ # Start a transaction. Alternately abort and commit them.
+	# This creates a bigger log for the concurrent recovery runs to collide on.
+ #
+ set txn [$env txn]
+ set ret [eval \
+ {$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ if {[expr $count % 2] == 0} {
+ set ret [$txn commit]
+ error_check_good txn_commit $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good commit_get \
+ $ret [list [list $key [pad_data $method $str]]]
+ } else {
+ set ret [$txn abort]
+ error_check_good txn_abort $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good abort_get [llength $ret] 0
+ puts $abid $key
+ }
+ incr count
+ }
+ close $did
+ close $abid
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set pidlist {}
+ puts "\tRecd016.c: Start up $nrec recovery processes at once"
+ for {set i 0} {$i < $nrec} {incr i} {
+ set p [exec $util_path/db_recover -h $testdir -c &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+ #
+ # Now that they are all done run recovery correctly
+ puts "\tRecd016.d: Run recovery process"
+ set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tRecd016.e: Open, dump and check database"
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j $i
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t4 $t3
+ file rename -force $t3 $t4
+ fileextract $t2 $t4 $t3
+ file rename -force $t3 $t5
+
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good envclose [$env close] 0
+
+ error_check_good Recd016:diff($t5,$t3) \
+ [filecmp $t5 $t3] 0
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+# Check function for recd016; keys and data are identical
+proc recd016.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc recd016_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/libdb/test/recd017.tcl b/libdb/test/recd017.tcl
new file mode 100644
index 0000000..f530165
--- /dev/null
+++ b/libdb/test/recd017.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd017
+# TEST Test recovery and security. This is basically a watered
+# TEST down version of recd001 just to verify that encrypted environments
+# TEST can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+	# op.  If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
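+	# (For example, with a hypothetical 4096-byte pagesize this would
+	# make fixed_len 1024.)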
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
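+ # subst expands $key and $data in the command template here; the
+ # DB and TXNID placeholders are presumably filled in by op_recover.
+ # With a non-empty $select list, only ops whose one-letter tag
+ # (e.g. "b" from "Recd017.b:") appears in the list are run.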
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/libdb/test/recd018.tcl b/libdb/test/recd018.tcl
new file mode 100644
index 0000000..233775a
--- /dev/null
+++ b/libdb/test/recd018.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd018
+# TEST Test recovery of closely interspersed checkpoints and commits.
+#
+# This test comes from the error case in #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 18
+
+ puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+ # Now run recovery three ways: first as a no-op, then after restoring
+ # the initial copy of each database, then after restoring the afterop copy.
+ puts "\tRecd0$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/libdb/test/recd019.tcl b/libdb/test/recd019.tcl
new file mode 100644
index 0000000..5f872df
--- /dev/null
+++ b/libdb/test/recd019.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
+ #
+ set nument [expr $numid * 3 - 2]
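+ # With the default numid of 50 this is 148 committed txns, enough to
+ # cycle through the 50-id window set above almost three times and
+ # force the txn_recycle records this test wants to recover.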
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+}
diff --git a/libdb/test/recd020.tcl b/libdb/test/recd020.tcl
new file mode 100644
index 0000000..ee7eae6
--- /dev/null
+++ b/libdb/test/recd020.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST recd020
+# TEST Test recovery after checksum error.
+proc recd020 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd020: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Recd020: skipping for method $method"
+ return
+ }
+
+ puts "Recd020: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd020.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd020.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd020.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd020.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
+ #
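+ # The code below inverts one byte 200 bytes into page 2; flipping a
+ # single byte is enough to make that page's checksum fail verification.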
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
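+ # Everything from here on runs in the child tclsh via send_cmd over
+ # this pipe, so the panic that follows only poisons the child's
+ # handles, not those of the main test process.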
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+ # Set the pipe to non-blocking mode so that after each command we can
+ # drain all of that command's output and know which command produced it.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: catastrophic recovery required"] 1
+ set got_err 1
+ # Now verify that it was an error on the page we set.
+ error_check_good dbget:pg$pg [is_substr $ret \
+ "failed for page $pg"] 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "run recovery"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd020.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error: catastrophic recovery required"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd020.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
diff --git a/libdb/test/recd15scr.tcl b/libdb/test/recd15scr.tcl
new file mode 100644
index 0000000..b22500a
--- /dev/null
+++ b/libdb/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
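+# Leaving the prepared txns unresolved is the point: the calling recd015
+# test can presumably then recover and resolve them using the gid file.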
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/libdb/test/recdscript.tcl b/libdb/test/recdscript.tcl
new file mode 100644
index 0000000..cabaafa
--- /dev/null
+++ b/libdb/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile gidfile cmd
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# cmd: db command to execute
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+
+op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
+flush stdout
diff --git a/libdb/test/rep001.tcl b/libdb/test/rep001.tcl
new file mode 100644
index 0000000..fd0b2fa
--- /dev/null
+++ b/libdb/test/rep001.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can correctly
+# TEST run test001 and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "01" } args } {
+ global passwd
+
+ puts "Rep0$tnum: Replication sanity test."
+
+ set envargs ""
+ rep001_sub $method $niter $tnum $envargs $args
+
+ puts "Rep0$tnum: Replication and security sanity test."
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ rep001_sub $method $niter $tnum $envargs $args
+}
+
+proc rep001_sub { method niter tnum envargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set masterenv \
+ [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
+ $envargs {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
+ {-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open a test database on the master (so we can test having handles
+ # open across an upgrade).
+ puts "\tRep0$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep0$tnum.b: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.c: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Remove the file (and update client).
+ puts "\tRep0$tnum.d: Remove the file on the master and close master."
+ error_check_good remove \
+ [$masterenv dbremove -auto_commit test0$tnum.db] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Rename the handle: from here on the old client env serves as the new master.
+ puts "\tRep0$tnum.e: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run test001 in the new master
+ puts "\tRep0$tnum.f: Running test001 in new master."
+ eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.g: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
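+ # rep_limit takes a gbytes/bytes pair; 0 and 64KB should cap how much
+ # data the master sends in response to any single message.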
+ set newclientenv [eval {berkdb_env -create -recover} $envargs \
+ -txn -lock_max 2500 \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ error_check_good newclient_env [is_valid_env $newclientenv] TRUE
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set stats [$newmasterenv rep_stat]
+ set nthrottles [getstats $stats {Transmission limited}]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+
+ # Run a modified test001 in the new master (and update client).
+ puts "\tRep0$tnum.h: Running test001 in new master."
+ eval test001 $method \
+ $niter $niter $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.j: Verifying new client database contents."
+ set testdir [get_home $newmasterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $newclientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/libdb/test/rep002.tcl b/libdb/test/rep002.tcl
new file mode 100644
index 0000000..bde43ca
--- /dev/null
+++ b/libdb/test/rep002.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
+ source ./include.tcl
+ global elect_timeout
+
+ set elect_timeout 1000000
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep0$tnum.d: Starting election without dead master."
+
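+	# start_election arguments, as used here: the message-queue dir,
+	# the env command, the number of sites (clients plus master),
+	# a priority of 20, and the election timeout.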
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # We want to verify that all the clients except the one that declared
+ # the election receive the election message.
+ # We also want to verify that the master declares the election
+ # over by fiat, even if everyone uses a lower priority than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the returned newmaster,
+ # if any, is 1 (the master's replication ID).
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+
+ incr nproced [replprocessqueue $masterenv 1 0 he nm]
+
+ if { $he == 1 } {
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
+ set got_hold_elect(M) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] 0 \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good got_hold_elect(master) $got_hold_elect(M) 0
+ unset got_hold_elect(M)
+ # error_check_good got_hold_elect(0) $got_hold_elect(0) 0
+ unset got_hold_elect(0)
+ for { set i 1 } { $i < $nclients } { incr i } {
+ error_check_good got_hold_elect($i) $got_hold_elect($i) 1
+ unset got_hold_elect($i)
+ }
+
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+ puts "\tRep0$tnum: Skipping for less than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep0$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc reptwo { args } { eval rep002 $args }
diff --git a/libdb/test/rep003.tcl b/libdb/test/rep003.tcl
new file mode 100644
index 0000000..7551e85
--- /dev/null
+++ b/libdb/test/rep003.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment; start up,
+# TEST shut down, and restart client processes, with and without recovery.
+# TEST To ensure that environment state is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "03" } args } {
+ source ./include.tcl
+ global testdir rep003_dbname rep003_omethod rep003_oargs
+
+ env_cleanup $testdir
+ set niter 10
+ set rep003_dbname rep003.db
+
+ if { [is_record_based $method] } {
+ puts "Rep0$tnum: Skipping for method $method"
+ return
+ }
+
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+
+ puts "Rep0$tnum: Replication repeated-startup test"
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb_env_noerr -create -log_max 1000000 \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep0$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep0$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep0$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover_fatal -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
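+# rep003_put opens the test database in the master environment, writes a
+# single key/data pair in its own committed txn, and closes the handle, so
+# each put in the main test is self-contained; rep003_check does the
+# read-only equivalent against whichever environment it is handed.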
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
diff --git a/libdb/test/rep004.tcl b/libdb/test/rep004.tcl
new file mode 100644
index 0000000..053d8e7
--- /dev/null
+++ b/libdb/test/rep004.tcl
@@ -0,0 +1,198 @@
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rep004
+# TEST Test of DB_REP_LOGSONLY.
+# TEST
+# TEST Run a quick put test in a master environment that has one logs-only
+# TEST client. Shut down, then run catastrophic recovery in the logs-only
+# TEST client and check that the database is present and populated.
+
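+# A logs-only replica is meant to store the master's log records without
+# maintaining database files of its own; the catastrophic recovery in step
+# .e is therefore what materializes the data that step .f verifies.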
+proc rep004 { method { nitems 10 } { tnum "04" } args } {
+ source ./include.tcl
+ global testdir
+
+ env_cleanup $testdir
+ set dbname rep0$tnum.db
+
+ set omethod [convert_method $method]
+ set oargs [convert_args $method $args]
+
+ puts "Rep0$tnum: Test of logs-only replication clients"
+
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+ set logsonlydir $testdir/LOGSONLYDIR
+ file mkdir $logsonlydir
+
+ # Open a master, a logsonly replica, and a normal client.
+ repladd 1
+ set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
+ -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ repladd 2
+ set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
+ -rep_transport [list 2 replsend]]
+ error_check_good logsonly_env [is_valid_env $loenv] TRUE
+
+ repladd 3
+ set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
+ -rep_transport [list 3 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+
+ puts "\tRep0$tnum.a: Populate database."
+
+ set db [eval {berkdb open -create -mode 0644 -auto_commit} \
+ -env $masterenv $oargs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ }
+
+ puts "\tRep0$tnum.b: Sync up clients."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+ incr nproced [replprocessqueue $clientenv 3]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ puts "\tRep0$tnum.c: Get master and logs-only client ahead."
+ set newcount 0
+ while { [gets $did str] != -1 && $newcount < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ incr newcount
+ }
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ # "Crash" the master, and fail over to the upgradeable client.
+ error_check_good masterenv_close [$masterenv close] 0
+ replclear 3
+
+ error_check_good upgrade_client [$clientenv rep_start -master] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $clientenv 3]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good loenv_close [$loenv close] 0
+
+ puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
+ set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
+
+ puts "\tRep0$tnum.f: Verify logs-only client contents."
+ set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
+ set loc [$lodb cursor]
+
+ set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
+ set cc [$cdb cursor]
+
+ # Make sure new master and recovered logs-only replica match.
+ for { set cdbt [$cc get -first] } \
+ { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
+ set lodbt [$loc get -next]
+
+ error_check_good newmaster_replica_match $cdbt $lodbt
+ }
+
+ # Reset new master cursor.
+ error_check_good cc_close [$cc close] 0
+ set cc [$cdb cursor]
+
+ for { set lodbt [$loc get -first] } \
+ { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
+ set cdbt [$cc get -next]
+
+ error_check_good replica_newmaster_match $lodbt $cdbt
+ }
+
+ error_check_good loc_close [$loc close] 0
+ error_check_good lodb_close [$lodb close] 0
+ error_check_good loenv_close [$loenv close] 0
+
+ error_check_good cc_close [$cc close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ close $did
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/libdb/test/rep005.tcl b/libdb/test/rep005.tcl
new file mode 100644
index 0000000..bb8b41f
--- /dev/null
+++ b/libdb/test/rep005.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+proc rep005 { method { niter 10 } { tnum "05" } args } {
+ source ./include.tcl
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep005: Skipping for method $method."
+ return
+ }
+
+ set nclients 3
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+ #
+ # We set up the error list for each client. We know that the
+ # first client is the one calling the election, so we add
+ # the error location on sending the message (electsend) for that one.
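+ # The nested loops below run one election per combination of error
+ # points: 6 choices for the caller times 5 for each other client,
+ # i.e. 6 * 5 * 5 = 150 elections in all.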
+ set m "Rep0$tnum"
+ set count 0
+ foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c1 { electinit electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c2 { electinit electvote1 electwait1 \
+ electvote2 electwait2 } {
+ set elist [list $c0 $c1 $c2]
+ rep005_elect env_cmd clientenv $qdir $m \
+ $count $elist
+ incr count
+ }
+ }
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep005_elect { ecmd cenv qdir msg count elist } {
+ global elect_timeout
+ upvar $ecmd env_cmd
+ upvar $cenv clientenv
+
+ set elect_timeout 1000000
+ set nclients [llength $elist]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) [lindex $elist $i]
+ }
+ puts "\t$msg.d.$count: Starting election with errors $elist"
+ set elect_pipe(0) [start_election $qdir $env_cmd(0) \
+ [expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+# puts "Processing queue for client $i"
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # Client #1 has priority 100; everyone else has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+# puts "Starting election on client $i"
+ set elect_pipe($i) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout $err_cmd($i)]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+}
diff --git a/libdb/test/reputils.tcl b/libdb/test/reputils.tcl
new file mode 100644
index 0000000..f65ee56
--- /dev/null
+++ b/libdb/test/reputils.tcl
@@ -0,0 +1,659 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+
+# The test environment consists of a queue and a directory (environment)
+# per replication site. The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site. So the environment looks like:
+# $testdir
+# ___________|______________________________
+# / | \ \
+# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1
+# | | ... |
+# 1 2 .. N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The global repenv array contains the environment handles for the sites:
+# repenv(master) is the master's environment and repenv(0) through
+# repenv(N-1) are the clients' environments.
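+#
+# A typical caller might use these utilities roughly as follows (testXXX,
+# $method, $niter and $nclients stand for whatever the calling test supplies):
+#	set largs [repl_envsetup $envargs $largs $tnum $nclients]
+#	eval testXXX $method $niter 0 $tnum 0 $largs
+#	repl_envprocq $tnum $nclients
+#	repl_envver0 $tnum $method $nclients
+#	repl_verdel $tnum $method $nclients
+#	repl_envclose $tnum $envargs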
+
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
+global elect_timeout
+set elect_timeout 50000000
+set drop 0
+
+# Create the directory structure for replication testing.
+# Open the master and client environments; store these in the global repenv
+# Return the master's environment: "-env masterenv"
+#
+proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
+ source ./include.tcl
+ global clientdir
+ global drop drop_msg
+ global masterdir
+ global repenv
+ global testdir
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ if { $droppct != 0 } {
+ set drop 1
+ set drop_msg [expr 100 / $droppct]
+ } else {
+ set drop 0
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Open a master.
+ repladd 1
+ #
+ # Set log smaller than default to force changing files,
+ # but big enough so that the tests that use binary files
+ # as keys/data can run.
+ #
+ set lmax [expr 3 * 1024 * 1024]
+ set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
+ {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set repenv(master) $masterenv
+
+ # Open clients
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set clientenv [eval {berkdb_env -create} $envargs -txn \
+ {-cachesize { 0 10000000 0 }} -lock_max 10000 \
+ {-home $clientdir($i) -rep_client -rep_transport \
+ [list $envid replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set repenv($i) $clientenv
+ }
+ set repenv($i) NULL
+ append largs " -env $masterenv "
+
+ # Process startup messages
+ repl_envprocq $tnum $nclients $oob
+
+ return $largs
+}
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates if we should process the queue
+# with out-of-order delivery. The replprocessqueue procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+
+proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
+ global repenv
+ global drop
+
+ set masterenv $repenv(master)
+ for { set i 0 } { 1 } { incr i } {
+ if { $repenv($i) == "NULL"} {
+ break
+ }
+ }
+ error_check_good i_nclients $nclients $i
+
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts -nonewline "\t$name: Processing master/$i client queues"
+ set rand_skip 0
+ if { $oob } {
+ puts " out-of-order"
+ } else {
+ puts " in order"
+ }
+ set do_check 1
+ set droprestore $drop
+ while { 1 } {
+ set nproced 0
+
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ set n [replprocessqueue $repenv($i) \
+ $envid $rand_skip]
+ incr nproced $n
+ }
+
+ if { $nproced == 0 } {
+ # Now that we delay requesting records until
+ # we've had a few records go by, we should always
+ # see that the number of requests is lower than the
+ # number of messages that were enqueued.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ set stats [$clientenv rep_stat]
+ set queued [getstats $stats \
+ {Total log records queued}]
+ error_check_bad queued_stats \
+ $queued -1
+ set requested [getstats $stats \
+ {Log records requested}]
+ error_check_bad requested_stats \
+ $requested -1
+ if { $queued != 0 && $do_check != 0 } {
+ error_check_good num_requested \
+ [expr $requested < $queued] 1
+ }
+
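+			# rep_request <min> <max> controls how often a client
+			# asks for missing log records; 1 1 makes it request as
+			# soon as it sees a gap (the reset further below
+			# restores the 4/128 defaults).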
+ $clientenv rep_request 1 1
+ }
+
+ # If we were dropping messages, we might need
+ # to flush the log so that we get everything
+ # and end up in the right state.
+ if { $drop != 0 } {
+ set drop 0
+ set do_check 0
+ $masterenv rep_flush
+ berkdb debug_check
+ puts "\t$name: Flushing Master"
+ } else {
+ break
+ }
+ }
+ }
+
+ # Reset the clients back to the default state in case we
+ # have more processing to do.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ $clientenv rep_request 4 128
+ }
+ set drop $droprestore
+}
+
+# Verify that the directories in the master are exactly replicated in
+# each of the client environments.
+
+proc repl_envver0 { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Verify the database in the client dir.
+ # First dump the master.
+ set t1 $masterdir/t1
+ set t2 $masterdir/t2
+ set t3 $masterdir/t3
+ set omethod [convert_method $method]
+ set name [format "Repl%03d" $tnum]
+
+ #
+ # We are interested in the keys of whatever databases are present
+ # in the master environment, so we just call a no-op check function
+ # since we have no idea what the contents of this database really is.
+ # We just need to walk the master and the clients and make sure they
+ # have the same contents.
+ #
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+ repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ }
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying client $i database \
+ $testfile contents."
+ open_and_dump_file $testfile $repenv($i) \
+ $t1 repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ } else {
+ catch {file copy -force $t1 $t3} ret
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+ }
+ }
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+
+proc repl_verdel { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Delete all items in the master.
+ set name [format "Repl%03d" $tnum]
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ puts "\t$name: Deleting all items from the master."
+ set txn [$repenv(master) txn]
+ error_check_good txn_begin [is_valid_txn $txn \
+ $repenv(master)] TRUE
+ set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
+ error_check_good reopen_master [is_valid_db $db] TRUE
+ set dbc [$db cursor -txn $txn]
+ error_check_good reopen_master_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good del_item [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ repl_envprocq $tnum $nclients
+
+ # Check clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying emptiness of client database $i."
+
+ set db [berkdb_open -env $repenv($i) $testfile]
+ error_check_good reopen_client($i) \
+ [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good reopen_client_cursor($i) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good client($i)_empty \
+ [llength [$dbc get -first]] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
+
+# Replication "check" function for the dump procs that expect to
+# be able to verify the keys and data.
+proc repl_noop { k d } {
+ return
+}
+
+# Close all the master and client environments in a replication test directory.
+proc repl_envclose { tnum envargs } {
+ source ./include.tcl
+ global clientdir
+ global encrypt
+ global masterdir
+ global repenv
+ global testdir
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+
+ # In order to make sure that we have fully-synced and ready-to-verify
+ # databases on all the clients, do a checkpoint on the master and
+ # process messages in order to flush all the clients.
+ set drop 0
+ set do_check 0
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts "\t$name: Checkpointing master."
+ error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+ # Count clients.
+ for { set ncli 0 } { 1 } { incr ncli } {
+ if { $repenv($ncli) == "NULL" } {
+ break
+ }
+ }
+ repl_envprocq $tnum $ncli
+
+ error_check_good masterenv_close [$repenv(master) close] 0
+ verify_dir $masterdir "\t$name: " 0 0 1
+ for { set i 0 } { $i < $ncli } { incr i } {
+ error_check_good client($i)_close [$repenv($i) close] 0
+ verify_dir $clientdir($i) "\t$name: " 0 0 1
+ }
+ replclose $testdir/MSGQUEUEDIR
+
+}
+
+# Close up a replication group
+proc replclose { queuedir } {
+ global queueenv queuedbs machids
+
+ foreach m $machids {
+ set db $queuedbs($m)
+ error_check_good dbr_close [$db close] 0
+ }
+ error_check_good qenv_close [$queueenv close] 0
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set queueenv \
+ [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+# Send function for replication.
+proc replsend { control rec fromid toid } {
+ global queuedbs queueenv machids
+ global drop drop_msg
+
+ #
+	# If we are testing with dropped messages, then we drop every
+	# $drop_msg'th message. When we drop one, just return 0 and
+	# don't do anything else.
+ #
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_MID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ error "replsend: machid $toid not found"
+ }
+ set machlist [list $toid]
+ }
+
+ foreach m $machlist {
+		# XXX should a broadcast include "self"?
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+ set txn [$queueenv txn]
+ $db put -txn $txn -append [list $control $rec $fromid]
+ error_check_good replsend_commit [$txn commit] 0
+ }
+
+ return 0
+}
+
+# Nuke all the pending messages for a particular site.
+proc replclear { machid } {
+ global queuedbs queueenv
+
+ if { [info exists queuedbs($machid)] != 1 } {
+ error "FAIL: replclear: machid $machid not found"
+ }
+
+ set db $queuedbs($machid)
+ set txn [$queueenv txn]
+ set dbc [$db cursor -txn $txn]
+ for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -rmw -next] } {
+ error_check_good replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($machid)_dbc_close [$dbc close] 0
+ error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv -create -recno -renumber repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
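+
+# A hypothetical setup sketch (not part of the original suite): tests
+# create the queue environment once and register one queue database per
+# simulated site, with machine ID 1 conventionally being the master:
+#
+#	replsetup $testdir/MSGQUEUEDIR
+#	repladd 1	;# master transport ID
+#	repladd 2	;# first client transport ID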
+
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+
+proc replprocessqueue { dbenv machid { skip_interval 0 } \
+ { hold_electp NONE } { newmasterp NONE } } {
+ global queuedbs queueenv errorCode
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # newmasterp is the same idea, only returning the ID of a master
+ # given in a DB_REP_NEWMASTER return.
+ if { [string compare $newmasterp NONE] != 0 } {
+ upvar $newmasterp newmaster
+ }
+ set newmaster 0
+
+ set nproced 0
+
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+
+ # If skip_interval is nonzero, we want to process messages
+ # out of order. We do this in a simple but slimy way--
+ # continue walking with the cursor without processing the
+ # message or deleting it from the queue, but do increment
+ # "nproced". The way this proc is normally used, the
+ # precise value of nproced doesn't matter--we just don't
+ # assume the queues are empty if it's nonzero. Thus,
+ # if we contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a remainder other
+ # than zero so that we're guaranteed to really process at least
+ # one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ continue
+ }
+ }
+
+ # We have to play an ugly cursor game here: we currently
+ # hold a lock on the page of messages, but rep_process_message
+ # might need to lock the page with a different cursor in
+ # order to send a response. So save our recno, close
+ # the cursor, and then reopen and reset the cursor.
+ set recno [lindex [lindex $dbt 0] 0]
+ error_check_good dbc_process_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+ set dbt [$dbc get -set $recno]
+
+ if { $ret != 0 } {
+ if { [is_substr $res DB_REP_HOLDELECTION] } {
+ set hold_elect 1
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+
+ $dbc del
+
+ if { $ret == 0 && $res != 0 } {
+ if { [is_substr $res DB_REP_NEWSITE] } {
+ # NEWSITE; do nothing.
+ } else {
+ set newmaster $res
+ # Break as soon as we get a NEWMASTER message;
+ # our caller needs to handle it.
+ break
+ }
+ }
+
+ if { $hold_elect == 1 } {
+ # Break also on a HOLDELECTION, for the same reason.
+ break
+ }
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Return the number of messages processed.
+ return $nproced
+}
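+
+# A hypothetical caller sketch (mirroring repl_envprocq above): callers
+# normally loop over all sites until no messages remain in any queue:
+#
+#	while { 1 } {
+#		set nproced 0
+#		incr nproced [replprocessqueue $masterenv 1]
+#		incr nproced [replprocessqueue $clientenv 2]
+#		if { $nproced == 0 } {
+#			break
+#		}
+#	}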
+
+set run_repl_flag "-run_repl"
+
+proc extract_repl_args { args } {
+ global run_repl_flag
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] == 0 } {
+ return [lindex $args [expr $i + 1]]
+ }
+ }
+ return ""
+}
+
+proc delete_repl_args { args } {
+ global run_repl_flag
+
+ set ret {}
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] != 0 } {
+ lappend ret $arg
+ } else {
+ incr i
+ }
+ }
+ return $ret
+}
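+
+# Hypothetical example: given arguments such as
+#	-pagesize 512 -run_repl $masterenv
+# extract_repl_args returns the element following "-run_repl" (here
+# $masterenv), while delete_repl_args returns the same arguments with
+# the flag and its value removed (here "-pagesize 512").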
+
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
+proc start_election { qdir envstring nsites pri timeout {err "none"}} {
+ source ./include.tcl
+ global elect_serial elect_timeout elections_in_progress machids
+
+ incr elect_serial
+
+ set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+
+ puts $t "source $test_path/test.tcl"
+ puts $t "replsetup $qdir"
+ foreach i $machids { puts $t "repladd $i" }
+ puts $t "set env_cmd \{$envstring\}"
+ puts $t "set dbenv \[eval \$env_cmd -errfile \
+ $testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
+# puts "Start election err $err, env $envstring"
+ puts $t "\$dbenv test abort $err"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ if { $err != "none" } {
+ puts $t "\$dbenv test abort none"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ }
+ flush $t
+
+ set elections_in_progress($elect_serial) $t
+ return $elect_serial
+}
+
+proc close_election { i } {
+ global elections_in_progress
+ set t $elections_in_progress($i)
+ puts $t "\$dbenv close"
+ close $t
+ unset elections_in_progress($i)
+}
+
+proc cleanup_elections { } {
+ global elect_serial elections_in_progress
+
+ for { set i 0 } { $i <= $elect_serial } { incr i } {
+ if { [info exists elections_in_progress($i)] != 0 } {
+ close_election $i
+ }
+ }
+
+ set elect_serial 0
+}
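+
+# A hypothetical lifecycle sketch (not part of the original suite): a
+# test starts an election in a sub-process and reaps it later, e.g.:
+#
+#	set eid [start_election $qdir $env_cmd $nsites $pri $elect_timeout]
+#	...
+#	close_election $eid
+#	cleanup_elections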
diff --git a/libdb/test/rpc001.tcl b/libdb/test/rpc001.tcl
new file mode 100644
index 0000000..2475843
--- /dev/null
+++ b/libdb/test/rpc001.tcl
@@ -0,0 +1,449 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+# TEST Test RPC specifics, primarily that unsupported functions return
+# TEST errors and such.
+proc rpc001 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ #
+ # First test timeouts on server.
+ #
+ set ttime 5
+ set itime 10
+ puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -t $ttime -I $itime &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+		    -h $rpc_testdir -t $ttime -I $itime &]
+ }
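+	# (Assumed flag meanings, matching the values announced above:
+	# -t sets the server's resource timeout and -I its idle timeout.)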
+ puts "\tRpc001.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc001.b: Creating environment"
+
+ set testfile "rpc001.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc001.c: Opening a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set curs_list {}
+ set txn_list {}
+ puts "\tRpc001.d: Basic timeout test"
+ puts "\tRpc001.d1: Starting a transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ lappend txn_list $txn
+
+ puts "\tRpc001.d2: Open a cursor in that transaction"
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d3: Duplicate that cursor"
+ set dbc [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d4: Starting a nested transaction"
+ set txn [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+
+ puts "\tRpc001.d5: Create a cursor, no transaction"
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d6: Timeout cursor and transactions"
+ set sleeptime [expr $ttime + 2]
+ tclsleep $sleeptime
+
+ #
+	# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$db stat} ret]
+ error_check_good dbstat $stat 0
+
+ #
+ # Check that every handle we opened above is timed out
+ #
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ set txn_list {}
+ set ntxns 8
+ puts "\tRpc001.e: Nested ($ntxns x $ntxns) transaction activity test"
+ puts "\tRpc001.e1: Starting parent transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ set parent_txn $txn
+
+ #
+	# First set up a breadth of 'ntxns' child transactions.
+	# We need 2 from this set for testing later on, so set those
+	# two up separately first.
+ #
+ puts "\tRpc001.e2: Creating $ntxns child transactions"
+ set child0 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set child1 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+
+ for {set i 2} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ #
+ # Now make one 'ntxns' deeply nested.
+ # Add one more for testing later on separately.
+ #
+ puts "\tRpc001.e3: Creating $ntxns nested child transactions"
+ for {set i 0} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $last_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ }
+ set last_parent $last_txn
+ set last_txn [$env txn -parent $last_parent]
+ error_check_good txn_begin [is_valid_txn $last_txn $env] TRUE
+
+ puts "\tRpc001.e4: Open a cursor in deepest transaction"
+ set dbc [$db cursor -txn $last_txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tRpc001.e5: Duplicate that cursor"
+ set dbcdup [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbcdup $db] TRUE
+ lappend curs_list $dbcdup
+
+ puts "\tRpc001.f: Timeout then activate duplicate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbcdup close} ret]
+ error_check_good dup_close:$dbcdup $stat 0
+ error_check_good dup_close:$dbcdup $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.g: Timeout, then activate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbc close} ret]
+ error_check_good dbc_close:$dbc $stat 0
+ error_check_good dbc_close:$dbc $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.h: Timeout, then activate child txn"
+ tclsleep $sleeptime
+ set stat [catch {$child0 commit} ret]
+ error_check_good child_commit $stat 0
+ error_check_good child_commit:$child0 $ret 0
+
+	#
+ # Make sure that our nested txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $last_parent} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.i: Timeout, then activate nested txn"
+ tclsleep $sleeptime
+ set stat [catch {$last_txn commit} ret]
+ error_check_good lasttxn_commit $stat 0
+ error_check_good lasttxn_commit:$child0 $ret 0
+
+ #
+ # Make sure that our child txn is not timed out. We should
+ # be able to commit it.
+ #
+ set stat [catch {$child1 commit} ret]
+ error_check_good child_commit:$child1 $stat 0
+ error_check_good child_commit:$child1 $ret 0
+
+ #
+ # Clean up. They were inserted in LIFO order, so we should
+ # just be able to commit them all.
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 0
+
+ rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
+ rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
+
+	# We need a 2nd env just to do an op to time out the env.
+ # We need a 2nd env just to do an op to timeout the env.
+ # Make the flags different so we don't end up sharing a handle.
+ #
+ set env1 [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000}]
+ error_check_good lock_env:open [is_valid_env $env1] TRUE
+
+ puts "\tRpc001.l: Timeout idle env handle"
+ set sleeptime [expr $itime + 2]
+ tclsleep $sleeptime
+
+ set stat [catch {$env1 close} ret]
+ error_check_good env1_close $stat 0
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_timeout \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ tclkill $dpid
+}
+
+proc rpc_timeoutjoin {env msg sleeptime use_txn} {
+ #
+ # Check join cursors now.
+ #
+ puts -nonewline "\t$msg: Test join cursors and timeouts"
+ if { $use_txn } {
+ puts " (using txns)"
+ set txnflag "-auto_commit"
+ } else {
+ puts " (without txns)"
+ set txnflag ""
+ }
+ #
+ # Set up a simple set of join databases
+ #
+ puts "\t${msg}0: Set up join databases"
+ set fruit {
+ {blue blueberry}
+ {red apple} {red cherry} {red raspberry}
+ {yellow lemon} {yellow pear}
+ }
+ set price {
+ {expen blueberry} {expen cherry} {expen raspberry}
+ {inexp apple} {inexp lemon} {inexp pear}
+ }
+ set dessert {
+ {blueberry cobbler} {cherry cobbler} {pear cobbler}
+ {apple pie} {raspberry pie} {lemon pie}
+ }
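+	#
+	# With these data sets, the join performed in rpc_join below
+	# (fruit color "yellow" intersected with price class "inexp")
+	# matches the keys lemon and pear, so it should return "pie"
+	# and "cobbler" from the dessert database.
+	#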
+ set fdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup fruit.db]
+ error_check_good dbopen [is_valid_db $fdb] TRUE
+ set pdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup price.db]
+ error_check_good dbopen [is_valid_db $pdb] TRUE
+ set ddb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup dessert.db]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+ foreach kd $fruit {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$fdb put} $txnflag {$k $d}]
+ error_check_good fruit_put $ret 0
+ }
+ error_check_good sync [$fdb sync] 0
+ foreach kd $price {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$pdb put} $txnflag {$k $d}]
+ error_check_good price_put $ret 0
+ }
+ error_check_good sync [$pdb sync] 0
+ foreach kd $dessert {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$ddb put} $txnflag {$k $d}]
+ error_check_good dessert_put $ret 0
+ }
+ error_check_good sync [$ddb sync] 0
+
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
+
+ error_check_good ddb:close [$ddb close] 0
+ error_check_good pdb:close [$pdb close] 0
+ error_check_good fdb:close [$fdb close] 0
+}
+
+proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
+ global errorInfo
+
+ #
+ # Start a parent and child transaction. We'll do our join in
+ # the child transaction just to make sure everything gets timed
+ # out correctly.
+ #
+ set curs_list {}
+ set txn_list {}
+ set msgnum [expr $op * 2 + 1]
+ if { $use_txn } {
+ puts "\t$msg$msgnum: Set up txns and join cursor"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set child0 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child0]
+ set child1 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child1]
+ set txncmd "-txn $child0"
+ } else {
+ puts "\t$msg$msgnum: Set up join cursor"
+ set txncmd ""
+ }
+
+ #
+	# Start a cursor (using txn child0 in the fruit and price dbs, if
+	# needed). Just pick something simple to join on.
+ # Then call join on the dessert db.
+ #
+ set fkey yellow
+ set pkey inexp
+ set fdbc [eval $fdb cursor $txncmd]
+ error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
+ set ret [$fdbc get -set $fkey]
+ error_check_bad fget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good fget:set:key $k $fkey
+ set curs_list [linsert $curs_list 0 $fdbc]
+
+ set pdbc [eval $pdb cursor $txncmd]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set ret [$pdbc get -set $pkey]
+ error_check_bad pget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good pget:set:key $k $pkey
+ set curs_list [linsert $curs_list 0 $pdbc]
+
+ set jdbc [$ddb join $fdbc $pdbc]
+ error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
+ set ret [$jdbc get]
+ error_check_bad jget [llength $ret] 0
+
+ set msgnum [expr $op * 2 + 2]
+ if { $op == 1 } {
+ puts -nonewline "\t$msg$msgnum: Timeout all cursors"
+ if { $use_txn } {
+ puts " and txns"
+ } else {
+ puts ""
+ }
+ } else {
+ puts "\t$msg$msgnum: Timeout, then activate join cursor"
+ }
+
+ tclsleep $sleep
+
+ if { $op == 1 } {
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ #
+ # Check that join cursor is timed out.
+ #
+ set stat [catch {$jdbc close} ret]
+ error_check_good dbc_close:$jdbc $stat 1
+ error_check_good dbc_timeout:$jdbc \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ #
+		# Now the server may or may not time out constituent
+		# cursors when it times out the join cursor. So just
+		# sleep again, and then they should time out.
+ #
+ tclsleep $sleep
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ } else {
+ set stat [catch {$jdbc get} ret]
+ error_check_good jget.stat $stat 0
+ error_check_bad jget [llength $ret] 0
+ set curs_list [linsert $curs_list 0 $jdbc]
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 0
+ error_check_good dbc_close:$c $ret 0
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+}
diff --git a/libdb/test/rpc002.tcl b/libdb/test/rpc002.tcl
new file mode 100644
index 0000000..93aa256
--- /dev/null
+++ b/libdb/test/rpc002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
+proc rpc002 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ set testfile "rpc002.db"
+ set home [file tail $rpc_testdir]
+ #
+ # First start the server.
+ #
+ puts "Rpc002: Unsupported interface test"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc002.a: Started server, pid $dpid"
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc002.b: Unsupported env options"
+ #
+ # Test each "pre-open" option for env's. These need to be
+ # tested on the 'berkdb_env' line.
+ #
+ set rlist {
+ { "-data_dir $rpc_testdir" "Rpc002.b0"}
+ { "-log_buffer 512" "Rpc002.b1"}
+ { "-log_dir $rpc_testdir" "Rpc002.b2"}
+ { "-log_max 100" "Rpc002.b3"}
+ { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
+ { "-lock_detect default" "Rpc002.b5"}
+ { "-lock_max 100" "Rpc002.b6"}
+ { "-mmapsize 100" "Rpc002.b7"}
+ { "-shm_key 100" "Rpc002.b9"}
+ { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
+ { "-txn_max 100" "Rpc002.b11"}
+ { "-txn_timestamp 100" "Rpc002.b12"}
+ { "-verbose {recovery on}" "Rpc002.b13"}
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
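+		# (Assumption: in the fixed-length case the EOF-terminated
+		# final record comes back padded out to -len, hence the
+		# trailing blank(s) in $match above.)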
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+
+ #
+ # Open an env with all the subsystems (-txn implies all
+ # the rest)
+ #
+ puts "\tRpc002.c: Unsupported env related interfaces"
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ $testfile"
+ set db [eval $dbcmd]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Test each "post-open" option relating to envs, txns, locks,
+ # logs and mpools.
+ #
+ set rlist {
+ { " lock_detect default" "Rpc002.c0"}
+ { " lock_get read 1 $env" "Rpc002.c1"}
+ { " lock_id" "Rpc002.c2"}
+ { " lock_stat" "Rpc002.c3"}
+ { " lock_vec 1 {get $env read}" "Rpc002.c4"}
+ { " log_archive" "Rpc002.c5"}
+ { " log_file {0 0}" "Rpc002.c6"}
+ { " log_flush" "Rpc002.c7"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $env $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+ error_check_good dbclose [$db close] 0
+
+ #
+	# The database operations that aren't supported are few,
+	# mostly because they are ones Tcl doesn't support either,
+	# so we have no way to get at them. Test what we can.
+ #
+ puts "\tRpc002.d: Unsupported database related interfaces"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ #
+ puts "\tRpc002.d0: -cachesize"
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ -cachesize {0 65536 0} $testfile"
+ set stat [catch {eval $dbcmd} ret]
+ error_check_good dbopen_cache $stat 1
+ error_check_good dbopen_cache_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ puts "\tRpc002.d1: Try to upgrade a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
+ error_check_good dbupgrade $stat 1
+ error_check_good dbupgrade_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ error_check_good envclose [$env close] 0
+
+ tclkill $dpid
+}
diff --git a/libdb/test/rpc003.tcl b/libdb/test/rpc003.tcl
new file mode 100644
index 0000000..a935ad1
--- /dev/null
+++ b/libdb/test/rpc003.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+	# First set up the files. Secondary indices only work read-only
+	# over RPC, so we need to create the databases first without RPC
+	# and then run the checks over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ tclkill $dpid
+}
+
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
diff --git a/libdb/test/rpc004.tcl b/libdb/test/rpc004.tcl
new file mode 100644
index 0000000..57b6e22
--- /dev/null
+++ b/libdb/test/rpc004.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ cleanup $testdir NULL
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ }
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ # Cleanup our environment because it's encrypted
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ tclkill $dpid
+}
diff --git a/libdb/test/rpc005.tcl b/libdb/test/rpc005.tcl
new file mode 100644
index 0000000..2d38ffc
--- /dev/null
+++ b/libdb/test/rpc005.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc005.c: Compare identical and different configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc005.e: Compare identical and different configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ tclkill $dpid
+}
diff --git a/libdb/test/rsrc001.tcl b/libdb/test/rsrc001.tcl
new file mode 100644
index 0000000..deaf818
--- /dev/null
+++ b/libdb/test/rsrc001.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc001 { } {
+ source ./include.tcl
+
+ puts "Rsrc001: Basic recno backing file writeback tests"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ set rec1 "This is record 1"
+ set rec2 "This is record 2 This is record 2"
+ set rec3 "This is record 3 This is record 3 This is record 3"
+ set rec4 [replicate "This is record 4 " 512]
+
+ foreach testfile { "$testdir/rsrc001.db" "" } {
+
+ cleanup $testdir NULL
+
+ if { $testfile == "" } {
+ puts "Rsrc001: Testing with in-memory database."
+ } else {
+ puts "Rsrc001: Testing with disk-backed database."
+ }
+
+ # Create backing file for the empty-file test.
+ set oid1 [open $testdir/rsrc.txt w]
+ close $oid1
+
+ puts "\tRsrc001.a: Put to empty file."
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+
+ set ret [eval {$db put} $txn {1 $rec1}]
+ error_check_good put_to_empty $ret 0
+ error_check_good db_close [$db close] 0
+
+ # Now fill out the backing file and create the check file.
+ set oid1 [open $testdir/rsrc.txt a]
+ set oid2 [open $testdir/check.txt w]
+
+ # This one was already put into rsrc.txt.
+ puts $oid2 $rec1
+
+ # These weren't.
+ puts $oid1 $rec2
+ puts $oid2 $rec2
+ puts $oid1 $rec3
+ puts $oid2 $rec3
+ puts $oid1 $rec4
+ puts $oid2 $rec4
+ close $oid1
+ close $oid2
+
+ puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc001.c: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.d: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record (set 2) $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.e: Put beyond end of file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ puts $oid ""
+ incr key
+ }
+ set rec "Last Record"
+ puts $oid $rec
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+
+ puts "\tRsrc001.f: Put beyond end of file, after reopen."
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Last record with reopen"
+ puts $oid $rec
+
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ puts "\tRsrc001.g:\
+ Put several beyond end of file, after reopen with snapshot."
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Really really last record with reopen"
+ puts $oid ""
+ puts $oid ""
+ puts $oid ""
+ puts $oid $rec
+
+ incr key
+ incr key
+ incr key
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.h: Verify proper syncing of changes on close."
+ error_check_good Rsrc001:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return; we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc001:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
+ }
+}
+
+# Strip CRs from a record.
+# Needed on Windows when a file is created as text (with CR/LF)
+# but read as binary (where CR is read as a separate character)
+proc sanitize_record { rec } {
+ source ./include.tcl
+
+ if { $is_windows_test != 1 } {
+ return $rec
+ }
+ regsub -all \15 $rec "" data
+ return $data
+}
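+
+# Hypothetical example: on Windows, sanitize_record turns "record 1\r"
+# into "record 1"; on other platforms the record is returned unchanged.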
diff --git a/libdb/test/rsrc002.tcl b/libdb/test/rsrc002.tcl
new file mode 100644
index 0000000..107c081
--- /dev/null
+++ b/libdb/test/rsrc002.tcl
@@ -0,0 +1,66 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
+proc rsrc002 { } {
+ source ./include.tcl
+
+ puts "Rsrc002: Alternate variable-length record delimiters."
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ foreach testfile { "$testdir/rsrc002.db" "" } {
+
+ cleanup $testdir NULL
+
+ # Create the starting files
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
+ puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
+ close $oid1
+ close $oid2
+
+ if { $testfile == "" } {
+ puts "Rsrc002: Testing with in-memory database."
+ } else {
+ puts "Rsrc002: Testing with disk-backed database."
+ }
+
+ puts "\tRsrc002.a: Read file, verify correctness."
+ set db [eval {berkdb_open -create -mode 0644 -delim 58 \
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Read the first records and verify them; then overwrite record 4
+	# ("moa" becomes "penguin"), close the file, and diff against the
+	# check file.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -first]
+ error_check_good get_first $rec [list [list 1 "ostrich"]]
+ set rec [$dbc get -next]
+ error_check_good get_next $rec [list [list 2 "emu"]]
+
+ puts "\tRsrc002.b: Write record, verify correctness."
+
+ eval {$dbc get -set 4}
+ set ret [$dbc put -current "penguin"]
+ error_check_good dbc_put $ret 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ error_check_good \
+ Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+ }
+}
diff --git a/libdb/test/rsrc003.tcl b/libdb/test/rsrc003.tcl
new file mode 100644
index 0000000..67f7b28
--- /dev/null
+++ b/libdb/test/rsrc003.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc003 { } {
+ source ./include.tcl
+ global fixed_len
+
+ puts "Rsrc003: Basic recno backing file writeback tests fixed length"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ #
+ # Then run with big fixed-length records
+ set rec1 "This is record 1"
+ set rec2 "This is record 2"
+ set rec3 "This is record 3"
+ set bigrec1 [replicate "This is record 1 " 512]
+ set bigrec2 [replicate "This is record 2 " 512]
+ set bigrec3 [replicate "This is record 3 " 512]
+
+ set orig_fixed_len $fixed_len
+ set rlist {
+ {{$rec1 $rec2 $rec3} "small records" }
+ {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
+
+ foreach testfile { "$testdir/rsrc003.db" "" } {
+
+ foreach rec $rlist {
+ cleanup $testdir NULL
+
+ set recs [lindex $rec 0]
+ set msg [lindex $rec 1]
+ # Create the starting files
+ # Note that for the rest of the test, we are going
+ # to append a LF when we 'put' via DB to maintain
+ # file structure and allow us to use 'gets'.
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ foreach record $recs {
+ set r [subst $record]
+ set fixed_len [string length $r]
+ puts $oid1 $r
+ puts $oid2 $r
+ }
+ close $oid1
+ close $oid2
+
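+			# The fixed record length seen by DB includes the
+			# trailing LF we append on each 'put' (see the note
+			# above), hence fixed_len + 1 below.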
+ set reclen [expr $fixed_len + 1]
+ if { $reclen > [string length $rec1] } {
+ set repl 512
+ } else {
+ set repl 2
+ }
+ if { $testfile == "" } {
+ puts \
+"Rsrc003: Testing with in-memory database with $msg."
+ } else {
+ puts \
+"Rsrc003: Testing with disk-backed database with $msg."
+ }
+
+ puts -nonewline \
+ "\tRsrc003.a: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (don't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ append str \12
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ diff1($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc003.b: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "This is New Record $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff2($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc003.c: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 2) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff3($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts \
+"\tRsrc003.d: Verify proper syncing of changes on close."
+ error_check_good Rsrc003:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 3) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ set ret [eval {$db put} $txn {-append $rec}]
+				# Don't bother checking return; we don't
+				# know what the key number is, and we'll
+				# pick up a failure when we compare.
+ }
+ error_check_good Rsrc003:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff5($testdir/{rsrc,check}.txt) $ret 0
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/libdb/test/rsrc004.tcl b/libdb/test/rsrc004.tcl
new file mode 100644
index 0000000..e7f26e1
--- /dev/null
+++ b/libdb/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/libdb/test/scr001/chk.code b/libdb/test/scr001/chk.code
new file mode 100644
index 0000000..df5a0fe
--- /dev/null
+++ b/libdb/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/libdb/test/scr002/chk.def b/libdb/test/scr002/chk.def
new file mode 100644
index 0000000..3272e79
--- /dev/null
+++ b/libdb/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/libdb/test/scr003/chk.define b/libdb/test/scr003/chk.define
new file mode 100644
index 0000000..a17e6fa
--- /dev/null
+++ b/libdb/test/scr003/chk.define
@@ -0,0 +1,77 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ_/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^LIST_/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SH_CIRCLEQ_/d' \
+ -e '/^SH_LIST_/d' \
+ -e '/^SH_TAILQ_/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/libdb/test/scr004/chk.javafiles b/libdb/test/scr004/chk.javafiles
new file mode 100644
index 0000000..3982125
--- /dev/null
+++ b/libdb/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+j=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find $j/db/ $j/examples $d/rpc_server/java -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr005/chk.nl b/libdb/test/scr005/chk.nl
new file mode 100644
index 0000000..332dd39
--- /dev/null
+++ b/libdb/test/scr005/chk.nl
@@ -0,0 +1,112 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
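+#
+# For illustration only (not a line taken from the DB sources): a call
+# such as
+#	__db_err(dbenv, "unable to open file\n");
+# or one whose message ends in a period would be reported; the message
+# should end without trailing punctuation:
+#	__db_err(dbenv, "unable to open file");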
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
diff --git a/libdb/test/scr006/chk.offt b/libdb/test/scr006/chk.offt
new file mode 100644
index 0000000..90aff18
--- /dev/null
+++ b/libdb/test/scr006/chk.offt
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr007/chk.proto b/libdb/test/scr007/chk.proto
new file mode 100644
index 0000000..ec1b402
--- /dev/null
+++ b/libdb/test/scr007/chk.proto
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
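+# A name found in only one file (just the prototype itself, with no
+# definition or use elsewhere) is reported as unnecessary below.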
+for i in `cat $t1`; do
+ c=$(egrep -low $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr008/chk.pubdef b/libdb/test/scr008/chk.pubdef
new file mode 100644
index 0000000..4f59e83
--- /dev/null
+++ b/libdb/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
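+
+# Each non-comment line of pubdef.in is assumed to hold a name followed
+# by flag columns, for example (hypothetical entry):
+#	DB_EXAMPLE	D	I	J
+# where D means the name is documented (m4.links), I means it appears
+# in db.in, and J (or C, for a constant with an explicit value) means
+# it appears in the Java files.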
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/dbinc/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/dbinc/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
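+# For illustration only (DB_EXAMPLE is a hypothetical name, not one
+# from pubdef.in): a J entry is expected to appear in Db.java as both
+#	static final int DB_EXAMPLE;
+#	DB_EXAMPLE = DbConstants.DB_EXAMPLE;
+# while a C entry is expected to appear once, with an explicit value:
+#	static final int DB_EXAMPLE = 1;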
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/libdb/test/scr009/chk.srcfiles b/libdb/test/scr009/chk.srcfiles
new file mode 100644
index 0000000..6218f30
--- /dev/null
+++ b/libdb/test/scr009/chk.srcfiles
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^test\//d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr010/chk.str b/libdb/test/scr010/chk.str
new file mode 100644
index 0000000..4d6ba96
--- /dev/null
+++ b/libdb/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check spelling in quoted strings.
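+#
+# For illustration only (a hypothetical source line): from
+#	__db_err(dbenv, "%s: unabel to open", name);
+# the quoted text is run through spell(1), and a word such as "unabel"
+# is reported unless it appears in spell.ok.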
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr010/spell.ok b/libdb/test/scr010/spell.ok
new file mode 100644
index 0000000..18af8d1
--- /dev/null
+++ b/libdb/test/scr010/spell.ok
@@ -0,0 +1,825 @@
+AES
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Aclmop
+Ahlm
+Ahm
+BCFILprRsvVxX
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+Bc
+BerkeleyDB
+BtRecExample
+Btree
+CD
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFLprsvVxX
+CFh
+CHKSUM
+CLpsvxX
+CONFIG
+CdFILTvX
+ClassNotFoundException
+Config
+DBC
+DBENV
+DBP
+DBS
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+DUPSORT
+Db
+DbAppendRecno
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUtil
+DbXAResource
+DbXid
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+Fh
+FileNotFoundException
+GetFileInformationByHandle
+GetJavaVM
+GetJoin
+HOFFSET
+HOLDELECTION
+Hashtable
+ILo
+ILprR
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+LprRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NOTYPE
+NOWAIT
+NP
+NoP
+NoqV
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PGNO
+PID
+PREV
+Pgno
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SHA
+SS
+Shm
+Sleepycat
+Subdatabase
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Tt
+Txn
+Txnid
+Txns
+UID
+UNAVAIL
+USERMEM
+Unencrypted
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XAException
+Xid
+XxZ
+YIELDCPU
+YY
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+appname
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+cksum
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbreg
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+dev
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+ds
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupset
+dupsort
+efh
+eid
+electinit
+electsend
+electvote
+electwait
+encryptaes
+encryptany
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fid
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+func
+fv
+gbytes
+gc'ed
+gen
+getBranchQualifier
+getFormatId
+getGlobalTransactionId
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+intValue
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+javax
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+killtest
+klNpP
+klNprRV
+klNprRs
+krinsky
+lM
+lP
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+logsonly
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNP
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mjc
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newclient
+newfile
+newitem
+newmaster
+newname
+newpage
+newpgno
+newsite
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+notgranted
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+ok
+olddata
+olditem
+oldname
+opd
+opflags
+opmods
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+passwd
+perf
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pkey
+plist
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prR
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qV
+qam
+qs
+qtest
+rRV
+rRs
+rV
+rand
+rcuradj
+rdonly
+readd
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+recs
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmid
+rmw
+ro
+rootent
+rootlsn
+rpc
+rpcid
+rs
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+sS
+sV
+sVv
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strcmp
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+thr
+threadID
+tid
+tiebreaker
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+util
+vVxXZ
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wrnosync
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/libdb/test/scr011/chk.tags b/libdb/test/scr011/chk.tags
new file mode 100644
index 0000000..181dc1d
--- /dev/null
+++ b/libdb/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/libdb/test/scr012/chk.vx_code b/libdb/test/scr012/chk.vx_code
new file mode 100644
index 0000000..8b73723
--- /dev/null
+++ b/libdb/test/scr012/chk.vx_code
@@ -0,0 +1,68 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
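+
+# For illustration, for db_dump the wrapper generated by header() is:
+#	int
+#	main(int argc, char *argv[])
+#	{return (db_dump(argv[1]));}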
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/libdb/test/scr013/chk.stats b/libdb/test/scr013/chk.stats
new file mode 100644
index 0000000..8f8aa8d
--- /dev/null
+++ b/libdb/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
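+
+# For example (a hypothetical structure, not one from db.in), given
+#	struct __db_example_stat {
+#		u_int32_t st_magic;	/* Magic number. */
+#		u_int32_t st_nkeys;	/* Number of keys. */
+#	};
+# inc_fields would emit the field names "st_magic" and "st_nkeys".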
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/lock/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/log/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/memp/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/txn/txn_stat.so"
+
+# Check to make sure the elements from a man page appears in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/db/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/lock/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/log/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/memp/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/txn/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
diff --git a/libdb/test/scr014/chk.err b/libdb/test/scr014/chk.err
new file mode 100644
index 0000000..1e09b27
--- /dev/null
+++ b/libdb/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
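+#
+# For illustration only (the name and value here are hypothetical):
+# a definition in dbinc/db.in such as
+#	#define	DB_EXAMPLE_ERR	(-30999)
+# must have a matching "case DB_EXAMPLE_ERR:" entry in db_strerror()
+# in common/db_err.c.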
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1)
+
+exit 0
diff --git a/libdb/test/scr015/README b/libdb/test/scr015/README
new file mode 100644
index 0000000..8671eb9
--- /dev/null
+++ b/libdb/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id$
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+	$ ./testone TestConstruct01
+ $ ./testall
+
+The scripts will use c++ in your path.  Set the environment variable
+$CXX to override this.  They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that -c is silently removed from
+$CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory.  This forces
+the proper options to be added to $LIBS, $CXXFLAGS and $LD_LIBRARY_PATH.
+For example,
+
+	$ ./testone --prefix=/usr/include/BerkeleyDB TestConstruct01
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple.  Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test.  If the test does not compile successfully, the compiler output
+is left in <name>.compileout.  Otherwise, the compiled program is run
+in a clean subdirectory using as input <name>.testin, or if that
+doesn't exist, /dev/null.  Output and error from the test run are put
+into <name>.out and <name>.err.  If <name>.testout and <name>.testerr
+exist, they are used as reference files and any differences are
+reported.  If either of the reference files does not exist, /dev/null
+is used.
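+
+For example (the name TestFoo is illustrative only; it is not a test
+in this directory), a test would consist of:
+
+	TestFoo.cpp		the test program itself
+	TestFoo.testin		optional input fed to the program
+	TestFoo.testout		optional reference for its standard output
+	TestFoo.testerr		optional reference for its standard error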
diff --git a/libdb/test/scr015/TestConstruct01.cpp b/libdb/test/scr015/TestConstruct01.cpp
new file mode 100644
index 0000000..bb9bbd9
--- /dev/null
+++ b/libdb/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+	// Make sure we get a message from the C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+ // old environment/database.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/libdb/test/scr015/TestConstruct01.testerr b/libdb/test/scr015/TestConstruct01.testerr
new file mode 100644
index 0000000..1ba627d
--- /dev/null
+++ b/libdb/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/libdb/test/scr015/TestConstruct01.testout b/libdb/test/scr015/TestConstruct01.testout
new file mode 100644
index 0000000..9b840f9
--- /dev/null
+++ b/libdb/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/libdb/test/scr015/TestExceptInclude.cpp b/libdb/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 0000000..7adb08d
--- /dev/null
+++ b/libdb/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class. We do need db.h to get a few
+ * typedefs defined that the DbException classes use.
+ *
+ * This program does nothing; it's just here to make sure
+ * the compilation works.
+ */
+#include <db.h>
+#include <cxx_except.h>
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/libdb/test/scr015/TestGetSetMethods.cpp b/libdb/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 0000000..737dd11
--- /dev/null
+++ b/libdb/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/libdb/test/scr015/TestKeyRange.cpp b/libdb/test/scr015/TestKeyRange.cpp
new file mode 100644
index 0000000..858c979
--- /dev/null
+++ b/libdb/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/,
+ * the only other changes are marked with comments that
+ * are notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/libdb/test/scr015/TestKeyRange.testin b/libdb/test/scr015/TestKeyRange.testin
new file mode 100644
index 0000000..a2b6bd7
--- /dev/null
+++ b/libdb/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/libdb/test/scr015/TestKeyRange.testout b/libdb/test/scr015/TestKeyRange.testout
new file mode 100644
index 0000000..25b2e1a
--- /dev/null
+++ b/libdb/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/libdb/test/scr015/TestLogc.cpp b/libdb/test/scr015/TestLogc.cpp
new file mode 100644
index 0000000..cfad857
--- /dev/null
+++ b/libdb/test/scr015/TestLogc.cpp
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+			// We ignore the contents of the log record;
+			// it's not portable.  Even the exact count
+			// may change when the underlying implementation
+			// changes; we'll just make sure at the end we
+			// saw 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+		// There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
diff --git a/libdb/test/scr015/TestLogc.testout b/libdb/test/scr015/TestLogc.testout
new file mode 100644
index 0000000..afac3af
--- /dev/null
+++ b/libdb/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/libdb/test/scr015/TestSimpleAccess.cpp b/libdb/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 0000000..2ffda1b
--- /dev/null
+++ b/libdb/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do a simple regression test for basic Db put/get access.
+ * The test stores a single key/data pair, reads it back, and
+ * verifies that a lookup with a nonexistent key fails as
+ * expected.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/libdb/test/scr015/TestSimpleAccess.testout b/libdb/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 0000000..dc88d47
--- /dev/null
+++ b/libdb/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/libdb/test/scr015/TestTruncate.cpp b/libdb/test/scr015/TestTruncate.cpp
new file mode 100644
index 0000000..17d9ec9
--- /dev/null
+++ b/libdb/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do a simple regression test for Db::truncate.
+ * The test stores a key/data pair, truncates the database, and
+ * verifies that the pair can no longer be retrieved.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/libdb/test/scr015/TestTruncate.testout b/libdb/test/scr015/TestTruncate.testout
new file mode 100644
index 0000000..0a4bc98
--- /dev/null
+++ b/libdb/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/libdb/test/scr015/chk.cxxtests b/libdb/test/scr015/chk.cxxtests
new file mode 100644
index 0000000..3c22136
--- /dev/null
+++ b/libdb/test/scr015/chk.cxxtests
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/libdb/test/scr015/ignore b/libdb/test/scr015/ignore
new file mode 100644
index 0000000..bcd98b5
--- /dev/null
+++ b/libdb/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id$
+#
+# A list of tests to ignore
diff --git a/libdb/test/scr015/testall b/libdb/test/scr015/testall
new file mode 100644
index 0000000..5d1ceba
--- /dev/null
+++ b/libdb/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id$
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/libdb/test/scr015/testone b/libdb/test/scr015/testone
new file mode 100644
index 0000000..c1fa93b
--- /dev/null
+++ b/libdb/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id$
+#
+# Run just one C++ regression test, the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/libdb/test/scr016/CallbackTest.java b/libdb/test/scr016/CallbackTest.java
new file mode 100644
index 0000000..eede964
--- /dev/null
+++ b/libdb/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, used here only as an example.
+ * In fact, it may not even be legal, since it may not define a consistent ordering.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/libdb/test/scr016/CallbackTest.testout b/libdb/test/scr016/CallbackTest.testout
new file mode 100644
index 0000000..68797d4
--- /dev/null
+++ b/libdb/test/scr016/CallbackTest.testout
@@ -0,0 +1,60 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key9
+compare function called
+ key1, key0
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/libdb/test/scr016/README b/libdb/test/scr016/README
new file mode 100644
index 0000000..7e03ae5
--- /dev/null
+++ b/libdb/test/scr016/README
@@ -0,0 +1,37 @@
+# $Id$
+
+Use the scripts testall or testone to run all, or just one of the Java
+tests. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use javac and java in your path. Set the environment
+variables $JAVAC and $JAVA to override this. They will also honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to the directory containing libdb_java-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, as with configure,
+to name the top of the BerkeleyDB install directory. This causes
+the proper directories to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test does not compile successfully, the compiler output
+is left in <name>.compileout. Otherwise, the java program is run in
+a clean subdirectory using <name>.testin as input, or /dev/null if
+that file doesn't exist. Output and error from the test run are put
+into <name>.out and <name>.err. If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
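
The framework described above boils down to a small compile/run/compare
loop. As a rough illustration only (this is not one of the imported
scripts, and it assumes javac and java are on the PATH and that
CLASSPATH already includes both ../../../../classes and the DB
classes), a single test <name> is handled roughly like this:

  name=$1
  # compile; keep the compiler output only if compilation fails
  javac -d ../../../../classes $name.java > $name.compileout 2>&1 || exit 1
  rm -f $name.compileout
  # use <name>.testin as stdin if present, /dev/null otherwise
  infile=$name.testin
  [ -f $infile ] || infile=/dev/null
  java com.sleepycat.test.$name < $infile > $name.out 2> $name.err
  # compare stdout and stderr against the reference files, if any
  for ext in out err; do
      ref=$name.test$ext
      [ -f $ref ] || ref=/dev/null
      cmp -s $name.$ext $ref || echo "FAIL: $name.$ext differs from $ref"
  done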
diff --git a/libdb/test/scr016/TestAppendRecno.java b/libdb/test/scr016/TestAppendRecno.java
new file mode 100644
index 0000000..2da2000
--- /dev/null
+++ b/libdb/test/scr016/TestAppendRecno.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ table.set_append_recno(this);
+
+ table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+ // Here's an example of how you can extend a Dbt to store recno's.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
diff --git a/libdb/test/scr016/TestAppendRecno.testout b/libdb/test/scr016/TestAppendRecno.testout
new file mode 100644
index 0000000..970174e
--- /dev/null
+++ b/libdb/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/libdb/test/scr016/TestAssociate.java b/libdb/test/scr016/TestAssociate.java
new file mode 100644
index 0000000..df1c798
--- /dev/null
+++ b/libdb/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(null, FileName + "2", null,
+ Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.associate(null, table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\ndef\njhi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
diff --git a/libdb/test/scr016/TestAssociate.testout b/libdb/test/scr016/TestAssociate.testout
new file mode 100644
index 0000000..34414b6
--- /dev/null
+++ b/libdb/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/libdb/test/scr016/TestClosedDb.java b/libdb/test/scr016/TestClosedDb.java
new file mode 100644
index 0000000..e4e9073
--- /dev/null
+++ b/libdb/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/libdb/test/scr016/TestClosedDb.testout b/libdb/test/scr016/TestClosedDb.testout
new file mode 100644
index 0000000..ce13883
--- /dev/null
+++ b/libdb/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/libdb/test/scr016/TestConstruct01.java b/libdb/test/scr016/TestConstruct01.java
new file mode 100644
index 0000000..4076960
--- /dev/null
+++ b/libdb/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(null, name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // keep on lower bit, which means to remove db between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ if (options.successcounter == 600) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (600 - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+ int successcounter =0;
+}
+
diff --git a/libdb/test/scr016/TestConstruct01.testerr b/libdb/test/scr016/TestConstruct01.testerr
new file mode 100644
index 0000000..e69de29
diff --git a/libdb/test/scr016/TestConstruct01.testout b/libdb/test/scr016/TestConstruct01.testout
new file mode 100644
index 0000000..5d2041c
--- /dev/null
+++ b/libdb/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/libdb/test/scr016/TestConstruct02.java b/libdb/test/scr016/TestConstruct02.java
new file mode 100644
index 0000000..43818a8
--- /dev/null
+++ b/libdb/test/scr016/TestConstruct02.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+ if (count >= 64)
+ throw new IllegalArgumentException("rundb count arg >= 64");
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
+
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ byte[] key_bytes = readkey.get_data();
+ byte[] data_bytes = readdata.get_data();
+
+ int len = key_bytes.length;
+ if (len != data_bytes.length) {
+ ERR("key and data are different");
+ }
+ for (i=0; i<len-1; i++) {
+ byte want = (byte)('0' + i);
+ if (key_bytes[i] != want || data_bytes[i] != want) {
+ System.out.println(" got " + new String(key_bytes) +
+ "/" + new String(data_bytes));
+ ERR("key or data is corrupt");
+ }
+ }
+ if (len <= 0 ||
+ key_bytes[len-1] != (byte)'x' ||
+ data_bytes[len-1] != (byte)'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len);
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ bitmap |= bit;
+ expected &= ~(bit);
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " +
+ expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopen no longer allowed, so we create a new db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall()
+ {
+ itemcount = 0;
+ try {
+ VERBOSEOUT(" Running test 1:\n");
+ t1();
+ VERBOSEOUT(" finished.\n");
+ removeall(true, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall()) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/libdb/test/scr016/TestConstruct02.testout b/libdb/test/scr016/TestConstruct02.testout
new file mode 100644
index 0000000..5d2041c
--- /dev/null
+++ b/libdb/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/libdb/test/scr016/TestDbtFlags.java b/libdb/test/scr016/TestDbtFlags.java
new file mode 100644
index 0000000..3e89bf7
--- /dev/null
+++ b/libdb/test/scr016/TestDbtFlags.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+ /*zippy quotes for test input*/
+ static final String[] input_lines = {
+ "If we shadows have offended",
+ "Think but this, and all is mended",
+ "That you have but slumber'd here",
+ "While these visions did appear",
+ "And this weak and idle theme",
+ "No more yielding but a dream",
+ "Gentles, do not reprehend",
+ "if you pardon, we will mend",
+ "And, as I am an honest Puck, if we have unearned luck",
+ "Now to 'scape the serpent's tongue, we will make amends ere long;",
+ "Else the Puck a liar call; so, good night unto you all.",
+ "Give me your hands, if we be friends, and Robin shall restore amends."
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + data.get_size() +
+ " ulen: " + data.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestDbtFlags.testerr b/libdb/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 0000000..7666868
--- /dev/null
+++ b/libdb/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,54 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 20
+ data size: 53 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 25 ulen: 20
+ data size: 25 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 20
+ data size: 69 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 20
+ data size: 65 ulen: 20
+exception, iteration 8: Dbt not large enough for available data
+ key size: 32 ulen: 20
+ data size: 32 ulen: 20
+exception, iteration 9: Dbt not large enough for available data
+ key size: 33 ulen: 20
+ data size: 33 ulen: 20
+exception, iteration 10: Dbt not large enough for available data
+ key size: 30 ulen: 20
+ data size: 30 ulen: 20
+exception, iteration 11: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 50
+ data size: 53 ulen: 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 50
+ data size: 69 ulen: 50
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 50
+ data size: 65 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/libdb/test/scr016/TestDbtFlags.testout b/libdb/test/scr016/TestDbtFlags.testout
new file mode 100644
index 0000000..b8deb1b
--- /dev/null
+++ b/libdb/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,78 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 16 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 20
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 50
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 200
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 0 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/libdb/test/scr016/TestGetSetMethods.java b/libdb/test/scr016/TestGetSetMethods.java
new file mode 100644
index 0000000..7c588a2
--- /dev/null
+++ b/libdb/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+        // We create a separate Db handle for each access method.
+        // Once we call (for instance) set_bt_maxkey, DB 'knows'
+        // this is a Btree Db, and the handle can no longer be used
+        // to try Hash or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestKeyRange.java b/libdb/test/scr016/TestKeyRange.java
new file mode 100644
index 0000000..cfd71de
--- /dev/null
+++ b/libdb/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count= 0;/*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
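+                    // Db.key_range reports, as fractions between 0 and 1, the
+                    // (estimated) proportion of keys that sort before, equal to,
+                    // and after the given key; hence the less/equal/greater
+                    // values in TestKeyRange.testout.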
+ DbKeyRange range = new DbKeyRange();
+ table.key_range(null, key, range, 0);
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestKeyRange.testout b/libdb/test/scr016/TestKeyRange.testout
new file mode 100644
index 0000000..c265f32
--- /dev/null
+++ b/libdb/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/libdb/test/scr016/TestLockVec.java b/libdb/test/scr016/TestLockVec.java
new file mode 100644
index 0000000..6dead0d
--- /dev/null
+++ b/libdb/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * test of DbEnv.lock_vec()
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
+
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the mode, throw an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/libdb/test/scr016/TestLockVec.testout b/libdb/test/scr016/TestLockVec.testout
new file mode 100644
index 0000000..1cf16c6
--- /dev/null
+++ b/libdb/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/libdb/test/scr016/TestLogc.java b/libdb/test/scr016/TestLogc.java
new file mode 100644
index 0000000..16704ac
--- /dev/null
+++ b/libdb/test/scr016/TestLogc.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+
+                // We ignore the contents of the log record;
+                // they're not portable.  Even the exact count
+                // may change when the underlying implementation
+                // changes, so we'll just make sure at the end
+                // that we saw 'enough'.
+ //
+ // System.out.println("logc.get: " + count);
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** FAIL: logc.get returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+
+            // There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ System.out.println("*** FAIL: not enough log records");
+
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("*** FAIL: Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/libdb/test/scr016/TestLogc.testout b/libdb/test/scr016/TestLogc.testout
new file mode 100644
index 0000000..afac3af
--- /dev/null
+++ b/libdb/test/scr016/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/libdb/test/scr016/TestOpenEmpty.java b/libdb/test/scr016/TestOpenEmpty.java
new file mode 100644
index 0000000..626b9f0
--- /dev/null
+++ b/libdb/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestOpenEmpty.testerr b/libdb/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 0000000..dd3e01c
--- /dev/null
+++ b/libdb/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/libdb/test/scr016/TestReplication.java b/libdb/test/scr016/TestReplication.java
new file mode 100644
index 0000000..533fe72
--- /dev/null
+++ b/libdb/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just a Vector of byte arrays, managed like a queue;
+ * synchronization is via the Vector object itself, with the
+ * client polling it rather than using wait/notify.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("master01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ if (msgtype == 3 || msgtype == 4) {
+                    System.out.println("CLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
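+    // Message framing used on the shared queue (this just summarizes the
+    // switch in run() and the matching code in send(); the original code
+    // leaves the protocol implicit):
+    //   -1          terminate the client loop
+    //    1          non-empty control Dbt; its bytes follow as the next element
+    //    2          empty control Dbt
+    //    3          non-empty rec Dbt; its bytes follow as the next element
+    //    4          empty rec Dbt
+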
+    // Implements DbRepTransport
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("client01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ catch (Exception e) {
+            System.err.println("master exception: " + e);
+ }
+ }
+
+ public static void main(String[] args)
+ {
+        // The test should only take a few milliseconds;
+        // give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+            // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+ // Wait for the master to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+        if (!finished) {
+            System.err.println("TestReplication timed out");
+            System.exit(1);
+        }
+ }
+}
diff --git a/libdb/test/scr016/TestRpcServer.java b/libdb/test/scr016/TestRpcServer.java
new file mode 100644
index 0000000..230be99
--- /dev/null
+++ b/libdb/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestSameDbt.java b/libdb/test/scr016/TestSameDbt.java
new file mode 100644
index 0000000..cce117b
--- /dev/null
+++ b/libdb/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test that a single Dbt cannot be used as both key and data in one call.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/libdb/test/scr016/TestSameDbt.testout b/libdb/test/scr016/TestSameDbt.testout
new file mode 100644
index 0000000..be4bbbe
--- /dev/null
+++ b/libdb/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/libdb/test/scr016/TestSimpleAccess.java b/libdb/test/scr016/TestSimpleAccess.java
new file mode 100644
index 0000000..8e6e3e7
--- /dev/null
+++ b/libdb/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ TestUtil.populate(db);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestSimpleAccess.testout b/libdb/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 0000000..dc88d47
--- /dev/null
+++ b/libdb/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/libdb/test/scr016/TestStat.java b/libdb/test/scr016/TestStat.java
new file mode 100644
index 0000000..77d10bb
--- /dev/null
+++ b/libdb/test/scr016/TestStat.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test of the statistics calls: Db.stat, DbEnv.log_stat and DbEnv.rep_stat.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestStat
+{
+ public static void main(String[] args)
+ {
+ int envflags =
+ Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_TXN | Db.DB_CREATE;
+ try {
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open(".", envflags, 0);
+ Db db = new Db(dbenv, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+
+ TestUtil.populate(db);
+ System.out.println("BtreeStat:");
+ DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+ System.out.println(" bt_magic: " + stat.bt_magic);
+
+ System.out.println("LogStat:");
+ DbLogStat logstat = dbenv.log_stat(0);
+ System.out.println(" st_magic: " + logstat.st_magic);
+ System.out.println(" st_cur_file: " + logstat.st_cur_file);
+
+ System.out.println("RepStat:");
+ DbRepStat repstat = dbenv.rep_stat(0);
+ System.out.println(" st_status: " + repstat.st_status);
+ System.out.println(" st_log_duplication: " +
+ repstat.st_log_duplicated);
+
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/libdb/test/scr016/TestStat.testout b/libdb/test/scr016/TestStat.testout
new file mode 100644
index 0000000..caf9db1
--- /dev/null
+++ b/libdb/test/scr016/TestStat.testout
@@ -0,0 +1,11 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+BtreeStat:
+ bt_magic: 340322
+LogStat:
+ st_magic: 264584
+ st_cur_file: 1
+RepStat:
+ st_status: 0
+ st_log_duplication: 0
+finished test
diff --git a/libdb/test/scr016/TestTruncate.java b/libdb/test/scr016/TestTruncate.java
new file mode 100644
index 0000000..8370d09
--- /dev/null
+++ b/libdb/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test of Db.truncate: put a record, truncate, and verify it is gone.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+                System.out.println("after truncate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/libdb/test/scr016/TestTruncate.testout b/libdb/test/scr016/TestTruncate.testout
new file mode 100644
index 0000000..23f291d
--- /dev/null
+++ b/libdb/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/libdb/test/scr016/TestUtil.java b/libdb/test/scr016/TestUtil.java
new file mode 100644
index 0000000..a9752ea
--- /dev/null
+++ b/libdb/test/scr016/TestUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Utilities used by many tests.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestUtil
+{
+ public static void populate(Db db)
+ throws DbException
+ {
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ }
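+
+    // Note: the "got data:" and "get using bad key:" lines printed above are
+    // the first two lines of TestSimpleAccess.testout and TestStat.testout,
+    // since both of those tests call populate() before printing anything else.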
+}
diff --git a/libdb/test/scr016/TestXAServlet.java b/libdb/test/scr016/TestXAServlet.java
new file mode 100644
index 0000000..9e18158
--- /dev/null
+++ b/libdb/test/scr016/TestXAServlet.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Simple test of XA, using WebLogic.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import com.sleepycat.db.xa.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import javax.servlet.*;
+import javax.servlet.http.*;
+import javax.transaction.*;
+import javax.transaction.xa.*;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import weblogic.transaction.TxHelper;
+import weblogic.transaction.TransactionManager;
+
+public class TestXAServlet extends HttpServlet
+{
+ public static final String ENV_HOME = "TESTXADIR";
+ public static final String DEFAULT_URL = "t3://localhost:7001";
+ public static String filesep = System.getProperty("file.separator");
+
+ private static TransactionManager tm;
+ private static DbXAResource xaresource;
+ private static boolean initialized = false;
+
+ /**
+ * Utility to remove files recursively.
+ */
+ public static void removeRecursive(File f)
+ {
+ if (f.isDirectory()) {
+ String[] sub = f.list();
+ for (int i=0; i<sub.length; i++)
+ removeRecursive(new File(f.getName() + filesep + sub[i]));
+ }
+ f.delete();
+ }
+
+ /**
+ * Typically done only once, unless shutdown is invoked. This
+ * sets up directories, and removes any work files from previous
+ * runs. Also establishes a transaction manager that we'll use
+ * for various transactions. Each call opens/creates a new DB
+ * environment in our work directory.
+ */
+ public static synchronized void startup()
+ {
+ if (initialized)
+ return;
+
+ try {
+ File dir = new File(ENV_HOME);
+ removeRecursive(dir);
+ dir.mkdirs();
+
+ System.out.println("Getting context");
+ InitialContext ic = getInitialContext(DEFAULT_URL);
+ System.out.println("Creating XAResource");
+ xaresource = new DbXAResource(ENV_HOME, 77, 0);
+ System.out.println("Registering with transaction manager");
+ tm = TxHelper.getTransactionManager();
+ tm.registerStaticResource("DbXA", xaresource);
+ initialized = true;
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+ initialized = true;
+ }
+
+ /**
+ * Closes the XA resource manager.
+ */
+ public static synchronized void shutdown(PrintWriter out)
+ throws XAException
+ {
+ if (!initialized)
+ return;
+
+ out.println("Closing the resource.");
+ xaresource.close(0);
+ out.println("Shutdown complete.");
+ initialized = false;
+ }
+
+
+ /**
+ * Should be called once per chunk of major activity.
+ */
+ public void initialize()
+ {
+ startup();
+ }
+
+ private static int count = 1;
+ private static boolean debugInited = false;
+ private Xid bogusXid;
+
+ public static synchronized int incrCount()
+ {
+ return count++;
+ }
+
+ public void debugSetup(PrintWriter out)
+ throws ServletException, IOException
+ {
+ try {
+ Db.load_db();
+ }
+ catch (Exception e) {
+ out.println("got exception during load: " + e);
+ System.out.println("got exception during load: " + e);
+ }
+ out.println("The servlet has been restarted, and Berkeley DB is loaded");
+ out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
+ }
+
+ public void doXATransaction(PrintWriter out, String key, String value,
+ String operation)
+ throws ServletException, IOException
+ {
+ try {
+ int counter = incrCount();
+ if (key == null || key.equals(""))
+ key = "key" + counter;
+ if (value == null || value.equals(""))
+ value = "value" + counter;
+
+ out.println("Adding (\"" + key + "\", \"" + value + "\")");
+
+ System.out.println("XA transaction begin");
+ tm.begin();
+ System.out.println("getting XA transaction");
+ DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
+ DbTxn txn = attach.get_txn();
+ DbEnv env = attach.get_env();
+ Db db = new Db(env, 0);
+ db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.out.println("DB put " + key);
+ db.put(txn,
+ new Dbt(key.getBytes()),
+ new Dbt(value.getBytes()),
+ 0);
+
+ if (operation.equals("rollback")) {
+ out.println("<p>ROLLBACK");
+ System.out.println("XA transaction rollback");
+ tm.rollback();
+ System.out.println("XA rollback returned");
+
+ // The old db is no good after the rollback
+ // since the open was part of the transaction.
+ // Get another db for the cursor dump
+ //
+ db = new Db(env, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ }
+ else {
+ out.println("<p>COMMITTED");
+ System.out.println("XA transaction commit");
+ tm.commit();
+ }
+
+ // Show the current state of the database.
+ Dbc dbc = db.cursor(null, 0);
+ Dbt gotkey = new Dbt();
+ Dbt gotdata = new Dbt();
+
+ out.println("<p>Current database values:");
+ while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
+ out.println("<br> " + getDbtString(gotkey) + " : "
+ + getDbtString(gotdata));
+ }
+ dbc.close();
+ db.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ out.println(" *** Exception received: " + dbe);
+ dbe.printStackTrace();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ out.println(" *** Exception received: " + fnfe);
+ fnfe.printStackTrace();
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ out.println(" *** Exception received: " + e);
+ e.printStackTrace();
+ }
+ }
+
+ private static Xid getBogusXid()
+ throws XAException
+ {
+ return new DbXid(1, "BOGUS_gtrid".getBytes(),
+ "BOGUS_bqual".getBytes());
+ }
+
+ private static String getDbtString(Dbt dbt)
+ {
+ return new String(dbt.get_data(), 0, dbt.get_size());
+ }
+
+ /**
+ * doGet is called as a result of invoking the servlet.
+ */
+ public void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException
+ {
+ try {
+ resp.setContentType("text/html");
+ PrintWriter out = resp.getWriter();
+
+ String key = req.getParameter("key");
+ String value = req.getParameter("value");
+ String operation = req.getParameter("operation");
+
+ out.println("<HTML>");
+ out.println("<HEAD>");
+ out.println("<TITLE>Berkeley DB with XA</TITLE>");
+ out.println("</HEAD><BODY>");
+ out.println("<a href=\"TestXAServlet" +
+ "\">Database put and commit</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=rollback" +
+ "\">Database put and rollback</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=close" +
+ "\">Close the XA resource manager</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=forget" +
+ "\">Forget an operation (bypasses TM)</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=prepare" +
+ "\">Prepare an operation (bypasses TM)</a><br>");
+ out.println("<br>");
+
+ if (!debugInited) {
+ // Don't initialize XA yet, give the user
+ // a chance to attach a debugger if necessary.
+ debugSetup(out);
+ debugInited = true;
+ }
+ else {
+ initialize();
+ if (operation == null)
+ operation = "commit";
+
+ if (operation.equals("close")) {
+ shutdown(out);
+ }
+ else if (operation.equals("forget")) {
+ // A bogus test; we just make sure the API is callable.
+ out.println("<p>FORGET");
+ System.out.println("XA forget bogus XID (bypass TM)");
+ xaresource.forget(getBogusXid());
+ }
+ else if (operation.equals("prepare")) {
+ // A bogus test; we just make sure the API is callable.
+ out.println("<p>PREPARE");
+ System.out.println("XA prepare bogus XID (bypass TM)");
+ xaresource.prepare(getBogusXid());
+ }
+ else {
+ // commit or rollback (any other value is treated as commit)
+ doXATransaction(out, key, value, operation);
+ }
+ }
+ out.println("</BODY></HTML>");
+
+ System.out.println("Finished.");
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+
+ }
+
+
+ /**
+ * From weblogic's sample code:
+ * samples/examples/jta/jmsjdbc/Client.java
+ */
+ private static InitialContext getInitialContext(String url)
+ throws NamingException
+ {
+ Hashtable env = new Hashtable();
+ env.put(Context.INITIAL_CONTEXT_FACTORY,
+ "weblogic.jndi.WLInitialContextFactory");
+ env.put(Context.PROVIDER_URL, url);
+ return new InitialContext(env);
+ }
+
+}
diff --git a/libdb/test/scr016/chk.javatests b/libdb/test/scr016/chk.javatests
new file mode 100644
index 0000000..0c470d1
--- /dev/null
+++ b/libdb/test/scr016/chk.javatests
@@ -0,0 +1,79 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that regression tests for Java run.
+
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently from JDK1.1 and JDK1.2
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
+ pwd
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/libdb/test/scr016/ignore b/libdb/test/scr016/ignore
new file mode 100644
index 0000000..618bedd
--- /dev/null
+++ b/libdb/test/scr016/ignore
@@ -0,0 +1,22 @@
+#
+# $Id$
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
+# These are currently not working
+TestAppendRecno
+TestAssociate
+TestLogc
+TestConstruct02
+
+# TestUtil is used by the other tests, it does not stand on its own
+TestUtil
+
+# XA needs a special installation, it is not part of testall
+TestXAServlet
diff --git a/libdb/test/scr016/testall b/libdb/test/scr016/testall
new file mode 100644
index 0000000..5b1fb09
--- /dev/null
+++ b/libdb/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id$
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/libdb/test/scr016/testone b/libdb/test/scr016/testone
new file mode 100644
index 0000000..54a0957
--- /dev/null
+++ b/libdb/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id$
+#
+# Run just one Java regression test; the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently from JDK1.1 and JDK1.2
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
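+# (--prefix points at an installed tree: its lib directory supplies the
+# shared library and db.jar instead of the local build.)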
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/libdb/test/scr017/O.BH b/libdb/test/scr017/O.BH
new file mode 100644
index 0000000..cd499d3
--- /dev/null
+++ b/libdb/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/libdb/test/scr017/O.R b/libdb/test/scr017/O.R
new file mode 100644
index 0000000..d78a047
--- /dev/null
+++ b/libdb/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/libdb/test/scr017/chk.db185 b/libdb/test/scr017/chk.db185
new file mode 100644
index 0000000..3ed4ee6
--- /dev/null
+++ b/libdb/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/libdb/test/scr017/t.c b/libdb/test/scr017/t.c
new file mode 100644
index 0000000..f03b338
--- /dev/null
+++ b/libdb/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = data.data = buf;
+ key.size = data.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
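+ /*
+ * For the byte-key case, "key" still points into buf from the last
+ * loop pass; the sprintf below rewrites buf in place and the key
+ * length is unchanged ("abc_99_efg" and "abc_97_efg" are the same
+ * size), so the lookup finds the record stored for 97.
+ */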
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
diff --git a/libdb/test/scr018/chk.comma b/libdb/test/scr018/chk.comma
new file mode 100644
index 0000000..ecfd929
--- /dev/null
+++ b/libdb/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/libdb/test/scr018/t.c b/libdb/test/scr018/t.c
new file mode 100644
index 0000000..4056a60
--- /dev/null
+++ b/libdb/test/scr018/t.c
@@ -0,0 +1,46 @@
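+/*
+ * Scan the files named on the command line for a trailing comma in a
+ * declaration: a ',' followed (across blanks and a newline) by a '}'.
+ * The file name and line number of each occurrence are printed.
+ */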
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit (1);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
diff --git a/libdb/test/scr019/chk.include b/libdb/test/scr019/chk.include
new file mode 100644
index 0000000..9bca351
--- /dev/null
+++ b/libdb/test/scr019/chk.include
@@ -0,0 +1,40 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check for inclusion of files already included in db_int.h.
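+# (db_int.h already pulls in a standard set of system headers; sources that
+# include db_int.h should not include any of those headers again themselves.)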
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h/d' \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_dump185/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^test_/d' \
+ -e '/^mutex\/tm.c/d' > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/libdb/test/scr020/chk.inc b/libdb/test/scr020/chk.inc
new file mode 100644
index 0000000..d5ea274
--- /dev/null
+++ b/libdb/test/scr020/chk.inc
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check for inclusion of db_config.h after "const" or other includes.
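+# (db_config.h is the autoconf-generated configuration header; among other
+# things it may redefine "const" away for older compilers, so it has to be
+# the very first thing a source file includes.)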
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') > $t1
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
diff --git a/libdb/test/scr021/chk.flags b/libdb/test/scr021/chk.flags
new file mode 100644
index 0000000..1d13ac9
--- /dev/null
+++ b/libdb/test/scr021/chk.flags
@@ -0,0 +1,97 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check flag name-spaces.
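+# Each handle type has its own flag name-space: DB_ENV handles should only
+# be tested/set with DB_ENV_* flags, DB handles with DB_AM_* flags, and
+# cursors with DBC_* flags; the greps below look for violations.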
+
+d=../..
+
+t1=__1
+
+# Check for DB_ENV flags.
+(grep 'F_ISSET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbenv,' $d/*/*.[chys]) |
+ sed -e '/DB_ENV_/d' -e '/F_SET([^ ]*dbenv, db_env_reset)/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_ENV_' $d/*/*.c |
+sed -e '/F_.*dbenv,/d' \
+ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \
+ -e '/\/libdb_java\//d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DB flags.
+(grep 'F_ISSET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbp,' $d/*/*.[chys]) |
+ sed -e '/DB_AM_/d' \
+ -e '/db.c:.*F_SET.*F_ISSET(subdbp,/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_AM_' $d/*/*.c |
+sed -e '/F_.*dbp/d' \
+ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \
+ -e '/"DB_NODUPDATA" behavior for databases with/d' \
+ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \
+ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \
+ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \
+ -e '/ DB_AM_RECNUM\./d' \
+ -e '/ DB_AM_RECOVER set\./d' \
+ -e '/isdup = dbp->flags & DB_AM_DUP/d' \
+ -e '/otherwise we simply do/d' \
+ -e '/pginfo/d' \
+ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \
+ -e '/:[ {]*DB_AM_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DBC flags.
+(grep 'F_ISSET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbc,' $d/*/*.[chys]) |
+ sed -e '/DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DBC_' $d/*/*.c |
+sed -e '/F_.*dbc/d' \
+ -e '/DBC_INTERNAL/d' \
+ -e '/DBC_LOGGING/d' \
+ -e '/Do the actual get. Set DBC_TRANSIENT/d' \
+ -e '/If DBC_WRITEDUP is set, the cursor is an in/d' \
+ -e '/The DBC_TRANSIENT flag indicates that we/d' \
+ -e '/This function replaces the DBC_CONTINUE and DBC_KEYSET/d' \
+ -e '/db_cam.c:.*F_CLR(opd, DBC_ACTIVE);/d' \
+ -e '/{ DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for bad use of macros.
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+ echo 'non-test flag macro (F_SET/F_CLR) used in a conditional or loop'
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/scr022/chk.rr b/libdb/test/scr022/chk.rr
new file mode 100644
index 0000000..5b9a091
--- /dev/null
+++ b/libdb/test/scr022/chk.rr
@@ -0,0 +1,22 @@
+#!/bin/sh -
+#
+# $Id$
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
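+# (__db_panic marks the environment as panic'd and runs the application's
+# panic callback before returning DB_RUNRECOVERY; returning the error code
+# directly would skip those steps.)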
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/common\/db_err.c:/d' \
+ -e '/libdb_java\/java_util.c:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to db_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/libdb/test/sdb001.tcl b/libdb/test/sdb001.tcl
new file mode 100644
index 0000000..a99e2fb
--- /dev/null
+++ b/libdb/test/sdb001.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb001
+# TEST Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
+proc subdb001 { method args } {
+ source ./include.tcl
+ global errorInfo
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
+ puts "Subdb001: $method ($args) subdb and non-subdb tests"
+
+ set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
+ set subdb subdb0
+ cleanup $testdir NULL
+ puts "\tSubdb001.a: Non-subdb database and subdb operations"
+ #
+ # Create a db with no subdbs. Add some data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+ #
+ # Create a db with no subdbs. Add no data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ set testfile $testdir/subdb001a.db
+ puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping remainder of test for method $method"
+ return
+ }
+
+ #
+ # Test naming, db and subdb names beginning with -.
+ #
+ puts "\tSubdb001.b: Naming"
+ set cwd [pwd]
+ cd $testdir
+ set testfile1 -subdb001.db
+ set subdb -subdb
+ puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
+ set ret [catch {eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile1 $subdb}} db]
+ error_check_bad dbopen $ret 0
+ puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod -- $testfile1 $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ cd $cwd
+
+ #
+ # Create 1 db with 1 subdb. Try to create another subdb of
+ # the same name. Should fail.
+ #
+ puts "\tSubdb001.c: Truncate check"
+ set testfile $testdir/subdb001c.db
+ set subdb subdb
+ set stat [catch {eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $args {$omethod $testfile $subdb}} ret]
+ error_check_bad dbopen $stat 0
+ error_check_good trunc [is_substr $ret \
+ "illegal with multiple databases"] 1
+
+ puts "\tSubdb001.d: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
+ set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
+ $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+ error_check_good db_close [$db close] 0
+
+ return
+}
diff --git a/libdb/test/sdb002.tcl b/libdb/test/sdb002.tcl
new file mode 100644
index 0000000..a3323d2
--- /dev/null
+++ b/libdb/test/sdb002.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
+proc subdb002 { method {nentries 10000} args } {
+ global passwd
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
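+ # Run the whole test three times: with the method's default args,
+ # with -chksum added, and with -chksum plus -encryptaes.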
+ subdb002_main $method $nentries $largs
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
+ source ./include.tcl
+ global encrypt
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ puts "Subdb002: $method ($largs) basic subdb tests"
+ set testfile $testdir/subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile NULL
+
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ cleanup $testdir NULL
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644 -txn} -home $testdir $encargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+ puts "Subdb002: $method ($largs) basic subdb tests in an environment"
+
+ # We're in an env--use default path to database rather than specifying
+ # it explicitly.
+ set testfile subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_open berkdb_open_noerr
+ } else {
+ set sdb002_open berkdb_open
+ }
+
+ if { $env == "NULL" } {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {$omethod $testfile $subdb}} db]
+ } else {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {-env $env $omethod $testfile $subdb}} db]
+ }
+
+ #
+ # If -queue method, we need to make sure that trying to
+ # create a subdb fails.
+ if { [is_queue $method] == 1 } {
+ error_check_bad dbopen $ret 0
+ puts "Subdb002: skipping remainder of test for method $method"
+ return
+ }
+
+ error_check_good dbopen $ret 0
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb002_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb002.check
+ }
+ puts "\tSubdb002.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tSubdb002.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb002.d: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
+}
+
+# Check function for Subdb002; keys and data are identical
+proc subdb002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb002_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/libdb/test/sdb003.tcl b/libdb/test/sdb003.tcl
new file mode 100644
index 0000000..2333d5d
--- /dev/null
+++ b/libdb/test/sdb003.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 2000)
+# TEST
+# TEST Use the first 10,000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+proc subdb003 { method {nentries 1000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb003: skipping for method $method"
+ return
+ }
+
+ puts "Subdb003: $method ($args) many subdb tests"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ incr fcount
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $ndataent} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
+ }
+ close $fdid
+ puts ""
+}
+
+# Check function for Subdb003; keys and data are identical
+proc subdb003.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb003_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/libdb/test/sdb004.tcl b/libdb/test/sdb004.tcl
new file mode 100644
index 0000000..03f5fca
--- /dev/null
+++ b/libdb/test/sdb004.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc subdb004 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb004: skipping for method $method"
+ return
+ }
+
+ puts "Subdb004: $method ($args) \
+ filecontents=subdbname filename=key filecontents=data pairs"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb004_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc subdb004.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
+ set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
+
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $fcount} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ }
+ puts "\tSubdb004.a: Set/Check each subdb"
+ foreach f $file_list {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ set subdb $data
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+
+ error_check_good Subdb004:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ # puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ }
+
+ #
+ # Now for each file, check that the subdb name is the same
+ # as the data in that subdb and that the filename is the key.
+ #
+ puts "\tSubdb004.b: Compare subdb names with key/data"
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get -first] } { [llength $d] != 0 } \
+ {set d [$c get -next] } {
+ set subdbname [lindex [lindex $d 0] 0]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
+ error_check_good dbopen [is_valid_db $subdb] TRUE
+
+ # Output the subdb name
+ set ofid [open $t3 w]
+ fconfigure $ofid -translation binary
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 1 $slen]
+ }
+ puts -nonewline $ofid $subdbname
+ close $ofid
+
+ # Output the data
+ set subc [eval {$subdb cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
+ set d [$subc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set key [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+
+ set ofid [open $t1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $key $t1
+ $checkfunc $key $t3
+
+ error_check_good Subdb004:diff($t3,$t1) \
+ [filecmp $t3 $t1] 0
+ error_check_good curs_close [$subc close] 0
+ error_check_good db_close [$subdb close] 0
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_record_based $method] != 1 } {
+ fileremove $t2.tmp
+ }
+}
+
+# Check function for subdb004; key should be file name; data should be contents
+proc subdb004.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc subdb004_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Subdb004:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/libdb/test/sdb005.tcl b/libdb/test/sdb005.tcl
new file mode 100644
index 0000000..3c57ea9
--- /dev/null
+++ b/libdb/test/sdb005.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb005 {method {nentries 100} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb005: skipping for method $method"
+ return
+ }
+
+ puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ set txn ""
+ set psize 8192
+ set duplist {-1 -1 -1 -1 -1}
+ build_all_subdb \
+ $testfile [list $method] $psize $duplist $nentries $args
+ set numdb [llength $duplist]
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0} {$i < $numdb} {incr i} {
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db_handle($i) $db
+ # Used in 005.c test
+ lappend subdbnames sub$i.db
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set d [$dbc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Used in 005.b test
+ set db_key($i) [lindex [lindex $d 0] 0]
+
+ set d [$dbc get -prev]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ set d [$dbc get -last]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set d [$dbc get -next]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
+ }
+ #
+ # Get a key from each subdb and try to get this key in a
+ # different subdb. Make sure it fails
+ #
+ puts "\tSubdb005.b: Get keys in different subdb's"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set n [expr $i + 1]
+ if {$n == $numdb} {
+ set n 0
+ }
+ set db $db_handle($i)
+ if { [is_record_based $method] == 1 } {
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
+ error_check_good \
+ db_get [expr [llength $d] == 0] 1
+ } else {
+ set d [eval {$db get} $txn {$db_key($n)}]
+ error_check_good db_get [expr [llength $d] == 0] 1
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ #
+ # Clean up
+ #
+ for {set i 0} {$i < $numdb} {incr i} {
+ error_check_good db_close [$db_handle($i) close] 0
+ }
+
+ #
+ # Check contents of DB for subdb names only. Makes sure that
+ # every subdbname is there and that nothing else is there.
+ #
+ puts "\tSubdb005.c: Check DB is read-only"
+ error_check_bad dbopen [catch \
+ {berkdb_open_noerr -unknown $testfile} ret] 0
+
+ puts "\tSubdb005.d: Check contents of DB for subdb names only"
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set subdblist [$db get -glob *]
+ foreach kd $subdblist {
+ # subname also used in subdb005.e,f below
+ set subname [lindex $kd 0]
+ set i [lsearch $subdbnames $subname]
+ error_check_good subdb_search [expr $i != -1] 1
+ set subdbnames [lreplace $subdbnames $i $i]
+ }
+ error_check_good subdb_done [llength $subdbnames] 0
+
+ error_check_good db_close [$db close] 0
+ return
+}
diff --git a/libdb/test/sdb006.tcl b/libdb/test/sdb006.tcl
new file mode 100644
index 0000000..97ad6a8
--- /dev/null
+++ b/libdb/test/sdb006.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb006 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ # NB: these flags are internal only, ok
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb006 skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
+ berkdb srand $rand_init
+
+ set oargs $args
+ foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+
+ puts "Subdb006: $method ( $args ) Intra-subdb join"
+ set txn ""
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb006.a: Intra-subdb join"
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set psize 8192
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $args
+
+ # Build the primary
+ puts "Subdb006: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key [format "%04d" $i]
+ set ret [eval {$db put} $txn {$key stub}]
+ error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
+
+ close $did
+ }
+}
diff --git a/libdb/test/sdb007.tcl b/libdb/test/sdb007.tcl
new file mode 100644
index 0000000..b456983
--- /dev/null
+++ b/libdb/test/sdb007.tcl
@@ -0,0 +1,132 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST Test 3 different scenarios for page sizes.
+# TEST 1. Create/open with a default page size, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 2. Create/open with specific page size, 2nd subdb create with
+# TEST different one, should error.
+# TEST 3. Create/open with specified page size, 2nd subdb create with
+# TEST same specified size, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb007 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb007: skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb007: skipping for specific page sizes"
+ return
+ }
+
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
+ } else {
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
+
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ #
+ # Figure out what the default page size is so that we can
+ # guarantee we create it with a different value.
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+ error_check_good dbclose [$db close] 0
+
+ if { $pgsz == 512 } {
+ set pgsz2 2048
+ } else {
+ set pgsz2 512
+ }
+
+ puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set newpgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set newpgsz [lindex $pair 1]
+ }
+ }
+ error_check_good pgsize $pgsz2 $newpgsz
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: create 2nd subdb with different page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.c.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/libdb/test/sdb008.tcl b/libdb/test/sdb008.tcl
new file mode 100644
index 0000000..0179ab8
--- /dev/null
+++ b/libdb/test/sdb008.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+# TEST subdb008
+# TEST Tests lorder difference errors between subdbs.
+# TEST Test 3 different scenarios for lorder.
+# TEST 1. Create/open with specific lorder, 2nd subdb create with
+# TEST different one, should error.
+# TEST	2. Create/open with a default lorder, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 3. Create/open with specified lorder, 2nd subdb create with
+# TEST same specified lorder, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb008 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb008: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb008.db
+ set env NULL
+ } else {
+ set testfile subdb008.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append db2args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+
+ puts "\tSubdb008.b.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ # Figure out what the default lorder is so that we can
+ # guarantee we create it with a different value later.
+ set is_swap [$db is_byteswapped]
+ if { $is_swap } {
+ set other 4321
+ } else {
+ set other 1234
+ }
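+	# E.g. on a little-endian host the db just created with -lorder
+	# 4321 reports is_byteswapped as 1, so the host's default lorder
+	# is 1234 and $other (4321) is the non-default value used in the
+	# d and e sections below.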
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 1234 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.c.0: create subdb with opposite specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.c.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 4321 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.d.0: create subdb with default lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.d.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-lorder $other $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.e.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.e.1: create 2nd subdb with same specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/libdb/test/sdb009.tcl b/libdb/test/sdb009.tcl
new file mode 100644
index 0000000..0e90ca8
--- /dev/null
+++ b/libdb/test/sdb009.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb009
+# TEST Test DB->rename() method for subdbs
+proc subdb009 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb009: $method ($args): Test of DB->rename()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb009: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
+
+ puts "\tSubdb009.a: Create/rename file"
+ puts "\t\tSubdb009.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644}\
+ $omethod $args {$testfile $oldsdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# The nature of the key and data is unimportant; use a numeric key
+	# so record-based methods don't need special treatment.
+ set txn ""
+ set key 1
+ set data [pad_data $method data]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb009.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
+
+ puts "\t\tSubdb009.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $oldsdb]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $newsdb]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+	# The DBT from the "old" database should be empty; the one from
+	# the "new" database should not be.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ puts "\tSubdb009 succeeded."
+}
diff --git a/libdb/test/sdb010.tcl b/libdb/test/sdb010.tcl
new file mode 100644
index 0000000..f640fb1
--- /dev/null
+++ b/libdb/test/sdb010.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
+proc subdb010 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "Subdb010: Test of DB->remove() and DB->truncate()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb010: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb010.db
+ set tfpath $testfile
+ set env NULL
+ } else {
+ set testfile subdb010.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set testdb DATABASE
+ set testdb2 DATABASE2
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
+
+ # File should still exist.
+ error_check_good file_exists_after [file exists $tfpath] 1
+
+ # But database should not.
+ set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
+	error_check_bad open_failed $ret 0
+ error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+	# The nature of the key and data is unimportant; use a numeric key
+	# so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+ puts "\tSubdb010 succeeded."
+}
diff --git a/libdb/test/sdb011.tcl b/libdb/test/sdb011.tcl
new file mode 100644
index 0000000..b0f4e67
--- /dev/null
+++ b/libdb/test/sdb011.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
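+#
+# Each duplicate's data below is "$dup:" followed by an entire file's
+# contents (from get_file_list), so every data item is much larger than
+# a page and is forced onto overflow (or, for hash, "big") pages.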
+proc subdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $max_files != 0 && [llength $file_list] > $max_files } {
+ set fend [expr $max_files - 1]
+ set file_list [lrange $file_list 0 $fend]
+ }
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+		# Remove the chosen subdb from the list before removing
+		# it from the file.
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
diff --git a/libdb/test/sdb012.tcl b/libdb/test/sdb012.tcl
new file mode 100644
index 0000000..9bcdf80
--- /dev/null
+++ b/libdb/test/sdb012.tcl
@@ -0,0 +1,428 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST subdb012
+# TEST Test subdbs with locking and transactions
+# TEST Tests creating and removing subdbs while handles
+# TEST are open works correctly, and in the face of txns.
+#
+proc subdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+	# The allowed commands are 'none', 'one', 'auto', 'abort', and 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ #
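+	# For example, the tlist {one commit auto abort} means: create both
+	# subdbs inside a single txn and commit it, remove the first subdb
+	# with -auto_commit, and remove the second subdb inside its own
+	# txn, which is then aborted.  A single combination can also be
+	# run by hand via the s012 helper below, e.g.
+	# "s012 btree one commit auto abort".
+	#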
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
+ # Now run through the txn-based operations
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort auto commit } {
+ foreach r2 { none abort auto commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
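+# E.g. for the tlist {one commit auto abort} it begins one txn T for the
+# 'one'/'commit' pair and returns roughly
+#	{"-txn T" "-txn T" "-auto_commit" "-txn T2"}
+# where T2 is a fresh txn begun for the final 'abort' slot.
+#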
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations. I.e. it will
+# give "" if using auto_commit (i.e. no final txn op), or a single
+# abort/commit if both subdb's are in one txn.
+#
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+			# If the previous command was a "one" (i.e. t1
+			# is set), append a no-op for that slot and then
+			# the real commit/abort of the single txn.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+ # Verify removes of subdbs with open subdb's fail
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+ # If both aborted, there should be no file??
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
diff --git a/libdb/test/sdbscript.tcl b/libdb/test/sdbscript.tcl
new file mode 100644
index 0000000..875153b
--- /dev/null
+++ b/libdb/test/sdbscript.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Usage: subdbscript testfile subdbnumber factor
+# testfile: name of DB itself
+# subdbnumber: n, subdb indicator, of form sub$n.db
+# factor: delete every factor'th entry, offset by n, from my subdb.
+#
+# I.e. if factor is 10 and n is 0, remove entries 0, 10, 20, ...
+# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
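+#
+# For example, sdbtest002 runs (via wrap.tcl) the equivalent of
+#	tclsh sdbscript.tcl subdbtest002.db 1 6
+# which walks sub1.db with a cursor and deletes entries 1, 7, 13, ...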
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "subdbscript testfile subdbnumber factor"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testfile [lindex $argv 0]
+set n [ lindex $argv 1 ]
+set factor [ lindex $argv 2 ]
+
+set db [berkdb_open -unknown $testfile sub$n.db]
+error_check_good db_open [is_valid_db $db] TRUE
+
+set dbc [$db cursor]
+error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+set i 1
+for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
+ set x [expr $i - $n]
+ if { $x >= 0 && [expr $x % $factor] == 0 } {
+ puts "Deleting $d"
+ error_check_good dbc_del [$dbc del] 0
+ }
+ incr i
+}
+error_check_good db_close [$db close] 0
+
+exit
diff --git a/libdb/test/sdbtest001.tcl b/libdb/test/sdbtest001.tcl
new file mode 100644
index 0000000..c939f96
--- /dev/null
+++ b/libdb/test/sdbtest001.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sdbtest001
+# TEST Tests multiple access methods in one subdb
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest001: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest001.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
+ lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
+ lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
+ lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
+ lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
+ lappend method_list [list "-rrecno" "-recno"]
+ lappend method_list [list "-recno" "-rrecno"]
+ lappend method_list [list "-hash" "-dhash"]
+ lappend method_list [list "-dhash" "-hash"]
+ lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
+ lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
+ lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
+ lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
+ foreach methods $method_list {
+ cleanup $testdir NULL
+ puts "\tSubdbtest001.a: create subdbs of different access methods:"
+ puts "\tSubdbtest001.a: $methods"
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set psize [lindex $plist $pgsz]
+ incr pgsz
+ set newent [expr $nentries / $nsubdbs]
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest001_recno.check
+ } else {
+ set checkfunc subdbtest001.check
+ }
+
+ puts "\tSubdbtest001.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the
+ # dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ # filehead uses 1-based line numbers
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ }
+}
+
+# Check function for Subdbtest001; keys and data are identical
+proc subdbtest001.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest001_recno.check { key data } {
+global dict
+global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/libdb/test/sdbtest002.tcl b/libdb/test/sdbtest002.tcl
new file mode 100644
index 0000000..74a1ae5
--- /dev/null
+++ b/libdb/test/sdbtest002.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sdbtest002
+# TEST	Tests multiple access methods in one subdb, accessed by
+# TEST	multiple processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST	Fork off some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest002: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest002.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ set methods \
+ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
+ cleanup $testdir NULL
+ puts "\tSubdbtest002.a: create subdbs of different access methods:"
+ puts "\t\t$methods"
+ set psize 4096
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+
+ #
+ # XXX We need dict sorted to figure out what was deleted
+ # since things are stored sorted in the btree.
+ #
+ filesort $dict $t4
+ set dictorig $dict
+ set dict $t4
+
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ set pidlist ""
+ puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "$tclsh_path\
+ $test_path/sdbscript.tcl $testfile \
+ $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ sdbscript.tcl \
+ $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest002_recno.check
+ } else {
+ set checkfunc subdbtest002.check
+ }
+
+ puts "\tSubdbtest002.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+ #
+ # This is just so that t2 is there and empty
+ # since we are only appending below.
+ #
+ exec > $t2
+
+ # Now compare the keys to see if they match the dictionary (or ints)
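+		# After sdbscript.tcl has run, sub$subdb.db is missing every
+		# $nsubdbs'th entry; e.g. for subdb 1 of 6, entries 1, 7,
+		# 13, ... were deleted.  The loops below rebuild the set of
+		# keys that should remain.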
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t4 r]
+ for {set i 1} {[gets $oid line] >= 0} {incr i} {
+ set farr($i) $line
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+				# Line numbers in farr are 1-based
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ set beg [expr $subdb * $newent]
+ set beg [expr $beg + $i]
+ puts $oid $farr($beg)
+ }
+ }
+ close $oid
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ set dict $dictorig
+ return
+}
+
+# Check function for Subdbtest002; keys and data are identical
+proc subdbtest002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest002_recno.check { key data } {
+global dict
+global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/libdb/test/sdbutils.tcl b/libdb/test/sdbutils.tcl
new file mode 100644
index 0000000..07c5852
--- /dev/null
+++ b/libdb/test/sdbutils.tcl
@@ -0,0 +1,197 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
+ set nsubdbs [llength $dups]
+ set mlen [llength $methods]
+ set savearg $dbargs
+ for {set i 0} {$i < $nsubdbs} { incr i } {
+ set m [lindex $methods [expr $i % $mlen]]
+ set dbargs $savearg
+ subdb_build $dbname $nentries [lindex $dups $i] \
+ $i $m $psize sub$i.db $dbargs
+ }
+}
+
+proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
+ source ./include.tcl
+
+ set dbargs [convert_args $method $dbargs]
+ set omethod [convert_method $method]
+
+ puts "Method: $method"
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ }
+ # Create the database and open the dictionary
+ set oflags "-create -mode 0644 $omethod \
+ -pagesize $psize $dbargs $name $subdb"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ if { $ndups >= 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys keys with $ndups duplicates at interval of $dup_interval"
+ }
+ if { $ndups < 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys unique keys of pagesize $psize"
+ #
+ # If ndups is < 0, we want unique keys in each subdb,
+ # so skip ahead in the dict by nkeys * iteration
+ #
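+		# E.g. with nkeys 100, the subdb built with dup_interval 2
+		# (sub2.db) skips the first 200 dictionary words, so each
+		# subdb gets its own disjoint slice of the dictionary.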
+ for { set count 0 } \
+ { $count < [expr $nkeys * $dup_interval] } {
+ incr count} {
+ set ret [gets $did str]
+ if { $ret == -1 } {
+ break
+ }
+ }
+ }
+ set txn ""
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $ndups == 0 } {
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method NODUP]}]
+ error_check_good put $ret 0
+ } elseif { $ndups < 0 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set num [expr $nkeys * $dup_interval]
+ set num [expr $num + $count + 1]
+ set ret [eval {$db put} $txn {$num \
+ [chop_data $method $str]}]
+ set kvals($num) [pad_data $method $str]
+ error_check_good put $ret 0
+ } else {
+ set ret [eval {$db put} $txn \
+ {$str [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join_subdb { db primary subdbs key oargs } {
+ source ./include.tcl
+
+ puts "\tJoining: $subdbs on $key"
+
+ # Open all the databases
+ set p [eval {berkdb_open -unknown} $oargs $db $primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ foreach i $subdbs {
+ set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
+ error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
+
+ lappend jlist [list $jdb $key]
+ lappend dblist $jdb
+
+ }
+
+ set join_res [eval {$p get_join} $jlist]
+ set ndups [llength $join_res]
+
+	# Calculate how many dups we expect.
+	# We walk the list of indices.  If we find a 0, we expect 0 dups.
+	# Otherwise we track the least common multiple of the indices seen
+	# so far: when one index divides the other, the lcm is simply the
+	# larger, otherwise it is computed via gcd.  The expected count is
+	# the number of times that lcm goes into 50.
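+	# E.g. for subdbs {2 3} the indices are not mutually divisible, so
+	# last becomes lcm(2,3) = 6 and we expect 50 / 6 = 8 matches; for
+	# {2 4}, 4 is a multiple of 2, so we expect 50 / 4 = 12.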
+ set expected 50
+ set last 1
+ foreach n $subdbs {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ error_check_good number_of_dups:$subdbs $ndups $expected
+
+ #
+	# If we get here, we have the expected number of matches; now loop
+	# through each one and verify that it is a value we expect.
+ #
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set pair [lindex $join_res $i]
+ set k [lindex $pair 0]
+ foreach j $subdbs {
+ error_check_bad valid_dup:$j:$subdbs $j 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good \
+ valid_dup:$j:$subdbs [expr $kval % $j] 0
+ }
+ }
+
+ error_check_good close_primary [$p close] 0
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_subname { n } {
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return sub$n.db;
+ }
+}
diff --git a/libdb/test/sec001.tcl b/libdb/test/sec001.tcl
new file mode 100644
index 0000000..7f14979
--- /dev/null
+++ b/libdb/test/sec001.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sec001
+# TEST Test of security interface
+proc sec001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 env1.db
+ set testfile2 $testdir/env2.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ puts "Sec001: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd1_bad "passwd1_bad"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+
+ #
+ # This first group tests bad create scenarios and also
+ # tests attempting to use encryption after creating a
+ # non-encrypted env/db to begin with.
+ #
+ set nopass ""
+ puts "\tSec001.a.1: Create db with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.a.2: Open db without encryption."
+ set stat [catch {berkdb_open_noerr $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.b.1: Create db without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.b.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.c.1: Create db with checksum."
+ set db [berkdb_open -create -chksum -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.c.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.d.1: Create subdb with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree \
+ $testfile2 $subdb1]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.d.2: Create 2nd subdb without encryption."
+ set stat [catch {berkdb_open_noerr -create -btree \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.e.1: Create subdb without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2 $subdb1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.e.2: Create 2nd subdb with encryption."
+ set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+ env_cleanup $testdir
+
+ puts "\tSec001.f.1: Open env with encryption, empty passwd."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptaes $nopass} ret]
+ error_check_good env:nopass $stat 1
+ error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+ puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptany $passwd1} ret]
+ error_check_good env:any $stat 1
+ error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+ puts "\tSec001.f.3: Create without encryption."
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.f.4: Open again with encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1} ret]
+ error_check_good env:unencrypted $stat 1
+ error_check_good env:fail [is_substr $ret \
+ "Joining non-encrypted environment"] 1
+
+ error_check_good envclose [$env close] 0
+
+ env_cleanup $testdir
+
+ #
+ # This second group tests creating and opening a secure env.
+ # We test that others can join successfully, and that others with
+ # bad/no passwords cannot. Also test that we cannot use the
+ # db->set_encrypt method when we've already got a secure dbenv.
+ #
+ puts "\tSec001.g.1: Open with encryption."
+ set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.g.2: Open again with encryption - same passwd."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+ set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.4: Open with encryption - different length passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.5: Open with encryption - different passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd2} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.6: Open env without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.g.7: Open database with encryption in env"
+ set stat [catch {berkdb_open_noerr -env $env -btree -create \
+ -encryptaes $passwd2 $testfile1} ret]
+ error_check_good db:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+ puts "\tSec001.g.8: Close creating env"
+ error_check_good envclose [$env close] 0
+
+ #
+ # This third group tests opening the env after the original env
+ # handle is closed. Just to make sure we can reopen it in
+ # the right fashion even if no handles are currently open.
+ #
+ puts "\tSec001.h.1: Reopen without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.h.2: Reopen with bad passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+ $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.h.3: Reopen with encryption."
+ set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.h.4: 2nd Reopen with encryption."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+
+ error_check_good envclose [$env1 close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tSec001 complete."
+}
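A note on the idiom used throughout sec001 above: berkdb_open_noerr suppresses DB's error callback so an expected failure can be trapped with Tcl's catch, and the test then asserts both the non-zero status and a fragment of the error text. Below is a minimal, hedged sketch of the same pattern; the helper name and the example message fragment are illustrative only and are not part of the test suite.

# Expect an open to fail and to mention a given error-text fragment.
proc expect_open_failure { openargs msgfrag } {
	set stat [catch {eval berkdb_open_noerr $openargs} ret]
	error_check_good open_should_fail $stat 1
	error_check_good open_error_text [is_substr $ret $msgfrag] 1
}

# Usage (illustrative): opening an encrypted db without a key should fail.
# expect_open_failure {-encryptaes passwd1 plain.db} "supplied encryption key"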
diff --git a/libdb/test/sec002.tcl b/libdb/test/sec002.tcl
new file mode 100644
index 0000000..2457657
--- /dev/null
+++ b/libdb/test/sec002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+proc sec002 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 $testdir/sec002-1.db
+ set testfile2 $testdir/sec002-2.db
+ set testfile3 $testdir/sec002-3.db
+ set testfile4 $testdir/sec002-4.db
+
+ puts "Sec002: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+ set pagesize 1024
+
+ #
+ # Set up 4 databases: two encrypted, each with a different password;
+ # one unencrypted but with checksumming turned on; and one
+ # unencrypted with no checksumming.  Place the exact same data
+ # in each one.
+ #
+ puts "\tSec002.a: Setup databases"
+ set db_cmd "-create -pagesize $pagesize -btree "
+ set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $db_cmd $testfile4]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ #
+ # First just touch some bits in the file. We know that in btree
+ # meta pages, bytes 92-459 are unused.  Scribble on them in the
+ # encrypted file and in both unencrypted files.  We should get a
+ # checksum error for the encrypted and the checksummed files, and
+ # no error for the normal file.
+ #
+ set fidlist {}
+ set fid [open $testfile1 r+]
+ lappend fidlist $fid
+ set fid [open $testfile3 r+]
+ lappend fidlist $fid
+ set fid [open $testfile4 r+]
+ lappend fidlist $fid
+
+ puts "\tSec002.b: Overwrite unused space in meta-page"
+ foreach f $fidlist {
+ fconfigure $f -translation binary
+ seek $f 100 start
+ set byte [read $f 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $f 100 start
+ puts -nonewline $f $newbyte
+ close $f
+ }
+ puts "\tSec002.c: Reopen modified databases"
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+ error_check_good db:$testfile1 $stat 1
+ error_check_good db:$testfile1:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+ error_check_good db:$testfile3 $stat 1
+ error_check_good db:$testfile3:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr $testfile4} db]
+ error_check_good db:$testfile4 $stat 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec002.d: Replace root page in encrypted w/ encrypted"
+ set fid1 [open $testfile1 r+]
+ set fid2 [open $testfile2 r+]
+ seek $fid1 $pagesize start
+ seek $fid2 $pagesize start
+ set root1 [read $fid1 $pagesize]
+ close $fid1
+ puts -nonewline $fid2 $root1
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ puts "\tSec002.e: Replace root page in encrypted w/ unencrypted"
+ set fid2 [open $testfile2 r+]
+ set fid4 [open $testfile4 r+]
+ seek $fid2 $pagesize start
+ seek $fid4 $pagesize start
+ set root4 [read $fid4 $pagesize]
+ close $fid4
+ puts -nonewline $fid2 $root4
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ cleanup $testdir NULL 1
+ puts "\tSec002 complete."
+}
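The overwrite loop in Sec002.b above flips a single byte in place using Tcl's binary scan/format. The same technique, pulled out as a standalone sketch for clarity; the proc name, path, and offset arguments here are illustrative and not part of the suite.

# Complement the byte at a given offset in a file, in place.
proc flip_byte_at { path offset } {
	set f [open $path r+]
	fconfigure $f -translation binary
	seek $f $offset start
	binary scan [read $f 1] c val
	seek $f $offset start
	# ~$val complements the signed 8-bit value read above.
	puts -nonewline $f [binary format c [expr {~$val}]]
	close $f
}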
diff --git a/libdb/test/shelltest.tcl b/libdb/test/shelltest.tcl
new file mode 100644
index 0000000..dadfaf0
--- /dev/null
+++ b/libdb/test/shelltest.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST scr###
+# TEST The scr### directories contain shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest { { run_one 0 }} {
+ source ./include.tcl
+ global shelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $srcdir ($test)..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
diff --git a/libdb/test/si001.tcl b/libdb/test/si001.tcl
new file mode 100644
index 0000000..215facc
--- /dev/null
+++ b/libdb/test/si001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sindex001
+# TEST Basic secondary index put/delete test
+proc sindex001 { methods {nentries 200} {tnum 1} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
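For orientation: callback_n, used by sindex001 above, is defined in sindex.tcl (added later in this patch) and returns a two-argument proc that DB invokes to derive a secondary key from each primary key/data pair. A hedged sketch of the shape such a callback takes; the proc name is illustrative and is not one of the framework's callbacks.

# Secondary key = first three characters of the primary datum.
# Duplicates are possible, so a real secondary built this way
# would need to be opened with -dup.
proc example_secondary_callback { pkey pdata } {
	return [string range $pdata 0 2]
}
# Association would then look like:
#	$pdb associate example_secondary_callback $sdb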
diff --git a/libdb/test/si002.tcl b/libdb/test/si002.tcl
new file mode 100644
index 0000000..3f5d8ca
--- /dev/null
+++ b/libdb/test/si002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sindex002
+# TEST Basic cursor-based secondary index put/delete test
+proc sindex002 { methods {nentries 200} {tnum 2} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts "\tSindex00$tnum.c: Secondary c_pget/primary put overwrite loop"
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
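One detail worth spelling out from the c_pget loop above: on a secondary cursor, pget returns {secondary-key primary-key primary-data} triples, whereas a plain get returns only {secondary-key primary-data}. A small sketch of walking a secondary with pget, assuming $sdbc is an already-open secondary cursor:

for { set t [$sdbc pget -first] } { [llength $t] > 0 } \
    { set t [$sdbc pget -next] } {
	# Each element of $t is a {skey pkey pdata} triple.
	foreach { skey pkey pdata } [lindex $t 0] {
		puts "$skey -> $pkey ($pdata)"
	}
}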
diff --git a/libdb/test/si003.tcl b/libdb/test/si003.tcl
new file mode 100644
index 0000000..6f14803
--- /dev/null
+++ b/libdb/test/si003.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sindex003
+# TEST sindex001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
+proc sindex003 { methods {nentries 200} {tnum 3} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [eval {berkdb_env -create -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline "\tSindex00$tnum.a: Put loop ... "
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "opening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts -nonewline "\tSindex00$tnum.b: Put/overwrite loop ... "
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+
+ # Close the secondaries again.
+ puts "closing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline \
+ "\tSindex00$tnum.c: Primary delete loop: deleting $half entries ..."
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/si004.tcl b/libdb/test/si004.tcl
new file mode 100644
index 0000000..d00af5d
--- /dev/null
+++ b/libdb/test/si004.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sindex004
+# TEST sindex002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+proc sindex004 { methods {nentries 200} {tnum 4} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline \
+ "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts -nonewline "\tSindex00$tnum.c:\
+ Secondary c_pget/primary put overwrite loop ... "
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+
+ # Close the secondaries again.
+ puts "\n\t\tclosing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/si005.tcl b/libdb/test/si005.tcl
new file mode 100644
index 0000000..43a621b
--- /dev/null
+++ b/libdb/test/si005.tcl
@@ -0,0 +1,179 @@
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Sindex005: Secondary index and join test.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Sindex005 does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+ env_cleanup $testdir
+
+ set pname "sindex00$tnum-primary.db"
+ set zipname "sindex00$tnum-zip.db"
+ set namename "sindex00$tnum-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+ puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+ s5_populate $pdb $nitems
+ puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+ s5_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc s5_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc s5_dojoin { item pdb zipdb namedb } {
+ set name [s5_getname "" $item]
+ set zip [s5_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [s5_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc s5_populate { db nitems } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [s5_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc s5_getzip { key data } { return [string range $data 0 4] }
+proc s5_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
+proc s5_name2zips { name } {
+ global alphabet
+
+ set n [expr [string first [string index $name 0] $alphabet] + 1]
+ error_check_bad starts_with_abc($name) $n -1
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [s5_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
+proc s5_nhash { name i j b } {
+ global alphabet
+
+ set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
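To make the "ZIP is a function of the name" scheme concrete: a word starting with "d" (the 4th letter of the alphabet) yields exactly four ZIP entries, each five digits ending in 000. A hedged sanity check, assuming the procs above and the test framework (which defines error_check_good and the alphabet global) are loaded:

set zips [s5_name2zips dog]
# "d" is the 4th letter, so exactly four entries come back.
error_check_good zip_count [llength $zips] 4
foreach z $zips {
	# Every entry has the form NN000, drawn from {00000 01000 ... 99000}.
	error_check_good zip_form [string match {[0-9][0-9]000} $z] 1
}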
diff --git a/libdb/test/si006.tcl b/libdb/test/si006.tcl
new file mode 100644
index 0000000..377ddd9
--- /dev/null
+++ b/libdb/test/si006.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST sindex006
+# TEST Basic secondary index put/delete test with transactions
+proc sindex006 { methods {nentries 200} {tnum 6} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ puts " with transactions"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+ $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -auto_commit [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set txn [$env txn]
+ set ret [$pdb del -txn $txn $keys($n)]
+ error_check_good pdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set txn [$env txn]
+ set ret [$sdb del -txn $txn $skey]
+ error_check_good sdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/sindex.tcl b/libdb/test/sindex.tcl
new file mode 100644
index 0000000..1fd27cb
--- /dev/null
+++ b/libdb/test/sindex.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Secondary index test driver and maintenance routines.
+#
+# Breaking from the usual convention, we put the driver function
+# for the secondary index tests here, in its own file. The reason
+# for this is that it's something which compartmentalizes nicely,
+# has little in common with other driver functions, and
+# is likely to be run on its own from time to time.
+#
+# The secondary index tests themselves live in si0*.tcl.
+
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+set nsecondaries 2
+
+# Run the secondary index tests.
+proc sindex { {verbose 0} args } {
+ global verbose_check_secondaries
+ set verbose_check_secondaries $verbose
+
+ # Run basic tests with a single secondary index and a small number
+ # of keys, then again with a larger number of keys. (Note that
+ # we can't go above 5000, since we use two items from our
+ # 10K-word list for each key/data pair.)
+ foreach n { 200 5000 } {
+ foreach pm { btree hash recno frecno queue queueext } {
+ foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+ sindex001 [list $pm $sm $sm] $n
+ sindex002 [list $pm $sm $sm] $n
+ # Skip tests 3 & 4 for large lists;
+ # they're not that interesting.
+ if { $n < 1000 } {
+ sindex003 [list $pm $sm $sm] $n
+ sindex004 [list $pm $sm $sm] $n
+ }
+
+ sindex006 [list $pm $sm $sm] $n
+ }
+ }
+ }
+
+ # Run secondary index join test. (There's no point in running
+ # this with both lengths; the primary is unhappy for now with fixed-
+ # length records (XXX), and we need unsorted dups in the secondaries.)
+ foreach pm { btree hash recno } {
+ foreach sm { btree hash } {
+ sindex005 [list $pm $sm $sm] 1000
+ }
+ sindex005 [list $pm btree hash] 1000
+ sindex005 [list $pm hash btree] 1000
+ }
+
+
+ # Run test with 50 secondaries.
+ foreach pm { btree hash } {
+ set methlist [list $pm]
+ for { set i 0 } { $i < 50 } { incr i } {
+ # XXX this should incorporate hash after #3726
+ if { $i % 2 == 0 } {
+ lappend methlist "dbtree"
+ } else {
+ lappend methlist "ddbtree"
+ }
+ }
+ sindex001 $methlist 500
+ sindex002 $methlist 500
+ sindex003 $methlist 500
+ sindex004 $methlist 500
+ }
+}
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
+proc callback_n { n } {
+ switch $n {
+ 0 { return _s_reversedata }
+ 1 { return _s_noop }
+ 2 { return _s_concatkeydata }
+ 3 { return _s_concatdatakey }
+ 4 { return _s_reverseconcat }
+ 5 { return _s_truncdata }
+ 6 { return _s_alwayscocacola }
+ }
+ return _s_noop
+}
+
+proc _s_reversedata { a b } { return [reverse $b] }
+proc _s_truncdata { a b } { return [string range $b 1 end] }
+proc _s_concatkeydata { a b } { return $a$b }
+proc _s_concatdatakey { a b } { return $b$a }
+proc _s_reverseconcat { a b } { return [reverse $a$b] }
+proc _s_alwayscocacola { a b } { return "Coca-Cola" }
+proc _s_noop { a b } { return $b }
+
+# Should the check_secondary routines print lots of output?
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
+ upvar $keyarr keys
+ upvar $dataarr data
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair is in the primary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1: Each key/data pair is in the primary"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ error_check_good pdb_get($i) [$pdb get $keys($i)] \
+ [list [list $keys($i) $data($i)]]
+ }
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ # Make sure each key/data pair is in this secondary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Each skey/key/data tuple is in secondary #$j"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set sdb [lindex $sdbs $j]
+ set skey [[callback_n $j] $keys($i) $data($i)]
+ error_check_good sdb($j)_pget($i) \
+ [$sdb pget -get_both $skey $keys($i)] \
+ [list [list $skey $keys($i) $data($i)]]
+ }
+
+ # Make sure this secondary contains only $nentries
+ # items.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.3: Secondary #$j has $nentries items"
+ }
+ set dbc [$sdb cursor]
+ error_check_good dbc($i) \
+ [is_valid_cursor $dbc $sdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+ { incr k } { }
+ error_check_good numitems($i) $k $nentries
+ error_check_good dbc($i)_close [$dbc close] 0
+ }
+
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.4: Primary has $nentries items"
+ }
+ set dbc [$pdb cursor]
+ error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+ error_check_good numitems $k $nentries
+ error_check_good pdbc_close [$dbc close] 0
+}
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair in the primary is in each secondary.
+ set pdbc [$pdb cursor]
+ error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+ set i 0
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1:\
+ Key/data in primary => key/data in secondaries"
+ }
+
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 0]
+ set pdata [lindex [lindex $dbt 0] 1]
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ set sdb [lindex $sdbs $j]
+ set sdbt [$sdb pget -get_both \
+ [[callback_n $j] $pkey $pdata] $pkey]
+ error_check_good pkey($pkey,$j) \
+ [lindex [lindex $sdbt 0] 1] $pkey
+ error_check_good pdata($pdata,$j) \
+ [lindex [lindex $sdbt 0] 2] $pdata
+ }
+ }
+ error_check_good ccs_pdbc_close [$pdbc close] 0
+ error_check_good primary_has_nentries $i $nentries
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Key/data in secondary #$j => key/data in primary"
+ }
+ set sdb [lindex $sdbs $j]
+ set sdbc [$sdb cursor]
+ error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+ set i 0
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries $i $nentries
+
+ # To exercise pget -last/pget -prev, we do it backwards too.
+ set i 0
+ for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -prev] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries_bkwds $i $nentries
+
+ error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+ }
+}
+
+# The secondary index tests take a list of the access methods that
+# each database ought to use.  Convert it in one pass into a list of
+# converted argses and omethods, one entry per method in the list.
+proc convert_argses { methods largs } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_args $m $largs]
+ }
+ return $ret
+}
+proc convert_methods { methods } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_method $m]
+ }
+ return $ret
+}
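A concrete view of the callback family defined above: for a primary pair with key "apple" and datum "banana", the callbacks produce the secondary keys shown below. This is an illustrative check only, and assumes testutils.tcl (which defines reverse and error_check_good) has been sourced.

error_check_good cb_reverse [[callback_n 0] apple banana] ananab
error_check_good cb_noop    [[callback_n 1] apple banana] banana
error_check_good cb_keydata [[callback_n 2] apple banana] applebanana
error_check_good cb_datakey [[callback_n 3] apple banana] bananaapple
error_check_good cb_cola    [[callback_n 6] apple banana] "Coca-Cola"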
diff --git a/libdb/test/sysscript.tcl b/libdb/test/sysscript.tcl
new file mode 100644
index 0000000..31b8f32
--- /dev/null
+++ b/libdb/test/sysscript.tcl
@@ -0,0 +1,282 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# System integration test script.
+# This script runs a single process that tests the full functionality of
+# the system. The database under test contains nfiles files. Each process
+# randomly generates a key and some data. Both keys and data are bimodally
+# distributed between small keys (1-10 characters) and large keys (the avg
+# length is indicated via the command line parameter.
+# The process then decides on a replication factor between 1 and nfiles.
+# It writes the key and data to that many files and tacks on the file ids
+# of the files it writes to the data string. For example, let's say that
+# I randomly generate the key dog and data cat. Then I pick a replication
+# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
+# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
+# pair to each file and then commit. Notice that I may generate replication
+# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
+#
+# Usage: sysscript dir nfiles key_avg data_avg
+#
+# dir: DB_HOME directory
+# nfiles: number of files in the set
+# key_avg: average big key size
+# data_avg: average big data size
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set mypid [pid]
+
+set usage "sysscript dir nfiles key_avg data_avg method"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+puts [concat "Argc: " $argc " Argv: " $argv]
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set nfiles [ lindex $argv 1 ]
+set key_avg [ lindex $argv 2 ]
+set data_avg [ lindex $argv 3 ]
+set method [ lindex $argv 4 ]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts "Beginning execution for $mypid"
+puts "$dir DB_HOME"
+puts "$nfiles files"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+
+flush stdout
+
+# Create local environment
+set dbenv [berkdb_env -txn -home $dir]
+set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
+if {$err != 0} {
+ puts $ret
+ return
+}
+
+# Now open the files
+for { set i 0 } { $i < $nfiles } { incr i } {
+ set file test044.$i.db
+ set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
+ set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \
+ error] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+set record_based [is_record_based $method]
+while { 1 } {
+ # Decide if we're going to create a big key or a small key
+ # We give small keys a 70% chance.
+ if { [berkdb random_int 1 10] < 8 } {
+ set k [random_data 5 0 0 $record_based]
+ } else {
+ set k [random_data $key_avg 0 0 $record_based]
+ }
+ set data [chop_data $method [random_data $data_avg 0 0]]
+
+ set txn [$dbenv txn]
+ set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
+ $dbenv.txn] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+
+ # Open cursors
+ for { set f 0 } {$f < $nfiles} {incr f} {
+ set cursors($f) [$db($f) cursor -txn $txn]
+ set err [catch {error_check_good $mypid:cursor_open \
+ [is_substr $cursors($f) $db($f)] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set aborted 0
+
+ # Check to see if key is already in database
+ set found 0
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set r [$db($i) get -txn $txn $k]
+ if { $r == "-1" } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:txn_abort \
+ [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ set found 2
+ break
+ } elseif { $r != "Key $k not found." } {
+ set found 1
+ break
+ }
+ }
+ switch $found {
+ 2 {
+ # Transaction aborted, no need to do anything.
+ }
+ 0 {
+ # Key was not found, decide how much to replicate
+ # and then create a list of that many file IDs.
+ set repl [berkdb random_int 1 $nfiles]
+ set fset ""
+ for { set i 0 } { $i < $repl } {incr i} {
+ set f [berkdb random_int 0 [expr $nfiles - 1]]
+ lappend fset $f
+ set data [chop_data $method $f:$data]
+ }
+
+ foreach i $fset {
+ set r [$db($i) put -txn $txn $k $data]
+ if {$r == "-1"} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ }
+ }
+ 1 {
+ # Key was found. Make sure that all the data values
+ # look good.
+ set f [zero_list $nfiles]
+ set data $r
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $f $fnum] == 0 } {
+ set flag -set
+ set full [record $cursors($fnum) get -set $k]
+ } else {
+ set flag -next
+ set full [record $cursors($fnum) get -next]
+ }
+ if {[llength $full] == 0} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ set err [catch {error_check_bad \
+ $mypid:curs_get($k,$data,$fnum,$flag) \
+ [string length $full] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [pad_data $method [lindex [lindex $full 0] 1]]
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:key $key $k} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:data($k) $rec $data} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set f [lreplace $f $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+ if { $aborted == 0 } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:commit [$txn commit] \
+ 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+}
+
+# Close files
+for { set i 0 } { $i < $nfiles} { incr i } {
+ set r [$db($i) close]
+ set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+# Close the environment
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+flush stdout
+
+filecheck $file 0
diff --git a/libdb/test/test.tcl b/libdb/test/test.tcl
new file mode 100644
index 0000000..c40d460
--- /dev/null
+++ b/libdb/test/test.tcl
@@ -0,0 +1,1863 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+
+# Load DB's TCL API.
+load $tcllib
+
+if { [file exists $testdir] != 1 } {
+ file mkdir $testdir
+}
+
+global __debug_print
+global __debug_on
+global __debug_test
+global util_path
+
+#
+# Check whether the utilities can be exec'd directly in order to figure out
+# the path.  Most systems use ., but QNX has a problem with execvp of shell
+# scripts, which causes it to break.
+#
+set stat [catch {exec ./db_printlog -?} ret]
+if { [string first "exec format error" $ret] != -1 } {
+ set util_path ./.libs
+} else {
+ set util_path .
+}
+set __debug_print 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
+
+# This is where the test numbering and parameters now live.
+source $test_path/testparams.tcl
+
+# Error stream that (should!) always go to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+foreach sub $subs {
+ if { [info exists num_test($sub)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ continue
+ }
+ set end $num_test($sub)
+ for { set i 1 } { $i <= $end } {incr i} {
+ set name [format "%s%03d.tcl" $sub $i]
+ source $test_path/$name
+ }
+}
+
+source $test_path/archive.tcl
+source $test_path/byteorder.tcl
+source $test_path/dbm.tcl
+source $test_path/hsearch.tcl
+source $test_path/join.tcl
+source $test_path/logtrack.tcl
+source $test_path/ndbm.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
+source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sindex.tcl
+source $test_path/testutils.tcl
+source $test_path/upgrade.tcl
+
+set dict $test_path/wordlist
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+# Random number seed.
+global rand_init
+set rand_init 101301
+
+# Default record length and padding character for
+# fixed record length access method(s)
+set fixed_len 20
+set fixed_pad 0
+
+set recd_debug 0
+set log_log_record_types 0
+set ohandles {}
+
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
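+# (Illustrative note, not in the original file: the four values above split
+# the 32-bit ID space in half -- 2147483647 is 2^31 - 1 and 4294967295 is
+# 2^32 - 1 -- so locker IDs wrap within [0, 2^31 - 1] and transaction IDs
+# within [2^31, 2^32 - 1].)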
+
+# Set up any OS-specific values
+global tcl_platform
+set is_windows_test [is_substr $tcl_platform(os) "Win"]
+set is_hp_test [is_substr $tcl_platform(os) "HP-UX"]
+set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
+
+# From here on out, test.tcl contains the procs that are used to
+# run all or part of the test suite.
+
+proc run_std { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set am_only 0
+ set no_am 0
+ set std_only 1
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ A {
+ set std_only 0
+ }
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
+ m {
+ set am_only 1
+ puts "run_std: access method tests only."
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ if { $std_only == 1 } {
+ fileremove -f ALL.OUT
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ }
+
+ set test_list {
+ {"environment" "env"}
+ {"archive" "archive"}
+ {"locking" "lock"}
+ {"logging" "log"}
+ {"memory pool" "memp"}
+ {"mutex" "mutex"}
+ {"transaction" "txn"}
+ {"deadlock detection" "dead"}
+ {"subdatabase" "sdb"}
+ {"byte-order" "byte"}
+ {"recno backing file" "rsrc"}
+ {"DBM interface" "dbm"}
+ {"NDBM interface" "ndbm"}
+ {"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
+ }
+
+ if { $am_only == 0 } {
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $cmd" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh instantiations
+ # so we don't require so much memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+		# Note that we still wrap the test in an exec so that
+		# its output goes to ALL.OUT.  run_recd will wrap each test
+		# so that both error streams go to stdout (which here goes
+		# to ALL.OUT); anything run_recd wants to print to the
+		# "real" stderr outside the per-test wrapping, such as which
+		# tests are being skipped, can still be sent to stderr.
+ puts "Running recovery tests"
+ if [catch {
+ exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd tests"
+ close $o
+ }
+
+ # Run join test
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ puts "Running join test"
+ foreach i "join1 join2 join3 join4 join5 join6" {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $i" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $i test"
+ close $o
+ }
+ }
+ }
+
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach i \
+ "btree hash queue queueext recno rbtree frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method -$i $j $j $display $run $o
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:\
+ [format "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ # If running in the context of the larger 'run_all' we don't
+ # check for failure here either.
+ if { $run == 0 || $std_only == 0 } {
+ return
+ }
+
+ set failed [check_failed_run ALL.OUT]
+
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
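+# Usage sketch (illustrative, not part of the original file): the suite is
+# normally driven from an interactive tclsh started in the build directory
+# (the one containing include.tcl), e.g.
+#
+#	% source path/to/test.tcl
+#	% run_std
+#
+# "run_std -n" only lists the commands that would be run instead of
+# executing them.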
+
+proc check_failed_run { file {text "^FAIL"}} {
+ set failed 0
+ set o [open $file r]
+ while { [gets $o line] >= 0 } {
+ set ret [regexp $text $line]
+ if { $ret != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+
+ return $failed
+}
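+# For example, [check_failed_run ALL.OUT] returns 1 if any line of ALL.OUT
+# matches ^FAIL, and 0 otherwise.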
+
+proc r { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set saveflags "--"
+ foreach f $flags {
+ switch $f {
+ n {
+ set display 1
+ set run 0
+ set saveflags "-n $saveflags"
+ }
+ }
+ }
+
+ if {[catch {
+ set sub [ lindex $args 0 ]
+ switch $sub {
+ byte {
+ if { $display } {
+ puts "run_test byteorder"
+ }
+ if { $run } {
+ check_handles
+ run_test byteorder
+ }
+ }
+ archive -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest -
+ sindex {
+ if { $display } { puts "r $sub" }
+ if { $run } {
+ check_handles
+ $sub
+ }
+ }
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } { run_subsystem $sub 1 0 }
+ if { $run } {
+ run_subsystem $sub
+ }
+ }
+ join {
+ eval r $saveflags join1
+ eval r $saveflags join2
+ eval r $saveflags join3
+ eval r $saveflags join4
+ eval r $saveflags join5
+ eval r $saveflags join6
+ }
+ join1 {
+ if { $display } { puts jointest }
+ if { $run } {
+ check_handles
+ jointest
+ }
+ }
+ joinbench {
+ puts "[timestamp]"
+ eval r $saveflags join1
+ eval r $saveflags join2
+ puts "[timestamp]"
+ }
+ join2 {
+ if { $display } { puts "jointest 512" }
+ if { $run } {
+ check_handles
+ jointest 512
+ }
+ }
+ join3 {
+ if { $display } {
+ puts "jointest 8192 0 -join_item"
+ }
+ if { $run } {
+ check_handles
+ jointest 8192 0 -join_item
+ }
+ }
+ join4 {
+ if { $display } { puts "jointest 8192 2" }
+ if { $run } {
+ check_handles
+ jointest 8192 2
+ }
+ }
+ join5 {
+ if { $display } { puts "jointest 8192 3" }
+ if { $run } {
+ check_handles
+ jointest 8192 3
+ }
+ }
+ join6 {
+ if { $display } { puts "jointest 512 3" }
+ if { $run } {
+ check_handles
+ jointest 512 3
+ }
+ }
+ recd {
+ check_handles
+ run_recds $run $display [lrange $args 1 end]
+ }
+ rep {
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_repmethod 0 $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_repmethod 0 $j $j
+ }
+ }
+ for { set i 1 } \
+ { $i <= $num_test(rep) } {incr i} {
+ set test [format "%s%03d" $sub $i]
+ if { $i == 2 } {
+ if { $run } {
+ puts "Skipping rep002 \
+ (waiting on SR #6195)"
+ }
+ continue
+ }
+ if { $display } {
+ puts "run_test $test"
+ }
+ if { $run } {
+ run_test $test
+ }
+ }
+ }
+ rpc {
+ if { $display } { puts "r $sub" }
+ global rpc_svc svc_list
+ set old_rpc_src $rpc_svc
+ foreach rpc_svc $svc_list {
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc] } {
+ continue
+ }
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+ run_test run_rpcmethod
+ }
+ set rpc_svc $old_rpc_src
+ }
+ sec {
+ if { $display } {
+ run_subsystem $sub 1 0
+ }
+ if { $run } {
+ run_subsystem $sub 0 1
+ }
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_secmethod $j $j"
+ puts "eval run_test \
+ run_secenv $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_secmethod $j $j
+ eval run_test \
+ run_secenv $j $j
+ }
+ }
+ }
+ sdb {
+ if { $display } {
+ puts "eval r $saveflags sdbtest"
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ puts "eval run_test \
+ subdb $j $j"
+ }
+ }
+ if { $run } {
+ eval r $saveflags sdbtest
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ eval run_test subdb $j $j
+ }
+ }
+ }
+ btree -
+ rbtree -
+ hash -
+ queue -
+ queueext -
+ recno -
+ frecno -
+ rrecno {
+ eval run_method [lindex $args 0] \
+ 1 0 $display $run [lrange $args 1 end]
+ }
+
+ default {
+ error \
+ "FAIL:[timestamp] r: $args: unknown command"
+ }
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp] r: $args: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
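+# Usage sketch (illustrative): "r" dispatches on its first argument, e.g.
+#
+#	r btree		;# run the btree access method tests
+#	r lock		;# run the locking subsystem tests
+#	r -n recd	;# list the recovery tests without running them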
+
+proc run_subsystem { prefix { display 0 } { run 1} } {
+ global num_test
+ if { [info exists num_test($prefix)] != 1 } {
+		puts stderr "Subsystem $prefix has no number of tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ for { set i 1 } { $i <= $num_test($prefix) } {incr i} {
+ set name [format "%s%03d" $prefix $i]
+ if { $display } {
+ puts "eval $name"
+ }
+ if { $run } {
+ check_handles
+ catch {eval $name}
+ }
+ }
+}
+
+proc run_test { testname args } {
+ source ./include.tcl
+ foreach method "hash queue queueext recno rbtree frecno rrecno btree" {
+ check_handles
+ eval $testname -$method $args
+ verify_dir $testdir "" 1
+ }
+}
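+# For example, "run_test test001" (illustrative) runs test001 once for each
+# access method in the list above, verifying the test directory after each
+# run.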
+
+proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ if { $run == 1 } {
+ puts $outfile "run_method: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args"
+ puts $outfile " ; verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $args
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ # verify all databases the test leaves behind
+ verify_dir $testdir "" 1
+ if { $__debug_on != 0 } {
+ debug
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_method: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
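+# Usage sketch (illustrative): "run_method -btree 1 5" runs test001 through
+# test005 with the btree access method; with display on and run off it only
+# prints the commands, which is how run_std and run_all log their dry runs.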
+
+proc run_rpcmethod { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ global rpc_svc
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcmethod: $method $start $stop $largs"
+
+ set save_largs $largs
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRun_rpcmethod.a: starting server, pid $dpid"
+ tclsleep 10
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ set stat [catch {eval txn003} res]
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ tclkill $dpid
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ tclkill $dpid
+}
+
+proc run_rpcnoserver { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcnoserver: $method $start $stop $largs"
+
+ set save_largs $largs
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcnoserver: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+	}
+	set is_envmethod 0
+
+}
+
+#
+# Run method tests in secure mode.
+#
+proc run_secmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+
+ append largs " -encryptaes $passwd "
+ eval run_method $method $start $stop $display $run $outfile $largs
+}
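+# A minimal sketch of how this gets driven (see the "sec" branch of proc r
+# above): "run_test run_secmethod 1 1" calls run_secmethod once per access
+# method, e.g. run_secmethod -hash 1 1, adding -encryptaes to each test.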
+
+#
+# Run method tests in its own, new secure environment.
+#
+proc run_secenv { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_secenv: $method $start $stop $largs"
+
+ set save_largs $largs
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $testdir -encryptaes $passwd \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+
+ #
+			# Run each test twice in the secure env:
+			# once with a secure env + clear database,
+			# once with a secure env + encrypted database.
+ #
+ eval $name $method $parms($name) $largs
+ append largs " -encrypt "
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir -encryptaes $passwd] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_secenv: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+	}
+	set is_envmethod 0
+
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set c [string index $test 0]
+ if { $c == "s" } {
+ set i [string range $test 1 end]
+ set name [format "subdb%03d" $i]
+ } else {
+ set i $test
+ set name [format "test%03d" $i]
+ }
+ puts "run_reptest: $method $name"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline \
+ "Repl: $name: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_repmethod { method {numcl 0} {start 1} {stop 0} {display 0}
+ {run 1} {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ berkdb srand $rand_init
+
+	#
+	# Run the subdb tests first, then the regular tests.  Whether each
+	# iteration uses crypto, out-of-order messages and delete
+	# verification is chosen at random below.
+	#
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_repmethod $method \
+ 0 $i $i 0 1 stdout $largs"
+ }
+ }
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+		# Use a weighted list for the number of clients because we
+		# really don't want to evenly-weight all numbers of clients.
+		# Favor smaller numbers but test more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
+#
+proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout } { largs "" } } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_envmethod $method \
+ $i $i 0 1 stdout $largs"
+ }
+ }
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Run both subdb and normal tests for as long as there are
+ # some of each type. Start with the subdbs:
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ # Subdb tests are done, now run through the regular tests:
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout} args} {
+ global num_test testdir
+ global parms
+
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args;"
+ puts $outfile "verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ eval $name $method $parms($name) $args
+ verify_dir $testdir "" 1
+ }
+ flush stdout
+ flush stderr
+ }
+}
+
+proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global parms
+ global num_test
+ global log_log_record_types
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(recd)
+ }
+ if { $run == 1 } {
+ puts "run_recd: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "recd%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Recd%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts "eval $name $method $parms($name) $args"
+ }
+ if { $run } {
+ check_handles
+ puts "[timestamp]"
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types \
+ $log_log_record_types; eval $name \
+ $method $parms($name) $args" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_recd: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+proc run_recds { {run 1} {display 0} args } {
+ global log_log_record_types
+
+ set log_log_record_types 1
+ logtrack_init
+ foreach method \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ check_handles
+ if { [catch {eval \
+ run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
+ puts $ret
+ }
+ }
+ if { $run } {
+ logtrack_summary
+ }
+ set log_log_record_types 0
+}
+
+proc run_all { args } {
+ global num_test
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set parallel 0
+ set nparalleltests 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $args
+
+ set test_pagesizes [get_test_pagesizes]
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ foreach pgsz $test_pagesizes {
+ set args $save_args
+ append args " -pagesize $pgsz -chksum"
+ if { $am_only == 0 } {
+ # Run recovery tests.
+ #
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
+ # XXX These too are broken into separate tclsh
+ # instantiations so we don't require so much
+ # memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
+ }
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so
+ # we don't require so much memory.
+ foreach i \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests with pagesize $pgsz"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_method -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {run_method -$i $j $j \
+ $display $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: [format \
+ "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+
+ #
+ # Run subdb tests with varying pagesizes too.
+ #
+ for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {subdb -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {subdb -$i $j $j $display \
+ $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in a txn env"
+ for { set j 1 } { $j <= $num_test(test) } { incr j } {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod -$i $j $j $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_envmethod -$i $j $j \
+ $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: run_envmethod $i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ #
+ # Run tests using proc r. The replication tests have been
+ # moved from run_std to run_all.
+ #
+ set test_list {
+ {"replication" "rep"}
+ {"security" "sec"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ append test_list {{"RPC" "rpc"}}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
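+# Usage sketch (illustrative): run_all performs the standard run_std pass,
+# repeats the access method and subdb tests at the non-default page sizes
+# returned by get_test_pagesizes, runs the access method tests again in
+# transactional environments, and finishes with the replication, security
+# and (if the RPC servers were built) RPC tests.  "run_all -n" only lists
+# the commands instead of executing them.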
+
+#
+# Run method tests in one environment. (As opposed to run_envmethod
+# which runs each test in its own, new environment.)
+#
+proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ if { $run == 1 } {
+ puts "run_envmethod1: $method $start $stop $args"
+ }
+
+ set is_envmethod 1
+ if { $run == 1 } {
+ check_handles
+ env_cleanup $testdir
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-mode 0644 -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+ }
+
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+			    run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# We want to test all of 512-byte, 8KB, and 32KB pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
+proc get_test_pagesizes { } {
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+
+ error_check_good gtp_close [$db close] 0
+
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 32768} }
+ 8192 { return {512 32768} }
+ 32768 { return {512 8192} }
+ default { return {512 8192 32768} }
+ }
+ error_check_good NOTREACHED 0 1
+}
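+# For example, if the in-memory btree reports a default page size of 8192,
+# the proc above returns {512 32768}, so run_all re-runs the access method
+# tests at those two sizes only.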
diff --git a/libdb/test/test001.tcl b/libdb/test/test001.tcl
new file mode 100644
index 0000000..31fe437
--- /dev/null
+++ b/libdb/test/test001.tcl
@@ -0,0 +1,247 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} {noclean 0} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ # Sadly enough, we are using start in two different ways.
+		# In test090, it is used to test really big record numbers
+ # in queue. In replication, it is used to be able to run
+ # different iterations of this test using different key/data
+ # pairs. We try to hide all that magic here.
+ puts "\tStarting at $start"
+
+ if { $tnum != 90 } {
+ set did [open $dict]
+ for { set nlines 0 } { [gets $did str] != -1 } \
+ { incr nlines} {
+ }
+ close $did
+ if { $start + $nentries > $nlines } {
+ set start [expr $nlines - $nentries]
+ }
+ }
+ }
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ if { $noclean == 0 } {
+ cleanup $testdir $env
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ if { $start != 0 && $tnum != 90 } {
+ # Skip over "start" entries
+ for { set count 0 } { $count < $start } { incr count } {
+ gets $did str
+ }
+ set count 0
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ }
+ close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ # If this is test 90, we're checking wrap and we really
+ # only added nentries number of items starting at start.
+ # However, if this isn't 90, then we started at start and
+		# added an additional nentries items.
+ if { $tnum == 90 } {
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ } else {
+ for { set i 1 } { $i <= $nentries + $start } {incr i} {
+ puts $oid $i
+ }
+ }
+ close $oid
+ } else {
+ set q q
+ # We assume that when this is used with start != 0, the
+ # test database accumulates data
+ filehead [expr $nentries + $start] $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test001; data should be the reverse of the key
+proc test001.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc test001_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
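+# Usage sketch (illustrative): test001 is normally driven by run_method or
+# run_envmethod, e.g. "run_method -btree 1 1", but after sourcing test.tcl
+# it can also be called directly, e.g. "test001 -btree 1000" to use only
+# the first 1000 dictionary entries.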
diff --git a/libdb/test/test002.tcl b/libdb/test/test002.tcl
new file mode 100644
index 0000000..f227880
--- /dev/null
+++ b/libdb/test/test002.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+
+proc test002 { method {nentries 10000} args } {
+ global datastr
+ global pad_datastr
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test002.db
+ set env NULL
+ } else {
+ set testfile test002.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ set pad_datastr [pad_data $method $datastr]
+ puts "\tTest002.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+
+ error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest002.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest002.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test002; data should be the fixed data string
+proc test002.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
diff --git a/libdb/test/test003.tcl b/libdb/test/test003.tcl
new file mode 100644
index 0000000..fac89d6
--- /dev/null
+++ b/libdb/test/test003.tcl
@@ -0,0 +1,210 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
+proc test003 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if {[is_fixed_length $method] == 1} {
+ puts "Test003 skipping for method $method"
+ return
+ }
+ puts "Test003: $method ($args) filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test003.db
+ set env NULL
+ } else {
+ set testfile test003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test003_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc test003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $limit } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 1 $limit]
+ }
+ }
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid [pad_data $method $data]
+ }
+ close $fid
+
+ error_check_good \
+ Test003:diff($f,$t4) [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the entries in the
+ # current directory
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest003.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest003.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test003; key should be file name; data should be contents
+proc test003.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test003:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc test003_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+	error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+	set fname $names($binfile)
+ error_check_good Test003:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
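
Test003 keys the database by file name and stores each file's raw bytes as the data, so the read has to happen in binary mode or newline translation would corrupt the comparison. A plain-Tcl sketch of that read (the file name is hypothetical; no Berkeley DB calls are involved):

# Slurp a file's exact bytes; -translation binary disables newline and
# encoding conversion so the stored data matches the on-disk contents.
# ./somefile.bin is a placeholder name.
set fid [open ./somefile.bin r]
fconfigure $fid -translation binary
set contents [read $fid]
close $fid
puts "read [string length $contents] bytes"
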
diff --git a/libdb/test/test004.tcl b/libdb/test/test004.tcl
new file mode 100644
index 0000000..fcf0214
--- /dev/null
+++ b/libdb/test/test004.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
+proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum test00$reopen
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set kvals ""
+ puts "\tTest00$reopen.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ lappend kvals $str
+ } else {
+ set key $str
+ }
+
+ set datastr [ make_data_str $str ]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tnum:put" $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+ if { $build_only == 1 } {
+ return $db
+ }
+ if { $reopen == 5 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ puts "\tTest00$reopen.b: get/delete loop"
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+
+ set count 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ if { [is_record_based $method] == 1 } {
+ set datastr \
+ [make_data_str [lindex $kvals [expr $k - 1]]]
+ } else {
+ set datastr [make_data_str $k]
+ }
+ error_check_good $tnum:$k $d2 [pad_data $method $datastr]
+ puts $outf $k
+ $c del
+ if { [is_record_based $method] == 1 && \
+ $do_renumber == 1 } {
+ set kvals [lreplace $kvals 0 0]
+ }
+ incr count
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ error_check_good test00$reopen:keys_deleted $count $nentries
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
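
Test004 (and test005 via the reopen variant below) is built around a single cursor that walks the database from the first record to the last, checking and deleting each one. A minimal sketch of that traversal, assuming $db is an already-open, non-transactional database handle:

# Visit every record in order and delete it through the cursor.
set dbc [$db cursor]
for { set pair [$dbc get -first] } { [llength $pair] != 0 } \
    { set pair [$dbc get -next] } {
	set k [lindex [lindex $pair 0] 0]
	set d [lindex [lindex $pair 0] 1]
	puts "deleting $k -> $d"
	$dbc del	;# removes the record the cursor is positioned on
}
$dbc close
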
diff --git a/libdb/test/test005.tcl b/libdb/test/test005.tcl
new file mode 100644
index 0000000..fe702bc
--- /dev/null
+++ b/libdb/test/test005.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
+proc test005 { method {nentries 10000} args } {
+ eval {test004 $method $nentries 5 0} $args
+}
diff --git a/libdb/test/test006.tcl b/libdb/test/test006.tcl
new file mode 100644
index 0000000..851bdac
--- /dev/null
+++ b/libdb/test/test006.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
+proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { $tnum < 10 } {
+ set tname Test00$tnum
+ set dbname test00$tnum
+ } else {
+ set tname Test0$tnum
+ set dbname test0$tnum
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$dbname.db
+ set env NULL
+ } else {
+ set testfile $dbname.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1 ]
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: put $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ if { $reopen == 1 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set count 0
+ set did [open $dict]
+ set key 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { $do_renumber == 1 } {
+ set key 1
+ } elseif { [is_record_based $method] == 1 } {
+ incr key
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: get $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del:$key $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/test007.tcl b/libdb/test/test007.tcl
new file mode 100644
index 0000000..f61812c
--- /dev/null
+++ b/libdb/test/test007.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issues delete by key for each
+# TEST entry.
+proc test007 { method {nentries 10000} {tnum 7} args} {
+ eval {test006 $method $nentries 1 $tnum} $args
+}
diff --git a/libdb/test/test008.tcl b/libdb/test/test008.tcl
new file mode 100644
index 0000000..ab3f4d6
--- /dev/null
+++ b/libdb/test/test008.tcl
@@ -0,0 +1,200 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries; deleting some pairs and then re-adding them.
+proc test008 { method {reopen 8} {debug 0} args} {
+ source ./include.tcl
+
+ set tnum test00$reopen
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test00$reopen skipping for method $method"
+ return
+ }
+
+ puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
+ if {$reopen == 9} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ set count 0
+ puts "\tTest00$reopen.a: Initial put/get loop"
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good Test00$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+	# Now we will step through the keys again (by increments) and
+ # delete all the entries, then re-insert them.
+
+ puts "\tTest00$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db del} $txn {$names($ndx)}]
+ error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now, reopen the file and make sure the key/data pairs look right.
+ puts "\tTest00$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest00$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+}
+
+proc test008.check { binfile tmpfile } {
+ global tnum
+ source ./include.tcl
+
+ error_check_good diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/libdb/test/test009.tcl b/libdb/test/test009.tcl
new file mode 100644
index 0000000..7a7c126
--- /dev/null
+++ b/libdb/test/test009.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method 9 0} $args
+}
diff --git a/libdb/test/test010.tcl b/libdb/test/test010.tcl
new file mode 100644
index 0000000..f308c2c
--- /dev/null
+++ b/libdb/test/test010.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
+ source ./include.tcl
+
+ set omethod $method
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.a: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
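
Test010 depends on the database being opened with -dup, so repeated puts under the same key create duplicate records, and on cursor positioning with -set followed by -next to walk a key's duplicate set. A minimal sketch with hypothetical names (dups.db and the key apple are placeholders):

# -dup permits several data items under one key.
set db [berkdb_open -create -mode 0644 -dup -btree dups.db]
foreach d {1:apple 2:apple 3:apple} {
	$db put apple $d
}

# Position on the key, then walk forward until the key changes.
set dbc [$db cursor]
for { set pair [$dbc get -set apple] } { [llength $pair] != 0 } \
    { set pair [$dbc get -next] } {
	set k [lindex [lindex $pair 0] 0]
	if { [string compare $k apple] != 0 } {
		break
	}
	puts [lindex [lindex $pair 0] 1]
}
$dbc close
$db close
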
diff --git a/libdb/test/test011.tcl b/libdb/test/test011.tcl
new file mode 100644
index 0000000..f25ed43
--- /dev/null
+++ b/libdb/test/test011.tcl
@@ -0,0 +1,470 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last add_before, add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
+proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
+ global dlist
+ global rand_init
+ source ./include.tcl
+
+ set dlist ""
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ test011_recno $method $nentries $tnum $args
+ return
+ }
+ if {$ndups < 5} {
+ set ndups 5
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "Test0$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add dups with values 1, 3, ... $ndups. Then we'll add
+ # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
+ # add before and add after.
+ puts "\tTest0$tnum.a: put and get duplicate keys."
+ set i ""
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ lappend dlist $i
+ }
+ set maxodd $i
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$str $datastr}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str ]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+
+ error_check_good Test0$tnum:put $d $str
+ set id [ id_of $datastr ]
+ error_check_good Test0$tnum:dup# $id $x
+ incr x 2
+ }
+ error_check_good Test0$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: \
+ traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: \
+ traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keyfirst" 0 0
+ set dlist [linsert $dlist 0 0]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
+ lappend dlist [expr $maxodd - 1]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-before" 2 3
+ set dlist [linsert $dlist 2 2]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-after" 4 4
+ set dlist [linsert $dlist 4 4]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
+proc add_dup {db txn nentries flag dataval iter} {
+ source ./include.tcl
+
+ set dbc [eval {$db cursor} $txn]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set datastr $dataval:$str
+ set ret [$dbc get "-set" $str]
+ error_check_bad "cget(SET)" [is_substr $ret Error] 1
+ for { set i 1 } { $i < $iter } { incr i } {
+ set ret [$dbc get "-next"]
+ error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
+ }
+
+ if { [string compare $flag "-before"] == 0 ||
+ [string compare $flag "-after"] == 0 } {
+ set ret [$dbc put $flag $datastr]
+ } else {
+ set ret [$dbc put $flag $str $datastr]
+ }
+ error_check_good "$dbc put $flag" $ret 0
+ incr count
+ }
+ close $did
+ $dbc close
+}
+
+proc test011_recno { method {nentries 10000} {tnum 11} largs } {
+ global dlist
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set renum [is_rrecno $method]
+
+ puts "Test0$tnum: \
+ $method ($largs) $nentries test cursor insert functionality"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ if {$renum == 1} {
+ append largs " -renumber"
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $largs {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # The basic structure of the test is that we pick a random key
+	# in the database and then add items before and after it. The
+ # trickiness is that with RECNO, these are not duplicates, they
+ # are creating new keys. Therefore, every time we do this, the
+ # keys assigned to other values change. For this reason, we'll
+ # keep the database in tcl as a list and insert properly into
+ # it to verify that the right thing is happening. If we do not
+ # have renumber set, then the BEFORE and AFTER calls should fail.
+
+ # Seed the database with an initial record
+ gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good put $ret 0
+ set count 1
+
+ set dlist "NULL $str"
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ puts "\tTest0$tnum.a: put and get entries"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Pick a random key
+ set key [berkdb random_int 1 $count]
+ set ret [$dbc get -set $key]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:SET:key $k $key
+ error_check_good \
+ cget:SET $d [pad_data $method [lindex $dlist $key]]
+
+ # Current
+ set ret [$dbc put -current [chop_data $method $str]]
+ error_check_good cput:$key $ret 0
+ set dlist [lreplace $dlist $key $key [pad_data $method $str]]
+
+ # Before
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+
+ if { $renum == 1 } {
+ set ret [$dbc put \
+ -before [chop_data $method $str]]
+ error_check_good cput:$key:BEFORE $ret $key
+ set dlist [linsert $dlist $key $str]
+ incr count
+
+ # After
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+ set ret [$dbc put \
+ -after [chop_data $method $str]]
+ error_check_good cput:$key:AFTER $ret [expr $key + 1]
+ set dlist [linsert $dlist [expr $key + 1] $str]
+ incr count
+ }
+
+ # Now verify that the keys are in the right place
+ set i 0
+ for {set ret [$dbc get "-set" $key]} \
+ {[string length $ret] != 0 && $i < 3} \
+ {set ret [$dbc get "-next"] } {
+ set check_key [expr $key + $i]
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good cget:$key:loop $k $check_key
+
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:data $d \
+ [pad_data $method [lindex $dlist $check_key]]
+ incr i
+ }
+ }
+ close $did
+ error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create check key file.
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-first" "-next"
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-last" "-prev"
+
+ filesort $t1 $t3 -n
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+}
+
+proc test011_check { key data } {
+ global dlist
+
+ error_check_good "get key $key" $data [lindex $dlist $key]
+}
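
The add_dup helper above relies on the four cursor put flags to control where a new duplicate lands. A minimal sketch of their semantics, assuming $db was opened with -dup and the key apple already has duplicates (all names illustrative):

# -keyfirst / -keylast take a key and place the item at the head or tail of
# that key's duplicate set; -before / -after insert relative to the current
# cursor position and therefore take only the data item.
set dbc [$db cursor]
$dbc put -keyfirst apple 0:apple	;# becomes the first duplicate
$dbc put -keylast apple 9:apple		;# becomes the last duplicate
set pair [$dbc get -set apple]		;# position on the first duplicate
$dbc put -after 1:apple			;# insert just after the cursor
$dbc put -before 0.5:apple		;# insert just before the cursor
$dbc close
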
diff --git a/libdb/test/test012.tcl b/libdb/test/test012.tcl
new file mode 100644
index 0000000..0ce95c0
--- /dev/null
+++ b/libdb/test/test012.tcl
@@ -0,0 +1,139 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc test012 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test012 skipping for method $method"
+ return
+ }
+
+ puts "Test012: $method ($args) filename=data filecontents=key pairs"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test012.db
+ set env NULL
+ } else {
+ set testfile test012.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ puts "\tTest012.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file_as_key $db $txn $pflags $f
+
+ set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the data to see if they match the .o and dbtest files
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest012.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-first" "-next"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest012.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test012.check\
+ dump_binkey_file_direction "-last" "-prev"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test012; key should be the file contents; data should be the file name
+proc test012.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test012:diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/libdb/test/test013.tcl b/libdb/test/test013.tcl
new file mode 100644
index 0000000..11c8c1c
--- /dev/null
+++ b/libdb/test/test013.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that the NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
+proc test013 { method {nentries 10000} args } {
+ global errorCode
+ global errorInfo
+ global fixed_pad
+ global fixed_len
+
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test013.db
+ set env NULL
+ } else {
+ set testfile test013.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test013_recno.check
+ append gflags " -recno"
+ global kvals
+ } else {
+ set checkfunc test013.check
+ }
+ puts "\tTest013.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will try to overwrite each datum, but set the
+ # NOOVERWRITE flag.
+ puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $key [chop_data $method $str]}]
+ error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Value should be unchanged.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will replace each item with its datum capitalized.
+ puts "\tTest013.c: overwrite values with capitalized datum"
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $rstr]}]
+ error_check_good put $r 0
+
+ # Value should be changed.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest013.e: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest013.f: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test013; data should be the key in upper case
+proc test013.check { key data } {
+ error_check_good \
+ "key/data mismatch for $key" $data [string toupper $key]
+}
+
+proc test013_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good \
+ "data mismatch for $key" $data [string toupper $kvals($key)]
+}
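
Test013.b hinges on put -nooverwrite refusing to replace an existing key: the call returns a message containing DB_KEYEXIST instead of 0, and the stored value is left alone. A minimal sketch of that check, assuming $db is an open database handle and k1 a hypothetical key:

# The first put stores the record and returns 0.
set ret [$db put k1 original]
puts "first put: $ret"

# The second put is rejected; the return value names DB_KEYEXIST and the
# original data remains in place.
set ret [$db put -nooverwrite k1 replacement]
if { [string match "*DB_KEYEXIST*" $ret] } {
	puts "key already present, value unchanged: [$db get k1]"
}
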
diff --git a/libdb/test/test014.tcl b/libdb/test/test014.tcl
new file mode 100644
index 0000000..463a7e9
--- /dev/null
+++ b/libdb/test/test014.tcl
@@ -0,0 +1,253 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of numbers of characters to replace,
+# TEST and number of times to increase the size by.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some at the middle.
+proc test014 { method {nentries 10000} args } {
+ set fixed 0
+ set args [convert_args $method $args]
+
+ if { [is_fixed_length $method] == 1 } {
+ set fixed 1
+ }
+
+ puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
+
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body $method $flagp 0 128 $nentries} $args
+
+ # POST-PENDS :
+		# partial put data after the end of the existing record
+ # chars: number of empty spaces that will be padded with null
+ # increase: is the length of the str to be appended (after pad)
+ #
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ }
+ puts "Test014 complete."
+}
+
+proc test014_body { method flagp chars increase {nentries 10000} args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 && $chars != $increase } {
+ puts "Test014: $method: skipping replace\
+ $chars chars with string $increase times larger."
+ return
+ }
+
+ if { $flagp == 1} {
+ puts "Test014: Postpending string of len $increase with \
+ gap $chars."
+ } else {
+ puts "Test014: Replace $chars chars with string \
+ $increase times larger"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test014.db
+ set env NULL
+ } else {
+ set testfile test014.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set gflags ""
+ set pflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest014.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $flagp == 1 } {
+ # this is for postpend only
+ global dvals
+
+ # initial put
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbput $ret 0
+
+ set offset [string length $str]
+
+ # increase is the actual number of new bytes
+ # to be postpended (besides the null padding)
+ set data [repeat "P" $increase]
+
+ # chars is the amount of padding in between
+ # the old data and the new
+ set len [expr $offset + $chars + $increase]
+ set dvals($key) [binary format \
+ a[set offset]x[set chars]a[set increase] \
+ $str $data]
+ set offset [expr $offset + $chars]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
+ error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ } else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ partial_put $method $db $txn \
+ $gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest014.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env \
+ $t1 test014.check dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest014.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ test014.check dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test014; data should match the expected partial-put result for the key
+proc test014.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
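
Test014 (and test015 below) build their expected values around put -partial, which takes a {doff dlen} pair: dlen bytes starting at offset doff are replaced by the supplied data, and writing past the current end of the record extends it, padding any gap with null bytes. A minimal sketch of those semantics, assuming $db is an open btree handle and k1 a hypothetical key:

# Store a record, then overwrite two bytes in the middle of it.
$db put k1 abcdef
$db put -partial {2 2} k1 XY
puts [$db get k1]	;# the single pair should now read abXYef

# A partial put past the end appends after null padding: 6 old bytes,
# a 3-byte gap of nulls, then the 4 new bytes, for 13 bytes total.
$db put -partial {9 0} k1 tail
puts [string length [lindex [lindex [$db get k1] 0] 1]]	;# expected 13
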
diff --git a/libdb/test/test015.tcl b/libdb/test/test015.tcl
new file mode 100644
index 0000000..c4caac1
--- /dev/null
+++ b/libdb/test/test015.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
+proc test015 { method {nentries 7500} { start 0 } args } {
+ global fixed_len testdir
+
+ set low_range 50
+ set mid_range 100
+ set high_range 1000
+
+ if { [is_fixed_length $method] } {
+ set low_range [expr $fixed_len/2 - 2]
+ set mid_range [expr $fixed_len/2]
+ set high_range $fixed_len
+ }
+
+ set t_table {
+ { 1 { 1 1 1 } }
+ { 2 { 1 1 5 } }
+ { 3 { 1 1 $low_range } }
+ { 4 { 1 $mid_range 1 } }
+ { 5 { $mid_range $high_range 5 } }
+ { 6 { 1 $mid_range $low_range } }
+ }
+
+ puts "Test015: \
+ $method ($args) $nentries equal key/data pairs, partial put test"
+ test015_init
+ if { $start == 0 } {
+ set start { 1 2 3 4 5 6 }
+ }
+ foreach entry $t_table {
+ set this [lindex $entry 0]
+ if { [lsearch $start $this] == -1 } {
+ continue
+ }
+ puts -nonewline "$this: "
+ eval [concat test015_body $method [lindex $entry 1] \
+ $nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+		puts "Verifying testdir $testdir"
+
+ error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
+ }
+}
+
+proc test015_init { } {
+ global rand_init
+
+ berkdb srand $rand_init
+}
+
+proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
+ global dvals
+ global fixed_len
+ global testdir
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set checkfunc test015.check
+
+ if { [is_fixed_length $method] && \
+ [string compare $omethod "-recno"] == 0} {
+ # is fixed recno method
+ set checkfunc test015.check
+ }
+
+ puts "Put $rcount strings random offsets between $off_low and $off_hi"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test015.db
+ set env NULL
+ } else {
+ set testfile test015.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set retdir $testdir
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest015.a: put/get loop for $nentries entries"
+
+ # Here is the loop where we put and get each key/data pair
+ # Each put is a partial put of a record that does not exist.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ if { [string length $str] > $fixed_len } {
+ continue
+ }
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { 0 } {
+ set data [replicate $str $rcount]
+ set off [ berkdb random_int $off_low $off_hi ]
+ set offn [expr $off + 1]
+ if { [is_fixed_length $method] && \
+ [expr [string length $data] + $off] >= $fixed_len} {
+ set data [string range $data 0 [expr $fixed_len-$offn]]
+ }
+ set dvals($key) [partial_shift $data $off right]
+ } else {
+ set data [chop_data $method [replicate $str $rcount]]
+
+ # This is a hack. In DB we will store the records with
+ # some padding, but these will get lost if we just return
+ # them in TCL. As a result, we're going to have to hack
+ # get to check for 0 padding and return a list consisting
+ # of the number of 0's and the actual data.
+ set off [ berkdb random_int $off_low $off_hi ]
+
+ # There is no string concatenation function in Tcl
+ # (although there is one in TclX), so we have to resort
+ # to this hack. Ugh.
+ set slen [string length $data]
+ if {[is_fixed_length $method] && \
+ $slen > $fixed_len - $off} {
+ set slen [expr $fixed_len - $off]
+ }
+ set a "a"
+ set dvals($key) [pad_data \
+ $method [eval "binary format x$off$a$slen" {$data}]]
+ }
+ if {[is_fixed_length $method] && \
+ [string length $data] > ($fixed_len - $off)} {
+ set slen [expr $fixed_len - $off]
+ set data [eval "binary format a$slen" {$data}]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
+ {-partial [list $off [string length $data]] $key $data}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest015.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest015.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ unset dvals
+}
+
+# Check function for test015; the data must match the saved dvals entry
+proc test015.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ binary scan $data "c[string length $data]" a
+ binary scan $dvals($key) "c[string length $dvals($key)]" b
+ error_check_good "mismatch on padding for key $key" $a $b
+}
+
+proc test015.fixed.check { key data } {
+ global dvals
+ global fixed_len
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ if { [string length $data] > $fixed_len } {
+ error_check_bad \
+ "data length:[string length $data] \
+ for fixed:$fixed_len" 1 1
+ }
+ puts "$data : $dvals($key)"
+ error_check_good compare_data($data,$dvals($key)) \
+ $dvals($key) $data
+}
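+
+# Illustrative sketch only (not invoked by the harness): a partial put on
+# a key that does not yet exist zero-fills the bytes in front of the given
+# offset, which is what the dvals/binary-format bookkeeping above models.
+# The proc and file names are made up for illustration.
+proc test015_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -btree $testdir/sk015.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+
+ # Write 3 bytes at offset 4 of a brand-new record; DB should pad
+ # bytes 0-3 with nulls, giving a 7-byte stored record.
+ error_check_good sketch_put [$db put -partial {4 3} newkey abc] 0
+ set ret [$db get newkey]
+ error_check_good sketch_len \
+     [string length [lindex [lindex $ret 0] 1]] 7
+
+ error_check_good sketch_close [$db close] 0
+}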
diff --git a/libdb/test/test016.tcl b/libdb/test/test016.tcl
new file mode 100644
index 0000000..7e00ad3
--- /dev/null
+++ b/libdb/test/test016.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
+
+proc test016 { method {nentries 10000} args } {
+ global datastr
+ global dvals
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Test016: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test016.db
+ set env NULL
+ } else {
+ set testfile test016.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest016.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
+ puts "\tTest016.b: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int [string length $key] $len]
+ set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-first" "-next"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest016.e: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test016; data should be whatever is set in dvals
+proc test016.check { key data } {
+ global datastr
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
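+
+# Illustrative sketch only (not invoked by the harness): a partial put
+# whose replacement data is shorter than the range it replaces shrinks
+# the record, which is the behavior the Test016.b loop relies on. The
+# proc and file names are made up for illustration.
+proc test016_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -btree $testdir/sk016.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ error_check_good sketch_put [$db put mykey abcdefghij] 0
+
+ # Replace the 6 bytes at offset 2 with the 2-byte string "XY";
+ # the 10-byte record shrinks to the 6-byte value "abXYij".
+ error_check_good sketch_partial [$db put -partial {2 6} mykey XY] 0
+ set ret [$db get mykey]
+ error_check_good sketch_get [lindex [lindex $ret 0] 1] abXYij
+
+ error_check_good sketch_close [$db close] 0
+}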
diff --git a/libdb/test/test017.tcl b/libdb/test/test017.tcl
new file mode 100644
index 0000000..5cd46f1
--- /dev/null
+++ b/libdb/test/test017.tcl
@@ -0,0 +1,306 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
+proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test0$tnum: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ set file_list [lrange $file_list 0 $flen]
+ }
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
+ set ovfl ""
+ # Here is the loop where we put and get each key/data pair
+ puts -nonewline "\tTest0$tnum.a: Creating duplicates with "
+ if { $contents != 0 } {
+ puts "file contents as key/data"
+ } else {
+ puts "file name as key/data"
+ }
+ foreach f $file_list {
+ if { $contents != 0 } {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ #
+ # Prepend file name to guarantee uniqueness
+ set filecont [read $fid]
+ set str $f:$filecont
+ close $fid
+ } else {
+ set str $f
+ }
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ #
+ # Save 10% of the files for the overflow test
+ #
+ if { $contents == 0 && [expr $count % 10] == 0 } {
+ lappend ovfl $f
+ }
+ # Now retrieve all the keys matching this key
+ set ret [$db get $str]
+ error_check_bad $f:dbget_dups [llength $ret] 0
+ error_check_good $f:dbget_dups1 [llength $ret] $ndups
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ if {[string length $d] == 0} {
+ break
+ }
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:$f:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ set oid [open $t2.tmp w]
+ set o1id [open $t4.tmp w]
+ foreach f $file_list {
+ for {set i 1} {$i <= $ndups} {incr i} {
+ puts $o1id $f
+ }
+ puts $oid $f
+ }
+ close $oid
+ close $o1id
+ filesort $t2.tmp $t2
+ filesort $t4.tmp $t4
+ fileremove $t2.tmp
+ fileremove $t4.tmp
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if {$contents == 0} {
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if {$contents == 0} {
+ # Now compare the keys to see if they match the filenames
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.d: Verify off page duplicates and overflow status"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+ if { [is_btree $method] } {
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+ if {$contents == 0} {
+ # This check doesn't work in hash, since overflow
+ # pages count extra pages in buckets as well as true
+ # P_OVERFLOW pages.
+ if { [is_hash $method] == 0 } {
+ error_check_good overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+
+ #
+ # If doing overflow test, do that now. Else we are done.
+ # Add overflow pages by adding a large entry to a duplicate.
+ #
+ if { [llength $ovfl] == 0} {
+ error_check_good db_close [$db close] 0
+ return
+ }
+
+ puts "\tTest0$tnum.e: Add overflow duplicate entries"
+ set ovfldup [expr $ndups + 1]
+ foreach f $ovfl {
+ #
+ # This is just like put_file, but prepends the dup number
+ #
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set fdata [read $fid]
+ close $fid
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$f $data}]
+ error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ set stat [$db stat]
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Check function; verify data contains key
+proc test017.check { key data } {
+ error_check_good "data mismatch for key $key" $key [data_of $data]
+}
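+
+# Illustrative sketch only (not invoked by the harness): storing several
+# data items under one key in a database opened with -dup, and walking
+# the duplicate set with a cursor using -set/-next, as the verification
+# loop above does. The proc and file names are made up for illustration.
+proc test017_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -dup -btree $testdir/sk017.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ for { set i 1 } { $i <= 3 } { incr i } {
+ error_check_good sketch_put [$db put dupkey dup$i] 0
+ }
+
+ set dbc [$db cursor]
+ set n 0
+ for { set ret [$dbc get -set dupkey] } { [llength $ret] != 0 } \
+     { set ret [$dbc get -next] } {
+ if { [string compare [lindex [lindex $ret 0] 0] dupkey] != 0 } {
+ break
+ }
+ incr n
+ }
+ error_check_good sketch_ndups $n 3
+ error_check_good sketch_curs_close [$dbc close] 0
+ error_check_good sketch_close [$db close] 0
+}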
diff --git a/libdb/test/test018.tcl b/libdb/test/test018.tcl
new file mode 100644
index 0000000..934ce6f
--- /dev/null
+++ b/libdb/test/test018.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
+proc test018 { method {nentries 10000} args} {
+ puts "Test018: Off page duplicate tests"
+ eval {test011 $method $nentries 19 18 -pagesize 512} $args
+}
diff --git a/libdb/test/test019.tcl b/libdb/test/test019.tcl
new file mode 100644
index 0000000..5c89e6c
--- /dev/null
+++ b/libdb/test/test019.tcl
@@ -0,0 +1,131 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test019
+# TEST Partial get test.
+proc test019 { method {nentries 10000} args } {
+ global fixed_len
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test019.db
+ set env NULL
+ } else {
+ set testfile test019.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test019: $method ($args) $nentries partial get test"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest019.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set repl [berkdb random_int $fixed_len 100]
+ set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-nooverwrite $key $data}]
+ error_check_good dbput:$key $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ dbget:$key $ret [list [list $key [pad_data $method $data]]]
+ set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+
+ puts "\tTest019.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
+
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} \
+ $txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The requested length may run past the end of the data; the
+ # bytes returned are clamped at the end of the record, and the
+ # [string range] below clamps the expected value the same way.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+
+ error_check_good dbget_data $d \
+ [string range $data $beg [expr $beg + $len - 1]]
+
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
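+
+# Illustrative sketch only (not invoked by the harness): -partial on a
+# get returns just the requested byte range of the stored data, which is
+# what the Test019.b loop verifies against [string range]. The proc and
+# file names are made up for illustration.
+proc test019_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -btree $testdir/sk019.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ error_check_good sketch_put [$db put mykey abcdefghij] 0
+
+ # Fetch 4 bytes starting at offset 3: expect "defg".
+ set ret [$db get -partial {3 4} mykey]
+ error_check_good sketch_get [lindex [lindex $ret 0] 1] defg
+
+ error_check_good sketch_close [$db close] 0
+}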
diff --git a/libdb/test/test020.tcl b/libdb/test/test020.tcl
new file mode 100644
index 0000000..4edbb81
--- /dev/null
+++ b/libdb/test/test020.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test020
+# TEST In-Memory database tests.
+proc test020 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test020 skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # Check if we are using an env.
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test020_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test020.check
+ }
+ puts "\tTest020.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test020:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test020; keys and data are identical
+proc test020.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test020_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
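+
+# Illustrative sketch only (not invoked by the harness): omitting the
+# file name from berkdb_open, as the open call above does, creates an
+# anonymous in-memory database that goes away when the handle is closed.
+# The proc name is made up for illustration.
+proc test020_sketch { } {
+ set db [berkdb_open -create -mode 0644 -btree]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+
+ error_check_good sketch_put [$db put mykey mydata] 0
+ set ret [$db get mykey]
+ error_check_good sketch_get [lindex [lindex $ret 0] 1] mydata
+
+ # No file is left behind; the data lived only in the cache.
+ error_check_good sketch_close [$db close] 0
+}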
diff --git a/libdb/test/test021.tcl b/libdb/test/test021.tcl
new file mode 100644
index 0000000..220f12f
--- /dev/null
+++ b/libdb/test/test021.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
+proc test021 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test021.db
+ set env NULL
+ } else {
+ set testfile test021.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test021_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test021.check
+ }
+ puts "\tTest021.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key [reverse $str]
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and retrieve about 20
+ # records after it.
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest021.b: test ranges"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $count } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key [reverse $str]
+ }
+
+ set r [$dbc get -set_range $key]
+ error_check_bad dbc_get:$key [string length $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+
+ for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
+ set r [$dbc get "-next"]
+ # no error checking because we may run off the end
+ # of the database
+ if { [llength $r] == 0 } {
+ continue;
+ }
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+ }
+ incr i
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for test021; keys and data are reversed
+proc test021.check { key data } {
+ error_check_good "key/data mismatch for $key" $data [reverse $key]
+}
+
+proc test021_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
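+
+# Illustrative sketch only (not invoked by the harness): a cursor get
+# with -set_range positions on the smallest key greater than or equal
+# to the supplied key, which is what the range loop above depends on.
+# The proc and file names are made up for illustration.
+proc test021_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -btree $testdir/sk021.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ foreach k { apple banana cherry } {
+ error_check_good sketch_put [$db put $k data_$k] 0
+ }
+
+ set dbc [$db cursor]
+ # "b" is not a key; -set_range lands on "banana".
+ set ret [$dbc get -set_range b]
+ error_check_good sketch_range [lindex [lindex $ret 0] 0] banana
+
+ error_check_good sketch_curs_close [$dbc close] 0
+ error_check_good sketch_close [$db close] 0
+}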
diff --git a/libdb/test/test022.tcl b/libdb/test/test022.tcl
new file mode 100644
index 0000000..0e8391a
--- /dev/null
+++ b/libdb/test/test022.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test022
+# TEST Test of DB->getbyteswapped().
+proc test022 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 "$testdir/test022a.db"
+ set testfile2 "$testdir/test022b.db"
+ set env NULL
+ } else {
+ set testfile1 "test022a.db"
+ set testfile2 "test022b.db"
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create two databases, one in each byte order.
+ set db1 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 1234} $testfile1]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 4321} $testfile2]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Call DB->get_byteswapped on both of them.
+ set db1_order [$db1 is_byteswapped]
+ set db2_order [$db2 is_byteswapped]
+
+ # Make sure that both answers are either 1 or 0,
+ # and that exactly one of them is 1.
+ error_check_good is_byteswapped_sensible_1 \
+ [expr ($db1_order == 1 && $db2_order == 0) || \
+ ($db1_order == 0 && $db2_order == 1)] 1
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ puts "\tTest022 complete."
+}
diff --git a/libdb/test/test023.tcl b/libdb/test/test023.tcl
new file mode 100644
index 0000000..ff52f57
--- /dev/null
+++ b/libdb/test/test023.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+proc test023 { method args } {
+ global alphabet
+ global dupnum
+ global dupstr
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test023: $method delete duplicates/check cursor operations"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test023: skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test023.db
+ set env NULL
+ } else {
+ set testfile test023.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ foreach i { onpage offpage } {
+ if { $i == "onpage" } {
+ set dupstr DUP
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ puts "\tTest023.a: Insert key w/$i dups"
+ set key "duplicate_val_test"
+ for { set count 0 } { $count < 20 } { incr count } {
+ set ret \
+ [eval {$db put} $txn $pflags {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now let's get all the items and make sure they look OK.
+ puts "\tTest023.b: Check initial duplicates"
+ set dupnum 0
+ dump_file $db $txn $t1 test023.check
+
+ # Delete a few items (FIRST, LAST, and one in the middle).
+ # Make sure that a get of the current item comes back empty
+ # and that NEXT and PREV do the right things.
+
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ puts "\tTest023.c: Delete first and try gets"
+ # This should be the first duplicate
+ error_check_good \
+ dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
+
+ # Now delete it.
+ set ret [$dbc del]
+ error_check_good dbc_del:FIRST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:CURRENT $ret [list [list [] []]]
+
+ # Now Prev should fail
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:prev0 [llength $ret] 0
+
+ # Now 10 nexts should work to get us in the middle
+ for { set j 1 } { $j <= 10 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.d: Delete middle and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:10 $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Prev and Next should work
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
+
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr
+
+ # Now go to the last one
+ for { set j 11 } { $j <= 19 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.e: Delete last and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:LAST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Next should fail
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next19 [llength $ret] 0
+
+ # Prev should work
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr
+
+ # Now overwrite the current one, then count the number
+ # of data items to make sure that we have the right number.
+
+ puts "\tTest023.f: Count keys, overwrite current, count again"
+ # At this point we should have 17 duplicates (the initial 20
+ # minus 3 deletes)
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ set ret [$dbc put -current OVERWRITE]
+ error_check_good dbc_put:current $ret 0
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
+
+ # Done, delete all the keys for next iteration
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_delete $ret 0
+
+ # database should be empty
+
+ set ret [$dbc get -first]
+ error_check_good first_after_empty [llength $ret] 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test023; data is the dup number followed by dupstr
+proc test023.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "bad key" $key duplicate_val_test
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
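+
+# Illustrative sketch only (not invoked by the harness): after a cursor
+# delete, a -current get returns an empty key/data pair while -next and
+# -prev still step to the neighboring duplicates, which is what the
+# checks above assert. The proc and file names are made up for
+# illustration.
+proc test023_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -dup -btree $testdir/sk023.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ foreach d { d0 d1 d2 } {
+ error_check_good sketch_put [$db put dupkey $d] 0
+ }
+
+ set dbc [$db cursor]
+ set ret [$dbc get -set dupkey]
+ error_check_bad sketch_set [llength $ret] 0
+ error_check_good sketch_del [$dbc del] 0
+
+ # The deleted slot is a hole: -current is empty, -next moves on.
+ error_check_good sketch_current [$dbc get -current] [list [list [] []]]
+ error_check_good sketch_next \
+     [lindex [lindex [$dbc get -next] 0] 1] d1
+
+ error_check_good sketch_curs_close [$dbc close] 0
+ error_check_good sketch_close [$db close] 0
+}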
diff --git a/libdb/test/test024.tcl b/libdb/test/test024.tcl
new file mode 100644
index 0000000..9200416
--- /dev/null
+++ b/libdb/test/test024.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
+proc test024 { method {nentries 10000} args} {
+ source ./include.tcl
+ global rand_init
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test024: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test024 skipping for method HASH"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test024.db
+ set env NULL
+ } else {
+ set testfile test024.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest024.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ puts "\tTest024.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest024.c: dump file"
+
+ # Put sorted keys in file
+ set oid [open $t1 w]
+ foreach k $sorted_keys {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ # Instead of using dump_file; get all the keys by keynum
+ set oid [open $t2 w]
+ if { [string compare $omethod "-btree"] == 0 } {
+ set do_renumber 1
+ }
+
+ set gflags " -recno"
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ error_check_good Test024.c:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest024.d: close, open, and dump file"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.d:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest024.e: close, open, and dump file in reverse direction"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Put sorted keys in file
+ set rsorted ""
+ foreach k $sorted_keys {
+ set rsorted [linsert $rsorted 0 $k]
+ }
+ set oid [open $t1 w]
+ foreach k $rsorted {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k $count } { $k > 0 } { incr k -1 } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.e:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now try deleting elements and making sure they work
+ puts "\tTest024.f: delete test"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ while { $count > 0 } {
+ set kndx [berkdb random_int 1 $count]
+ set kval [lindex $keys [expr $kndx - 1]]
+ set recno [expr [lsearch $sorted_keys $kval] + 1]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1 } {
+ set ret [eval {$db del} $txn {$recno}]
+ } else {
+ set ret [eval {$db del} $txn {$kval}]
+ }
+ error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Remove the key from the key list
+ set ndx [expr $kndx - 1]
+ set keys [lreplace $keys $ndx $ndx]
+
+ if { $do_renumber == 1 } {
+ set r [expr $recno - 1]
+ set sorted_keys [lreplace $sorted_keys $r $r]
+ }
+
+ # Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $do_renumber == 1 && $recno != $count } {
+ set r [expr $recno - 1]
+ set ret [eval {$db get} $txn $gflags {$recno}]
+ error_check_good get_after_del \
+ [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Decrement count
+ incr count -1
+ }
+ error_check_good db_close [$db close] 0
+}
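+
+# Illustrative sketch only (not invoked by the harness): a btree opened
+# with -recnum also supports record-number gets, so "get -recno N"
+# returns the pair at position N in sorted key order, which is what the
+# dump loops above check. The proc and file names are made up for
+# illustration.
+proc test024_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open \
+     -create -mode 0644 -recnum -btree $testdir/sk024.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ foreach k { cherry apple banana } {
+ error_check_good sketch_put [$db put $k data_$k] 0
+ }
+
+ # Record 1 is the smallest key in sort order ("apple").
+ set ret [$db get -recno 1]
+ error_check_good sketch_recno [lindex [lindex $ret 0] 1] data_apple
+
+ error_check_good sketch_close [$db close] 0
+}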
diff --git a/libdb/test/test025.tcl b/libdb/test/test025.tcl
new file mode 100644
index 0000000..d72027c
--- /dev/null
+++ b/libdb/test/test025.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test025
+# TEST DB_APPEND flag test.
+proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test0$tnum: $method ($args)"
+
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "Test0$tnum skipping for method BTREE"
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test0$tnum skipping for method HASH"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ puts "\tTest0$tnum.a: put/get loop"
+ set gflags " -recno"
+ set pflags " -append"
+ set txn ""
+ set checkfunc test025_check
+
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ set nentries [expr $start + $nentries]
+ if { $count != 0 } {
+ gets $did str
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
+ error_check_good db_put $ret $k
+
+ set ret [eval {$db get} $txn $gflags {$k}]
+ error_check_good \
+ get $ret [list [list $k [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
+ incr count
+ }
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -first -next
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -last -prev
+}
+
+proc test025_check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good " key/data mismatch for |$key|" $data $kvals($key)
+}
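+
+# Illustrative sketch only (not invoked by the harness): with -append the
+# put call chooses the record number itself and returns it, which is why
+# the loop above checks the return value of the put against the expected
+# key. The proc and file names are made up for illustration.
+proc test025_sketch { } {
+ source ./include.tcl
+
+ set db [berkdb_open -create -mode 0644 -recno $testdir/sk025.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+
+ # Each -append put returns the newly allocated record number.
+ error_check_good sketch_append1 [$db put -append first] 1
+ error_check_good sketch_append2 [$db put -append second] 2
+
+ set ret [$db get -recno 2]
+ error_check_good sketch_get [lindex [lindex $ret 0] 1] second
+
+ error_check_good sketch_close [$db close] 0
+}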
diff --git a/libdb/test/test026.tcl b/libdb/test/test026.tcl
new file mode 100644
index 0000000..bef044b
--- /dev/null
+++ b/libdb/test/test026.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+# TEST Keyed delete test through cursor. If ndups is small, this will
+# TEST test on-page dups; if it's large, it will test off-page dups.
+proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we want a lot of dups, cut that down a bit
+ # or repl testing takes very long.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest0$tnum.a: Put loop"
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
+ set datastr [ make_data_str $str ]
+ for { set j 1 } { $j <= $ndups} {incr j} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Now we will sequentially traverse the database getting each
+ # item and deleting it.
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest0$tnum.b: Get/delete loop"
+ set i 1
+ for { set ret [$dbc get -first] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key $key $curkey
+ error_check_good \
+ seq_get:data $data [pad_data $method $i[make_data_str $key]]
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: Verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/test027.tcl b/libdb/test/test027.tcl
new file mode 100644
index 0000000..c284912
--- /dev/null
+++ b/libdb/test/test027.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+# TEST Check that delete operations work. Create a database; close the
+# TEST database and reopen it. Then issue a delete by key for each
+# TEST entry.
+proc test027 { method {nentries 100} args} {
+ eval {test026 $method $nentries 100 27} $args
+}
diff --git a/libdb/test/test028.tcl b/libdb/test/test028.tcl
new file mode 100644
index 0000000..617b449
--- /dev/null
+++ b/libdb/test/test028.tcl
@@ -0,0 +1,222 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
+proc test028 { method args } {
+ global dupnum
+ global dupstr
+ global alphabet
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test028: $method put after cursor delete test"
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test028 skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ set key 10
+ } else {
+ append args " -dup"
+ set key "put_after_cursor_del"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test028.db
+ set env NULL
+ } else {
+ set testfile test028.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set ndups 20
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { offpage onpage } {
+ foreach b { bigitem smallitem } {
+ if { $i == "onpage" } {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr DUP
+ }
+ } else {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ }
+
+ if { $b == "bigitem" } {
+ set dupstr [repeat $dupstr 10]
+ }
+ puts "\tTest028: $i/$b"
+
+ puts "\tTest028.a: Insert key with single data item"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+			# Now let's get the item and make sure it's OK.
+ puts "\tTest028.b: Check initial entry"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get \
+ $ret [list [list $key [pad_data $method $dupstr]]]
+
+			# Now try a put with -nooverwrite set (should fail with DB_KEYEXIST)
+ puts "\tTest028.c: No_overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete the item with a cursor
+ puts "\tTest028.d: Delete test"
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ puts "\tTest028.e: Reput the item"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ puts "\tTest028.f: Retrieve the item"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $ret \
+ [list [list $key [pad_data $method $dupstr]]]
+
+ # Delete the key to set up for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now repeat the above set of tests with
+ # duplicates (if not RECNO).
+ if { [is_record_based $method] == 1 } {
+ continue;
+ }
+
+ puts "\tTest028.g: Insert key with duplicates"
+ for { set count 0 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.h: Check dups"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Try no_overwrite
+ puts "\tTest028.i: No_overwrite test"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key $dupstr}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete all the elements with a cursor
+ puts "\tTest028.j: Cursor Deletes"
+ set count 0
+ for { set ret [$dbc get -set $key] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good db_seq(key) $k $key
+ error_check_good db_seq(data) $d $count$dupstr
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+ incr count
+ if { $count == [expr $ndups - 1] } {
+ puts "\tTest028.k:\
+ Duplicate No_Overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key $dupstr}]
+ error_check_good db_put [is_substr \
+ $ret "DB_KEYEXIST"] 1
+ }
+ }
+
+ # Make sure all the items are gone
+ puts "\tTest028.l: Get after delete"
+ set ret [$dbc get -set $key]
+ error_check_good get_after_del [string length $ret] 0
+
+ puts "\tTest028.m: Reput the item"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key 0$dupstr}]
+ error_check_good db_put $ret 0
+ for { set count 1 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.n: Retrieve the item"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Clean out in prep for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test028; the key is fixed and the data carries the dup number
+proc test028.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "Bad key" $key put_after_cursor_del
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
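+
+# Editor's sketch (not part of the original Sleepycat suite): the core
+# behaviour test028 exercises, reduced to one key. Once an item has been
+# deleted through a cursor, a put with -nooverwrite on the same key
+# succeeds again. Proc and file names (demo028_sketch, demo028.db) are
+# invented; the proc is defined here but never invoked by the test driver.
+proc demo028_sketch { } {
+	set db [berkdb_open -create -mode 0644 -btree demo028.db]
+	$db put mykey myval
+	# A -nooverwrite put on an existing key is rejected; the return
+	# value names DB_KEYEXIST rather than 0.
+	puts [$db put -nooverwrite mykey otherval]
+	# Delete the item through a cursor.
+	set dbc [$db cursor]
+	$dbc get -set mykey
+	$dbc del
+	$dbc close
+	# The key is gone, so the -nooverwrite put now succeeds (returns 0).
+	puts [$db put -nooverwrite mykey newval]
+	$db close
+}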
diff --git a/libdb/test/test029.tcl b/libdb/test/test029.tcl
new file mode 100644
index 0000000..dfcb390
--- /dev/null
+++ b/libdb/test/test029.tcl
@@ -0,0 +1,245 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test029
+# TEST Test Btree and Recno record number renumbering.
+proc test029 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test029: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test029 skipping for method HASH"
+ return
+ }
+ if { [is_record_based $method] == 1 && $do_renumber != 1 } {
+ puts "Test029 skipping for method RECNO (w/out renumbering)"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test029.db
+ set env NULL
+ } else {
+ set testfile test029.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest029.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+
+ # Save the first and last keys
+ set last_key [lindex $sorted_keys end]
+ set last_keynum [llength $sorted_keys]
+
+ set first_key [lindex $sorted_keys 0]
+ set first_keynum 1
+
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest029.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good dbput $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now delete the first key in the database
+ puts "\tTest029.c: delete and verify renumber"
+
+ # Delete the first key in the file
+ if { [is_record_based $method] == 1 } {
+ set key $first_keynum
+ } else {
+ set key $first_key
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now we are ready to retrieve records based on
+ # record number
+ if { [string compare $omethod "-btree"] == 0 } {
+ append gflags " -recno"
+ }
+
+ # First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create a cursor; we need it for the next test and we
+ # need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # OK, now re-put the first key and make sure that we
+ # renumber the last key appropriately.
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ # Recno
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
+
+ # Now delete the first key in the database using a cursor
+ puts "\tTest029.d: delete with cursor and verify renumber"
+
+ set ret [$dbc get -first]
+ error_check_good dbc_first $ret [list [list $key $first_key]]
+
+ # Now delete at the cursor
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ # Now check the record numbers of the last keys again.
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_last_after_cursor_del:$ret $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Re-put the first key and make sure that we renumber the last
+ # key appropriately.
+ puts "\tTest029.e: put with cursor and verify renumber"
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$dbc put} \
+ $pflags {-current $first_key}]
+ error_check_good dbc_put:DB_CURRENT $ret 0
+ } else {
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
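+
+# Editor's sketch (not part of the original Sleepycat suite): the
+# renumbering behaviour test029 checks, reduced to three records in a
+# btree opened with -recnum. Proc and file names are invented; the proc
+# is defined here but never invoked by the test driver.
+proc demo029_sketch { } {
+	set db [berkdb_open -create -mode 0644 -recnum -btree demo029.db]
+	# Keys sort as apple < banana < cherry, i.e. record numbers 1..3.
+	foreach k {apple banana cherry} {
+		$db put $k data_$k
+	}
+	# Record number 3 is the last key.
+	puts [lindex [lindex [$db get -recno 3] 0] 1]	;# data_cherry
+	# Deleting the first key renumbers the rest: cherry becomes record
+	# 2 and record 3 no longer exists.
+	$db del apple
+	puts [lindex [lindex [$db get -recno 2] 0] 1]	;# data_cherry
+	puts [llength [$db get -recno 3]]		;# 0
+	$db close
+}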
diff --git a/libdb/test/test030.tcl b/libdb/test/test030.tcl
new file mode 100644
index 0000000..96c5eb2
--- /dev/null
+++ b/libdb/test/test030.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
+proc test030 { method {nentries 10000} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 ||
+ [is_rbtree $method] == 1 } {
+ puts "Test030 skipping for method $method"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test030.db
+ set cntfile $testdir/cntfile.db
+ set env NULL
+ } else {
+ set testfile test030.db
+ set cntfile cntfile.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use a second DB to keep track of how many duplicates
+ # we enter per key
+
+ set cntdb [eval {berkdb_open -create \
+ -mode 0644} $args {-btree $cntfile}]
+	error_check_good dbopen:cntfile [is_valid_db $cntdb] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+	# We will add between 1 and 10 dups with data values 1 ... ndup
+ # We'll verify each addition.
+
+ set did [open $dict]
+ puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ndup [berkdb random_int 1 10]
+
+ for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb put} \
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
+ error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr x
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x $ndup
+ incr count
+ }
+ close $did
+
+ # Verify on sequential pass of entire file
+ puts "\tTest030.b: sequential check"
+
+ # We can't just set lastkey to a null string, since that might
+ # be a key now!
+ set lastkey "THIS STRING WILL NEVER BE A KEY"
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+
+ # Outer loop should always get a new key
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_bad outer_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good outer_get_loop:data $d $k
+ error_check_good outer_get_loop:id $id 1
+
+ set lastkey $k
+		# Figure out how many dups we should have
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
+ set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+
+ set howmany 1
+ for { set ret [$dbc get -nextdup] } \
+ { [llength $ret] != 0 } \
+ { set ret [$dbc get -nextdup] } {
+ incr howmany
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good inner_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $howmany
+
+ }
+ error_check_good ndups_found $howmany $ndup
+ }
+
+ # Verify on key lookup
+ puts "\tTest030.c: keyed check"
+ set cnt_dbc [$cntdb cursor]
+ for {set ret [$cnt_dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$cnt_dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+
+ set howmany [lindex [lindex $ret 0] 1]
+ error_check_bad cnt_seq:data [string length $howmany] 0
+
+ set i 0
+ for {set ret [$dbc get -set $k]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr i
+
+ set k [lindex [lindex $ret 0] 0]
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $i
+ }
+ error_check_good keyed_count $i $howmany
+
+ }
+ error_check_good cnt_curs_close [$cnt_dbc close] 0
+ error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good cnt_file_close [$cntdb close] 0
+ error_check_good db_file_close [$db close] 0
+}
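+
+# Editor's sketch (not part of the original Sleepycat suite): DB_NEXT_DUP
+# in isolation. In an unsorted -dup database the duplicates of a key come
+# back in insertion order, and -nextdup returns an empty list once that
+# key's duplicate set is exhausted, without crossing into the next key.
+# Proc and file names are invented; the proc is defined here but never
+# invoked by the test driver.
+proc demo030_sketch { } {
+	set db [berkdb_open -create -mode 0644 -btree -dup demo030.db]
+	foreach d {first second third} {
+		$db put fruit $d
+	}
+	$db put vegetable turnip
+	set dbc [$db cursor]
+	# -set positions on the first duplicate of "fruit"; -nextdup walks
+	# the rest and stops before "vegetable".
+	for {set ret [$dbc get -set fruit]} {[llength $ret] != 0} \
+	    {set ret [$dbc get -nextdup]} {
+		puts [lindex [lindex $ret 0] 1]
+	}
+	$dbc close
+	$db close
+}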
diff --git a/libdb/test/test031.tcl b/libdb/test/test031.tcl
new file mode 100644
index 0000000..cefe9c0
--- /dev/null
+++ b/libdb/test/test031.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates
+# TEST For the data field, prepend random four-char strings (see test032) so
+# TEST that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup -dupsort $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ if { $i == 2 } {
+ set nodupstr $datastr
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Test DB_NODUPDATA using the DB handle
+ set ret [eval {$db put -nodupdata} \
+ $txn $pflags {$str [chop_data $method $nodupstr]}]
+ error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ # Test DB_NODUPDATA using cursor handle
+ set ret [$dbc get -set $str]
+ error_check_bad dbc_get [llength $ret] 0
+ set datastr [lindex [lindex $ret 0] 1]
+ error_check_bad dbc_data [string length $datastr] 0
+ set ret [eval {$dbc put -nodupdata} \
+ {$str [chop_data $method $datastr]}]
+ error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare \
+ $lastdup [pad_data $method $datastr]] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
+
+ set lastkey "THIS WILL NEVER BE A KEY VALUE"
+ # no need to delete $lastkey
+ set firsttimethru 1
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ if { [string compare $k $lastkey] != 0 } {
+ # Remove last key from the checkdb
+ if { $firsttimethru != 1 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+ set firsttimethru 0
+ set lastdup ""
+ set lastkey $k
+ set dups [lindex [lindex [eval {$check_db get} \
+ $txn {$k}] 0] 1]
+ error_check_good check_db:get:$k \
+ [string length $dups] [expr $ndups * 4]
+ }
+
+ if { [string compare $lastdup $d] > 0 } {
+ error_check_good dup_check:$k:$d 0 1
+ }
+ set lastdup $d
+
+ set pref [string range $d 0 3]
+ set ndx [string first $pref $dups]
+ error_check_good valid_duplicate [expr $ndx >= 0] 1
+ set a [string range $dups 0 [expr $ndx - 1]]
+ set b [string range $dups [expr $ndx + 4] end]
+ set dups $a$b
+ }
+ # Remove last key from the checkdb
+ if { [string length $lastkey] != 0 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+
+ # Make sure there is nothing left in check_db
+
+ set check_c [eval {$check_db cursor} $txn]
+ set ret [$check_c get -first]
+ error_check_good check_c:get:$ret [llength $ret] 0
+ error_check_good check_c:close [$check_c close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
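+
+# Editor's sketch (not part of the original Sleepycat suite): -dupsort
+# and -nodupdata in isolation. With sorted duplicates the items come back
+# in sort order regardless of insertion order, and DB_NODUPDATA rejects a
+# key/data pair that is already present. Proc and file names are
+# invented; the proc is defined here but never invoked by the test driver.
+proc demo031_sketch { } {
+	set db [berkdb_open -create -mode 0644 \
+	    -btree -dup -dupsort demo031.db]
+	$db put fruit pear
+	$db put fruit apple
+	# Re-inserting an existing duplicate with -nodupdata is rejected;
+	# the return value names DB_KEYEXIST rather than 0.
+	puts [$db put -nodupdata fruit apple]
+	# A new, distinct duplicate is accepted (returns 0).
+	puts [$db put -nodupdata fruit quince]
+	# Sorted duplicates: apple, pear, quince.
+	set dbc [$db cursor]
+	for {set ret [$dbc get -set fruit]} {[llength $ret] != 0} \
+	    {set ret [$dbc get -nextdup]} {
+		puts [lindex [lindex $ret 0] 1]
+	}
+	$dbc close
+	$db close
+}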
diff --git a/libdb/test/test032.tcl b/libdb/test/test032.tcl
new file mode 100644
index 0000000..4b3bce7
--- /dev/null
+++ b/libdb/test/test032.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
+proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
+ global alphabet rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries small sorted $ndups dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good check_c_open(2) \
+ [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_data:$k $ret [list [list $k $data]]
+ }
+ }
+
+ $db sync
+
+ # Now repeat the above test using cursor ops
+ puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good \
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $pref}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
+ }
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.d: Check error case (no cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.e: Check error case (cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
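+
+# Editor's sketch (not part of the original Sleepycat suite): the
+# difference between -get_both and -get_both_range. -get_both needs the
+# exact data item, while -get_both_range (cursor only) takes a data
+# prefix and returns the smallest duplicate greater than or equal to it.
+# Proc and file names are invented; the proc is defined here but never
+# invoked by the test driver.
+proc demo032_sketch { } {
+	set db [berkdb_open -create -mode 0644 \
+	    -btree -dup -dupsort demo032.db]
+	foreach d {aa:one bb:two cc:three} {
+		$db put fruit $d
+	}
+	# Exact key/data match finds the pair.
+	puts [$db get -get_both fruit bb:two]
+	# No exact match: an empty list.
+	puts [$db get -get_both fruit bb]
+	# Prefix match through a cursor finds {fruit bb:two}.
+	set dbc [$db cursor]
+	puts [$dbc get -get_both_range fruit bb]
+	$dbc close
+	$db close
+}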
diff --git a/libdb/test/test033.tcl b/libdb/test/test033.tcl
new file mode 100644
index 0000000..32831b8
--- /dev/null
+++ b/libdb/test/test033.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
+proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.a: Put/get loop."
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key and dup
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+
+ close $did
+
+ puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
diff --git a/libdb/test/test034.tcl b/libdb/test/test034.tcl
new file mode 100644
index 0000000..b31d563
--- /dev/null
+++ b/libdb/test/test034.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test034
+# TEST test032 with off-page duplicates
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
+proc test034 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test032 $method $nentries 20 34 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test032 $method [expr $nentries / 10] 100 34 -pagesize 512} $args
+}
diff --git a/libdb/test/test035.tcl b/libdb/test/test035.tcl
new file mode 100644
index 0000000..5795b9a
--- /dev/null
+++ b/libdb/test/test035.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test035 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test033 $method $nentries 20 35 -pagesize 512} $args
+ # Test with multiple pages of off-page duplicates
+ eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
+}
diff --git a/libdb/test/test036.tcl b/libdb/test/test036.tcl
new file mode 100644
index 0000000..2ba6f88
--- /dev/null
+++ b/libdb/test/test036.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
+proc test036 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ puts "Test036 skipping for method recno"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test036.db
+ set env NULL
+ } else {
+ set testfile test036.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test036_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test036.check
+ }
+ puts "\tTest036.a: put/get loop KEYFIRST"
+ # Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+	puts "\tTest036.b: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+		set ret [eval {$dbc put} $pflags {-keylast $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+}
+
+# Check function for test036; keys and data are identical
+proc test036.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test036_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
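+
+# Editor's sketch (not part of the original Sleepycat suite): -keyfirst
+# and -keylast cursor puts. On a key that does not exist yet they behave
+# like a plain put (the case test036 exercises); with a duplicate set
+# already present they place the new item at the head or tail of that
+# set. Proc and file names are invented; the proc is defined here but
+# never invoked by the test driver.
+proc demo036_sketch { } {
+	set db [berkdb_open -create -mode 0644 -btree -dup demo036.db]
+	set dbc [$db cursor]
+	# Neither key exists yet, so both puts simply insert the pair.
+	$dbc put -keyfirst apple red
+	$dbc put -keylast banana yellow
+	# With a duplicate now present, -keyfirst places the new item at
+	# the head of the (unsorted) duplicate set for "apple".
+	$dbc put -keyfirst apple green
+	for {set ret [$dbc get -set apple]} {[llength $ret] != 0} \
+	    {set ret [$dbc get -nextdup]} {
+		puts [lindex [lindex $ret 0] 1]	;# green, then red
+	}
+	$dbc close
+	$db close
+}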
diff --git a/libdb/test/test037.tcl b/libdb/test/test037.tcl
new file mode 100644
index 0000000..35c7980
--- /dev/null
+++ b/libdb/test/test037.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test037
+# TEST Test DB_RMW
+proc test037 { method {nentries 100} args } {
+ global encrypt
+
+ source ./include.tcl
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test037 skipping for env $env"
+ return
+ }
+
+ puts "Test037: RMW $method"
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ # Create the database
+ env_cleanup $testdir
+ set testfile test037.db
+
+ set local_env \
+ [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ set db [eval {berkdb_open \
+ -env $local_env -create -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest037.a: Creating database"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ incr count
+ }
+ close $did
+ error_check_good dbclose [$db close] 0
+	error_check_good envclose [$local_env close] 0
+
+ puts "\tTest037.b: Setting up environments"
+
+ # Open local environment
+ set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
+ set local_env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote environment
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good \
+ remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
+
+	# Now try the test without RMW. Gets in one process should not
+	# lock out gets in another.
+
+ # Open databases and dictionary
+ puts "\tTest037.c: Opening databases"
+ set did [open $dict]
+ set rkey 0
+
+ set db [berkdb_open -auto_commit -env $local_env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set rdb [send_cmd $f1 \
+ "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
+
+ puts "\tTest037.d: Testing without RMW"
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn} $gflags {$key}]
+ error_check_good local_get [lindex [lindex $rec 0] 1] \
+ [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ puts "\tTest037.e: Testing with RMW"
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good \
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good remote:txn_open \
+ [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
+ error_check_good \
+ local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ # Close everything up: remote first
+ set r [send_cmd $f1 "$rdb close"]
+ error_check_good remote_db_close $r 0
+
+ set r [send_cmd $f1 "$remote_env close"]
+
+ # Close locally
+ error_check_good db_close [$db close] 0
+ $local_env close
+ close $did
+ close $f1
+}
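+
+# Editor's sketch (not part of the original Sleepycat suite): what -rmw
+# changes, stripped of the two-process timing above. A plain get inside a
+# transaction takes a read lock; adding -rmw takes a write lock instead,
+# so other transactions cannot even read the item until this one commits.
+# The proc name, database name and environment directory (RMWDIR, assumed
+# to exist already) are invented; the proc is defined here but never
+# invoked by the test driver.
+proc demo037_sketch { } {
+	set env [berkdb_env -create -mode 0644 -txn -home RMWDIR]
+	set db [berkdb_open -env $env -auto_commit \
+	    -create -mode 0644 -btree demo037.db]
+	set t [$env txn]
+	$db put -txn $t mykey myval
+	$t commit
+	# Read the item for update; the write lock is held until commit,
+	# blocking readers in other transactions.
+	set t [$env txn]
+	puts [$db get -txn $t -rmw mykey]
+	$t commit
+	$db close
+	$env close
+}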
diff --git a/libdb/test/test038.tcl b/libdb/test/test038.tcl
new file mode 100644
index 0000000..f75f430
--- /dev/null
+++ b/libdb/test/test038.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+	# Now check the duplicates, then delete them and recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+				set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/test039.tcl b/libdb/test/test039.tcl
new file mode 100644
index 0000000..08d0e8e
--- /dev/null
+++ b/libdb/test/test039.tcl
@@ -0,0 +1,211 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method $nentries \
+ small $ndups unsorted dup key/data pairs"
+
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ set xx [expr $x * 3]
+ set check_data \
+ [string range $dups $xx [expr $xx + 1]]:$k
+ error_check_good retrieve $datastr $check_data
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+	# Now check the duplicates, then delete them and recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
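
The test038/test039 headers above describe the two duplicate-lookup flags these
scripts exercise. As a minimal sketch outside the patch (it assumes the libdb
Tcl interface is loaded as in the test harness; the file name dups.db and the
key/data values are purely illustrative), the distinction at the Tcl API level,
with sorted duplicates as in test038, is:

    # Illustrative only: exact vs. range matching within a duplicate set.
    set db [berkdb_open -create -btree -dup -dupsort dups.db]
    $db put fruit aa:apple
    $db put fruit bb:banana
    set dbc [$db cursor]
    # -get_both needs an exact key/data pair to succeed
    puts [$dbc get -get_both fruit aa:apple]        ;# {{fruit aa:apple}}
    # -get_both_range settles on the smallest duplicate >= the data given
    puts [$dbc get -get_both_range fruit bb]        ;# {{fruit bb:banana}}
    $dbc close
    $db close
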
diff --git a/libdb/test/test040.tcl b/libdb/test/test040.tcl
new file mode 100644
index 0000000..1432df8
--- /dev/null
+++ b/libdb/test/test040.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test040 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test038 $method $nentries 20 40 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test038 $method [expr $nentries / 10] 100 40 -pagesize 512} $args
+}
diff --git a/libdb/test/test041.tcl b/libdb/test/test041.tcl
new file mode 100644
index 0000000..a3bad13
--- /dev/null
+++ b/libdb/test/test041.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test041 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test039 $method $nentries 20 41 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test039 $method [expr $nentries / 10] 100 41 -pagesize 512} $args
+}
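
test040 and test041 above simply re-run test038/test039 with "-pagesize 512"
and larger duplicate sets so that the duplicates no longer fit on the leaf
page. A hedged sketch of how that situation is forced (file name and counts
are illustrative, not taken from the patch):

    # Illustrative only: ~100 duplicates on a 512-byte page spill off-page.
    set db [berkdb_open -create -btree -dup -dupsort -pagesize 512 offpage.db]
    for {set i 0} {$i < 100} {incr i} {
        $db put samekey [format dup%03d $i]
    }
    set dbc [$db cursor]
    set n 0
    for {set r [$dbc get -set samekey]} \
        {[llength $r] != 0} \
        {set r [$dbc get -nextdup]} {
        incr n
    }
    puts $n      ;# expected: 100
    $dbc close
    $db close
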
diff --git a/libdb/test/test042.tcl b/libdb/test/test042.tcl
new file mode 100644
index 0000000..cb38b98
--- /dev/null
+++ b/libdb/test/test042.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite, sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
+
+proc test042 { method {nentries 1000} args } {
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test042 skipping for env $env"
+ return
+ }
+
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
+
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
+ puts "Test042: CDB Test ($eflag) $method $nentries"
+
+ # Set initial parameters
+ set do_exit 0
+ set iter 10000
+ set procs 5
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -dir { incr i; set testdir [lindex $args $i] }
+ -iter { incr i; set iter [lindex $args $i] }
+ -procs { incr i; set procs [lindex $args $i] }
+ -exit { set do_exit 1 }
+ default { append oargs " " [lindex $args $i] }
+ }
+ }
+
+ # Create the database and open the dictionary
+ set testfile test042.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $testfile 0
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $testfile $i
+ }
+ }
+
+	# Remove old mpools and open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Now spawn off processes
+ berkdb debug_check
+ puts "\tTest042.b: forking off $procs children"
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $testfile$i
+ } else {
+ set tf ${testfile}0
+ }
+ puts "exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log \
+ $method $testdir $tf $nentries $iter $i $procs &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log $method \
+ $testdir $tf $nentries $iter $i $procs &]
+ lappend pidlist $p
+ }
+ puts "Test042: $procs independent processes now running"
+ watch_procs $pidlist
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test042.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Test is done, blow away lock and mpool region
+ reset_env $env
+}
+
+# If we are renumbering, then each time we delete an item, the number of
+# items in the file is temporarily decreased, so the highest record numbers
+# do not exist. To make sure we never request one of these missing records,
+# we never generate the highest few record numbers as keys.
+#
+# For record-based methods, record numbers begin at 1, while for other keys,
+# we begin at 0 to index into an array.
+proc rand_key { method nkeys renum procs} {
+ if { $renum == 1 } {
+ return [berkdb random_int 1 [expr $nkeys - $procs]]
+ } elseif { [is_record_based $method] == 1 } {
+ return [berkdb random_int 1 $nkeys]
+ } else {
+ return [berkdb random_int 0 [expr $nkeys - 1]]
+ }
+}
+
+proc test042_dbinit { env nentries method oargs tf ext } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$tf$ext}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf$ext"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
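
test042 exercises the Concurrent Data Store product from several processes via
mdbscript.tcl. Stripped of the multi-process machinery, a CDB environment is an
ordinary environment opened with -cdb, which serializes writers with internal
locking and involves no transactions or recovery; a single-process sketch
(directory and file names are illustrative, not part of the patch):

    # Illustrative only: open a CDB environment and a database inside it.
    file mkdir ./CDBDIR
    set env [berkdb_env -create -cdb -home ./CDBDIR]
    set db [berkdb_open -env $env -create -btree cdb.db]
    $db put somekey somedata
    puts [$db get somekey]       ;# {{somekey somedata}}
    $db close
    $env close
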
diff --git a/libdb/test/test043.tcl b/libdb/test/test043.tcl
new file mode 100644
index 0000000..fa8eb33
--- /dev/null
+++ b/libdb/test/test043.tcl
@@ -0,0 +1,192 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
+proc test043 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test043: $method ($args)"
+
+ if { [is_record_based $method] != 1 } {
+ puts "Test043 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test043.db
+ set env NULL
+ } else {
+ set testfile test043.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags " -recno"
+ set txn ""
+
+ # First test implicit creation and retrieval
+ set count 1
+ set interval 5
+ if { $nentries < $interval } {
+ set nentries [expr $interval + 1]
+ }
+ puts "\tTest043.a: insert keys at $interval record intervals"
+ while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$count [chop_data $method $count]}]
+ error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ set last $count
+ incr count $interval
+ }
+
+ puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [pad_data $method [lindex [lindex $rec 0] 1]]
+ error_check_good "$dbc get key==data" [pad_data $method $k] $d
+ error_check_good "$dbc get sequential" $k $check
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ incr check $interval
+ }
+
+ # Now make sure that we get DB_KEYEMPTY for non-existent keys
+ puts "\tTest043.c: Retrieve non-existent keys"
+ global errorInfo
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+
+ set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
+ error_check_good "$db \
+ get [expr $k + 1]" $ret [list]
+
+ incr check $interval
+ # Make sure we don't do a retrieve past the end of file
+ if { $check >= $last } {
+ break
+ }
+ }
+
+ # Now try deleting and make sure the right thing happens.
+ puts "\tTest043.d: Delete tests"
+ set rec [$dbc get -first]
+ error_check_bad "$dbc get -first" [llength $rec] 0
+ error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
+ error_check_good "$dbc get -first data" \
+ [lindex [lindex $rec 0] 1] [pad_data $method 1]
+
+ # Delete the first item
+ error_check_good "$dbc del" [$dbc del] 0
+
+ # Retrieving 1 should always fail
+ set ret [eval {$db get} $txn $gflags {1}]
+ error_check_good "$db get 1" $ret [list]
+
+ # Now, retrieving other keys should work; keys will vary depending
+ # upon renumbering.
+ if { $do_renumber == 1 } {
+ set count [expr 0 + $interval]
+ set max [expr $nentries - 1]
+ } else {
+ set count [expr 1 + $interval]
+ set max $nentries
+ }
+
+ while { $count <= $max } {
+ set rec [eval {$db get} $txn $gflags {$count}]
+ if { $do_renumber == 1 } {
+ set data [expr $count + 1]
+ } else {
+ set data $count
+ }
+ error_check_good "$db get $count" \
+ [pad_data $method $data] [lindex [lindex $rec 0] 1]
+ incr count $interval
+ }
+ set max [expr $count - $interval]
+
+ puts "\tTest043.e: Verify LAST/PREV functionality"
+ set count $max
+ for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
+ set rec [$dbc get -prev] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [lindex [lindex $rec 0] 1]
+ if { $do_renumber == 1 } {
+ set data [expr $k + 1]
+ } else {
+ set data $k
+ }
+ error_check_good \
+ "$dbc get key==data" [pad_data $method $data] $d
+ error_check_good "$dbc get sequential" $k $count
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ set count [expr $count - $interval]
+ if { $count < 1 } {
+ break
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
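
The implicit creation and renumbering that test043 checks can be reproduced in
a few lines; a sketch under the same assumptions as the rrecno runs (the file
name renum.db is illustrative):

    # Illustrative only: implicit creation, then renumbering on delete.
    set db [berkdb_open -create -renumber -recno renum.db]
    $db put 1 first
    $db put 5 fifth       ;# records 2-4 spring into existence as empty records
    $db del 1             ;# with -renumber the remaining records shift down
    puts [$db get 4]      ;# expected: {{4 fifth}}
    $db close
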
diff --git a/libdb/test/test044.tcl b/libdb/test/test044.tcl
new file mode 100644
index 0000000..94c79e4
--- /dev/null
+++ b/libdb/test/test044.tcl
@@ -0,0 +1,250 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
+#
+# XXX
+# This test uses grow-only files currently!
+proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
+ source ./include.tcl
+ global encrypt
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test044 skipping for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
+
+ puts "Test044: system integration test db $method $nprocs processes \
+ on $nfiles files"
+
+ # Parse options
+ set otherargs ""
+ set key_avg 10
+ set data_avg 20
+ set do_exit 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -key_avg { incr i; set key_avg [lindex $args $i] }
+ -data_avg { incr i; set data_avg [lindex $args $i] }
+ -testdir { incr i; set testdir [lindex $args $i] }
+ -x.* { set do_exit 1 }
+ default {
+ lappend otherargs [lindex $args $i]
+ }
+ }
+ }
+
+ if { $cont == 0 } {
+ # Create the database and open the dictionary
+ env_cleanup $testdir
+
+ # Create an environment
+ puts "\tTest044.a: creating environment and $nfiles files"
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Create a bunch of files
+ set m $method
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $method == "all" } {
+ switch [berkdb random_int 1 2] {
+ 1 { set m -btree }
+ 2 { set m -hash }
+ }
+ } else {
+ set m $omethod
+ }
+
+ set db [eval {berkdb_open -env $dbenv -create \
+ -mode 0644 $m} $otherargs {test044.$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+ # Close the environment
+ $dbenv close
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Database is created, now fork off the kids.
+ puts "\tTest044.b: forking off $nprocs processes and utilities"
+ set cycle 1
+ set ncycles 3
+ while { $cycle <= $ncycles } {
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Fire off deadlock detector and checkpointer
+ puts "Beginning cycle $cycle"
+ set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
+ set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
+ puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
+
+ set pidlist {}
+ for { set i 0 } {$i < $nprocs} {incr i} {
+ set p [exec $tclsh_path \
+ $test_path/sysscript.tcl $testdir \
+ $nfiles $key_avg $data_avg $omethod \
+ >& $testdir/test044.$i.log &]
+ lappend pidlist $p
+ }
+ set sleep [berkdb random_int 300 600]
+ puts \
+"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
+ tclsleep $sleep
+
+ # Now simulate a crash
+ puts "[timestamp] Crashing"
+
+ #
+ # The environment must remain open until this point to get
+ # proper sharing (using the paging file) on Win/9X. [#2342]
+ #
+ error_check_good env_close [$dbenv close] 0
+
+ tclkill $ddpid
+ tclkill $cppid
+
+ foreach p $pidlist {
+ tclkill $p
+ }
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test044.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Now run recovery
+ test044_verify $testdir $nfiles
+ incr cycle
+ }
+}
+
+proc test044_usage { } {
+	puts -nonewline {test044 method nentries [-d directory] [-i iterations]}
+	puts { [-p procs] -x}
+}
+
+proc test044_verify { dir nfiles } {
+ source ./include.tcl
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save1
+# }
+# }
+
+ # Run recovery and then read through all the database files to make
+ # sure that they all look good.
+
+ puts "\tTest044.verify: Running recovery and verifying file contents"
+ set stat [catch {exec $util_path/db_recover -h $dir} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save2
+# }
+# }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ set db($f) [berkdb_open $dir/test044.$f.db]
+ error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
+
+ set cursors($f) [$db($f) cursor]
+ error_check_bad $f:cursor_open $cursors($f) NULL
+ error_check_good \
+ $f:cursor_open [is_substr $cursors($f) $db($f)] 1
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ for {set d [$cursors($f) get -first] } \
+ { [string length $d] != 0 } \
+ { set d [$cursors($f) get -next] } {
+
+ set k [lindex [lindex $d 0] 0]
+ set d [lindex [lindex $d 0] 1]
+
+ set flist [zero_list $nfiles]
+ set r $d
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $flist $fnum] == 0 } {
+ set fl "-set"
+ } else {
+ set fl "-next"
+ }
+
+ if { $fl != "-set" || $fnum != $f } {
+ if { [string compare $fl "-set"] == 0} {
+ set full [$cursors($fnum) \
+ get -set $k]
+ } else {
+ set full [$cursors($fnum) \
+ get -next]
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [lindex [lindex $full 0] 1]
+ error_check_good \
+ $f:dbget_$fnum:key $key $k
+ error_check_good \
+ $f:dbget_$fnum:data $rec $d
+ }
+
+ set flist [lreplace $flist $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ error_check_good $cursors($f) [$cursors($f) close] 0
+ error_check_good db_close:$f [$db($f) close] 0
+ }
+}
diff --git a/libdb/test/test045.tcl b/libdb/test/test045.tcl
new file mode 100644
index 0000000..6022881
--- /dev/null
+++ b/libdb/test/test045.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
+#
+# Options are:
+# -adds <maximum number of keys before you disable adds>
+# -cursors <number of cursors>
+# -dataavg <average data size>
+# -delete <minimum number of keys before you disable deletes>
+# -dups <allow duplicates in file>
+# -errpct <Induce errors errpct of the time>
+# -init <initial number of entries in database>
+# -keyavg <average key size>
+proc test045 { method {nops 10000} args } {
+ source ./include.tcl
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test045 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
+ set omethod [convert_method $method]
+
+ puts "Test045: Random tester on $method for $nops operations"
+
+ # Set initial parameters
+ set adds [expr $nops * 10]
+ set cursors 5
+ set dataavg 40
+ set delete $nops
+ set dups 0
+ set errpct 0
+ set init 0
+ if { [is_record_based $method] == 1 } {
+ set keyavg 10
+ } else {
+ set keyavg 25
+ }
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -adds { incr i; set adds [lindex $args $i] }
+ -cursors { incr i; set cursors [lindex $args $i] }
+ -dataavg { incr i; set dataavg [lindex $args $i] }
+ -delete { incr i; set delete [lindex $args $i] }
+ -dups { incr i; set dups [lindex $args $i] }
+ -errpct { incr i; set errpct [lindex $args $i] }
+ -init { incr i; set init [lindex $args $i] }
+ -keyavg { incr i; set keyavg [lindex $args $i] }
+ -extent { incr i;
+ lappend oargs "-extent" "100" }
+ default { lappend oargs [lindex $args $i] }
+ }
+ }
+
+	# Create the database and initialize it.
+ set root $testdir/test045
+ set f $root.db
+ env_cleanup $testdir
+
+ # Run the script with 3 times the number of initial elements to
+ # set it up.
+ set db [eval {berkdb_open \
+ -create -mode 0644 $omethod} $oargs {$f}]
+ error_check_good dbopen:$f [is_valid_db $db] TRUE
+
+ set r [$db close]
+ error_check_good dbclose:$f $r 0
+
+ # We redirect standard out, but leave standard error here so we
+ # can see errors.
+
+ puts "\tTest045.a: Initializing database"
+ if { $init != 0 } {
+ set n [expr 3 * $init]
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f $n \
+ 1 $init $n $keyavg $dataavg $dups 0 -1 \
+ > $testdir/test045.init
+ }
+ # Check for test failure
+ set e [findfail $testdir/test045.init]
+ error_check_good "FAIL: error message(s) in init file" $e 0
+
+ puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
+ # Now the database is initialized, run a test
+ puts "$tclsh_path\
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
+ $keyavg $dataavg $dups $errpct > $testdir/test045.log"
+
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f \
+ $nops $cursors $delete $adds $keyavg \
+ $dataavg $dups $errpct \
+ > $testdir/test045.log
+
+ # Check for test failure
+ set e [findfail $testdir/test045.log]
+ error_check_good "FAIL: error message(s) in log file" $e 0
+
+}
diff --git a/libdb/test/test046.tcl b/libdb/test/test046.tcl
new file mode 100644
index 0000000..9f2e328
--- /dev/null
+++ b/libdb/test/test046.tcl
@@ -0,0 +1,813 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
+proc test046 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest046: Overwrite test with cursor and small/big key/data."
+ puts "\tTest046:\t$method $args"
+
+ if { [is_rrecno $method] == 1} {
+ puts "\tTest046: skipping for method $method."
+ return
+ }
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ if { [is_record_based $method] == 1} {
+ set key ""
+ }
+
+ puts "\tTest046: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test046.db
+ set env NULL
+ } else {
+ set testfile test046.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1} {
+ set ret [eval {$db put} $txn {$i $data$i}]
+ } elseif { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
+ } elseif { $i < 100 } {
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest046.a: Deletes by key."
+ puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
+	# get key in middle of page; call this the nth, and set curr to it
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr $ret
+
+ # delete before cursor(n-1), make sure it is gone
+ set i [expr $i - 1]
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
+
+ # use set_range to get first key starting at n-1, should
+ # give us nth--but only works for btree
+ if { [is_btree $method] == 1 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([incr i])]
+ incr i -1
+ }
+ error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
+ error_check_good dbc_get(match):set $ret $curr
+
+ puts "\t\tTest046.a.2: Delete cursor item by key."
+ # nth key, which cursor should be on now
+ set i [incr i]
+ set ret [eval {$db del} $txn {$key_set($i)}]
+ error_check_good db_del $ret 0
+
+ # this should return n+1 key/data, curr has nth key/data
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i+1])]
+ }
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\t\tTest046.a.3: Delete item after cursor."
+ # we'll delete n+2, since we have deleted n-1 and n
+	# i is still equal to n, cursor is on n+1
+ set i [incr i]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr [$dbc get -next]
+ error_check_bad dbc_get:next [llength $curr] 0
+ set ret [$dbc get -prev]
+	error_check_bad dbc_get:prev [llength $ret] 0
+ # delete *after* cursor pos.
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
+
+ # make sure item is gone, try to get it
+ if { [string compare $omethod "-btree"] == 0} {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i +1])]
+ }
+ error_check_bad dbc_get:set(_range) [llength $ret] 0
+ error_check_bad dbc_get:set(_range) $ret $curr
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
+ $key_set([expr $i+1])
+
+ puts "\tTest046.b: Deletes by cursor."
+ puts "\t\tTest046.b.1: Delete, do DB_NEXT."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [expr $i+2]
+ # i = n+4
+ error_check_good dbc_get:next(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.2: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [expr $i-3]
+ # i = n+1 (deleted all in between)
+ error_check_good dbc_get:prev(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
+ error_check_good dbc:del [$dbc del] 0
+ # we just deleted, so current item should be KEYEMPTY, throws err
+ set ret [$dbc get -current]
+ error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
+ #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+
+ puts "\tTest046.c: Inserts (before/after), by key then cursor."
+ puts "\t\tTest046.c.1: Insert by key before the cursor."
+ # i is at curs pos, i=n+1, we want to go BEFORE
+ set i [incr i -1]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:before $ret 0
+
+ puts "\t\tTest046.c.2: Insert by key after the cursor."
+ set i [incr i +2]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:after $ret 0
+
+ puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
+ # cursor is on n+1, we'll change i to match
+ set i [incr i -1]
+
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+ if { [is_record_based $method] == 1} {
+ puts "\t\tSkipping the rest of test for method $method."
+ puts "\tTest046 ($method) complete."
+ return
+ } else {
+ # Reopen without printing __db_errs.
+ set db [eval {berkdb_open_noerr} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # should fail with EINVAL (deleted cursor)
+ set errorCode NONE
+ error_check_good catch:put:before 1 \
+ [catch {$dbc put -before $data_set($i)} ret]
+ error_check_good dbc_put:deleted:before \
+ [is_substr $errorCode "EINVAL"] 1
+
+ # should fail with EINVAL
+ set errorCode NONE
+ error_check_good catch:put:after 1 \
+ [catch {$dbc put -after $data_set($i)} ret]
+ error_check_good dbc_put:deleted:after \
+ [is_substr $errorCode "EINVAL"] 1
+
+ puts "\t\tTest046.c.4:\
+ Insert by cursor before/after existent cursor."
+		# can't use -before/-after without dups, except with renumbering recno
+ # first, restore an item so they don't fail
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ #error_check_good db_put $ret 0
+
+ #set ret [$dbc get -set $key_set($i)]
+ #error_check_bad dbc_get:set [llength $ret] 0
+ #set i [incr i -2]
+ # i = n - 1
+ #set ret [$dbc get -prev]
+ #set ret [$dbc put -before $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:before $ret 0
+ # cursor pos is adjusted to match prev, recently inserted
+ #incr i
+ # i = n
+ #set ret [$dbc put -after $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:after $ret 0
+ }
+
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile.d]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
+ # Prepare cursor on item
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of key/data
+ foreach ptype {init over} {
+ foreach size {big small} {
+ if { [string compare $size big] == 0 } {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 250]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 250]
+ } else {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 10]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 10]
+ }
+ }
+ }
+
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type {key_over curs_over} {
+ # Overwrite (i=initial) four different kinds of pairs
+ incr i
+ puts "\tTest046.d: Overwrites $type."
+ foreach i_pair {\
+ {small small} {big small} {small big} {big big} } {
+ # Overwrite (w=write) with four different kinds of data
+ foreach w_pair {\
+ {small small} {big small} {small big} {big big} } {
+
+ # we can only overwrite if key size matches
+ if { [string compare [lindex \
+ $i_pair 0] [lindex $w_pair 0]] != 0} {
+ continue
+ }
+
+ # first write the initial key/data
+ set ret [$dbc put -keyfirst \
+ key_init[lindex $i_pair 0] \
+ data_init[lindex $i_pair 1]]
+ error_check_good \
+ dbc_put:curr:init:$i_pair $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ data_init[lindex $i_pair 1]
+
+ # Now, try to overwrite: dups not supported in
+ # this db
+ if { [string compare $type key_over] == 0 } {
+ puts "\t\tTest046.d.$i: Key\
+ Overwrite:($i_pair) by ($w_pair)."
+ set ret [eval {$db put} $txn \
+ $"key_init[lindex $i_pair 0]" \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [eval {$db get} $txn \
+ $"key_init[lindex $i_pair 0]"]
+ error_check_bad \
+ db:get:check [llength $ret] 0
+ error_check_good db:get:compare_data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ } else {
+ # This is a cursor overwrite
+ puts \
+ "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)."
+ set ret [$dbc put -current \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbcput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$dbc get -current]
+ error_check_bad \
+ dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ }
+ } ;# foreach write pair
+ } ;# foreach initial pair
+ } ;# foreach type big/small
+
+ puts "\tTest046.d.3: Cleanup for next part of test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\tSkipping the rest of Test046 for method $method."
+ puts "\tTest046 complete."
+ return
+ }
+
+ puts "\tTest046.e.1: Open db with sorted dups."
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+ set ndups 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.e.2:\
+ Put $nkeys small key/data pairs and $ndups sorted dups."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put 20 sorted duplicates on key in middle of page
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+
+ set keym $key_set($i)
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
+ } else {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
+ }
+ error_check_good db_put:DUP($i) $ret 0
+ }
+
+ puts "\tTest046.e.3: Check duplicate duplicates"
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
+ error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
+
+ # get dup ordering
+ for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
+ set ret [$dbc get -nextdup] } {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put cursor on item in middle of dups
+ set i [expr $ndups/2]
+ set ret [$dbc get -get_both $keym $dup_set($i)]
+ error_check_bad dbc_get:get_both [llength $ret] 0
+
+ puts "\tTest046.f: Deletes by cursor."
+ puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
+
+ puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [incr i -2]
+ error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
+ #error_check_good \
+ # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # restore deleted keys
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # tested above
+
+ # Reopen database without __db_err, reset cursor
+ error_check_good dbclose [$db close] 0
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret2 [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret2] 0
+ # match
+ error_check_good dbc_get:current/set(match) $ret $ret2
+ # right one?
+ error_check_good \
+ dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
+
+ # cursor is on first dup
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # now on second dup
+ error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
+ # check cursor
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\tTest046.g: Inserts."
+ puts "\t\tTest046.g.1: Insert by key before cursor."
+ set i 0
+
+ # use "spam" to prevent a duplicate duplicate.
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
+ error_check_good db_put:before $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.2: Insert by key after cursor."
+ set i [expr $i + 2]
+ # use "eggs" to prevent a duplicate duplicate
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
+ error_check_good db_put:after $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
+ # should return EINVAL (dupsort specified)
+ error_check_good dbc_put:before:catch \
+ [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
+ error_check_good \
+ dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
+ error_check_good dbc_put:after:catch \
+ [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
+ error_check_good \
+ dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
+
+ puts "\tTest046.h: Cursor overwrites."
+ puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
+ error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest046.h.2: New db (no dupsort)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ for {set i 0} {$i < $nkeys} {incr i} {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
+ } else {
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
+ }
+ if { $i == 0 } {
+ for {set j 0} {$j < $ndups} {incr j} {
+ if { $i < 10 } {
+ set keyput key0$i
+ } else {
+ set keyput key$i
+ }
+ if { $j < 10 } {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
+ } else {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
+ }
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ for {set i 0; set ret [$dbc get -set key00]} {\
+ [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+ set i 0
+ set keym key0$i
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ set ret [$dbc get -nextdup]
+ error_check_bad dbc_get:nextdup [llength $ret] 0
+ error_check_good dbc_get:nextdup(match) \
+ [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
+
+ puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
+ set ret [$dbc put -before BEFOREPUT]
+ error_check_good dbc_put:before $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
+ # make sure that this is actually a dup w/ dup before
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good dbc_get:prev:match \
+ [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -prev]
+ # should not be a dup
+ error_check_bad dbc_get:prev(no_dup) \
+ [lindex [lindex $ret 0] 0] $keym
+
+ puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -set $keym]
+
+ # delete next 3 when fix
+ #puts "[$dbc get -current]\
+ # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set $keym]
+
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret [$dbc put -after AFTERPUT]
+ error_check_good dbc_put:after $ret 0
+ #puts [$dbc get -current]
+
+ # delete next 3 when fix
+ #set ret [$dbc get -set $keym]
+ #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set AFTERPUT]
+ #set ret [$dbc get -set $keym]
+ #set ret [$dbc get -next]
+ #puts $ret
+
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -prev]
+ # now should be on first item (non-dup) of keym
+ error_check_bad dbc_get:prev1 [llength $ret] 0
+ error_check_good \
+ dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # this is the dup we added previously
+ error_check_good \
+ dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
+
+ # now get rid of the dups we added
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev2 [llength $ret] 0
+ error_check_good dbc_del2 [$dbc del] 0
+ # put cursor on first dup item for the rest of test
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good \
+ dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.h.5: Overwrite small by small."
+ set ret [$dbc put -current DATA_OVERWRITE]
+ error_check_good dbc_put:current:overwrite $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/small) \
+ [lindex [lindex $ret 0] 1] DATA_OVERWRITE
+
+ puts "\t\tTest046.h.6: Overwrite small with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite:big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
+
+ puts "\t\tTest046.h.7: Overwrite big with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite(2):big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
+
+ puts "\t\tTest046.h.8: Overwrite big with small."
+ set ret [$dbc put -current DATA_OVERWRITE2]
+ error_check_good dbc_put:current:overwrite:small $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/small) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
+
+ puts "\tTest046.i: Cleaning up from test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest046 complete."
+}
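
The overwrite cases in Test046.h.5-h.8 come down to replacing whatever data
item the cursor is positioned on, whatever its size, with a -current put; a
minimal sketch for a database without sorted duplicates (file name and data
are illustrative, not part of the patch):

    # Illustrative only: grow and shrink the item under the cursor in place.
    set db [berkdb_open -create -btree over.db]
    $db put akey small
    set dbc [$db cursor]
    $dbc get -set akey
    $dbc put -current [string repeat x 2000]    ;# small -> big
    $dbc put -current tiny                      ;# big -> small
    puts [$dbc get -current]                    ;# {{akey tiny}}
    $dbc close
    $db close
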
diff --git a/libdb/test/test047.tcl b/libdb/test/test047.tcl
new file mode 100644
index 0000000..d91168d
--- /dev/null
+++ b/libdb/test/test047.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test047
+# TEST DBcursor->c_get get test with SET_RANGE option.
+proc test047 { method args } {
+ source ./include.tcl
+
+ set tstn 047
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of SET_RANGE interface to DB->c_get ($method)."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set testfile1 $testdir/test0$tstn.a.db
+ set testfile2 $testdir/test0$tstn.b.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ set testfile1 test0$tstn.a.db
+ set testfile2 test0$tstn.b.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 -dup $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 20
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
+ set i 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ set curr $ret
+
+ # delete by cursor, make sure it is gone
+ error_check_good dbc_del [$dbc del] 0
+
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\tTest$tstn.d: \
+ Use another cursor to fix item on page, delete by db."
+ set dbcurs2 [eval {$db cursor} $txn]
+ error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
+
+ set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
+ error_check_bad dbc_get(2):set [llength $ret] 0
+ set curr $ret
+ error_check_good db:del [eval {$db del} $txn \
+ {[lindex [lindex $ret 0] 0]}] 0
+
+ # make sure item is gone
+ set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+ error_check_bad dbc2_get:set_range $ret $curr
+
+ puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good dbc2:close [$dbcurs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ # open db
+ set db [eval {berkdb_open} $oflags $testfile1]
+ error_check_good dbopen2 [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set j 0
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a dup set for same 1 key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
+ error_check_good dbput($i):dup $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tstn.g: \
+ Get dups key w/ SET_RANGE, pin onpage with another cursor."
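+	# With duplicates present, DB_SET_RANGE positions on the first
+	# duplicate of the matching key; the second cursor keeps the page
+	# referenced while the first duplicate is deleted.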
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+
+ error_check_good dbc_compare $ret $ret2
+ puts "\tTest$tstn.h: \
+ Delete duplicates' key, use SET_RANGE to get next dup."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $ret2
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile2]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ set ndups 1000
+
+ puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
+ for {set i 0} { $i < $nkeys } { incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+
+ # dups for single pair
+ if { $i == 0} {
+ for {set j 0} { $j < $ndups } { incr j } {
+ set ret [eval {$db put} $txn \
+ {$key$i DUP_$data$i:$j}]
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ puts "\tTest$tstn.j: \
+ Get key of first dup with SET_RANGE, fix with 2 curs."
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+ set curr $ret2
+
+ error_check_good dbc_compare $ret $ret2
+
+ puts "\tTest$tstn.k: Delete item by cursor, use SET_RANGE to verify."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $curr
+
+ puts "\tTest$tstn.l: Cleanup."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/libdb/test/test048.tcl b/libdb/test/test048.tcl
new file mode 100644
index 0000000..4222a57
--- /dev/null
+++ b/libdb/test/test048.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test048
+# TEST Cursor stability across Btree splits.
+proc test048 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 048
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test048: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0; set ret [$db get key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i \
+ [is_valid_cursor $dbc_set($i) $db] TRUE
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [eval {$db put} $txn {key0$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {key00$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure split happened."
+ # XXX We cannot call stat with active txns or we deadlock.
+ if { $txnenv != 1 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key0$i}] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key00$i}] 0
+ } else {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key000$i}] 0
+ }
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ puts "\tTest$tstn.j: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/libdb/test/test049.tcl b/libdb/test/test049.tcl
new file mode 100644
index 0000000..63b2bf0
--- /dev/null
+++ b/libdb/test/test049.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
+proc test049 { method args } {
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 049
+ set renum [is_rrecno $method]
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+ set rflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ }
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $rflags $omethod $args"
+ if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
+ append oflags " -dup"
+ }
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput:$i $ret 0
+ if { $i == 1 } {
+ for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
+ set ret [eval {$db put} $txn \
+ {$key$i DUPLICATE$j}]
+ error_check_good dbput:dup:$j $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # DBC GET
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
+ puts "\tTest$tstn.c: Test dbc->get interfaces..."
+ set i 0
+ foreach flag { current first last next prev nextdup} {
+ puts "\t\t...dbc->get($flag)"
+ catch {$dbc_u get -$flag} ret
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ foreach flag { set set_range get_both} {
+ puts "\t\t...dbc->get($flag)"
+ if { [string compare $flag get_both] == 0} {
+ catch {$dbc_u get -$flag $key$i data0} ret
+ } else {
+ catch {$dbc_u get -$flag $key$i} ret
+ }
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ puts "\t\t...dbc->get(current, partial)"
+ catch {$dbc_u get -current -partial {0 0}} ret
+ error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
+
+ puts "\t\t...dbc->get(current, rmw)"
+ catch {$dbc_u get -rmw -current } ret
+ error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
+
+ puts "\tTest$tstn.d: Test dbc->put interface..."
+ # partial...depends on another
+ foreach flag { after before current keyfirst keylast } {
+ puts "\t\t...dbc->put($flag)"
+ if { [string match key* $flag] == 1 } {
+ if { [is_record_based $method] == 1 } {
+ # keyfirst/keylast not allowed in recno
+ puts "\t\t...Skipping dbc->put($flag) for $method."
+ continue
+ } else {
+ # keyfirst/last should succeed
+ puts "\t\t...dbc->put($flag)...should succeed for $method"
+ error_check_good dbcput:$flag \
+ [$dbc_u put -$flag $key$i data0] 0
+
+ # now uninitialize cursor
+ error_check_good dbc_close [$dbc_u close] 0
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good \
+ db_cursor [is_substr $dbc_u $db] 1
+ }
+ } elseif { [string compare $flag before ] == 0 ||
+ [string compare $flag after ] == 0 } {
+ if { [is_record_based $method] == 0 &&
+ [is_rbtree $method] == 0} {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ } elseif { $renum == 1 } {
+ # Renumbering recno will return a record number
+ set currecno \
+ [lindex [lindex [$dbc_u get -current] 0] 0]
+ set ret [$dbc_u put -$flag data0]
+ if { [string compare $flag after] == 0 } {
+ error_check_good "$dbc_u put $flag" \
+ $ret [expr $currecno + 1]
+ } else {
+ error_check_good "$dbc_u put $flag" \
+ $ret $currecno
+ }
+ } else {
+ puts "\t\tSkipping $flag for $method"
+ }
+ } else {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ }
+ }
+ # and partial
+ puts "\t\t...dbc->put(partial)"
+ catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
+ error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
+
+ # XXX dbc->dup, db->join (dbc->get join_item)
+ # dbc del
+ puts "\tTest$tstn.e: Test dbc->del interface."
+ catch {$dbc_u del} ret
+ error_check_good dbc_del [is_substr $errorCode EINVAL] 1
+
+ error_check_good dbc_close [$dbc_u close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/libdb/test/test050.tcl b/libdb/test/test050.tcl
new file mode 100644
index 0000000..97f4235
--- /dev/null
+++ b/libdb/test/test050.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
+proc test050 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 050
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_rrecno $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ puts "\tTest$tstn:\
+ Overwrite test with cursor and small/big key/data ($method)."
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # verify ordering: should be unnecessary, but hey, why take chances?
+ # key_set is zero indexed but keys start at 1
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good \
+ verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
+ }
+
+ puts "\tTest$tstn.a: Inserts before/after by cursor."
+ puts "\t\tTest$tstn.a.1:\
+ Insert with uninitialized cursor (should fail)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ catch {$dbc put -before DATA1} ret
+ error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
+
+ catch {$dbc put -after DATA2} ret
+ error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc put -current DATAOVER1]
+ error_check_good dbc_put:current:deleted $ret 0
+
+ puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -before DATAPUTBEFORE]
+ error_check_good dbc_put:before $ret $currecno
+ set old1 [$dbc get -next]
+ error_check_bad dbc_get:next [llength $old1] 0
+ error_check_good \
+ dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
+
+ puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -after DATAPUTAFTER]
+ error_check_good dbc_put:after $ret [expr $currecno + 1]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good \
+ dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+
+ puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
+ # should be $nkeys + 2 keys, starting at 1
+ for {set i 1; set ret [$dbc get -first]} { \
+ $i <= $nkeys && [llength $ret] != 0 } {\
+ incr i; set ret [$dbc get -next]} {
+ error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
+ }
+
+ # tested above
+
+ puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ #
+ # we should have ($nkeys + 2) keys, ordered:
+ # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
+ #
+ # Prepare cursor on item
+ #
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of data
+	set databig DATA_BIG_[repeat $alphabet 250]
+ set datasmall DATA_SMALL
+
+ # Now, we want to overwrite data:
+ # by key and by cursor
+ # 1. small by small
+ # 2. small by big
+ # 3. big by small
+ # 4. big by big
+ #
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type { by_key by_cursor } {
+ incr i
+ puts "\tTest$tstn.b.$i: Overwrites $type."
+ foreach pair { {small small} \
+ {small big} {big small} {big big} } {
+ # put in initial type
+ set data $data[lindex $pair 0]
+ set ret [$dbc put -current $data]
+ error_check_good dbc_put:curr:init:($pair) $ret 0
+
+ # Now, try to overwrite: dups not supported in this db
+ if { [string compare $type by_key] == 0 } {
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair):$type"
+ set ret [eval {$db put} $txn \
+ 1 {OVER$pair$data[lindex $pair 1]}]
+ error_check_good dbput:over:($pair) $ret 0
+ } else {
+ # This is a cursor overwrite
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair) by cursor."
+ set ret [$dbc put \
+ -current OVER$pair$data[lindex $pair 1]]
+ error_check_good dbcput:over:($pair) $ret 0
+ }
+ } ;# foreach pair
+ } ;# foreach type key/cursor
+
+ puts "\tTest$tstn.c: Cleanup and close cursor."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
diff --git a/libdb/test/test051.tcl b/libdb/test/test051.tcl
new file mode 100644
index 0000000..2383926
--- /dev/null
+++ b/libdb/test/test051.tcl
@@ -0,0 +1,219 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST 2. Partial puts for existent record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
+proc test051 { method { args "" } } {
+ global fixed_len
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test051: Test of the fixed length records."
+ if { [is_fixed_length $method] != 1 } {
+ puts "Test051: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test051.db
+ set testfile1 $testdir/test051a.db
+ set env NULL
+ } else {
+ set testfile test051.db
+ set testfile1 test051a.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set oflags "-create -mode 0644 $args"
+
+ # Test various flags (legal and illegal) to open
+ puts "\tTest051.a: Test correct flag behavior on open."
+ set errorCode NONE
+ foreach f { "-dup" "-dup -dupsort" "-recnum" } {
+ puts "\t\tTest051.a: Test flag $f"
+ set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile} ret]
+ error_check_good dbopen:flagtest:catch $stat 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ set errorCode NONE
+ }
+ set f "-renumber"
+ puts "\t\tTest051.a: Test $f"
+ if { [is_frecno $method] == 1 } {
+ set db [eval {berkdb_open} $oflags $f $omethod $testfile]
+ error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
+ $db close
+ } else {
+ error_check_good \
+ dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+ $oflags $f $omethod $testfile} ret] 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ }
+
+ # Test partial puts where dlen != size (should fail)
+ # it is an error to specify a partial put w/ different
+ # dlen and size in fixed length recno/queue
+ set key 1
+ set data ""
+ set txn ""
+ set test_char "a"
+
+ set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\tTest051.b: Partial puts with dlen != size."
+ foreach dlen { 1 16 20 32 } {
+ foreach doff { 0 10 20 32 } {
+ # dlen < size
+ puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen+1]"
+ set data [repeat $test_char [expr $dlen + 1]]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+ #
+ # We don't get back the server error string just
+ # the result.
+ #
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+
+ # dlen > size
+ puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen-1]"
+ set data [repeat $test_char [expr $dlen - 1]]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+ }
+ }
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ $db close
+
+ # Partial puts for existent record -- replaces at beg, mid, and
+ # end of record, as well as full replace
+ puts "\tTest051.f: Partial puts within existent record."
+ set db [eval {berkdb_open} $oflags $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\t\tTest051.f: First try a put and then a full replace."
+ set data [repeat "a" $fixed_len]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+
+ set data [repeat "b" $fixed_len]
+ set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set data "InitialData"
+ set pdata "PUT"
+ set dlen [string length $pdata]
+ set ilen [string length $data]
+ set mid [expr $ilen/2]
+
+ # put initial data
+ set key 0
+
+ set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
+ puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
+ foreach doff $offlist {
+ incr key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput:init $ret 0
+
+ puts "\t\t Test051.g: Replace at offset $doff."
+ set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+ {$key $pdata}]
+ error_check_good dbput:partial $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $doff == 0} {
+ set beg ""
+ set end [string range $data $dlen $ilen]
+ } else {
+ set beg [string range $data 0 [expr $doff - 1]]
+ set end [string range $data [expr $doff + $dlen] $ilen]
+ }
+ if { $doff > $ilen } {
+ # have to put padding between record and inserted
+ # string
+ set newdata [format %s%s $beg $end]
+ set diff [expr $doff - $ilen]
+ set nlen [string length $newdata]
+ set newdata [binary \
+ format a[set nlen]x[set diff]a$dlen $newdata $pdata]
+ } else {
+ set newdata [make_fixed_length \
+ frecno [format %s%s%s $beg $pdata $end]]
+ }
+ set ret [$db get -recno $key]
+ error_check_good compare($newdata,$ret) \
+ [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
+ }
+
+ $db close
+}
diff --git a/libdb/test/test052.tcl b/libdb/test/test052.tcl
new file mode 100644
index 0000000..c82ecb4
--- /dev/null
+++ b/libdb/test/test052.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test052
+# TEST Renumbering record Recno test.
+proc test052 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test052: Test of renumbering recno."
+ if { [is_rrecno $method] != 1} {
+ puts "Test052: skipping for method $method."
+ return
+ }
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest052: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test052.db
+ set env NULL
+ } else {
+ set testfile test052.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest052: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set keys($i) [lindex [lindex $ret 0] 0]
+ set darray($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest052: Deletes by key."
+ puts "\t Test052.a: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set k $keys($i)
+ set ret [$dbc get -set $k]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
+
+ # delete by key before current
+ set i [incr i -1]
+ error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
+	# with renumber, current's data should be constant, but its
+	# record number is decremented by one
+ set i [incr i +1]
+ error_check_good dbc:data \
+ [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
+ error_check_good dbc:keys \
+ [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
+
+ puts "\t Test052.b: Delete cursor item by key."
+ set i [expr $nkeys/2 ]
+
+ set ret [$dbc get -set $keys($i)]
+ error_check_bad dbc:get [llength $ret] 0
+ error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 1])
+ error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
+ set ret [$dbc get -current]
+
+ # After a delete, cursor should return DB_NOTFOUND.
+ error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
+
+ # And the item after the cursor should now be
+ # key: $nkeys/2, data: $nkeys/2 + 2
+ set ret [$dbc get -next]
+ error_check_bad dbc:getnext [llength $ret] 0
+ error_check_good dbc:getnext:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+ error_check_good dbc:getnext:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ puts "\t Test052.c: Delete item after cursor."
+ # should be { keys($nkeys/2), darray($nkeys/2 + 2) }
+ set i [expr $nkeys/2]
+ # deleting data for key after current (key $nkeys/2 + 1)
+ error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
+
+ # current should be constant
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:current [llength $ret] 0
+ error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
+ $keys($i)
+ error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 2])
+
+ puts "\tTest052: Deletes by cursor."
+ puts "\t Test052.d: Delete, do DB_NEXT."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc:get:curs \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # Move one more forward, so we're not on the first item.
+ error_check_bad dbc:getnext [llength [$dbc get -next]] 0
+
+ puts "\t Test052.e: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ # next should now reference the record that was previously after
+ # old current
+ set ret [$dbc get -next]
+ error_check_bad get:next [llength $ret] 0
+ error_check_good dbc:get:next:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+ error_check_good dbc:get:next:keys \
+ [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:get:curr:compare \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # The rest of the test was written with the old rrecno semantics,
+ # which required a separate c_del(CURRENT) test; to leave
+ # the database in the expected state, we now delete the first item.
+ set ret [$dbc get -first]
+ error_check_bad getfirst [llength $ret] 0
+ error_check_good delfirst [$dbc del] 0
+
+ puts "\tTest052: Inserts."
+ puts "\t Test052.g: Insert before (DB_BEFORE)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc:get:first [llength $ret] 0
+ error_check_good dbc_get:first \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:first:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+
+ set ret [$dbc put -before $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbc_put:before $ret $keys($i)
+ # cursor should adjust to point to new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_put:before:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_put:before:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc_get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+
+ puts "\t Test052.h: Insert by cursor after (DB_AFTER)."
+ set i [incr i]
+ set ret [$dbc put -after $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbcput:after $ret $keys($i)
+ # cursor should reference new item
+ set ret [$dbc get -current]
+ error_check_good dbc:get:current:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc:get:current:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ # items after curs should be adjusted
+ set ret [$dbc get -next]
+ error_check_bad dbc:get:next [llength $ret] 0
+ error_check_good dbc:get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
+
+ puts "\t Test052.i: Insert (overwrite) current item (DB_CURRENT)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ # choose a datum that is not currently in db
+ set ret [$dbc put -current $darray([expr $i + 2])]
+ error_check_good dbc_put:curr $ret 0
+ # curs should be on new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc_get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [incr i]
+ error_check_good dbc_get:next \
+ $ret [list [list $keys($i) $darray($i)]]
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest052 complete."
+}
diff --git a/libdb/test/test053.tcl b/libdb/test/test053.tcl
new file mode 100644
index 0000000..37bff41
--- /dev/null
+++ b/libdb/test/test053.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
+proc test053 { method args } {
+ global alphabet
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest053: Test of cursor stability across btree splits."
+ if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+ puts "Test053: skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test053: skipping for specific pagesizes"
+ return
+ }
+
+ set txn ""
+ set flags ""
+
+ puts "\tTest053.a: Create $omethod $args database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test053.db
+ set env NULL
+ } else {
+ set testfile test053.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags \
+ "-create -revsplitoff -pagesize 1024 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 8
+ set npages 15
+
+ # We want to create a db with npages leaf pages, and have each page
+ # be near full with keys that we can predict. We set pagesize above
+ # to 1024 bytes, it should breakdown as follows (per page):
+ #
+ # ~20 bytes overhead
+ # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
+ # data: ~4 bytes overhead, + 100 bytes
+ #
+ # then, with 8 keys/page we should be just under 1024 bytes
+ puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
+ set keystring [string range $alphabet 0 [expr $npages -1]]
+ set data [repeat DATA 22]
+ for { set i 0 } { $i < $npages } {incr i } {
+ set key ""
+ set keyroot \
+ [repeat [string toupper [string range $keystring $i $i]] 3]
+ set key_set($i) $keyroot
+ for {set j 0} { $j < $nkeys} {incr j} {
+ if { $j < 10 } {
+ set key [set keyroot]0$j
+ } else {
+ set key $keyroot$j
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts "\tTest053.c: Check page count."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ puts "\tTest053.d: Delete all but one key per page."
+ for {set i 0} { $i < $npages } {incr i } {
+ for {set j 1} { $j < $nkeys } {incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key_set($i)0$j}]
+ error_check_good dbdel $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+ puts "\tTest053.e: Check to make sure all pages are still there."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
+
+ # walk cursor through tree forward, backward.
+ # delete one key, repeat
+ for {set i 0} { $i < $npages} {incr i} {
+ puts -nonewline \
+ "\tTest053.f.$i: Walk curs through tree: forward..."
+ for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts -nonewline "backward..."
+ for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
+ $j >= $i } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.f.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j + $i - 1])00
+ }
+ }
+ puts "\tTest053.g.$i:\
+ Delete single key ([expr $npages - $i] keys left)."
+ set ret [eval {$db del} $txn {$key_set($i)00}]
+ error_check_good dbdel $ret 0
+ error_check_good del:check \
+ [llength [eval {$db get} $txn {$key_set($i)00}]] 0
+ }
+
+ # end for loop, verify db_notfound
+ set ret [$dbc get -first]
+ error_check_good dbc:get:verify [llength $ret] 0
+
+ # loop: until single key restored on each page
+ for {set i 0} { $i < $npages} {incr i} {
+ puts "\tTest053.i.$i:\
+ Restore single key ([expr $i + 1] keys in tree)."
+ set ret [eval {$db put} $txn {$key_set($i)00 $data}]
+ error_check_good dbput $ret 0
+
+ puts -nonewline \
+ "\tTest053.j: Walk cursor through tree: forward..."
+ for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ error_check_good dbc:get:next [llength $curr] 0
+
+ puts -nonewline "backward..."
+ for { set j $i; set curr [$dbc get -last]} { \
+ $j >= 0 } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+ error_check_good dbc:get:prev [llength $curr] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.k.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $i + 1]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j - 1])00
+ }
+ }
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "Test053 complete."
+}
diff --git a/libdb/test/test054.tcl b/libdb/test/test054.tcl
new file mode 100644
index 0000000..77fccd5
--- /dev/null
+++ b/libdb/test/test054.tcl
@@ -0,0 +1,461 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST There are N different scenarios to test:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST do a delete of the entire Key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST 9. Cursors A and B are in the same place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
+proc test054 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644"
+ puts "Test054 ($method $args):\
+ interspersed cursor and normal operations"
+ if { [is_record_based $method] == 1 } {
+ puts "Test054 skipping for method $method"
+ return
+ }
+
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-nodup.db
+ set env NULL
+ } else {
+ set testfile test054-nodup.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest054.a: No Duplicate Tests"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest054.a1: Delete w/cursor, regular get"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [llength $r] 0
+
+ # Free up the cursor.
+ error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Test case #2.
+ puts "\tTest054.a2: Cursor before K, delete K, cursor next"
+
+ # Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [eval {$curs get} -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ set r [eval {$curs get} -set {$key_set(1)} ]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now delete (next item) $key_set(2)
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Test case #3.
+ puts "\tTest054.a3: Cursor on K, delete K, cursor current"
+
+ # delete item 3
+ error_check_good \
+ db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
+ # NEEDS TO COME BACK IN, BUG CHECK
+ set ret [$curs get -current]
+ error_check_good current_after_del $ret [list [list [] []]]
+ error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest054.a4: Cursor on K, delete K, cursor next"
+
+ # Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Create the new cursor and put it on 1
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Delete 2
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now get ready for duplicate tests
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test054: skipping remainder of test for method $method."
+ return
+ }
+
+ puts "\tTest054.b: Duplicate Tests"
+ append args " -dup"
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Test case #5.
+ puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [eval {$curs get} -set {$key_set(2)}]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
+
+ # Test case #6.
+ puts "\tTest054.b2: Now get the next duplicate from the cursor."
+
+ # Now do next on cursor
+ set r [$curs get -nextdup]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+	# Test case #8.
+	puts "\tTest054.b3: Two cursors in set; each deletes a different item"
+
+ # Open a new cursor.
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
+
+ # Set on last of duplicate set.
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_5
+
+ # Delete the item at cursor 1 (dup_1)
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify curs1 and curs2
+ # current should fail
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get_after_del $ret [list [list [] []]]
+
+ set r [$curs2 get -current]
+ error_check_bad curs2_get [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good curs_get:DB_CURRENT:data $d dup_5
+
+ # Now delete the item at cursor 2 (dup_5)
+ error_check_good curs2_del [$curs2 del] 0
+
+ # Verify curs1 and curs2
+ set ret [$curs get -current]
+ error_check_good curs1_get:del2 $ret [list [list [] []]]
+
+ set ret [$curs2 get -current]
+ error_check_good curs2_get:del2 $ret [list [list [] []]]
+
+ # Now verify that next and prev work.
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_4
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ puts "\tTest054.b4: Two cursors same item, one delete, one get"
+
+ # Move curs2 onto dup_2
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_3
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_2
+
+ # delete on curs 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get:deleted $ret [list [list [] []]]
+ set ret [$curs2 get -current]
+ error_check_good \
+ curs2_get:deleted $ret [list [list [] []]]
+
+ puts "\tTest054.b5: Now do a next on both cursors"
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ set r [$curs2 get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
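+
+# A small sketch of the behavior exercised in Test054.b3-b5: a cursor whose
+# item has been deleted keeps its logical position, so -prev still lands on
+# the duplicate before that slot and a following -next lands on the one
+# after it.  The helper name below is illustrative only and is not called
+# by the test.
+proc test054_neighbors { curs } {
+	# Step one item back, then one item forward, returning both data
+	# items; from a deleted slot these are its two neighboring dups.
+	set prev [lindex [lindex [$curs get -prev] 0] 1]
+	set next [lindex [lindex [$curs get -next] 0] 1]
+	return [list $prev $next]
+}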
diff --git a/libdb/test/test055.tcl b/libdb/test/test055.tcl
new file mode 100644
index 0000000..987e1f5
--- /dev/null
+++ b/libdb/test/test055.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST	There are three different scenarios to test:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST	2. (no dups) Set cursor, retrieve prev.
+# TEST	3. (no dups) Set cursor, retrieve next.
+proc test055 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test055: $method interspersed cursor and normal operations"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test055.db
+ set env NULL
+ } else {
+ set testfile test055.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest055.a: No duplicates"
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
+ $args {$testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest055.a1: Set cursor, retrieve current"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve current
+ set r [$curs get -current]
+ error_check_bad cursor_get:DB_CURRENT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
+
+ # Test case #2.
+ puts "\tTest055.a2: Set cursor, retrieve previous"
+ set r [$curs get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(1)
+ error_check_good \
+ curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
+
+ # Test case #3.
+	puts "\tTest055.a3: Set cursor, retrieve next"
+
+ # Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve next
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good \
+ curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
+
+ # Close cursor and database.
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
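+
+# A minimal sketch of the list shape the checks above rely on: a successful
+# "$curs get" returns a list of {key data} pairs, so the tests unpack pair 0
+# with nested lindex calls and treat an empty list as "no item".  The helper
+# name is hypothetical and exists only to name that shape.
+proc test055_unpack { ret } {
+	# {{key data}} -> {key data}; an empty $ret yields two empty fields.
+	set k [lindex [lindex $ret 0] 0]
+	set d [lindex [lindex $ret 0] 1]
+	return [list $k $d]
+}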
diff --git a/libdb/test/test056.tcl b/libdb/test/test056.tcl
new file mode 100644
index 0000000..e014f62
--- /dev/null
+++ b/libdb/test/test056.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
+proc test056 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "Test056: skipping for method $method"
+ return
+ }
+ puts "Test056: $method delete of key in presence of cursor"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test056.db
+ set env NULL
+ } else {
+ set testfile test056.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ puts "\tTest056.a: Key delete with cursor on duplicate."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on a duplicate of key 2
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do two nexts
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ # Now do the delete
+ set r [eval {$db del} $txn $flags {$key_set(2)}]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ set ret [$curs get -current]
+ error_check_good curs_after_del $ret [list [list [] []]]
+
+ # Now check that the rest of the database looks intact. There
+ # should be only two keys, 1 and 3.
+
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ set r [$curs get -next]
+ error_check_good cursor_get:DB_NEXT [llength $r] 0
+
+ puts "\tTest056.b:\
+ Cursor delete of first item, followed by cursor FIRST"
+ # Set to beginning
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ # Now do delete
+ error_check_good curs_del [$curs del] 0
+
+ # Now do DB_FIRST
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(3)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
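+
+# Sketch of the convention Test056.a checks: once the item under a cursor
+# has been deleted, a "-current" get does not error but returns a single
+# empty {key data} pair.  Hypothetical helper, shown only to name that shape.
+proc test056_on_deleted { ret } {
+	# 1 if $ret has the {{} {}} shape used for a deleted current item.
+	return [string equal $ret [list [list [] []]]]
+}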
diff --git a/libdb/test/test057.tcl b/libdb/test/test057.tcl
new file mode 100644
index 0000000..a73a748
--- /dev/null
+++ b/libdb/test/test057.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST Check if we handle the case where we delete a key with the cursor on
+# TEST it and then add the same key. The cursor should not get the new item
+# TEST returned, but the item shouldn't disappear.
+# TEST	Run two tests, one where the overwriting put is done with a put and
+# TEST one where it's done with a cursor put.
+proc test057 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test057: skipping for method $method"
+ return
+ }
+ puts "Test057: $method delete and replace in presence of cursor."
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test057.db
+ set env NULL
+ } else {
+ set testfile test057.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good dbopen:dup [is_valid_db $db] TRUE
+
+ puts "\tTest057.a: Set cursor, delete cursor, put with key."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+	# Now put the cursor on key 1
+
+	# Key 1 has no duplicates, so -set simply positions on its item.
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ error_check_good curs_get:del [$curs get -current] [list [list [] []]]
+
+ # Now do a put on the key
+ set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
+ error_check_good put $r 0
+
+ # Do a get
+ set r [eval {$db get} $txn {$key_set(1)}]
+ error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
+
+ # Recheck cursor
+ error_check_good curs_get:deleted [$curs get -current] [list [list [] []]]
+
+ # Move cursor and see if we get the key.
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
+
+	puts "\tTest057.b: Set two cursors on a key, delete one, overwrite other"
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
+
+	# Set both cursors on the third key
+ set r [$curs get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(3)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2
+ set pflags "-current"
+ if {[is_hash $method] == 1} {
+ error_check_good curs1_get_after_del [is_substr \
+ [$curs2 put $pflags new_datum$key_set(3)] "DB_NOTFOUND"] 1
+
+ # Gets fail
+ error_check_good curs1_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs2_get:deleted \
+		    [$curs2 get -current] \
+ [list [list [] []]]
+ } else {
+		# btree only; recno-based methods skip this test entirely
+ set ret [$curs2 put $pflags new_datum$key_set(3)]
+ error_check_good curs_replace $ret 0
+ }
+
+ # Gets fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ puts "\tTest057.c:\
+ Set two cursors on a dup, delete one, overwrite other"
+
+ # Set both cursors on the 2nd duplicate of key 2
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs2 get -set $key_set(2)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(2)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs2 get -next]
+ error_check_bad cursor2_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs2_get:DB_NEXT:data $d dup_1
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2 -- this will work on btree but
+ # not on hash
+ if {[is_hash $method] == 1} {
+ error_check_good hash_replace \
+ [is_substr [$curs2 put -current new_dup_1] "DB_NOTFOUND"] 1
+ } else {
+ error_check_good curs_replace [$curs2 put -current new_dup_1] 0
+ }
+
+ # Both gets should fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
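+
+# Sketch of the overwrite idiom used in Test057.b and .c: a cursor put with
+# "-current" replaces the data under the cursor without naming a key.  As
+# the branches above show, btree accepts it even when the current item was
+# deleted, while on hash the put fails with DB_NOTFOUND.  The proc name is
+# illustrative only.
+proc test057_overwrite { curs newdata } {
+	# 0 on success; on hash the return contains DB_NOTFOUND (see above).
+	return [$curs put -current $newdata]
+}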
diff --git a/libdb/test/test058.tcl b/libdb/test/test058.tcl
new file mode 100644
index 0000000..30cbb86
--- /dev/null
+++ b/libdb/test/test058.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
+proc test058 { method args } {
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test058 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test058: skipping for method $method"
+ return
+ }
+	puts "Test058: $method delete dups after inserting a key after the duped key."
+
+ # environment
+ env_cleanup $testdir
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # db open
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
+ set db [eval {berkdb_open} $flags $omethod "test058.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set tn ""
+ set tid ""
+ set tn [$env txn]
+ set tflags "-txn $tn"
+
+ puts "\tTest058.a: Adding 10 duplicates"
+ # Add a bunch of dups
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret \
+ [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest058.b: Adding key after duplicates"
+ # Now add one more key/data AFTER the dup set.
+ set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
+ error_check_good db_put $ret 0
+
+ error_check_good txn_commit [$tn commit] 0
+
+ set tn [$env txn]
+ error_check_good txnbegin [is_substr $tn $env] 1
+ set tflags "-txn $tn"
+
+ # Now delete everything
+ puts "\tTest058.c: Deleting duplicated key"
+ set ret [eval {$db del} $tflags {doghouse}]
+ error_check_good del $ret 0
+
+ # Now reput everything
+ set pad \
+ abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
+
+ puts "\tTest058.d: Reputting duplicates with big data vals"
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret [eval {$db put} \
+ $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
+ error_check_good db_put $ret 0
+ }
+ error_check_good txn_commit [$tn commit] 0
+
+ # Check duplicates for order
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest058.e: Verifying that duplicates are in order."
+ set i 0
+ for { set ret [$dbc get -set doghouse] } \
+ {$i < 10 && [llength $ret] != 0} \
+ { set ret [$dbc get -nextdup] } {
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good \
+ duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ reset_env $env
+}
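+
+# Sketch of the duplicate walk behind Test058.e: position on the duped key
+# with "-set", then step through its duplicates with "-nextdup", collecting
+# the data items in stored order.  Names here are illustrative only.
+proc test058_collect_dups { dbc key } {
+	set dups {}
+	for { set ret [$dbc get -set $key] } { [llength $ret] != 0 } \
+	    { set ret [$dbc get -nextdup] } {
+		lappend dups [lindex [lindex $ret 0] 1]
+	}
+	return $dups
+}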
diff --git a/libdb/test/test059.tcl b/libdb/test/test059.tcl
new file mode 100644
index 0000000..1860d3f
--- /dev/null
+++ b/libdb/test/test059.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST	The following ops should allow a partial data retrieve of 0-length.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+proc test059 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test059: $method 0-length partial data retrieval"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test059.db
+ set env NULL
+ } else {
+ set testfile test059.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ puts "\tTest059.a: db get with 0 partial length retrieve"
+
+	# Do a db get on one of the middle keys with a 0-length partial retrieve.
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+ puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
+
+ puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+ puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+ puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+ puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+ puts "\tTest059.f: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
+ error_check_good db_cget_set [string length $data] 0
+
+ if {[is_btree $method] == 1} {
+ puts "\tTest059.g:\
+ db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
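+
+# Sketch of the zero-length partial retrieve checked above: "-partial {0 0}"
+# asks for zero bytes starting at offset zero, so the key comes back intact
+# while the data field is empty.  Hypothetical helper; $flag is one of
+# -first, -next, -prev, -last or -current.
+proc test059_key_only { dbc flag } {
+	set ret [$dbc get $flag -partial {0 0}]
+	return [lindex [lindex $ret 0] 0]
+}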
diff --git a/libdb/test/test060.tcl b/libdb/test/test060.tcl
new file mode 100644
index 0000000..9509e03
--- /dev/null
+++ b/libdb/test/test060.tcl
@@ -0,0 +1,60 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
+proc test060 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
+
+ # Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test060.db
+ set env NULL
+ } else {
+ set testfile test060.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database and check success
+ puts "\tTest060.a: open and close non-existent file with DB_EXCL"
+ set db [eval {berkdb_open \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen:excl [is_valid_db $db] TRUE
+
+ # Close it and check success
+ error_check_good db_close [$db close] 0
+
+ # Try to open it again, and make sure the open fails
+ puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
+ set errorCode NONE
+ error_check_good open:excl:catch [catch { \
+ set db [eval {berkdb_open_noerr \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ } ret ] 1
+
+ error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
+}
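+
+# Sketch of the failure check used in Test060.b: wrap the open in "catch",
+# then look for the expected system error in the global errorCode.  The
+# helper name is hypothetical; berkdb_open_noerr keeps the Tcl layer from
+# printing the error itself, and $args carries the usual open arguments.
+proc test060_open_fails_with { ecode args } {
+	global errorCode
+	set errorCode NONE
+	set failed [catch { eval {berkdb_open_noerr} $args } ret]
+	return [expr { $failed == 1 && [is_substr $errorCode $ecode] }]
+}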
diff --git a/libdb/test/test061.tcl b/libdb/test/test061.tcl
new file mode 100644
index 0000000..85724d2
--- /dev/null
+++ b/libdb/test/test061.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
+proc test061 { method args } {
+ global alphabet
+ global encrypt
+ global errorCode
+ global passwd
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test061 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1} {
+ puts "Test061 skipping for method $method"
+ return
+ }
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ puts "Test061: Transaction abort and commit test for in-memory data."
+ puts "Test061: $method $args"
+
+ set key "key"
+ set data "data"
+ set otherdata "otherdata"
+ set txn ""
+ set flags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1} {
+ set key 1
+ set gflags " -recno"
+ }
+
+ puts "\tTest061: Create environment and $method database."
+ env_cleanup $testdir
+
+ # create environment
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # db open -- no file specified, in-memory database
+ set flags "-auto_commit -create $args $omethod"
+ set db [eval {berkdb_open -env} $dbenv $flags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here we go with the six test cases. Since we need to verify
+ # a different thing each time, and since we can't just reuse
+ # the same data if we're to test overwrite, we just
+ # plow through rather than writing some impenetrable loop code;
+ # each of the cases is only a few lines long, anyway.
+
+ puts "\tTest061.a: put/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for *non*-existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ puts "\tTest061.b: put/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check again for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.c: overwrite/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check that data is unchanged ($data not $otherdata)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.d: overwrite/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check that data has changed ($otherdata not $data)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.e: delete/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # delete
+ set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.f: delete/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+	# delete the key
+	set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check for continued nonexistence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ # We're done; clean up.
+ error_check_good db_close [eval {$db close}] 0
+ error_check_good env_close [eval {$dbenv close}] 0
+
+ # Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
+ puts "\tTest061.g: Running db_recover -h"
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
+ if { $ret != 0 } {
+		puts "FAIL: db_recover output $res"
+ }
+ error_check_good db_recover $ret 0
+
+ puts "\tTest061.h: Running db_recover -c -h"
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
+ error_check_good db_recover-c $ret 0
+}
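+
+# Sketch of the abort check repeated in Test061.a/c/e: make the change
+# inside a transaction, abort it, and confirm the change is not visible
+# afterwards.  Names are illustrative; assumes a transactional environment.
+proc test061_put_then_abort { dbenv db key data } {
+	set txn [$dbenv txn]
+	error_check_good sketch_put [$db put -txn $txn $key $data] 0
+	error_check_good sketch_abort [$txn abort] 0
+	# 0 here means the aborted put left no visible record behind.
+	return [llength [$db get $key]]
+}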
diff --git a/libdb/test/test062.tcl b/libdb/test/test062.tcl
new file mode 100644
index 0000000..a316a08
--- /dev/null
+++ b/libdb/test/test062.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST	Insert the first 200 words from the dictionary 200 times each with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
+proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest0$tnum.a: Put loop (initialize database)"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set keys($count) $str
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest0$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ # Do a partial write to extend each datum in
+ # the regular db by the corresponding dictionary word.
+ # We have to go through each key's dup set using -set
+ # because cursors are not stable in the hash AM and we
+ # want to make sure we hit all the keys.
+ for { set i 0 } { $i < $count } { incr i } {
+ set key $keys($i)
+ for {set ret [$dbc get -set $key]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set orig_d [lindex [lindex $ret 0] 1]
+ set d [string range $orig_d 2 end]
+ set doff [expr [string length $d] + 2]
+ set dlen 0
+ error_check_good data_and_key_sanity $d $k
+
+ set ret [$dbc get -current]
+ error_check_good before_sanity \
+ [lindex [lindex $ret 0] 0] \
+ [string range [lindex [lindex $ret 0] 1] 2 end]
+
+ error_check_good partial_put [eval {$dbc put -current \
+ -partial [list $doff $dlen] $d}] 0
+
+ set ret [$dbc get -current]
+ error_check_good partial_put_correct \
+ [lindex [lindex $ret 0] 1] $orig_d$d
+ }
+ }
+
+ puts "\tTest0$tnum.c: Double-checking get loop."
+ # Double-check that each datum in the regular db has
+ # been appropriately modified.
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good modification_correct \
+ [string range $d 2 end] [repeat $k 2]
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
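+
+# Sketch of the partial-put arithmetic in Test062.b: each datum is laid out
+# as "<letter>:word", so the word starts at offset 2 and writing it again at
+# doff = [string length $word] + 2 with dlen = 0 appends a second copy
+# without overwriting any existing bytes.  Hypothetical helper name.
+proc test062_append_word { dbc word } {
+	set doff [expr {[string length $word] + 2}]
+	# dlen 0: replace zero existing bytes, i.e. insert at doff.
+	return [$dbc put -current -partial [list $doff 0] $word]
+}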
diff --git a/libdb/test/test063.tcl b/libdb/test/test063.tcl
new file mode 100644
index 0000000..1764168
--- /dev/null
+++ b/libdb/test/test063.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
+proc test063 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 63
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "key"
+ set data "data"
+ set key2 "another_key"
+ set data2 "more_data"
+
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set key "1"
+ set key2 "2"
+ append gflags " -recno"
+ }
+
+ puts "Test0$tnum: $method ($args) DB_RDONLY test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database."
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ # Put and get an item so it's nonempty.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good initial_put $ret 0
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good initial_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { $eindex == -1 } {
+ # Confirm that database is writable. If we are
+ # using an env (that may be remote on a server)
+ # we cannot do this check.
+ error_check_good writable [file writable $testfile] 1
+ }
+
+ puts "\tTest0$tnum.b: Re-opening DB_RDONLY and attempting to put."
+
+ # Now open it read-only and make sure we can get but not put.
+ set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
+ error_check_good put_failed $ret 1
+ error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set errorCode "NONE"
+
+ puts "\tTest0$tnum.c: Attempting cursor put."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good cursor_set [$dbc get -first] $dbt
+ set ret [catch {eval {$dbc put} -current $data} res]
+ error_check_good c_put_failed $ret 1
+ error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags {$key2}]
+ error_check_good db_get_key2 $dbt ""
+
+ puts "\tTest0$tnum.d: Attempting ordinary delete."
+
+ set errorCode "NONE"
+	set ret [catch {eval {$db del} $txn {$key}} res]
+ error_check_good del_failed $ret 1
+ error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get_key $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest0$tnum.e: Attempting cursor delete."
+ # Just set the cursor to the beginning; we don't care what's there...
+ # yet.
+ set dbt2 [$dbc get -first]
+ error_check_good db_get_first_key $dbt2 $dbt
+ set errorCode "NONE"
+ set ret [catch {$dbc del} res]
+ error_check_good c_del_failed $ret 1
+ error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt2 [$dbc get -current]
+ error_check_good db_get_key $dbt2 $dbt
+
+ puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $omethod $args $testfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good first_there [$dbc get -first] \
+ [list [list $key [pad_data $method $data]]]
+ error_check_good nomore_there [$dbc get -next] ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/test064.tcl b/libdb/test/test064.tcl
new file mode 100644
index 0000000..79a0232
--- /dev/null
+++ b/libdb/test/test064.tcl
@@ -0,0 +1,69 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
+proc test064 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 64
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->get_type test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database of type $method."
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.b: get_type after method specifier."
+
+ set db [eval {berkdb_open} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: get_type after DB_UNKNOWN."
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+}
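+
+# Sketch of the name mapping Test064.b and .c rely on: convert_method hands
+# back a flag such as "-btree" while "$db get_type" returns the same word
+# without the leading dash, so stripping the first character lines the two
+# up.  The helper name is illustrative only.
+proc test064_type_matches { db omethod } {
+	return [string equal [$db get_type] [string range $omethod 1 end]]
+}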
diff --git a/libdb/test/test065.tcl b/libdb/test/test065.tcl
new file mode 100644
index 0000000..94166c0
--- /dev/null
+++ b/libdb/test/test065.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test065
+# TEST	Test of DB->stat(DB_FAST_STAT)
+proc test065 { method args } {
+ source ./include.tcl
+ global errorCode
+ global alphabet
+
+ set nentries 10000
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 65
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
+
+ puts "\tTest0$tnum.a: Create database and check it while empty."
+
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [catch {eval $db stat -faststat} res]
+
+ error_check_good db_close [$db close] 0
+
+ if { ([is_record_based $method] && ![is_queue $method]) \
+ || [is_rbtree $method] } {
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
+ } else {
+ puts "\tTest0$tnum: Test complete for method $method."
+ return
+ }
+
+ # If we've got this far, we're on an access method for
+	# which record counts make sense. Thus, we no longer
+ # catch EINVALs, and no longer care about __db_errs.
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
+
+ puts "\tTest0$tnum.b: put $nentries keys."
+
+ if { [is_record_based $method] } {
+ set gflags " -recno "
+ set keypfx ""
+ } else {
+ set gflags ""
+ set keypfx "key"
+ }
+
+ set txn ""
+ set data [pad_data $method $alphabet]
+
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest0$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_rrecno $method] == 1 } {
+ # if we're renumbering, when we hit key 5001 we'll
+ # have deleted 5000 and we'll croak! So delete key
+ # 1, repeatedly.
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
+ } else {
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
+ }
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
+ } else {
+ # No renumbering--no change in RECORDCOUNT!
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 } {
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
+ } elseif { [is_rbtree $method] == 1 } {
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
+ } else {
+ # No renumbering--still no change in RECORDCOUNT.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rbtree $method] != 1 } {
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
+ # or without renumbering.
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
+ } else {
+ # In an rbtree, 1000 of those keys were overwrites, so there
+ # are (.7 x nentries) new keys and (.9 x nentries) old keys
+ # for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
+ }
+
+ error_check_good db_close [$db close] 0
+}
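+
+# Worked numbers for the final faststat checks above, using the default
+# nentries of 10000: start = 1 + 9000 = 9001, so the non-rbtree expectation
+# is 9000 + 8000 = 17000 keys (1.7 x nentries) and the rbtree expectation is
+# 9000 + 7000 = 16000 keys (1.6 x nentries).  Helper name is illustrative.
+proc test065_expected_counts { nentries } {
+	set start [expr {1 + $nentries / 10 * 9}]
+	set recno_count [expr {$start - 1 + $nentries / 10 * 8}]
+	set rbtree_count [expr {$start - 1 + $nentries / 10 * 7}]
+	return [list $recno_count $rbtree_count]
+}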
diff --git a/libdb/test/test066.tcl b/libdb/test/test066.tcl
new file mode 100644
index 0000000..80bb813
--- /dev/null
+++ b/libdb/test/test066.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
+proc test066 { method args } {
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set tnum 66
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Test0$tnum: Skipping for method $method."
+ return
+ }
+
+ puts "Test0$tnum: Test of cursor put to DB_CURRENT with duplicates."
+
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test066.db
+ set env NULL
+ } else {
+ set testfile test066.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set key "test"
+ set data "olddata"
+
+ set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
+ $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -first]
+ error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
+
+ set newdata "newdata"
+ set ret [$dbc put -current [chop_data $method $newdata]]
+ error_check_good dbc_put $ret 0
+
+ # There should be only one (key,data) pair in the database, and this
+ # is it.
+ set ret [$dbc get -first]
+ error_check_good db_get_first $ret \
+ [list [list $key [pad_data $method $newdata]]]
+
+ # and this one should come up empty.
+ set ret [$dbc get -next]
+ error_check_good db_get_next $ret ""
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum: Test completed successfully."
+}
diff --git a/libdb/test/test067.tcl b/libdb/test/test067.tcl
new file mode 100644
index 0000000..313d12a
--- /dev/null
+++ b/libdb/test/test067.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and the record that is supplied
+# TEST is the one checked for ordering compatibility.
+proc test067 { method {ndups 1000} {tnum 67} args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+ set txn ""
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Partial puts on near-empty duplicate pages."
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a ($dupopt): Put $ndups duplicates."
+
+ set key "key_test$tnum"
+
+ for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
+ set data $alphabet$ndx
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # No need for pad_data since we're skipping recno.
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Sync so we can inspect database if the next section bombs.
+ error_check_good db_sync [$db sync] 0
+ puts "\tTest0$tnum.b ($dupopt):\
+ Deleting dups (last first), overwriting each."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ while { $count < $ndups - 1 } {
+ # set cursor to last item in db
+ set ret [$dbc get -last]
+ error_check_good \
+ verify_key [lindex [lindex $ret 0] 0] $key
+
+ # for error reporting
+ set currdatum [lindex [lindex $ret 0] 1]
+
+ # partial-overwrite it
+ # (overwrite offsets 1-4 with "bcde"--which they
+ # already are)
+
+ # Even though we expect success, we catch this
+ # since it might return EINVAL, and we want that
+ # to FAIL.
+ set errorCode NONE
+ set ret [catch {eval $dbc put -current \
+ {-partial [list 1 4]} "bcde"} \
+ res]
+ error_check_good \
+ partial_put_valid($currdatum) $errorCode NONE
+ error_check_good partial_put($currdatum) $res 0
+
+ # delete it
+ error_check_good dbc_del [$dbc del] 0
+
+ #puts $currdatum
+
+ incr count
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/libdb/test/test068.tcl b/libdb/test/test068.tcl
new file mode 100644
index 0000000..7737c5c
--- /dev/null
+++ b/libdb/test/test068.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+proc test068 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set tnum 68
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set nkeys 1000
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
+ if { [is_record_based $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+
+ # Create a list of $nkeys words to insert into db.
+ puts "\tTest0$tnum.a: Initialize word list."
+ set txn ""
+ set wordlist {}
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nkeys } {
+ lappend wordlist $str
+ incr count
+ }
+ close $did
+
+ # Sanity check: did we get $nkeys words?
+ error_check_good enough_keys [llength $wordlist] $nkeys
+
+ # rbtree can't handle dups, so just test the non-dup case
+ # if it's the current method.
+ if { [is_rbtree $method] == 1 } {
+ set dupoptlist { "" }
+ } else {
+ set dupoptlist { "" "-dup" "-dup -dupsort" }
+ }
+
+ foreach dupopt $dupoptlist {
+ #
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
+ foreach word $wordlist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.c ($dupopt): get loop."
+ foreach word $wordlist {
+ # Make sure that the Nth word has been correctly
+ # inserted, and also that the Nth word is the
+ # Nth one we pull out of the database using a cursor.
+
+ set dbt [$db get $word]
+ error_check_good get_key [list [list $word $word]] $dbt
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
+
+ # Set cursor to the first key; make sure it succeeds.
+ # With an unsorted wordlist, we can't be sure that the
+ # first item returned will equal the first item in the
+ # wordlist, so we just make sure it got something back.
+ set dbt [eval {$dbc get -first}]
+ error_check_good \
+ dbc_get_first [llength $dbt] 1
+
+ # If -dup is not set, or if -dupsort is set too, we
+ # need to verify that DB_BEFORE and DB_AFTER fail
+ # and then move on to the next $dupopt.
+ if { $dupopt != "-dup" } {
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -after \
+ {-partial [list 6 0]} "after"} res]
+ error_check_good dbc_put_after_fail $ret 1
+ error_check_good dbc_put_after_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_AFTER returns EINVAL."
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -before \
+ {-partial [list 6 0]} "before"} res]
+ error_check_good dbc_put_before_fail $ret 1
+ error_check_good dbc_put_before_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_BEFORE returns EINVAL."
+ puts "\tTest0$tnum ($dupopt): Correct error returns,\
+ skipping further test."
+			# Continue with the outer foreach loop.
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ continue
+ }
+
+ puts "\tTest0$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put after it
+ set ret [$dbc put -after -partial {4 0} after]
+ error_check_good dbc_put_after $ret 0
+ }
+
+ puts "\tTest0$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put before it
+ set ret [$dbc put -before -partial {6 0} before]
+ error_check_good dbc_put_before $ret 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ eval $db sync
+ puts "\tTest0$tnum.g ($dupopt): Verify correctness."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # loop through the whole db beginning to end,
+ # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
+ # {$word $word}, {$word "\0\0\0\0after"} for each word.
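+		# (A put with "-partial {doff 0}" writes the supplied data at
+		# byte offset doff and replaces zero existing bytes, so a
+		# newly created record comes out as doff NUL bytes followed by
+		# the data -- hence the "\0\0\0\0\0\0before" and "\0\0\0\0after"
+		# values checked below.)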
+ set count 0
+ while { $count < $nkeys } {
+ # Get the first item of each set of three.
+ # We don't know what the word is, but set $word to
+ # the key and check that the data is
+ # "\0\0\0\0\0\0before".
+ set dbt [$dbc get -next]
+ set word [lindex [lindex $dbt 0] 0]
+
+ error_check_good dbc_get_one $dbt \
+ [list [list $word "\0\0\0\0\0\0before"]]
+
+ set dbt [$dbc get -next]
+ error_check_good \
+ dbc_get_two $dbt [list [list $word $word]]
+
+ set dbt [$dbc get -next]
+ error_check_good dbc_get_three $dbt \
+ [list [list $word "\0\0\0\0after"]]
+
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/libdb/test/test069.tcl b/libdb/test/test069.tcl
new file mode 100644
index 0000000..696b320
--- /dev/null
+++ b/libdb/test/test069.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
+proc test069 { method {ndups 50} {tnum 69} args } {
+ eval test067 $method $ndups $tnum $args
+}
diff --git a/libdb/test/test070.tcl b/libdb/test/test070.tcl
new file mode 100644
index 0000000..a02c7e2
--- /dev/null
+++ b/libdb/test/test070.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST The producers will each put nitems/nproducers records into a queue;
+# TEST the consumers will each get nitems/nconsumers of them back out.
+# TEST Then, verify that no record was lost or retrieved twice.
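+#
+# (Each child process runs conscript.tcl, which is not part of this
+# hunk; roughly, a producer loops on something like "$db put -append
+# $data" while a consumer loops on "$db get -consume".  The exact
+# flags are in conscript.tcl -- this is only a sketch.)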
+proc test070 { method {nconsumers 4} {nproducers 2} \
+ {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
+ source ./include.tcl
+ global alphabet
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test0$tnum skipping for env $env"
+ return
+ }
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test0$tnum skipping for security"
+ return
+ }
+
+ puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
+ puts "\tUsing $txn environment."
+
+ error_check_good enough_consumers [expr $nconsumers > 0] 1
+ error_check_good enough_producers [expr $nproducers > 0] 1
+
+ if { [is_queue $method] != 1 } {
+ puts "\tSkipping Test0$tnum for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+
+ # Create environment
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir]
+ error_check_good dbenv_create [is_valid_env $dbenv] TRUE
+
+ # Create database
+ set db [eval {berkdb_open -create -mode 0644 -queue}\
+ -env $dbenv $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $start != 0 } {
+ error_check_good set_seed [$db put $start "consumer data"] 0
+ puts "\tStarting at $start."
+ } else {
+ incr start
+ }
+
+ set pidlist {}
+
+ # Divvy up the total number of records amongst the consumers and
+ # producers.
+ error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
+ error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
+ set nperconsumer [expr $nitems / $nconsumers]
+ set nperproducer [expr $nitems / $nproducers]
+
+ set consumerlog $testdir/CONSUMERLOG.
+
+ # Fork consumer processes (we want them to be hungry)
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set output $consumerlog$ndx
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.consumer$ndx \
+ $testdir $testfile $mode $nperconsumer $output $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+ for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.producer$ndx \
+ $testdir $testfile PRODUCE $nperproducer "" $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+
+ # Wait for all children.
+ watch_procs $pidlist 10
+
+ # Verify: slurp all record numbers into list, sort, and make
+ # sure each appears exactly once.
+ puts "\tTest0$tnum: Verifying results."
+ set reclist {}
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set input $consumerlog$ndx
+ set iid [open $input r]
+ while { [gets $iid str] != -1 } {
+ # Convert high ints to negative ints, to
+ # simulate Tcl's behavior on a 32-bit machine
+ # even if we're on a 64-bit one.
+ if { $str > 0x7fffffff } {
+ set str [expr $str - 1 - 0xffffffff]
+ }
+ lappend reclist $str
+ }
+ close $iid
+ }
+ set sortreclist [lsort -integer $reclist]
+
+ set nitems [expr $start + $nitems]
+ for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Convert high ints to negative ints, to simulate
+ # 32-bit behavior on 64-bit platforms.
+ if { $ndx > 0x7fffffff } {
+ set cmp [expr $ndx - 1 - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
+ # Skip 0 if we are wrapping around
+ if { $cmp == 0 } {
+ incr ndx
+ incr nitems
+ incr cmp
+ }
+ # Be sure to convert ndx to a number before comparing.
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
+ set sortreclist [lreplace $sortreclist 0 0]
+ }
+ error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
+ error_check_good dbenv_close [$dbenv close] 0
+
+ puts "\tTest0$tnum completed successfully."
+}
diff --git a/libdb/test/test071.tcl b/libdb/test/test071.tcl
new file mode 100644
index 0000000..ce601df
--- /dev/null
+++ b/libdb/test/test071.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
+proc test071 { method {nconsumers 1} {nproducers 1}\
+ {nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
+
+ eval test070 $method \
+ $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
+}
diff --git a/libdb/test/test072.tcl b/libdb/test/test072.tcl
new file mode 100644
index 0000000..a6e919d
--- /dev/null
+++ b/libdb/test/test072.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
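+#
+# (With the small 512-byte pagesize used here, adding enough duplicates
+# for a single key eventually pushes them off the leaf page onto an
+# off-page duplicate tree; the already-open cursors should keep
+# pointing at the same items across that move.)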
+proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "a key"
+ set key "the key"
+ set postkey "z key"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set postdatum "0987654321"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "\n Test of cursor stability when\
+ duplicates are moved off-page."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test0$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize "
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ incr testid
+ set duptestfile $testfile$testid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ puts \
+"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
+ error_check_good precursor [is_valid_cursor $precursor \
+ $db] TRUE
+ set postcursor [eval {$db cursor} $txn]
+ error_check_good postcursor [is_valid_cursor $postcursor \
+ $db] TRUE
+ error_check_good preset [$precursor get -set $prekey] \
+ [list [list $prekey $predatum]]
+ error_check_good postset [$postcursor get -set $postkey] \
+ [list [list $postkey $postdatum]]
+
+ puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
+ set end [expr $ndups * 2 - 1]
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j $i } { $j < $end } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ # Close cursors.
+ puts "\tTest0$tnum.d: Closing cursors."
+ for { set i 0 } { $i <= $end } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ }
+}
diff --git a/libdb/test/test073.tcl b/libdb/test/test073.tcl
new file mode 100644
index 0000000..c720cdd
--- /dev/null
+++ b/libdb/test/test073.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first
+# TEST    (checking, after each put, where the reference cursors
+# TEST    occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "the key"
+ set txn ""
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "cursor stability on duplicate pages."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test073: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize -dup"
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set is_long($i) 0
+ incr keys
+ }
+
+ puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ error_check_good "cursor close" [$curs close] 0
+
+ set is_long($i) 1
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.g: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+}
+
+# !!!: This procedure is also used by test087.
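+# (It returns a datum of the form NNNN<suffix>: the dup's number
+# zero-padded to four digits, followed by a short or long alphabet
+# string depending on is_long.)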
+proc makedatum_t73 { num is_long } {
+ global alphabet
+ if { $is_long == 1 } {
+ set a $alphabet$alphabet$alphabet
+ } else {
+ set a abcdefghijklm
+ }
+
+ # format won't do leading zeros, alas.
+ if { $num / 1000 > 0 } {
+ set i $num
+ } elseif { $num / 100 > 0 } {
+ set i 0$num
+ } elseif { $num / 10 > 0 } {
+ set i 00$num
+ } else {
+ set i 000$num
+ }
+
+ return $i$a
+}
+
+# !!!: This procedure is also used by test087.
+proc verify_t73 { is_long_array curs_array numkeys key } {
+ upvar $is_long_array is_long
+ upvar $curs_array dbc
+ upvar db db
+
+	# Useful for debugging, perhaps.
+ eval $db sync
+
+ for { set j 0 } { $j < $numkeys } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ error_check_good\
+ "cursor $j key correctness (with $numkeys total items)"\
+ $k $key
+ error_check_good\
+ "cursor $j data correctness (with $numkeys total items)"\
+ $d [makedatum_t73 $j $is_long($j)]
+ }
+}
diff --git a/libdb/test/test074.tcl b/libdb/test/test074.tcl
new file mode 100644
index 0000000..00d8c7e
--- /dev/null
+++ b/libdb/test/test074.tcl
@@ -0,0 +1,271 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
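+#
+# (DB_NEXT_NODUP moves the cursor to the first data item of the next
+# key, skipping any remaining duplicates of the current key;
+# DB_PREV_NODUP, used by test082, lands on the last data item of the
+# previous key.)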
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum 74} args } {
+ source ./include.tcl
+ global alphabet
+ global rand_init
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ berkdb srand $rand_init
+
+ # Data prefix--big enough that we get a mix of on-page, off-page,
+ # and multi-off-page dups with the default nitems
+ if { [is_fixed_length $method] == 1 } {
+ set globaldata "somedata"
+ } else {
+ set globaldata [repeat $alphabet 4]
+ }
+
+ puts "Test0$tnum $omethod ($args): Test of $dir"
+
+ # First, test non-dup (and not-very-interesting) case with
+ # all db types.
+
+ puts "\tTest0$tnum.a: No duplicates."
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-nodup.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-nodup.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ # Insert nitems items.
+ puts "\t\tTest0$tnum.a.1: Put loop."
+ for {set i 1} {$i <= $nitems} {incr i} {
+ #
+ # If record based, set key to $i * 2 to leave
+ # holes/unused entries for further testing.
+ #
+ if {[is_record_based $method] == 1} {
+ set key [expr $i * 2]
+ } else {
+ set key "key$i"
+ }
+ set data "$globaldata$i"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\t\tTest0$tnum.a.2: Get($dir)"
+
+ # foundarray($i) is set when key number i is found in the database
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Initialize foundarray($i) to zero for all $i
+	for {set i 1} {$i <= $nitems} {incr i} {
+ set foundarray($i) 0
+ }
+
+ # Walk database using $dir and record each key gotten.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ if {[is_record_based $method] == 1} {
+ set num [expr $key / 2]
+ set desired_key $key
+ error_check_good $method:num $key [expr $num * 2]
+ } else {
+ set num [string range $key 3 end]
+ set desired_key key$num
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ [pad_data $method $globaldata$num]]]
+
+ set foundarray($num) 1
+ }
+
+ puts "\t\tTest0$tnum.a.3: Final key."
+ error_check_good last_db_get [$dbc get $dir] [list]
+
+ puts "\t\tTest0$tnum.a.4: Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # If we are a method that doesn't allow dups, verify that
+ # we get an empty list if we try to use DB_NEXT_DUP
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set dbt [$dbc get $dir]
+ error_check_good $method:nextdup [$dbc get -nextdup] [list]
+ error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good db_close(nodup) [$db close] 0
+
+ # Quit here if we're a method that won't allow dups.
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: Skipping remainder for method $method."
+ return
+ }
+
+ foreach opt { "-dup" "-dupsort" } {
+
+ #
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum$opt.db
+ } else {
+ set testfile test0$tnum$opt.db
+ }
+
+ if { [string compare $opt "-dupsort"] == 0 } {
+ set opt "-dup -dupsort"
+ }
+
+ puts "\tTest0$tnum.b: Duplicates ($opt)."
+
+ puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
+ set db [eval {berkdb_open -create -mode 0644}\
+ $opt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems different keys such that key i has i dups.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set key key$i
+
+ for {set j 1} {$j <= $i} {incr j} {
+ if { $j < 10 } {
+ set data "${globaldata}00$j"
+ } elseif { $j < 100 } {
+ set data "${globaldata}0$j"
+ } else {
+ set data "$globaldata$j"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ # Initialize foundarray($i) to 0 for all i.
+ unset foundarray
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set foundarray($i) 0
+ }
+
+ # Get loop--after each get, move forward a random increment
+ # within the duplicate set.
+ puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
+ set one "001"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ set num [string range $key 3 end]
+
+ set desired_key key$num
+ if { [string compare $dir "-prevnodup"] == 0 } {
+ if { $num < 10 } {
+ set one "00$num"
+ } elseif { $num < 100 } {
+ set one "0$num"
+ } else {
+ set one $num
+ }
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ "$globaldata$one"]]
+
+ set foundarray($num) 1
+
+ # Go forward by some number w/i dup set.
+ set inc [berkdb random_int 0 [expr $num - 1]]
+ for { set j 0 } { $j < $inc } { incr j } {
+ eval {$dbc get -nextdup}
+ }
+ }
+
+ puts "\t\tTest0$tnum.b.3 ($opt): Final key."
+ error_check_good last_db_get($opt) [$dbc get $dir] [list]
+
+ # Verify
+ puts "\t\tTest0$tnum.b.4 ($opt): Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/libdb/test/test075.tcl b/libdb/test/test075.tcl
new file mode 100644
index 0000000..2438f29
--- /dev/null
+++ b/libdb/test/test075.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test075
+# TEST Test of DB->rename().
+# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+proc test075 { method { tnum 75 } args } {
+ global encrypt
+ global errorCode
+ global errorInfo
+
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: $method ($args): Test of DB->rename()"
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping test075 for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Skipping test075 for security"
+ return
+ }
+
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Set up absolute and relative pathnames for test
+ set paths [list $fulldir $reldir]
+ foreach path $paths {
+ puts "\tTest0$tnum: starting test of $path path"
+ set oldfile $path/test0$tnum-old.db
+ set newfile $path/test0$tnum.db
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
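+		#    ("noenv", "env", "auto", "commit" and "abort" in the loop
+		#    below correspond to cases 1-5 respectively.)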
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tTest0$tnum.a: Create/rename file with $op"
+
+ # Make sure we're starting with a clean slate.
+
+ if { $op == "noenv" } {
+ cleanup $path $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+ }
+ }
+
+ if { $op == "env" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ if { $op == "auto" || $op == "commit" || $op == "abort" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path -txn]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+			# The nature of the key and data is unimportant;
+			# use a numeric key so that record-based methods
+			# don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # Regular renames use berkdb dbrename but transaction
+ # protected renames must use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename_file [eval {berkdb dbrename} \
+ $envargs $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename_file [eval {$env dbrename} \
+ -auto_commit $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename_file [eval {$env dbrename} \
+ -txn $txn $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or
+ # anything silly. In the normal case (no env), we already
+ # know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+			# The DBT from the "old" database should be empty and
+			# the one from the "new" database should hold the data,
+			# except in the case of an abort, when the data stays
+			# in the "old" file.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] $data
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's
+ # an external environment, but that env has
+ # errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ set ret [catch {eval {berkdb dbrename} \
+ -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+ }
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tTest0$tnum.c: "
+ cleanup $path $env
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ }
+ }
+ }
+}
diff --git a/libdb/test/test076.tcl b/libdb/test/test076.tcl
new file mode 100644
index 0000000..538ee6e
--- /dev/null
+++ b/libdb/test/test076.tcl
@@ -0,0 +1,80 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
+proc test076 { method { ndbs 1000 } { tnum 76 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ } else {
+ set key "key"
+ }
+ set data "datamoredatamoredata"
+
+ # Create an env if we weren't passed one.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set deleteenv 1
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $encargs]
+ error_check_good env [is_valid_env $env] TRUE
+ set args "$args -env $env"
+ } else {
+ set deleteenv 0
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
+ cleanup $testdir $env
+ set txn ""
+
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set testfile test0$tnum.$i.db
+
+ set db [eval {berkdb_open -create -mode 0644}\
+ $args $omethod $testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close($i) [$db close] 0
+ }
+
+ if { $deleteenv == 1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTest0$tnum passed."
+}
diff --git a/libdb/test/test077.tcl b/libdb/test/test077.tcl
new file mode 100644
index 0000000..6e3b959
--- /dev/null
+++ b/libdb/test/test077.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
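+#
+# (DB_GET_RECNO is only supported on btrees opened with record
+# numbering (-recnum), which is why every method other than rbtree is
+# skipped below.)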
+proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: Test of DB_GET_RECNO."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest0$tnum: Skipping for method $method."
+ return
+ }
+
+ set data $alphabet
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a: Populating database."
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set key [format %5d $i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.b: Verifying record numbers."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
+
+ set i 1
+ for { set dbt [$dbc get -first] } \
+ { [string length $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set recno [$dbc get -get_recno]
+ set keynum [expr [lindex [lindex $dbt 0] 0]]
+
+ # Verify that i, the number that is the key, and recno
+ # are all equal.
+ error_check_good key($i) $keynum $i
+ error_check_good recno($i) $recno $i
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/libdb/test/test078.tcl b/libdb/test/test078.tcl
new file mode 100644
index 0000000..d4dea0f
--- /dev/null
+++ b/libdb/test/test078.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
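+#
+# ("$db count $key" returns the number of data items stored for $key,
+# i.e. the duplicate count; without duplicates the answer is always 1.)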
+proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
+ source ./include.tcl
+ global alphabet rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of key counts."
+
+ berkdb srand $rand_init
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.a: No duplicates, trivial answer."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test078: skipping for specific pagesizes"
+ return
+ }
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.a [$db count $i] 1
+ }
+ error_check_good db_close.a [$db close] 0
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts \
+ "\tTest0$tnum.b: Duplicates not supported in $method, skipping."
+ return
+ }
+
+ foreach tuple {{b sorted "-dup -dupsort"} {c unsorted "-dup"}} {
+ set letter [lindex $tuple 0]
+ set dupopt [lindex $tuple 2]
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
+
+ puts "\t\tTest0$tnum.$letter.1: Populating database."
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ for { set j 0 } { $j < $i } { incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$letter,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.2: "
+ puts "Verifying dup counts on first dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ error_check_good count.$letter,$i \
+ [$db count $i] $i
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.3: "
+ puts "Verifying dup counts on random dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ set key [berkdb random_int 1 $nkeys]
+			error_check_good count.$letter,$key \
+			    [$db count $key] $key
+ }
+ error_check_good db_close.$letter [$db close] 0
+ }
+}
diff --git a/libdb/test/test079.tcl b/libdb/test/test079.tcl
new file mode 100644
index 0000000..33dee9e
--- /dev/null
+++ b/libdb/test/test079.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
+proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
+ if { [ is_queueext $method ] == 1 } {
+ set method "queue";
+ lappend args "-extent" "20"
+ }
+ eval {test006 $method $nentries 1 $tnum -pagesize $pagesize} $args
+}
diff --git a/libdb/test/test080.tcl b/libdb/test/test080.tcl
new file mode 100644
index 0000000..11d72a4
--- /dev/null
+++ b/libdb/test/test080.tcl
@@ -0,0 +1,126 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test080
+# TEST Test of DB->remove()
+proc test080 { method {tnum 80} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of DB->remove()"
+
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ # Test both relative and absolute path
+ set paths [list $fulldir $testdir]
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != ""} {
+ puts "Skipping test080 for security"
+ return
+ }
+ if { $eindex != -1 } {
+ incr eindex
+ set e [lindex $args $eindex]
+ puts "Skipping test080 for env $e"
+ return
+ }
+
+ foreach path $paths {
+
+ set dbfile test0$tnum.db
+ set testfile $path/$dbfile
+
+ # Loop through test using the following remove options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+		# 3. remove with auto-commit
+		# 4. remove in committed transaction
+		# 5. remove in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ # Make sure we're starting with a clean slate.
+ env_cleanup $testdir
+ if { $op == "noenv" } {
+ set dbfile $testfile
+ set e NULL
+ set envargs ""
+ } else {
+ if { $op == "env" } {
+ set largs ""
+ } else {
+ set largs " -txn"
+ }
+ set e [eval {berkdb_env -create -home $path} $largs]
+ set envargs "-env $e"
+ error_check_good env_open [is_valid_env $e] TRUE
+ }
+
+ puts "\tTest0$tnum: dbremove with $op in $path"
+ puts "\tTest0$tnum.a.1: Create file"
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $envargs $args {$dbfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+			# The nature of the key and data is unimportant;
+			# use a numeric key so that record-based methods
+			# don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good file_exists_before \
+ [file exists $testfile] 1
+
+ # Use berkdb dbremove for non-transactional tests
+ # and $env dbremove for transactional tests
+ puts "\tTest0$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} $envargs $dbfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$e dbremove} -auto_commit $dbfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$e txn]
+ error_check_good remove_$op \
+ [eval {$e dbremove} -txn $txn $dbfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\tTest0$tnum.a.3: Check that file is gone"
+ # File should now be gone, except in the case of an abort.
+ if { $op != "abort" } {
+ error_check_good exists_after \
+ [file exists $testfile] 0
+ } else {
+ error_check_good exists_after \
+ [file exists $testfile] 1
+ }
+
+ if { $e != "NULL" } {
+ error_check_good env_close [$e close] 0
+ }
+
+ set dbfile test0$tnum-old.db
+ set testfile $path/$dbfile
+ }
+ }
+}
diff --git a/libdb/test/test081.tcl b/libdb/test/test081.tcl
new file mode 100644
index 0000000..140b8c4
--- /dev/null
+++ b/libdb/test/test081.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
+proc test081 { method {ndups 13} {tnum 81} args} {
+ source ./include.tcl
+
+ eval {test017 $method 1 $ndups $tnum} $args
+}
diff --git a/libdb/test/test082.tcl b/libdb/test/test082.tcl
new file mode 100644
index 0000000..0d065e2
--- /dev/null
+++ b/libdb/test/test082.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum 82} args} {
+ source ./include.tcl
+
+ eval {test074 $method $dir $nitems $tnum} $args
+}
diff --git a/libdb/test/test083.tcl b/libdb/test/test083.tcl
new file mode 100644
index 0000000..5c5f8ae
--- /dev/null
+++ b/libdb/test/test083.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test083
+# TEST Test of DB->key_range.
+proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test083 $method ($args): Test of DB->key_range"
+ if { [is_btree $method] != 1 } {
+ puts "\tTest083: Skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test083: skipping for specific pagesizes"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test083.db
+ set env NULL
+ } else {
+ set testfile test083.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # We assume that numbers will be at most six digits wide
+ error_check_bad maxitems_range [expr $maxitems > 999999] 1
+
+ # We want to test key_range on a variety of sizes of btree.
+ # Start at ten keys and work up to $maxitems keys, at each step
+ # multiplying the number of keys by $step.
+ for { set nitems 10 } { $nitems <= $maxitems }\
+ { set nitems [expr $nitems * $step] } {
+
+ puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} \
+ -pagesize $pgsz $omethod $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv
+
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc t83_build { db nitems env txnenv } {
+ source ./include.tcl
+
+ puts "\tTest083.b: Populating database with $nitems keys"
+
+ set keylist {}
+ puts "\t\tTest083.b.1: Generating key list"
+ for { set i 0 } { $i < $nitems } { incr i } {
+ lappend keylist $i
+ }
+
+ # With randomly ordered insertions, the range of errors we
+ # get from key_range can be unpredictably high [#2134]. For now,
+ # just skip the randomization step.
+ #puts "\t\tTest083.b.2: Randomizing key list"
+ #set keylist [randomize_list $keylist]
+ #puts "\t\tTest083.b.3: Populating database with randomized keys"
+
+ puts "\t\tTest083.b.2: Populating database"
+ set data [repeat . 50]
+ set txn ""
+ foreach keynum $keylist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+}
+
+proc t83_test { db nitems env txnenv } {
+ # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
+ # all the way through the database. Make sure the key_ranges
+ # aren't off by more than 10%.
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest083.c: Verifying ranges..."
+
+ for { set i 0 } { $i < $nitems } \
+ { incr i [expr $nitems / [berkdb random_int 3 16]] } {
+ puts "\t\t...key $i"
+ error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
+
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_bad key$j \
+ [llength [set dbt [$dbc get -next]]] 0
+ }
+
+ set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
+
+ #puts $ranges
+ error_check_good howmanyranges [llength $ranges] 3
+
+ set lessthan [lindex $ranges 0]
+ set morethan [lindex $ranges 2]
+
+ set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
+
+ roughly_equal $rangesum 1 0.05
+
+ # Wild guess.
+ if { $nitems < 500 } {
+ set tol 0.3
+		} else {
+ set tol 0.15
+ }
+
+ roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+}
+
+# Verify that $a does not exceed $b by more than $tol.  (Note this is a
+# one-sided check: values of $a well below $b always pass.)
+proc roughly_equal { a b tol } {
+	error_check_good "$a =~ $b" [expr $a - $b < $tol] 1
+}
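+
+# A minimal sketch of how the three values returned by DB->key_range
+# (used in t83_test above) are normally read: {less equal greater} are
+# the estimated fractions of keys less than, equal to, and greater than
+# the given key, and they should sum to roughly 1.0.  The handle and key
+# are placeholders; nothing in this test calls this proc.
+proc t83_keyrange_example { db key } {
+	set ranges [$db keyrange $key]
+	set less [lindex $ranges 0]
+	set equal [lindex $ranges 1]
+	set greater [lindex $ranges 2]
+	# For a key present in the database, $equal is nonzero and $less
+	# approximates the key's position in sort order.
+	return [list $less $equal $greater [expr $less + $equal + $greater]]
+}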
diff --git a/libdb/test/test084.tcl b/libdb/test/test084.tcl
new file mode 100644
index 0000000..762835e
--- /dev/null
+++ b/libdb/test/test084.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
+proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-empty.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-empty.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test084: skipping for specific pagesizes"
+ return
+ }
+
+ cleanup $testdir $env
+
+ set args "-pagesize $pagesize $args"
+
+ eval {test001 $method $nentries 0 $tnum 0} $args
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ # For good measure, create a second database that's empty
+ # with the large page size. (There was a verifier bug that
+ # choked on empty 64K pages. [#2408])
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good empty_db [is_valid_db $db] TRUE
+ error_check_good empty_db_close [$db close] 0
+}
diff --git a/libdb/test/test085.tcl b/libdb/test/test085.tcl
new file mode 100644
index 0000000..078e6c2
--- /dev/null
+++ b/libdb/test/test085.tcl
@@ -0,0 +1,332 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
+proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test085: skipping for specific pagesizes"
+ return
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "AA"
+ set key "BBB"
+ set postkey "CCCC"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set datum $alphabet
+ set postdatum "0987654321"
+ set txn ""
+
+ append args " -pagesize $pagesize -dup"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+
+ # Skip for all non-btrees. (Rbtrees don't count as btrees, for
+ # now, since they don't support dups.)
+ if { [is_btree $method] != 1 } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Duplicates w/ deleted item cursor."
+ }
+
+ # Repeat the test with both on-page and off-page numbers of dups.
+ foreach ndups "$onp $offp" {
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
+ # come before and after them given a placement of
+ # the deleted item at the beginning or end of the dupset.
+ set final [expr $ndups - 1]
+ set putops {
+ {{-before} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-before} "" {[test085_ddatum $final]} $postdatum end}
+ {{-current} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-current} "" {[test085_ddatum $final]} $postdatum end}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
+ {{-after} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-after} "" {[test085_ddatum $final]} $postdatum end}
+ }
+
+ # Get operations we want to test on a cursor set to the
+ # deleted item, any args to get, and the expected key/data pair.
+ set getops {
+ {{-current} "" "" "" beginning}
+ {{-current} "" "" "" end}
+ {{-next} "" $key {[test085_ddatum 0]} beginning}
+ {{-next} "" $postkey $postdatum end}
+ {{-prev} "" $prekey $predatum beginning}
+ {{-prev} "" $key {[test085_ddatum $final]} end}
+ {{-first} "" $prekey $predatum beginning}
+ {{-first} "" $prekey $predatum end}
+ {{-last} "" $postkey $postdatum beginning}
+ {{-last} "" $postkey $postdatum end}
+ {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
+ {{-nextdup} "" EMPTYLIST "" end}
+ {{-nextnodup} "" $postkey $postdatum beginning}
+ {{-nextnodup} "" $postkey $postdatum end}
+ {{-prevnodup} "" $prekey $predatum beginning}
+ {{-prevnodup} "" $prekey $predatum end}
+ }
+
+ set txn ""
+ foreach pair $getops {
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $encargs $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ set gargs [lindex $pair 1]
+ set ekey ""
+ set edata ""
+ eval set ekey [lindex $pair 2]
+ eval set edata [lindex $pair 3]
+
+ set dbt [eval $dbc get $op $gargs]
+ if { [string compare $ekey EMPTYLIST] == 0 } {
+ error_check_good dbt($op,$ndups) \
+ [llength $dbt] 0
+ } else {
+ error_check_good dbt($op,$ndups) $dbt \
+ [list [list $ekey $edata]]
+ }
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+			# Remove the test file so we can do without the
+			# truncate flag.  This is okay because we've
+			# already done the verify and dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
+ }
+
+ foreach pair $putops {
+ # Open and set up database.
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $args $encargs $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ # Put duplicates.
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ # Set up cursors for stability test.
+ set pre_dbc [eval {$db cursor} $txn]
+ error_check_good pre_set [$pre_dbc get -set $prekey] \
+ [list [list $prekey $predatum]]
+ set post_dbc [eval {$db cursor} $txn]
+ error_check_good post_set [$post_dbc get -set $postkey]\
+ [list [list $postkey $postdatum]]
+ set first_dbc [eval {$db cursor} $txn]
+ error_check_good first_set \
+ [$first_dbc get -get_both $key [test085_ddatum 0]] \
+ [list [list $key [test085_ddatum 0]]]
+ set last_dbc [eval {$db cursor} $txn]
+ error_check_good last_set \
+ [$last_dbc get -get_both $key [test085_ddatum \
+ [expr $ndups - 1]]] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ set k [lindex $pair 1]
+ set d_before ""
+ set d_after ""
+ eval set d_before [lindex $pair 2]
+ eval set d_after [lindex $pair 3]
+ set newdatum "NewDatum"
+ error_check_good dbc_put($op,$ndups) \
+ [eval $dbc put $op $k $newdatum] 0
+ error_check_good dbc_prev($op,$ndups) \
+ [lindex [lindex [$dbc get -prev] 0] 1] \
+ $d_before
+ error_check_good dbc_current($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $newdatum
+
+ error_check_good dbc_next($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $d_after
+
+ # Verify stability of pre- and post- cursors.
+ error_check_good pre_stable [$pre_dbc get -current] \
+ [list [list $prekey $predatum]]
+ error_check_good post_stable [$post_dbc get -current] \
+ [list [list $postkey $postdatum]]
+ error_check_good first_stable \
+ [$first_dbc get -current] \
+ [list [list $key [test085_ddatum 0]]]
+ error_check_good last_stable \
+ [$last_dbc get -current] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
+ error_check_good ${c}_close [$c close] 0
+ }
+
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+			# Remove the test file so we can do without the
+			# truncate flag.  This is okay because we've
+			# already done the verify and dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+ }
+ }
+}
+
+# Set up the test database; put $prekey, $key, and $postkey with their
+# respective data, and then delete $key with a new cursor. Return that
+# cursor, still pointing to the deleted item.
+proc test085_setup { db txn } {
+ upvar key key
+ upvar prekey prekey
+ upvar postkey postkey
+ upvar predatum predatum
+ upvar postdatum postdatum
+
+ # no one else should ever see this one!
+ set datum "bbbbbbbb"
+
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good dbc_getset [$dbc get -get_both $key $datum] \
+ [list [list $key $datum]]
+
+ error_check_good dbc_del [$dbc del] 0
+
+ return $dbc
+}
+
+proc test085_ddatum { a } {
+ global alphabet
+ return $a$alphabet
+}
diff --git a/libdb/test/test086.tcl b/libdb/test/test086.tcl
new file mode 100644
index 0000000..a334b51
--- /dev/null
+++ b/libdb/test/test086.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
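+#
+# The core idiom this test exercises, sketched in isolation (the env and
+# db handles are placeholders and nothing in the test calls this proc):
+# position a cursor inside a parent transaction, force page splits inside
+# a child transaction that is then aborted, and confirm the cursor still
+# returns its original key/data pair.
+proc test086_sketch { env db } {
+	set txn [$env txn]
+	set dbc [$db cursor -txn $txn]
+	set orig [$dbc get -first]
+	set ctxn [$env txn -parent $txn]
+	# ... puts made with "-txn $ctxn" that split the page go here ...
+	error_check_good sketch_abort [$ctxn abort] 0
+	error_check_good sketch_stable [$dbc get -current] $orig
+	error_check_good sketch_dbc_close [$dbc close] 0
+	error_check_good sketch_commit [$txn commit] 0
+}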
+proc test086 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across aborted\
+ btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then this test won't work.
+ if { $eindex == -1 } {
+ # But we will be using our own env...
+ set testfile test0$tstn.db
+ } else {
+ puts "\tTest$tstn: Environment provided; skipping test."
+ return
+ }
+ set t1 $testdir/t1
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good berkdb_env [is_valid_env $env] TRUE
+
+ puts "\tTest$tstn.a: Create $method database."
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put -txn $txn key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good commit [$txn commit] 0
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for {set i 0; set ret [$db get -txn $txn key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get -txn $txn key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor -txn $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # Create child txn.
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $ctxn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $ctxn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $ctxn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.f: Check and see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ # Put (and this time keep) the keys that caused the split.
+ # We'll delete them to test reverse splits.
+ puts "\tTest$tstn.g: Put back added keys."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $txn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $txn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $txn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Delete added keys to force reverse split."
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key00$i] 0
+ } else {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.i: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.j: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+	puts "\tTest$tstn.k: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+
+ error_check_good commit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/libdb/test/test087.tcl b/libdb/test/test087.tcl
new file mode 100644
index 0000000..cd50c2d
--- /dev/null
+++ b/libdb/test/test087.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST	e. Ditto, but after each one in sequence from last to first
+# TEST	(checking the cursor adjustments that occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
+ source ./include.tcl
+ global alphabet
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum $omethod ($args): "
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then return
+ if { $eindex != -1 } {
+ puts "Environment specified; skipping."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test087: skipping for specific pagesizes"
+ return
+ }
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+ set key "the key"
+ append args " -pagesize $pagesize -dup"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Cursor stability on dup. pages w/ aborts."
+ }
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys $ndups
+
+ puts "\tTest0$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+
+ verify_t73 is_long dbc [expr $i - 1] $key
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
+
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ # We can't do a verification while a child txn is active,
+ # or we'll run into trouble when DEBUG_ROP is enabled.
+ # If this test has trouble, though, uncommenting this
+ # might be illuminating--it makes things a bit more rigorous
+ # and works fine when DEBUG_ROP is not enabled.
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ # verify_t73 is_long dbc $keys $key
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ set is_long($i) 1
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set is_long($i) 0
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Now delete the first item, abort the deletion, and make sure
+ # we're still sane.
+ puts "\tTest0$tnum.g: Cursor delete first item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for the last item.
+ puts "\tTest0$tnum.h: Cursor delete last item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 [expr $keys - 1] 0]
+ error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for all the items.
+ puts "\tTest0$tnum.i: Cursor delete all items, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ for { set i 1 } { $i < $keys } { incr i } {
+ error_check_good "c_get(DB_NEXT, $i)"\
+ [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
+ error_check_good "c_del($i)" [$curs del] 0
+ }
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.j: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good "db close" [$db close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good "env close" [$env close] 0
+}
diff --git a/libdb/test/test088.tcl b/libdb/test/test088.tcl
new file mode 100644
index 0000000..f106adb
--- /dev/null
+++ b/libdb/test/test088.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
+proc test088 { method args } {
+ global errorCode alphabet
+ source ./include.tcl
+
+ set tstn 088
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test088: skipping for specific pagesizes"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key$alphabet$alphabet$alphabet"
+ set data "data$alphabet$alphabet$alphabet"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tstn.db
+ set env NULL
+ } else {
+ set testfile test$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ key/data pairs.
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
+ for {set i 0; set ret [$db get ${key}00000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get ${key}00000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure splits happened."
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db del} $txn {${key}0$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db del} $txn {${key}00$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db del} $txn {${key}000$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db del} $txn {${key}0000$i}]
+ } else {
+ set ret [eval {$db del} $txn {${key}00000$i}]
+ }
+		error_check_good dbdel:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/libdb/test/test089.tcl b/libdb/test/test089.tcl
new file mode 100644
index 0000000..4ed766c
--- /dev/null
+++ b/libdb/test/test089.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
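+#
+# A small sketch of the CDB cursor-duplication rule checked below (the db
+# handle is a placeholder opened in a -cdb environment; nothing in the test
+# calls this proc): a write cursor cannot be duplicated, while a read-only
+# cursor can.
+proc test089_sketch { db } {
+	set wrc [$db cursor -update]
+	set stat [catch {$wrc dup} ret]
+	# $stat is nonzero and $ret mentions "Cannot duplicate writeable cursor".
+	set roc [$db cursor]
+	set dupc [$roc dup]
+	error_check_good sketch_dup_close [$dupc close] 0
+	error_check_good sketch_ro_close [$roc close] 0
+	error_check_good sketch_wr_close [$wrc close] 0
+}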
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ # This tests the failure found in #1923
+ puts "\tTest089.b: test delete then get"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ puts "\tTest089.c: CDB cursor dups"
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+ set stat [catch {$dbc dup} ret]
+ error_check_bad wr_cdup_stat $stat 0
+ error_check_good wr_cdup [is_substr $ret \
+ "Cannot duplicate writeable cursor"] 1
+
+ set dbc_ro [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc_ro $db1] TRUE
+ set dup_dbc [$dbc_ro dup]
+ error_check_good rd_cdup [is_valid_cursor $dup_dbc $db1] TRUE
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc_close [$dbc_ro close] 0
+ error_check_good dbc_close [$dup_dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+ append oargs " -dup "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ set dbc [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest089.f: test delete then get of off-page dups"
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/libdb/test/test090.tcl b/libdb/test/test090.tcl
new file mode 100644
index 0000000..de745a5
--- /dev/null
+++ b/libdb/test/test090.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {txn -txn} {tnum "90"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test001 $method $nentries 4294967000 $tnum 0} $args
+}
diff --git a/libdb/test/test091.tcl b/libdb/test/test091.tcl
new file mode 100644
index 0000000..9beae1f
--- /dev/null
+++ b/libdb/test/test091.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
+proc test091 { method {nconsumers 4} \
+ {nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
+ if { [is_queue $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
+}
diff --git a/libdb/test/test092.tcl b/libdb/test/test092.tcl
new file mode 100644
index 0000000..a441a8e
--- /dev/null
+++ b/libdb/test/test092.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice, once with dirty read and once without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
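+#
+# A minimal sketch (not invoked by the test; the env, file name, and the
+# key "somekey" are placeholders) of the three ways dirty reads are
+# requested below.
+proc test092_sketch { env testfile } {
+	# 1. Read through a transaction opened with -dirty, via a -dirty handle.
+	set tdr [$env txn -dirty]
+	set dbdr [berkdb_open -auto_commit -env $env -dirty -rdonly $testfile]
+	set ret [$dbdr get -txn $tdr somekey]
+	# 2. Ask for dirty data on the get call itself.
+	set ret [$dbdr get -dirty somekey]
+	# 3. Ask for dirty data when the cursor is created.
+	set dbc [$dbdr cursor -dirty]
+	set ret [$dbc get -first]
+	error_check_good sketch_dbc_close [$dbc close] 0
+	error_check_good sketch_db_close [$dbdr close] 0
+	error_check_good sketch_txn_commit [$tdr commit] 0
+}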
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+	#	the dirty txn handle on the cursor call) (dbccl).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -dirty]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
+ -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ #
+	# Now that we have all of our handles, rewrite each entry's data
+	# to the uppercase version of its original string.
+ puts "\tTest092.c: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ set ret [eval {$dbdr get -dirty} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.e: Check dirty data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.f: Check dirty data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.g: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+	puts "\tTest092.h: Check clean data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.i: Check clean data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.j: Check clean data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092.
+# Clean checks mean keys and data are identical.
+# Dirty checks mean data are uppercase versions of keys.
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
+
diff --git a/libdb/test/test093.tcl b/libdb/test/test093.tcl
new file mode 100644
index 0000000..acebb9d
--- /dev/null
+++ b/libdb/test/test093.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test093
+# TEST Test using set_bt_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
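+#
+# Shape of a bt_compare callback as used from Tcl (a hypothetical example;
+# the comparison and sort procs this test actually passes, test093_cmp1 and
+# friends, are defined separately and may differ): the proc receives the two
+# keys and returns a negative, zero, or positive value, and the proc name is
+# handed to berkdb_open via -btcompare.
+proc test093_example_cmp { a b } {
+	# Reverse the usual lexical order by comparing b against a.
+	return [string compare $b $a]
+}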
+proc test093 { method {nentries 10000} {tnum "93"} args} {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ }
+ puts "Test0$tnum: $method ($args) $nentries using btcompare"
+
+
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
+ #
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so verification doesn't fail. (There's currently
+ # no way to specify a comparison function to berkdb dbverify.)
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_check
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_checkbig
+ puts "\tTest0$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
+
+proc test093_sort2 {} {
+ global btvals
+
+	# We have to reverse them, sort them, and then
+	# reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
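+
+# A minimal usage sketch (separate from the test) showing how one of the
+# comparison callbacks above changes the btree ordering; "demo093.db" is
+# a hypothetical file name.
+proc test093_usage_sketch { } {
+	set db [berkdb_open -create -mode 0644 \
+	    -btcompare test093_cmp1 -btree demo093.db]
+	foreach w {apple melon zebra} {
+		$db put $w $w
+	}
+	# test093_cmp1 reverses the comparison, so a forward walk returns
+	# the keys in descending order: zebra, melon, apple.
+	set dbc [$db cursor]
+	for {set d [$dbc get -first]} {[llength $d] != 0} \
+	    {set d [$dbc get -next]} {
+		puts [lindex [lindex $d 0] 0]
+	}
+	$dbc close
+	$db close
+}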
+
+# Check function for test093; the data is the reverse of the key.
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys; the key is the data (a file name)
+# followed by that file's contents.
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/libdb/test/test094.tcl b/libdb/test/test094.tcl
new file mode 100644
index 0000000..dc841bb
--- /dev/null
+++ b/libdb/test/test094.tcl
@@ -0,0 +1,251 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST For each word, insert ndups duplicate data items of the form
+# TEST i:word under the word as key; verify each set of duplicates.
+# TEST Repeat with large data items to force off-page duplicates.
+proc test094 { method {nentries 10000} {ndups 10} {tnum "94"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr -dupcompare test094_cmp \
+ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest0$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+	puts "\tTest0$tnum.c:\
+	    big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest0$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Clean up the test directory, since there's currently
+ # no way to specify a dup_compare function to berkdb dbverify
+ # and without one it will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
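+
+# A minimal usage sketch (separate from the test) of wiring up the
+# duplicate comparison callback; "demo094.db" is a hypothetical file name.
+proc test094_usage_sketch { } {
+	set db [berkdb_open -dupcompare test094_cmp -dup -dupsort \
+	    -create -mode 0644 -btree demo094.db]
+	# Three duplicates under one key; test094_cmp reverses the default
+	# order, so they come back as 2:x, 1:x, 0:x.
+	foreach i {0 1 2} {
+		$db put x $i:x
+	}
+	puts [$db get x]
+	$db close
+}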
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc test094_dup_big { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries)
+ # Check id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
diff --git a/libdb/test/test095.tcl b/libdb/test/test095.tcl
new file mode 100644
index 0000000..e6d0ba5
--- /dev/null
+++ b/libdb/test/test095.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test095
+# TEST Bulk get test. [#2934]
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+		# With no external env supplied, give the database a large
+		# private cache so pages aren't forced out--this isn't an
+		# mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.h: Growing\
+ database with $noverflows overflow sets (max item size $max)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum i [expr 10 * 8192] 1
+ t95_gettest $db $tnum j [expr $max * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l [expr 10 * 8192] 1
+ t95_cgettest $db $tnum m [expr $max * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
+
+# Test of invalid flag combinations for -multi
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi "
+ } else {
+ set action "dbc get -multi "
+ }
+ puts "\tTest0$tnum.$letter: $action with invalid flag combinations"
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for basic -multi get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach flag $badflags {
+ catch {eval $db get -multi $bufsize -$flag} ret
+ error_check_good \
+ db:get:multi:$flag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for cursor -multi get
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach flag $cbadflags {
+ catch {eval $dbc get -multi $bufsize -$flag} ret
+ error_check_good dbc:get:multi:$flag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ puts "\t\tTest0$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets.  Set i uses the key word$i and contains i duplicate
+# data items of the form word.<j>, where "word" gets (i * pad_bytes)
+# bytes of extra padding.
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
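+
+# A minimal usage sketch (separate from the test) of the bulk-get
+# interface exercised above: -multi asks for as many key/data pairs as
+# fit in the given buffer.  "demo095.db" is a hypothetical file name.
+proc t95_usage_sketch { } {
+	set db [berkdb_open -create -dup -btree demo095.db]
+	foreach j {0 1 2} {
+		$db put word word.$j
+	}
+	# All three duplicates of "word" in one call, using a 64KB buffer.
+	puts [$db get -multi 65536 word]
+	# Cursor form: position with -set, then sweep the duplicates.
+	set dbc [$db cursor]
+	puts [$dbc get -multi 65536 -set word]
+	$dbc close
+	$db close
+}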
diff --git a/libdb/test/test096.tcl b/libdb/test/test096.tcl
new file mode 100644
index 0000000..24bded2
--- /dev/null
+++ b/libdb/test/test096.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test096
+# TEST Db->truncate test.
+proc test096 { method {pagesize 512} {nentries 50} {ndups 4} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -auto_commit]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txncommit [$txn commit] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -auto_commit -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
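+
+# A minimal usage sketch (separate from the test) of the transactional
+# truncate behavior verified above: truncate returns the number of
+# records discarded, and aborting the enclosing txn restores them.  The
+# env home "TESTDIR" and file name are placeholders.
+proc test096_usage_sketch { } {
+	set env [berkdb_env -create -home TESTDIR -txn]
+	set db [berkdb_open -create -auto_commit -env $env -btree trunc.db]
+	set t [$env txn]
+	foreach k {a b c} {
+		$db put -txn $t $k $k
+	}
+	$t commit
+	# Truncate inside a txn: the return value is the record count ...
+	set t [$env txn]
+	puts [$db truncate -txn $t]		;# 3
+	# ... and aborting the txn brings every record back.
+	$t abort
+	puts [llength [$db get -glob *]]	;# 3
+	$db close
+	$env close
+}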
diff --git a/libdb/test/test097.tcl b/libdb/test/test097.tcl
new file mode 100644
index 0000000..b53d128
--- /dev/null
+++ b/libdb/test/test097.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create \
+ -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Create the database and open the dictionary
+ set testfile test097.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $testfile $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; the data should always be the fixed string.
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env testfile largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $testfile.$i}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
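+
+# A minimal sketch (separate from the test) of the error-handling pattern
+# test097_open relies on: catch the open, then inspect the global
+# errorCode to tell "out of file descriptors" apart from a real failure.
+proc test097_open_sketch { env name } {
+	global errorCode
+	if { [catch {berkdb_open -env $env -create -btree $name} db] } {
+		if { [is_substr $errorCode EMFILE] || \
+		    [is_substr $errorCode ENFILE] } {
+			return ""	;# hit the descriptor limit
+		}
+		error "unexpected open failure: $db"
+	}
+	return $db
+}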
diff --git a/libdb/test/test098.tcl b/libdb/test/test098.tcl
new file mode 100644
index 0000000..d5936fc
--- /dev/null
+++ b/libdb/test/test098.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
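+
+# A minimal usage sketch (separate from the test) of associating a
+# secondary index with a Tcl callback, as [callback_n 0] is used above.
+# The callback receives each primary key/data pair and returns the
+# secondary key; file names here are hypothetical.
+proc test098_skey { pkey pdata } {
+	return $pdata
+}
+proc test098_usage_sketch { } {
+	set pri [berkdb_open -create -btree demo098-pri.db]
+	set sec [berkdb_open -create -btree demo098-sec.db]
+	$pri associate test098_skey $sec
+	$pri put aaa data1
+	# A cursor on the secondary yields {secondary-key primary-data}.
+	set bc [$sec cursor]
+	puts [$bc get -first]	;# {{data1 data1}}
+	$bc close
+	$sec close
+	$pri close
+}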
diff --git a/libdb/test/test099.tcl b/libdb/test/test099.tcl
new file mode 100644
index 0000000..1aec360
--- /dev/null
+++ b/libdb/test/test099.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+# global kvals
+# set key [expr $count]
+# set kvals($key) [pad_data $method $str]
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for the dumped file; key and data should be identical.
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
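+
+# A minimal usage sketch (separate from the test) of the two cursor
+# operations exercised above, assuming a btree opened with -recnum (the
+# flag the "rbtree" method adds); "demo099.db" is a hypothetical name.
+proc test099_usage_sketch { } {
+	set db [berkdb_open -create -btree -recnum demo099.db]
+	foreach w {apple banana cherry} {
+		$db put $w $w
+	}
+	set dbc [$db cursor]
+	# -set_recno positions the cursor by record number ...
+	puts [$dbc get -set_recno 2]	;# {{banana banana}}
+	# ... and -get_recno reports the current record number.
+	puts [$dbc get -get_recno]	;# 2
+	$dbc close
+	$db close
+}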
diff --git a/libdb/test/test100.tcl b/libdb/test/test100.tcl
new file mode 100644
index 0000000..b860888
--- /dev/null
+++ b/libdb/test/test100.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {txn -txn} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/libdb/test/test101.tcl b/libdb/test/test101.tcl
new file mode 100644
index 0000000..c5e11ff
--- /dev/null
+++ b/libdb/test/test101.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 10000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 1000 WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/libdb/test/testparams.tcl b/libdb/test/testparams.tcl
new file mode 100644
index 0000000..28dc65b
--- /dev/null
+++ b/libdb/test/testparams.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+
+set subs {bigfile dead env lock log memp mutex recd rep rpc rsrc \
+ sdb sdbtest sec si test txn}
+
+set num_test(bigfile) 2
+set num_test(dead) 7
+set num_test(env) 11
+set num_test(lock) 5
+set num_test(log) 5
+set num_test(memp) 3
+set num_test(mutex) 3
+set num_test(recd) 20
+set num_test(rep) 5
+set num_test(rpc) 5
+set num_test(rsrc) 4
+set num_test(sdb) 12
+set num_test(sdbtest) 2
+set num_test(sec) 2
+set num_test(si) 6
+set num_test(test) 101
+set num_test(txn) 9
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
+set parms(subdb001) ""
+set parms(subdb002) 10000
+set parms(subdb003) 1000
+set parms(subdb004) ""
+set parms(subdb005) 100
+set parms(subdb006) 100
+set parms(subdb007) ""
+set parms(subdb008) ""
+set parms(subdb009) ""
+set parms(subdb010) ""
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(test001) {10000 0 "01" 0}
+set parms(test002) 10000
+set parms(test003) ""
+set parms(test004) {10000 4 0}
+set parms(test005) 10000
+set parms(test006) {10000 0 6}
+set parms(test007) {10000 7}
+set parms(test008) {8 0}
+set parms(test009) ""
+set parms(test010) {10000 5 10}
+set parms(test011) {10000 5 11}
+set parms(test012) ""
+set parms(test013) 10000
+set parms(test014) 10000
+set parms(test015) {7500 0}
+set parms(test016) 10000
+set parms(test017) {0 19 17}
+set parms(test018) 10000
+set parms(test019) 10000
+set parms(test020) 10000
+set parms(test021) 10000
+set parms(test022) ""
+set parms(test023) ""
+set parms(test024) 10000
+set parms(test025) {10000 0 25}
+set parms(test026) {2000 5 26}
+set parms(test027) {100}
+set parms(test028) ""
+set parms(test029) 10000
+set parms(test030) 10000
+set parms(test031) {10000 5 31}
+set parms(test032) {10000 5 32}
+set parms(test033) {10000 5 33}
+set parms(test034) 10000
+set parms(test035) 10000
+set parms(test036) 10000
+set parms(test037) 100
+set parms(test038) {10000 5 38}
+set parms(test039) {10000 5 39}
+set parms(test040) 10000
+set parms(test041) 10000
+set parms(test042) 1000
+set parms(test043) 10000
+set parms(test044) {5 10 0}
+set parms(test045) 1000
+set parms(test046) ""
+set parms(test047) ""
+set parms(test048) ""
+set parms(test049) ""
+set parms(test050) ""
+set parms(test051) ""
+set parms(test052) ""
+set parms(test053) ""
+set parms(test054) ""
+set parms(test055) ""
+set parms(test056) ""
+set parms(test057) ""
+set parms(test058) ""
+set parms(test059) ""
+set parms(test060) ""
+set parms(test061) ""
+set parms(test062) {200 200 62}
+set parms(test063) ""
+set parms(test064) ""
+set parms(test065) ""
+set parms(test066) ""
+set parms(test067) {1000 67}
+set parms(test068) ""
+set parms(test069) {50 69}
+set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
+set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
+set parms(test072) {512 20 72}
+set parms(test073) {512 50 73}
+set parms(test074) {-nextnodup 100 74}
+set parms(test075) {75}
+set parms(test076) {1000 76}
+set parms(test077) {1000 512 77}
+set parms(test078) {100 512 78}
+set parms(test079) {10000 512 79}
+set parms(test080) {80}
+set parms(test081) {13 81}
+set parms(test082) {-prevnodup 100 82}
+set parms(test083) {512 5000 2}
+set parms(test084) {10000 84 65536}
+set parms(test085) {512 3 10 85}
+set parms(test086) ""
+set parms(test087) {512 50 87}
+set parms(test088) ""
+set parms(test089) 1000
+set parms(test090) {10000 -txn 90}
+set parms(test091) {4 2 1000 0 91}
+set parms(test092) {1000}
+set parms(test093) {10000 93}
+set parms(test094) {10000 10 94}
+set parms(test095) {1000 25 95}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 -txn 100}
+set parms(test101) {10000 -txn 101}
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.javatests }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+}
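+
+# For example, the first entry above is invoked roughly as
+#	cd scr001 && /bin/sh chk.code
+# (an illustrative sketch; the driver that walks this list lives
+# elsewhere in the test suite).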
diff --git a/libdb/test/testutils.tcl b/libdb/test/testutils.tcl
new file mode 100644
index 0000000..2018b94
--- /dev/null
+++ b/libdb/test/testutils.tcl
@@ -0,0 +1,3209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Test system utilities
+#
+# Timestamp -- print time along with elapsed time since last invocation
+# of timestamp.
+proc timestamp {{opt ""}} {
+ global __timestamp_start
+
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
+ if {[string compare $opt "-r"] == 0} {
+ return $now
+ } elseif {[string compare $opt "-t"] == 0} {
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
+ } else {
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
+
+ if {[catch {set start $__timestamp_start}] != 0} {
+ set __timestamp_start $now
+ }
+ set start $__timestamp_start
+
+ set elapsed [expr $now - $start]
+ set the_time [clock format $now -format ""]
+ set __timestamp_start $now
+
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
+ [__fix_num [clock format $now -format "%H"]] \
+ [__fix_num [clock format $now -format "%M"]] \
+ [__fix_num [clock format $now -format "%S"]] \
+ [expr $elapsed / 3600] \
+ [expr ($elapsed % 3600) / 60] \
+ [expr ($elapsed % 3600) % 60]
+ }
+}
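+
+# Illustrative usage (not called anywhere):
+#	puts "now: [timestamp -w]   recovery arg: [timestamp -t]"
+# prints the wallclock time and the same instant in the format that
+# db_recover -t expects.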
+
+proc __fix_num { num } {
+ set num [string trimleft $num "0"]
+ if {[string length $num] == 0} {
+ set num "0"
+ }
+ return $num
+}
+
+# Add a {key,data} pair to the specified database where
+# key=filename and data=file contents.
+proc put_file { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [eval {$db put} $txn $flags {$file $data}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=filename and data=file contents and then write the
+# data to the specified file.
+proc get_file { db txn flags file outfile } {
+ source ./include.tcl
+
+ set fid [open $outfile w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $txn $flags {$file}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+}
+
+# Add a {key,data} pair to the specified database where
+# key=file contents and data=file name.
+proc put_file_as_key { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+	# Use the file name concatenated with the file contents as the
+	# key, rather than the contents alone, to ensure uniqueness.
+ set data $file$filecont
+
+ set ret [eval {$db put} $txn $flags {$data $file}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=file contents and data=file name
+proc get_file_as_key { db txn flags file} {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ set data $file$filecont
+
+ return [eval {$db get} $txn $flags {$data}]
+}
+
+# Open a database file and call the dump function to dump keys to a temp file.
+proc open_and_dump_file {
+ dbname env outfile checkfunc dump_func beg cont } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Open a subdatabase and call the dump function to dump keys to a temp file.
+proc open_and_dump_subfile {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Sequentially read a file and call checkfunc on each key/data pair.
+# Dump the keys out to the file specified by outfile.
+proc dump_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+proc dump_file_direction { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
+ set kd [lindex $d 0]
+ set k [lindex $kd 0]
+ set d2 [lindex $kd 1]
+ $checkfunc $k $d2
+ puts $outf $k
+ # XXX: Geoff Mainland
+ # puts $outf "$k $d2"
+ }
+ close $outf
+}
+
+proc dump_binkey_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_binkey_file_direction $db $txn $outfile $checkfunc \
+ "-first" "-next"
+}
+proc dump_bin_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+# Note: the following procedure assumes that the binary-file-as-keys were
+# inserted into the database by put_file_as_key, and consist of the file
+# name followed by the file contents as key, to ensure uniqueness.
+proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ set inf $d1
+ for {set d [$c get $begin] } { [llength $d] != 0 } \
+ {set d [$c get $cont] } {
+ set kd [lindex $d 0]
+ set keyfile [lindex $kd 0]
+ set data [lindex $kd 1]
+
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+
+ # Chop off the first few bytes--that's the file name,
+ # added for uniqueness in put_file_as_key, which we don't
+ # want in the regenerated file.
+ set namelen [string length $data]
+ set keyfile [string range $keyfile $namelen end]
+ puts -nonewline $ofid $keyfile
+ close $ofid
+
+ $checkfunc $data $d1
+ puts $outf $data
+ flush $outf
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove $d1
+}
+
+proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $k $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
+
+proc make_data_str { key } {
+ set datastr ""
+ for {set i 0} {$i < 10} {incr i} {
+ append datastr $key
+ }
+ return $datastr
+}
+
+proc error_check_bad { func result bad {txn 0}} {
+ if { [binary_compare $result $bad] == 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp] $func returned error value $bad"
+ }
+}
+
+proc error_check_good { func result desired {txn 0} } {
+ if { [binary_compare $desired $result] != 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp]\
+ $func: expected $desired, got $result"
+ }
+}
+
+# Locks have the prefix of their manager.
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc release_list { l } {
+
+ # Now release all the locks
+ foreach el $l {
+ catch { $el put } ret
+ error_check_good lock_put $ret 0
+ }
+}
+
+proc debug { {stop 0} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+
+ set __debug_on 1
+ set __debug_print 1
+ set __debug_test $stop
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_check { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+		# Some tests add an extra dup (e.g. overflow entries).
+		# Check its id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+				# If we changed files, read in the new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Parse duplicate data entries of the form N:data. Data_of returns
+# the data part; id_of returns the numerical part
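+# For example, for the entry "3:foo", id_of returns "3" and data_of
+# returns "foo"; entries without a colon yield "" for both.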
+proc data_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+ return [ string range $str [expr $ndx + 1] end]
+}
+
+proc id_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+
+ return [ string range $str 0 [expr $ndx - 1]]
+}
+
+proc nop { {args} } {
+ return
+}
+
+# Partial put test procedure.
+# Munges a data val through three different partial puts. Stores
+# the final munged string in the dvals array so that you can check
+# it later (dvals should be global). We take the characters that
+# are being replaced, make them capitals and then replicate them
+# some number of times (n_add). We do this at the beginning of the
+# data, at the middle and at the end. The parameters are:
+# db, txn, key -- as per usual. Data is the original data element
+# from which we are starting. n_replace is the number of characters
+# that we will replace. n_add is the number of times we will add
+# the replaced string back in.
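+# For reference, each partial put below uses "-partial [list doff dlen]",
+# which replaces the dlen bytes of the existing record starting at byte
+# offset doff with the new data supplied in the put.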
+proc partial_put { method db txn gflags key data n_replace n_add } {
+ global dvals
+ source ./include.tcl
+
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+
+ eval {$db put} $txn {$key [chop_data $method $data]}
+
+ # Beginning change
+ set s [string range $data 0 [ expr $n_replace - 1 ] ]
+ set repl [ replicate [string toupper $s] $n_add ]
+
+ # This is gross, but necessary: if this is a fixed-length
+ # method, and the chopped length of $repl is zero,
+ # it's because the original string was zero-length and our data item
+ # is all nulls. Set repl to something non-NULL.
+ if { [is_fixed_length $method] && \
+ [string length [chop_data $method $repl]] == 0 } {
+ set repl [replicate "." $n_add]
+ }
+
+ set newstr [chop_data $method $repl[string range $data $n_replace end]]
+ set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # End Change
+ set len [string length $newstr]
+ set spl [expr $len - $n_replace]
+ # Handle case where $n_replace > $len
+ if { $spl < 0 } {
+ set spl 0
+ }
+
+ set s [string range $newstr [ expr $len - $n_replace ] end ]
+ # Handle zero-length keys
+ if { [string length $s] == 0 } { set s "A" }
+
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method \
+ [string range $newstr 0 [expr $spl - 1 ] ]$repl]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # Middle Change
+ set len [string length $newstr]
+ set mid [expr $len / 2 ]
+ set beg [expr $mid - [expr $n_replace / 2] ]
+ set end [expr $beg + $n_replace - 1]
+ set s [string range $newstr $beg $end]
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method [string range $newstr 0 \
+ [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
+
+ set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ set dvals($key) [pad_data $method $newstr]
+}
+
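+# Double $str until it contains at least $times copies.  Note that the
+# result holds the smallest power-of-two number of copies >= $times, not
+# exactly $times (e.g. [replicate "ab" 3] yields "abababab").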
+proc replicate { str times } {
+ set res $str
+ for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
+ append res $res
+ }
+ return $res
+}
+
+proc repeat { str n } {
+ set ret ""
+ while { $n > 0 } {
+ set ret $str$ret
+ incr n -1
+ }
+ return $ret
+}
+
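+# Integer square root, taken by truncating the string form of
+# [expr sqrt($l)] at the decimal point (e.g. [isqrt 10] returns 3).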
+proc isqrt { l } {
+ set s [expr sqrt($l)]
+ set ndx [expr [string first "." $s] - 1]
+ return [string range $s 0 $ndx]
+}
+
+# If we run watch_procs multiple times without an intervening
+# testdir cleanup, it's possible that old sentinel files will confuse
+# us. Make sure they're wiped out before we spawn any other processes.
+proc sentinel_init { } {
+ source ./include.tcl
+
+ set filelist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set filelist $result
+ }
+
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+
+ foreach f $filelist {
+ fileremove $f
+ }
+}
+
+proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
+ source ./include.tcl
+
+ set elapsed 0
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-tenth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 10}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
+ while { 1 } {
+
+ tclsleep $delay
+ incr elapsed $delay
+
+ # Find the list of processes with outstanding sentinel
+ # files (i.e. a begin.pid and no end.pid).
+ set beginlist {}
+ set endlist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set beginlist $result
+ }
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set endlist $result
+ }
+
+ set bpids {}
+ catch {unset epids}
+ foreach begfile $beginlist {
+ lappend bpids [string range $begfile \
+ [string length $testdir/begin.] end]
+ }
+ foreach endfile $endlist {
+ set epids([string range $endfile \
+ [string length $testdir/end.] end]) 1
+ }
+
+ # The set of processes that we still want to watch, $l,
+ # is the set of pids that have begun but not ended
+ # according to their sentinel files.
+ set l {}
+ foreach p $bpids {
+ if { [info exists epids($p)] == 0 } {
+ lappend l $p
+ }
+ }
+
+ set rlist {}
+ foreach i $l {
+ set r [ catch { exec $KILL -0 $i } result ]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ if { [ llength $rlist] == 0 } {
+ break
+ } else {
+ puts "[timestamp] processes running: $rlist"
+ }
+
+ if { $elapsed > $max } {
+ # We have exceeded the limit; kill processes
+ # and report an error
+ foreach i $l {
+ tclkill $i
+ }
+ }
+ }
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
+}
+
+# These routines are all used from within the dbscript.tcl tester.
+proc db_init { dbp do_data } {
+ global a_keys
+ global l_keys
+ source ./include.tcl
+
+ set txn ""
+ set nk 0
+ set lastkey ""
+
+ set a_keys() BLANK
+ set l_keys ""
+
+ set c [$dbp cursor]
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ incr nk
+ if { $do_data == 1 } {
+ if { [info exists a_keys($k)] } {
+				lappend a_keys($k) $d2
+ } else {
+ set a_keys($k) $d2
+ }
+ }
+
+ lappend l_keys $k
+ }
+ error_check_good curs_close [$c close] 0
+
+ return $nk
+}
+
+proc pick_op { min max n } {
+ if { $n == 0 } {
+ return add
+ }
+
+ set x [berkdb random_int 1 12]
+ if {$n < $min} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8} {
+ return get
+ } else {
+ return add
+ }
+ } elseif {$n > $max} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8 } {
+ return get
+ } else {
+ return del
+ }
+
+ } elseif { $x <= 3 } {
+ return del
+ } elseif { $x <= 6 } {
+ return get
+ } elseif { $x <= 9 } {
+ return put
+ } else {
+ return add
+ }
+}
+
+# random_data: Generate a string of random characters.
+# If recno is 0 - Use average to pick a length between 1 and 2 * avg.
+# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2),
+# that will fit into a 32-bit integer.
+# If the unique flag is 1, then make sure that the string is unique
+# in the array "where".
+proc random_data { avg unique where {recno 0} } {
+ upvar #0 $where arr
+ global debug_on
+ set min 1
+ set max [expr $avg+$avg-1]
+ if { $recno } {
+ #
+ # Tcl seems to have problems with values > 30.
+ #
+ if { $max > 30 } {
+ set max 30
+ }
+ set maxnum [expr int(pow(2, $max))]
+ }
+ while {1} {
+ set len [berkdb random_int $min $max]
+ set s ""
+ if {$recno} {
+ set s [berkdb random_int 1 $maxnum]
+ } else {
+ for {set i 0} {$i < $len} {incr i} {
+ append s [int_to_char [berkdb random_int 0 25]]
+ }
+ }
+
+ if { $unique == 0 || [info exists arr($s)] == 0 } {
+ break
+ }
+ }
+
+ return $s
+}
+
+proc random_key { } {
+ global l_keys
+ global nkeys
+ set x [berkdb random_int 0 [expr $nkeys - 1]]
+ return [lindex $l_keys $x]
+}
+
+proc is_err { desired } {
+ set x [berkdb random_int 1 100]
+ if { $x <= $desired } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc pick_cursput { } {
+ set x [berkdb random_int 1 4]
+ switch $x {
+ 1 { return "-keylast" }
+ 2 { return "-keyfirst" }
+ 3 { return "-before" }
+ 4 { return "-after" }
+ }
+}
+
+proc random_cursor { curslist } {
+ global l_keys
+ global nkeys
+
+ set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
+ set dbc [lindex $curslist $x]
+
+ # We want to randomly set the cursor. Pick a key.
+ set k [random_key]
+ set r [$dbc get "-set" $k]
+	error_check_good cursor_get:$k [is_substr $r Error] 0
+
+ # Now move forward or backward some hops to randomly
+ # position the cursor.
+ set dist [berkdb random_int -10 10]
+
+ set dir "-next"
+ set boundary "-first"
+ if { $dist < 0 } {
+ set dir "-prev"
+ set boundary "-last"
+ set dist [expr 0 - $dist]
+ }
+
+ for { set i 0 } { $i < $dist } { incr i } {
+ set r [ record $dbc get $dir $k ]
+		if { [llength $r] == 0 } {
+ set r [ record $dbc get $k $boundary ]
+ }
+ error_check_bad dbcget [llength $r] 0
+ }
+	return [linsert $r 0 $dbc]
+}
+
+proc record { args } {
+# Recording every operation makes tests ridiculously slow on
+# NT, so we are commenting this out; for debugging purposes,
+# it will undoubtedly be useful to uncomment this.
+# puts $args
+# flush stdout
+ return [eval $args]
+}
+
+proc newpair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+ lappend l_keys $k
+ incr nkeys
+}
+
+proc rempair { k } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ unset a_keys($k)
+ set n [lsearch $l_keys $k]
+ error_check_bad rempair:$k $n -1
+ set l_keys [lreplace $l_keys $n $n]
+ incr nkeys -1
+}
+
+proc changepair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+}
+
+proc changedup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
+}
+
+# Insert a dup into the a_keys array with DB_KEYFIRST.
+proc adddup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ if { [llength $d] == 0 } {
+ lappend l_keys $k
+ incr nkeys
+		set a_keys($k) [list $newdata]
+ }
+
+ set ndx 0
+
+	set d [linsert $d $ndx $newdata]
+ set a_keys($k) $d
+}
+
+proc remdup { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+	set d $a_keys($k)
+	error_check_bad remdup:$k [llength $d] 0
+
+	set n [lsearch $d $data]
+	error_check_bad remdup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n]
+}
+
+proc dump_full_file { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good dbcursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get $start] } { [string length $d] != 0 } {
+ set d [$c get $continue] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ $checkfunc $k $d2
+ puts $outf "$k\t$d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+proc int_to_char { i } {
+ global alphabet
+
+ return [string index $alphabet $i]
+}
+
+proc dbcheck { key data } {
+ global l_keys
+ global a_keys
+ global nkeys
+ global check_array
+
+ if { [lsearch $l_keys $key] == -1 } {
+ error "FAIL: Key |$key| not in list of valid keys"
+ }
+
+ set d $a_keys($key)
+
+ if { [info exists check_array($key) ] } {
+ set check $check_array($key)
+ } else {
+ set check {}
+ }
+
+ if { [llength $d] > 1 } {
+ if { [llength $check] != [llength $d] } {
+ # Make the check array the right length
+ for { set i [llength $check] } { $i < [llength $d] } \
+ {incr i} {
+ lappend check 0
+ }
+ set check_array($key) $check
+ }
+
+ # Find this data's index
+ set ndx [lsearch $d $data]
+ if { $ndx == -1 } {
+ error "FAIL: \
+ Data |$data| not found for key $key. Found |$d|"
+ }
+
+ # Set the bit in the check array
+ set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
+ } elseif { [string compare $d $data] != 0 } {
+ error "FAIL: \
+ Invalid data |$data| for key |$key|. Expected |$d|."
+ } else {
+ set check_array($key) 1
+ }
+}
+
+# Dump out the file and verify it
+proc filecheck { file txn } {
+ global check_array
+ global l_keys
+ global nkeys
+ global a_keys
+ source ./include.tcl
+
+ if { [info exists check_array] == 1 } {
+ unset check_array
+ }
+
+ open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
+ "-first" "-next"
+
+ # Check that everything we checked had all its data
+ foreach i [array names check_array] {
+ set count 0
+ foreach j $check_array($i) {
+ if { $j != 1 } {
+ puts -nonewline "Key |$i| never found datum"
+ puts " [lindex $a_keys($i) $count]"
+ }
+ incr count
+ }
+ }
+
+ # Check that all keys appeared in the checked array
+ set count 0
+ foreach k $l_keys {
+ if { [info exists check_array($k)] == 0 } {
+ puts "filecheck: key |$k| not found. Data: $a_keys($k)"
+ }
+ incr count
+ }
+
+ if { $count != $nkeys } {
+ puts "filecheck: Got $count keys; expected $nkeys"
+ }
+}
+
+proc cleanup { dir env { quiet 0 } } {
+ global gen_upgrade
+ global is_qnx_test
+ global old_encrypt
+ global passwd
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ source ./include.tcl
+
+ if { $gen_upgrade == 1 } {
+ set vers [berkdb version]
+ set maj [lindex $vers 0]
+ set min [lindex $vers 1]
+
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
+ } else {
+ set myendianness le
+ }
+
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
+
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ set dbfiles [glob -nocomplain $dir/*.db]
+ foreach dbfile $dbfiles {
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ error_check_good db_dump($dbfile) \
+ [catch {exec $util_path/db_dump -k $dbfile > \
+ $dir/$newbasename.dump}] 0
+
+ # tcl_dump file
+ upgrade_dump $dbfile \
+ $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar}
+ cd $cwd
+ }
+ }
+
+# check_handles
+ set remfiles {}
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ foreach fileorig $result {
+ #
+ # We:
+ # - Ignore any env-related files, which are
+ # those that have __db.* or log.* if we are
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
+ # - Call 'dbremove' on any databases.
+			# - Remove any remaining temp files.
+ #
+ switch -glob -- $fileorig {
+ */DIR_* -
+ */__db.* -
+ */log.* {
+ if { $env != "NULL" } {
+ continue
+ } else {
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
+ }
+ }
+ *.db {
+ set envargs ""
+ set encarg ""
+				#
+				# If we are in an env, it is already open
+				# (with or without crypto), so no encryption
+				# argument is needed here.
+				#
+ if { $env != "NULL"} {
+ set file [file tail $fileorig]
+ set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
+ }
+
+ # If a database is left in a corrupt
+ # state, dbremove might not be able to handle
+ # it (it does an open before the remove).
+ # Be prepared for this, and if necessary,
+ # just forcibly remove the file with a warning
+ # message.
+ set ret [catch \
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
+ if { $ret != 0 } {
+ # If it failed, there is a chance
+ # that the previous run was using
+ # encryption and we cannot know about
+ # it (different tclsh instantiation).
+ # Try to remove it with crypto.
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $envargs $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
+ "FAIL: dbremove in cleanup failed: $res"
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
+ }
+ }
+ default {
+ lappend remfiles $fileorig
+ }
+ }
+ }
+ if {[llength $remfiles] > 0} {
+ eval fileremove -f $remfiles
+ }
+ }
+}
+
+proc log_cleanup { dir } {
+ source ./include.tcl
+
+ set files [glob -nocomplain $dir/log.*]
+ if { [llength $files] != 0} {
+ foreach f $files {
+ fileremove -f $f
+ }
+ }
+}
+
+proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
+ #
+ # If something failed and we are left with a region entry
+ # in /dev/shmem that is zero-length, the envremove will
+ # succeed, and the shm_unlink will succeed, but it will not
+ # remove the zero-length entry from /dev/shmem. Remove it
+ # using fileremove or else all other tests using an env
+ # will immediately fail.
+ #
+ if { $is_qnx_test == 1 } {
+ set region_files [glob -nocomplain /dev/shmem/$dir*]
+ if { [llength $region_files] != 0 } {
+ foreach f $region_files {
+ fileremove -f $f
+ }
+ }
+ }
+ log_cleanup $dir
+ cleanup $dir NULL
+}
+
+proc remote_cleanup { server dir localdir } {
+ set home [file tail $dir]
+ error_check_good cleanup:remove [berkdb envremove -home $home \
+ -server $server] 0
+ catch {exec rsh $server rm -f $dir/*} ret
+ cleanup $localdir NULL
+}
+
+proc help { cmd } {
+ if { [info command $cmd] == $cmd } {
+ set is_proc [lsearch [info procs $cmd] $cmd]
+ if { $is_proc == -1 } {
+ # Not a procedure; must be a C command
+ # Let's hope that it takes some parameters
+ # and that it prints out a message
+ puts "Usage: [eval $cmd]"
+ } else {
+ # It is a tcl procedure
+ puts -nonewline "Usage: $cmd"
+ set args [info args $cmd]
+ foreach a $args {
+ set is_def [info default $cmd $a val]
+ if { $is_def != 0 } {
+ # Default value
+ puts -nonewline " $a=$val"
+ } elseif {$a == "args"} {
+ # Print out flag values
+ puts " options"
+ args
+ } else {
+ # No default value
+ puts -nonewline " $a"
+ }
+ }
+ puts ""
+ }
+ } else {
+ puts "$cmd is not a command"
+ }
+}
+
+# Run a recovery test for a particular operation
+# Notice that we catch the return from CP and do not do anything with it.
+# This is because Solaris CP seems to exit non-zero on occasion, but
+# everything else seems to run just fine.
+#
+# We split it into two functions so that the preparation and command
+# could be executed in a different process than the recovery.
+#
+proc op_codeparse { encodedop op } {
+ set op1 ""
+ set op2 ""
+ switch $encodedop {
+ "abort" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "commit" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "prepare-abort" {
+ set op1 "prepare"
+ set op2 "abort"
+ }
+ "prepare-commit" {
+ set op1 "prepare"
+ set op2 "commit"
+ }
+ "prepare-discard" {
+ set op1 "prepare"
+ set op2 "discard"
+ }
+ }
+
+ if { $op == "op" } {
+ return $op1
+ } else {
+ return $op2
+ }
+}
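+# For example, [op_codeparse "prepare-abort" op] returns "prepare" and
+# [op_codeparse "prepare-abort" sub] returns "abort"; for plain "commit"
+# and "abort" the second operation is empty.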
+
+proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
+ }
+ op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
+}
+
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ source ./include.tcl
+
+ #puts "op_recover: $op $dir $env $dbfile $cmd"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ # Save the initial file and open the environment and the file
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
+ copy_extent_file $dir $dbfile init
+
+ convert_encrypt $env_cmd
+ set env [eval $env_cmd]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set db [berkdb open -auto_commit -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Dump out file contents for initial case
+ open_and_dump_file $dbfile $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set t [$env txn]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_substr $t "txn"] 1
+
+ # Now fill in the db, tmgr, and the txnid in the command
+ set exec_cmd $cmd
+
+ set i [lsearch $cmd ENV]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $env]
+ }
+
+ set i [lsearch $cmd TXNID]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $t]
+ }
+
+ set i [lsearch $exec_cmd DB]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $db]
+ }
+
+ # To test DB_CONSUME, we need to expect a record return, not "0".
+ set i [lsearch $exec_cmd "-consume"]
+ if { $i != -1 } {
+ set record_exec_cmd_ret 1
+ } else {
+ set record_exec_cmd_ret 0
+ }
+
+ # For the DB_APPEND test, we need to expect a return other than
+ # 0; set this flag to be more lenient in the error_check_good.
+ set i [lsearch $exec_cmd "-append"]
+ if { $i != -1 } {
+ set lenient_exec_cmd_ret 1
+ } else {
+ set lenient_exec_cmd_ret 0
+ }
+
+ # Execute command and commit/abort it.
+ set ret [eval $exec_cmd]
+ if { $record_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
+ } elseif { $lenient_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
+ } else {
+ error_check_good "\"$exec_cmd\"" $ret 0
+ }
+
+ set record_exec_cmd_ret 0
+ set lenient_exec_cmd_ret 0
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+ open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+
+ #puts "\t\t\tExecuting txn_$op:$t"
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
+ }
+
+ switch $op {
+ "commit" { puts "\t\tCommand executed and committed." }
+ "abort" { puts "\t\tCommand executed and aborted." }
+ "prepare" { puts "\t\tCommand executed and prepared." }
+ }
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
+ copy_extent_file $dir $dbfile final
+ open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next"
+
+ # If this is an abort or prepare-abort, it should match the
+ # original file.
+ # If this was a commit or prepare-commit, then this file should
+ # match the afterop file.
+ # If this was a prepare without an abort or commit, we still
+ # have transactions active, and peering at the database from
+ # another environment will show data from uncommitted transactions.
+ # Thus we just skip this in the prepare-only case; what
+ # we care about are the results of a prepare followed by a
+ # recovery, which we test later.
+ if { $op == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } elseif { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
+ }
+
+ # Running recovery on this database should not do anything.
+ # Flush all data to disk, close the environment and save the
+ # file.
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
+ flush stdout
+
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+ puts "verified"
+
+ # If we left a txn as prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we have the same
+ # number of txns we want.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # Now close the environment, substitute a file that will need
+ # recovery and try running recovery again.
+ reset_env $env
+ if { $op == "commit" || $op2 == "commit" } {
+ catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
+ move_file_extent $dir $dbfile init copy
+ } else {
+ catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
+ flush stdout
+
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0
+
+ puts "verified"
+
+ set env [eval $env_cmd]
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $final_file $final_file.sort
+ filesort $afterop_file $afterop_file.sort
+ error_check_good \
+ diff(post-$op,recovered):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # This should just close the environment, not blow it away.
+ reset_env $env
+}
+
+proc populate { db method txn n dups bigdata } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } elseif { $dups == 1 } {
+ set key duplicate_key
+ } else {
+ set key $str
+ }
+ if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
+ set str [replicate $str 1000]
+ }
+
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc big_populate { db txn n } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ set key [replicate $str 50]
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc unpopulate { db txn num } {
+ source ./include.tcl
+
+ set c [eval {$db cursor} "-txn $txn"]
+ error_check_bad $db:cursor $c NULL
+ error_check_good $db:cursor [is_substr $c $db] 1
+
+ set i 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ $c del
+ incr i
+		if { $num != 0 && $i >= $num } {
+ break
+ }
+ }
+ error_check_good cursor_close [$c close] 0
+ return 0
+}
+
+proc reset_env { env } {
+ error_check_good env_close [$env close] 0
+}
+
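+# minlocks, maxlocks and minwrites are the per-locker routines used by the
+# deadlock tests that exercise the different detector policies (see
+# dead_check); each simply acquires the standard pattern of locks via
+# countlocks.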
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire a write lock
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
+# This routine will let us obtain a ring of deadlocks.
+# Each locker will get a lock on obj_id, then sleep, and
+# then try to lock (obj_id + 1) % num.
+# When the lock is finally granted, we release our locks and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker deadlocks and the
+# rest all finish successfully.
+proc ring { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+ set nextobj [expr ($obj_id + 1) % $num]
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ puts $lock2
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$obj_id $lock2 NULL
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+# This routine will create massive deadlocks.
+# Each locker will get a readlock on obj_id, then sleep, and
+# then try to upgrade the readlock to a write lock.
+# When the lock is finally granted, we release our first lock and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker succeeds in getting all
+# the locks and everyone else deadlocks.
+proc clump { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ set obj_id 10
+ if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id \
+ [is_valid_lock $lock1 $myenv] TRUE
+ }
+
+ tclsleep 30
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+proc dead_check { t procs timeout dead clean other } {
+ error_check_good $t:$procs:other $other 0
+ switch $t {
+ ring {
+ # with timeouts the number of deadlocks is unpredictable
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ clump {
+ error_check_good $t:$procs:deadlocks $dead \
+ [expr $procs - 1]
+ error_check_good $t:$procs:success $clean 1
+ }
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ default {
+ error "Test $t not implemented"
+ }
+ }
+}
+
+proc rdebug { id op where } {
+ global recd_debug
+ global recd_id
+ global recd_op
+
+ set recd_debug $where
+ set recd_id $id
+ set recd_op $op
+}
+
+proc rtag { msg id } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { $id == $tag } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc zero_list { n } {
+ set ret ""
+ while { $n > 0 } {
+ lappend ret 0
+ incr n -1
+ }
+ return $ret
+}
+
+proc check_dump { k d } {
+ puts "key: $k data: $d"
+}
+
+proc reverse { s } {
+ set res ""
+ for { set i 0 } { $i < [string length $s] } { incr i } {
+ set res "[string index $s $i]$res"
+ }
+
+ return $res
+}
+
+#
+# This is an internal-only proc.  All tests should use 'is_valid_db' etc.
+#
+proc is_valid_widget { w expected } {
+ # First N characters must match "expected"
+ set l [string length $expected]
+ incr l -1
+ if { [string compare [string range $w 0 $l] $expected] != 0 } {
+ return $w
+ }
+
+ # Remaining characters must be digits
+ incr l 1
+ for { set i $l } { $i < [string length $w] } { incr i} {
+ set c [string index $w $i]
+ if { $c < "0" || $c > "9" } {
+ return $w
+ }
+ }
+
+ return TRUE
+}
+
+proc is_valid_db { db } {
+ return [is_valid_widget $db db]
+}
+
+proc is_valid_env { env } {
+ return [is_valid_widget $env env]
+}
+
+proc is_valid_cursor { dbc db } {
+ return [is_valid_widget $dbc $db.c]
+}
+
+proc is_valid_lock { lock env } {
+ return [is_valid_widget $lock $env.lock]
+}
+
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
+proc is_valid_mpool { mpool env } {
+ return [is_valid_widget $mpool $env.mp]
+}
+
+proc is_valid_page { page mpool } {
+ return [is_valid_widget $page $mpool.pg]
+}
+
+proc is_valid_txn { txn env } {
+ return [is_valid_widget $txn $env.txn]
+}
+
+proc is_valid_mutex { m env } {
+ return [is_valid_widget $m $env.mutex]
+}
+
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
+proc send_cmd { fd cmd {sleep 2}} {
+ source ./include.tcl
+
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
+ puts $fd "flush stdout"
+ flush $fd
+ berkdb debug_check
+ tclsleep $sleep
+
+ set r [rcv_result $fd]
+ return $r
+}
+
+proc rcv_result { fd } {
+ set r [gets $fd result]
+ error_check_bad remote_read $r -1
+
+ return $result
+}
+
+proc send_timed_cmd { fd rcv_too cmd } {
+ set c1 "set start \[timestamp -r\]; "
+ set c2 "puts \[expr \[timestamp -r\] - \$start\]"
+ set full_cmd [concat $c1 $cmd ";" $c2]
+
+ puts $fd $full_cmd
+ puts $fd "flush stdout"
+ flush $fd
+ return 0
+}
+
+#
+# The rationale behind why we have *two* "data padding" routines is outlined
+# below:
+#
+# Both pad_data and chop_data truncate data that is too long. However,
+# pad_data also adds the pad character to pad data out to the fixed length
+# record length.
+#
+# Which routine you call does not depend on the length of the data you're
+# using, but on whether you're doing a put or a get. When we do a put, we
+# have to make sure the data isn't longer than the size of a record because
+# otherwise we'll get an error (use chop_data). When we do a get, we want to
+# check that db padded everything correctly (use pad_data on the value against
+# which we are comparing).
+#
+# We don't want to just use the pad_data routine for both purposes, because
+# we want to be able to test whether or not db is padding correctly. For
+# example, the queue access method had a bug where when a record was
+# overwritten (*not* a partial put), only the first n bytes of the new entry
+# were written, n being the new entry's (unpadded) length. So, if we did
+# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
+# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
+# have gotten the "correct" result, but we wouldn't have found this bug.
+proc chop_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1 && \
+ [string length $data] > $fixed_len} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+proc pad_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
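+# Illustrative usage sketch only (the same pattern partial_put uses above):
+# chop the value on the way in so a put cannot overflow a fixed-length
+# record, and pad the expected value on the way out when checking what db
+# stored, e.g.:
+#	error_check_good put \
+#	    [eval {$db put} $txn {$key [chop_data $method $data]}] 0
+#	set ret [eval {$db get} $txn {$key}]
+#	error_check_good get $ret \
+#	    [list [list $key [pad_data $method $data]]]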
+
+proc make_fixed_length {method data {pad 0}} {
+ global fixed_len
+ global fixed_pad
+
+ if {[is_fixed_length $method] == 1} {
+ if {[string length $data] > $fixed_len } {
+ error_check_bad make_fixed_len:TOO_LONG 1 1
+ }
+ while { [string length $data] < $fixed_len } {
+ set data [format $data%c $fixed_pad]
+ }
+ }
+ return $data
+}
+
+proc make_gid {data} {
+	# Pad the global transaction id with trailing zeroes out to the
+	# maximum GID length (128 bytes).
+	while { [string length $data] < 128 } {
+		set data [format ${data}0]
+	}
+	return $data
+}
+
+# shift data for partial
+# pad with fixed pad (which is NULL)
+proc partial_shift { data offset direction} {
+ global fixed_len
+
+ set len [expr $fixed_len - 1]
+
+ if { [string compare $direction "right"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [binary format x1a$len $data]
+ }
+ } elseif { [string compare $direction "left"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [string range $data 1 end]
+ set data [binary format a$len $data]
+ }
+ }
+ return $data
+}
+
+# string compare does not always work to compare this data,
+# nor does expr (==); this is a specialized comparison routine
+# (for use with fixed-length recno and queue).
+proc binary_compare { data1 data2 } {
+ if { [string length $data1] != [string length $data2] || \
+ [string compare -length \
+ [string length $data1] $data1 $data2] != 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc convert_method { method } {
+ switch -- $method {
+ -btree -
+ -dbtree -
+ dbtree -
+ -ddbtree -
+ ddbtree -
+ -rbtree -
+ BTREE -
+ DB_BTREE -
+ DB_RBTREE -
+ RBTREE -
+ bt -
+ btree -
+ db_btree -
+ db_rbtree -
+ rbt -
+ rbtree { return "-btree" }
+
+ -dhash -
+ -ddhash -
+ -hash -
+ DB_HASH -
+ HASH -
+ dhash -
+ ddhash -
+ db_hash -
+ h -
+ hash { return "-hash" }
+
+ -queue -
+ DB_QUEUE -
+ QUEUE -
+ db_queue -
+ q -
+ qam -
+ queue { return "-queue" }
+
+ -queueextent -
+ QUEUEEXTENT -
+ qe -
+ qamext -
+ -queueext -
+ queueextent -
+ queueext { return "-queue" }
+
+ -frecno -
+ -recno -
+ -rrecno -
+ DB_FRECNO -
+ DB_RECNO -
+ DB_RRECNO -
+ FRECNO -
+ RECNO -
+ RRECNO -
+ db_frecno -
+ db_recno -
+ db_rrecno -
+ frec -
+ frecno -
+ rec -
+ recno -
+ rrec -
+ rrecno { return "-recno" }
+
+ default { error "FAIL:[timestamp] $method: unknown method" }
+ }
+}
+
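+# Split any encryption argument out of $largs: the "-encryptany <passwd>"
+# (or similar) pair is returned through the variable named by $encargsp,
+# and in the returned list it is replaced by a bare "-encrypt" flag.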
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
+
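+# Remember the current global encrypt setting in old_encrypt, then set
+# encrypt to 1 or 0 according to whether $largs asks for encryption.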
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
+# If recno-with-renumbering or btree-with-renumbering is specified, then
+# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
+# -flags argument.
+proc convert_args { method {largs ""} } {
+ global fixed_len
+ global fixed_pad
+ global gen_upgrade
+ global upgrade_be
+ source ./include.tcl
+
+ if { [string first - $largs] == -1 &&\
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
+ set errstring "args must contain a hyphen; does this test\
+ have no numeric args?"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
+ return -code return
+ }
+
+ convert_encrypt $largs
+ if { $gen_upgrade == 1 && $upgrade_be == 1 } {
+ append largs " -lorder 4321 "
+ } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
+ append largs " -lorder 1234 "
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ append largs " -renumber "
+ } elseif { [is_rbtree $method] == 1 } {
+ append largs " -recnum "
+ } elseif { [is_dbtree $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddbtree $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_dhash $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_queueext $method] == 1 } {
+ append largs " -extent 2 "
+ }
+
+ if {[is_fixed_length $method] == 1} {
+ append largs " -len $fixed_len -pad $fixed_pad "
+ }
+ return $largs
+}
+
+proc is_btree { method } {
+ set names { -btree BTREE DB_BTREE bt btree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dbtree { method } {
+ set names { -dbtree dbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddbtree { method } {
+ set names { -ddbtree ddbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rbtree { method } {
+ set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_recno { method } {
+ set names { -recno DB_RECNO RECNO db_recno rec recno}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rrecno { method } {
+ set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_frecno { method } {
+ set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_hash { method } {
+ set names { -hash DB_HASH HASH db_hash h hash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dhash { method } {
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queue { method } {
+ if { [is_queueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queueext { method } {
+ set names { -queueextent queueextent QUEUEEXTENT qe qamext \
+ queueext -queueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_record_based { method } {
+ if { [is_recno $method] || [is_frecno $method] ||
+ [is_rrecno $method] || [is_queue $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_fixed_length { method } {
+ if { [is_queue $method] || [is_frecno $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Sort lines in file $in and write results to file $out.
+# This is a more portable alternative to execing the sort command,
+# which has assorted issues on NT [#1576].
+# The addition of a "-n" argument will sort numerically.
+proc filesort { in out { arg "" } } {
+ set i [open $in r]
+
+ set ilines {}
+ while { [gets $i line] >= 0 } {
+ lappend ilines $line
+ }
+
+ if { [string compare $arg "-n"] == 0 } {
+ set olines [lsort -integer $ilines]
+ } else {
+ set olines [lsort $ilines]
+ }
+
+ close $i
+
+ set o [open $out w]
+ foreach line $olines {
+ puts $o $line
+ }
+
+ close $o
+}
+
+# Print lines up to the nth line of infile out to outfile, inclusive.
+# The optional beg argument tells us where to start.
+proc filehead { n infile outfile { beg 0 } } {
+ set in [open $infile r]
+ set out [open $outfile w]
+
+ # Sed uses 1-based line numbers, and so we do too.
+ for { set i 1 } { $i < $beg } { incr i } {
+ if { [gets $in junk] < 0 } {
+ break
+ }
+ }
+
+ for { } { $i <= $n } { incr i } {
+ if { [gets $in line] < 0 } {
+ break
+ }
+ puts $out $line
+ }
+
+ close $in
+ close $out
+}
+
+# Remove file (this replaces $RM).
+# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
+proc fileremove { args } {
+ set forceflag ""
+ foreach a $args {
+ if { [string first - $a] == 0 } {
+ # It's a flag. Better be f.
+ if { [string first f $a] != 1 } {
+ return -code error "bad flag to fileremove"
+ } else {
+ set forceflag "-force"
+ }
+ } else {
+ eval {file delete $forceflag $a}
+ }
+ }
+}
+
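+# Scan the named files for a line beginning with "FAIL"; return 1 as soon
+# as one is found, 0 otherwise.  Files that do not exist are skipped.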
+proc findfail { args } {
+ foreach a $args {
+ if { [file exists $a] == 0 } {
+ continue
+ }
+ set f [open $a r]
+ while { [gets $f line] >= 0 } {
+ if { [string first FAIL $line] == 0 } {
+ close $f
+ return 1
+ }
+ }
+ close $f
+ }
+ return 0
+}
+
+# Sleep for s seconds.
+proc tclsleep { s } {
+ # On Windows, the system time-of-day clock may update as much
+ # as 55 ms late due to interrupt timing. Don't take any
+ # chances; sleep extra-long so that when tclsleep 1 returns,
+ # it's guaranteed to be a new second.
+ after [expr $s * 1000 + 56]
+}
+
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
+# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
+proc filecmp { file_a file_b } {
+ set fda [open $file_a r]
+ set fdb [open $file_b r]
+
+ set nra 0
+ set nrb 0
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ while { $nra >= 0 && $nrb >= 0 } {
+ set nra [gets $fda aline]
+ set nrb [gets $fdb bline]
+
+ if { $nra != $nrb || [string compare $aline $bline] != 0} {
+ close $fda
+ close $fdb
+ return 1
+ }
+ }
+
+ close $fda
+ close $fdb
+ return 0
+}
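+
+# Usage sketch (hypothetical helper): filecmp returns 0 for identical files
+# and 1 otherwise, so its result can feed error_check_good directly.
+proc filecmp_example { file_a file_b } {
+	error_check_good files_match [filecmp $file_a $file_b] 0
+}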
+
+# Given two SORTED files, one of which is a complete superset of the other,
+# extract the unique portions of the superset and put them in
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
+
+# Verify all .db files in the specified directory.
+proc verify_dir { {directory $testdir} \
+ { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } {
+ global encrypt
+ global passwd
+
+ # If we're doing database verification between tests, we don't
+ # want to do verification twice without an intervening cleanup--some
+ # test was skipped. Always verify by default (noredo == 0) so
+ # that explicit calls to verify_dir during tests don't require
+ # cleanup commands.
+ if { $noredo == 1 } {
+ if { [file exists $directory/NOREVERIFY] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping verification."
+ }
+ return
+ }
+ set f [open $directory/NOREVERIFY w]
+ close $f
+ }
+
+ if { [catch {glob $directory/*.db} dbs] != 0 } {
+ # No files matched
+ return
+ }
+ if { [file exists /dev/stderr] == 1 } {
+ set errfilearg "-errfile /dev/stderr "
+ } else {
+ set errfilearg ""
+ }
+ set errpfxarg {-errpfx "FAIL: verify" }
+ set errarg $errfilearg$errpfxarg
+ set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env $errarg "
+
+ foreach db $dbs {
+ if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Verification of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good verify:$db $res 0
+ if { $quiet == 0 } {
+ puts "${pref}Verification of $db succeeded."
+ }
+ }
+
+ # Skip the dump if it's dangerous to do it.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
+ }
+
+ error_check_good vrfyenv_close [$env close] 0
+
+ return $ret
+}
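+
+# Usage sketch (hypothetical call): verify every .db file in a test directory,
+# prefix messages with a tab, and skip the dump/load pass (nodump = 1).
+proc verify_dir_example { dir } {
+	return [verify_dir $dir "\t" 0 0 1]
+}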
+
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+proc dumploadtest { db {subdb ""} } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ # Open original database, or subdb if we have one.
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+ set max_size [expr 15 * 1024]
+ if { [string length $subdb] == 0 } {
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, dumploadtest each one
+ # separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+				# Skip any subdatabase whose name is over this
+				# size. The problem is that when we dump/load
+				# it, over-long names result in E2BIG errors
+				# because the arguments to db_dump are too
+				# long. 64K seems to be the limit (on FreeBSD),
+				# cut it to 32K just to be safe.
+ if {[string length $subdb] < $max_size && \
+ [string length $subdb] != 0} {
+ dumploadtest $db $subdb
+ }
+ }
+ error_check_good oldcclose [$oc close] 0
+ error_check_good olddbclose [$olddb close] 0
+ return 0
+ }
+ # No subdatabase
+ set have_subdb 0
+ } else {
+ set olddb [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ set have_subdb 1
+ }
+
+ # Do a db_dump test. Dump/load each file.
+ if { $have_subdb } {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ -s {$subdb} $db | \
+ $util_path/db_load $utilflag $newdbname} res]
+ } else {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ }
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # Now open new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($db) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next] } {
+ set ndbt [$nc get -get_both \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ error_check_good db_compare($db/$newdbname) $ndbt $odbt
+ }
+
+ for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next] } {
+ set odbt [$oc get -get_both \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ error_check_good db_compare_back($db) $odbt $ndbt
+ }
+
+ error_check_good orig_cursor_close($db) [$oc close] 0
+ error_check_good new_cursor_close($db) [$nc close] 0
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ error_check_good new_db_close($db) [$newdb close] 0
+
+ eval berkdb dbremove $dbarg $newdbname
+
+ return 0
+}
+
+# Generate randomly ordered, guaranteed-unique four-character strings that can
+# be used to differentiate duplicates without creating duplicate duplicates.
+# (test031 & test032) randstring_init is required before the first call to
+# randstring and initializes things for up to $i distinct strings; randstring
+# gets the next string.
+proc randstring_init { i } {
+ global rs_int_list alphabet
+
+ # Fail if we can't generate sufficient unique strings.
+ if { $i > [expr 26 * 26 * 26 * 26] } {
+ set errstring\
+ "Duplicate set too large for random string generator"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set rs_int_list {}
+
+ # generate alphabet array
+ for { set j 0 } { $j < 26 } { incr j } {
+ set a($j) [string index $alphabet $j]
+ }
+
+ # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
+ for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
+ for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
+ for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
+ for { set d4 0 } { $d4 < 26 && $j < $i } \
+ { incr d4 } {
+ lappend rs_int_list \
+ $a($d1)$a($d2)$a($d3)$a($d4)
+ incr j
+ }
+ }
+ }
+ }
+
+ # Randomize the list.
+ set rs_int_list [randomize_list $rs_int_list]
+}
+
+# Randomize a list. Returns a randomly-reordered copy of l.
+proc randomize_list { l } {
+ set i [llength $l]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ # Pick a random element from $j to the end
+ set k [berkdb random_int $j [expr $i - 1]]
+
+ # Swap it with element $j
+ set t1 [lindex $l $j]
+ set t2 [lindex $l $k]
+
+ set l [lreplace $l $j $j $t2]
+ set l [lreplace $l $k $k $t1]
+ }
+
+ return $l
+}
+
+proc randstring {} {
+ global rs_int_list
+
+ if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
+ set errstring "randstring uninitialized or used too often"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set item [lindex $rs_int_list 0]
+ set rs_int_list [lreplace $rs_int_list 0 0]
+
+ return $item
+}
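+
+# Usage sketch (hypothetical, not part of any test): reserve up to 100 unique
+# four-character strings, then draw them one at a time with randstring.
+proc randstring_example { } {
+	randstring_init 100
+	for { set i 0 } { $i < 100 } { incr i } {
+		puts [randstring]
+	}
+}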
+
+# Takes a variable-length arg list, and returns a list containing the list of
+# the non-hyphenated-flag arguments, followed by a list of each alphanumeric
+# flag it finds.
+proc extractflags { args } {
+ set inflags 1
+ set flags {}
+ while { $inflags == 1 } {
+ set curarg [lindex $args 0]
+ if { [string first "-" $curarg] == 0 } {
+ set i 1
+ while {[string length [set f \
+ [string index $curarg $i]]] > 0 } {
+ incr i
+ if { [string compare $f "-"] == 0 } {
+ set inflags 0
+ break
+ } else {
+ lappend flags $f
+ }
+ }
+ set args [lrange $args 1 end]
+ } else {
+ set inflags 0
+ }
+ }
+ return [list $args $flags]
+}
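+
+# Usage sketch: for the argument list "-ab foo bar", extractflags returns the
+# non-flag arguments first and the individual flag letters second, i.e.
+# {{foo bar} {a b}}; a "--" argument stops flag parsing. The proc below is
+# hypothetical and never called.
+proc extractflags_example { } {
+	set res [extractflags -ab foo bar]
+	# [lindex $res 0] is "foo bar", [lindex $res 1] is "a b".
+	return $res
+}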
+
+# Wrapper for berkdb open, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb open} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_open_noerr { args } {
+ eval {berkdb open} $args
+}
+
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
+proc check_handles { {outf stdout} } {
+ global ohandles
+
+ set handles [berkdb handles]
+ if {[llength $handles] != [llength $ohandles]} {
+ puts $outf "WARNING: Open handles during cleanup: $handles"
+ }
+ set ohandles $handles
+}
+
+proc open_handles { } {
+ return [llength [berkdb handles]]
+}
+
+proc move_file_extent { dir dbfile tag op } {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+ # We want to copy or rename only those that have been saved,
+ # so delete all the current extent files so that we don't
+ # end up with extra ones we didn't restore from our saved ones.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ #
+ # We can either copy or rename
+ #
+ file $op -force $extfile $dbq
+ }
+}
+
+proc copy_extent_file { dir dbfile tag { op copy } } {
+ set files [get_extfiles $dir $dbfile ""]
+ foreach extfile $files {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum
+ }
+}
+
+proc get_extfiles { dir dbfile tag } {
+ if { $tag == "" } {
+ set filepat $dir/__dbq.$dbfile.\[0-9\]*
+ } else {
+ set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]*
+ }
+ return [glob -nocomplain -- $filepat]
+}
+
+proc make_ext_filename { dir dbfile extnum } {
+ return $dir/__dbq.$dbfile.$extnum
+}
+
+# All pids for Windows 9X are negative values. When we want to have
+# unsigned int values, unique to the process, we'll take the absolute
+# value of the pid. This avoids unsigned/signed mistakes, yet
+# guarantees uniqueness, since each system has pids that are all
+# either positive or negative.
+#
+proc sanitized_pid { } {
+ set mypid [pid]
+ if { $mypid < 0 } {
+ set mypid [expr - $mypid]
+ }
+ puts "PID: [pid] $mypid\n"
+ return $mypid
+}
+
+#
+# Extract the page size field from a stat record. Return -1 if
+# none is found.
+#
+proc get_pagesize { stat } {
+ foreach field $stat {
+ set title [lindex $field 0]
+ if {[string compare $title "Page size"] == 0} {
+ return [lindex $field 1]
+ }
+ }
+ return -1
+}
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global src_root
+
+ if { $is_qnx_test } {
+ set small 1
+ }
+ if { $small && $is_windows_test } {
+ return [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ return [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ return \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
+
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+ # If we are using a txnenv, assume it is using
+ # the default maximum number of locks, cut back
+ # so that we don't run out of locks. Reduce
+ # by 25% until we fit.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
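+
+# Usage sketch (hypothetical values): reduce_dups takes variable names, not
+# values, and trims both counts by roughly 25% per pass until their product
+# is at most 5000.
+proc reduce_dups_example { } {
+	set nentries 10000
+	set ndups 100
+	reduce_dups nentries ndups
+	return [list $nentries $ndups]
+}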
+
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
+
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
diff --git a/libdb/test/txn001.tcl b/libdb/test/txn001.tcl
new file mode 100644
index 0000000..1eb544d
--- /dev/null
+++ b/libdb/test/txn001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "01"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "01"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
diff --git a/libdb/test/txn002.tcl b/libdb/test/txn002.tcl
new file mode 100644
index 0000000..b2db660
--- /dev/null
+++ b/libdb/test/txn002.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "02" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "02" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/libdb/test/txn003.tcl b/libdb/test/txn003.tcl
new file mode 100644
index 0000000..9ac654a
--- /dev/null
+++ b/libdb/test/txn003.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "03"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Outstanding child transaction test"
+
+ if { $tnum != "03" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check commit or abort of child
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put -auto_commit $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn0$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn0$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn0$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+		#
+		# For prepare we'll just end up aborting after we test
+		# what we need to, so set gooddata to the same value
+		# as for abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ # We can't do the attempted child discard on Windows
+ # because it will leave open files that can't be removed.
+ # Skip the remainder of the test for Windows.
+ if { $is_windows_test == 1 } {
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+ puts "\tTxn0$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn0$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ if { $op == "begin" } {
+ puts "$msg.2: $op new txn"
+ } else {
+ puts "$msg.2: $op parent"
+ }
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ set stat [catch {$env txn} ret]
+ error_check_good begin_error $stat 1
+ error_check_good begin_err \
+ [is_substr $ret "not yet committed transactions is incomplete"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
diff --git a/libdb/test/txn004.tcl b/libdb/test/txn004.tcl
new file mode 100644
index 0000000..79c967d
--- /dev/null
+++ b/libdb/test/txn004.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn001 "04.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "04.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
diff --git a/libdb/test/txn005.tcl b/libdb/test/txn005.tcl
new file mode 100644
index 0000000..af010a6
--- /dev/null
+++ b/libdb/test/txn005.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require a wraparound txnid
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/libdb/test/txn006.tcl b/libdb/test/txn006.tcl
new file mode 100644
index 0000000..9fc0362
--- /dev/null
+++ b/libdb/test/txn006.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
diff --git a/libdb/test/txn007.tcl b/libdb/test/txn007.tcl
new file mode 100644
index 0000000..98924cf
--- /dev/null
+++ b/libdb/test/txn007.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written at least 'iter' number of times,
+ # but not synced on any of those.
+ #
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
diff --git a/libdb/test/txn008.tcl b/libdb/test/txn008.tcl
new file mode 100644
index 0000000..0484ad4
--- /dev/null
+++ b/libdb/test/txn008.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "08.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "08.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/libdb/test/txn009.tcl b/libdb/test/txn009.tcl
new file mode 100644
index 0000000..98a9614
--- /dev/null
+++ b/libdb/test/txn009.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "09.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "09.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/libdb/test/txnscript.tcl b/libdb/test/txnscript.tcl
new file mode 100644
index 0000000..1ddda3c
--- /dev/null
+++ b/libdb/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidf: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good parent [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
diff --git a/libdb/test/update.tcl b/libdb/test/update.tcl
new file mode 100644
index 0000000..77b124f
--- /dev/null
+++ b/libdb/test/update.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+global update_dir
+set update_dir "$test_path/update_test"
+
+proc update { } {
+ source ./include.tcl
+ global update_dir
+
+ foreach version [glob $update_dir/*] {
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $update_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $update_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+ foreach endianness {"le" "be"} {
+ puts "Update:\
+ $version $method $name $endianness"
+ set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
+ if { $ret != 0 } {
+ puts $message
+ }
+ }
+ }
+ }
+ }
+}
+
+proc _update { source_dir temp_dir \
+ version method file endianness do_db_load_test do_update_test } {
+ source include.tcl
+ global errorInfo
+
+ cleanup $temp_dir NULL
+
+ exec sh -c \
+"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
+
+ if { $do_db_load_test } {
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update load: $version $method $file $message" $ret 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f "$temp_dir/update.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update dump: $version $method $file $message" $ret 0
+
+ error_check_good "Update diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.1.2: $version $method $file" $ret ""
+ }
+
+ if { $do_update_test } {
+ set ret [catch \
+ {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
+ if { $ret == 1 } {
+ if { ![is_substr $errorInfo "version upgrade"] } {
+ set fnl [string first "\n" $errorInfo]
+ set theError \
+ [string range $errorInfo 0 [expr $fnl - 1]]
+ error $theError
+ }
+ } else {
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f \
+ "$temp_dir/update.dump" \
+ "$temp_dir/$file-$endianness.db"} message]
+ error_check_good "Update\
+ dump: $version $method $file $message" $ret 0
+
+ error_check_good \
+ "Update diff.2: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" \
+ "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.2: $version $method $file" $ret ""
+ }
+ }
+}
diff --git a/libdb/test/upgrade.tcl b/libdb/test/upgrade.tcl
new file mode 100644
index 0000000..c175093
--- /dev/null
+++ b/libdb/test/upgrade.tcl
@@ -0,0 +1,294 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+
+global upgrade_dir
+# set upgrade_dir "$test_path/upgrade_test"
+set upgrade_dir "$test_path/upgrade/databases"
+
+global gen_upgrade
+set gen_upgrade 0
+
+global upgrade_dir
+global upgrade_be
+global upgrade_method
+global upgrade_name
+
+proc upgrade { { archived_test_loc "DEFAULT" } } {
+ source ./include.tcl
+ global upgrade_dir
+
+ set saved_upgrade_dir $upgrade_dir
+
+ puts -nonewline "Upgrade test: "
+ if { $archived_test_loc == "DEFAULT" } {
+ puts "using default archived databases in $upgrade_dir."
+ } else {
+ set upgrade_dir $archived_test_loc
+ puts "using archived databases in $upgrade_dir."
+ }
+
+ foreach version [glob $upgrade_dir/*] {
+ if { [string first CVS $version] != -1 } { continue }
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $upgrade_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $upgrade_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+
+ cleanup $testdir NULL 1
+ #puts "$upgrade_dir/$version/$method/$name.tar.gz"
+ set curdir [pwd]
+ cd $testdir
+ set tarfd [open "|tar xf -" w]
+ cd $curdir
+
+ catch {exec gunzip -c "$upgrade_dir/$version/$method/$name.tar.gz" >@$tarfd}
+ close $tarfd
+
+ set f [open $testdir/$name.tcldump {RDWR CREAT}]
+ close $f
+
+ # It may seem suboptimal to exec a separate
+ # tclsh for each subtest, but this is
+ # necessary to keep the testing process
+ # from consuming a tremendous amount of
+ # memory.
+ if { [file exists $testdir/$name-le.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name le"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ if { [file exists $testdir/$name-be.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name be"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _db_load_test $testdir $version $method\
+ $name"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+
+ }
+ }
+ }
+ set upgrade_dir $saved_upgrade_dir
+
+ # Don't provide a return value.
+ return
+}
+
+proc _upgrade_test { temp_dir version method file endianness } {
+ source include.tcl
+ global errorInfo
+
+ puts "Upgrade: $version $method $file $endianness"
+
+ set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
+ error_check_good dbupgrade $ret 0
+
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
+ upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _db_load_test { temp_dir version method file } {
+ source include.tcl
+ global errorInfo
+
+ puts "db_load: $version $method $file"
+
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/upgrade.db"} message]
+ error_check_good \
+ "Upgrade load: $version $method $file $message" $ret 0
+
+ upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc gen_upgrade { dir } {
+ global gen_upgrade
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set gen_upgrade 1
+ set upgrade_dir $dir
+
+ foreach i "btree rbtree hash recno rrecno frecno queue queueext" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $num_test(test) } { incr j } {
+ set upgrade_name [format "test%03d" $j]
+ if { [info exists parms($upgrade_name)] != 1 } {
+ continue
+ }
+
+ foreach upgrade_be { 0 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ set gen_upgrade 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
+ run_method -$i $j $j"} res] {
+ puts "FAIL: $upgrade_name $i"
+ }
+ puts $res
+ cleanup $testdir NULL 1
+ }
+ }
+ }
+ set gen_upgrade 0
+}
+
+proc upgrade_dump { database file {stripnulls 0} } {
+ global errorInfo
+
+ set db [berkdb open $database]
+ set dbc [$db cursor]
+
+ set f [open $file w+]
+ fconfigure $f -encoding binary -translation binary
+
+ #
+ # Get a sorted list of keys
+ #
+ set key_list ""
+ set pair [$dbc get -first]
+
+ while { 1 } {
+ if { [llength $pair] == 0 } {
+ break
+ }
+ set k [lindex [lindex $pair 0] 0]
+ lappend key_list $k
+ set pair [$dbc get -next]
+ }
+
+ # Discard duplicated keys; we now have a key for each
+ # duplicate, not each unique key, and we don't want to get each
+ # duplicate multiple times when we iterate over key_list.
+ set uniq_keys ""
+ foreach key $key_list {
+ if { [info exists existence_list($key)] == 0 } {
+ lappend uniq_keys $key
+ }
+ set existence_list($key) 1
+ }
+ set key_list $uniq_keys
+
+ set key_list [lsort -command _comp $key_list]
+
+ #
+ # Get the data for each key
+ #
+ set i 0
+ foreach key $key_list {
+ set pair [$dbc get -set $key]
+ if { $stripnulls != 0 } {
+ # the Tcl interface to db versions before 3.X
+ # added nulls at the end of all keys and data, so
+ # we provide functionality to strip that out.
+ set key [strip_null $key]
+ }
+ set data_list {}
+ catch { while { [llength $pair] != 0 } {
+ set data [lindex [lindex $pair 0] 1]
+ if { $stripnulls != 0 } {
+ set data [strip_null $data]
+ }
+ lappend data_list [list $data]
+ set pair [$dbc get -nextdup]
+ } }
+ #lsort -command _comp data_list
+ set data_list [lsort -command _comp $data_list]
+ puts -nonewline $f [binary format i [string length $key]]
+ puts -nonewline $f $key
+ puts -nonewline $f [binary format i [llength $data_list]]
+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
+ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
+ puts -nonewline $f [concat [lindex $data_list $j]]
+ }
+ if { [llength $data_list] == 0 } {
+ puts "WARNING: zero-length data list"
+ }
+ incr i
+ }
+
+ close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
+}
+
+proc _comp { a b } {
+ if { 0 } {
+ # XXX
+ set a [strip_null [concat $a]]
+ set b [strip_null [concat $b]]
+ #return [expr [concat $a] < [concat $b]]
+ } else {
+ set an [string first "\0" $a]
+ set bn [string first "\0" $b]
+
+ if { $an != -1 } {
+ set a [string range $a 0 [expr $an - 1]]
+ }
+ if { $bn != -1 } {
+ set b [string range $b 0 [expr $bn - 1]]
+ }
+ }
+ #puts "$a $b"
+ return [string compare $a $b]
+}
+
+proc strip_null { str } {
+ set len [string length $str]
+ set last [expr $len - 1]
+
+ set termchar [string range $str $last $last]
+ if { [string compare $termchar \0] == 0 } {
+ set ret [string range $str 0 [expr $last - 1]]
+ } else {
+ set ret $str
+ }
+
+ return $ret
+}
diff --git a/libdb/test/wordlist b/libdb/test/wordlist
new file mode 100644
index 0000000..03ea15f
--- /dev/null
+++ b/libdb/test/wordlist
@@ -0,0 +1,10001 @@
+cooperate
+benighted
+apologist's
+addresser
+cataract
+colonially
+atoned
+avow
+bathroom
+anaesthesia
+columnated
+bogs
+astral
+barbed
+captives
+acclaims
+adjutants
+affidavits
+baptisms
+bubbling
+classic
+allaying
+component
+battlement
+backtrack
+
+courage
+bore
+advertisement
+attests
+bunny's
+airlifts
+cajole
+cataloging
+airily
+collected
+abridged
+compel
+aftermath
+barrow
+approve
+chillier
+bequest
+attendant
+abjures
+adjudication
+banished
+asymptotes
+borrower
+caustic
+claim
+cohabitation
+corporacies
+buoy
+benchmark's
+averting
+anecdote's
+caress
+annihilate
+cajoles
+anywhere
+apparitions
+coves
+bribed
+casually
+clue's
+asserted
+architects
+abstained
+attitude
+accumulating
+coalesced
+angelic
+agnostic
+breathed
+bother
+congregating
+amatory
+caging
+countryside
+chapel
+buttonhole
+bartenders
+bridging
+bombardment
+accurately
+confirmed
+alleviated
+acquiring
+bruise
+antelope
+albums
+allusive
+corker
+cavity's
+compliment
+climb
+caterpillar
+almond
+authenticated
+balkan
+assembly's
+acidity
+abases
+bonny
+been
+abbots
+abductor's
+aerials
+cancels
+chalked
+beeps
+affirms
+contrariness
+clearest
+appropriations
+critiquing
+affluence
+bouts
+abiding
+comprises
+brunches
+biology
+conceptualization's
+assaying
+abutter
+adorable
+beatable
+appenders
+aggressors
+agrarian
+bottleneck
+angled
+beholds
+bereaved
+creation
+animated
+candied
+bar
+aeronautics
+cousin's
+cleaver
+alienation
+billet
+bungler
+contention
+businessman
+braids
+assert
+boisterous
+consolidate
+breathing
+ballot
+averted
+conscientiously
+bellow
+brazenness
+coaches
+bulldog
+classify
+checksum
+almond's
+cornered
+caskets
+capacitors
+beefer
+connoisseurs
+consisted
+adore
+circumvented
+colonels
+addenda
+boost
+compatibility's
+bumblebee
+commonest
+containment
+active
+absorption's
+creaks
+administer
+beset
+aborted
+aforesaid
+aridity
+broken
+azimuths
+aerial
+addition's
+aggrieve
+anthology
+circuitous
+checks
+alley's
+beam
+boss
+corrupting
+absolutes
+asteroid's
+bandstands
+beatitude's
+analogue's
+busts
+confession
+bedstead
+affairs
+blackmailers
+collared
+buckboard
+assassin
+accessor
+adjudging
+binders
+constituent's
+blister
+aromas
+approved
+absorbent
+barbarously
+cat's
+builder
+brandish
+assailing
+constitute
+christening
+acutely
+amount
+blurry
+blocks
+advertise
+chain
+brigade's
+confusion
+beds
+arrangers
+colonizers
+beautifying
+bankruptcy
+bedazzles
+candidates
+clearness
+admonishment's
+behind
+abbreviations
+basting
+ballasts
+amateurism
+celled
+constituted
+bonfire
+bugled
+advisee's
+battled
+budded
+burners
+causeway's
+calibrate
+brambly
+befuddles
+azure
+busiest
+admiringly
+appropriator
+accumulator
+cables
+abhor
+civil
+botulinus
+creaked
+bismuth
+astronomical
+abscissas
+bodice
+aunt
+cascades
+cares
+comradeship
+assemblages
+boater
+bellmen
+admission's
+ambitious
+baldness
+abortive
+controlled
+chinked
+coded
+courtrooms
+arteriolar
+cooler's
+cared
+brewer
+christians
+barbecues
+contacts
+blackjack's
+buzzing
+blasters
+accords
+braziers
+allegretto
+catered
+breveting
+cleaning
+amicably
+bummed
+consulted
+allegro's
+accumulator's
+compartmented
+condemned
+concludes
+bitwise
+cheered
+appropriator's
+accessors
+casting
+carolina's
+accompanying
+budding
+correspond
+bach's
+angel's
+bearing
+arresters
+biweekly
+character
+badgering
+cantankerous
+avalanching
+adjudges
+barometer
+append
+continuations
+burped
+boxtop's
+abstention
+amp
+axiomatized
+bimonthlies
+aghast
+arresting
+breakwater's
+continuing
+bridle
+bobbin's
+antagonistically
+blindly
+biochemical
+biologically
+antifundamentalist
+confer
+cloudiness
+bonded
+comfortingly
+caption
+blackmailed
+bidders
+breakpoint
+brigadier
+criminals
+coyotes
+casserole's
+annex
+cereals
+breadboxes
+belgian
+conductivity
+counterexample
+anarchist
+couches
+atavistic
+clipped
+button
+axiomatic
+capping
+correcting
+chase
+chastise
+angle
+burnished
+beauteously
+antipodes
+crippling
+crowns
+amends
+bah
+brigadiers
+alleged
+correctives
+bristles
+buzzards
+barbs
+bagel
+adaptation
+caliber
+browner
+apprehensions
+bonnet
+anachronistically
+composites
+bothered
+assurer
+arc
+chaser
+bastards
+calmed
+bunches
+apocalypse
+countably
+crowned
+contrivance
+boomerang's
+airplane's
+boarded
+consumption
+attuning
+blamed
+cooing
+annihilation
+abused
+absence
+coin
+coronaries
+applicatively
+binomial
+ablates
+banishes
+boating
+companions
+bilking
+captivate
+comment
+claimants
+admonish
+ameliorated
+bankruptcies
+author
+cheat
+chocolates
+botch
+averring
+beneath
+crudely
+creeping
+acolytes
+ass's
+cheese's
+checksum's
+chillers
+bracelet
+archenemy
+assistantship
+baroque
+butterfly
+coolie's
+anecdote
+coring
+cleansing
+accreditation
+ceaselessly
+attitudes
+bag
+belong
+assented
+aped
+constrains
+balalaikas
+consent
+carpeting
+conspiracy
+allude
+contradictory
+adverb's
+constitutive
+arterial
+admirable
+begot
+affectation
+antiquate
+attribution
+competition's
+bovine
+commodores
+alerters
+abatements
+corks
+battlements
+cave
+buoys
+credible
+bowdlerizes
+connector
+amorphously
+boredom
+bashing
+creams
+arthropods
+amalgamated
+ballets
+chafe
+autograph
+age
+aid
+colleague's
+atrocious
+carbonizing
+chutes
+barbecued
+circuits
+bandages
+corporations
+beehive
+bandwagon
+accommodated
+councillor's
+belted
+airdrop
+confrontations
+chieftain's
+canonicalization
+amyl
+abjectness
+choke
+consider
+adjuster
+crossover's
+agreeing
+consolations
+capitalizers
+binges
+annihilating
+callers
+coordinate
+banshees
+biscuits
+absorbency
+corollary
+corresponded
+aristocrat's
+banally
+cruiser
+bathtub's
+abbreviated
+balkiness
+crew
+acidulous
+air
+birdies
+canvassing
+concretion
+blackjacks
+controller's
+aquarius
+charm
+clip
+awarder
+consistently
+calibrated
+bushwhacking
+avaricious
+ceaselessness
+basically
+accolades
+adduction
+commending
+consulates
+certifiable
+admire
+bankers
+appropriateness
+bandlimits
+chill
+adds
+constable
+chirping
+cologne
+cowardice
+baklava
+amusedly
+blackberry
+crises
+bedeviling
+botching
+backbend
+attaining
+continuity
+artistry
+beginner
+cleaner's
+adores
+commemorating
+amusement
+burial
+bungalow's
+abstinence
+contractually
+advancement's
+conjecture
+buckling
+conferrer
+cherub's
+belonged
+classifications
+baseball
+carbonation
+craved
+bans
+aphid
+arbor
+ague
+acropolis
+applied
+aspired
+calibrating
+abundance
+appeased
+chanted
+ascent
+convenes
+beep
+bottles
+aborigines
+clips
+acquainting
+aiming
+creditor's
+abolitionists
+cloves
+containments
+bungling
+bunt
+anchors
+brazed
+communicator's
+brew
+accumulate
+addicting
+actively
+befog
+anachronisms
+bumblers
+closest
+calculators
+absurdity
+colleagues
+college
+assesses
+conflicted
+associational
+betide
+conceptualization
+adjutant
+alliances
+corresponding
+barometers
+cot
+brooch's
+coiled
+arboreal
+convicted
+artless
+certificates
+bourbon
+astonish
+bust
+correlate
+amounts
+anal
+abstraction's
+corns
+conqueror's
+boldly
+bob's
+beer
+blanks
+corpses
+contingent
+blackly
+backed
+appearances
+cancers
+actuating
+apprehension's
+colorings
+anglicanism
+armament
+armer
+bizarre
+begotten
+actions
+archly
+capriciously
+clue
+contractor
+contributions
+agendas
+coached
+blamable
+annoyers
+coupons
+brooked
+assortment
+axes
+celebrates
+courageously
+baroqueness
+blasphemous
+asserter
+contents
+correctly
+challenged
+bulldoze
+casement
+acknowledge
+bitterness
+belongs
+allotments
+chalice's
+bequest's
+adjacent
+consumer's
+conservatively
+coalition
+background's
+backache
+befouls
+brushfire's
+analysts
+branch
+airways
+awaiting
+breakfast
+anoints
+baying
+contrary
+bilge
+chasm's
+babes
+afresh
+centerpiece's
+barked
+coffin
+assumed
+actresses
+accentuating
+aching
+abet
+balancers
+consumptively
+cagers
+backing
+angiography
+chord's
+cheapened
+bewailed
+arson
+begged
+convergent
+bowlers
+conflicting
+confiscated
+bitch
+bloody
+brushfires
+bleach
+computation's
+choppers
+circuitously
+chancing
+bunker
+concept's
+alacrity
+boyhood
+ammo
+bobwhites
+carter
+ardent
+bier
+airway's
+brownies
+aura
+cannibalizing
+confirms
+australian
+barrage
+closures
+assertive
+abstainer
+bicarbonate
+clone
+back
+cipher
+crown
+cannibalizes
+away
+crafty
+airings
+amtrak
+comical
+burnish
+continuum
+apparition
+apologizing
+blot
+blacker
+characters
+built
+apparent
+applicative
+assiduous
+attorneys
+affectionately
+bobbing
+baggy
+comic's
+attempt
+appealers
+amortize
+bonanza
+backwards
+bowers
+anemometer
+ambulance's
+creeps
+abduction's
+coal
+chiller
+adjudications
+clogging
+ascending
+bookkeeper
+crawlers
+battery's
+artifacts
+attributions
+amusements
+aftermost
+allophones
+bemoaned
+comptroller
+bugger's
+buoyancy
+booboo
+award
+amplifying
+certify
+bivariate
+attunes
+asteroidal
+chant
+collectively
+chasteness
+chapels
+copiousness
+benign
+armies
+competing
+buss
+awakened
+breakpoint's
+conceptualizing
+cleansers
+acorns
+conveyance's
+bluer
+battle
+budges
+characteristically
+be
+contour
+beguiling
+awarding
+armhole
+airship's
+bathtub
+breathable
+crowded
+compiles
+certain
+brutalizing
+bacteria
+baronies
+abode
+blacksmith
+brinkmanship
+capitalizations
+cousin
+botany
+avionic
+companion
+consists
+connoisseur's
+avalanched
+claimant's
+backstitches
+affixes
+bikes
+atomically
+cowed
+asleep
+becomingly
+acorn's
+complainers
+appreciated
+cross
+cringed
+booting
+attitudinal
+broadcasting
+childishly
+breeze's
+craven
+boll
+clause's
+burden
+appendages
+atemporal
+allah
+carnival's
+anchorage
+adjures
+besought
+abounding
+crucifying
+arrangements
+antiquarians
+burrows
+antipode
+canvas
+constable's
+coopers
+ascended
+companionship
+bakery's
+bayonets
+conclusively
+boasters
+beneficiaries
+conspicuous
+contriver
+architecture
+breakthroughs
+brownie's
+blur
+academics
+antagonist
+contemplates
+arena
+caravan's
+administers
+comprehensively
+convey
+bigot
+blitz
+bibliography's
+coerced
+assail
+amazons
+banned
+alabaster
+concluding
+bouquet
+barks
+acquaintances
+astonishment
+constraint
+backpack's
+breakthroughes
+blocking
+accomplishers
+catastrophe
+bushels
+algae
+ailment's
+anemometers
+beginning's
+chefs
+converse
+cornerstone
+astound
+assuring
+adornment
+anyone
+alumni
+club
+bestselling
+businessmen
+constructed
+attendee's
+cooped
+ablute
+chronicler
+alaska
+clam
+canonicals
+concerned
+aligned
+creek
+burrow
+allay
+admirals
+blackens
+compressing
+confirm
+cows
+battleship's
+belched
+affixing
+chalices
+choirs
+absentee's
+baseboard's
+apportionment
+adheres
+accounts
+chef
+access
+clearings
+accompanists
+concentrating
+ado
+bathos
+bailiff
+continuance
+ball
+bearer
+congress
+cites
+can't
+balloon
+crams
+consults
+bungled
+bike's
+apes
+assassinations
+colt's
+consecrate
+ancients
+chick
+analyst
+adsorbing
+burntly
+accompanist's
+apprehensive
+bengal
+boughs
+ankles
+anchored
+benefits
+accommodation
+amiss
+brink
+chewers
+blueberry's
+chairs
+adjoin
+bivalve
+autobiography's
+automated
+comparisons
+climbed
+artists
+congruent
+cold
+atonement
+cashier
+armageddon
+allocations
+bereavements
+bumblebees
+blew
+busboys
+bottoming
+alternations
+apprenticed
+bestial
+cinder's
+consumption's
+abbey's
+amended
+continued
+birefringent
+barbados
+ability's
+compulsory
+antler
+centerpieces
+accountant's
+arrogant
+ballads
+ascenders
+appliers
+adjustment's
+blabbed
+baits
+activity's
+clod's
+adjudicating
+bleak
+commutes
+bumming
+beating
+cohesiveness
+branded
+acknowledger
+communications
+blockhouses
+booklets
+consenters
+creek's
+consulting
+binary
+coaster
+ascription
+bushwhack
+boggles
+affidavit's
+arrangement's
+congressionally
+convenient
+avoider
+abaft
+bootlegger's
+befriending
+ceases
+carbonizes
+clumps
+commented
+competence
+conversing
+butting
+astonishing
+armful
+allegory's
+crisis
+critiques
+concurred
+conservative
+aristotelian
+blizzard's
+corner
+amateur's
+compare
+affiliations
+bestseller
+batch
+cleanly
+assayed
+bravos
+bowls
+conceptualized
+babe's
+algorithm's
+baptist
+cheeks
+conquerer
+bidder's
+behaving
+briefcase's
+analogues
+amply
+attitude's
+apple
+crossable
+ambushed
+besmirches
+creditors
+bandwagons
+continentally
+adjuncts
+concerns
+agers
+cop
+amoebas
+bisected
+bombing
+appendices
+cocking
+bused
+babied
+compounds
+asserts
+believably
+alert
+apostate
+catalysts
+aureomycin
+convex
+beetle's
+banishing
+agitating
+bystanders
+bow
+connotes
+blanch
+charmingly
+animal's
+baritones
+brier
+astronomer
+company's
+balding
+actually
+aunt's
+avalanches
+acquisition
+base
+compilations
+bathtubs
+actualization
+chanced
+atom
+banged
+befuddled
+apologized
+componentwise
+britisher
+began
+conservationist
+actuate
+crosser
+appended
+bitten
+ambivalence
+acetate
+conversions
+buzzwords
+askance
+abolishing
+birdied
+creeds
+anglers
+colossal
+bereft
+chock
+apprentice
+cooper
+besmirching
+allocating
+antiques
+bikini's
+bonders
+afflictive
+augmentation
+atheist
+bucket
+bibliophile
+annexes
+beguiles
+birdbaths
+amendments
+animators
+asymptotically
+communally
+barber
+biographers
+arguable
+confidant
+apologies
+adorns
+contacting
+coarsest
+artichokes
+arraign
+absorbing
+alden
+commercially
+cabbage's
+coincides
+clumping
+cents
+alleviater
+buzzard
+braked
+anesthetized
+bugling
+capitalist
+befriended
+appreciatively
+boomtown's
+cozier
+critic's
+correspondent
+bard
+attenuator
+bake
+brings
+chews
+anechoic
+brutal
+colder
+buckshot
+canvassers
+analytic
+allies
+alloys
+awake
+alienates
+bin's
+crimes
+constructible
+classifiers
+bulb
+cream
+banquet
+axiomatize
+adjourn
+converted
+auditioned
+comfortably
+bandwidth
+cannibalize
+ascensions
+bussing
+balloons
+contenders
+commemoration
+aspersions
+consultation
+cashes
+belting
+augurs
+architectural
+bluebird's
+breastworks
+absconded
+bullets
+bloodstain's
+blunder
+astronautics
+coo
+approves
+authority
+assure
+amsterdam
+acquitted
+adversity
+celebrate
+bred
+bridged
+bloc's
+bullied
+affinity
+breezes
+baptistry's
+constitutions
+avouch
+amazingly
+consolation
+abnormality
+clashes
+buttes
+buzzard's
+breathers
+chipmunk
+contented
+carol's
+armers
+amazedly
+comprehends
+canonicalize
+breakthrough
+arbitrator
+butterfat
+cases
+besiegers
+affianced
+amelia
+bush
+airplane
+annulled
+bike
+alternated
+attackers
+convene
+aficionado
+anachronism's
+crude
+carelessness
+akin
+combated
+assisting
+clocker
+attacked
+briefed
+antic's
+attendants
+attracting
+cope
+allotting
+bandwidths
+add
+assaulting
+breakage
+climes
+arrival's
+burp
+accelerator
+capacitance
+arabians
+bankruptcy's
+archeological
+coins
+browbeating
+chasm
+cardinalities
+compartmentalize
+courter
+assess
+abreaction
+brakes
+compatibly
+compression
+characterizable
+briefing's
+alto's
+classifiable
+contrast
+correlation
+colonial
+applying
+authorizers
+contesters
+basely
+cherries
+clicking
+cornfield's
+alarmingly
+conferences
+business's
+banker
+bloomed
+airfield
+attracts
+building
+commutative
+atomization
+competitions
+boatsmen
+acquirable
+arkansas
+command
+beings
+compactors
+anodize
+arguments
+conforming
+adsorption
+accustomed
+blends
+bowstring's
+blackout
+appender
+buggy
+bricklaying
+chart
+calmer
+cage
+attractive
+causation's
+athenian
+advise
+cranks
+containers
+besotter
+beret
+attender
+cone
+bills
+aligns
+brushlike
+brownest
+bosom's
+berth
+accountably
+bequeathed
+affirmatively
+boundless
+alleyways
+commute
+bendable
+abhors
+calculation
+affidavit
+answerable
+bellicose
+counterfeiting
+admiral's
+chisel
+bridesmaids
+believers
+aggregated
+conspicuously
+abased
+armenian
+conspirator
+canonical
+assignable
+barrage's
+clearance's
+casts
+administratively
+befoul
+chaffer
+amazer
+colorer
+broaching
+crevice
+aniline
+coursing
+compassionate
+adhesive
+bibliographies
+corrects
+augments
+between
+causer
+amorist
+cellist's
+acoustical
+baseless
+cigarettes
+astuteness
+appropriators
+convincing
+bellhop's
+bemoaning
+calmingly
+chronologically
+castles
+algebraically
+appointees
+academic
+blunderings
+assassins
+barrel
+accuracy
+amortized
+ballpark
+acrobat's
+brazier's
+abortively
+coarser
+airfields
+contester
+circus's
+creased
+amorphous
+accomplisher
+blabs
+butchers
+crackles
+bachelor
+aviators
+chariot's
+circumflex
+binocular
+alienating
+artificially
+agreement's
+aglow
+afghan
+abrupt
+annihilates
+apologetic
+barge
+betters
+algorithms
+conjurer
+chargeable
+brindle
+alphabetizes
+coder
+availing
+bandpass
+arrogance
+convent's
+advertiser
+connected
+basso
+breakfaster
+comic
+congenial
+beau
+courters
+adapters
+abruptly
+chemicals
+bringed
+creaming
+butterer
+attained
+actuals
+averred
+brainwash
+centerpiece
+blabbermouth
+byproduct's
+adaptable
+automata
+art
+cheery
+beheld
+beehive's
+claimed
+crucial
+brokenness
+agility
+combating
+cleft
+amenity
+after
+configuration
+contrasting
+coarsely
+brass
+barnstormed
+bowel
+bridesmaid's
+cornfield
+crazing
+autocracies
+adult
+conceptualizations
+corroboration
+bedders
+arroyo
+alarmist
+boatman
+chests
+burglary
+budgets
+canary's
+arraigning
+chin
+barnstorms
+blamers
+brimful
+calculate
+cellular
+contended
+challenges
+brusque
+bikinis
+arithmetics
+chairpersons
+class
+aircraft
+capably
+centralize
+awhile
+compacting
+courteous
+archaeologist's
+cram
+adagio
+affronts
+amplitude's
+bureau's
+audaciously
+autism
+blueberries
+an
+chips
+confiner
+chopper's
+chronology
+breaching
+bead
+amass
+camouflage
+compensation
+aspect
+broker
+atrophy
+balk
+bloodless
+barnyard
+benefactor's
+airdrops
+caused
+anthem
+activist's
+bottomless
+arrogates
+avoided
+bouncy
+clarified
+articulate
+almoner
+communists
+blokes
+butternut
+clockings
+barium
+blows
+criticism's
+associations
+brute
+bleeds
+alliteration's
+bluestocking
+boxwood
+clearer
+allegiance
+conceptualizes
+captivating
+bolshevik's
+belabored
+biographic
+contaminates
+chanticleer's
+adjusted
+childhood
+arguing
+cape
+conversantly
+compensating
+collaborations
+arraignment's
+blasted
+charging
+aggregation
+apprentices
+bird
+codifiers
+ballistic
+breve
+bells
+carolina
+chalk
+buckles
+boyfriend's
+adorn
+accoutrements
+availability
+antisymmetry
+blades
+alluded
+asterisks
+bookcases
+additive
+consents
+advanced
+balalaika
+coders
+caliph
+alundum
+are
+controllable
+blazing
+clattered
+asiatic
+axiomatizes
+ace
+coining
+column
+auditor's
+carol
+concatenated
+arrayed
+capital
+cautioner
+clan
+beauteous
+abbreviate
+asteroids
+canal's
+consolidation
+closets
+concealer
+crevices
+abed
+complex
+conviction's
+abide
+arrests
+begrudges
+adolescent
+conceals
+cells
+circles
+bravest
+compromiser
+bagels
+areas
+afore
+allergies
+arrangement
+attraction's
+amulets
+abstraction
+captured
+crouched
+brothers
+cash
+achieving
+bastard
+compete
+boiling
+beaching
+amphetamines
+clerking
+congestion
+alleviates
+angry
+bared
+comprehended
+bloodstain
+constituency's
+automating
+aerial's
+counterfeit
+besotted
+basses
+biofeedback
+compilation's
+band
+consulate
+appellant
+cough
+antennae
+contend
+anniversary
+boor
+artifactually
+aerobics
+booths
+chubbiest
+consumable
+assignments
+bromide's
+confined
+breakers
+alongside
+courtier
+boisterously
+bilaterally
+alternation
+auspiciously
+arbitrated
+condemning
+burns
+correspondents
+composition
+cavalierly
+coverlets
+capacities
+clatter
+apotheoses
+cartography
+ceased
+capitalized
+auditor
+appendicitis
+chops
+barony
+anemometry
+befouled
+briefer
+chest
+begetting
+bloats
+bookseller's
+commitment
+confides
+carcass's
+battering
+altruistically
+ballots
+adornments
+broaden
+angularly
+coefficient
+cataloged
+brae
+advantage
+anthems
+calculated
+counseling
+agitate
+accentuated
+camel
+ambivalent
+bedposts
+beacons
+chubbier
+cheerer
+assumes
+concord
+autumns
+convention's
+alpha
+adulterates
+arbiters
+archaically
+criteria
+achilles
+cheaper
+bulling
+associators
+bloater
+brawler
+ability
+adherents
+commonwealth
+coyote's
+centrally
+bequeathing
+abandonment
+circumstantially
+courteously
+borrow
+countermeasure's
+capricious
+allied
+anagram's
+absorptive
+assuage
+asset
+booked
+aspects
+commits
+crates
+capacitive
+condones
+assimilates
+carriage
+competitor's
+cocoons
+aggravated
+caravans
+arbitrator's
+baked
+balanced
+annihilated
+addressable
+autonomous
+bandwagon's
+contesting
+burrowing
+coroutines
+abjection
+correctable
+applauded
+bragged
+code
+aggressiveness
+cluttered
+attacking
+chide
+am
+coasters
+blizzard
+contentment
+altruism
+certifier
+capturing
+combinators
+carefree
+activate
+blindfolding
+assassinating
+approximate
+biplane's
+aplenty
+arteriosclerosis
+concentrates
+antisymmetric
+assurances
+anarchist's
+ascend
+advancing
+atrocities
+butt's
+bearable
+craftiness
+categorized
+barn
+contributor's
+arises
+bushy
+bisque
+coasted
+bargaining
+area's
+couples
+cabs
+barter
+bulletin
+chisels
+broadcasters
+contingency
+bywords
+antimicrobial
+coexisted
+blinding
+arithmetize
+coweringly
+convince
+competed
+bauble's
+crab
+boggling
+advocacy
+atlas
+assembled
+ancient
+bloodstream
+balking
+bin
+bully
+affirm
+cruelest
+atone
+conserved
+confession's
+bat
+captive
+aster
+blames
+colonel's
+bones
+borderline
+cleanses
+classified
+crudest
+contiguity
+bailing
+ablaze
+bender
+attendee
+clobbers
+aliasing
+autopilot
+coolers
+cache
+allayed
+barnyards
+britons
+appointment
+adaptor
+blockers
+abridges
+bloodiest
+betrothal
+bombards
+bony
+bus
+canary
+antinomy
+awash
+comrades
+ablating
+collectible
+boats
+brand
+church
+bandy
+adhering
+barred
+ammunition
+chime
+accompaniment's
+battleground's
+composing
+caveats
+armor
+amoeba
+composure
+collides
+avowed
+banding
+counsels
+asymmetric
+abbreviates
+balky
+adjudicates
+anointing
+accursed
+copse
+action
+construction's
+accents
+ambition's
+caressing
+cosmetic
+accession
+clutters
+censures
+allusions
+belittled
+armchair
+abode's
+conception's
+ascribe
+aliases
+ancestry
+ax
+companionable
+aright
+boxed
+brighteners
+alloy's
+checkable
+arraignments
+bed
+bunkhouses
+abbeys
+ceasing
+companies
+cherishing
+chunk's
+barony's
+chinning
+burdens
+briskness
+beggarly
+beloved
+clambered
+constitutionality
+beguiled
+archers
+alleyway
+apostle's
+consulate's
+antiformant
+categories
+construct
+aliments
+acquired
+blotted
+alterations
+adolescent's
+cranes
+bluntest
+accusation
+chafer
+airstrips
+abolished
+bothersome
+churchly
+airy
+bedded
+awareness
+alliterative
+arose
+amputates
+civilization's
+arenas
+certifying
+aspirators
+carbon's
+bunching
+aerates
+bilked
+checking
+cloned
+administrations
+canvasses
+colorless
+chamber
+circumspectly
+benedictine
+advisedly
+classifier
+approachable
+banners
+concurrently
+chores
+agape
+convention
+bindings
+budget
+comedies
+ants
+ambassadors
+chroniclers
+carrots
+colorful
+bulkhead's
+coherence
+buyer
+aggressions
+congressional
+commoners
+cheapen
+concealed
+columnates
+anarchy
+actress's
+baseboards
+creature's
+centuries
+barbarian
+concrete
+bicycles
+acceptably
+acclimating
+biceps
+bloodhound's
+becalmed
+apostle
+bible
+conjunctive
+comb
+ballers
+bickering
+adulterous
+austrian
+applicable
+blackberries
+creasing
+catalogs
+avert
+asparagus
+cambridge
+bird's
+belgians
+admonished
+admirations
+conscientious
+crescent's
+connectives
+blissful
+commenting
+bagged
+assimilate
+abounded
+copyright's
+advancement
+axiom's
+compilation
+circumlocution's
+catheter
+chances
+concretely
+codification
+browned
+clustering
+bum's
+clauses
+boundlessness
+arteriole's
+alfresco
+begrudged
+blustered
+anglican
+adjoined
+bamboo
+bathed
+consortium
+carrot's
+cloak
+album
+bunglers
+approbate
+colored
+aim
+cowboy
+alienate
+cleverest
+ambiguous
+confrontation's
+clear
+africa
+bowline's
+astronauts
+belayed
+censorship
+animation
+bedrooms
+chasms
+compared
+cogitated
+barbarians
+accomplices
+columnizes
+beaming
+busied
+counterpointing
+aluminum
+coconut's
+acclamation
+chokers
+biomedicine
+basalt
+buckwheat
+cardinality's
+bafflers
+arid
+chap's
+abound
+biblical
+backbone
+anticipation
+condemner
+angular
+advisability
+believing
+boiler
+arclike
+abetter
+bespeaks
+axiomatically
+coarse
+auditions
+bludgeoning
+clam's
+chief
+arrow
+cementing
+anxiety
+aberrations
+brushes
+cherub
+corollary's
+bunters
+beefers
+barbiturate
+circumlocution
+conjoined
+charities
+coverage
+campaigner
+burrowed
+barracks
+bristling
+accomplice
+abandoned
+bull
+caked
+century's
+bantu
+bristled
+airer
+bench
+bevy
+chamberlain's
+attention
+cloning
+camouflaging
+alder
+counter
+credibly
+approvingly
+breakup
+artillery
+celestially
+bail
+baker
+bullish
+canvass
+conversationally
+bringers
+augment
+creditably
+butterers
+botswana
+contemptible
+bribing
+adumbrate
+barb
+calico
+alludes
+amplified
+chills
+cloak's
+aver
+arthropod's
+budgeter
+bereavement
+cellars
+crewing
+blackmailer
+ayes
+bedsteads
+breachers
+bazaar
+centered
+celebrity
+blameless
+abscissa
+aerators
+awaited
+british
+adversary
+cowslip
+buttons
+confusing
+buggy's
+belts
+canceled
+addresses
+bribes
+condoning
+bonneted
+coarsen
+amazement
+angels
+chemise
+carbonates
+apostolic
+bandit's
+contending
+consummate
+counterclockwise
+beneficence
+benefitted
+contradicts
+comfortabilities
+anemone
+conductive
+articles
+bookcase
+burst
+baptizes
+countless
+costs
+agonizes
+byte
+creeper
+begs
+bunnies
+attract
+able
+calories
+baskets
+american
+brunt
+cognition
+closing
+chef's
+backbone's
+complicates
+cloister
+bedsprings
+arrays
+brigs
+archbishop
+buckler
+clove
+catholic's
+bellboys
+chairmen
+clap
+clarifications
+ambuscade
+bight
+bellyfull
+allowance's
+academy's
+acquiescence
+ambush
+catches
+at
+billion
+contact
+bees
+adopters
+approximately
+chiseled
+attributively
+criers
+codification's
+cowslips
+contradictions
+buttock's
+categorically
+counterpart's
+confessor
+appreciably
+adjusts
+altitude
+construe
+cancer
+bay
+aristocratic
+alleviaters
+binoculars
+axiomatizing
+changer
+bustle
+civic
+bostonians
+crops
+authorizations
+cogitation
+baptize
+caressed
+abase
+ariser
+axiomatization
+aggravates
+confiscation
+bowdlerize
+backspaced
+alters
+clarity
+blots
+bland
+belligerent's
+burgher
+cardinally
+bookcase's
+buggers
+byte's
+avarice
+crowding
+beriberi
+allegories
+coronets
+cell
+calculative
+adduce
+amperes
+bladders
+adages
+contests
+cognizant
+actuates
+ambiguity
+brighten
+concert
+conviction
+booty
+ashtray
+braves
+blouses
+avoiders
+confederate
+bombings
+couplings
+convictions
+attractiveness
+chronicled
+corers
+anger
+covertly
+aural
+asynchrony
+arrowheads
+breakdown's
+bulletins
+ceremonialness
+clipper
+bracelets
+anthropomorphically
+benedict
+connecting
+bacterium
+achievers
+abutter's
+autocorrelate
+coupling
+blanketer
+continental
+assignment
+conundrum
+arab
+besides
+cheerful
+blowup
+bastion
+arrive
+combines
+agar
+cookie
+astronaut's
+constraint's
+article's
+confiscations
+bounded
+adjudicate
+belligerently
+boron
+brownness
+adept
+creep
+abduction
+accosting
+asylum
+autographed
+clash
+chiseler
+clumsily
+capitally
+braking
+absenting
+bagatelle's
+comet
+basked
+anything
+buffeted
+absentia
+bounty
+carols
+characteristic's
+constructive
+comforting
+aflame
+brainwashed
+booby
+aspirations
+adjudge
+behaviorism
+computability
+assessment
+consultations
+bowstring
+acknowledgment
+arranger
+chancellor
+attest
+compresses
+concessions
+asymmetrically
+administering
+clamoring
+arraigned
+archived
+admonition
+actor's
+aimers
+colorers
+booklet
+calibers
+affix
+bushel's
+atomizes
+creeks
+bleedings
+casuals
+archives
+certainly
+animate
+cons
+affiliate
+answered
+coyote
+coughed
+alligator's
+antagonized
+arousal
+assisted
+aerated
+competently
+conquering
+acclaimed
+assign
+announcer
+controllers
+amalgamation
+comfort
+antihistorical
+availed
+balsa
+annoyed
+basted
+asymptomatically
+cropped
+combinational
+barging
+conversant
+causality
+botches
+bedspread
+considerately
+bookstores
+climate
+blessing
+accordion's
+cdr
+bonanza's
+construing
+bearings
+bluster
+backspaces
+babyish
+countermeasure
+crime
+battered
+audit
+associating
+corps
+application
+archangel's
+aided
+breasted
+compelled
+acrobats
+breakfasts
+chronologies
+beet's
+averts
+convergence
+attributable
+adverbial
+churns
+arrest
+breastwork
+beefs
+brownie
+create
+contradistinctions
+coordinators
+abandoning
+byline
+beatitude
+autosuggestibility
+bipartite
+annals
+assents
+conceives
+amalgams
+cleft's
+clicked
+appointers
+bible's
+boots
+caret
+attaches
+controversy's
+combinatorial
+bazaars
+cardinals
+bored
+catering
+christian's
+ashman
+consequence's
+austere
+clay
+birthday's
+amongst
+arbitrariness
+brainstorms
+chateaus
+coaxer
+applause
+cautiousness
+adorned
+compromises
+creatures
+compliance
+apartheid
+archiving
+amoeba's
+communal
+comedian's
+aggressive
+crop
+ante
+better
+chalice
+aristocrats
+circling
+belittle
+abortion's
+coldly
+certification
+befriends
+courthouse
+anesthesia
+accorder
+athletic
+blithe
+bedder
+abasements
+councils
+beware
+abductor
+assonant
+clench
+aspersion
+abortion
+abating
+birches
+breakpoints
+acyclic
+ablate
+canners
+cistern
+boxtop
+composite
+cloudless
+computation
+chastely
+abusing
+bunker's
+compounding
+alveolar
+chaplains
+bias
+audiological
+capability's
+bangle
+barren
+antidote's
+cranking
+baptizing
+bond
+borders
+automobile's
+allegoric
+chargers
+baltic
+autumn
+columns
+absolute
+connoisseur
+cranberry
+contiguous
+consoled
+confirmations
+argot
+blouse
+annotated
+callous
+astounded
+crashed
+autonavigators
+chivalry
+columnating
+beefed
+convincer
+allegorical
+bagger
+assume
+containable
+artistically
+calibration
+architectonic
+campaigns
+addressability
+crazier
+buy
+brightener
+bastion's
+blurb
+awaits
+commands
+chocolate
+bleaching
+antenna
+blowers
+chorused
+composers
+assigners
+aspires
+coils
+bid
+application's
+clamped
+bedding
+awkwardly
+coppers
+costumes
+borax
+caged
+candler
+badges
+clutches
+consign
+apprised
+buys
+adiabatically
+aggregately
+canned
+abstract
+acrimony
+coax
+analytically
+absurd
+alluring
+contradicted
+aspersion's
+bribe
+boos
+chattererz
+backache's
+complying
+continent
+cohabitate
+causation
+astronomer's
+cities
+bookie
+bleating
+cracking
+bicameral
+convoluted
+adjustable
+ambulance
+can
+boulders
+consideration
+announces
+briars
+antipode's
+bartered
+ancestor
+biplanes
+characterize
+crested
+bum
+bridling
+consolable
+bungles
+coffee
+buffets
+congratulation
+commitment's
+adequately
+clown
+capacitor's
+broomsticks
+agglutinate
+activations
+asians
+canon's
+authenticity
+complexities
+cripple
+bracket
+counselor's
+beatably
+bounced
+baton's
+crankiest
+barbell's
+caster
+casseroles
+ballad's
+bob
+batched
+attenuated
+beakers
+biologist
+bleary
+condescend
+blondes
+augustness
+boldface
+battlefronts
+acumen
+bolting
+articulatory
+butyrate
+bowel's
+backwater's
+colonel
+creating
+authorized
+bijection
+accruing
+admirably
+correctness
+citadels
+clasps
+bandlimit
+bib
+appalachia
+contrives
+bundle
+audiology
+circumventing
+blinker
+choked
+bilks
+clears
+affirmations
+arbitrating
+bites
+bootstraps
+capitals
+commuters
+billeted
+authentication
+choice
+attentively
+aggressor
+arterioles
+crowds
+chestnut
+backstitched
+attachments
+assimilating
+bewilderment
+atrophied
+chintz
+blackjack
+armadillos
+bonfire's
+ballast
+agonies
+busier
+coefficient's
+adventurous
+ballet's
+coil
+chewed
+come
+bonder
+catalogue
+coursed
+arise
+biennium
+ceremony's
+blanching
+appraisers
+acolyte
+argues
+beholden
+appanage
+astatine
+banana's
+coons
+civilians
+bodyguard
+archipelago
+bug's
+candles
+antique's
+accidently
+blighted
+belgium
+besieged
+burned
+abuse
+asian
+chute
+awkwardness
+abasing
+bottler
+ardently
+blab
+breakwater
+cavity
+cheated
+befall
+according
+chronicle
+airframes
+bats
+choring
+authorize
+consumed
+chatter
+annunciated
+capers
+anomalous
+clustered
+burner
+acquaintance's
+badger's
+basic
+affectations
+buzzy
+coast
+attendances
+activating
+beams
+cohesive
+attainable
+barbecueing
+beautiful
+acronyms
+communion
+client
+atypical
+antagonists
+conservations
+arguers
+agglomerate
+antigen
+battalion
+ambition
+countered
+assistant
+classed
+arming
+alveoli
+buff's
+backplanes
+busted
+bermuda
+converting
+brutish
+boot
+acidities
+confrontation
+chapel's
+berlin
+ascender
+behead
+buddy's
+commandment
+actuated
+brilliancy
+chance
+bedrock's
+bridgeheads
+arable
+avid
+arteries
+caresser
+ballyhoo
+attested
+african
+comradely
+consciences
+commencing
+antennas
+annulments
+bobolink's
+advisee
+acceptance
+crack
+ascendent
+appendage's
+accommodates
+accumulated
+clones
+apocryphal
+ages
+cluster
+capitols
+camper
+beading
+amble
+buffeting
+circumspect
+advances
+analyzes
+courier's
+aperiodic
+appealer
+atonally
+attentive
+conspire
+appropriating
+armed
+allergic
+agglomeration
+consternation
+blinks
+audibly
+aspirins
+bunions
+adverbs
+armload
+bet's
+caring
+carryover
+coordinator's
+afterthoughts
+allays
+abided
+brownish
+baiting
+capitalism
+coined
+conspirators
+automatic
+contradistinction
+conductor's
+backstitching
+conjure
+casings
+accountant
+clinched
+constrain
+alcohol
+bee
+anticompetitive
+britain
+bade
+camera's
+antimony
+activated
+burglarizes
+compatible
+cotyledon's
+artificiality
+bath
+citadel
+archivist
+chandelier
+addiction
+ampersand
+bitterer
+constructively
+afield
+bing
+attractor's
+cringe
+allergy's
+bigots
+assimilation
+ate
+capitalization
+abridge
+buzzword
+befit
+bandlimited
+commandant
+alabama
+acculturated
+brightening
+bulldozing
+cooky
+bunks
+centers
+bespectacled
+adherent's
+abducts
+another's
+condensation
+billeting
+bye
+chess
+craziest
+ballgown's
+archaism
+consorted
+chinned
+cowl
+beat
+bootlegger
+bravado
+classically
+bulging
+browbeat
+accommodate
+borne
+bronzed
+artifice
+arcade
+become
+backlog
+addressers
+amphitheaters
+befogging
+crochet
+aiding
+celebrated
+conversational
+backbends
+authentications
+advertisement's
+blockade
+bulldozes
+contraction's
+bricklayer's
+brain
+conveying
+anemia
+chronology's
+channeling
+caution
+commanding
+crosses
+artisan
+conditions
+admired
+authenticator
+airships
+blunter
+bridesmaid
+counseled
+cheeriness
+chiefs
+boils
+clerical
+atrocity's
+balls
+ambled
+canvases
+consoles
+abscessed
+abetting
+blitzkrieg
+bottlers
+beveled
+condemn
+alumna
+cords
+admittance
+annotates
+citing
+corrector
+appreciative
+branching
+betrays
+buttoned
+ailment
+boulevards
+bottlenecks
+chamberlains
+bedbug
+covenant's
+crispness
+considering
+broadcasts
+audubon
+arousing
+correction
+barrack
+closure
+contrastingly
+brittleness
+assassin's
+bursa
+bungalows
+balked
+conceptual
+carcasses
+arabia
+blueprint's
+affectingly
+consorting
+buses
+auger
+appointed
+brute's
+bosoms
+anyway
+arrowed
+anaphorically
+clarify
+approachability
+assistance
+buzzes
+commonplace
+bluebonnet's
+adroitness
+availers
+aquifers
+architecture's
+action's
+backgrounds
+abduct
+attired
+briber
+admissibility
+cease
+beck
+auctioneers
+birdbath's
+atomic
+crossing
+considerate
+biconvex
+bulge
+bedridden
+arising
+aggression's
+cherish
+bureaucratic
+abater
+amputating
+atop
+climber
+clutched
+afford
+bisections
+bonnets
+commendations
+bloke
+abundant
+clamp
+aloes
+aboard
+atheistic
+advantageously
+buffs
+chimney's
+cheerily
+benefactor
+ample
+bushwhacked
+captain
+buckskins
+contextually
+antiquarian's
+browns
+bubble
+ban's
+brine
+acculturates
+anhydrously
+beaver's
+advantaged
+bibliographic
+clasping
+clattering
+coerce
+colorado
+airmen
+bandlimiting
+balks
+boners
+attached
+chosen
+convened
+bordello
+composer
+botanist
+backtracks
+civilization
+commutativity
+bloodshed
+cohere
+bunkhouse
+archdiocese
+boycotted
+crosswords
+bedspread's
+anteaters
+cove
+apothecary
+chute's
+addressee
+climatically
+blower
+bane
+cask's
+beetling
+ambiguities
+before
+abstain
+arachnids
+bucket's
+amateurs
+blackouts
+adverb
+butchery
+conjunction's
+barricade
+audiologists
+aphorism
+complete
+butts
+bishops
+allotment's
+confusingly
+channeller's
+blanches
+bragging
+bathe
+comedians
+celestial
+citizens
+couple
+backpack
+aphasic
+brothels
+axles
+cancellations
+bonus's
+consolidates
+authoritative
+axle's
+acclimatization
+carolinas
+chime's
+antibiotic
+bisons
+biographically
+achieve
+bleachers
+bicentennial
+behavioral
+accomplish
+concealment
+biddies
+antitoxins
+arriving
+apprehend
+affluent
+cliffs
+bleached
+astronomers
+connection
+bride
+backs
+bog's
+casket's
+continual
+ampere
+cat
+alternator
+cotton
+athletes
+communicant's
+best
+befuddling
+benefactors
+appease
+annoyingly
+context
+astonished
+cracked
+amnesty
+autumn's
+binder
+babying
+contributory
+assumption
+cowls
+cocks
+airless
+consummated
+atypically
+beneficially
+chairing
+accusative
+commanded
+bufferrer's
+alerter
+arbiter
+civilly
+charms
+backscattering
+cheater
+bushes
+caverns
+chieftain
+calf
+comparing
+aurora
+butyl
+cower
+bemoans
+baptistry
+carpenter's
+capes
+bordered
+arrows
+blocker
+crest
+appeal
+arabic
+conventions
+axis
+brains
+bookkeeper's
+circle
+cooks
+circumlocutions
+adventists
+barringer
+affording
+anatomically
+basements
+barbarities
+configuration's
+contributes
+collaborating
+beach
+comet's
+bakes
+assigns
+ballerina
+cheapens
+clinging
+conquered
+bisecting
+closenesses
+bugle
+boatmen
+beatings
+complicator
+bight's
+banister's
+archaic
+anthropologists
+clams
+beginners
+committee's
+communicants
+alone
+bounteously
+bastes
+ascertain
+alphabetical
+bringing
+batters
+amazon's
+constituent
+benders
+being
+constitutionally
+audiometric
+blast
+copings
+bailiffs
+colts
+coolies
+airlift's
+boomerang
+bifocal
+clothes
+cashiers
+congenially
+billows
+boilerplate
+biochemistry
+betting
+brimmed
+complementers
+breading
+bragger
+adducting
+bisectors
+abrogates
+criticized
+comrade
+bucolic
+birthright
+blurs
+challenger
+complicated
+bluebonnet
+biscuit's
+classmates
+campus's
+boundary
+bedbug's
+adjustor's
+acre
+bicycling
+awe
+additions
+baiter
+authorizes
+beautify
+copier
+buffet
+belfries
+acquisitions
+brooch
+crickets
+caterpillars
+beefsteak
+complicating
+bedpost
+criminal
+celebrity's
+bookseller
+christened
+coerces
+clamors
+all
+boatyard's
+canoe's
+begin
+anaerobic
+bushing
+agreers
+concedes
+countermeasures
+beg
+agglutinin
+bunted
+ammonium
+aspiration's
+bathrobes
+changeable
+beached
+bestowal
+beaner
+catsup
+admires
+clockwise
+agile
+alarms
+ached
+chinks
+buffer's
+cartesian
+annunciate
+chanticleer
+avenue
+anchor
+alliterations
+blanking
+bargained
+breathtaking
+crime's
+assiduity
+argentina
+contiguously
+aqua
+bested
+borderlands
+appetite
+captive's
+bipolar
+conceal
+counters
+costumed
+arrestingly
+bunting
+blight
+champagne
+brusquely
+address
+bloodhounds
+associative
+creed
+arithmetical
+balustrade's
+belabors
+complementing
+checkout
+archivers
+badlands
+behaviors
+ampoules
+bridgehead's
+antiquarian
+clumsiness
+considerable
+apportions
+anglicans
+appealingly
+barfly's
+absorptions
+awards
+congregates
+cloister's
+armour
+avoid
+correctively
+chucks
+burps
+bums
+berry
+batches
+administration
+atones
+bishop's
+blonde's
+casualty's
+cores
+bodied
+alter
+assonance
+apprise
+antitoxin
+avariciously
+checkpoint's
+affirmative
+conjures
+angstrom
+aesthetically
+canyon
+binge
+crazed
+breastwork's
+aids
+boston
+conceits
+announcement's
+beechen
+accessory
+authorities
+constrained
+automation
+anaplasmosis
+commander
+commendation's
+belabor
+cornfields
+artemis
+asphalt
+contracted
+brochure
+crafted
+allegedly
+alien's
+auditory
+blowfish
+adducible
+confederations
+annuals
+britches
+acquaintance
+appallingly
+abounds
+burglarproof
+crossers
+bayous
+brisk
+authority's
+covetousness
+averse
+accomplished
+aromatic
+admiral
+bijective
+avenging
+bran
+boatyards
+beseeching
+challenging
+bares
+acts
+abductions
+compendium
+compulsion's
+calendar's
+clad
+blockage
+conventional
+craze
+cajoling
+acceptability
+bungalow
+buff
+cramps
+attackable
+calculator's
+asp
+braved
+colors
+balling
+contaminate
+crackling
+comes
+complimenters
+across
+astronomy
+aborigine
+bobwhite's
+autopilot's
+chattered
+appall
+autonavigator
+bashed
+acoustics
+beachhead's
+apartments
+convenience
+blackout's
+bands
+autonomously
+amounters
+centripetal
+achievable
+astringency
+attuned
+concatenating
+copyright
+coding
+assumption's
+anastomoses
+confiscate
+asking
+beneficial
+adhesions
+busboy
+bronzes
+audacity
+bruises
+crash
+beau's
+circuit's
+aborts
+baubles
+beliefs
+assuaged
+costed
+blinking
+characterized
+bowled
+block
+conquests
+confesses
+amusers
+ceiling
+berets
+berliner
+abstentions
+child
+authoritatively
+closeness
+bushel
+considered
+communicates
+cheerlessly
+autofluorescence
+aquarium
+affects
+appurtenances
+airbag
+approaches
+admonishments
+bets
+bounden
+courtly
+bodybuilder's
+campus
+brainstorm
+americans
+chairperson's
+botanical
+askew
+amazon
+bleed
+clime's
+cooperations
+commonness
+boatloads
+blinked
+courtyard
+adapted
+aforethought
+backwater
+burr
+cathode
+awaking
+buzzed
+bridgeable
+arrives
+adventuring
+beseech
+attrition
+copied
+colon
+client's
+bandstand's
+advice
+baptistries
+antithetical
+alcohol's
+contradicting
+ambidextrous
+belches
+category
+bluntness
+coupon's
+assimilations
+comfortable
+caller
+affliction's
+attends
+compactest
+baler
+beacon
+blind
+bleakness
+beseeches
+courts
+couch
+consequential
+adulterers
+craving
+biggest
+astray
+bigoted
+barfly
+charges
+ambiguity's
+commentary
+crankily
+cowerer
+carnival
+bachelor's
+bituminous
+continuance's
+calamities
+claws
+apiece
+century
+ascendancy
+charts
+animations
+aggression
+chickadee's
+carve
+confidence
+actor
+bubbled
+becalming
+convulsion
+chivalrous
+brightest
+centralized
+beautifies
+amateurishness
+birthrights
+alligator
+circumstantial
+constructors
+conceptions
+arranging
+cart
+cent
+ager
+congruence
+carrot
+chariots
+cloudier
+captivity
+conquerers
+compartmentalizes
+condensing
+celebrities
+chalks
+accordance
+chilled
+conversations
+apples
+conceiving
+average
+blessed
+creator
+ant
+cling
+annoyer
+aviation
+cohesively
+correspondences
+boor's
+apprehended
+bessel
+both
+characterizes
+bards
+cots
+acculturating
+cemeteries
+carting
+alcohols
+bitterest
+ascetic's
+conducts
+caking
+airspace
+autocrats
+ashes
+chimes
+broadcaster
+commuter
+basket
+borderland's
+broadened
+boyish
+allegretto's
+ban
+bidder
+christen
+blessings
+bury
+arranged
+choir's
+apathetic
+boring
+aryan
+appearing
+binds
+cooperates
+bounces
+airspeed
+complicators
+adapting
+babbled
+agglomerates
+bedraggled
+addictions
+bolt
+calmly
+blur's
+boatload's
+anesthetic
+bugs
+colt
+completing
+boxer
+billers
+affronting
+absurdity's
+chides
+comparatively
+braided
+clipper's
+cot's
+calves
+articulations
+branchings
+attraction
+concatenates
+alligators
+cake
+boom
+crashing
+afar
+abler
+beamed
+adverse
+adrenaline
+agriculture
+beehives
+crankier
+courthouses
+advises
+consigns
+bisect
+azimuth's
+carpets
+arthropod
+brewery's
+commonalities
+altruist
+astride
+appreciate
+carved
+briefs
+admitter
+celery
+congregate
+clocking
+assassinated
+adding
+canvasser
+civics
+contemptuously
+calculates
+advisees
+bumbling
+algorithmically
+cloudy
+algebras
+addiction's
+cop's
+assurers
+confidently
+affector
+analyzers
+chimneys
+burdening
+antitrust
+admix
+avoidance
+choking
+coexists
+accustoms
+cellar
+anchovy
+constructor's
+confinements
+consequently
+accelerations
+accoutrement
+churchman
+biller
+affected
+brigades
+cremating
+corridor's
+bagging
+ah
+berating
+collective
+acuteness
+arrestors
+cab's
+border
+agitation
+animism
+arches
+alveolus
+cessation's
+averrer
+abash
+counterrevolution
+attesting
+animateness
+bawdy
+americana
+bloodstained
+applicator
+annotating
+annunciator
+clamored
+acting
+aerosols
+axiomatization's
+brags
+coalesces
+avocation
+combining
+crazily
+bravery
+burying
+adored
+airfield's
+accounting
+broadeners
+anise
+chimney
+added
+avenges
+bellicosity
+cranberries
+arsenic
+communities
+comparable
+bunkered
+architect
+alphabetically
+beautified
+apogees
+communist
+anatomical
+complexity
+accost
+autographing
+browsing
+ameliorate
+bookers
+bandaging
+clinical
+appellants
+counteract
+clairvoyantly
+bootstrap's
+canner
+boastful
+attainer
+ash
+beaded
+brake
+barest
+befriend
+burglarproofing
+allegorically
+bunts
+believes
+accession's
+buck
+boathouse's
+byword's
+anthracite
+accuse
+conjunction
+burping
+commandant's
+creativity
+affirming
+bark
+amuses
+balcony's
+auditors
+counsel
+clamber
+borates
+cowboy's
+bickered
+boors
+combing
+biting
+breeze
+crowder
+corn
+bloke's
+bombast
+bookstore
+blared
+bedlam
+carbohydrate
+coops
+bundles
+blistering
+antarctic
+anterior
+bilinear
+chocolate's
+context's
+alternating
+annoyance
+constancy
+ambivalently
+buddy
+brutalize
+bobbin
+alleles
+commotion
+attributes
+airborne
+creed's
+bolstering
+coaxed
+airframe
+breaker
+accept
+abashes
+attentional
+contributor
+comparability
+auscultating
+cocked
+computationally
+buffered
+career's
+analyzable
+absently
+courtyard's
+buildups
+apportioned
+balkanized
+annulling
+cremation
+buffetings
+conditional
+confided
+airliner
+bulldozer
+approaching
+anagram
+apollonian
+canaries
+bloat
+bluebird
+collision
+cool
+connectedness
+abasement
+artisan's
+avoidably
+clerks
+afflict
+briton
+corroborates
+cameras
+counted
+boldest
+burglars
+brutes
+brows
+abhorrent
+configuring
+averaged
+ace's
+buying
+abandon
+bayou
+cottons
+auditioning
+amplifies
+clippers
+brainstorm's
+alto
+brutalities
+bunch
+agricultural
+bursts
+blunting
+archer
+activity
+carefulness
+bedroom's
+concomitant
+balm's
+artificer
+barking
+breathy
+babies
+acacia
+bodies
+cap's
+criticised
+conversed
+crewed
+ascendant
+budgeting
+coroutine's
+charmed
+bellboy's
+conservatism
+butler
+acculturation
+conclusion's
+adapt
+cellist
+contempt
+adumbrates
+borrowed
+confounds
+allegiance's
+blabbermouths
+accrues
+captor
+coop
+baseballs
+cottages
+apartment's
+assertiveness
+assent
+artfully
+bagger's
+abolishment
+acetylene
+accessory's
+blackbird
+baptist's
+consist
+cavern
+buttock
+corporal's
+autoregressive
+bailiff's
+birds
+corder
+bracketing
+antlered
+barbiturates
+county's
+addicted
+agglutinated
+abashed
+competitively
+captains
+bloating
+accepts
+choose
+ashamed
+backyard's
+apiary
+contradiction
+balalaika's
+arctic
+broom
+anvils
+coffee's
+alliance's
+agitator's
+change
+adjusters
+cremates
+complexes
+bodyguard's
+burl
+antithyroid
+ambient
+airfoil
+apricots
+athleticism
+abjectly
+bankrupts
+answerers
+alternatively
+confronter
+breaking
+baronial
+cannibalized
+appetites
+breaded
+blackboard's
+battlegrounds
+cosine
+barrenness
+abbreviation
+budging
+boolean
+acrobatics
+again
+ashtrays
+clashed
+contingent's
+compulsion
+bedazzled
+collapsing
+comparison's
+businesses
+compassionately
+achievement
+buffering
+candlesticks
+austerely
+awls
+associate
+absolved
+annexed
+airway
+clipping
+counselors
+conscience
+attempters
+constructing
+biases
+cautioners
+comma's
+cosines
+char
+auscultates
+afire
+comely
+amity
+beverage's
+anew
+ballplayer's
+adulterated
+authorship
+alterers
+burdened
+attributive
+afflictions
+blinded
+barrier's
+attachment
+brotherhood
+bridegroom
+atoms
+cobweb's
+copes
+controversies
+complexion
+crawling
+atomized
+adjust
+accuracies
+concern
+cinders
+authorization
+appraisingly
+bladder's
+cooked
+cowers
+batter
+commissioner
+close
+burglar's
+allocated
+anvil
+aftershock
+abrogating
+chemistries
+advisable
+conduct
+committee
+blaring
+appalling
+braveness
+alertly
+artificialities
+brevet
+collision's
+arizona
+bower
+creamers
+awnings
+arsenals
+crane
+city
+contemplative
+catheters
+administrators
+attorney
+churned
+attractions
+columnation
+bobbed
+centipedes
+bostonian's
+apprises
+buries
+allege
+botulism
+adobe
+ambassador's
+covenants
+boon
+asynchronously
+bigness
+axial
+chaffing
+battleships
+ant's
+anthropological
+accent
+brushing
+brassy
+consumptions
+battleship
+absorb
+beckons
+brook
+connectors
+clinches
+accesses
+beaters
+archaicness
+bursitis
+chided
+bomb
+assimilated
+addicts
+convening
+arianists
+counting
+altar's
+confusions
+attachment's
+clipping's
+amazing
+corset
+bossed
+attach
+commandingly
+animatedly
+allegations
+assuages
+annulment
+compress
+aptitude
+absurdities
+autobiographic
+aspect's
+concentrator
+burgesses
+anagrams
+bedeviled
+assemblers
+convinced
+commentary's
+agglomerated
+biological
+callousness
+axolotl's
+atmospheres
+authoritarian
+cancer's
+above
+charting
+aldermen
+battler
+cistern's
+bouncer
+amassed
+conquest
+altering
+arrogantly
+brokenly
+comparator
+counsellor's
+attenders
+cackle
+criticize
+authored
+ably
+believed
+compelling
+accepter
+cleansed
+afflicted
+backslash
+computed
+almighty
+attache
+braes
+carriage's
+benediction
+brigadier's
+contemporariness
+boomtown
+amplitudes
+breakwaters
+clod
+catch
+bar's
+activist
+caves
+assenting
+camp
+attainments
+brotherliness
+continuances
+appearance
+applicator's
+browbeats
+banjos
+addendum
+became
+adduces
+armadillo
+brothel
+almanac
+courageous
+assault
+chunk
+coaching
+atheist's
+blunted
+aperiodicity
+congresses
+boastfully
+burglarproofed
+broadest
+bashfulness
+affect
+acne
+bottleneck's
+criticisms
+corrupts
+colonized
+closeted
+canonicalizing
+auditorium
+antenna's
+awfully
+anti
+consumes
+agonize
+algebra's
+championing
+blush
+bugger
+antagonize
+beethoven
+blase
+boycotts
+compensatory
+bugged
+boroughs
+anatomic
+batons
+arguably
+affricates
+appreciations
+cavalry
+alumna's
+arcing
+backpacks
+braces
+contextual
+coupon
+chillingly
+allocates
+abuts
+contribution
+commodity
+admonishing
+coolly
+cabinet's
+collapsed
+confessions
+adjured
+capriciousness
+chastising
+babe
+aerodynamics
+accepting
+concept
+contour's
+consequentialities
+birthday
+bankrupted
+birthed
+benefit
+concentrations
+azalea
+channels
+chestnuts
+contenting
+antedate
+censors
+contagious
+abbot's
+channellers
+apt
+commend
+avocation's
+admonition's
+abolition
+confederation
+carried
+clumsy
+coincidences
+bumper
+burr's
+bugles
+bribers
+attainably
+consume
+comma
+creativeness
+accuser
+bombs
+abbey
+baffled
+aside
+clip's
+appeases
+compass
+bundling
+abstractionism
+confide
+creases
+apropos
+confronted
+corrective
+concurrencies
+autocratic
+alien
+attending
+antagonistic
+broadcast
+asymptote's
+belied
+breasts
+contrapositives
+coiner
+accordingly
+cohering
+computers
+cow
+bibs
+ancestral
+controller
+attacker
+alerts
+coconut
+agency
+alerted
+alcoholism
+ammoniac
+actinometers
+acquitter
+bud
+cessation
+alleging
+centralizes
+articulators
+council's
+carvings
+arduously
+blown
+anode's
+arrogate
+bisects
+centimeters
+burgeoning
+course
+appointee's
+ascribable
+communicate
+contrivance's
+adoptions
+attune
+acres
+abyss's
+corporal
+certifiers
+analyze
+augusta
+bestseller's
+checkpoint
+coexist
+attainers
+argon
+bearded
+crudeness
+averaging
+brick
+adducing
+annulment's
+chicks
+blocked
+cisterns
+afoul
+affiliates
+briskly
+adhesion
+ascertainable
+appeasement
+blueprints
+agreements
+blindfolds
+communicator
+characterization
+annoyances
+breeches
+brushed
+clinic
+competes
+chuckled
+cradled
+balmy
+antisubmarine
+alternate
+armpits
+barn's
+conjuncts
+adhere
+allows
+counteracted
+appetizer
+capturers
+cleanse
+avant
+abbe
+corpse's
+arduousness
+badge
+begets
+contemplated
+caveat
+copiously
+athena
+aggrieving
+alibi
+accumulation
+basket's
+aftershocks
+bass
+conjuncted
+chaps
+brunch
+colonials
+bibbed
+clusters
+antagonizing
+constituencies
+combings
+bearish
+continuously
+adequacy
+brow's
+catalog
+alderman
+comedic
+chemists
+concernedly
+conceded
+alarm
+arced
+buckle
+confidingly
+coherent
+closes
+buffoon
+brace
+adjustably
+crackers
+contamination
+burgess's
+aerobic
+constitutes
+baptismal
+broadness
+blimps
+concatenation
+claiming
+bard's
+aerosolize
+adjoins
+copies
+coats
+boggle
+corroborated
+concreteness
+bill
+cautions
+bantam
+bearably
+armchair's
+birthright's
+cravat's
+cone's
+courtiers
+asunder
+bulletin's
+biopsies
+alley
+contrive
+blasphemies
+amuser
+ballerinas
+blushed
+causticly
+brandy
+blinkers
+complimenting
+crimsoning
+angola
+apprehensiveness
+bolster
+columnate
+byproducts
+berths
+accusal
+chubby
+arrived
+camps
+blemish's
+anaconda
+cook
+airfoils
+atlantic
+boosted
+converge
+availer
+appalachians
+coffin's
+boarding
+alga
+crouch
+columnizing
+consul's
+chastises
+angling
+apple's
+billiard
+attentiveness
+adroit
+apprehensible
+cereal
+blouse's
+browning
+bodybuilder
+coaxing
+assertion's
+connective's
+commemorated
+accountability
+crooked
+blips
+chandeliers
+aristocracy
+bangs
+coke
+abutment
+community
+calculus
+congregated
+crepe
+compromised
+airlines
+contributing
+contingencies
+coordinated
+alginate
+batted
+contender
+alma
+antagonisms
+accompanied
+airport
+administrator's
+appraisal
+breadbox
+condemnation
+backlog's
+available
+consequents
+crooks
+commonwealths
+barring
+channeller
+crucially
+archaeological
+charming
+adventist
+credits
+appetizing
+breads
+clients
+climbing
+aloneness
+abstractness
+appearer
+astute
+clockers
+antagonizes
+agonized
+bastard's
+conjectured
+aqueducts
+aureole
+boatswains
+conjured
+chauffeur
+complementer
+behold
+bustards
+bivouac
+cluck
+anus
+bless
+catastrophic
+bounty's
+allowed
+answer
+concealers
+brainchild's
+coercion
+buzzword's
+bordellos
+appertain
+applier
+couriers
+aesthetic's
+craft
+capacitances
+capped
+coupler
+category's
+anvil's
+conquest's
+checksums
+clucking
+bronchus
+acrimonious
+changeably
+accenting
+argued
+conditioning
+brewing
+backwardness
+cascaded
+atomize
+contours
+arianist
+apart
+conflict
+carefully
+banshee's
+conveys
+arbitrates
+amphitheater's
+amen
+alimony
+bound
+buzz
+courtroom
+apparently
+coalescing
+circulating
+amounter
+bypasses
+breadth
+choral
+completion
+arisen
+anticipating
+bilges
+contractions
+bedspring
+commune
+blacklisted
+beagle
+alkaline
+atolls
+carelessly
+blimp
+corking
+brevity
+alterable
+canada
+bear
+bluntly
+cartridges
+connoted
+countries
+corroborate
+consecration
+corrupted
+appreciating
+combatant's
+alkalis
+affecting
+blues
+casserole
+ballad
+bewitches
+common
+as
+because
+bathroom's
+anchorages
+beguile
+connect
+convenience's
+counteracting
+assorted
+care
+contains
+centimeter
+ancestors
+briefings
+busses
+churchyards
+breakable
+amortizing
+courthouse's
+click
+courses
+ajar
+county
+covet
+confidences
+capitalizer
+agog
+backtracking
+copious
+bestsellers
+chilliness
+bringer
+browse
+centipede
+bawled
+bricklayer
+breath
+assailants
+abysses
+command's
+characterizer
+calculating
+america's
+aurally
+contain
+alias
+commentators
+confounded
+appending
+accidents
+chatters
+coordinates
+bleeder
+blueness
+badger
+bolsters
+astounding
+capitalist's
+conservation's
+commences
+aimed
+bun
+comparators
+competition
+bauble
+backbend's
+bled
+assassinate
+chop
+anemometer's
+cobbler
+coldness
+audiometry
+affinity's
+amalgamates
+cowardly
+consolidating
+beads
+brackish
+bookings
+accuses
+bog
+compartmentalizing
+clutching
+calming
+collars
+clambers
+banqueting
+beaked
+authoring
+correspondence
+apostrophes
+affirmation's
+bespeak
+costing
+brought
+complainer
+battalions
+asymmetry
+boathouse
+canyon's
+awarded
+amplitude
+anarchical
+anticipatory
+bolder
+cooperatives
+caterer
+adviser
+balkanizing
+augur
+cannibal's
+balustrades
+attaching
+collector's
+commercials
+capaciously
+coincidence's
+bumps
+ascot
+bale
+blackmail
+baby
+aftereffect
+bloomers
+buttresses
+avenues
+climaxes
+aqueduct
+cater
+brainchild
+avail
+bypassed
+bowl
+california
+cements
+boxes
+brained
+bedevils
+captors
+acuity
+ascends
+breakthrough's
+assigner
+caner
+bequests
+ceilings
+axers
+bookshelf
+autistic
+celebrations
+axons
+chiding
+asterisk
+allophonic
+blindingly
+cherubim
+boaster
+confining
+anxious
+clowning
+advisement
+approach
+anesthetic's
+crescent
+alertedly
+birdbath
+beardless
+bras
+auspices
+choosers
+approval's
+afflicts
+corrosion
+arpeggio's
+bodyweight
+cranky
+battlefront
+affirmation
+churchyard's
+aeroacoustic
+anders
+adjustment
+baneful
+citation's
+acetone
+blend
+binuclear
+boner
+annotation
+announce
+claimable
+contemporary
+clothing
+acquitting
+choosing
+attacher
+bananas
+binaural
+arrestor's
+aches
+conclude
+collaborators
+await
+blaspheme
+bequeaths
+crows
+balconies
+begging
+conducting
+abstracts
+assignee's
+causations
+approximation
+articulated
+considerably
+apricot's
+afferent
+assertively
+bonding
+calms
+cranberry's
+cost
+captaining
+agenda
+corridors
+complaint
+christens
+aggravate
+countess
+arbitrators
+ascribing
+breech's
+bellwether's
+burglarized
+confinement's
+animating
+adjectives
+cannister's
+bemoan
+cleanest
+acme
+cheapest
+activities
+allophone
+boy
+belaboring
+captions
+compactor's
+actuator's
+befouling
+arachnid's
+computerizes
+compile
+absorption
+bridled
+absorber
+convicts
+birch
+alkaloid's
+cannot
+bacilli
+charitableness
+abated
+ceaseless
+beavers
+bookshelves
+commensurate
+appreciates
+basil
+cartoons
+aides
+buxom
+cages
+cantor's
+acceptances
+antiquated
+amalgamate
+babyhood
+beers
+conforms
+bouquets
+canner's
+baste
+cashed
+argue
+butcher
+backbones
+absolve
+crib's
+cafes
+abstracted
+book
+committees
+authentically
+conference
+antisera
+bourgeoisie
+attribute
+biddy
+autobiographies
+chivalrousness
+coverlet
+ambiguously
+calorie
+anhydrous
+alignments
+around
+archfool
+advance
+bedpost's
+affective
+contained
+amain
+bromides
+clogs
+bricker
+arduous
+consistent
+amidst
+confess
+complain
+anniversaries
+coasting
+cobwebs
+aries
+benchmark
+aviaries
+bombard
+boxers
+ashtray's
+assyriology
+blaze
+ablative
+chaos
+burro
+arguer
+ashamedly
+crier
+allocator's
+aggressively
+carts
+advisory
+airship
+alkali's
+backup
+chaining
+continue
+cartoon
+circumference
+breadwinners
+autonomy
+banking
+armored
+cabin
+chunks
+antigens
+blistered
+airers
+breakaway
+belief's
+belays
+coveting
+auburn
+careful
+anybody
+bumbled
+cautious
+adopter
+ballplayers
+anteater
+citadel's
+avails
+agent's
+caliphs
+bridgehead
+already
+caterpillar's
+coachman
+centralizing
+alphabet
+concede
+barbell
+breadboard
+ballast's
+activators
+attendance
+blandly
+calculator
+codeword
+addressee's
+avenue's
+alcoves
+alternately
+admonishes
+concentrate
+crossbars
+adjoining
+basset
+carbons
+beast
+blonde
+castle
+clarification
+bitch's
+abrasion's
+books
+amputate
+bicycler
+aphonic
+arraigns
+acquiesce
+buster
+chaperon
+advisements
+buyer's
+attack
+birthdays
+blazed
+confuser
+crag
+ballet
+airports
+bison
+counterexamples
+arteriole
+colony's
+adamantly
+blunders
+chivalrously
+adult's
+authors
+amplifiers
+counterfeited
+complicity
+astrophysical
+axolotl
+bash
+battleground
+butterfly's
+axioms
+allegory
+blitzes
+blindfold
+bufferrers
+approximating
+byways
+computations
+alight
+avoiding
+assurance's
+barrages
+canonicalized
+callously
+auditing
+authenticating
+bag's
+asters
+artistic
+bonanzas
+applaud
+certainties
+auto's
+concession's
+cascade
+chubbiness
+churchyard
+afternoons
+antigen's
+baron's
+amphibian
+banister
+capitalize
+approval
+appropriated
+bureaucrat's
+covets
+cloisters
+circulate
+bivalve's
+beta
+collector
+among
+cane
+birdlike
+attenuating
+conjunctions
+appliance's
+coral
+crucify
+abnormal
+combined
+classroom
+buckskin
+commissions
+abolishments
+arching
+croak
+americium
+associates
+car's
+assuringly
+agreer
+anticoagulation
+closure's
+corkers
+attend
+alphabet's
+awakening
+composedly
+attracted
+construed
+cricket's
+applicability
+autonavigator's
+chloroplast's
+ashen
+beggars
+corporation
+another
+conflicts
+bootlegs
+archeologist
+alcove's
+agitates
+cargoes
+creditor
+cops
+advisably
+coronation
+bourgeois
+crochets
+cropper's
+cramp's
+adulterer's
+corroborations
+changing
+combinatorics
+calm
+comprehensible
+blooms
+coolness
+copying
+blacksmiths
+commodore
+compulsions
+clump
+afterward
+crucified
+brooder
+buckets
+accelerating
+accented
+boat
+adventitious
+baseline's
+courier
+calamity's
+atoll's
+brutalizes
+bundled
+chairperson
+cheeses
+continuation
+celebrating
+apologists
+behest
+bumpers
+consonants
+circulation
+betraying
+commuting
+breezily
+circumstance
+coughing
+benefiting
+conquerors
+chemically
+commencement
+adjustors
+angel
+congratulate
+conspired
+causally
+bud's
+conquers
+augmented
+bereaving
+advisor
+articulation
+angler
+admission
+bide
+competitors
+amusement's
+collecting
+adder
+arithmetized
+cheek's
+apostrophe
+blockages
+clockwork
+bubbly
+apricot
+adjudicated
+banter
+amused
+breacher
+bracketed
+aimer
+comprehending
+bunkers
+canton
+arcane
+absent
+capitol
+consequence
+cognitive
+abjuring
+clever
+coronet
+anathema
+artichoke
+controls
+credulous
+acid
+crawled
+coupled
+boomtowns
+aspen
+acted
+anyhow
+burdensome
+backdrop's
+apocalyptic
+cornerstone's
+cautiously
+blisters
+conveniences
+arbor's
+accessories
+alleges
+clubs
+accompaniment
+blazes
+annually
+clique's
+beamers
+ballgown
+autumnal
+acreage
+conjunct
+balances
+consoling
+canvas's
+competent
+aggrieves
+although
+afraid
+clearly
+cognizance
+acoustic
+colleague
+causing
+absences
+closers
+airs
+cinder
+adversaries
+altruistic
+brews
+ceremonially
+appraisal's
+commissioners
+army's
+assists
+acceptor
+comparison
+cooling
+conveniently
+couching
+changes
+clinic's
+confronting
+adjunct's
+blandness
+alternates
+bunter
+consequent
+clean
+autos
+accumulators
+carver
+aprons
+awful
+bobbins
+blasphemy
+assuming
+abscess
+assemble
+cabinet
+atomics
+blacklists
+audacious
+assay
+anthropology
+barnstorm
+awl
+bumping
+assembles
+capture
+compensates
+coverable
+amend
+array
+continually
+absented
+cigarette
+antiresonance
+backspace
+branched
+appellate
+courtroom's
+alienated
+austerity
+cement
+asked
+antelopes
+cottager
+bluebonnets
+booze
+amendment's
+backslashes
+begun
+bijections
+cafe's
+boatload
+collect
+appeals
+belittles
+befit's
+beauty
+arrogated
+academia
+contagion
+blemishes
+coverlet's
+comfortability
+antecedent
+controllably
+congressman
+complicate
+coincide
+arrears
+clumped
+credited
+buffoon's
+catholic
+accompanist
+beauty's
+aster's
+blatantly
+bothering
+bewilder
+canceling
+carbonizer
+accentuation
+backstairs
+anticipations
+bestowed
+civilian
+blooming
+blunts
+airlocks
+argo
+blueprint
+aristocrat
+cakes
+complements
+ale
+camping
+army
+adrift
+bengali
+barely
+blasphemes
+briefcase
+brooches
+ailments
+blazers
+crevice's
+bankrupt
+archiver
+articulator
+alphabets
+bonds
+colliding
+candidate
+cashier's
+bellwethers
+airstrip
+announcers
+calendars
+corrupter
+aqueduct's
+axiom
+bathing
+blusters
+ascribed
+admittedly
+angrily
+analytical
+contraption
+convertibility
+abysmal
+cathedral's
+aversion's
+algol
+articulately
+breveted
+bickers
+chatterer
+adoptive
+bijectively
+cloudiest
+coarseness
+carted
+cocktail's
+capacious
+anion
+buffoons
+bleeding
+bedrock
+adventurer
+compositions
+camouflages
+brittle
+chip's
+aloe
+chorus
+cargo
+critical
+biographer's
+abject
+blasphemousness
+charmer
+betray
+blacking
+awoke
+allele
+bags
+claimant
+clover
+biographies
+confound
+advertises
+crafter
+cripples
+bygone
+concentric
+couldn't
+contentions
+acrid
+costume
+aft
+aesthetic
+bandits
+adducts
+constellations
+coffer's
+created
+commercial
+art's
+cookie's
+ammonia
+adjunct
+articulateness
+congratulated
+crags
+brandishes
+annual
+byword
+affection's
+college's
+aboriginal
+bikini
+buttering
+allotter
+console
+advent
+activates
+beverage
+april
+acceptable
+barrel's
+boys
+attractor
+azimuth
+critics
+ballooner
+aren't
+adulterating
+criticise
+abeyance
+automatically
+collaborative
+capabilities
+crawls
+anomaly's
+climaxed
+animately
+aroma
+belie
+attires
+argumentation
+baseboard
+bluebirds
+cactus
+byproduct
+balancer
+beholder
+conservationist's
+betrayer
+agony
+accusingly
+convict
+coaxes
+breeds
+agitated
+championship
+brevets
+auscultate
+counselling
+cornerstones
+america
+canoes
+aspirator
+compensate
+antiseptic
+bereave
+absinthe
+compose
+collide
+alabamian
+candid
+civilized
+clamps
+authoritarianism
+colonist
+bugging
+bins
+abashing
+battlers
+canning
+berate
+assembler
+amateurish
+boasted
+angriest
+bluffs
+colonize
+balcony
+bleat
+bustard's
+attenuate
+contagiously
+bicep
+babel
+beatniks
+brush
+analogy's
+audiologist
+assessment's
+camera
+arbitrary
+alleyway's
+concession
+constructions
+accompanies
+accretion's
+aroused
+charcoaled
+belated
+bottom
+bloodshot
+bisques
+advocate
+arabs
+cathodes
+adamant
+challenge
+absurdly
+abolitionist
+cleavers
+bludgeons
+bassinet
+clause
+coiling
+cask
+boob
+azalea's
+afghanistan
+carriages
+blade's
+bobby
+asinine
+acclaiming
+absorbed
+blacken
+cheating
+bootleg
+anonymous
+addict
+astonishes
+awry
+adequate
+categorization
+casks
+blaster
+aspirants
+abscesses
+airing
+assumptions
+capitalists
+board
+asynchronism
+body
+aye
+contraction
+athens
+arsine
+cohabitations
+below
+bows
+aviator's
+ampoule
+connective
+adapter
+authenticate
+blackboard
+brilliant
+appoints
+attics
+conquer
+boning
+comestible
+camped
+blonds
+aisle
+coals
+billboards
+characterizers
+crow
+clout
+admirer
+actuarially
+abstruse
+accessing
+bonfires
+clenched
+characteristic
+catching
+chars
+canons
+barrier
+championed
+butterflies
+completely
+calendar
+artwork
+abjections
+burgher's
+correlates
+arrivals
+accepters
+circuses
+breadboards
+accomplishment
+analyzed
+appropriates
+cancel
+bordering
+aperture
+civilizing
+assortments
+blackest
+blitz's
+copy
+commenced
+admirers
+cheers
+croppers
+cliff's
+circumstance's
+bibles
+buttressed
+consecutively
+birefringence
+automaton
+cheerless
+chopping
+ballooned
+convent
+acknowledgers
+appointing
+belies
+comeliness
+bangle's
+communication
+bisector
+avocations
+clique
+brainstem
+campusses
+allocators
+bramble's
+assaults
+commemorate
+appendix
+agent
+apportioning
+bottled
+artifact's
+block's
+archery
+bagatelles
+candies
+catched
+cognitively
+creepers
+concentrated
+bout
+balustrade
+abodes
+carrying
+confirming
+cannibal
+chinners
+carbonate
+anguish
+butt
+colons
+ablated
+corporation's
+cock
+convincers
+beret's
+bluish
+compressive
+authenticates
+commemorative
+bureaucracies
+coinage
+coach
+assigning
+concentrators
+capitalizing
+appraisals
+belaying
+candy
+blossomed
+bricks
+atonal
+analogue
+caters
+barbaric
+applique
+clink
+audio
+actress
+assyrian
+apprehension
+conversation
+apsis
+bedevil
+comics
+affricate
+comings
+buttress
+angering
+buckboards
+bombed
+adversely
+adequacies
+commended
+causeways
+adherers
+codes
+aquaria
+ape
+bulks
+compactly
+brainwashes
+bleats
+commandants
+conditionally
+adjourns
+clobbering
+allowances
+buildings
+complemented
+blanker
+algeria
+brief
+creak
+adductor
+categorizer
+approacher
+argument's
+clocked
+bedazzle
+cause
+coordinator
+buildup
+countenance
+abhorrer
+backtracked
+bogus
+closer
+broilers
+chirps
+adjournment
+belles
+bitingly
+befogged
+contexts
+amorous
+breeding
+abortions
+blockage's
+alternatives
+bouncing
+beryl
+ballistics
+banters
+carpenters
+auction
+bowdlerizing
+brazen
+bonuses
+circulated
+adultery
+archival
+bears
+baptized
+burglaries
+borrowing
+barbarous
+casher
+adolescents
+atrophic
+busily
+aerating
+coatings
+athenians
+casing
+consuming
+alphanumeric
+beaches
+bisection's
+conjecturing
+aspirate
+biography's
+accompany
+bureaucrat
+broomstick's
+colony
+coalesce
+clock
+bequeath
+collaborates
+belonging
+configured
+burlesques
+anode
+consenter
+bug
+counterpoint
+counts
+bangladesh
+analogical
+accident
+bulky
+affinities
+abysmally
+boorish
+assiduously
+cannisters
+autocollimator
+bassinet's
+barrelling
+blurts
+carbonize
+candle
+act
+addressees
+constraints
+boast
+complaining
+coziness
+avocado
+coolest
+blank
+beadles
+anytime
+covetous
+appellant's
+angers
+academies
+ageless
+chased
+constitution
+consonant's
+boosting
+ascetics
+aerosol
+apse
+blushes
+clang
+confers
+confidentiality
+coolie
+colon's
+chickadees
+badminton
+argonaut
+constituting
+aloha
+contracts
+broomstick
+brackets
+attendant's
+connection's
+conciseness
+abstractor's
+composes
+chaste
+assures
+conjuring
+barbital
+bunion
+bases
+clowns
+barrelled
+audience
+auctioneer
+complexly
+aviator
+conjectures
+backscatters
+cheerfulness
+communicating
+agreement
+bricklayers
+bilabial
+abstruseness
+cobol
+cooperating
+admit
+blundering
+accelerates
+assaulted
+concealing
+anachronism
+bowels
+butane
+anniversary's
+converts
+convoyed
+climates
+barriers
+clubbing
+additives
+bask
+confessing
+caravan
+colonizes
+continuous
+cheerlessness
+boggled
+armpit's
+bridgework
+allegro
+cricket
+cannon
+adoption
+clanging
+auscultations
+billowed
+alphabetize
+airlift
+appointee
+boyfriend
+chaotic
+corrections
+bonus
+contrasted
+convulsion's
+confessors
+adumbrating
+autocrat's
+coronary
+authentic
+barley
+brawling
+aegis
+appends
+bolshevism
+charted
+applicant
+aileron
+considers
+chin's
+alkyl
+amendment
+boulevard's
+avian
+breather
+canyons
+cannon's
+apportion
+badgered
+augers
+advisers
+censuses
+beveling
+aught
+arthogram
+anonymity
+appliance
+atmospheric
+anesthetizing
+ambulances
+blustering
+burnt
+chestnut's
+collects
+aliment
+anxieties
+championship's
+channeled
+arrival
+amassing
+corpse
+bedtime
+blackbirds
+cats
+constants
+chemistry
+brewery
+brother's
+boasts
+accentual
+bellwether
+bely
+courted
+baroness
+configure
+collection
+aviary
+achieves
+belfry's
+beech
+baseman
+bacterial
+contestable
+blond
+contracting
+comparably
+consultation's
+booster
+conspiracies
+belief
+candidate's
+boardinghouses
+connectivity
+check
+crazy
+collided
+assistant's
+critic
+bilateral
+cheapening
+appalled
+autopsy
+balled
+abnormally
+acquires
+aloofness
+backwaters
+combative
+computerizing
+craters
+contributorily
+behaved
+comers
+axiomatizations
+analogously
+banjo's
+cleanser
+capitalizes
+chamberlain
+aggregates
+amenorrhea
+begins
+condone
+cleaved
+bustard
+adsorb
+airedale
+bridles
+audited
+could
+amour
+checkbooks
+admiring
+arrested
+commerce
+asbestos
+can's
+clamping
+bathers
+acknowledgments
+census
+acrobat
+bargains
+apogee
+creaking
+busboy's
+additional
+chants
+circumvents
+afloat
+anyplace
+alumnae
+anions
+classroom's
+ballerina's
+convents
+angered
+climbers
+citation
+cools
+clamor
+capaciousness
+beatific
+abrades
+advocating
+coverings
+claims
+brethren
+advertised
+atrophies
+coffer
+beagle's
+brazenly
+bitterly
+clergyman
+braiding
+compressible
+convicting
+agreeableness
+antithesis
+cogently
+botanist's
+bidirectional
+bewilders
+airlock
+costumer
+blamelessness
+agglutinins
+catalyst's
+allocation
+annunciates
+borderings
+accomplishes
+confronters
+clinically
+breadbox's
+canvassed
+communicative
+coercing
+backpointer's
+bramble
+congregations
+crave
+courtesy's
+cocoon's
+admitting
+chieftains
+acclimate
+consequences
+cones
+contradict
+axolotls
+contractual
+artist
+atrociously
+consecutive
+berated
+bluing
+attacks
+choruses
+blatant
+balance
+amplifier
+assist
+analyst's
+ambler
+conveyance
+compromising
+baffler
+corridor
+bed's
+condoned
+boulevard
+anomie
+averages
+basics
+apologia
+cabbages
+concretes
+alcoholic
+aliased
+chocks
+balsam
+collies
+censor
+arouses
+conundrum's
+academically
+bent
+codings
+coastal
+allots
+acclaim
+citations
+cantor
+circularly
+boarder
+caribou
+biologist's
+cowling
+connects
+chasing
+bootstrap
+backscatter
+abstractly
+corrupt
+alleviating
+biasing
+abrade
+arraignment
+beaten
+blanketing
+compactness
+adage
+coincided
+borate
+bra's
+concepts
+bootleger
+christian
+argos
+basal
+abate
+campuses
+abridging
+confusers
+cabin's
+audition's
+amphibians
+attractively
+adhesive's
+ascendency
+beforehand
+ache
+brokers
+bowler
+criminally
+american's
+chock's
+artillerist
+appropriation
+characterization's
+artifices
+annoys
+constituents
+bottle
+beaned
+consisting
+beholding
+ceremony
+carpeted
+absolutely
+anorexia
+accredited
+azaleas
+amaze
+commit
+afflicting
+contriving
+adventure
+blood
+blabbing
+absoluteness
+appreciable
+approachers
+bumptious
+behavioristic
+anticipates
+adults
+barnyard's
+banging
+banana
+bilge's
+aware
+coheres
+bronchi
+commissioned
+arrogation
+confines
+core
+attenuation
+afterwards
+clearing
+applies
+alphabetized
+cemetery's
+campaigning
+abolishes
+brig
+cheer
+combers
+backtracker
+clinker
+clouds
+clog
+berries
+advising
+childish
+clobbered
+bride's
+astrophysics
+canker
+concatenate
+bite
+chagrin
+bodybuilders
+calamity
+admiralty
+councillors
+competitive
+assessments
+copper's
+cabling
+casket
+conducted
+backplane
+boyfriends
+bingo
+broader
+confiscates
+communicated
+baton
+cocktails
+albanians
+boardinghouse's
+brats
+akimbo
+categorizers
+comparator's
+blackbird's
+accidentally
+companion's
+clippings
+accosted
+bell's
+burly
+aggregations
+boathouses
+airmails
+abreactions
+changers
+carbon
+cleaners
+bookkeeping
+correlations
+backer
+conclusions
+brainstem's
+anecdotes
+chateau
+cogitating
+amphibious
+compounded
+completeness
+comptroller's
+boatswain's
+bolstered
+acquiescing
+actors
+calorie's
+adaptability
+abstractor
+bimolecular
+belly's
+automobile
+automotive
+analyticities
+awesome
+colonizer
+approximated
+chemist
+coronet's
+classmate
+anteater's
+altars
+adulthood
+amid
+assails
+blizzards
+corroborative
+biographer
+compartment
+blooded
+bipartisan
+bluff
+aloof
+bronchiole
+clincher
+congratulations
+ablation
+caught
+collier
+chooses
+antidotes
+artery
+clearance
+civility
+basketball
+auscultated
+behaviorally
+crowning
+autobiographical
+cheaply
+brutally
+agonizing
+clerk
+comprising
+baller
+confuses
+acquiesced
+astonishingly
+birthplace
+covered
+chopper
+combinator
+benignly
+bedside
+blasts
+billboard
+appraise
+aboveground
+comforter
+credulousness
+battlefield
+barefoot
+cleverness
+apparatus
+bartering
+bromine
+aerodynamic
+crabs
+chains
+airflow
+allegrettos
+armchairs
+blacklist
+approvals
+bait
+collections
+antecedent's
+airbags
+casted
+content
+conferrer's
+crouching
+coughs
+canal
+amphetamine
+augustly
+bedraggle
+arithmetic
+cataloger
+alluding
+credulity
+coffees
+crueler
+beautifully
+caresses
+correlative
+consul
+criticizing
+couched
+baths
+alchemy
+bargain
+accomplishments
+conveyer
+benevolence
+broil
+chilling
+axed
+attire
+collisions
+categorizes
+cited
+aeration
+accommodating
+coordinations
+boxcar
+cattle
+bullion
+afternoon's
+captures
+afghans
+comets
+component's
+ark
+bounds
+adjusting
+bravely
+capability
+chap
+absolving
+aspirating
+arcs
+conspires
+collaborated
+admonishment
+astounds
+brasses
+compromise
+changed
+consumers
+connoting
+buttonholes
+cordial
+anionic
+chastisers
+archive
+alleviate
+burglarize
+acquainted
+copiers
+cashers
+antisocial
+creations
+bookie's
+censure
+beadle's
+banded
+circled
+bulged
+cheapness
+attorney's
+chewer
+bookshelf's
+councillor
+assertion
+broom's
+contemplations
+club's
+balkans
+cherubs
+alas
+chair
+apologizes
+compartments
+beyond
+aptly
+censured
+allegros
+boosts
+card
+arithmetizes
+attainment's
+arrester
+anding
+asker
+compatibilities
+confidentially
+commissioning
+cleaner
+aversion
+cooperative
+battalion's
+cemented
+charity's
+conceited
+capable
+anymore
+computing
+aping
+chiefly
+affair
+beaners
+allying
+caption's
+antipathy
+causal
+abyss
+botchers
+burglarizing
+confidant's
+activator
+continent's
+census's
+brat's
+antagonism
+bedspring's
+antiserum
+charge
+connector's
+alike
+believable
+belfry
+cast's
+bureaus
+beneficiary
+abolisher
+artichoke's
+broadly
+concurrent
+alteration
+bookies
+crafts
+bays
+ass
+bouquet's
+ave
+chords
+crazes
+anemic
+appoint
+beets
+billing
+contest
+assassination
+allot
+brindled
+acute
+absolves
+adsorbed
+auxiliaries
+belatedly
+businesslike
+assassinates
+bookkeepers
+bevel
+adders
+automate
+archangels
+breakfasted
+changeability
+contested
+cradles
+combatants
+besieging
+certainty
+attempts
+bankrupting
+compiler's
+complications
+banquets
+ancestor's
+ail
+abbreviating
+compacter
+approvers
+acknowledges
+comically
+almonds
+counsellors
+calmness
+assailed
+crane's
+baser
+big
+corruption
+circuitry
+briefness
+community's
+banquetings
+alms
+bass's
+bellowing
+adoption's
+blockading
+compellingly
+builders
+befallen
+bombproof
+cartons
+chore
+crimson
+anther
+clucks
+assemblies
+beatitudes
+aspiration
+compels
+angst
+balancing
+bowstrings
+bayonet's
+butte
+biomedical
+casualness
+accolade
+blackberry's
+bunched
+affright
+clung
+burlesque
+bare
+corrected
+arbitrate
+cropping
+coherently
+bloodhound
+circularity
+courtesies
+articulating
+concluded
+analogy
+brutalized
+airmail
+cooperator
+cousins
+centralization
+bibbing
+beside
+bravo
+abductors
+cars
+bovines
+bump
+absconding
+chins
+chasers
+boundary's
+antecedents
+awed
+counselled
+aback
+attenuator's
+blazer
+bettered
+awaken
+abreast
+beagles
+artisans
+buckled
+credence
+control's
+bewhiskered
+calloused
+breathe
+collaring
+blossoms
+bring
+actualities
+bivalves
+animals
+cowboys
+constituency
+affordable
+acrobatic
+attiring
+boatswain
+concurrence
+abrasions
+babel's
+cowerers
+chiffon
+bostonian
+criterion
+blinds
+cased
+affections
+conditioners
+clutter
+accrued
+attractors
+botcher
+compunction
+bludgeoned
+censored
+allah's
+chronic
+burrs
+commodity's
+appraiser
+asserters
+cheaters
+besting
+anchorite
+combine
+afforded
+cigarette's
+bathrooms
+apostles
+chloroplast
+bootlegging
+bibliographical
+beans
+bylaw
+benefited
+brochure's
+cordially
+brashly
+beastly
+bologna
+alderman's
+burning
+billow
+convert
+buffaloes
+comparatives
+assistances
+camouflaged
+announcement
+bobwhite
+brawl
+adducted
+cavern's
+affectation's
+bandying
+brunette
+architect's
+aphorisms
+cremate
+bray
+billed
+conception
+battlefield's
+bandaged
+broaches
+bazaar's
+beatification
+bigotry
+clergy
+abstains
+befits
+bantering
+conceivable
+attachers
+analogies
+bimonthly
+august
+additionally
+confirmation's
+ballooning
+cardboard
+belle's
+counterparts
+candor
+bishop
+comprehension
+affronted
+bravura
+courting
+antidote
+buggies
+arisings
+appendix's
+bright
+categorize
+cooking
+agnostic's
+billets
+amok
+bewitching
+audiograms
+column's
+bussed
+checkbook
+alteration's
+atherosclerosis
+broached
+based
+cacti
+boardinghouse
+bowdlerized
+anchoritism
+achievement's
+bald
+cover
+codifications
+capacitor
+brashness
+causes
+acyclically
+argument
+boarders
+audiometer
+compute
+contribute
+crisply
+bitters
+circumvent
+assailant
+bosun
+buyers
+alibis
+blurting
+coasts
+bivouacs
+arrogating
+albanian
+attempted
+acquisitiveness
+applauding
+alfalfa
+cantors
+canonicalizes
+alkaloid
+bruising
+associativity
+budgetary
+carbolic
+clashing
+buffalo
+acorn
+analyzing
+backyards
+comedian
+betwixt
+aces
+chartered
+additivity
+becalm
+combat
+characterizations
+clinics
+bulbs
+bloc
+amenable
+civilian's
+breech
+attainment
+bounding
+compiler
+cotyledons
+billboard's
+caper
+aphasia
+chester
+combats
+biddable
+articulates
+caps
+assignees
+bifocals
+beady
+chinese
+assertions
+allegation
+championships
+accrue
+containment's
+croaking
+classifying
+annum
+brightened
+bits
+appointer
+besieger
+citizen's
+cerebral
+canto
+bakers
+capitol's
+authorizer
+blockaded
+anodizes
+alarmed
+buttressing
+attenuates
+bumptiously
+chronological
+colleges
+coward
+contraption's
+abstractions
+controversial
+boric
+bids
+agents
+backpointer
+bumped
+bottoms
+bowlines
+captivated
+article
+cliche's
+chases
+choker
+bremsstrahlung
+consult
+adjudged
+auctioneer's
+covers
+accurateness
+clues
+bugler
+bareness
+cedar
+alleviation
+anesthetically
+backpointers
+arched
+administered
+arrowhead
+continues
+asks
+confessor's
+allure
+backlogs
+childishness
+appointive
+covering
+conscience's
+bellows
+blanked
+considerations
+appalachian
+aerate
+budged
+city's
+accordion
+cliche
+collectors
+comprehensive
+boomed
+chariot
+baffling
+bunkmate's
+bumbles
+contaminating
+corroborating
+applications
+bursting
+cabbage
+befalling
+acquittal
+compromisers
+components
+arpeggio
+brothel's
+credibility
+begrudge
+confirmation
+academy
+appertains
+calibrates
+bureaucrats
+bawl
+costuming
+biography
+adoration
+cloaks
+aggregating
+business
+aphorism's
+carters
+admixture
+coexistence
+anomalously
+adapts
+amide
+affiliation
+capillary
+biscuit
+brainy
+bellhops
+chartings
+cohered
+austria
+champions
+basin's
+cascading
+consultants
+bison's
+admixed
+arithmetically
+clothed
+betterments
+conspirator's
+addition
+adolescence
+bolsheviks
+abominable
+breathless
+cozy
+arouse
+bumble
+about
+apace
+astronaut
+asteroid
+cable
+crab's
+beachhead
+assets
+analyses
+bisection
+coconuts
+alleys
+armament's
+bloodstains
+arpeggios
+apologist
+blithely
+anabaptist's
+beadle
+channelled
+confuse
+annoy
+beautifiers
+cheats
+clenches
+amuse
+bewail
+constitutional
+birth
+appendixes
+amazed
+berry's
+bilingual
+blustery
+amplification
+clogged
+blackmailing
+breakables
+adduct
+bondsmen
+conferred
+codewords
+bequeathal
+abundantly
+banner's
+atrocity
+congested
+closely
+absolution
+concatenations
+anarchic
+crag's
+communicators
+cavities
+comptrollers
+backstage
+bewailing
+charcoal
+conveyances
+collar
+bores
+briefest
+comments
+awning's
+associator's
+antarctica
+correspondingly
+bidden
+ad
+clings
+bit's
+apollo
+bulldogs
+chateau's
+amounting
+cogitates
+bellhop
+bookish
+bout's
+cannister
+bicep's
+asses
+beef
+battlefields
+consort
+auspicious
+breezy
+buried
+beverages
+approximates
+conduction
+bleakly
+blanketers
+ascertained
+absentminded
+bolivia
+births
+behave
+bilk
+breaths
+charter
+abstaining
+appareled
+boulder's
+breadwinner's
+correct
+accessed
+befitted
+adulterer
+axe
+activation
+betrothed
+asymptote
+bullet's
+clusterings
+baud
+bustling
+ballplayer
+constraining
+cleared
+brown
+affirmed
+agencies
+churches
+backyard
+burntness
+bronchioles
+charmers
+backscattered
+abridgment
+claw
+blow
+adjourning
+constantly
+brightens
+autobiography
+cards
+bypassing
+alcibiades
+concurrency
+chuckles
+bests
+belligerents
+adjustments
+bolshevik
+cabins
+astronomically
+cartridge
+boxcars
+boned
+bottomed
+burgeoned
+adjourned
+apprenticeship
+chastiser
+breached
+boycott
+butchered
+coordinating
+cottage
+brainwashing
+confinement
+bandies
+absentee
+collapses
+cruel
+along
+alloy
+convoying
+assignment's
+crisp
+ambidextrously
+blindfolded
+chilly
+condenses
+avers
+broiler
+anesthetics
+beaker
+cholera
+brag
+coffins
+cranked
+allocator
+brutality
+acquire
+blushing
+briar
+abolish
+crossovers
+broiling
+consolers
+beatify
+almanac's
+cooled
+commencements
+clasp
+committing
+condemnations
+altar
+by
+bombastic
+confederates
+bong
+concerted
+compilers
+counterproductive
+brig's
+accurate
+avidity
+cleavage
+blame
+conceive
+assessor
+consolingly
+concise
+computes
+alliance
+clucked
+axon's
+annunciating
+baseball's
+allusion
+brays
+auras
+blond's
+bronchitis
+ciphers
+blowing
+broth
+canonically
+baseness
+byline's
+appetite's
+colonists
+condensed
+cawing
+beaning
+broadening
+colonist's
+apocrypha
+chauffeured
+cored
+branding
+carrier
+assessed
+collegiate
+chirped
+accounted
+clubbed
+antibodies
+behalf
+alphabetizing
+conqueror
+alpine
+budgeters
+casements
+appropriate
+compliments
+cast
+accountancy
+cathedral
+conserve
+accorders
+arbitrarily
+cowing
+bars
+bagel's
+climax
+attention's
+cautioning
+centipede's
+almost
+abstractionist
+carpenter
+containing
+arab's
+courtesy
+carton
+accelerated
+bowman
+boastings
+banal
+bucking
+accomplishment's
+classification
+baldly
+abruptness
+calibrations
+blocs
+biking
+assenter
+adversities
+compartmentalized
+chemical
+attic
+audiogram's
+applauds
+crests
+bad
+bounce
+accelerators
+contemptuous
+attentions
+cancellation
+battles
+aging
+advantages
+anthologies
+answers
+bruised
+castes
+any
+coped
+arcade's
+adaptively
+arsenal's
+confessed
+controllability
+acceptor's
+abrogated
+abutted
+amusingly
+apology
+broils
+court
+boundaries
+bode
+collie
+adiabatic
+ambitions
+charged
+awfulness
+consorts
+botanists
+blurring
+absents
+batten
+backwoods
+breaks
+certified
+chattering
+admitted
+bathrobe's
+analogous
+corporacy
+bijection's
+combatant
+checked
+condition
+amoral
+bayed
+bedroom
+chanting
+antics
+charity
+blip's
+biped
+brilliance
+catchers
+booted
+anabaptist
+clothe
+comforted
+complaints
+coacher
+admissible
+bang
+concisely
+cookery
+capita
+assurance
+codifying
+benchmarks
+aunts
+commentaries
+anon
+applicators
+constructor
+associated
+abuses
+choicest
+confiding
+antislavery
+apron
+ashore
+cheerfully
+betterment
+administration's
+campaign
+cremated
+ambulatory
+bleacher
+afterthought
+barkers
+choir
+crossly
+conducive
+cache's
+battery
+actinium
+countryman
+cajoled
+appeasing
+beamer
+cleaves
+anthem's
+clearing's
+cooperated
+barker
+crowing
+apprising
+accusation's
+beginning
+associator
+booking
+caved
+amicable
+codify
+clairvoyant
+bevels
+becalms
+brawn
+bunkhouse's
+arms
+antiredeposition
+belt
+antiphonal
+cried
+brae's
+bridal
+acronym
+clay's
+checkers
+auxiliary
+bind
+compares
+agilely
+askers
+blankly
+antagonist's
+bimodal
+captivation
+creditable
+concentration
+calling
+bartender's
+autopsied
+correspondent's
+carnivals
+abjure
+bystander's
+bungle
+chanticleers
+conceding
+burghers
+boards
+accessions
+compensations
+arabian
+churn
+crowed
+centering
+abnormalities
+courtier's
+congregation
+aberrant
+annexing
+blockhouse
+anthropomorphic
+bedder's
+abutting
+conundrums
+affiliated
+cancellation's
+bolts
+ballgowns
+augmenting
+bureaucracy's
+bootlegged
+audiometers
+blueberry
+affliction
+appreciation
+codifier
+amasses
+countering
+crackle
+canoe
+consuls
+breathes
+broiled
+amalgam's
+bodes
+ballooners
+coating
+corollaries
+amphibology
+agenda's
+chafing
+alcoholics
+accredit
+anisotropy
+anchovies
+carriers
+acceptors
+betrayed
+buttocks
+busy
+bunny
+cropper
+accreditations
+bumblebee's
+adhesives
+civilize
+accedes
+abroad
+arch
+crept
+cotyledon
+alphabetic
+braille
+amateur
+adjure
+ascertaining
+budge
+adulterate
+additive's
+cardiac
+born
+brewed
+borneo
+bun's
+blue
+cackled
+acclimates
+airline
+blinder
+brokerage
+communicant
+central
+aggrieved
+asynchronous
+bough's
+acidly
+archaeology
+complementary
+animator's
+bodyguards
+climbs
+apathy
+constellation's
+acculturate
+archaeologists
+contingents
+control
+anglophilia
+billings
+corporate
+athlete
+accusing
+appear
+announcing
+accordions
+computerize
+combinations
+bile
+abut
+charger
+columnize
+computer
+blacks
+converges
+blamer
+bulked
+convincingly
+checker
+correspondence's
+accelerate
+accessible
+conceivably
+abscissa's
+adsorbs
+anglophobia
+anomic
+casters
+churning
+crease
+brood
+appendage
+bulwark
+bombers
+arcaded
+breadboard's
+aphrodite
+color
+commodore's
+answerer
+bobolink
+cloth
+conversion
+clime
+artery's
+birthplaces
+compiled
+arrack
+beetles
+bobs
+compatibility
+cocoon
+counterpart
+audible
+colonies
+airport's
+beige
+cogent
+bromide
+begrudging
+acids
+crucifies
+beggary
+archipelagoes
+availably
+counterfeiter
+blanketed
+amending
+accelerometer's
+advisors
+byway
+alignment
+amber
+austin
+copyrights
+beaus
+brigantine
+comforts
+appointment's
+crawler
+bangles
+contemplation
+concur
+characterizing
+censoring
+charters
+catalogues
+appropriately
+builds
+aeronautic
+confused
+comber
+axially
+cackler
+coercive
+ambassador
+arcades
+brash
+amorality
+belittling
+battling
+bloodied
+acrylic
+bantered
+clasped
+carcass
+archangel
+annunciators
+aristotle
+boulder
+burglarproofs
+chooser
+abilities
+calmest
+bach
+always
+blaspheming
+crossover
+bakeries
+clocks
+ankle's
+accidental
+arbitration
+chirp
+aeronautical
+boy's
+acidic
+bowline
+anonymously
+cod
+couplers
+beautifications
+bluffing
+backarrows
+brow
+covenant
+acronym's
+banning
+albeit
+ascetic
+burn
+animator
+beatnik's
+coveted
+cipher's
+broke
+cap
+bellman
+bulldozed
+clarifies
+bathes
+blip
+availabilities
+booth
+clangs
+audiences
+cathedrals
+confounding
+bigot's
+beecher
+arts
+company
+attributed
+avenged
+bawling
+caustics
+alee
+bordello's
+banks
+affords
+complied
+commas
+collaborate
+aquatic
+ambitiously
+burro's
+beard
+bittersweet
+candlestick
+bylaws
+broadcastings
+believe
+barrels
+braying
+certifications
+contrasts
+crashes
+audition
+confine
+bucks
+abates
+bureaucracy
+ambles
+besiege
+broccoli
+antibiotics
+attenuators
+accelerometer
+caste
+bib's
+browbeaten
+appurtenance
+bauxite
+asceticism
+case
+chewing
+aerator
+achievements
+barricade's
+agglutinates
+bewildering
+cartridge's
+children
+bufferrer
+actuator
+converging
+bolted
+chat
+combs
+chemist's
+adduced
+algebraic
+circular
+bloated
+conclusion
+burgess
+certifies
+absconds
+comprise
+benzedrine
+bumbler
+banjo
+allow
+appealing
+cooperation
+abraded
+chaperoned
+biracial
+braced
+censurer
+acoustician
+appraised
+benefitting
+constructs
+convertible
+administrative
+asocial
+area
+creature
+besetting
+crater
+begrudgingly
+blanket
+ablest
+alba
+airplanes
+allowing
+briefly
+beneficences
+concurring
+adjective's
+cork
+aerospace
+anomalies
+asher
+auger's
+boilers
+abhorring
+broadenings
+bladder
+belay
+approver
+abdominal
+commends
+cringing
+billiards
+beater
+auspice
+contrasters
+bights
+absentees
+atoll
+cooler
+activator's
+basement
+burgeon
+allusiveness
+codeword's
+bandage
+contemplate
+adopted
+coping
+carving
+baptism
+colds
+altos
+background
+closet
+commuted
+acre's
+aliens
+council
+cans
+cheese
+ally
+aseptic
+belgian's
+crossbar
+addressed
+commons
+call
+careers
+breakfasting
+brazilian
+catholics
+bachelors
+consultant
+brighter
+crossword's
+burglar
+avoidable
+batting
+cigar
+amps
+axiological
+combed
+comforters
+albumin
+cookies
+booming
+archaize
+canton's
+bunkmate
+combination
+bondsman
+anxiously
+affixed
+associatively
+cigar's
+backstitch
+calls
+captivates
+commodities
+atmosphere's
+asserting
+beaver
+beatnik
+container
+activists
+consoler
+commoner
+buttonhole's
+abhorred
+aggregate
+cliff
+antidisestablishmentarianism
+broach
+ambling
+comer
+bited
+advocated
+behaves
+bosom
+continents
+conserves
+bashful
+ago
+backarrow
+circumventable
+avocados
+briar's
+annuls
+barnstorming
+aired
+carry
+crossbar's
+aspire
+beards
+abides
+cliques
+completes
+brassiere
+absorbs
+annul
+chairman
+baron
+battens
+africans
+abatement
+colonization
+carries
+borough
+allurement
+breakfasters
+alkali
+acoustically
+corners
+capturer
+casualties
+asphyxia
+animized
+administrator
+belying
+basketballs
+bylines
+bandit
+autopsies
+braining
+contradiction's
+antic
+butted
+bacillus
+blurt
+conditioned
+backers
+agreeable
+almanacs
+cider
+chicken
+chambers
+clutch
+assailant's
+conveyers
+amazers
+beribboned
+breeder
+caveat's
+buffers
+combination's
+ampersand's
+crafting
+clanged
+caving
+aspirant
+butlers
+adjective
+auckland
+announced
+creators
+caches
+baseline
+codifies
+baptism's
+coarsened
+cohesion
+airman
+avenge
+backaches
+budgeted
+armpit
+bicycled
+converged
+besmirched
+autonomic
+coming
+assemblage's
+chained
+admissions
+alcoholic's
+branches
+bunk
+anciently
+bloods
+adventurers
+amazes
+coloring
+abstractors
+adaptation's
+boar
+amulet
+agglutination
+conquerable
+booker
+confronts
+barometer's
+bedbugs
+barricades
+cheap
+bewitch
+circus
+backward
+archeology
+automobiles
+bending
+amino
+beckoning
+admits
+berliners
+borer
+clambering
+atomizing
+banner
+blissfully
+catchable
+breakdown
+abjured
+computerized
+chaplain's
+amphitheater
+ballot's
+craziness
+croaks
+counties
+adopting
+breast
+airstrip's
+basin
+contemplating
+commitments
+critique
+appears
+bellies
+baccalaureate
+abducted
+blackened
+animosity
+appraising
+antiquity
+assistants
+asthma
+bootstrapping
+bounties
+agleam
+advertisements
+benches
+artful
+broadens
+chuck's
+betrayal
+blasphemed
+brooms
+castled
+coroutine
+conscious
+beetle
+banshee
+advertising
+baring
+awakens
+balm
+billions
+compromisingly
+ballroom's
+burrower
+bayou's
+ambiance
+beheading
+bought
+adagios
+adornment's
+anointed
+abolishment's
+anesthetizes
+badly
+boyishness
+consultant's
+cheek
+cannibals
+breakdowns
+assured
+agates
+bicker
+appliances
+cafe
+bagpipes
+adrenal
+combinatorially
+belligerence
+bricked
+adjacency
+aimless
+crook
+cherry's
+assessing
+brushfire
+cormorant
+captained
+blundered
+conceptually
+congress's
+contraster
+ambushes
+bronze
+autotransformer
+corded
+brisker
+contently
+announcements
+bullet
+apportionments
+columnized
+canon
+conservation
+algaecide
+blackening
+compassion
+beaks
+constructibility
+chapter
+abscond
+costly
+bacon
+coldest
+aptness
+billionth
+altercation
+approbation
+alternator's
+criticizes
+befell
+canopy
+buoyant
+brazil
+anticipate
+absenteeism
+champion
+aesthetics
+cadence
+betroth
+confidants
+bean
+braid
+aphids
+cluttering
+cantankerously
+bloom
+barbarity
+clawing
+bogged
+agreed
+asia
+abrasion
+corporals
+baselines
+box
+chartering
+apotheosis
+ampersands
+conceit
+creamer
+adhered
+circuit
+carpet
+accompaniments
+boomerangs
+blindness
+chipmunks
+bewitched
+allocate
+bicycle
+compacted
+cab
+calcium
+cellists
+apex
+borrows
+completed
+brightly
+constables
+ascertains
+conspiracy's
+badgers
+bunion's
+anabaptists
+broadband
+clefts
+accepted
+benched
+catalogued
+cadenced
+alliteration
+acquiesces
+boxcar's
+athlete's
+bracing
+cremations
+analysis
+crossings
+assorts
+apologize
+brazier
+configurable
+basking
+craves
+belle
+conversation's
+belligerent
+anesthetize
+brewers
+cackles
+adventures
+airlock's
+booklet's
+apply
+anecdotal
+bewails
+computer's
+autographs
+acclimated
+coefficients
+avidly
+beckoned
+broadener
+bulk
+blacklisting
+belly
+acquit
+convoy
+achiever
+aversions
+advisor's
+captor's
+camel's
+asset's
+advantageous
+basement's
+confident
+crescents
+compiling
+butler's
+cartoon's
+adaptive
+chlorine
+abets
+cruelly
+amiable
+baleful
+ceiling's
+adumbrated
+cherry
+aspirant's
+cashing
+candidly
+chaff
+bitter
+brim
+alcove
+bulb's
+carbonizers
+citizen
+attic's
+breed
+consumer
+conferrers
+accommodations
+contrapositive
+beget
+brilliantly
+attentionality
+continuation's
+bosses
+brave
+configurations
+benediction's
+conferring
+accessor's
+bobolinks
+bulled
+cleanness
+algorithm
+advancements
+altogether
+accumulations
+albacore
+bowing
+belching
+apical
+consequentiality
+bagpipe's
+ambrosial
+bullying
+cleans
+attendance's
+complimenter
+blink
+cager
+assembling
+coat
+allowable
+astringent
+antiresonator
+cardinal
+clicks
+commentator's
+blossom
+categorizing
+amphibian's
+commonality
+consonant
+classics
+affable
+accorded
+aimlessly
+archetype
+administerings
+boldness
+anatomy
+apprehensively
+absence's
+actuality
+attempting
+categorical
+checkpoints
+allemande
+corer
+behoove
+bleaches
+bough
+blended
+blotting
+baptists
+courtship
+benevolent
+bumptiousness
+chum
+anguished
+auto
+career
+bookstore's
+carbonized
+autocratically
+cherishes
+attendees
+contends
+anastomotic
+attributing
+abbot
+came
+blunt
+battlement's
+affection
+coordination
+annotate
+besets
+bucked
+boasting
+benedictions
+adherent
+blimp's
+acknowledging
+cleverly
+applejack
+annexation
+bat's
+cantons
+beetled
+closed
+country
+creatively
+bakery
+blasphemously
+chalking
+bold
+attended
+crasher
+backtrackers
+artist's
+bracelet's
+allowably
+affiliating
+arrant
+brayed
+barbells
+consigned
+abolishers
+climatic
+atrophying
+amigo
+arsenal
+ascribes
+converses
+aura's
+allotted
+bliss
+classical
+bigger
+ahead
+chopped
+blade
+casualty
+acceded
+bottling
+axon
+casement's
+battlefront's
+convinces
+alerting
+advertisers
+blemish
+agglutinating
+commonplaces
+autocorrelation
+armistice
+crediting
+besmirch
+amplify
+auscultation
+befalls
+called
+alnico
+arbiter's
+abort
+argonauts
+cessations
+cribs
+blare
+aforementioned
+condemners
+contaminated
+complained
+bootstrapped
+criticism
+cooperatively
+binding
+bullies
+basins
+contrived
+assort
+adulterously
+booms
+abandons
+also
+appealed
+count
+contributed
+beet
+crashers
+carryovers
+clays
+blackness
+cosmetics
+awkward
+blurted
+bothers
+analyzer
+backups
+alarming
+bicyclers
+credit
+abrogate
+audience's
+architecturally
+alibi's
+complicator's
+chuckle
+corporately
+banishment
+communist's
+birdie
+asymptotic
+break
+braze
+benzene
+bridgework's
+beak
+agitators
+collateral
+arranges
+bayonet
+breathlessly
+counsellor
+creates
+convulsions
+backdrops
+applicants
+altercation's
+commission
+breathtakingly
+corresponds
+backdrop
+armaments
+build
+biannual
+buttoning
+computational
+chaired
+bather
+critically
+amanuensis
+bantus
+confidential
+annoyance's
+carder
+authorizing
+acquits
+bipeds
+cocktail
+cinnamon
+burros
+brocade
+abdomen's
+creative
+acquisition's
+abdomen
+baited
+aristocratically
+alive
+committed
+arrestor
+cleaving
+comedy's
+baggage
+bra
+adaptors
+afoot
+bulls
+contoured
+amalgam
+comprehensibility
+amortizes
+biographical
+confront
+covert
+cravat
+animates
+booksellers
+bypass
+bootleggers
+bedfast
+affair's
+buzzer
+bellowed
+aligning
+bystander
+acclimatized
+accomplishing
+against
+blankness
+adopt
+addressing
+croaked
+boaters
+behooves
+audits
+boatyard
+cruise
+agnostics
+ailing
+anchorage's
+adaptations
+conceptualize
+advised
+cries
+bank
+actuators
+brazing
+catalyst
+beachheads
+aplomb
+compressed
+amputated
+contractor's
+bedspreads
+bowed
+coon
+chaplain
+cannons
+coffers
+assembly
+bouffant
+converters
+ampoule's
+borderland
+archaeologist
+blankets
+conserving
+avalanche
+assortment's
+aspic
+axle
+bereaves
+allowance
+carbonization
+bartender
+clawed
+coincidental
+appeared
+chipmunk's
+countable
+authenticators
+bestow
+alps
+caw
+aniseikonic
+avows
+blackmails
+controlling
+correlating
+audiologist's
+bit
+approving
+collapse
+coon's
+cleave
+atheists
+brigade
+autopilots
+bounteous
+commercialness
+accede
+cavalierness
+accustoming
+burnishing
+clobber
+aspirates
+brochures
+cellar's
+communes
+berkelium
+chickadee
+cobweb
+circumstances
+chose
+comprehend
+baritone's
+aggravation
+adopts
+cruelty
+and
+axer
+cautioned
+carbonic
+babbles
+bet
+charitable
+computable
+cardinality
+amenities
+confiscating
+catcher
+audaciousness
+complaint's
+cooperator's
+buddies
+baking
+constant
+classmate's
+accentuate
+choices
+crop's
+authorization's
+comedy
+brushy
+brotherly
+canals
+ads
+causeway
+abrading
+cemetery
+autocrat
+briefing
+abdomens
+apparition's
+consummately
+alkaloids
+bulkheads
+cravats
+bales
+campaigners
+bagpipe
+accentuates
+arm
+barometric
+bas
+agitator
+behavior
+abutters
+blockades
+alertness
+civilizes
+chinner
+anthropologist
+artificialness
+balkanize
+automates
+cackling
+anarchists
+amounted
+cereal's
+anodized
+cobblers
+acknowledgment's
+blear
+copper
+alphabetics
+blackboards
+apish
+answering
+afternoon
+arbors
+accused
+chickens
+agency's
+contractors
+contraptions
+cosmology
+anomaly
+bandstand
+attempter
+account
+challengers
+admiration
+calculations
+autocracy
+analyticity
+accord
+buildup's
+commonly
+babbling
+adjudication's
+attain
+ameliorating
+candlestick's
+chronicles
+align
+consensus
+agate
+adulation
+aspirated
+conclusive
+biologists
+cracks
+conform
+chambered
+beryllium
+connote
+amusing
+aquifer
+ankle
+batteries
+conservationists
+accountants
+apiaries
+actinometer
+beckon
+clearances
+clouded
+antitoxin's
+consolation's
+collectives
+boxtops
+bombarded
+bombarding
+bluest
+allusion's
+construction
+ballpark's
+codified
+coincidence
+celebration
+chip
+beginner's
+algerian
+boo
+athletics
+condenser
+bytes
+beauties
+concerts
+conductors
+awl's
+agitations
+buttered
+codifier's
+armory
+ascii
+aspirin
+arthritis
+bylaw's
+conformity
+blasting
+coinciding
+aphid's
+ceremonial
+banisters
+bristle
+bid's
+buckboard's
+bandied
+biopsy
+ballrooms
+chloroplasts
+bidding
+boil
+algebra
+constellation
+chuck
+cringes
+cleanliness
+apron's
+cosmopolitan
+bashes
+abusive
+believer
+conductor
+butters
+breweries
+allotment
+artfulness
+bunkmates
+blares
+connections
+anticipated
+classifies
+commandments
+beginnings
+bend
+brambles
+blacked
+basketball's
+affectionate
+cocoa
+anacondas
+busing
+bone
+birchen
+creamed
+aged
+commemorates
+brother
+aberration
+crawl
+actuarial
+apology's
+alumnus
+adversary's
+anaphoric
+aspiring
+consciousness
+cokes
+assignee
+boxing
+blanched
+camels
+contemporaries
+carnivorous
+assigned
+apologetically
+corpus
+accusations
+beefing
+champaign
+claps
+adherence
+aloft
+complication
+citizenship
+becomes
+compound
+arabesque
+bronchiole's
+appraises
+breach
+collection's
+botched
+bitches
+biblically
+bronchial
+amalgamating
+commoner's
+barbarian's
+arrange
+cradle
+conformed
+complimentary
+anodes
+cowering
+anoint
+brocaded
+bedazzling
+avionics
+burnishes
+bulkhead
+chink
+consciously
+contract
+clinch
+applicant's
+awning
+aloud
+chandelier's
+cathode's
+babble
+arachnid
+biplane
+clamorous
+assuredly
+consented
+axing
+avenger
+commence
+braving
+brandishing
+careless
+burningly
+boatsman
+channelling
+clarifying
+beggar
+berates
+cite
+cowered
+buffer
+condescending
+admixes
+bettering
+bedazzlement
+cord
+burglary's
+characteristics
+aptitudes
+adieu
+agree
+bends
+ceremonies
+accustom
+accessibly
+commanders
+ask
+cavalier
+brayer
+affront
+courser
+becoming
+carves
+configures
+beasts
+biters
+conditionals
+bodybuilding
+accretions
+chapter's
+cleverer
+corning
+brat
+classes
+almsman
+consumptive
+antique
+comprised
+beholders
+anthropologically
+buns
+bridge
+accretion
+acceptance's
+confederacy
+armorer
+argumentative
+crossword
+cowslip's
+analog
+counselor
+chastised
+barters
+clerked
+americas
+cloud
+aide
+alternators
+admitters
+bagatelle
+bridges
+civilizations
+anion's
+briton's
+apartment
+acquaints
+consummation
+chord
+coated
+barer
+carnivorously
+cheering
+allergy
+capacity
+classrooms
+assistantships
+complimented
+amphibiously
+commandment's
+audiogram
+corked
+badness
+bewildered
+assemblage
+backplane's
+asterisk's
+blob
+coexisting
+approximations
+counteractive
+barns
+adherer
+aborigine's
+brooding
+conceived
+adjustor
+cabled
+belongings
+breadwinner
+blot's
+brightness
+consigning
+barflies
+bisector's
+basing
+complement
+conditioner
+brazes
+crank
+antinomian
+crowd
+accelerometers
+befitting
+backlash
+bastions
+acceleration
+briefcases
+correlated
+baffle
+chew
+accosts
+agreeably
+bassinets
+cogitate
+concerning
+contouring
+broadside
+compact
+brainstems
+atom's
+bondage
+biter
+archdioceses
+basis
+bellboy
+blobs
+barons
+clods
+campaigned
+assessors
+bubbles
+annal
+casual
+altercations
+clog's
+biased
+arianism
+ancillary
+collaborator
+butter
+bureau
+blending
+antiquities
+brands
+activism
+crews
+beats
+broad
+buds
+baggers
+cobbler's
+condemns
+cabinets
+bomber
+blinders
+center
+contacted
+bewilderingly
+circulates
+burnings
+achieved
+belch
+barbecue
+angles
+comparative
+befuddle
+cherished
+chapters
+chanter
+allegation's
+armstrong
+converter
+combinatoric
+angrier
+brooks
+clinked
+blubber
+appointments
+compactor
+cleaned
+car
+contention's
+artificial
+cramp
+consistency
+aborting
+collaboration
+awarders
+crippled
+anaphora
+creamy
+buoyed
+baptistery
+altered
+anchoring
+alterer
+adjuring
+beacon's
+commencement's
+ascension
+candidness
+clouding
+cigars
+boiled
+christmas
+contingency's
+alum
+apparel
+contributors
+anisotropic
+annotations
+bushwhacks
+brides
+continuities
+carton's
+blurred
+antibody
+aorta
+blankest
+combinator's
+banish
+breaches
+accumulates
+bowling
+braver
+antibacterial
+cooperators
+banked
+compensated
+chartable
+conjunctively
+antelope's
+bluefish
+annoying
+composed
+barges
+biconcave
+australia
+ballparks
+bearers
+acknowledged
+advocates
+crossed
+competitor
+blaming
+andorra
+baritone
+collaborator's
+accessibility
+complains
+commentator
+bibliography
+conference's
+atmosphere
+agrees
+bedstead's
+ardor
+character's
+conventionally
+arena's
+chokes
+channel
+bludgeon
+convoys
+condense
+beautifier
+ailerons
+compacts
+black
+bell
+completions
+ballroom
+besotting
+conservatives
+adventured
+bulldog's
+conversely
+arroyos
+compositional
+alternative
+association
+broods
+beefy
+consolidated
+balms
+acquaint
+animal
+certificate
+combustion
+aims
+cracker
+abetted
+cautionings
+bread
+attains
+agriculturally
+courtyards
+bawls
+country's
+creator's
+checkbook's
+cliches
+colonizing
+biennial
+aqueous
+craftsman
+contrivances
+algorithmic
+crate
+barefooted
+bodily
+anthropologist's
+but
+climate's
+campers
+crackled
+awakes
+conveyed
+borrowers
+approached
+avoids
+crib
+albania
+bathrobe
+admonitions
+architectures
+consenting
+anastomosis
+blob's
+actual
+arrowhead's
+accountable
+allegiances
+commendation
+appearers
+comply
+concurs
+controversy
+abstracting
+artifact
diff --git a/libdb/test/wrap.tcl b/libdb/test/wrap.tcl
new file mode 100644
index 0000000..eb900f9
--- /dev/null
+++ b/libdb/test/wrap.tcl
@@ -0,0 +1,71 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id$
+#
+# Sentinel file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for some random process that's not DB's and is not
+# exiting.
+
+source ./include.tcl
+source $test_path/testutils.tcl
+
+# Arguments:
+if { $argc < 3 } {
+ puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
+ exit
+}
+
+set script [lindex $argv 0]
+set logfile [lindex $argv 1]
+set args [lrange $argv 2 end]
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Create a Tcl subprocess that will actually run the test.
+set t [open "|$tclsh_path >& $logfile" w]
+
+# Create a sentinel for the subprocess.
+set childpid [pid $t]
+puts "Script watcher process $parentpid launching $script process $childpid."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+puts $t "source $test_path/test.tcl"
+puts $t "set script $script"
+
+# Set up argv for the subprocess, since the args aren't passed in as true
+# arguments thanks to the pipe structure.
+puts $t "set argc [llength $args]"
+puts $t "set argv [list $args]"
+
+puts $t {set ret [catch { source $test_path/$script } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+
+set ret [catch {close $t} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
+exit $ret
diff --git a/libdb/txn/txn.c b/libdb/txn/txn.c
new file mode 100644
index 0000000..2e54d83
--- /dev/null
+++ b/libdb/txn/txn.c
@@ -0,0 +1,1428 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <stdlib.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+#define SET_LOG_FLAGS(dbenv, txnp, lflags) \
+ do { \
+ lflags = DB_COMMIT | DB_PERMANENT; \
+ if (F_ISSET(txnp, TXN_SYNC)) \
+ lflags |= DB_FLUSH; \
+ else if (!F_ISSET(txnp, TXN_NOSYNC) && \
+ !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) { \
+ if (F_ISSET(dbenv, DB_ENV_TXN_WRITE_NOSYNC)) \
+ lflags |= DB_WRNOSYNC; \
+ else \
+ lflags |= DB_FLUSH; \
+ } \
+ } while (0)
+
+/*
+ * __txn_isvalid enumerated types. We cannot simply use the transaction
+ * statuses, because different statuses need to be handled differently
+ * depending on the caller.
+ */
+typedef enum {
+ TXN_OP_ABORT,
+ TXN_OP_COMMIT,
+ TXN_OP_DISCARD,
+ TXN_OP_PREPARE
+} txnop_t;
+
+static int __txn_begin_int __P((DB_TXN *, int));
+static int __txn_end __P((DB_TXN *, int));
+static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, txnop_t));
+static int __txn_set_timeout __P(( DB_TXN *, db_timeout_t, u_int32_t));
+static int __txn_undo __P((DB_TXN *));
+
+#ifndef db_create
+/*
+ * txn_abort --
+ * txn_begin --
+ * txn_commit --
+ *
+ * When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+ * were the interfaces applications would likely use and not be willing to
+ * change, due to the sheer volume of the calls. Provide wrappers -- we
+ * could do txn_abort and txn_commit using macros, but not txn_begin;
+ * since the name of the field is txn_begin, we didn't want to modify it.
+ *
+ * The issue with txn_begin hits us in another way. If configured with the
+ * --with-uniquename option, we use #defines to re-define DB's interfaces
+ * to unique names. We can't do that for these functions because txn_begin
+ * is also a field name in the DB_ENV structure, and the #defines we use go
+ * at the end of the db.h file -- we get control too late to #define a field
+ * name. So, modify the script that generates the unique names #defines to
+ * not generate them for these three functions, and don't include the three
+ * functions in libraries built with that configuration option.
+ *
+ * EXTERN: int txn_abort __P((DB_TXN *));
+ * EXTERN: int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ * EXTERN: int txn_commit __P((DB_TXN *, u_int32_t));
+ */
+int
+txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ return (txnp->abort(txnp));
+}
+
+int
+txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ return (dbenv->txn_begin(dbenv, parent, txnpp, flags));
+}
+
+int
+txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ return (txnp->commit(txnp, flags));
+}
+#endif /* !db_create */
+
+/*
+ * __txn_begin --
+ * This is a wrapper to the actual begin process. Normal transaction
+ * begin allocates a DB_TXN structure for the caller, while XA transaction
+ * begin does not. Other than that, both call into common __txn_begin_int
+ * code.
+ *
+ * Internally, we use TXN_DETAIL structures, but the DB_TXN structure
+ * provides access to the transaction ID and the offset in the transaction
+ * region of the TXN_DETAIL structure.
+ *
+ * PUBLIC: int __txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ */
+int
+__txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+ DB_TXN *txn;
+ int ret;
+
+ *txnpp = NULL;
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_begin", DB_INIT_TXN);
+
+ if ((ret = __db_fchk(dbenv,
+ "txn_begin", flags,
+ DB_DIRTY_READ | DB_TXN_NOWAIT |
+ DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "txn_begin", flags, DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+ txn->flags = TXN_MALLOC;
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(txn, TXN_DIRTY_READ);
+ if (LF_ISSET(DB_TXN_NOSYNC))
+ F_SET(txn, TXN_NOSYNC);
+ if (LF_ISSET(DB_TXN_SYNC))
+ F_SET(txn, TXN_SYNC);
+ if (LF_ISSET(DB_TXN_NOWAIT))
+ F_SET(txn, TXN_NOWAIT);
+
+ if ((ret = __txn_begin_int(txn, 0)) != 0)
+ goto err;
+
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ if (LOCKING_ON(dbenv)) {
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ if (parent != NULL) {
+ ret = __lock_inherit_timeout(dbenv,
+ parent->txnid, txn->txnid);
+ /* No parent locker set yet. */
+ if (ret == EINVAL) {
+ parent = NULL;
+ ret = 0;
+ }
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Parent is NULL if we have no parent
+ * or it has no timeouts set.
+ */
+ if (parent == NULL && region->tx_timeout != 0)
+ if ((ret = __lock_set_timeout(dbenv, txn->txnid,
+ region->tx_timeout, DB_SET_TXN_TIMEOUT)) != 0)
+ goto err;
+ }
+
+ *txnpp = txn;
+ return (0);
+
+err:
+ __os_free(dbenv, txn);
+ return (ret);
+}
+
+/*
+ * __txn_xa_begin --
+ * XA version of txn_begin.
+ *
+ * PUBLIC: int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+ */
+int
+__txn_xa_begin(dbenv, txn)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+{
+ PANIC_CHECK(dbenv);
+
+ memset(txn, 0, sizeof(DB_TXN));
+
+ txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+
+ return (__txn_begin_int(txn, 0));
+}
+
+/*
+ * __txn_compensate_begin
+ *	Begin a compensation transaction.  This is a special interface
+ * that is used only for transactions that must be started to compensate
+ * for actions during an abort. Currently only used for allocations.
+ *
+ * PUBLIC: int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp));
+ */
+int
+__txn_compensate_begin(dbenv, txnpp)
+ DB_ENV *dbenv;
+ DB_TXN **txnpp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+ txn->flags = TXN_MALLOC;
+ F_SET(txn, TXN_COMPENSATE);
+
+ *txnpp = txn;
+ return (__txn_begin_int(txn, 1));
+}
+
+/*
+ * __txn_begin_int --
+ * Normal DB version of txn_begin.
+ */
+static int
+__txn_begin_int(txn, internal)
+ DB_TXN *txn;
+ int internal;
+{
+ DB_ENV *dbenv;
+ DB_LSN begin_lsn, null_lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *td;
+ size_t off;
+ u_int32_t id, *ids;
+ int nids, ret;
+
+ mgr = txn->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+
+ /*
+ * We do not have to write begin records (and if we do not, then we
+ * need never write records for read-only transactions). However,
+ * we do need to find the current LSN so that we can store it in the
+ * transaction structure, so we can know where to take checkpoints.
+ *
+ * XXX
+ * We should set this value when we write the first log record, not
+ * here.
+ */
+ if (DBENV_LOGGING(dbenv))
+ __log_txn_lsn(dbenv, &begin_lsn, NULL, NULL);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted during recovery");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Make sure that we aren't still recovering prepared transactions. */
+ if (!internal && region->stat.st_nrestores != 0) {
+ __db_err(dbenv,
+ "recovery of prepared but not yet committed transactions is incomplete");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Allocate a new transaction id. Our current valid range can span
+ * the maximum valid value, so check for it and wrap manually.
+ */
+ if (region->last_txnid == TXN_MAXIMUM &&
+ region->cur_maxid != TXN_MAXIMUM)
+ region->last_txnid = TXN_MINIMUM - 1;
+
+ if (region->last_txnid == region->cur_maxid) {
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t) * region->maxtxns, &ids)) != 0)
+ goto err;
+ nids = 0;
+ for (td = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail))
+ ids[nids++] = td->txnid;
+ region->last_txnid = TXN_MINIMUM - 1;
+ region->cur_maxid = TXN_MAXIMUM;
+ if (nids != 0)
+ __db_idspace(ids, nids,
+ &region->last_txnid, &region->cur_maxid);
+ __os_free(dbenv, ids);
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __txn_recycle_log(dbenv, NULL,
+ &null_lsn, 0, region->last_txnid, region->cur_maxid)) != 0)
+ goto err;
+ }
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for transaction detail");
+ goto err;
+ }
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ id = ++region->last_txnid;
+ ++region->stat.st_nbegins;
+ if (++region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
+
+ td->txnid = id;
+ td->begin_lsn = begin_lsn;
+ ZERO_LSN(td->last_lsn);
+ td->status = TXN_RUNNING;
+ if (txn->parent != NULL)
+ td->parent = txn->parent->off;
+ else
+ td->parent = INVALID_ROFF;
+
+ td->flags = 0;
+ off = R_OFFSET(&mgr->reginfo, td);
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ ZERO_LSN(txn->last_lsn);
+ txn->txnid = id;
+ txn->off = (u_int32_t)off;
+
+ txn->abort = __txn_abort;
+ txn->commit = __txn_commit;
+ txn->discard = __txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __txn_prepare;
+ txn->set_timeout = __txn_set_timeout;
+
+ /*
+ * If this is a transaction family, we must link the child to the
+ * maximal grandparent in the lock table for deadlock detection.
+ */
+ if (txn->parent != NULL && LOCKING_ON(dbenv))
+ if ((ret = __lock_addfamilylocker(dbenv,
+ txn->parent->txnid, txn->txnid)) != 0)
+ return (ret);
+
+ if (F_ISSET(txn, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ return (0);
+
+err: R_UNLOCK(dbenv, &mgr->reginfo);
+ return (ret);
+}
+
+/*
+ * __txn_commit --
+ * Commit a transaction.
+ *
+ * PUBLIC: int __txn_commit __P((DB_TXN *, u_int32_t));
+ */
+int
+__txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret, t_ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_COMMIT)) != 0)
+ return (ret);
+
+ /*
+ * We clear flags that are incorrect, ignoring any flag errors, and
+ * default to synchronous operations. By definition, transaction
+ * handles are dead when we return, and this error should never
+ * happen, but we don't want to fail in the field because the app is
+ * specifying the wrong flag for some reason.
+ */
+ if (__db_fchk(dbenv,
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (__db_fcchk(dbenv,
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ F_CLR(txnp, TXN_SYNC);
+ F_SET(txnp, TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_SYNC)) {
+ F_CLR(txnp, TXN_NOSYNC);
+ F_SET(txnp, TXN_SYNC);
+ }
+
+ /*
+ * Commit any unresolved children. If anyone fails to commit,
+ * then try to abort the rest of the kids and then abort the parent.
+ * Abort should never fail; if it does, we bail out immediately.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->commit(kid, flags)) != 0)
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((t_ret = kid->abort(kid)) != 0)
+ return (__db_panic(dbenv, t_ret));
+
+ /*
+ * Process any aborted pages from our children.
+ * We delay putting pages on the free list that are newly
+ * allocated and then aborted so that we can undo other
+ * allocations, if necessary, without worrying about
+ * these pages which were not on the free list before.
+ */
+ if (txnp->txn_list != NULL) {
+ t_ret = __db_do_the_limbo(dbenv, NULL, txnp, txnp->txn_list);
+ __db_txnlist_end(dbenv, txnp->txn_list);
+ txnp->txn_list = NULL;
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If there are any log records, write a log record and sync the log,
+ * else do no log writes. If the commit is for a child transaction,
+ * we do not need to commit the child synchronously since it may still
+ * abort (if its parent aborts), and otherwise its parent or ultimate
+ * ancestor will write synchronously.
+ */
+ if (DBENV_LOGGING(dbenv) && !IS_ZERO_LSN(txnp->last_lsn)) {
+ if (txnp->parent == NULL) {
+ /*
+ * We are about to free all the read locks
+ * for this transaction below. Some of those
+ * locks might be handle locks which should
+ * not be freed, because they will be freed
+ * when the handle is closed. Check the
+ * events and preprocess any trades now so
+ * that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ goto err;
+ request.op = DB_LOCK_PUT_READ;
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ goto err;
+
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if ((ret = __txn_regop_log(dbenv,
+ txnp, &txnp->last_lsn, lflags,
+ TXN_COMMIT, (int32_t)time(NULL))) != 0)
+ goto err;
+ } else {
+ /* Log the commit in the parent! */
+ if ((ret = __txn_child_log(dbenv,
+ txnp->parent, &txnp->parent->last_lsn,
+ 0, txnp->txnid, &txnp->last_lsn)) != 0) {
+ goto err;
+ }
+
+ F_SET(txnp->parent, TXN_CHILDCOMMIT);
+ }
+ }
+
+ /* This is OK because __txn_end can only fail with a panic. */
+ return (__txn_end(txnp, 1));
+
+err: /*
+ * If we are prepared, then we "must" be able to commit. We
+ * panic here because even though the coordinator might be
+ * able to retry it is not clear it would know to do that.
+ * Otherwise we'll try to abort. If that is successful,
+ * then we return whatever was in ret (i.e., the reason we failed).
+ * If the abort was unsuccessful, then abort probably returned
+ * DB_RUNRECOVERY and we need to propagate that up.
+ */
+ if (td->status == TXN_PREPARED)
+ return (__db_panic(dbenv, ret));
+
+ if ((t_ret = txnp->abort(txnp)) != 0)
+ ret = t_ret;
+ return (ret);
+}
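+
+#ifdef TXN_USAGE_EXAMPLE
+/*
+ * Illustrative sketch only -- not part of the original Berkeley DB
+ * source.  It shows how an application drives the commit path above
+ * through the public handle methods installed earlier in this file:
+ * a child transaction is begun against a parent, and committing the
+ * parent first resolves any unresolved children (see the kids loop in
+ * __txn_commit).  The guard macro and the function name are
+ * hypothetical.
+ */
+static int
+__txn_example_nested_commit(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *parent, *child;
+ int ret;
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &parent, 0)) != 0)
+ return (ret);
+ if ((ret = dbenv->txn_begin(dbenv, parent, &child, 0)) != 0) {
+ (void)parent->abort(parent);
+ return (ret);
+ }
+
+ /*
+ * Work done under "child" would go here.  Leaving the child
+ * unresolved is legal: parent->commit resolves it for us.
+ */
+ return (parent->commit(parent, DB_TXN_SYNC));
+}
+#endif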
+
+/*
+ * __txn_abort --
+ * Abort a transaction.
+ *
+ * PUBLIC: int __txn_abort __P((DB_TXN *));
+ */
+int
+__txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Ensure that abort always fails fatally. */
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_ABORT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Try to abort any unresolved children.
+ *
+ * Abort either succeeds or panics the region. As soon as we
+ * see any failure, we just get out of here and return the panic
+ * up.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->abort(kid)) != 0)
+ return (ret);
+
+ if (LOCKING_ON(dbenv)) {
+ /*
+ * We are about to free all the read locks for this transaction
+ * below. Some of those locks might be handle locks which
+ * should not be freed, because they will be freed when the
+ * handle is closed. Check the events and preprocess any
+ * trades now so that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* Turn off timeouts. */
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_TXN_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_LOCK_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ request.op = DB_LOCK_UPGRADE_WRITE;
+ if ((ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
+ }
+ if ((ret = __txn_undo(txnp)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Normally, we do not need to log aborts. However, if we
+ * are a distributed transaction (i.e., we have a prepare),
+ * then we log the abort so we know that this transaction
+ * was actually completed.
+ */
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if (DBENV_LOGGING(dbenv) && td->status == TXN_PREPARED &&
+ (ret = __txn_regop_log(dbenv, txnp, &txnp->last_lsn,
+ lflags, TXN_ABORT, (int32_t)time(NULL))) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* __txn_end always panics if it errors, so pass the return along. */
+ return (__txn_end(txnp, 0));
+}
+
+/*
+ * __txn_discard --
+ * Free the per-process resources associated with this txn handle.
+ *
+ * PUBLIC: int __txn_discard __P((DB_TXN *, u_int32_t flags));
+ */
+int
+__txn_discard(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_TXN *freep;
+ TXN_DETAIL *td;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ dbenv = txnp->mgrp->dbenv;
+ freep = NULL;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_DISCARD)) != 0)
+ return (ret);
+
+ /* Should be no children. */
+ DB_ASSERT(TAILQ_FIRST(&txnp->kids) == NULL);
+ DB_ASSERT(F_ISSET(td, TXN_RESTORED));
+
+ /* Free the space. */
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ txnp->mgrp->n_discards++;
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ TAILQ_REMOVE(&txnp->mgrp->txn_chain, txnp, links);
+ freep = txnp;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ if (freep != NULL)
+ __os_free(dbenv, freep);
+
+ return (0);
+}
+
+/*
+ * __txn_prepare --
+ * Flush the log so a future commit is guaranteed to succeed.
+ *
+ * PUBLIC: int __txn_prepare __P((DB_TXN *, u_int8_t *));
+ */
+int
+__txn_prepare(txnp, gid)
+ DB_TXN *txnp;
+ u_int8_t *gid;
+{
+ DBT xid;
+ DB_ENV *dbenv;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_PREPARE)) != 0)
+ return (ret);
+
+ /* Commit any unresolved children. */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->commit(kid, DB_TXN_NOSYNC)) != 0)
+ return (ret);
+
+ /*
+ * In XA, the global transaction ID in the txn_detail structure is
+ * already set; in a non-XA environment, we must set it here. XA
+ * requires that the transaction be either ENDED or SUSPENDED when
+ * prepare is called, so we know that if the xa_status isn't in one
+ * of those states, then we are calling prepare directly and we need
+ * to fill in the td->xid.
+ */
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&xid, 0, sizeof(xid));
+ if (td->xa_status != TXN_XA_ENDED &&
+ td->xa_status != TXN_XA_SUSPENDED)
+ /* Regular prepare; fill in the gid. */
+ memcpy(td->xid, gid, sizeof(td->xid));
+
+ xid.size = sizeof(td->xid);
+ xid.data = td->xid;
+
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if ((ret = __txn_xa_regop_log(dbenv, txnp, &txnp->last_lsn,
+ lflags, TXN_PREPARE, &xid, td->format, td->gtrid, td->bqual,
+ &td->begin_lsn)) != 0) {
+ __db_err(dbenv, "DB_TXN->prepare: log_write failed %s",
+ db_strerror(ret));
+ return (ret);
+ }
+ }
+
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ td->status = TXN_PREPARED;
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ return (0);
+}
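+
+#ifdef TXN_USAGE_EXAMPLE
+/*
+ * Illustrative sketch only -- not part of the original Berkeley DB
+ * source.  It shows the two-phase-commit sequence that ends in
+ * __txn_prepare above: the application supplies a DB_XIDDATASIZE-byte
+ * global transaction ID, prepares, and later commits (or aborts) the
+ * same handle.  The guard macro and the function name are hypothetical.
+ */
+static int
+__txn_example_prepare(txn, gid)
+ DB_TXN *txn;
+ u_int8_t *gid;
+{
+ int ret;
+
+ /* Prepare flushes the log, so a later commit is guaranteed to succeed. */
+ if ((ret = txn->prepare(txn, gid)) != 0)
+ return (ret);
+
+ /*
+ * If we crash between prepare and commit, DB_ENV->txn_recover
+ * returns the transaction as a restored handle which can then be
+ * committed, aborted or discarded.
+ */
+ return (txn->commit(txn, 0));
+}
+#endif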
+
+/*
+ * __txn_id --
+ * Return the transaction ID.
+ *
+ * PUBLIC: u_int32_t __txn_id __P((DB_TXN *));
+ */
+u_int32_t
+__txn_id(txnp)
+ DB_TXN *txnp;
+{
+ return (txnp->txnid);
+}
+
+/*
+ * __txn_set_timeout --
+ * Set timeout values in the txn structure.
+ */
+static int
+__txn_set_timeout(txnp, timeout, op)
+ DB_TXN *txnp;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ if (op != DB_SET_TXN_TIMEOUT && op != DB_SET_LOCK_TIMEOUT)
+ return (__db_ferr(txnp->mgrp->dbenv, "DB_TXN->set_timeout", 0));
+
+ return (__lock_set_timeout(
+ txnp->mgrp->dbenv, txnp->txnid, timeout, op));
+}
+
+/*
+ * __txn_isvalid --
+ * Return 0 if the txnp is reasonable, otherwise panic.
+ */
+static int
+__txn_isvalid(txnp, tdp, op)
+ const DB_TXN *txnp;
+ TXN_DETAIL **tdp;
+ txnop_t op;
+{
+ DB_TXNMGR *mgrp;
+ DB_TXNREGION *region;
+ TXN_DETAIL *tp;
+
+ mgrp = txnp->mgrp;
+ region = mgrp->reginfo.primary;
+
+ /* Check for recovery. */
+ if (!F_ISSET(txnp, TXN_COMPENSATE) &&
+ F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(mgrp->dbenv,
+ "operation not permitted during recovery");
+ goto err;
+ }
+
+ /* Check for live cursors. */
+ if (txnp->cursors != 0) {
+ __db_err(mgrp->dbenv, "transaction has active cursors");
+ goto err;
+ }
+
+ /* Check transaction's state. */
+ tp = (TXN_DETAIL *)R_ADDR(&mgrp->reginfo, txnp->off);
+ if (tdp != NULL)
+ *tdp = tp;
+
+ /* Handle any operation specific checks. */
+ switch (op) {
+ case TXN_OP_DISCARD:
+ /*
+ * Since we're just tossing the per-process space, there are
+ * a lot of problems with the transaction that we can tolerate.
+ */
+
+ /* The transaction has already been reused. */
+ if (txnp->txnid != tp->txnid)
+ return (0);
+
+ /* What we've got had better be a restored transaction. */
+ if (!F_ISSET(tp, TXN_RESTORED)) {
+ __db_err(mgrp->dbenv, "not a restored transaction");
+ return (__db_panic(mgrp->dbenv, EINVAL));
+ }
+
+ return (0);
+ case TXN_OP_PREPARE:
+ if (txnp->parent != NULL) {
+ /*
+ * This is not fatal, because you could imagine an
+ * application that simply prepares everybody because
+ * it doesn't distinguish between children and parents.
+ * I'm not arguing this is good, but I could imagine
+ * someone doing it.
+ */
+ __db_err(mgrp->dbenv,
+ "Prepare disallowed on child transactions");
+ return (EINVAL);
+ }
+ break;
+ case TXN_OP_ABORT:
+ case TXN_OP_COMMIT:
+ default:
+ break;
+ }
+
+ switch (tp->status) {
+ case TXN_PREPARED:
+ if (op == TXN_OP_PREPARE) {
+ __db_err(mgrp->dbenv, "transaction already prepared");
+ /*
+ * Txn_prepare doesn't blow away the user handle, so
+ * in this case, give the user the opportunity to
+ * abort or commit.
+ */
+ return (EINVAL);
+ }
+ break;
+ case TXN_RUNNING:
+ break;
+ case TXN_ABORTED:
+ case TXN_COMMITTED:
+ default:
+ __db_err(mgrp->dbenv, "transaction already %s",
+ tp->status == TXN_COMMITTED ? "committed" : "aborted");
+ goto err;
+ }
+
+ return (0);
+
+err: /*
+ * If there's a serious problem with the transaction, panic. TXN
+ * handles are dead by definition when we return, and if you use
+ * a cursor you forgot to close, we have no idea what will happen.
+ */
+ return (__db_panic(mgrp->dbenv, EINVAL));
+}
+
+/*
+ * __txn_end --
+ * Internal transaction end routine.
+ */
+static int
+__txn_end(txnp, is_commit)
+ DB_TXN *txnp;
+ int is_commit;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *tp;
+ int do_closefiles, ret;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ /* Process commit events. */
+ if ((ret = __txn_doevents(dbenv, txnp, is_commit, 0)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* Release the locks. */
+ request.op = txnp->parent == NULL ||
+ is_commit == 0 ? DB_LOCK_PUT_ALL : DB_LOCK_INHERIT;
+
+ /*
+ * __txn_end cannot return a simple error; we MUST return
+ * success/failure from commit or abort, ignoring any internal
+ * errors. So, we panic if something goes wrong. We can't
+ * deadlock here because we're not acquiring any new locks,
+ * so DB_LOCK_DEADLOCK is just as fatal as any other error.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, DB_LOCK_FREE_LOCKER, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* End the transaction. */
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ tp = (TXN_DETAIL *)R_ADDR(&mgr->reginfo, txnp->off);
+ SH_TAILQ_REMOVE(&region->active_txn, tp, links, __txn_detail);
+ if (F_ISSET(tp, TXN_RESTORED)) {
+ region->stat.st_nrestores--;
+ do_closefiles = region->stat.st_nrestores == 0;
+ }
+
+ __db_shalloc_free(mgr->reginfo.addr, tp);
+
+ if (is_commit)
+ region->stat.st_ncommits++;
+ else
+ region->stat.st_naborts++;
+ --region->stat.st_nactive;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * The transaction cannot get more locks; remove its locker info,
+ * if any.
+ */
+ if (LOCKING_ON(dbenv) && (ret =
+ __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid)) != 0)
+ return (__db_panic(dbenv, ret));
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+
+ /* Free the space. */
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+
+ __os_free(dbenv, txnp);
+ }
+
+ if (do_closefiles) {
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ (void)__dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ mgr->n_discards = 0;
+ (void)dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE);
+ }
+ return (0);
+}
+
+/*
+ * __txn_undo --
+ * Undo the transaction with id txnid. Returns 0 on success and
+ * errno on failure.
+ */
+static int
+__txn_undo(txnp)
+ DB_TXN *txnp;
+{
+ DBT rdbt;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ DB_LSN key_lsn;
+ DB_TXN *ptxn;
+ DB_TXNMGR *mgr;
+ int ret, t_ret;
+ void *txnlist;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ logc = NULL;
+ txnlist = NULL;
+
+ if (!DBENV_LOGGING(dbenv))
+ return (0);
+
+ /*
+ * This is the simplest way to code this, but if the mallocs during
+ * recovery turn out to be a performance issue, we can do the
+ * allocation here and use DB_DBT_USERMEM.
+ */
+ memset(&rdbt, 0, sizeof(rdbt));
+
+ key_lsn = txnp->last_lsn;
+
+ /*
+ * Allocate a txnlist for children and aborted page allocs.
+ * We need to associate the list with the maximal parent
+ * so that aborted pages are recovered when that transaction
+ * is committed or aborted.
+ */
+ for (ptxn = txnp->parent; ptxn != NULL && ptxn->parent != NULL;)
+ ptxn = ptxn->parent;
+
+ if (ptxn != NULL && ptxn->txn_list != NULL)
+ txnlist = ptxn->txn_list;
+ else if (txnp->txn_list != NULL)
+ txnlist = txnp->txn_list;
+ else if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txnlist)) != 0)
+ return (ret);
+ else if (ptxn != NULL)
+ ptxn->txn_list = txnlist;
+
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT) &&
+ (ret = __db_txnlist_lsninit(dbenv, txnlist, &txnp->last_lsn)) != 0)
+ return (ret);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ while (ret == 0 && !IS_ZERO_LSN(key_lsn)) {
+ /*
+ * The dispatch routine returns the lsn of the record
+ * before the current one in the key_lsn argument.
+ */
+ if ((ret = logc->get(logc, &key_lsn, &rdbt, DB_SET)) == 0) {
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &rdbt, &key_lsn,
+ DB_TXN_ABORT, txnlist);
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT))
+ (void)__db_txnlist_lsnadd(dbenv,
+ txnlist, &key_lsn, 0);
+ }
+ if (ret == DB_SURPRISE_KID) {
+ if ((ret = __db_txnlist_lsninit(
+ dbenv, txnlist, &key_lsn)) == 0)
+ F_SET(txnp, TXN_CHILDCOMMIT);
+ } else if (ret != 0) {
+ __db_err(txnp->mgrp->dbenv,
+ "DB_TXN->abort: Log undo failed for LSN: %lu %lu: %s",
+ (u_long)key_lsn.file, (u_long)key_lsn.offset,
+ db_strerror(ret));
+ goto err;
+ }
+ }
+
+ ret = __db_do_the_limbo(dbenv, ptxn, txnp, txnlist);
+
+err: if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ptxn == NULL && txnlist != NULL)
+ __db_txnlist_end(dbenv, txnlist);
+ return (ret);
+}
+
+/*
+ * Transaction checkpoint.
+ * If either kbytes or minutes is non-zero, then we only take the checkpoint
+ * if more than "minutes" minutes have passed since the last checkpoint or if
+ * more than "kbytes" of log data have been written since the last checkpoint.
+ * When taking a checkpoint, find the oldest active transaction and figure out
+ * its first LSN. This is the lowest LSN we can checkpoint, since any record
+ * written after that point may be involved in a transaction and may
+ * therefore need to be undone in the case of an abort.
+ *
+ * PUBLIC: int __txn_checkpoint
+ * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__txn_checkpoint(dbenv, kbytes, minutes, flags)
+ DB_ENV *dbenv;
+ u_int32_t kbytes, minutes, flags;
+{
+ DB_LSN ckp_lsn, last_ckp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *txnp;
+ time_t last_ckp_time, now;
+ u_int32_t bytes, mbytes;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->tx_handle, "txn_checkpoint", DB_INIT_TXN);
+
+ /*
+ * On a replication client, all transactions are read-only; therefore,
+ * a checkpoint is a no-op.
+ *
+ * We permit txn_checkpoint, instead of just rendering it illegal,
+ * so that an application can just let a checkpoint thread continue
+ * to operate as it gets promoted or demoted between being a
+ * master and a client.
+ */
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * The checkpoint LSN is an LSN such that all transactions begun before
+ * it are complete. Our first guess (corrected below based on the list
+ * of active transactions) is the last-written LSN.
+ */
+ __log_txn_lsn(dbenv, &ckp_lsn, &mbytes, &bytes);
+
+ if (!LF_ISSET(DB_FORCE)) {
+ /* Don't checkpoint a quiescent database. */
+ if (bytes == 0 && mbytes == 0)
+ return (0);
+
+ if (kbytes != 0 &&
+ mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes)
+ goto do_ckp;
+
+ if (minutes != 0) {
+ (void)time(&now);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp_time = region->time_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (now - last_ckp_time >= (time_t)(minutes * 60))
+ goto do_ckp;
+ }
+
+ /*
+ * If we checked time and data and didn't go to checkpoint,
+ * we're done.
+ */
+ if (minutes != 0 || kbytes != 0)
+ return (0);
+ }
+
+do_ckp: /* Look through the active transactions for the lowest begin LSN. */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail))
+ if (!IS_ZERO_LSN(txnp->begin_lsn) &&
+ log_compare(&txnp->begin_lsn, &ckp_lsn) < 0)
+ ckp_lsn = txnp->begin_lsn;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (MPOOL_ON(dbenv) && (ret = dbenv->memp_sync(dbenv, NULL)) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: failed to flush the buffer cache %s",
+ db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Because we can't be a replication client here, and because
+ * recovery (somewhat unusually) calls txn_checkpoint and expects
+ * it to write a log message, LOGGING_ON is the correct macro here.
+ */
+ if (LOGGING_ON(dbenv)) {
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp = region->last_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Put out records for the open files before we log
+ * the checkpoint. The records are certain to be at
+ * or after ckp_lsn, but before the checkpoint record
+ * itself, so they're sure to be included if we start
+ * recovery from the ckp_lsn contained in this
+ * checkpoint.
+ */
+ if ((ret = __dbreg_open_files(dbenv)) != 0 ||
+ (ret = __txn_ckp_log(dbenv,
+ NULL, &ckp_lsn, DB_PERMANENT | DB_FLUSH, &ckp_lsn,
+ &last_ckp, (int32_t)time(NULL))) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: log failed at LSN [%ld %ld] %s",
+ (long)ckp_lsn.file, (long)ckp_lsn.offset,
+ db_strerror(ret));
+ return (ret);
+ }
+
+ __txn_updateckp(dbenv, &ckp_lsn);
+ }
+ return (0);
+}
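+
+#ifdef TXN_USAGE_EXAMPLE
+/*
+ * Illustrative sketch only -- not part of the original Berkeley DB
+ * source.  It shows how a checkpoint thread typically drives
+ * __txn_checkpoint through the public method: with non-zero kbytes and
+ * minutes arguments the call is cheap unless enough log has been
+ * written or enough time has passed, so it can simply be invoked in a
+ * loop.  The guard macro, the function name and the 512KB/5-minute
+ * thresholds are assumptions.
+ */
+static int
+__txn_example_checkpoint_once(dbenv)
+ DB_ENV *dbenv;
+{
+ /* Checkpoint if at least 512KB of log or 5 minutes since the last one. */
+ return (dbenv->txn_checkpoint(dbenv, 512, 5, 0));
+}
+#endif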
+
+/*
+ * __txn_getckp --
+ * Get the LSN of the last transaction checkpoint.
+ *
+ * PUBLIC: int __txn_getckp __P((DB_ENV *, DB_LSN *));
+ */
+int
+__txn_getckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LSN lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ lsn = region->last_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (IS_ZERO_LSN(lsn))
+ return (DB_NOTFOUND);
+
+ *lsnp = lsn;
+ return (0);
+}
+
+/*
+ * __txn_activekids --
+ * Return if this transaction has any active children.
+ *
+ * PUBLIC: int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+ */
+int
+__txn_activekids(dbenv, rectype, txnp)
+ DB_ENV *dbenv;
+ u_int32_t rectype;
+ DB_TXN *txnp;
+{
+ /*
+ * On a child commit, we know that there are children (i.e., the
+ * committing child, at the least). In that case, skip this check.
+ */
+ if (F_ISSET(txnp, TXN_COMPENSATE) || rectype == DB___txn_child)
+ return (0);
+
+ if (TAILQ_FIRST(&txnp->kids) != NULL) {
+ __db_err(dbenv, "Child transaction is active");
+ return (EPERM);
+ }
+ return (0);
+}
+
+/*
+ * __txn_force_abort --
+ * Force an abort record into the log if the commit record
+ * failed to get to disk.
+ *
+ * PUBLIC: int __txn_force_abort __P((DB_ENV *, u_int8_t *));
+ */
+int
+__txn_force_abort(dbenv, buffer)
+ DB_ENV *dbenv;
+ u_int8_t *buffer;
+{
+ DB_CIPHER *db_cipher;
+ HDR *hdr;
+ u_int32_t offset, opcode, rec_len, rec_type, sum_len;
+ u_int8_t *bp, *key, chksum[DB_MAC_KEY];
+ size_t hdrsize;
+ int ret;
+
+ db_cipher = dbenv->crypto_handle;
+
+ /*
+ * This routine depends on the layout of HDR and the __txn_regop
+ * record in txn.src. We are passed the beginning of the commit
+ * record in the log buffer and overwrite the commit with an abort
+ * and recalculate the checksum. We may be passed a txn_xa_regop
+ * (that is, an XA prepare); there's no need to overwrite that one.
+ */
+ hdr = (HDR *)buffer;
+ memcpy(&rec_type, hdr, sizeof(rec_type));
+ if (rec_type == DB___txn_xa_regop)
+ return (0);
+
+ offset = sizeof(u_int32_t) + sizeof(u_int32_t) + sizeof(DB_LSN);
+ rec_len = offset + sizeof(u_int32_t) + sizeof(int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ key = db_cipher->mac_key;
+ hdrsize = HDR_CRYPTO_SZ;
+ sum_len = DB_MAC_KEY;
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+ } else {
+ key = NULL;
+ hdrsize = HDR_NORMAL_SZ;
+ sum_len = sizeof(u_int32_t);
+ }
+ bp = buffer + hdrsize + offset;
+ opcode = TXN_ABORT;
+ memcpy(bp, &opcode, sizeof(opcode));
+
+ if (CRYPTO_ON(dbenv) &&
+ (ret = db_cipher->encrypt(dbenv,
+ db_cipher->data, &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ __db_chksum(buffer + hdrsize, rec_len, key, chksum);
+ memcpy(buffer + SSZ(HDR, chksum), &chksum, sum_len);
+
+ return (0);
+}
+
+/*
+ * __txn_preclose
+ * Before we can close an environment, we need to check if we
+ * were in the midst of taking care of restored transactions. If
+ * so, then we need to close the files that we opened.
+ *
+ * PUBLIC: int __txn_preclose __P((DB_ENV *));
+ */
+int
+__txn_preclose(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_closefiles, ret;
+
+ mgr = (DB_TXNMGR *)dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region != NULL &&
+ region->stat.st_nrestores
+ <= mgr->n_discards && mgr->n_discards != 0)
+ do_closefiles = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_closefiles) {
+ /*
+ * Set the DBLOG_RECOVER flag while closing these
+ * files so they do not create additional log records
+ * that will confuse future recoveries.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ ret = __dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * __txn_reset --
+ * Reset the last txnid to its minimum value, and log the reset.
+ *
+ * PUBLIC: int __txn_reset __P((DB_ENV *));
+ */
+int
+__txn_reset(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LSN scrap;
+ DB_TXNREGION *region;
+
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ region->last_txnid = TXN_MINIMUM;
+
+ DB_ASSERT(LOGGING_ON(dbenv));
+ return (__txn_recycle_log(dbenv,
+ NULL, &scrap, 0, TXN_MINIMUM, TXN_MAXIMUM));
+}
+
+/*
+ * __txn_updateckp --
+ * Update the last_ckp field in the transaction region. This happens
+ * at the end of a normal checkpoint and also when a replication client
+ * receives a checkpoint record.
+ *
+ * PUBLIC: void __txn_updateckp __P((DB_ENV *, DB_LSN *));
+ */
+void
+__txn_updateckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * We want to make sure last_ckp only moves forward; since
+ * we drop locks above and in log_put, it's possible
+ * for two calls to __txn_ckp_log to finish in a different
+ * order from how they were called.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (log_compare(&region->last_ckp, lsnp) < 0) {
+ region->last_ckp = *lsnp;
+ (void)time(&region->time_ckp);
+ }
+ R_UNLOCK(dbenv, &mgr->reginfo);
+}
diff --git a/libdb/txn/txn.src b/libdb/txn/txn.src
new file mode 100644
index 0000000..e15d8b2
--- /dev/null
+++ b/libdb/txn/txn.src
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id$
+ */
+
+PREFIX __txn
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * This is the standard log operation for commit.
+ * Note that we are using an int32_t for the timestamp. This means that
+ * in 2039 we will need to deprecate this log record and create one that
+ * either changes the Epoch or has a 64-bit offset.
+ */
+BEGIN regop 10
+ARG opcode u_int32_t lu
+ARG timestamp int32_t ld
+END
+
+/*
+ * This is the checkpoint record. It contains the lsn that the checkpoint
+ * guarantees and a pointer to the last checkpoint so we can walk backwards
+ * by checkpoint.
+ *
+ * ckp_lsn:
+ * The lsn in the log of the most recent point at which all begun
+ * transactions have been aborted. This is the point for which
+ * the checkpoint is relevant.
+ * last_ckp:
+ * The previous checkpoint.
+ * timestamp:
+ * See comment in commit about timestamps.
+ */
+BEGIN ckp 11
+POINTER ckp_lsn DB_LSN * lu
+POINTER last_ckp DB_LSN * lu
+ARG timestamp int32_t ld
+END
+
+/*
+ * This is the (new) log operation for a child commit. It is
+ * logged as a record in the PARENT. The child field contains
+ * the transaction ID of the child committing and the c_lsn is
+ * the last LSN of the child's log trail.
+ */
+BEGIN child 12
+ARG child u_int32_t lx
+POINTER c_lsn DB_LSN * lu
+END
+
+
+/*
+ * This is the standard log operation for prepare.
+ */
+BEGIN xa_regop 13
+ARG opcode u_int32_t lu
+DBT xid DBT s
+ARG formatID int32_t ld
+ARG gtrid u_int32_t u
+ARG bqual u_int32_t u
+POINTER begin_lsn DB_LSN * lu
+END
+
+/*
+ * Log the fact that we are recycling txnids.
+ */
+BEGIN recycle 14
+ARG min u_int32_t u
+ARG max u_int32_t u
+END
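+
+/*
+ * Illustrative note, not part of the original source: gen_rec.awk
+ * expands each BEGIN/END description above into a family of functions
+ * in txn_auto.c.  For example, the "recycle" record produces
+ * __txn_recycle_log (marshal and write the record), __txn_recycle_read
+ * (unpack it into a __txn_recycle_args structure), __txn_recycle_print
+ * (human-readable dump), and a recovery entry registered by
+ * __txn_init_recover.
+ */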
diff --git a/libdb/txn/txn_auto.c b/libdb/txn/txn_auto.c
new file mode 100644
index 0000000..55fb4bb
--- /dev/null
+++ b/libdb/txn/txn_auto.c
@@ -0,0 +1,1171 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __txn_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, int32_t));
+ */
+int
+__txn_regop_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, timestamp)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ int32_t timestamp;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___txn_regop;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)timestamp;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__txn_regop_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
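+
+/*
+ * Illustrative note, not part of the generated file: each *_log
+ * function in this file marshals the same fixed header followed by the
+ * record-specific fields, so the __txn_regop record built above lays
+ * out as
+ *
+ * u_int32_t rectype    (DB___txn_regop)
+ * u_int32_t txn_num    (transaction ID, 0 if no transaction)
+ * DB_LSN    prev_lsn   (the transaction's previous log record)
+ * u_int32_t opcode     (TXN_COMMIT, or TXN_ABORT for forced aborts)
+ * u_int32_t timestamp  (an int32_t stored as u_int32_t)
+ *
+ * and the matching *_read function below unpacks the fields in the
+ * same order.
+ */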
+
+/*
+ * PUBLIC: int __txn_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_regop_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_regop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_regop_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__txn_regop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\ttimestamp: %ld\n", (long)argp->timestamp);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **));
+ */
+int
+__txn_regop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_regop_args **argpp;
+{
+ __txn_regop_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__txn_regop_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->timestamp = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, DB_LSN *, DB_LSN *, int32_t));
+ */
+int
+__txn_ckp_log(dbenv, txnid, ret_lsnp, flags,
+ ckp_lsn, last_ckp, timestamp)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ DB_LSN * ckp_lsn;
+ DB_LSN * last_ckp;
+ int32_t timestamp;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___txn_ckp;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(*ckp_lsn)
+ + sizeof(*last_ckp)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (ckp_lsn != NULL)
+ memcpy(bp, ckp_lsn, sizeof(*ckp_lsn));
+ else
+ memset(bp, 0, sizeof(*ckp_lsn));
+ bp += sizeof(*ckp_lsn);
+
+ if (last_ckp != NULL)
+ memcpy(bp, last_ckp, sizeof(*last_ckp));
+ else
+ memset(bp, 0, sizeof(*last_ckp));
+ bp += sizeof(*last_ckp);
+
+ uinttmp = (u_int32_t)timestamp;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__txn_ckp_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_ckp_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_ckp_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_ckp_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__txn_ckp: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tckp_lsn: [%lu][%lu]\n",
+ (u_long)argp->ckp_lsn.file, (u_long)argp->ckp_lsn.offset);
+ (void)printf("\tlast_ckp: [%lu][%lu]\n",
+ (u_long)argp->last_ckp.file, (u_long)argp->last_ckp.offset);
+ (void)printf("\ttimestamp: %ld\n", (long)argp->timestamp);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **));
+ */
+int
+__txn_ckp_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_ckp_args **argpp;
+{
+ __txn_ckp_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__txn_ckp_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&argp->ckp_lsn, bp, sizeof(argp->ckp_lsn));
+ bp += sizeof(argp->ckp_lsn);
+
+ memcpy(&argp->last_ckp, bp, sizeof(argp->last_ckp));
+ bp += sizeof(argp->last_ckp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->timestamp = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_child_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LSN *));
+ */
+int
+__txn_child_log(dbenv, txnid, ret_lsnp, flags,
+ child, c_lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t child;
+ DB_LSN * c_lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___txn_child;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(*c_lsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)child;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (c_lsn != NULL)
+ memcpy(bp, c_lsn, sizeof(*c_lsn));
+ else
+ memset(bp, 0, sizeof(*c_lsn));
+ bp += sizeof(*c_lsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__txn_child_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_child_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_child_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_child_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_child_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__txn_child: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tchild: 0x%lx\n", (u_long)argp->child);
+ (void)printf("\tc_lsn: [%lu][%lu]\n",
+ (u_long)argp->c_lsn.file, (u_long)argp->c_lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **));
+ */
+int
+__txn_child_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_child_args **argpp;
+{
+ __txn_child_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__txn_child_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->child = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->c_lsn, bp, sizeof(argp->c_lsn));
+ bp += sizeof(argp->c_lsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, const DBT *, int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: DB_LSN *));
+ */
+int
+__txn_xa_regop_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, xid, formatID, gtrid, bqual, begin_lsn)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ const DBT *xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+ DB_LSN * begin_lsn;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___txn_xa_regop;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (xid == NULL ? 0 : xid->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(*begin_lsn);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (xid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &xid->size, sizeof(xid->size));
+ bp += sizeof(xid->size);
+ memcpy(bp, xid->data, xid->size);
+ bp += xid->size;
+ }
+
+ uinttmp = (u_int32_t)formatID;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)gtrid;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)bqual;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (begin_lsn != NULL)
+ memcpy(bp, begin_lsn, sizeof(*begin_lsn));
+ else
+ memset(bp, 0, sizeof(*begin_lsn));
+ bp += sizeof(*begin_lsn);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__txn_xa_regop_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__txn_xa_regop_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_xa_regop_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_xa_regop_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__txn_xa_regop: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\txid: ");
+ for (i = 0; i < argp->xid.size; i++) {
+ ch = ((u_int8_t *)argp->xid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tformatID: %ld\n", (long)argp->formatID);
+ (void)printf("\tgtrid: %u\n", argp->gtrid);
+ (void)printf("\tbqual: %u\n", argp->bqual);
+ (void)printf("\tbegin_lsn: [%lu][%lu]\n",
+ (u_long)argp->begin_lsn.file, (u_long)argp->begin_lsn.offset);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_read __P((DB_ENV *, void *,
+ * PUBLIC: __txn_xa_regop_args **));
+ */
+int
+__txn_xa_regop_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_xa_regop_args **argpp;
+{
+ __txn_xa_regop_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__txn_xa_regop_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->xid, 0, sizeof(argp->xid));
+ memcpy(&argp->xid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->xid.data = bp;
+ bp += argp->xid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->formatID = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->gtrid = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->bqual = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&argp->begin_lsn, bp, sizeof(argp->begin_lsn));
+ bp += sizeof(argp->begin_lsn);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_recycle_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__txn_recycle_log(dbenv, txnid, ret_lsnp, flags,
+ min, max)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t min;
+ u_int32_t max;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___txn_recycle;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)min;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)max;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__txn_recycle_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_recycle_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_recycle_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__txn_recycle_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __txn_recycle_args *argp;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__txn_recycle: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tmin: %u\n", argp->min);
+ (void)printf("\tmax: %u\n", argp->max);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_recycle_read __P((DB_ENV *, void *,
+ * PUBLIC: __txn_recycle_args **));
+ */
+int
+__txn_recycle_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __txn_recycle_args **argpp;
+{
+ __txn_recycle_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__txn_recycle_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->min = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->max = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__txn_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_regop_print, DB___txn_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_ckp_print, DB___txn_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_child_print, DB___txn_child)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_xa_regop_print, DB___txn_xa_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_recycle_print, DB___txn_recycle)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__txn_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_regop_getpgnos, DB___txn_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_ckp_getpgnos, DB___txn_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_child_getpgnos, DB___txn_child)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_xa_regop_getpgnos, DB___txn_xa_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_recycle_getpgnos, DB___txn_recycle)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __txn_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__txn_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_regop_recover, DB___txn_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_ckp_recover, DB___txn_ckp)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_child_recover, DB___txn_child)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_xa_regop_recover, DB___txn_xa_regop)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __txn_recycle_recover, DB___txn_recycle)) != 0)
+ return (ret);
+ return (0);
+}
diff --git a/libdb/txn/txn_method.c b/libdb/txn/txn_method.c
new file mode 100644
index 0000000..aed1387
--- /dev/null
+++ b/libdb/txn/txn_method.c
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
+static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
+
+/*
+ * __txn_dbenv_create --
+ * Transaction specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
+ */
+void
+__txn_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->tx_max = DEF_MAX_TXNS;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_tx_max = __dbcl_set_tx_max;
+ dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
+ dbenv->txn_checkpoint = __dbcl_txn_checkpoint;
+ dbenv->txn_recover = __dbcl_txn_recover;
+ dbenv->txn_stat = __dbcl_txn_stat;
+ dbenv->txn_begin = __dbcl_txn_begin;
+ } else
+#endif
+ {
+ dbenv->set_tx_max = __txn_set_tx_max;
+ dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+ dbenv->txn_checkpoint = __txn_checkpoint;
+#ifdef CONFIG_TEST
+ dbenv->txn_id_set = __txn_id_set;
+#endif
+ dbenv->txn_recover = __txn_recover;
+ dbenv->txn_stat = __txn_stat;
+ dbenv->txn_begin = __txn_begin;
+ }
+}
+
+/*
+ * __txn_set_tx_max --
+ * Set the size of the transaction table.
+ */
+static int
+__txn_set_tx_max(dbenv, tx_max)
+ DB_ENV *dbenv;
+ u_int32_t tx_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
+
+ dbenv->tx_max = tx_max;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_timestamp --
+ * Set the transaction recovery timestamp.
+ */
+static int
+__txn_set_tx_timestamp(dbenv, timestamp)
+ DB_ENV *dbenv;
+ time_t *timestamp;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
+
+ dbenv->tx_timestamp = *timestamp;
+ return (0);
+}
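+
+#ifdef TXN_USAGE_EXAMPLE
+/*
+ * Illustrative sketch only -- not part of the original Berkeley DB
+ * source.  The setters above are legal only on a created but not yet
+ * opened DB_ENV handle (ENV_ILLEGAL_AFTER_OPEN), for example to raise
+ * the size of the transaction table before DB_ENV->open.  The guard
+ * macro, the function name and the value 1000 are assumptions.
+ */
+static int
+__txn_example_configure(dbenv)
+ DB_ENV *dbenv;
+{
+ return (dbenv->set_tx_max(dbenv, 1000));
+}
+#endif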
diff --git a/libdb/txn/txn_rec.c b/libdb/txn/txn_rec.c
new file mode 100644
index 0000000..14db7f5
--- /dev/null
+++ b/libdb/txn/txn_rec.c
@@ -0,0 +1,436 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_dispatch.h"
+
+#define IS_XA_TXN(R) ((R)->xid.size != 0)
+
+/*
+ * PUBLIC: int __txn_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for commits. Normally, we redo any
+ * committed transaction; however, if we are doing recovery to a timestamp, then
+ * we may treat transactions that committed after the timestamp as aborted.
+ */
+int
+__txn_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_TXNHEAD *headp;
+ __txn_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ headp = info;
+ /*
+ * We are only ever called during FORWARD_ROLL or BACKWARD_ROLL.
+ * We check for the former explicitly and the last two clauses
+ * apply to the BACKWARD_ROLL case.
+ */
+
+ if (op == DB_TXN_FORWARD_ROLL)
+ /*
+ * If this was a 2-phase-commit transaction, then it
+ * might already have been removed from the list, and
+ * that's OK. Ignore the return code from remove.
+ */
+ (void)__db_txnlist_remove(dbenv, info, argp->txnid->txnid);
+ else if ((dbenv->tx_timestamp != 0 &&
+ argp->timestamp > (int32_t)dbenv->tx_timestamp) ||
+ (!IS_ZERO_LSN(headp->trunc_lsn) &&
+ log_compare(&headp->trunc_lsn, lsnp) < 0)) {
+ /*
+ * We failed either the timestamp check or the trunc_lsn check,
+ * so we treat this as an abort even if it was a commit record.
+ */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, TXN_ABORT, NULL);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_IGNORE, NULL);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ } else {
+ /* This is a normal commit; mark it appropriately. */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, argp->opcode, lsnp);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid,
+ argp->opcode == TXN_ABORT ?
+ TXN_IGNORE : argp->opcode, lsnp);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ }
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
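+	/*
+	 * The block below is reached only through the err label above; the
+	 * "if (0)" guarantees it is never entered by falling through.
+	 */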
+ if (0) {
+err: __db_err(dbenv,
+ "txnid %lx commit record found, already on commit list",
+ argp->txnid->txnid);
+ ret = EINVAL;
+ }
+ __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for prepares.
+ */
+int
+__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_xa_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (argp->opcode != TXN_PREPARE) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
+
+ /*
+ * If we are rolling forward, then an aborted prepare
+	 * indicates that this may be the last record we'll see for
+ * this transaction ID, so we should remove it from the
+ * list.
+ */
+
+ if (op == DB_TXN_FORWARD_ROLL) {
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK)
+ goto txn_err;
+ } else if (op == DB_TXN_BACKWARD_ROLL && ret == TXN_PREPARE) {
+ /*
+ * On the backward pass, we have three possibilities:
+ * 1. The transaction is already committed, no-op.
+ * 2. The transaction is already aborted, no-op.
+ * 3. The transaction is neither committed nor aborted.
+ * Treat this like a commit and roll forward so that
+ * the transaction can be resurrected in the region.
+ * We handle case 3 here; cases 1 and 2 are the final clause
+ * below.
+	 * This is a prepared, but not yet committed, transaction.  We
+ * need to add it to the transaction list, so that it gets
+ * rolled forward. We also have to add it to the region's
+ * internal state so it can be properly aborted or committed
+ * after recovery (see txn_recover).
+ */
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK) {
+txn_err: __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ } else if ((ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_COMMIT, lsnp)) == 0)
+ ret = __txn_restore_txn(dbenv, lsnp, argp);
+ } else
+ ret = 0;
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+err: __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_ckp_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ __txn_ckp_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (op == DB_TXN_BACKWARD_ROLL)
+ __db_txnlist_ckp(dbenv, info, lsnp);
+
+ *lsnp = argp->last_ckp;
+ __os_free(dbenv, argp);
+ return (DB_TXN_CKP);
+}
+
+/*
+ * __txn_child_recover
+ * Recover a commit record for a child transaction.
+ *
+ * PUBLIC: int __txn_child_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_args *argp;
+ int c_stat, p_stat, ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * This is a record in a PARENT's log trail indicating that a
+	 * child committed.  If we are aborting, we need to update the
+	 * parent's LSN array.  If we are in recovery, then if the
+	 * parent is committing, we set ourselves up to commit, else
+ * we do nothing.
+ */
+ if (op == DB_TXN_ABORT) {
+		/*
+		 * Note that __db_txnlist_lsnadd rewrites its LSN
+		 * parameter, so you cannot reuse the argp->c_lsn field.
+		 */
+ ret = __db_txnlist_lsnadd(dbenv,
+ info, &argp->c_lsn, TXNLIST_NEW);
+ } else if (op == DB_TXN_BACKWARD_ROLL) {
+ /* Child might exist -- look for it. */
+ c_stat = __db_txnlist_find(dbenv, info, argp->child);
+ p_stat = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
+
+ if (c_stat == TXN_EXPECTED) {
+ /*
+ * The open after this create succeeded. If the
+ * parent succeeded, we don't want to redo; if the
+ * parent aborted, we do want to undo.
+ */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_IGNORE : TXN_ABORT,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat == TXN_UNEXPECTED) {
+ /*
+ * The open after this create failed. If the parent
+ * is rolling forward, we need to roll forward. If
+ * the parent failed, then we do not want to abort
+ * (because the file may not be the one in which we
+ * are interested).
+ */
+ ret = __db_txnlist_update(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_IGNORE,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat != TXN_IGNORE) {
+ ret = __db_txnlist_add(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_ABORT,
+ NULL);
+ }
+ } else {
+ /* Forward Roll */
+ if ((ret =
+ __db_txnlist_remove(dbenv, info, argp->child)) != TXN_OK) {
+ __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ }
+ }
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+ __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * __txn_restore_txn --
+ *	Used only during XA recovery.  If we find any transactions that are
+ * prepared, but not yet committed, then we need to restore the transaction's
+ * state into the shared region, because the TM is going to issue an abort
+ * or commit and we need to respond correctly.
+ *
+ * lsnp is the LSN of the prepare record being recovered
+ * argp is the prepare record (in an appropriate structure)
+ *
+ * PUBLIC: int __txn_restore_txn __P((DB_ENV *,
+ * PUBLIC: DB_LSN *, __txn_xa_regop_args *));
+ */
+int
+__txn_restore_txn(dbenv, lsnp, argp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ __txn_xa_regop_args *argp;
+{
+ DB_TXNMGR *mgr;
+ TXN_DETAIL *td;
+ DB_TXNREGION *region;
+ int ret;
+
+ if (argp->xid.size == 0)
+ return (0);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ return (ret);
+ }
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ td->txnid = argp->txnid->txnid;
+ td->begin_lsn = argp->begin_lsn;
+ td->last_lsn = *lsnp;
+ td->parent = 0;
+ td->status = TXN_PREPARED;
+ td->xa_status = TXN_XA_PREPARED;
+ memcpy(td->xid, argp->xid.data, argp->xid.size);
+ td->bqual = argp->bqual;
+ td->gtrid = argp->gtrid;
+ td->format = argp->formatID;
+ td->flags = 0;
+ F_SET(td, TXN_RESTORED);
+
+ region->stat.st_nrestores++;
+ region->stat.st_nactive++;
+ if (region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ return (0);
+}
+
+/*
+ * __txn_recycle_recover --
+ * Recovery function for recycle.
+ *
+ * PUBLIC: int __txn_recycle_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_recycle_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_recycle_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+	(void)__txn_recycle_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ COMPQUIET(lsnp, NULL);
+
+ if ((ret = __db_txnlist_gen(dbenv, info,
+ DB_UNDO(op) ? -1 : 1, argp->min, argp->max)) != 0)
+ return (ret);
+
+ __os_free(dbenv, argp);
+
+ return (0);
+}
diff --git a/libdb/txn/txn_recover.c b/libdb/txn/txn_recover.c
new file mode 100644
index 0000000..0a077e4
--- /dev/null
+++ b/libdb/txn/txn_recover.c
@@ -0,0 +1,306 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+
+/*
+ * __txn_continue
+ * Fill in the fields of the local transaction structure given
+ * the detail transaction structure.
+ *
+ * XXX
+ * I'm not sure that we work correctly with nested txns.
+ *
+ * PUBLIC: void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t));
+ */
+void
+__txn_continue(env, txnp, td, off)
+ DB_ENV *env;
+ DB_TXN *txnp;
+ TXN_DETAIL *td;
+ size_t off;
+{
+ txnp->mgrp = env->tx_handle;
+ txnp->parent = NULL;
+ txnp->last_lsn = td->last_lsn;
+ txnp->txnid = td->txnid;
+ txnp->off = (roff_t)off;
+
+ txnp->abort = __txn_abort;
+ txnp->commit = __txn_commit;
+ txnp->discard = __txn_discard;
+ txnp->id = __txn_id;
+ txnp->prepare = __txn_prepare;
+
+ txnp->flags = 0;
+}
+
+/*
+ * __txn_map_gid
+ * Return the txn that corresponds to this global ID.
+ *
+ * PUBLIC: int __txn_map_gid __P((DB_ENV *,
+ * PUBLIC: u_int8_t *, TXN_DETAIL **, size_t *));
+ */
+int
+__txn_map_gid(dbenv, gid, tdp, offp)
+ DB_ENV *dbenv;
+ u_int8_t *gid;
+ TXN_DETAIL **tdp;
+ size_t *offp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * Search the internal active transaction table to find the
+ * matching xid. If this is a performance hit, then we
+ * can create a hash table, but I doubt it's worth it.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (*tdp = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ *tdp != NULL;
+ *tdp = SH_TAILQ_NEXT(*tdp, links, __txn_detail))
+ if (memcmp(gid, (*tdp)->xid, sizeof((*tdp)->xid)) == 0)
+ break;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (*tdp == NULL)
+ return (EINVAL);
+
+ *offp = R_OFFSET(&mgr->reginfo, *tdp);
+ return (0);
+}
+
+/*
+ * __txn_recover --
+ * Public interface to retrieve the list of prepared, but not yet
+ *	committed transactions. See __txn_get_prepared for details. This
+ * function and __db_xa_recover both wrap that one.
+ *
+ * PUBLIC: int __txn_recover
+ * PUBLIC: __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_recover(dbenv, preplist, count, retp, flags)
+ DB_ENV *dbenv;
+ DB_PREPLIST *preplist;
+ long count, *retp;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(
+ dbenv, dbenv->tx_handle, "txn_recover", DB_INIT_TXN);
+
+ if (F_ISSET((DB_TXNREGION *)
+ ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary,
+ TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted while in recovery");
+ return (EINVAL);
+ }
+ return (__txn_get_prepared(dbenv, NULL, preplist, count, retp, flags));
+}
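+
+/*
+ * Illustrative caller-side sketch (the array size of 16 and the decision to
+ * abort every returned transaction are example choices only): after running
+ * recovery, an application resolves outstanding prepared transactions
+ * roughly as follows.
+ *
+ *	DB_PREPLIST prep[16];
+ *	long count;
+ *
+ *	if ((ret = dbenv->txn_recover(dbenv, prep, 16, &count, DB_FIRST)) == 0)
+ *		while (count-- > 0)
+ *			(void)prep[count].txn->abort(prep[count].txn);
+ */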
+
+/*
+ * __txn_get_prepared --
+ * Returns a list of prepared (and for XA, heuristically completed)
+ * transactions (less than or equal to the count parameter). One of
+ * xids or txns must be set to point to an array of the appropriate type.
+ * The count parameter indicates the number of entries in the xids and/or
+ * txns array. The retp parameter will be set to indicate the number of
+ * entries returned in the xids/txns array. Flags indicates the operation,
+ * one of DB_FIRST or DB_NEXT.
+ *
+ * PUBLIC: int __txn_get_prepared __P((DB_ENV *,
+ * PUBLIC: XID *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_get_prepared(dbenv, xids, txns, count, retp, flags)
+ DB_ENV *dbenv;
+ XID *xids;
+ DB_PREPLIST *txns;
+ long count; /* This is long for XA compatibility. */
+ long *retp;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LOGC *logc;
+ DB_LSN min, open_lsn;
+ DB_PREPLIST *prepp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+ TXN_DETAIL *td;
+ XID *xidp;
+ __txn_ckp_args *ckp_args;
+ long i;
+ int nrestores, open_files, ret, t_ret;
+ void *txninfo;
+
+ *retp = 0;
+
+ logc = NULL;
+ MAX_LSN(min);
+ prepp = txns;
+ xidp = xids;
+ nrestores = ret = 0;
+ open_files = 1;
+
+ /*
+ * If we are starting a scan, then we traverse the active transaction
+ * list once making sure that all transactions are marked as not having
+ * been collected. Then on each pass, we mark the ones we collected
+ * so that if we cannot collect them all at once, we can finish up
+ * next time with a continue.
+ */
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * During this pass we need to figure out if we are going to need
+ * to open files. We need to open files if we've never collected
+ * before (in which case, none of the COLLECTED bits will be set)
+ * and the ones that we are collecting are restored (if they aren't
+ * restored, then we never crashed; just the main server did).
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (flags == DB_FIRST) {
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (F_ISSET(td, TXN_RESTORED))
+ nrestores++;
+ if (F_ISSET(td, TXN_COLLECTED))
+ open_files = 0;
+ F_CLR(td, TXN_COLLECTED);
+ }
+ mgr->n_discards = 0;
+ } else
+ open_files = 0;
+
+ /* Now begin collecting active transactions. */
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL && *retp < count;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (td->status != TXN_PREPARED || F_ISSET(td, TXN_COLLECTED))
+ continue;
+
+ if (xids != NULL) {
+ xidp->formatID = td->format;
+ xidp->gtrid_length = td->gtrid;
+ xidp->bqual_length = td->bqual;
+ memcpy(xidp->data, td->xid, sizeof(td->xid));
+ xidp++;
+ }
+
+ if (txns != NULL) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXN), &prepp->txn)) != 0)
+ goto err;
+ __txn_continue(dbenv,
+ prepp->txn, td, R_OFFSET(&mgr->reginfo, td));
+ F_SET(prepp->txn, TXN_MALLOC);
+ memcpy(prepp->gid, td->xid, sizeof(td->xid));
+ prepp++;
+ }
+
+ if (log_compare(&td->begin_lsn, &min) < 0)
+ min = td->begin_lsn;
+
+ (*retp)++;
+ F_SET(td, TXN_COLLECTED);
+ }
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Now link all the transactions into the transaction manager's list.
+ */
+ if (txns != NULL) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ for (i = 0; i < *retp; i++)
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txns[i].txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ if (open_files && nrestores && *retp != 0 && !IS_MAX_LSN(min)) {
+ /*
+ * Figure out the last checkpoint before the smallest
+ * start_lsn in the region.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+
+ memset(&data, 0, sizeof(data));
+ if ((ret = __txn_getckp(dbenv, &open_lsn)) == 0)
+ while (!IS_ZERO_LSN(open_lsn) && (ret =
+ logc->get(logc, &open_lsn, &data, DB_SET)) == 0 &&
+ log_compare(&min, &open_lsn) < 0) {
+ /* Format the log record. */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)open_lsn.file,
+ (u_long)open_lsn.offset);
+ goto err;
+ }
+ open_lsn = ckp_args->last_ckp;
+ __os_free(dbenv, ckp_args);
+ }
+
+ /*
+ * There are three ways by which we may have gotten here.
+ * - We got a DB_NOTFOUND -- we need to read the first
+ * log record.
+ * - We found a checkpoint before min. We're done.
+	 * - We found a checkpoint after min whose last_ckp is 0.  We
+ * need to start at the beginning of the log.
+ */
+ if ((ret == DB_NOTFOUND || IS_ZERO_LSN(open_lsn)) &&
+ (ret = logc->get(logc, &open_lsn, &data, DB_FIRST)) != 0) {
+ __db_err(dbenv, "No log records");
+ goto err;
+ }
+
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+ ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &open_lsn, NULL, 0, 0);
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+ }
+
+err: F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/libdb/txn/txn_region.c b/libdb/txn/txn_region.c
new file mode 100644
index 0000000..8a8e445
--- /dev/null
+++ b/libdb/txn/txn_region.c
@@ -0,0 +1,374 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __txn_findlastckp __P((DB_ENV *, DB_LSN *));
+static int __txn_init __P((DB_ENV *, DB_TXNMGR *));
+static size_t __txn_region_size __P((DB_ENV *));
+
+/*
+ * __txn_open --
+ * Open a transaction region.
+ *
+ * PUBLIC: int __txn_open __P((DB_ENV *));
+ */
+int
+__txn_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ /* Create/initialize the transaction manager structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+
+ /* Join/create the txn region. */
+ tmgrp->reginfo.type = REGION_TYPE_TXN;
+ tmgrp->reginfo.id = INVALID_REGION_ID;
+ tmgrp->reginfo.mode = dbenv->db_mode;
+ tmgrp->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&tmgrp->reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(dbenv,
+ &tmgrp->reginfo, __txn_region_size(dbenv))) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ if ((ret = __txn_init(dbenv, tmgrp)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ tmgrp->reginfo.primary =
+ R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary);
+
+ /* Acquire a mutex to protect the active TXN list. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, &tmgrp->reginfo, &tmgrp->mutexp,
+ MUTEX_ALLOC | MUTEX_NO_RLOCK | MUTEX_THREAD)) != 0)
+ goto err;
+
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ dbenv->tx_handle = tmgrp;
+ return (0);
+
+err: if (tmgrp->reginfo.addr != NULL) {
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ (void)__db_r_detach(dbenv, &tmgrp->reginfo, 0);
+ }
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+ __os_free(dbenv, tmgrp);
+ return (ret);
+}
+
+/*
+ * __txn_init --
+ * Initialize a transaction region in shared memory.
+ */
+static int
+__txn_init(dbenv, tmgrp)
+ DB_ENV *dbenv;
+ DB_TXNMGR *tmgrp;
+{
+ DB_LSN last_ckp;
+ DB_TXNREGION *region;
+ int ret;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ u_int8_t *addr;
+#endif
+
+ /*
+ * Find the last checkpoint in the log.
+ */
+ ZERO_LSN(last_ckp);
+ if (LOGGING_ON(dbenv)) {
+ /*
+ * The log system has already walked through the last
+ * file. Get the LSN of a checkpoint it may have found.
+ */
+ __log_get_cached_ckp_lsn(dbenv, &last_ckp);
+
+ /*
+ * If that didn't work, look backwards from the beginning of
+ * the last log file until we find the last checkpoint.
+ */
+ if (IS_ZERO_LSN(last_ckp) &&
+ (ret = __txn_findlastckp(dbenv, &last_ckp)) != 0)
+ return (ret);
+ }
+
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for the transaction region");
+ return (ret);
+ }
+ tmgrp->reginfo.rp->primary =
+ R_OFFSET(&tmgrp->reginfo, tmgrp->reginfo.primary);
+ region = tmgrp->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ region->maxtxns = dbenv->tx_max;
+ region->last_txnid = TXN_MINIMUM;
+ region->cur_maxid = TXN_MAXIMUM;
+ region->last_ckp = last_ckp;
+ region->time_ckp = time(NULL);
+
+ /*
+ * XXX
+ * If we ever do more types of locking and logging, this changes.
+ */
+ region->logtype = 0;
+ region->locktype = 0;
+
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+
+ SH_TAILQ_INIT(&region->active_txn);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Allocate room for the txn maintenance info and initialize it. */
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(REGMAINT) + TXN_MAINT_SIZE, 0, &addr)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for mutex maintenance");
+ return (ret);
+ }
+ __db_maintinit(&tmgrp->reginfo, addr, TXN_MAINT_SIZE);
+ region->maint_off = R_OFFSET(&tmgrp->reginfo, addr);
+#endif
+ return (0);
+}
+
+/*
+ * __txn_findlastckp --
+ * Find the last checkpoint in the log, walking backwards from the
+ * beginning of the last log file. (The log system looked through
+ * the last log file when it started up.)
+ */
+static int
+__txn_findlastckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ DBT dbt;
+ int ret, t_ret;
+ u_int32_t rectype;
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /* Get the last LSN. */
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = logc->get(logc, &lsn, &dbt, DB_LAST)) != 0)
+ goto err;
+
+ /*
+ * Twiddle the last LSN so it points to the beginning of the last
+ * file; we know there's no checkpoint after that, since the log
+ * system already looked there.
+ */
+ lsn.offset = 0;
+
+ /* Read backwards, looking for checkpoints. */
+ while ((ret = logc->get(logc, &lsn, &dbt, DB_PREV)) == 0) {
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&rectype, dbt.data, sizeof(u_int32_t));
+ if (rectype == DB___txn_ckp) {
+ *lsnp = lsn;
+ break;
+ }
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * Not finding a checkpoint is not an error; there may not exist
+ * one in the log.
+ */
+ return ((ret == 0 || ret == DB_NOTFOUND) ? 0 : ret);
+}
+
+/*
+ * __txn_dbenv_refresh --
+ * Clean up after the transaction system on a close or failed open.
+ * Called only from __dbenv_refresh. (Formerly called __txn_close.)
+ *
+ * PUBLIC: int __txn_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__txn_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ u_int32_t txnid;
+ int ret, t_ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+ * This function can only be called once per process (i.e., not
+ * once per thread), so no synchronization is required.
+ *
+ * The caller is doing something wrong if close is called with
+	 * active transactions.  Try to abort any active transactions,
+	 * but it's quite likely the aborts will fail because recovery
+	 * won't find open files.  If we can't abort a transaction, we
+	 * panic; recovery then has to be run to get back to a known state.
+ */
+ if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) {
+ __db_err(dbenv,
+ "Error: closing the transaction region with active transactions");
+ ret = EINVAL;
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) {
+ txnid = txnp->txnid;
+ if ((t_ret = txnp->abort(txnp)) != 0) {
+ __db_err(dbenv,
+ "Unable to abort transaction 0x%x: %s",
+ txnid, db_strerror(t_ret));
+ ret = __db_panic(dbenv, t_ret);
+ break;
+ }
+ }
+ }
+
+ /* Flush the log. */
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = dbenv->log_flush(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the per-thread lock. */
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+
+ /* Detach from the region. */
+ if ((t_ret = __db_r_detach(dbenv, &tmgrp->reginfo, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, tmgrp);
+
+ dbenv->tx_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __txn_region_size --
+ * Return the amount of space needed for the txn region. Make the
+ * region large enough to hold txn_max transaction detail structures
+ * plus some space to hold thread handles and the beginning of the
+ * shalloc region and anything we need for mutex system resource
+ * recording.
+ */
+static size_t
+__txn_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = sizeof(DB_TXNREGION) +
+ dbenv->tx_max * sizeof(TXN_DETAIL) + 10 * 1024;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + TXN_MAINT_SIZE;
+#endif
+ return (s);
+}
+
+/*
+ * __txn_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __txn_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__txn_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_TXNREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __txn_id_set --
+ * Set the current transaction ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__txn_id_set(dbenv, cur_txnid, max_txnid)
+ DB_ENV *dbenv;
+ u_int32_t cur_txnid, max_txnid;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int ret;
+
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_id_set", DB_INIT_TXN);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ region->last_txnid = cur_txnid;
+ region->cur_maxid = max_txnid;
+
+ ret = 0;
+ if (cur_txnid < TXN_MINIMUM) {
+		__db_err(dbenv, "Current ID value %lu below minimum",
+		    (u_long)cur_txnid);
+ ret = EINVAL;
+ }
+ if (max_txnid < TXN_MINIMUM) {
+		__db_err(dbenv, "Maximum ID value %lu below minimum",
+		    (u_long)max_txnid);
+ ret = EINVAL;
+ }
+ return (ret);
+}
+#endif
diff --git a/libdb/txn/txn_stat.c b/libdb/txn/txn_stat.c
new file mode 100644
index 0000000..6228785
--- /dev/null
+++ b/libdb/txn/txn_stat.c
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+/*
+ * __txn_stat --
+ *
+ * PUBLIC: int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ */
+int
+__txn_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_TXN_STAT **statp;
+ u_int32_t flags;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ DB_TXN_STAT *stats;
+ TXN_DETAIL *txnp;
+ size_t nbytes;
+ u_int32_t ndx;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_stat", DB_INIT_TXN);
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->txn_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * Allocate for the maximum active transactions -- the DB_TXN_ACTIVE
+ * struct is small and the maximum number of active transactions is
+ * not going to be that large. Don't have to lock anything to look
+ * at the region's maximum active transactions value, it's read-only
+ * and never changes after the region is created.
+ */
+ nbytes = sizeof(DB_TXN_STAT) + sizeof(DB_TXN_ACTIVE) * region->maxtxns;
+ if ((ret = __os_umalloc(dbenv, nbytes, &stats)) != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_last_txnid = region->last_txnid;
+ stats->st_last_ckp = region->last_ckp;
+ stats->st_time_ckp = region->time_ckp;
+ stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1];
+
+ ndx = 0;
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
+ stats->st_txnarray[ndx].txnid = txnp->txnid;
+ if (txnp->parent == INVALID_ROFF)
+ stats->st_txnarray[ndx].parentid = TXN_INVALID;
+ else
+ stats->st_txnarray[ndx].parentid =
+ ((TXN_DETAIL *)R_ADDR(&mgr->reginfo,
+ txnp->parent))->txnid;
+ stats->st_txnarray[ndx].lsn = txnp->begin_lsn;
+ ndx++;
+ }
+
+ stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = mgr->reginfo.rp->size;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mgr->reginfo.rp->mutex.mutex_set_wait = 0;
+ mgr->reginfo.rp->mutex.mutex_set_nowait = 0;
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+ region->stat.st_maxnactive =
+ region->stat.st_nactive = stats->st_nactive;
+ }
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ *statp = stats;
+ return (0);
+}
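+
+/*
+ * Illustrative caller-side sketch: the statistics buffer is allocated with
+ * __os_umalloc on the application's behalf, so the application releases it
+ * with free() (or whatever allocator was configured via DB_ENV->set_alloc).
+ *
+ *	DB_TXN_STAT *sp;
+ *
+ *	if ((ret = dbenv->txn_stat(dbenv, &sp, 0)) == 0) {
+ *		printf("active: %lu\n", (u_long)sp->st_nactive);
+ *		free(sp);
+ *	}
+ */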
diff --git a/libdb/txn/txn_util.c b/libdb/txn/txn_util.c
new file mode 100644
index 0000000..238aac5
--- /dev/null
+++ b/libdb/txn/txn_util.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+typedef struct __txn_event TXN_EVENT;
+struct __txn_event {
+ TXN_EVENT_T op;
+ TAILQ_ENTRY(__txn_event) links;
+ union {
+ struct {
+ /* Delayed remove. */
+ char *name;
+ u_int8_t *fileid;
+ } r;
+ struct {
+ /* Lock event. */
+ DB_LOCK lock;
+ u_int32_t locker;
+ DB *dbp;
+ } t;
+ } u;
+};
+
+/*
+ * __txn_remevent --
+ *
+ * Creates a remove event that can be added to the commit list.
+ *
+ * PUBLIC: int __txn_remevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int8_t*));
+ */
+int
+__txn_remevent(dbenv, txn, name, fileid)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ u_int8_t *fileid;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ if ((ret = __os_strdup(dbenv, name, &e->u.r.name)) != 0)
+ goto err;
+
+	if (fileid != NULL) {
+		if ((ret = __os_calloc(dbenv,
+		    1, DB_FILE_ID_LEN, &e->u.r.fileid)) != 0) {
+			/* Free the name duplicated above before bailing. */
+			__os_free(dbenv, e->u.r.name);
+			goto err;
+		}
+		memcpy(e->u.r.fileid, fileid, DB_FILE_ID_LEN);
+	}
+
+ e->op = TXN_REMOVE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+
+err: if (e != NULL)
+ __os_free(dbenv, e);
+
+ return (ret);
+}
+
+/*
+ * __txn_lockevent --
+ *
+ * Add a lockevent to the commit-queue. The lock event indicates a locker
+ * trade.
+ *
+ * PUBLIC: int __txn_lockevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+ */
+int
+__txn_lockevent(dbenv, txn, dbp, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB *dbp;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ if (!LOCKING_ON(dbenv))
+ return (0);
+
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ e->u.t.locker = locker;
+ e->u.t.lock = *lock;
+ e->u.t.dbp = dbp;
+ e->op = TXN_TRADE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+}
+
+/*
+ * __txn_remlock --
+ * Remove a lock event because the locker is going away. We can remove
+ * by lock (using offset) or by locker_id (or by both).
+ *
+ * PUBLIC: void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+ */
+void
+__txn_remlock(dbenv, txn, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ TXN_EVENT *e, *next_e;
+
+ for (e = TAILQ_FIRST(&txn->events); e != NULL; e = next_e) {
+ next_e = TAILQ_NEXT(e, links);
+ if ((e->op != TXN_TRADE && e->op != TXN_TRADED) ||
+ (e->u.t.lock.off != lock->off && e->u.t.locker != locker))
+ continue;
+ TAILQ_REMOVE(&txn->events, e, links);
+ __os_free(dbenv, e);
+ }
+
+ return;
+}
+
+/*
+ * __txn_doevents --
+ * Process the list of events associated with a transaction. On commit,
+ * apply the events; on abort, just toss the entries.
+ *
+ * PUBLIC: int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+ */
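+/*
+ * DO_TRADE hands the recorded lock to the new locker with a DB_LOCK_TRADE
+ * request and, on success, repoints the DB handle's current locker id.
+ * DB_NOTFOUND just means the lock has already been released and is not an
+ * error.  Marking the entry TXN_TRADED keeps a later pass from attempting
+ * the trade a second time.
+ */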
+#define DO_TRADE do { \
+ memset(&req, 0, sizeof(req)); \
+ req.lock = e->u.t.lock; \
+ req.op = DB_LOCK_TRADE; \
+ t_ret = __lock_vec(dbenv, e->u.t.locker, 0, &req, 1, NULL); \
+ if (t_ret == 0) \
+ e->u.t.dbp->cur_lid = e->u.t.locker; \
+ else if (t_ret == DB_NOTFOUND) \
+ t_ret = 0; \
+ if (t_ret != 0 && ret == 0) \
+ ret = t_ret; \
+ e->op = TXN_TRADED; \
+} while (0)
+
+int
+__txn_doevents(dbenv, txn, is_commit, preprocess)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int is_commit, preprocess;
+{
+ DB_LOCKREQ req;
+ TXN_EVENT *e;
+ int ret, t_ret;
+
+ ret = 0;
+
+ /*
+ * This phase only gets called if we have a phase where we
+ * release read locks. Since not all paths will call this
+ * phase, we have to check for it below as well. So, when
+ * we do the trade, we update the opcode of the entry so that
+ * we don't try the trade again.
+ */
+ if (preprocess) {
+ for (e = TAILQ_FIRST(&txn->events);
+ e != NULL; e = TAILQ_NEXT(e, links)) {
+ if (e->op != TXN_TRADE)
+ continue;
+ DO_TRADE;
+ }
+ return (ret);
+ }
+
+ while ((e = TAILQ_FIRST(&txn->events)) != NULL) {
+ TAILQ_REMOVE(&txn->events, e, links);
+ if (!is_commit)
+ goto dofree;
+ switch (e->op) {
+ case TXN_REMOVE:
+ if (e->u.r.fileid != NULL) {
+ if ((t_ret = dbenv->memp_nameop(dbenv,
+ e->u.r.fileid,
+ NULL, e->u.r.name, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.fileid);
+ } else if ((t_ret =
+ __os_unlink(dbenv, e->u.r.name)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.name);
+ break;
+ case TXN_TRADE:
+ DO_TRADE;
+ /* Fall through */
+ case TXN_TRADED:
+ /* Downgrade the lock. */
+ if ((t_ret = __lock_downgrade(dbenv,
+ &e->u.t.lock, DB_LOCK_READ, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ break;
+ default:
+ /* This had better never happen. */
+ DB_ASSERT(0);
+ }
+dofree: __os_free(dbenv, e);
+ }
+
+ return (ret);
+}
diff --git a/libdb/xa/xa.c b/libdb/xa/xa.c
new file mode 100644
index 0000000..8cabbfa
--- /dev/null
+++ b/libdb/xa/xa.c
@@ -0,0 +1,539 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+static int __db_xa_close __P((char *, int, long));
+static int __db_xa_commit __P((XID *, int, long));
+static int __db_xa_complete __P((int *, int *, int, long));
+static int __db_xa_end __P((XID *, int, long));
+static int __db_xa_forget __P((XID *, int, long));
+static int __db_xa_open __P((char *, int, long));
+static int __db_xa_prepare __P((XID *, int, long));
+static int __db_xa_recover __P((XID *, long, int, long));
+static int __db_xa_rollback __P((XID *, int, long));
+static int __db_xa_start __P((XID *, int, long));
+static void __xa_txn_end __P((DB_TXN *));
+
+/*
+ * Possible flag values:
+ * Dynamic registration 0 => no dynamic registration
+ * TMREGISTER => dynamic registration
+ * Asynchronous operation 0 => no support for asynchrony
+ * TMUSEASYNC => async support
+ * Migration support 0 => migration of transactions across
+ * threads is possible
+ * TMNOMIGRATE => no migration across threads
+ */
+const struct xa_switch_t db_xa_switch = {
+ "Berkeley DB", /* name[RMNAMESZ] */
+ TMNOMIGRATE, /* flags */
+ 0, /* version */
+ __db_xa_open, /* xa_open_entry */
+ __db_xa_close, /* xa_close_entry */
+ __db_xa_start, /* xa_start_entry */
+ __db_xa_end, /* xa_end_entry */
+ __db_xa_rollback, /* xa_rollback_entry */
+ __db_xa_prepare, /* xa_prepare_entry */
+ __db_xa_commit, /* xa_commit_entry */
+ __db_xa_recover, /* xa_recover_entry */
+ __db_xa_forget, /* xa_forget_entry */
+ __db_xa_complete /* xa_complete_entry */
+};
+
+/*
+ * __db_xa_open --
+ * The open call in the XA protocol. The rmid field is an id number
+ * that the TM assigned us and will pass us on every xa call. We need to
+ * map that rmid number into a dbenv structure that we create during
+ * initialization. Since this id number is thread specific, we do not
+ * need to store it in shared memory. The file xa_map.c implements all
+ * such xa->db mappings.
+ * The xa_info field is instance specific information. We require
+ * that the value of DB_HOME be passed in xa_info. Since xa_info is the
+ * only thing that we get to pass to db_env_create, any config information
+ * will have to be done via a config file instead of via the db_env_create
+ * call.
+ */
+static int
+__db_xa_open(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* Verify if we already have this environment open. */
+ if (__db_rmid_to_env(rmid, &env) == 0)
+ return (XA_OK);
+ if (__os_calloc(env, 1, sizeof(DB_ENV), &env) != 0)
+ return (XAER_RMERR);
+
+ /* Open a new environment. */
+#define XA_FLAGS \
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN
+ if (db_env_create(&env, 0) != 0)
+ return (XAER_RMERR);
+ if (env->open(env, xa_info, XA_FLAGS, 0) != 0)
+ goto err;
+
+ /* Create the mapping. */
+ if (__db_map_rmid(rmid, env) != 0)
+ goto err;
+
+ /* Allocate space for the current transaction. */
+ if (__os_calloc(env, 1, sizeof(DB_TXN), &env->xa_txn) != 0)
+ goto err;
+ env->xa_txn->txnid = TXN_INVALID;
+
+ return (XA_OK);
+
+err: (void)env->close(env, 0);
+
+ return (XAER_RMERR);
+}
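+
+/*
+ * For illustration only: given the convention described above, the xa_open
+ * information string a TM passes in is simply the environment home, e.g.
+ * the (hypothetical) path "/var/appdata/dbenv"; any additional environment
+ * configuration has to come from a DB_CONFIG file in that directory rather
+ * than from db_env_create arguments.
+ */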
+
+/*
+ * __db_xa_close --
+ * The close call of the XA protocol. The only trickiness here
+ * is that if there are any active transactions, we must fail. It is
+ * *not* an error to call close on an environment that has already been
+ * closed (I am interpreting that to mean it's OK to call close on an
+ * environment that has never been opened).
+ */
+static int
+__db_xa_close(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ int ret, t_ret;
+
+ COMPQUIET(xa_info, NULL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XA_OK);
+
+ /* Check if there are any pending transactions. */
+ if (env->xa_txn != NULL && env->xa_txn->txnid != TXN_INVALID)
+ return (XAER_PROTO);
+
+ /* Destroy the mapping. */
+ ret = __db_unmap_rmid(rmid);
+
+ /* Discard space held for the current transaction. */
+ if (env->xa_txn != NULL)
+ __os_free(env, env->xa_txn);
+
+ /* Close the environment. */
+ if ((t_ret = env->close(env, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret == 0 ? XA_OK : XAER_RMERR);
+}
+
+/*
+ * __db_xa_start --
+ * Begin a transaction for the current resource manager.
+ */
+static int
+__db_xa_start(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+ int is_known;
+
+#define OK_FLAGS (TMJOIN | TMRESUME | TMNOWAIT | TMASYNC | TMNOFLAGS)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ if (LF_ISSET(TMJOIN) && LF_ISSET(TMRESUME))
+ return (XAER_INVAL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ is_known = __db_xid_to_txn(env, xid, &off) == 0;
+
+ if (is_known && !LF_ISSET(TMRESUME) && !LF_ISSET(TMJOIN))
+ return (XAER_DUPID);
+
+ if (!is_known && LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_NOTA);
+
+ /*
+ * This can't block, so we can ignore TMNOWAIT.
+ *
+ * Other error conditions: RMERR, RMFAIL, OUTSIDE, PROTO, RB*
+ */
+ if (is_known) {
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_SUSPENDED &&
+ !LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_PROTO);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+ td->xa_status = TXN_XA_STARTED;
+ } else {
+ if (__txn_xa_begin(env, env->xa_txn) != 0)
+ return (XAER_RMERR);
+ (void)__db_map_xid(env, xid, env->xa_txn->off);
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo,
+ env->xa_txn->off);
+ td->xa_status = TXN_XA_STARTED;
+ }
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_end --
+ * Disassociate the current transaction from the current process.
+ */
+static int
+__db_xa_end(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ DB_TXN *txn;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (flags != TMNOFLAGS && !LF_ISSET(TMSUSPEND | TMSUCCESS | TMFAIL))
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ txn = env->xa_txn;
+ if (off != txn->off)
+ return (XAER_PROTO);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->status == TXN_ABORTED)
+ return (XA_RBOTHER);
+
+ if (td->xa_status != TXN_XA_STARTED)
+ return (XAER_PROTO);
+
+ /* Update the shared memory last_lsn field */
+ td->last_lsn = txn->last_lsn;
+
+ /*
+ * If we ever support XA migration, we cannot keep SUSPEND/END
+ * status in the shared region; it would have to be process local.
+ */
+ if (LF_ISSET(TMSUSPEND))
+ td->xa_status = TXN_XA_SUSPENDED;
+ else
+ td->xa_status = TXN_XA_ENDED;
+
+ txn->txnid = TXN_INVALID;
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_prepare --
+ * Sync the log to disk so we can guarantee recoverability.
+ */
+static int
+__db_xa_prepare(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * As part of the prepare, we set the xa_status field to
+	 * reflect the fact that prepare has been called, and if
+ * it's ever called again, it's an error.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+
+ if (env->xa_txn->prepare(env->xa_txn, (u_int8_t *)xid->data) != 0)
+ return (XAER_RMERR);
+
+ td->xa_status = TXN_XA_PREPARED;
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_commit --
+ * Commit the transaction
+ */
+static int
+__db_xa_commit(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+#undef OK_FLAGS
+#define OK_FLAGS (TMNOFLAGS | TMNOWAIT | TMONEPHASE)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * We can verify this by examining the xa_status field.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ if (LF_ISSET(TMONEPHASE) &&
+ td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ if (!LF_ISSET(TMONEPHASE) && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+
+ if (env->xa_txn->commit(env->xa_txn, 0) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_recover --
+ * Returns a list of prepared and heuristically completed transactions.
+ *
+ * The return value is the number of xids placed into the xid array (less
+ * than or equal to the count parameter). The flags are going to indicate
+ * whether we are starting a scan or continuing one.
+ */
+static int
+__db_xa_recover(xids, count, rmid, flags)
+ XID *xids;
+ long count, flags;
+ int rmid;
+{
+ DB_ENV *env;
+ u_int32_t newflags;
+ long rval;
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (LF_ISSET(TMSTARTRSCAN))
+ newflags = DB_FIRST;
+ else if (LF_ISSET(TMENDRSCAN))
+ newflags = DB_LAST;
+ else
+ newflags = DB_NEXT;
+
+ rval = 0;
+ if (__txn_get_prepared(env, xids, NULL, count, &rval, newflags) != 0)
+ return (XAER_RMERR);
+ else
+ return (rval);
+}
+
+/*
+ * __db_xa_rollback
+ * Abort an XA transaction.
+ */
+static int
+__db_xa_rollback(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED
+ && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+ if (env->xa_txn->abort(env->xa_txn) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_forget --
+ * Forget about an XID for a transaction that was heuristically
+ * completed. Since we do not heuristically complete anything, I
+ * don't think we have to do anything here, but we should make sure
+ * that we reclaim the slots in the txnid table.
+ */
+static int
+__db_xa_forget(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /*
+ * If mapping is gone, then we're done.
+ */
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XA_OK);
+
+ __db_unmap_xid(env, xid, off);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_complete --
+ * Used to wait for asynchronous operations to complete. Since we're
+ * not doing asynch, this is an invalid operation.
+ */
+static int
+__db_xa_complete(handle, retval, rmid, flags)
+ int *handle, *retval, rmid;
+ long flags;
+{
+ COMPQUIET(handle, NULL);
+ COMPQUIET(retval, NULL);
+ COMPQUIET(rmid, 0);
+ COMPQUIET(flags, 0);
+
+ return (XAER_INVAL);
+}
+
+/*
+ * __xa_txn_end --
+ * Invalidate a transaction structure that was generated by __txn_continue.
+ */
+static void
+__xa_txn_end(txn)
+ DB_TXN *txn;
+{
+ if (txn != NULL)
+ txn->txnid = TXN_INVALID;
+}
diff --git a/libdb/xa/xa_db.c b/libdb/xa/xa_db.c
new file mode 100644
index 0000000..7035cab
--- /dev/null
+++ b/libdb/xa/xa_db.c
@@ -0,0 +1,182 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/xa.h"
+#include "dbinc/txn.h"
+
+static int __xa_close __P((DB *, u_int32_t));
+static int __xa_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+static int __xa_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+static int __xa_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __xa_open __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+static int __xa_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+
+typedef struct __xa_methods {
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+} XA_METHODS;
+
+/*
+ * __db_xa_create --
+ * DB XA constructor.
+ *
+ * PUBLIC: int __db_xa_create __P((DB *));
+ */
+int
+__db_xa_create(dbp)
+ DB *dbp;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ /*
+ * Interpose XA routines in front of any method that takes a TXN
+ * ID as an argument.
+ */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(XA_METHODS), &xam)) != 0)
+ return (ret);
+
+ dbp->xa_internal = xam;
+ xam->open = dbp->open;
+ dbp->open = __xa_open;
+ xam->close = dbp->close;
+ dbp->close = __xa_close;
+
+ return (0);
+}
+
+/*
+ * __xa_open --
+ * XA open wrapper.
+ */
+
+static int
+__xa_open(dbp, txn, name, subdb, type, flags, mode)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ xam = (XA_METHODS *)dbp->xa_internal;
+
+ if ((ret = xam->open(dbp, txn, name, subdb, type, flags, mode)) != 0)
+ return (ret);
+
+ xam->cursor = dbp->cursor;
+ xam->del = dbp->del;
+ xam->get = dbp->get;
+ xam->put = dbp->put;
+ dbp->cursor = __xa_cursor;
+ dbp->del = __xa_del;
+ dbp->get = __xa_get;
+ dbp->put = __xa_put;
+
+ return (0);
+}
+
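+/*
+ * Each interposed data-access method below substitutes the environment's
+ * current XA transaction (dbenv->xa_txn) when the caller passes a NULL
+ * transaction handle, and falls back to a NULL (non-transactional) handle
+ * when no XA transaction is currently active.
+ */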
+static int
+__xa_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+	return (((XA_METHODS *)dbp->xa_internal)->cursor(dbp, t, dbcp, flags));
+}
+
+static int
+__xa_del(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->del(dbp, t, key, flags));
+}
+
+static int
+__xa_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int (*real_close) __P((DB *, u_int32_t));
+
+ real_close = ((XA_METHODS *)dbp->xa_internal)->close;
+
+ __os_free(dbp->dbenv, dbp->xa_internal);
+ dbp->xa_internal = NULL;
+
+ return (real_close(dbp, flags));
+}
+
+static int
+__xa_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->get
+ (dbp, t, key, data, flags));
+}
+
+static int
+__xa_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->put
+ (dbp, t, key, data, flags));
+}
diff --git a/libdb/xa/xa_map.c b/libdb/xa/xa_map.c
new file mode 100644
index 0000000..d0141ed
--- /dev/null
+++ b/libdb/xa/xa_map.c
@@ -0,0 +1,167 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id$";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+/*
+ * This file contains all the mapping information that we need to support
+ * the DB/XA interface.
+ */
+
+/*
+ * __db_rmid_to_env
+ * Return the environment associated with a given XA rmid.
+ *
+ * PUBLIC: int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+ */
+int
+__db_rmid_to_env(rmid, envp)
+ int rmid;
+ DB_ENV **envp;
+{
+ DB_ENV *env;
+
+ env = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ if (env != NULL && env->xa_rmid == rmid) {
+ *envp = env;
+ return (0);
+ }
+
+ /*
+ * When we map an rmid, move that environment to be the first one in
+ * the list of environments, so we acquire the correct environment
+ * in DB->open.
+ */
+ for (; env != NULL; env = TAILQ_NEXT(env, links))
+ if (env->xa_rmid == rmid) {
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), env, links);
+ TAILQ_INSERT_HEAD(&DB_GLOBAL(db_envq), env, links);
+ *envp = env;
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * __db_xid_to_txn
+ * Return the txn that corresponds to this XID.
+ *
+ * PUBLIC: int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+ */
+int
+__db_xid_to_txn(dbenv, xid, offp)
+ DB_ENV *dbenv;
+ XID *xid;
+ size_t *offp;
+{
+ struct __txn_detail *td;
+
+ return (__txn_map_gid(dbenv, (u_int8_t *)xid->data, &td, offp));
+}
+
+/*
+ * __db_map_rmid
+ * Create a mapping between the specified rmid and environment.
+ *
+ * PUBLIC: int __db_map_rmid __P((int, DB_ENV *));
+ */
+int
+__db_map_rmid(rmid, env)
+ int rmid;
+ DB_ENV *env;
+{
+ env->xa_rmid = rmid;
+ TAILQ_INSERT_TAIL(&DB_GLOBAL(db_envq), env, links);
+ return (0);
+}
+
+/*
+ * __db_unmap_rmid
+ * Destroy the mapping for the given rmid.
+ *
+ * PUBLIC: int __db_unmap_rmid __P((int));
+ */
+int
+__db_unmap_rmid(rmid)
+ int rmid;
+{
+ DB_ENV *e;
+
+	for (e = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+	    e != NULL && e->xa_rmid != rmid;
+	    e = TAILQ_NEXT(e, links));
+
+ if (e == NULL)
+ return (EINVAL);
+
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), e, links);
+ return (0);
+}
+
+/*
+ * __db_map_xid
+ * Create a mapping between this XID and the transaction at
+ * "off" in the shared region.
+ *
+ * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, size_t));
+ */
+int
+__db_map_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ REGINFO *infop;
+ TXN_DETAIL *td;
+
+ infop = &((DB_TXNMGR *)env->tx_handle)->reginfo;
+ td = (TXN_DETAIL *)R_ADDR(infop, off);
+
+ R_LOCK(env, infop);
+ memcpy(td->xid, xid->data, XIDDATASIZE);
+ td->bqual = (u_int32_t)xid->bqual_length;
+ td->gtrid = (u_int32_t)xid->gtrid_length;
+ td->format = (int32_t)xid->formatID;
+ R_UNLOCK(env, infop);
+
+ return (0);
+}
+
+/*
+ * __db_unmap_xid
+ * Destroy the mapping for the specified XID.
+ *
+ * PUBLIC: void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+ */
+
+void
+__db_unmap_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ TXN_DETAIL *td;
+
+ COMPQUIET(xid, NULL);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ memset(td->xid, 0, sizeof(td->xid));
+}